sys/vm/vm_page.c
[3,128 lines elided]
	for (i = 0; i < bq->bq_cnt; i++) {
		m = bq->bq_pa[i];
		if (__predict_false(m->queue != queue))
			continue;
		vm_pqbatch_process_page(pq, m);
	}
	vm_batchqueue_init(bq);
}
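/*
 * Example (reviewer sketch, not part of this change): the per-CPU batch
 * queue consumed above is a fixed-size array of page pointers plus a
 * count.  The shape below is illustrative only; the authoritative
 * definition (and the actual array size) lives in vm/vm_pagequeue.h:
 *
 *	struct vm_batchqueue {
 *		vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
 *		int		bq_cnt;
 *	};
 *
 * vm_batchqueue_init() just resets bq_cnt to zero; stale bq_pa entries
 * are overwritten by later insertions.
 */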
-static void
-vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
+void
+vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
{
	struct vm_batchqueue *bq;
	struct vm_pagequeue *pq;
	int domain;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("page %p is unmanaged", m));
	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->object == NULL,
[31 lines elided]
	else {
		KASSERT((m->aflags & PGA_ENQUEUED) == 0,
		    ("page %p is enqueued with invalid queue index", m));
	}
	vm_pagequeue_unlock(pq);
	critical_exit();
}
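/*
 * Example (reviewer sketch, not part of this change): the canonical
 * caller pattern under the new name.  The page lock must be held (or,
 * for freed pages, m->object must be NULL), a queue operation flag is
 * set first, and the queue index passed must match m->queue:
 *
 *	if ((m->aflags & PGA_REQUEUE) == 0)
 *		vm_page_aflag_set(m, PGA_REQUEUE);
 *	vm_page_pqbatch_submit(m, queue);
 *
 * The submission is deferred: the page is staged in this CPU's batch
 * queue and applied later, under the page queue lock, by
 * vm_pqbatch_process_page().
 */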
/*
- * vm_page_drain_pqbatch: [ internal use only ]
+ * vm_page_pqbatch_drain: [ internal use only ]
 *
 * Force all per-CPU page queue batch queues to be drained.  This is
 * intended for use in severe memory shortages, to ensure that pages
 * do not remain stuck in the batch queues.
 */
void
-vm_page_drain_pqbatch(void)
+vm_page_pqbatch_drain(void)
{
	struct thread *td;
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	int cpu, domain, queue;

	td = curthread;
	CPU_FOREACH(cpu) {
[56 lines elided; context: vm_page_dequeue_deferred(vm_page_t m)]
	/*
	 * Set PGA_DEQUEUE if it is not already set to handle a concurrent call
	 * to vm_page_dequeue_deferred_free().  In particular, avoid modifying
	 * the page's queue state once vm_page_dequeue_deferred_free() has been
	 * called.  In the event of a race, two batch queue entries for the page
	 * will be created, but the second will have no effect.
	 */
	if (vm_page_pqstate_cmpset(m, queue, queue, PGA_DEQUEUE, PGA_DEQUEUE))
-		vm_pqbatch_submit_page(m, queue);
+		vm_page_pqbatch_submit(m, queue);
}
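/*
 * Example (reviewer sketch, not part of this change):
 * vm_page_pqstate_cmpset(m, oldq, newq, fflags, nflags) behaves roughly
 * like the pseudo-code below, executed as a single atomic
 * compare-and-swap on the word holding both m->queue and m->aflags;
 * this rendering is a simplification, not the real implementation:
 *
 *	if (m->queue != oldq || (m->aflags & fflags) != 0)
 *		return (false);
 *	m->queue = newq;
 *	m->aflags |= nflags;
 *	return (true);
 *
 * In the call above it refuses to act once PGA_DEQUEUE is already set,
 * which is what makes the race with vm_page_dequeue_deferred_free()
 * harmless.
 */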
/*
 * A variant of vm_page_dequeue_deferred() that does not assert the page
 * lock and is only to be called from vm_page_free_prep().  Because the
 * page is being freed, we can assume that nothing other than the page
 * daemon is scheduling queue operations on this page, so we get for
 * free the mutual exclusion that is otherwise provided by the page lock.
 * To handle races, the page daemon must take care to atomically check
 * for PGA_DEQUEUE when updating queue state.
 */
static void
vm_page_dequeue_deferred_free(vm_page_t m)
{
	uint8_t queue;

	KASSERT(m->ref_count == 0, ("page %p has references", m));

	if ((m->aflags & PGA_DEQUEUE) != 0)
		return;
	atomic_thread_fence_acq();
	if ((queue = m->queue) == PQ_NONE)
		return;
	vm_page_aflag_set(m, PGA_DEQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
}
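/*
 * Example (reviewer sketch, not part of this change): the acquire fence
 * above orders the two unlocked loads, so PGA_DEQUEUE is tested before
 * m->queue is read.  Schematically:
 *
 *	if ((m->aflags & PGA_DEQUEUE) != 0)	(load 1)
 *		return;
 *	atomic_thread_fence_acq();		(load 1 before load 2)
 *	if ((queue = m->queue) == PQ_NONE)	(load 2)
 *		return;
 *
 * If another thread sets PGA_DEQUEUE after load 1, the worst case is a
 * second, redundant batch queue entry, as described above.
 */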
/*
 * vm_page_dequeue:
 *
 * Remove the page from whichever page queue it's in, if any.
 * The page must either be locked or unallocated.  This constraint
 * ensures that the queue state of the page will remain consistent
[58 lines elided; context: vm_page_enqueue(vm_page_t m, uint8_t queue)]
	vm_page_assert_locked(m);
	KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
	    ("%s: page %p is already enqueued", __func__, m));

	m->queue = queue;
	if ((m->aflags & PGA_REQUEUE) == 0)
		vm_page_aflag_set(m, PGA_REQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
}
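/*
 * Example (reviewer sketch, not part of this change): a hypothetical
 * caller of the helper above, holding the page lock on a page known not
 * to be on any queue:
 *
 *	vm_page_lock(m);
 *	if (m->queue == PQ_NONE)
 *		vm_page_enqueue(m, PQ_ACTIVE);
 *	vm_page_unlock(m);
 *
 * Note that the page is not linked into the queue here; PGA_REQUEUE is
 * set and the actual insertion happens during batch processing.
 */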
/*
 * vm_page_requeue: [ internal use only ]
 *
 * Schedule a requeue of the given page.
 *
 * The page must be locked.
 */
void
vm_page_requeue(vm_page_t m)
{

	vm_page_assert_locked(m);
	KASSERT(vm_page_queue(m) != PQ_NONE,
	    ("%s: page %p is not logically enqueued", __func__, m));

	if ((m->aflags & PGA_REQUEUE) == 0)
		vm_page_aflag_set(m, PGA_REQUEUE);
-	vm_pqbatch_submit_page(m, atomic_load_8(&m->queue));
+	vm_page_pqbatch_submit(m, atomic_load_8(&m->queue));
}
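/*
 * Example (reviewer sketch, not part of this change): m->queue is a
 * uint8_t that concurrent batch processing may change underneath us, so
 * it is re-read with atomic_load_8() rather than a plain access.  A
 * stale value is tolerated because the processing loop near the top of
 * this change revalidates the queue index under the page queue lock:
 *
 *	if (__predict_false(m->queue != queue))
 *		continue;
 */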
/*
 * vm_page_swapqueue: [ internal use only ]
 *
 * Move the page from one queue to another, or to the tail of its
 * current queue, in the face of a possible concurrent call to
 * vm_page_dequeue_deferred_free().
[11 lines elided; context: vm_page_swapqueue(vm_page_t m, int oldq, int newq)]
	if (oldq == newq) {
		/*
		 * Atomically set PGA_REQUEUE if the page belongs to the
		 * specified queue and does not have PGA_DEQUEUE set.
		 */
		if (vm_page_pqstate_cmpset(m, oldq, newq,
		    PGA_DEQUEUE | PGA_REQUEUE, PGA_REQUEUE))
-			vm_pqbatch_submit_page(m, newq);
+			vm_page_pqbatch_submit(m, newq);
		return;
	}

	/*
	 * Atomically update the queue field and set PGA_REQUEUE while
	 * ensuring that PGA_DEQUEUE has not been set.
	 */
	pq = &vm_pagequeue_domain(m)->vmd_pagequeues[oldq];
	vm_pagequeue_lock(pq);
	if (!vm_page_pqstate_cmpset(m, oldq, newq, PGA_DEQUEUE, PGA_REQUEUE)) {
		vm_pagequeue_unlock(pq);
		return;
	}
	if ((m->aflags & PGA_ENQUEUED) != 0) {
		vm_pagequeue_remove(pq, m);
		vm_page_aflag_clear(m, PGA_ENQUEUED);
	}
	vm_pagequeue_unlock(pq);
-	vm_pqbatch_submit_page(m, newq);
+	vm_page_pqbatch_submit(m, newq);
}
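/*
 * Example (reviewer sketch, not part of this change): a plausible page
 * daemon use of the routine above, moving a dirty page from the
 * inactive queue to the laundry queue; the call silently becomes a
 * no-op if the page is concurrently freed and PGA_DEQUEUE is set:
 *
 *	vm_page_swapqueue(m, PQ_INACTIVE, PQ_LAUNDRY);
 *
 * With oldq == newq the routine instead degenerates to a requeue to the
 * queue tail, skipping the page queue lock entirely.
 */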
/*
 * vm_page_free_prep:
 *
 * Prepares the given page to be put on the free list,
 * disassociating it from any VM object.  The caller may return
 * the page to the free list only if this function returns true.
[352 lines elided; context: _vm_page_deactivate_noreuse(vm_page_t m)]
	vm_page_assert_locked(m);

	if (!vm_page_inactive(m)) {
		vm_page_dequeue(m);
		m->queue = PQ_INACTIVE;
	}
	if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
		vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
-	vm_pqbatch_submit_page(m, PQ_INACTIVE);
+	vm_page_pqbatch_submit(m, PQ_INACTIVE);
}
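/*
 * Example (reviewer sketch, not part of this change): PGA_REQUEUE_HEAD
 * asks batch processing to insert the page at the head of PQ_INACTIVE
 * rather than the tail, making it one of the first reclamation
 * candidates.  Callers reach this helper through the wrapper below,
 * e.g. when cached data is known not to be reused:
 *
 *	vm_page_deactivate_noreuse(m);
 *
 * with the page lock held.
 */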
void
vm_page_deactivate_noreuse(vm_page_t m)
{

	KASSERT(m->object != NULL,
	    ("vm_page_deactivate_noreuse: page %p has no object", m));
[922 lines elided]