Changeset View
Standalone View
vm/vm_page.c
Show First 20 Lines • Show All 101 Lines • ▼ Show 20 Lines | |||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <sys/vmmeter.h> | #include <sys/vmmeter.h> | ||||
#include <sys/vnode.h> | #include <sys/vnode.h> | ||||
#include <vm/vm.h> | #include <vm/vm.h> | ||||
#include <vm/pmap.h> | #include <vm/pmap.h> | ||||
#include <vm/vm_param.h> | #include <vm/vm_param.h> | ||||
#include <vm/vm_domain.h> | |||||
#include <vm/vm_kern.h> | #include <vm/vm_kern.h> | ||||
#include <vm/vm_object.h> | #include <vm/vm_object.h> | ||||
#include <vm/vm_page.h> | #include <vm/vm_page.h> | ||||
#include <vm/vm_pageout.h> | #include <vm/vm_pageout.h> | ||||
#include <vm/vm_pager.h> | #include <vm/vm_pager.h> | ||||
#include <vm/vm_phys.h> | #include <vm/vm_phys.h> | ||||
#include <vm/vm_radix.h> | #include <vm/vm_radix.h> | ||||
#include <vm/vm_reserv.h> | #include <vm/vm_reserv.h> | ||||
▲ Show 20 Lines • Show All 1,454 Lines • ▼ Show 20 Lines | |||||
vm_page_t | vm_page_t | ||||
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) | vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) | ||||
{ | { | ||||
return (vm_page_alloc_after(object, pindex, req, object != NULL ? | return (vm_page_alloc_after(object, pindex, req, object != NULL ? | ||||
vm_radix_lookup_le(&object->rtree, pindex) : NULL)); | vm_radix_lookup_le(&object->rtree, pindex) : NULL)); | ||||
} | } | ||||
vm_page_t | |||||
vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain, | |||||
int req) | |||||
{ | |||||
return (vm_page_alloc_domain_after(object, pindex, domain, req, | |||||
object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) : | |||||
NULL)); | |||||
} | |||||
/* | /* | ||||
* Allocate a page in the specified object with the given page index. To | * Allocate a page in the specified object with the given page index. To | ||||
* optimize insertion of the page into the object, the caller must also specify | * optimize insertion of the page into the object, the caller must also specify | ||||
* the resident page in the object with largest index smaller than the given | * the resident page in the object with largest index smaller than the given | ||||
* page index, or NULL if no such page exists. | * page index, or NULL if no such page exists. | ||||
*/ | */ | ||||
vm_page_t | vm_page_t | ||||
vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, int req, | vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex, | ||||
vm_page_t mpred) | int req, vm_page_t mpred) | ||||
kib: Perhaps these two lines can be kept as is, without reformatting. | |||||
{ | { | ||||
struct vm_domain_iterator vi; | |||||
vm_page_t m; | vm_page_t m; | ||||
int domain, wait; | |||||
m = NULL; | |||||
vm_policy_iterator_init(&vi); | |||||
wait = req & (VM_ALLOC_WAITFAIL | VM_ALLOC_WAITOK); | |||||
req &= ~wait; | |||||
while ((vm_domain_iterator_run(&vi, &domain)) == 0) { | |||||
if (vm_domain_iterator_isdone(&vi)) | |||||
req |= wait; | |||||
kibUnsubmitted Not Done Inline ActionsSo assume that all non-last domains failed to provide a page to allocate, and the request is WAITOK. In this case, only last domain in the iteration is retried, and we would only return the page when that domain gets some free page. IMO this is not right, we should restart the whole iteration for WAITOK. With the current arrangements of the counters, the patch is fine for WAITFAIL. kib: So assume that all non-last domains failed to provide a page to allocate, and the request is… | |||||
jeffAuthorUnsubmitted Not Done Inline ActionsYou are either going to block on the first or last in the list. You could convert to WAITFAIL and rescan for WAITOK. I think waiting for multiple domains to reclaim memory is going to be very complex and not worth it. With my page daemon refactoring I can survive extremely high memory pressure and never block. I would prefer to refine this when I replace the policy and iterators. I want to do that after I refactor the VM APIs. The new iterators could provide more facilities for managing consistent policies in blocking allocations. For now this works so unless you strongly object I would prefer to do it as a later action. jeff: You are either going to block on the first or last in the list. You could convert to WAITFAIL… | |||||
kibUnsubmitted Not Done Inline ActionsMy point is not about waiting only for the last domain. Rather, I am stating that you would allocate from domains 0,1,2,...,n<all fails, wait>n,n,n,n,.... In other words, after the the wait, iteration should be restarted. kib: My point is not about waiting only for the last domain. Rather, I am stating that you would… | |||||
m = vm_page_alloc_domain_after(object, pindex, domain, req, | |||||
mpred); | |||||
if (m != NULL) | |||||
break; | |||||
} | |||||
vm_policy_iterator_finish(&vi); | |||||
return (m); | |||||
} | |||||
vm_page_t | |||||
vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, | |||||
int req, vm_page_t mpred) | |||||
{ | |||||
vm_page_t m; | |||||
int flags, req_class; | int flags, req_class; | ||||
u_int free_count; | u_int free_count; | ||||
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && | KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && | ||||
(object != NULL || (req & VM_ALLOC_SBUSY) == 0) && | (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && | ||||
((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != | ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != | ||||
(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), | (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)), | ||||
("inconsistent object(%p)/req(%x)", object, req)); | ("inconsistent object(%p)/req(%x)", object, req)); | ||||
Show All 13 Lines | vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain, | ||||
if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) | if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) | ||||
req_class = VM_ALLOC_SYSTEM; | req_class = VM_ALLOC_SYSTEM; | ||||
/* | /* | ||||
* Allocate a page if the number of free pages exceeds the minimum | * Allocate a page if the number of free pages exceeds the minimum | ||||
* for the request class. | * for the request class. | ||||
*/ | */ | ||||
again: | again: | ||||
m = NULL; | |||||
mtx_lock(&vm_page_queue_free_mtx); | mtx_lock(&vm_page_queue_free_mtx); | ||||
if (vm_cnt.v_free_count > vm_cnt.v_free_reserved || | if (vm_cnt.v_free_count > vm_cnt.v_free_reserved || | ||||
(req_class == VM_ALLOC_SYSTEM && | (req_class == VM_ALLOC_SYSTEM && | ||||
vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) || | vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) || | ||||
(req_class == VM_ALLOC_INTERRUPT && | (req_class == VM_ALLOC_INTERRUPT && | ||||
vm_cnt.v_free_count > 0)) { | vm_cnt.v_free_count > 0)) { | ||||
/* | /* | ||||
* Can we allocate the page from a reservation? | * Can we allocate the page from a reservation? | ||||
*/ | */ | ||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
if (object == NULL || (object->flags & (OBJ_COLORED | | if (object == NULL || (object->flags & (OBJ_COLORED | | ||||
OBJ_FICTITIOUS)) != OBJ_COLORED || (m = | OBJ_FICTITIOUS)) != OBJ_COLORED || (m = | ||||
vm_reserv_alloc_page(object, pindex, mpred)) == NULL) | vm_reserv_alloc_page(object, pindex, domain, | ||||
mpred)) == NULL) | |||||
#endif | #endif | ||||
{ | { | ||||
/* | /* | ||||
* If not, allocate it from the free page queues. | * If not, allocate it from the free page queues. | ||||
*/ | */ | ||||
m = vm_phys_alloc_pages(object != NULL ? | m = vm_phys_alloc_pages(domain, object != NULL ? | ||||
VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0); | VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0); | ||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
if (m == NULL && vm_reserv_reclaim_inactive()) { | if (m == NULL && vm_reserv_reclaim_inactive(domain)) { | ||||
m = vm_phys_alloc_pages(object != NULL ? | m = vm_phys_alloc_pages(domain, | ||||
object != NULL ? | |||||
VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, | VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, | ||||
0); | 0); | ||||
} | } | ||||
#endif | #endif | ||||
} | } | ||||
} else { | } | ||||
if (m == NULL) { | |||||
/* | /* | ||||
* Not allocatable, give up. | * Not allocatable, give up. | ||||
*/ | */ | ||||
if (vm_page_alloc_fail(object, req)) | if (vm_page_alloc_fail(object, req)) | ||||
goto again; | goto again; | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 111 Lines • ▼ Show 20 Lines | |||||
* | * | ||||
* This routine may not sleep. | * This routine may not sleep. | ||||
*/ | */ | ||||
vm_page_t | vm_page_t | ||||
vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, | vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, | ||||
u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, | u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, | ||||
vm_paddr_t boundary, vm_memattr_t memattr) | vm_paddr_t boundary, vm_memattr_t memattr) | ||||
{ | { | ||||
struct vm_domain_iterator vi; | |||||
vm_page_t m; | |||||
int domain, wait; | |||||
m = NULL; | |||||
vm_policy_iterator_init(&vi); | |||||
wait = req & (VM_ALLOC_WAITFAIL | VM_ALLOC_WAITOK); | |||||
req &= ~wait; | |||||
while ((vm_domain_iterator_run(&vi, &domain)) == 0) { | |||||
markjUnsubmitted Not Done Inline ActionsStyle nit: unneeded parens. Ditto in vm_page_alloc_after(). markj: Style nit: unneeded parens. Ditto in vm_page_alloc_after(). | |||||
if (vm_domain_iterator_isdone(&vi)) | |||||
req |= wait; | |||||
m = vm_page_alloc_contig_domain(object, pindex, domain, req, | |||||
npages, low, high, alignment, boundary, memattr); | |||||
if (m != NULL) | |||||
break; | |||||
} | |||||
vm_policy_iterator_finish(&vi); | |||||
return (m); | |||||
} | |||||
vm_page_t | |||||
vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain, | |||||
int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment, | |||||
vm_paddr_t boundary, vm_memattr_t memattr) | |||||
{ | |||||
vm_page_t m, m_ret, mpred; | vm_page_t m, m_ret, mpred; | ||||
u_int busy_lock, flags, oflags; | u_int busy_lock, flags, oflags; | ||||
int req_class; | int req_class; | ||||
mpred = NULL; /* XXX: pacify gcc */ | mpred = NULL; /* XXX: pacify gcc */ | ||||
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && | KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) && | ||||
(object != NULL || (req & VM_ALLOC_SBUSY) == 0) && | (object != NULL || (req & VM_ALLOC_SBUSY) == 0) && | ||||
((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != | ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) != | ||||
Show All 23 Lines | KASSERT(mpred == NULL || mpred->pindex != pindex, | ||||
("vm_page_alloc_contig: pindex already allocated")); | ("vm_page_alloc_contig: pindex already allocated")); | ||||
} | } | ||||
/* | /* | ||||
* Can we allocate the pages without the number of free pages falling | * Can we allocate the pages without the number of free pages falling | ||||
* below the lower bound for the allocation class? | * below the lower bound for the allocation class? | ||||
*/ | */ | ||||
again: | again: | ||||
m_ret = NULL; | |||||
mtx_lock(&vm_page_queue_free_mtx); | mtx_lock(&vm_page_queue_free_mtx); | ||||
if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved || | if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved || | ||||
(req_class == VM_ALLOC_SYSTEM && | (req_class == VM_ALLOC_SYSTEM && | ||||
vm_cnt.v_free_count >= npages + vm_cnt.v_interrupt_free_min) || | vm_cnt.v_free_count >= npages + vm_cnt.v_interrupt_free_min) || | ||||
(req_class == VM_ALLOC_INTERRUPT && | (req_class == VM_ALLOC_INTERRUPT && | ||||
vm_cnt.v_free_count >= npages)) { | vm_cnt.v_free_count >= npages)) { | ||||
/* | /* | ||||
* Can we allocate the pages from a reservation? | * Can we allocate the pages from a reservation? | ||||
*/ | */ | ||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
retry: | retry: | ||||
if (object == NULL || (object->flags & OBJ_COLORED) == 0 || | if (object == NULL || (object->flags & OBJ_COLORED) == 0 || | ||||
(m_ret = vm_reserv_alloc_contig(object, pindex, npages, | (m_ret = vm_reserv_alloc_contig(object, pindex, domain, | ||||
low, high, alignment, boundary, mpred)) == NULL) | npages, low, high, alignment, boundary, mpred)) == NULL) | ||||
#endif | #endif | ||||
/* | /* | ||||
* If not, allocate them from the free page queues. | * If not, allocate them from the free page queues. | ||||
*/ | */ | ||||
m_ret = vm_phys_alloc_contig(npages, low, high, | m_ret = vm_phys_alloc_contig(domain, npages, low, high, | ||||
alignment, boundary); | alignment, boundary); | ||||
} else { | #if VM_NRESERVLEVEL > 0 | ||||
if (m_ret == NULL && vm_reserv_reclaim_contig( | |||||
domain, npages, low, high, alignment, boundary)) | |||||
goto retry; | |||||
#endif | |||||
} | |||||
if (m_ret == NULL) { | |||||
if (vm_page_alloc_fail(object, req)) | if (vm_page_alloc_fail(object, req)) | ||||
goto again; | goto again; | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
if (m_ret != NULL) | |||||
vm_phys_freecnt_adj(m_ret, -npages); | vm_phys_freecnt_adj(m_ret, -npages); | ||||
else { | |||||
#if VM_NRESERVLEVEL > 0 | |||||
if (vm_reserv_reclaim_contig(npages, low, high, alignment, | |||||
boundary)) | |||||
goto retry; | |||||
#endif | |||||
} | |||||
mtx_unlock(&vm_page_queue_free_mtx); | mtx_unlock(&vm_page_queue_free_mtx); | ||||
if (m_ret == NULL) | |||||
return (NULL); | |||||
for (m = m_ret; m < &m_ret[npages]; m++) | for (m = m_ret; m < &m_ret[npages]; m++) | ||||
vm_page_alloc_check(m); | vm_page_alloc_check(m); | ||||
/* | /* | ||||
* Initialize the pages. Only the PG_ZERO flag is inherited. | * Initialize the pages. Only the PG_ZERO flag is inherited. | ||||
*/ | */ | ||||
flags = 0; | flags = 0; | ||||
if ((req & VM_ALLOC_ZERO) != 0) | if ((req & VM_ALLOC_ZERO) != 0) | ||||
▲ Show 20 Lines • Show All 97 Lines • ▼ Show 20 Lines | |||||
* VM_ALLOC_WIRED wire the allocated page | * VM_ALLOC_WIRED wire the allocated page | ||||
* VM_ALLOC_ZERO prefer a zeroed page | * VM_ALLOC_ZERO prefer a zeroed page | ||||
* | * | ||||
* This routine may not sleep. | * This routine may not sleep. | ||||
*/ | */ | ||||
vm_page_t | vm_page_t | ||||
vm_page_alloc_freelist(int flind, int req) | vm_page_alloc_freelist(int flind, int req) | ||||
{ | { | ||||
struct vm_domain_iterator vi; | |||||
vm_page_t m; | vm_page_t m; | ||||
int domain, wait; | |||||
m = NULL; | |||||
vm_policy_iterator_init(&vi); | |||||
wait = req & (VM_ALLOC_WAITFAIL | VM_ALLOC_WAITOK); | |||||
req &= ~wait; | |||||
while ((vm_domain_iterator_run(&vi, &domain)) == 0) { | |||||
if (vm_domain_iterator_isdone(&vi)) | |||||
req |= wait; | |||||
m = vm_page_alloc_freelist_domain(domain, flind, req); | |||||
if (m != NULL) | |||||
break; | |||||
} | |||||
vm_policy_iterator_finish(&vi); | |||||
return (m); | |||||
} | |||||
vm_page_t | |||||
vm_page_alloc_freelist_domain(int domain, int flind, int req) | |||||
{ | |||||
vm_page_t m; | |||||
u_int flags, free_count; | u_int flags, free_count; | ||||
int req_class; | int req_class; | ||||
req_class = req & VM_ALLOC_CLASS_MASK; | req_class = req & VM_ALLOC_CLASS_MASK; | ||||
/* | /* | ||||
* The page daemon is allowed to dig deeper into the free page list. | * The page daemon is allowed to dig deeper into the free page list. | ||||
*/ | */ | ||||
if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) | if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT) | ||||
req_class = VM_ALLOC_SYSTEM; | req_class = VM_ALLOC_SYSTEM; | ||||
/* | /* | ||||
* Do not allocate reserved pages unless the req has asked for it. | * Do not allocate reserved pages unless the req has asked for it. | ||||
*/ | */ | ||||
again: | again: | ||||
mtx_lock(&vm_page_queue_free_mtx); | mtx_lock(&vm_page_queue_free_mtx); | ||||
if (vm_cnt.v_free_count > vm_cnt.v_free_reserved || | if (vm_cnt.v_free_count > vm_cnt.v_free_reserved || | ||||
(req_class == VM_ALLOC_SYSTEM && | (req_class == VM_ALLOC_SYSTEM && | ||||
vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) || | vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) || | ||||
(req_class == VM_ALLOC_INTERRUPT && | (req_class == VM_ALLOC_INTERRUPT && | ||||
vm_cnt.v_free_count > 0)) { | vm_cnt.v_free_count > 0)) | ||||
m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0); | m = vm_phys_alloc_freelist_pages(domain, flind, | ||||
} else { | VM_FREEPOOL_DIRECT, 0); | ||||
if (m == NULL) { | |||||
if (vm_page_alloc_fail(NULL, req)) | if (vm_page_alloc_fail(NULL, req)) | ||||
goto again; | goto again; | ||||
return (NULL); | return (NULL); | ||||
} | } | ||||
if (m == NULL) { | |||||
mtx_unlock(&vm_page_queue_free_mtx); | |||||
return (NULL); | |||||
} | |||||
free_count = vm_phys_freecnt_adj(m, -1); | free_count = vm_phys_freecnt_adj(m, -1); | ||||
mtx_unlock(&vm_page_queue_free_mtx); | mtx_unlock(&vm_page_queue_free_mtx); | ||||
vm_page_alloc_check(m); | vm_page_alloc_check(m); | ||||
/* | /* | ||||
* Initialize the page. Only the PG_ZERO flag is inherited. | * Initialize the page. Only the PG_ZERO flag is inherited. | ||||
*/ | */ | ||||
m->aflags = 0; | m->aflags = 0; | ||||
▲ Show 20 Lines • Show All 629 Lines • ▼ Show 20 Lines | |||||
struct vm_pagequeue * | struct vm_pagequeue * | ||||
vm_page_pagequeue(vm_page_t m) | vm_page_pagequeue(vm_page_t m) | ||||
{ | { | ||||
if (vm_page_in_laundry(m)) | if (vm_page_in_laundry(m)) | ||||
return (&vm_dom[0].vmd_pagequeues[m->queue]); | return (&vm_dom[0].vmd_pagequeues[m->queue]); | ||||
else | else | ||||
return (&vm_phys_domain(m)->vmd_pagequeues[m->queue]); | return (&vm_page_domain(m)->vmd_pagequeues[m->queue]); | ||||
} | } | ||||
/* | /* | ||||
* vm_page_dequeue: | * vm_page_dequeue: | ||||
* | * | ||||
* Remove the given page from its current page queue. | * Remove the given page from its current page queue. | ||||
* | * | ||||
* The page must be locked. | * The page must be locked. | ||||
▲ Show 20 Lines • Show All 48 Lines • ▼ Show 20 Lines | vm_page_enqueue(uint8_t queue, vm_page_t m) | ||||
vm_page_lock_assert(m, MA_OWNED); | vm_page_lock_assert(m, MA_OWNED); | ||||
KASSERT(queue < PQ_COUNT, | KASSERT(queue < PQ_COUNT, | ||||
("vm_page_enqueue: invalid queue %u request for page %p", | ("vm_page_enqueue: invalid queue %u request for page %p", | ||||
queue, m)); | queue, m)); | ||||
if (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE) | if (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE) | ||||
pq = &vm_dom[0].vmd_pagequeues[queue]; | pq = &vm_dom[0].vmd_pagequeues[queue]; | ||||
else | else | ||||
pq = &vm_phys_domain(m)->vmd_pagequeues[queue]; | pq = &vm_page_domain(m)->vmd_pagequeues[queue]; | ||||
vm_pagequeue_lock(pq); | vm_pagequeue_lock(pq); | ||||
m->queue = queue; | m->queue = queue; | ||||
TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); | TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); | ||||
vm_pagequeue_cnt_inc(pq); | vm_pagequeue_cnt_inc(pq); | ||||
vm_pagequeue_unlock(pq); | vm_pagequeue_unlock(pq); | ||||
} | } | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 336 Lines • ▼ Show 20 Lines | _vm_page_deactivate(vm_page_t m, boolean_t noreuse) | ||||
/* | /* | ||||
* Ignore if the page is already inactive, unless it is unlikely to be | * Ignore if the page is already inactive, unless it is unlikely to be | ||||
* reactivated. | * reactivated. | ||||
*/ | */ | ||||
if ((queue = m->queue) == PQ_INACTIVE && !noreuse) | if ((queue = m->queue) == PQ_INACTIVE && !noreuse) | ||||
return; | return; | ||||
if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { | if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) { | ||||
pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE]; | pq = &vm_page_domain(m)->vmd_pagequeues[PQ_INACTIVE]; | ||||
/* Avoid multiple acquisitions of the inactive queue lock. */ | /* Avoid multiple acquisitions of the inactive queue lock. */ | ||||
if (queue == PQ_INACTIVE) { | if (queue == PQ_INACTIVE) { | ||||
vm_pagequeue_lock(pq); | vm_pagequeue_lock(pq); | ||||
vm_page_dequeue_locked(m); | vm_page_dequeue_locked(m); | ||||
} else { | } else { | ||||
if (queue != PQ_NONE) | if (queue != PQ_NONE) | ||||
vm_page_dequeue(m); | vm_page_dequeue(m); | ||||
vm_pagequeue_lock(pq); | vm_pagequeue_lock(pq); | ||||
} | } | ||||
m->queue = PQ_INACTIVE; | m->queue = PQ_INACTIVE; | ||||
if (noreuse) | if (noreuse) | ||||
TAILQ_INSERT_BEFORE(&vm_phys_domain(m)->vmd_inacthead, | TAILQ_INSERT_BEFORE(&vm_page_domain(m)->vmd_inacthead, | ||||
m, plinks.q); | m, plinks.q); | ||||
else | else | ||||
TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); | TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q); | ||||
vm_pagequeue_cnt_inc(pq); | vm_pagequeue_cnt_inc(pq); | ||||
vm_pagequeue_unlock(pq); | vm_pagequeue_unlock(pq); | ||||
} | } | ||||
} | } | ||||
▲ Show 20 Lines • Show All 821 Lines • Show Last 20 Lines |
Perhaps these two lines can be kept as is, without reformatting.