vm/vm_page.c
Context not available.
 static uma_zone_t fakepg_zone;
-static struct vnode *vm_page_alloc_init(vm_page_t m);
+static void vm_page_alloc_check(vm_page_t m);
-static void vm_page_cache_turn_free(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(uint8_t queue, vm_page_t m);
 static void vm_page_free_wakeup(void);
Context not available.
 vm_page_dirty_KBI(vm_page_t m)
 {
-	/* These assertions refer to this operation by its public name. */
+	/* Refer to this operation by its public name. */
-	KASSERT((m->flags & PG_CACHED) == 0,
-	    ("vm_page_dirty: page in cache!"));
 	KASSERT(m->valid == VM_PAGE_BITS_ALL,
 	    ("vm_page_dirty: page is invalid!"));
 	m->dirty = VM_PAGE_BITS_ALL;
Context not available.
 }
 
 /*
- * Convert all of the given object's cached pages that have a
- * pindex within the given range into free pages.  If the value
- * zero is given for "end", then the range's upper bound is
- * infinity.  If the given object is backed by a vnode and it
- * transitions from having one or more cached pages to none, the
- * vnode's hold count is reduced.
- */
-void
-vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
-{
-	vm_page_t m;
-	boolean_t empty;
-
-	mtx_lock(&vm_page_queue_free_mtx);
-	if (__predict_false(vm_radix_is_empty(&object->cache))) {
-		mtx_unlock(&vm_page_queue_free_mtx);
-		return;
-	}
-	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
-		if (end != 0 && m->pindex >= end)
-			break;
-		vm_radix_remove(&object->cache, m->pindex);
-		vm_page_cache_turn_free(m);
-	}
-	empty = vm_radix_is_empty(&object->cache);
-	mtx_unlock(&vm_page_queue_free_mtx);
-	if (object->type == OBJT_VNODE && empty)
-		vdrop(object->handle);
-}
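
Note: for context, a minimal usage sketch of the interface being deleted; the bare call below is hypothetical, not code from this change:

	/*
	 * Free every cached page the object still holds; per the comment
	 * above, end == 0 selects an unbounded pindex range.
	 */
	vm_page_cache_free(object, 0, 0);
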
-/*
- * Returns the cached page that is associated with the given
- * object and offset.  If, however, none exists, returns NULL.
- *
- * The free page queue must be locked.
- */
-static inline vm_page_t
-vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	return (vm_radix_lookup(&object->cache, pindex));
-}
-
-/*
- * Remove the given cached page from its containing object's
- * collection of cached pages.
- *
- * The free page queue must be locked.
- */
-static void
-vm_page_cache_remove(vm_page_t m)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	KASSERT((m->flags & PG_CACHED) != 0,
-	    ("vm_page_cache_remove: page %p is not cached", m));
-	vm_radix_remove(&m->object->cache, m->pindex);
-	m->object = NULL;
-	vm_cnt.v_cache_count--;
-}
-
-/*
- * Transfer all of the cached pages with offset greater than or
- * equal to 'offidxstart' from the original object's cache to the
- * new object's cache.  However, any cached pages with offset
- * greater than or equal to the new object's size are kept in the
- * original object.  Initially, the new object's cache must be
- * empty.  Offset 'offidxstart' in the original object must
- * correspond to offset zero in the new object.
- *
- * The new object must be locked.
- */
-void
-vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
-    vm_object_t new_object)
-{
-	vm_page_t m;
-
-	/*
-	 * Insertion into an object's collection of cached pages
-	 * requires the object to be locked.  In contrast, removal does
-	 * not.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(new_object);
-	KASSERT(vm_radix_is_empty(&new_object->cache),
-	    ("vm_page_cache_transfer: object %p has cached pages",
-	    new_object));
-	mtx_lock(&vm_page_queue_free_mtx);
-	while ((m = vm_radix_lookup_ge(&orig_object->cache,
-	    offidxstart)) != NULL) {
-		/*
-		 * Transfer all of the pages with offset greater than or
-		 * equal to 'offidxstart' from the original object's
-		 * cache to the new object's cache.
-		 */
-		if ((m->pindex - offidxstart) >= new_object->size)
-			break;
-		vm_radix_remove(&orig_object->cache, m->pindex);
-		/* Update the page's object and offset. */
-		m->object = new_object;
-		m->pindex -= offidxstart;
-		if (vm_radix_insert(&new_object->cache, m))
-			vm_page_cache_turn_free(m);
-	}
-	mtx_unlock(&vm_page_queue_free_mtx);
-}
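
Note: a worked example of the transfer arithmetic described above, as a standalone userspace sketch. Everything here (the array, the sizes, main itself) is invented for illustration; only the rebasing rule mirrors the kernel code.

	#include <stdio.h>

	int main(void)
	{
		/* Pindexes cached by orig_object before the split. */
		unsigned long cached[] = { 10, 12, 15 };
		unsigned long offidxstart = 10;	/* maps to pindex 0 in new_object */
		unsigned long new_size = 4;	/* new_object->size */

		for (int i = 0; i < 3; i++) {
			unsigned long p = cached[i];
			/* Pages at or past the new object's size stay behind. */
			if (p - offidxstart >= new_size)
				printf("pindex %lu stays in orig_object\n", p);
			else
				/* Same rebasing as the kernel: m->pindex -= offidxstart. */
				printf("pindex %lu moves to new_object as %lu\n",
				    p, p - offidxstart);
		}
		return (0);
	}

This prints that pindexes 10 and 12 move to the new object as 0 and 2, while pindex 15 stays put, since 15 - 10 = 5 is beyond the new object's size of 4.
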
-/*
- * Returns TRUE if a cached page is associated with the given object and
- * offset, and FALSE otherwise.
- *
- * The object must be locked.
- */
-boolean_t
-vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
-{
-	vm_page_t m;
-
-	/*
-	 * Insertion into an object's collection of cached pages requires the
-	 * object to be locked.  Therefore, if the object is locked and the
-	 * object's collection is empty, there is no need to acquire the free
-	 * page queues lock in order to prove that the specified page doesn't
-	 * exist.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	if (__predict_true(vm_object_cache_is_empty(object)))
-		return (FALSE);
-	mtx_lock(&vm_page_queue_free_mtx);
-	m = vm_page_cache_lookup(object, pindex);
-	mtx_unlock(&vm_page_queue_free_mtx);
-	return (m != NULL);
-}
-
-/*
  * vm_page_alloc:
  *
  *	Allocate and return a page that is associated with the specified
Context not available.
  * optional allocation flags:
  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
  *				intends to allocate
- *	VM_ALLOC_IFCACHED	return page only if it is cached
- *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
- *				is cached
  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
  *	VM_ALLOC_NOOBJ		page is not associated with an object and
Context not available.
 vm_page_t
 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
-	struct vnode *vp = NULL;
-	vm_object_t m_object;
 	vm_page_t m, mpred;
 	int flags, req_class;
Context not available.
 	 * Allocate from the free queue if the number of free pages
 	 * exceeds the minimum for the request class.
 	 */
-	if (object != NULL &&
-	    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
-		if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
-			mtx_unlock(&vm_page_queue_free_mtx);
-			return (NULL);
-		}
-		if (vm_phys_unfree_page(m))
-			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
 #if VM_NRESERVLEVEL > 0
-		else if (!vm_reserv_reactivate_page(m))
-#else
-		else
-#endif
-			panic("vm_page_alloc: cache page %p is missing"
-			    " from the free queue", m);
-	} else if ((req & VM_ALLOC_IFCACHED) != 0) {
-		mtx_unlock(&vm_page_queue_free_mtx);
-		return (NULL);
-#if VM_NRESERVLEVEL > 0
-	} else if (object == NULL || (object->flags & (OBJ_COLORED |
+	if (object == NULL || (object->flags & (OBJ_COLORED |
 	    OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
-	    vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
+	    vm_reserv_alloc_page(object, pindex, mpred)) == NULL)
-#else
-	} else {
 #endif
+	{
 		m = vm_phys_alloc_pages(object != NULL ?
 		    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
 #if VM_NRESERVLEVEL > 0
Context not available.
 	 * At this point we had better have found a good page.
 	 */
 	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
-	KASSERT(m->queue == PQ_NONE,
-	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
-	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
-	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
-	KASSERT(!vm_page_busied(m), ("vm_page_alloc: page %p is busy", m));
-	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
-	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
-	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
-	    pmap_page_get_memattr(m)));
-	if ((m->flags & PG_CACHED) != 0) {
-		KASSERT((m->flags & PG_ZERO) == 0,
-		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
-		KASSERT(m->valid != 0,
-		    ("vm_page_alloc: cached page %p is invalid", m));
-		if (m->object != object || m->pindex != pindex)
-			m->valid = 0;
-		m_object = m->object;
-		vm_page_cache_remove(m);
-		if (m_object->type == OBJT_VNODE &&
-		    vm_object_cache_is_empty(m_object))
-			vp = m_object->handle;
-	} else {
-		KASSERT(m->valid == 0,
-		    ("vm_page_alloc: free page %p is valid", m));
-		vm_phys_freecnt_adj(m, -1);
-	}
+	vm_phys_freecnt_adj(m, -1);
 	mtx_unlock(&vm_page_queue_free_mtx);
+	vm_page_alloc_check(m);
 
 	/*
 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
Context not available.
 	if (object != NULL) {
 		if (vm_page_insert_after(m, object, pindex, mpred)) {
-			/* See the comment below about hold count. */
-			if (vp != NULL)
-				vdrop(vp);
 			pagedaemon_wakeup();
 			if (req & VM_ALLOC_WIRED) {
 				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
Context not available.
 	m->pindex = pindex;
 
 	/*
-	 * The following call to vdrop() must come after the above call
-	 * to vm_page_insert() in case both affect the same object and
-	 * vnode.  Otherwise, the affected vnode's hold count could
-	 * temporarily become zero.
-	 */
-	if (vp != NULL)
-		vdrop(vp);
-
-	/*
 	 * Don't wakeup too often - wakeup the pageout daemon when
 	 * we would be nearly out of memory.
 	 */
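
Note: the ordering constraint in the comment removed above can be made concrete with a hypothetical interleaving (illustration only; it assumes m's old cached object and the allocating object share one vnode whose hold count is currently 1):

	/*
	 *	vdrop(vp);		hold count 1 -> 0; the vnode is
	 *				eligible for recycling here
	 *	vm_page_insert(...);	re-holds the vnode, but too late
	 *
	 * Calling vm_page_insert() first keeps the hold count above zero
	 * throughout, which is why the vdrop() came last.
	 */
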
Context not available.
 	return (m);
 }
 
-static void
-vm_page_alloc_contig_vdrop(struct spglist *lst)
-{
-
-	while (!SLIST_EMPTY(lst)) {
-		vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
-		SLIST_REMOVE_HEAD(lst, plinks.s.ss);
-	}
-}
-
 /*
  * vm_page_alloc_contig:
  *
Context not available.
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
-	struct vnode *drop;
-	struct spglist deferred_vdrop_list;
 	vm_page_t m, m_tmp, m_ret;
 	u_int flags;
 	int req_class;
Context not available.
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
-	SLIST_INIT(&deferred_vdrop_list);
 	mtx_lock(&vm_page_queue_free_mtx);
 	if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
 	    vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
Context not available.
 		return (NULL);
 	}
 	if (m_ret != NULL)
-		for (m = m_ret; m < &m_ret[npages]; m++) {
-			drop = vm_page_alloc_init(m);
-			if (drop != NULL) {
-				/*
-				 * Enqueue the vnode for deferred vdrop().
-				 */
-				m->plinks.s.pv = drop;
-				SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
-				    plinks.s.ss);
-			}
-		}
+		vm_phys_freecnt_adj(m_ret, -npages);
 	else {
 #if VM_NRESERVLEVEL > 0
 		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
Context not available.
 	mtx_unlock(&vm_page_queue_free_mtx);
 	if (m_ret == NULL)
 		return (NULL);
+	for (m = m_ret; m < &m_ret[npages]; m++)
+		vm_page_alloc_check(m);
 
 	/*
 	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
Context not available.
 		m->oflags = VPO_UNMANAGED;
 		if (object != NULL) {
 			if (vm_page_insert(m, object, pindex)) {
-				vm_page_alloc_contig_vdrop(
-				    &deferred_vdrop_list);
 				if (vm_paging_needed())
 					pagedaemon_wakeup();
 				if ((req & VM_ALLOC_WIRED) != 0)
Context not available.
 		pmap_page_set_memattr(m, memattr);
 		pindex++;
 	}
-	vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
 	if (vm_paging_needed())
 		pagedaemon_wakeup();
 	return (m_ret);
Context not available.
 }
kib: The description 'initialize' is somewhat weird for the remnants of this function. It only manages bookkeeping for the phys allocator, effectively.

alc: I'd like to propose some refactoring: Move the vm_phys_freecnt_adj() back to the callers, leaving only the KASSERT()s in this function. (This is actually an optimization for vm_page_alloc_contig().) Rename the function to vm_page_alloc_check(), and move the calls to a point after the free queue lock is released.
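
Note: a condensed sketch of the calling pattern proposed above, matching the new right-hand column; the function names are the kernel's, but the surrounding detail (the "pool" argument, NULL handling, the reservation path) is elided:

	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_phys_alloc_pages(pool, 0);	/* dequeue from a freelist */
	vm_phys_freecnt_adj(m, -1);		/* bookkeeping stays under the lock */
	mtx_unlock(&vm_page_queue_free_mtx);
	vm_page_alloc_check(m);			/* KASSERT()s run after the unlock */
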
 /*
- * Initialize a page that has been freshly dequeued from a freelist.
- * The caller has to drop the vnode returned, if it is not NULL.
- *
- * This function may only be used to initialize unmanaged pages.
- *
- * To be called with vm_page_queue_free_mtx held.
+ * Check a page that has been freshly dequeued from a freelist.
  */
-static struct vnode *
-vm_page_alloc_init(vm_page_t m)
+static void
+vm_page_alloc_check(vm_page_t m)
 {
-	struct vnode *drop;
-	vm_object_t m_object;
 
 	KASSERT(m->queue == PQ_NONE,
-	    ("vm_page_alloc_init: page %p has unexpected queue %d",
-	    m, m->queue));
-	KASSERT(m->wire_count == 0,
-	    ("vm_page_alloc_init: page %p is wired", m));
-	KASSERT(m->hold_count == 0,
-	    ("vm_page_alloc_init: page %p is held", m));
-	KASSERT(!vm_page_busied(m),
-	    ("vm_page_alloc_init: page %p is busy", m));
-	KASSERT(m->dirty == 0,
-	    ("vm_page_alloc_init: page %p is dirty", m));
+	    ("page %p has unexpected queue %d", m, m->queue));
+	KASSERT(m->wire_count == 0, ("page %p is wired", m));
+	KASSERT(m->hold_count == 0, ("page %p is held", m));
+	KASSERT(!vm_page_busied(m), ("page %p is busy", m));
+	KASSERT(m->dirty == 0, ("page %p is dirty", m));
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
-	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
+	    ("page %p has unexpected memattr %d",
 	    m, pmap_page_get_memattr(m)));
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	drop = NULL;
-	if ((m->flags & PG_CACHED) != 0) {
-		KASSERT((m->flags & PG_ZERO) == 0,
-		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
-		m->valid = 0;
-		m_object = m->object;
-		vm_page_cache_remove(m);
-		if (m_object->type == OBJT_VNODE &&
-		    vm_object_cache_is_empty(m_object))
-			drop = m_object->handle;
-	} else {
-		KASSERT(m->valid == 0,
-		    ("vm_page_alloc_init: free page %p is valid", m));
-		vm_phys_freecnt_adj(m, -1);
-	}
-	return (drop);
+	KASSERT(m->valid == 0, ("free page %p is valid", m));
 }
 
 /*
Context not available.
 vm_page_t
 vm_page_alloc_freelist(int flind, int req)
 {
-	struct vnode *drop;
 	vm_page_t m;
 	u_int flags;
 	int req_class;
Context not available.
 		mtx_unlock(&vm_page_queue_free_mtx);
 		return (NULL);
 	}
-	drop = vm_page_alloc_init(m);
+	vm_phys_freecnt_adj(m, -1);
 	mtx_unlock(&vm_page_queue_free_mtx);
+	vm_page_alloc_check(m);
 
 	/*
 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
Context not available.
 	}
 	/* Unmanaged pages don't use "act_count". */
 	m->oflags = VPO_UNMANAGED;
-	if (drop != NULL)
-		vdrop(drop);
 	if (vm_paging_needed())
 		pagedaemon_wakeup();
 	return (m);
Context not available.
 			/* Don't care: PG_NODUMP, PG_ZERO. */
 			if (object->type != OBJT_DEFAULT &&
 			    object->type != OBJT_SWAP &&
-			    object->type != OBJT_VNODE)
+			    object->type != OBJT_VNODE) {
 				run_ext = 0;
-			else if ((m->flags & PG_CACHED) != 0 ||
-			    m != vm_page_lookup(object, m->pindex)) {
-				/*
-				 * The page is cached or recently converted
-				 * from cached to free.
-				 */
 #if VM_NRESERVLEVEL > 0
-				if (level >= 0) {
-					/*
-					 * The page is reserved.  Extend the
-					 * current run by one page.
-					 */
-					run_ext = 1;
-				} else
-#endif
-				if ((order = m->order) < VM_NFREEORDER) {
-					/*
-					 * The page is enqueued in the
-					 * physical memory allocator's cache/
-					 * free page queues.  Moreover, it is
-					 * the first page in a power-of-two-
-					 * sized run of contiguous cache/free
-					 * pages.  Add these pages to the end
-					 * of the current run, and jump
-					 * ahead.
-					 */
-					run_ext = 1 << order;
-					m_inc = 1 << order;
-				} else
-					run_ext = 0;
-#if VM_NRESERVLEVEL > 0
 			} else if ((options & VPSC_NOSUPER) != 0 &&
 			    (level = vm_reserv_level_iffullpop(m)) >= 0) {
 				run_ext = 0;
Context not available.
 			    object->type != OBJT_SWAP &&
 			    object->type != OBJT_VNODE)
 				error = EINVAL;
-			else if ((m->flags & PG_CACHED) != 0 ||
-			    m != vm_page_lookup(object, m->pindex)) {
-				/*
-				 * The page is cached or recently converted
-				 * from cached to free.
-				 */
-				VM_OBJECT_WUNLOCK(object);
-				goto cached;
-			} else if (object->memattr != VM_MEMATTR_DEFAULT)
+			else if (object->memattr != VM_MEMATTR_DEFAULT)
 				error = EINVAL;
 			else if (m->queue != PQ_NONE && !vm_page_busied(m)) {
 				KASSERT(pmap_page_get_memattr(m) ==
Context not available.
 unlock:
 			VM_OBJECT_WUNLOCK(object);
 		} else {
-cached:
 			mtx_lock(&vm_page_queue_free_mtx);
 			order = m->order;
 			if (order < VM_NFREEORDER) {
Context not available.
 }
 
 /*
- * Turn a cached page into a free page, by changing its attributes.
- * Keep the statistics up-to-date.
- *
- * The free page queue must be locked.
- */
-static void
-vm_page_cache_turn_free(vm_page_t m)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-
-	m->object = NULL;
-	m->valid = 0;
-	KASSERT((m->flags & PG_CACHED) != 0,
-	    ("vm_page_cache_turn_free: page %p is not cached", m));
-	m->flags &= ~PG_CACHED;
-	vm_cnt.v_cache_count--;
-	vm_phys_freecnt_adj(m, 1);
-}
-
-/*
  * vm_page_free_toq:
  *
  *	Returns the given page to the free list,
Context not available.
 			VM_WAIT;
 			VM_OBJECT_WLOCK(object);
 			goto retrylookup;
-		} else if (m->valid != 0)
-			return (m);
+		}
 		if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		return (m);
Context not available.