sys/vm/vm_page.c
... 155 lines not shown ...
 static uma_zone_t fakepg_zone;

 static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_dequeue_complete(vm_page_t m);
 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
-    vm_pindex_t pindex, vm_page_t mpred);
+    vm_pindex_t pindex, vm_page_t mpred, const bool alloc);
 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
     vm_page_t mpred);
+static void vm_page_mvqueue(vm_page_t m, int queue);
 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
     vm_page_t m_run, vm_paddr_t high);
 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
     int req);
-static int vm_page_import(void *arg, void **store, int cnt, int domain,
+static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
     int flags);
-static void vm_page_release(void *arg, void **store, int cnt);
+static void vm_page_zone_release(void *arg, void **store, int cnt);

 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);

 static void
 vm_page_init(void *dummy)
 {

 	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
... 17 lines not shown ...
 	for (i = 0; i < vm_ndomains; i++) {
 		/*
 		 * Don't allow the page cache to take up more than .25% of
 		 * memory.
 		 */
 		if (vmd->vmd_page_count / 400 < 256 * mp_ncpus)
 			continue;
 		vmd->vmd_pgcache = uma_zcache_create("vm pgcache",
 		    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
-		    vm_page_import, vm_page_release, vmd,
+		    vm_page_zone_import, vm_page_zone_release, vmd,
 		    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
 		(void )uma_zone_set_maxcache(vmd->vmd_pgcache, 0);
 	}
 }
 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
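To make the ".25% of memory" check concrete, here is the arithmetic with hypothetical machine sizes (illustration only, not part of the patch):

/*
 * Hypothetical numbers, for illustration only.  With 4 KB pages and a
 * 16 GB domain, vmd_page_count is 4194304, so the cap works out to
 * 4194304 / 400 ~= 10485 pages (~41 MB).  On a 32-CPU machine the
 * worst-case cache footprint estimate is 256 * 32 = 8192 pages, which
 * is below the cap, so the cache zone is created.  With a 2 GB domain
 * the cap is 524288 / 400 ~= 1310 pages, the check fails, and the
 * per-domain cache zone is skipped entirely.
 */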
 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
 #if PAGE_SIZE == 32768
... 281 lines not shown ...
  * Initialize a physical page in preparation for adding it to the free
  * lists.
  */
 static void
 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
 {

 	m->object = NULL;
-	m->wire_count = 0;
+	m->ref_count = 0;
 	m->busy_lock = VPB_UNBUSIED;
 	m->flags = m->aflags = 0;
 	m->phys_addr = pa;
 	m->queue = PQ_NONE;
 	m->psind = 0;
 	m->segind = segind;
 	m->order = VM_NFREEORDER;
 	m->pool = VM_FREEPOOL_DEFAULT;
... 579 lines not shown ...
 void
 vm_page_unhold_pages(vm_page_t *ma, int count)
 {
 	struct mtx *mtx;

 	mtx = NULL;
 	for (; count != 0; count--) {
 		vm_page_change_lock(*ma, &mtx);
-		if (vm_page_unwire(*ma, PQ_ACTIVE) && (*ma)->object == NULL)
-			vm_page_free(*ma);
+		vm_page_unwire(*ma, PQ_ACTIVE);
 		ma++;
 	}
 	if (mtx != NULL)
 		mtx_unlock(mtx);
 }
 vm_page_t
 PHYS_TO_VM_PAGE(vm_paddr_t pa)
... 50 lines not shown ...
 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
 	}
 	m->phys_addr = paddr;
 	m->queue = PQ_NONE;
 	/* Fictitious pages don't use "segind". */
 	m->flags = PG_FICTITIOUS;
 	/* Fictitious pages don't use "order" or "pool". */
 	m->oflags = VPO_UNMANAGED;
 	m->busy_lock = VPB_SINGLE_EXCLUSIVER;
-	m->wire_count = 1;
+	/* Fictitious pages are unevictable. */
+	m->ref_count = 1;
 	pmap_page_init(m);
 memattr:
 	pmap_page_set_memattr(m, memattr);
 }
 /*
  * vm_page_putfake:
  *
... 143 lines not shown ...
  */
 int
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t mpred;

 	VM_OBJECT_ASSERT_WLOCKED(object);
 	mpred = vm_radix_lookup_le(&object->rtree, pindex);
-	return (vm_page_insert_after(m, object, pindex, mpred));
+	return (vm_page_insert_after(m, object, pindex, mpred, false));
 }
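vm_page_insert() can fail transiently when the radix trie cannot allocate an interior node. A hypothetical caller (everything except vm_page_insert(), vm_radix_wait(), and the locking macros below is illustrative, not part of this patch) might retry as follows:

/*
 * Illustrative sketch only: insert a previously allocated page "m" into
 * "obj" at "pidx", retrying when the radix trie is out of nodes.  A real
 * caller would have to revalidate the object's state after reacquiring
 * the lock; allocation paths instead pass the predecessor directly to
 * vm_page_insert_after() to avoid the extra lookup.
 */
static void
example_insert(vm_object_t obj, vm_page_t m, vm_pindex_t pidx)
{

	VM_OBJECT_WLOCK(obj);
	while (vm_page_insert(m, obj, pidx) != 0) {
		/* Insertion failed for want of memory; wait and retry. */
		VM_OBJECT_WUNLOCK(obj);
		vm_radix_wait();
		VM_OBJECT_WLOCK(obj);
	}
	VM_OBJECT_WUNLOCK(obj);
}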
 /*
  * vm_page_insert_after:
  *
  * Inserts the page "m" into the specified object at offset "pindex".
  *
  * The page "mpred" must immediately precede the offset "pindex" within
  * the specified object.
  *
+ * "alloc" should be true if the page is being allocated and false
+ * otherwise.
+ *
  * The object must be locked.
  */
 static int
 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
-    vm_page_t mpred)
+    vm_page_t mpred, const bool alloc)

	[jeff] I assume the comment is not intentional?

 {
 	vm_page_t msucc;

 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(m->object == NULL,
 	    ("vm_page_insert_after: page already inserted"));
 	if (mpred != NULL) {
 		KASSERT(mpred->object == object,
 		    ("vm_page_insert_after: object doesn't contain mpred"));
 		KASSERT(mpred->pindex < pindex,
 		    ("vm_page_insert_after: mpred doesn't precede pindex"));
 		msucc = TAILQ_NEXT(mpred, listq);
 	} else
 		msucc = TAILQ_FIRST(&object->memq);
 	if (msucc != NULL)
 		KASSERT(msucc->pindex > pindex,
 		    ("vm_page_insert_after: msucc doesn't succeed pindex"));

 	/*
-	 * Record the object/offset pair in this page
+	 * Record the object/offset pair in this page.
 	 */
 	m->object = object;
 	m->pindex = pindex;
+	if (alloc)

	[kib] My _feel_ is that this op should be atomic.
	[markj] The most common case is page allocation, where it is
	    preferable to avoid atomic ops (and they are unnecessary there).
	    In other cases we are inserting an unmanaged page, and among
	    existing callers I did not see any possible races.  It would be
	    straightforward to add a bool parameter to vm_page_insert_after()
	    to differentiate between these cases, though.

+		m->ref_count |= VPRC_OBJREF;
+	else
+		atomic_set_int(&m->ref_count, VPRC_OBJREF);

 	/*
 	 * Now link into the object's ordered list of backed pages.
 	 */
 	if (vm_radix_insert(&object->rtree, m)) {
 		m->object = NULL;
 		m->pindex = 0;
+		if (alloc)
+			m->ref_count &= ~VPRC_OBJREF;
+		else
+			atomic_clear_int(&m->ref_count, VPRC_OBJREF);
 		return (1);
 	}
 	vm_page_insert_radixdone(m, object, mpred);
 	return (0);
 }
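A sketch of why the unlocked update is safe only at allocation time, per the thread above (illustrative, not part of the patch):

/*
 * Illustrative only.  At allocation time the page came straight from the
 * allocator, so no other thread can hold a reference and a plain
 * read-modify-write is safe:
 *
 *	m->ref_count |= VPRC_OBJREF;
 *
 * Once the page is reachable by other threads (e.g. a concurrent
 * vm_page_wire_mapped() incrementing the same word), the plain RMW can
 * lose the concurrent update:
 *
 *	thread 1 reads ref_count = r
 *	thread 2 atomically sets ref_count = r + 1	(new wiring)
 *	thread 1 stores r | VPRC_OBJREF			(wiring lost)
 *
 * Hence the atomic_set_int() on insertion paths that are not allocations.
 */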
 /*
  * vm_page_insert_radixdone:
  *
  * Complete page "m" insertion into the specified object after the
  * radix trie hooking.
  *
  * The page "mpred" must precede the offset "m->pindex" within the
  * specified object.
  *
  * The object must be locked.
  */
 static void
 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
 {

 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object != NULL && m->object == object,
 	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
+	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
+	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
 	if (mpred != NULL) {
 		KASSERT(mpred->object == object,
-		    ("vm_page_insert_after: object doesn't contain mpred"));
+		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
 		KASSERT(mpred->pindex < m->pindex,
-		    ("vm_page_insert_after: mpred doesn't precede pindex"));
+		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
 	}

 	if (mpred != NULL)
 		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
 	else
 		TAILQ_INSERT_HEAD(&object->memq, m, listq);

 	/*
... 14 lines not shown ...
 	if (pmap_page_is_write_mapped(m))
 		vm_object_set_writeable_dirty(object);
 }
 /*
  * vm_page_remove:
  *
  * Removes the specified page from its containing object, but does not
- * invalidate any backing storage.
+ * invalidate any backing storage.  Returns true if the object's reference
+ * was the last reference to the page, and false otherwise.
  *
- * The object must be locked.  The page must be locked if it is managed.
+ * The object must be locked.
  */
-void
+bool
 vm_page_remove(vm_page_t m)
 {
 	vm_object_t object;
 	vm_page_t mrem;

-	if ((m->oflags & VPO_UNMANAGED) == 0)
-		vm_page_assert_locked(m);
-	if ((object = m->object) == NULL)
-		return;
+	object = m->object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
+	    ("page %p is missing its object ref", m));
 	if (vm_page_xbusied(m))
 		vm_page_xunbusy_maybelocked(m);
 	mrem = vm_radix_remove(&object->rtree, m->pindex);
 	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

 	/*
 	 * Now remove from the object's list of backed pages.
 	 */
 	TAILQ_REMOVE(&object->memq, m, listq);

 	/*
 	 * And show that the object has one fewer resident page.
 	 */
 	object->resident_page_count--;

 	/*
 	 * The vnode may now be recycled.
 	 */
 	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
 		vdrop(object->handle);

+	/*
+	 * Release the object reference.  The caller may free the page
+	 * after this point.
+	 */
 	m->object = NULL;
+	return (vm_page_drop(m, -VPRC_OBJREF) == VPRC_OBJREF);
 }

	[kib] Can you assert that VPRC_BLOCKED is not returned?
	[markj] Yes, I will add it.
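vm_page_drop() itself is defined outside this hunk. From its uses here — vm_page_remove() needs the pre-decrement value, and vm_page_free_prep() pairs an acquire fence with it — a sketch of its likely shape follows; the name suffix and comments are guesses, not the patch's actual code:

/*
 * Sketch only: the real vm_page_drop() lives elsewhere in this patch.
 * It must return the value of ref_count before the update
 * (vm_page_remove() compares it against VPRC_OBJREF) and must provide
 * release semantics to pair with the atomic_thread_fence_acq() in
 * vm_page_free_prep().  Callers pass negative deltas such as
 * -VPRC_OBJREF, relying on unsigned wraparound.
 */
static u_int
vm_page_drop_sketch(vm_page_t m, u_int val)
{

	/* Order all prior writes to the page before the count update. */
	atomic_thread_fence_rel();
	return (atomic_fetchadd_int(&m->ref_count, val));
}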
 /*
  * vm_page_lookup:
  *
  * Returns the page associated with the object/offset
  * pair specified; if none is found, NULL is returned.
  *
  * The object must be locked.
... 63 lines not shown ...
 		if (prev->pindex != m->pindex - 1)
 			prev = NULL;
 	}
 	return (prev);
 }
 /*
  * Uses the page mnew as a replacement for an existing page at index
  * pindex which must be already present in the object.
- *
- * The existing page must not be on a paging queue.
  */
 vm_page_t
 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t mold;

 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(mnew->object == NULL,
 	    ("vm_page_replace: page %p already in object", mnew));
+	KASSERT(mnew->queue == PQ_NONE || vm_page_wired(mnew),
+	    ("vm_page_replace: new page %p is on a paging queue", mnew));

 	/*
 	 * This function mostly follows vm_page_insert() and
 	 * vm_page_remove() without the radix, object count and vnode
 	 * dance.  Double check such functions for more comments.
 	 */

 	mnew->object = object;
 	mnew->pindex = pindex;
+	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
 	mold = vm_radix_replace(&object->rtree, mnew);
 	KASSERT(mold->queue == PQ_NONE,
 	    ("vm_page_replace: old page %p is on a paging queue", mold));

 	/* Keep the resident page list in sorted order. */
 	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
 	TAILQ_REMOVE(&object->memq, mold, listq);

 	mold->object = NULL;
+	atomic_clear_int(&mold->ref_count, VPRC_OBJREF);
 	vm_page_xunbusy_maybelocked(mold);

 	/*
 	 * The object's resident_page_count does not change because we have
 	 * swapped one page for another, but OBJ_MIGHTBEDIRTY.
 	 */
 	if (pmap_page_is_write_mapped(mnew))
 		vm_object_set_writeable_dirty(object);
... 21 lines not shown ...
 int
 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 {
 	vm_page_t mpred;
 	vm_pindex_t opidx;

 	VM_OBJECT_ASSERT_WLOCKED(new_object);
+	KASSERT(m->ref_count != 0, ("vm_page_rename: page %p has no refs", m));

 	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
 	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
 	    ("vm_page_rename: pindex already renamed"));

 	/*
 	 * Create a custom version of vm_page_insert() which does not depend
 	 * by m_prev and can cheat on the implementation aspects of the
 	 * function.
 	 */
 	opidx = m->pindex;
 	m->pindex = new_pindex;
 	if (vm_radix_insert(&new_object->rtree, m)) {
 		m->pindex = opidx;
 		return (1);
 	}

 	/*
 	 * The operation cannot fail anymore.  The removal must happen before
 	 * the listq iterator is tainted.
 	 */
 	m->pindex = opidx;
 	vm_page_lock(m);
-	vm_page_remove(m);
+	(void)vm_page_remove(m);

 	/* Return back to the new pindex to complete vm_page_insert(). */
 	m->pindex = new_pindex;
 	m->object = new_object;
+	atomic_set_int(&m->ref_count, VPRC_OBJREF);
 	vm_page_unlock(m);
 	vm_page_insert_radixdone(m, new_object, mpred);
 	vm_page_dirty(m);
 	return (0);
 }
 /*
  * vm_page_alloc:
... 202 lines not shown ...
 found:
 	if ((req & VM_ALLOC_SBUSY) != 0)
 		m->busy_lock = VPB_SHARERS_WORD(1);
 	if (req & VM_ALLOC_WIRED) {
 		/*
 		 * The page lock is not required for wiring a page until that
 		 * page is inserted into the object.
 		 */
 		vm_wire_add(1);
-		m->wire_count = 1;
+		m->ref_count = 1;
 	}
 	m->act_count = 0;

 	if (object != NULL) {
-		if (vm_page_insert_after(m, object, pindex, mpred)) {
+		if (vm_page_insert_after(m, object, pindex, mpred, true)) {
 			if (req & VM_ALLOC_WIRED) {
 				vm_wire_sub(1);
-				m->wire_count = 0;
+				m->ref_count = 0;
 			}
 			KASSERT(m->object == NULL, ("page %p has object", m));
 			m->oflags = VPO_UNMANAGED;
 			m->busy_lock = VPB_UNBUSIED;
 			/* Don't change PG_ZERO. */
 			vm_page_free_toq(m);
 			if (req & VM_ALLOC_WAITFAIL) {
 				VM_OBJECT_WUNLOCK(object);
... 175 lines not shown ...
 		if (object->memattr != VM_MEMATTR_DEFAULT &&
 		    memattr == VM_MEMATTR_DEFAULT)
 			memattr = object->memattr;
 	}
 	for (m = m_ret; m < &m_ret[npages]; m++) {
 		m->aflags = 0;
 		m->flags = (m->flags | PG_NODUMP) & flags;
 		m->busy_lock = busy_lock;
 		if ((req & VM_ALLOC_WIRED) != 0)
-			m->wire_count = 1;
+			m->ref_count = 1;
 		m->act_count = 0;
 		m->oflags = oflags;
 		if (object != NULL) {
-			if (vm_page_insert_after(m, object, pindex, mpred)) {
+			if (vm_page_insert_after(m, object, pindex, mpred,
+			    true)) {
 				if ((req & VM_ALLOC_WIRED) != 0)
 					vm_wire_sub(npages);
 				KASSERT(m->object == NULL,
 				    ("page %p has object", m));
 				mpred = m;
 				for (m = m_ret; m < &m_ret[npages]; m++) {
 					if (m <= mpred &&
 					    (req & VM_ALLOC_WIRED) != 0)
-						m->wire_count = 0;
+						m->ref_count = 0;
 					m->oflags = VPO_UNMANAGED;
 					m->busy_lock = VPB_UNBUSIED;
 					/* Don't change PG_ZERO. */
 					vm_page_free_toq(m);
 				}
 				if (req & VM_ALLOC_WAITFAIL) {
 					VM_OBJECT_WUNLOCK(object);
 					vm_radix_wait();
... 17 lines not shown ...
 static void
 vm_page_alloc_check(vm_page_t m)
 {

 	KASSERT(m->object == NULL, ("page %p has object", m));
 	KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
 	    ("page %p has unexpected queue %d, flags %#x",
 	    m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK)));
-	KASSERT(!vm_page_wired(m), ("page %p is wired", m));
+	KASSERT(m->ref_count == 0, ("page %p has references", m));
 	KASSERT(!vm_page_busied(m), ("page %p is busy", m));
 	KASSERT(m->dirty == 0, ("page %p is dirty", m));
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
 	    ("page %p has unexpected memattr %d",
 	    m, pmap_page_get_memattr(m)));
 	KASSERT(m->valid == 0, ("free page %p is valid", m));
 }
... 67 lines not shown ...
 	if ((req & VM_ALLOC_ZERO) != 0)
 		flags = PG_ZERO;
 	m->flags &= flags;
 	if ((req & VM_ALLOC_WIRED) != 0) {
 		/*
 		 * The page lock is not required for wiring a page that does
 		 * not belong to an object.
 		 */
 		vm_wire_add(1);
-		m->wire_count = 1;
+		m->ref_count = 1;
 	}
 	/* Unmanaged pages don't use "act_count". */
 	m->oflags = VPO_UNMANAGED;
 	return (m);
 }
 static int
-vm_page_import(void *arg, void **store, int cnt, int domain, int flags)
+vm_page_zone_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct vm_domain *vmd;
 	int i;

 	vmd = arg;
 	/* Only import if we can bring in a full bucket. */
 	if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
 		return (0);
 	domain = vmd->vmd_domain;
 	vm_domain_free_lock(vmd);
 	i = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, cnt,
 	    (vm_page_t *)store);
 	vm_domain_free_unlock(vmd);
 	if (cnt != i)
 		vm_domain_freecnt_inc(vmd, cnt - i);

 	return (i);
 }
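For context, the renamed import/release pair implements UMA's cache-zone contract; the note below only restates the semantics already visible above, it adds no new code:

/*
 * Restatement of the cache-zone contract implemented above.  UMA calls
 * vm_page_zone_import() to refill an empty per-CPU bucket with up to
 * "cnt" pages, and vm_page_zone_release() to return "cnt" cached pages
 * to the physical allocator when a bucket is reclaimed.  Returning 0
 * when cnt == 1 makes singleton requests bypass the cache, so buckets
 * are only ever filled whole; vm_domain_allocate() is consulted first
 * so the cache never dips into the domain's free-page reserves.
 */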
 static void
-vm_page_release(void *arg, void **store, int cnt)
+vm_page_zone_release(void *arg, void **store, int cnt)
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
 	int i;

 	vmd = arg;
 	vm_domain_free_lock(vmd);
 	for (i = 0; i < cnt; i++) {
... 43 lines not shown ...
 #endif
 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
 	m_run = NULL;
 	run_len = 0;
 	m_mtx = NULL;
 	for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
 		KASSERT((m->flags & PG_MARKER) == 0,
 		    ("page %p is PG_MARKER", m));
-		KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->wire_count == 1,
-		    ("fictitious page %p has invalid wire count", m));
+		KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1,
+		    ("fictitious page %p has invalid ref count", m));

 		/*
 		 * If the current page would be the start of a run, check its
 		 * physical address against the end, alignment, and boundary
 		 * conditions.  If it doesn't satisfy these conditions, either
 		 * terminate the scan or advance to the next page that
 		 * satisfies the failed condition.
 		 */
... 40 lines not shown ...
 		else if ((object = m->object) != NULL) {
 			VM_OBJECT_RLOCK(object);
 			mtx_lock(m_mtx);
 			if (m->object != object) {
 				/*
 				 * The page may have been freed.
 				 */
 				VM_OBJECT_RUNLOCK(object);
 				goto retry;
-			} else if (vm_page_wired(m)) {
-				run_ext = 0;
-				goto unlock;
 			}
 		}
 		/* Don't care: PG_NODUMP, PG_ZERO. */
 		if (object->type != OBJT_DEFAULT &&
 		    object->type != OBJT_SWAP &&
 		    object->type != OBJT_VNODE) {
 			run_ext = 0;
 #if VM_NRESERVLEVEL > 0
 		} else if ((options & VPSC_NOSUPER) != 0 &&
 		    (level = vm_reserv_level_iffullpop(m)) >= 0) {
 			run_ext = 0;
 			/* Advance to the end of the superpage. */
 			pa = VM_PAGE_TO_PHYS(m);
 			m_inc = atop(roundup2(pa + 1,
 			    vm_reserv_size(level)) - pa);
 #endif
 		} else if (object->memattr == VM_MEMATTR_DEFAULT &&
-		    vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
+		    vm_page_queue(m) != PQ_NONE && !vm_page_busied(m) &&
+		    !vm_page_wired(m)) {
 			/*
 			 * The page is allocated but eligible for
 			 * relocation.  Extend the current run by one
 			 * page.
 			 */
 			KASSERT(pmap_page_get_memattr(m) ==
 			    VM_MEMATTR_DEFAULT,
 			    ("page %p has an unexpected memattr", m));
 			KASSERT((m->oflags & (VPO_SWAPINPROG |
 			    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
 			    ("page %p has unexpected oflags", m));
 			/* Don't care: VPO_NOSYNC. */
 			run_ext = 1;
 		} else
 			run_ext = 0;
-unlock:
 		VM_OBJECT_RUNLOCK(object);
 #if VM_NRESERVLEVEL > 0
 	} else if (level >= 0) {
 		/*
 		 * The page is reserved but not yet allocated.  In
 		 * other words, it is still free.  Extend the current
 		 * run by one page.
 		 */
... 104 lines not shown ...
 		else if ((object = m->object) != NULL) {
 			VM_OBJECT_WLOCK(object);
 			mtx_lock(m_mtx);
 			if (m->object != object) {
 				/*
 				 * The page may have been freed.
 				 */
 				VM_OBJECT_WUNLOCK(object);
 				goto retry;
-			} else if (vm_page_wired(m)) {
-				error = EBUSY;
-				goto unlock;
 			}
 		}
 		/* Don't care: PG_NODUMP, PG_ZERO. */
 		if (object->type != OBJT_DEFAULT &&
 		    object->type != OBJT_SWAP &&
 		    object->type != OBJT_VNODE)
 			error = EINVAL;
 		else if (object->memattr != VM_MEMATTR_DEFAULT)
 			error = EINVAL;
 		else if (vm_page_queue(m) != PQ_NONE &&
-		    !vm_page_busied(m)) {
+		    !vm_page_busied(m) && !vm_page_wired(m)) {
 			KASSERT(pmap_page_get_memattr(m) ==
 			    VM_MEMATTR_DEFAULT,
 			    ("page %p has an unexpected memattr", m));
 			KASSERT((m->oflags & (VPO_SWAPINPROG |
 			    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
 			    ("page %p has unexpected oflags", m));
 			/* Don't care: VPO_NOSYNC. */
 			if (m->valid != 0) {
... 32 lines not shown ...
 		else if ((object = m->object) != NULL) {
 				    NULL, 0, req, 1,
 				    pa, high, PAGE_SIZE, 0,
 				    VM_MEMATTR_DEFAULT);
 			}
 			if (m_new == NULL) {
 				error = ENOMEM;
 				goto unlock;
 			}
-			KASSERT(!vm_page_wired(m_new),
-			    ("page %p is wired", m_new));

 			/*
 			 * Replace "m" with the new page.  For
 			 * vm_page_replace(), "m" must be busy
 			 * and dequeued.  Finally, change "m"
 			 * as if vm_page_free() was called.
 			 */
-			if (object->ref_count != 0)
-				pmap_remove_all(m);
+			if (object->ref_count != 0 &&
+			    !vm_page_try_remove_all(m)) {
+				error = EBUSY;
+				goto unlock;
+			}
 			m_new->aflags = m->aflags &
 			    ~PGA_QUEUE_STATE_MASK;
 			KASSERT(m_new->oflags == VPO_UNMANAGED,
 			    ("page %p is managed", m_new));
 			m_new->oflags = m->oflags & VPO_NOSYNC;
 			pmap_copy_page(m, m_new);
 			m_new->valid = m->valid;
 			m_new->dirty = m->dirty;
... 543 lines not shown ...
 vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
 {
 	struct vm_batchqueue *bq;
 	struct vm_pagequeue *pq;
 	int domain;

 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("page %p is unmanaged", m));
-	KASSERT(mtx_owned(vm_page_lockptr(m)) ||
-	    (m->object == NULL && (m->aflags & PGA_DEQUEUE) != 0),
+	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->object == NULL,
 	    ("missing synchronization for page %p", m));
 	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));

 	domain = vm_phys_domain(m);
 	pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];

 	critical_enter();
 	bq = DPCPU_PTR(pqbatch[domain][queue]);
... 112 lines not shown ...
  * operations on this page, so we get for free the mutual exclusion that
  * is otherwise provided by the page lock.
  */
 static void
 vm_page_dequeue_deferred_free(vm_page_t m)
 {
 	uint8_t queue;

-	KASSERT(m->object == NULL, ("page %p has an object reference", m));
+	KASSERT(m->ref_count == 0, ("page %p has references", m));

 	if ((m->aflags & PGA_DEQUEUE) != 0)
 		return;
 	atomic_thread_fence_acq();
 	if ((queue = m->queue) == PQ_NONE)
 		return;
 	vm_page_aflag_set(m, PGA_DEQUEUE);
 	vm_pqbatch_submit_page(m, queue);
... 95 lines not shown ...
 	KASSERT(vm_page_queue(m) != PQ_NONE,
("%s: page %p is not logically enqueued", __func__, m)); | ("%s: page %p is not logically enqueued", __func__, m)); | ||||
if ((m->aflags & PGA_REQUEUE) == 0) | if ((m->aflags & PGA_REQUEUE) == 0) | ||||
vm_page_aflag_set(m, PGA_REQUEUE); | vm_page_aflag_set(m, PGA_REQUEUE); | ||||
vm_pqbatch_submit_page(m, atomic_load_8(&m->queue)); | vm_pqbatch_submit_page(m, atomic_load_8(&m->queue)); | ||||
} | } | ||||
/* | /* | ||||
* vm_page_activate: | |||||
* | |||||
* Put the specified page on the active list (if appropriate). | |||||
* Ensure that act_count is at least ACT_INIT but do not otherwise | |||||
* mess with it. | |||||
* | |||||
* The page must be locked. | |||||
*/ | |||||
void | |||||
vm_page_activate(vm_page_t m) | |||||
{ | |||||
vm_page_assert_locked(m); | |||||
if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0) | |||||
return; | |||||
if (vm_page_queue(m) == PQ_ACTIVE) { | |||||
if (m->act_count < ACT_INIT) | |||||
m->act_count = ACT_INIT; | |||||
return; | |||||
} | |||||
vm_page_dequeue(m); | |||||
if (m->act_count < ACT_INIT) | |||||
m->act_count = ACT_INIT; | |||||
vm_page_enqueue(m, PQ_ACTIVE); | |||||
} | |||||
/* | |||||
  * vm_page_free_prep:
  *
  * Prepares the given page to be put on the free list,
  * disassociating it from any VM object.  The caller may return
  * the page to the free list only if this function returns true.
  *
  * The object must be locked.  The page must be locked if it is
  * managed.
  */
 bool
 vm_page_free_prep(vm_page_t m)
 {

+	/*
+	 * Synchronize with vm_page_drop(): ensure that all page modifications
+	 * are visible before proceeding.
+	 */
+	atomic_thread_fence_acq();

 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
 	if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
 		uint64_t *p;
 		int i;
 		p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 		for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
 			KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
 			    m, i, (uintmax_t)*p));
 	}
 #endif
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
-		vm_page_lock_assert(m, MA_OWNED);
+	if ((m->oflags & VPO_UNMANAGED) == 0)
 		KASSERT(!pmap_page_is_mapped(m),
 		    ("vm_page_free_prep: freeing mapped page %p", m));
-	} else
+	else
 		KASSERT(m->queue == PQ_NONE,
 		    ("vm_page_free_prep: unmanaged page %p is queued", m));
-	KASSERT((m->flags & PG_FICTITIOUS) != 0 || !vm_page_wired(m),
-	    ("vm_page_free_prep: page %p is wired", m));
 	VM_CNT_INC(v_tfree);

 	if (vm_page_sbusied(m))
 		panic("vm_page_free_prep: freeing busy page %p", m);

-	vm_page_remove(m);
+	/*
+	 * The page may carry a transient reference by the page daemon,
+	 * in which case it will race to free the page.
+	 */
+	if (m->object != NULL && !vm_page_remove(m))
+		return (false);

 	/*
 	 * If fictitious remove object association and
 	 * return.
 	 */
 	if ((m->flags & PG_FICTITIOUS) != 0) {
-		KASSERT(m->wire_count == 1,
-		    ("fictitious page %p is not wired", m));
+		KASSERT(m->ref_count == 1,
+		    ("fictitious page %p is referenced", m));
 		KASSERT(m->queue == PQ_NONE,
 		    ("fictitious page %p is queued", m));
 		return (false);
 	}

 	/*
 	 * Pages need not be dequeued before they are returned to the physical
 	 * memory allocator, but they must at least be marked for a deferred
 	 * dequeue.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_dequeue_deferred_free(m);

 	m->valid = 0;
 	vm_page_undirty(m);

-	if (vm_page_wired(m) != 0)
-		panic("vm_page_free_prep: freeing wired page %p", m);
+	if (m->ref_count != 0)
+		panic("vm_page_free_prep: page %p has references", m);

 	/*
 	 * Restore the default memory attribute to the page.
 	 */
 	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
 		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

 #if VM_NRESERVLEVEL > 0
... 58 lines not shown ...
 	while ((m = SLIST_FIRST(free)) != NULL) {
 		vm_page_free_toq(m);
 	}
 	if (update_wire_count)
 		vm_wire_sub(count);
 }

 /*
- * vm_page_wire:
- *
- * Mark this page as wired down.  If the page is fictitious, then
- * its wire count must remain one.
- *
- * The page must be locked.
+ * Mark this page as wired down, preventing reclamation by the page daemon
+ * or when the containing object is destroyed.
  */
 void
 vm_page_wire(vm_page_t m)
 {
+	u_int old;

-	vm_page_assert_locked(m);
-	if ((m->flags & PG_FICTITIOUS) != 0) {
-		KASSERT(m->wire_count == 1,
-		    ("vm_page_wire: fictitious page %p's wire count isn't one",
-		    m));
-		return;
-	}
-	if (!vm_page_wired(m)) {
-		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
-		    m->queue == PQ_NONE,
-		    ("vm_page_wire: unmanaged page %p is queued", m));
-		vm_wire_add(1);
-	}
-	m->wire_count++;
-	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
+	KASSERT(m->object != NULL,
+	    ("vm_page_wire: page %p does not belong to an object", m));
+	if (!vm_page_busied(m))
+		VM_OBJECT_ASSERT_LOCKED(m->object);
+	KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1,
+	    ("vm_page_wire: fictitious page %p has zero refs", m));
+
+	old = atomic_fetchadd_int(&m->ref_count, 1);
+	KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX,
+	    ("vm_page_wire: counter overflow for page %p", m));
+	if (VPRC_WIRE_COUNT(old) == 0)
+		vm_wire_add(1);
 }

+/*
+ * Attempt to wire a mapped page following a pmap lookup of that page.
+ * This may fail if a thread is concurrently tearing down mappings of the page.
+ */
+bool
+vm_page_wire_mapped(vm_page_t m)
+{
+	u_int old;
+
+	KASSERT(m->object != NULL,
+	    ("vm_page_try_wire: page %p does not belong to an object", m));
+
+	old = m->ref_count;
+	do {
+		KASSERT(old > 0,
+		    ("vm_page_try_wire: wiring unreferenced page %p", m));
+		if ((old & VPRC_BLOCKED) != 0)
+			return (false);
+	} while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));
+
+	if (VPRC_WIRE_COUNT(old) == 0)
+		vm_wire_add(1);
+	return (true);
+}
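vm_page_wire_mapped() is intended for use after a pmap lookup, where neither the page lock nor the object lock is held. A hypothetical caller might look like this; example_pmap_lookup_page() is a stand-in for machine-dependent translation code and is not a real function in this patch:

/*
 * Illustrative only: the shape of a pmap_extract_and_hold()-style lookup
 * under this scheme.  The pmap lock keeps the mapping stable while
 * vm_page_wire_mapped() either acquires a wiring or reports that a
 * concurrent unmapping has blocked new references (VPRC_BLOCKED).
 */
vm_page_t
example_extract_and_hold(pmap_t pmap, vm_offset_t va)
{
	vm_page_t m;

	PMAP_LOCK(pmap);
	m = example_pmap_lookup_page(pmap, va);	/* hypothetical helper */
	if (m != NULL && !vm_page_wire_mapped(m))
		m = NULL;	/* wiring blocked; fail the hold */
	PMAP_UNLOCK(pmap);
	return (m);
}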
 /*
- * vm_page_unwire:
- *
  * Release one wiring of the specified page, potentially allowing it to be
- * paged out.  Returns TRUE if the number of wirings transitions to zero and
- * FALSE otherwise.
+ * paged out.
  *
  * Only managed pages belonging to an object can be paged out.  If the number
  * of wirings transitions to zero and the page is eligible for page out, then
- * the page is added to the specified paging queue (unless PQ_NONE is
- * specified, in which case the page is dequeued if it belongs to a paging
- * queue).
+ * the page is added to the specified paging queue.  If the released wiring
+ * represented the last reference to the page, the page is freed.
  *
- * If a page is fictitious, then its wire count must always be one.
- *
  * A managed page must be locked.
  */
-bool
+void
 vm_page_unwire(vm_page_t m, uint8_t queue)
 {
-	bool unwired;
+	u_int old;
+	bool queued;

-	KASSERT(queue < PQ_COUNT || queue == PQ_NONE,
-	    ("vm_page_unwire: invalid queue %u request for page %p",
-	    queue, m));
-	if ((m->oflags & VPO_UNMANAGED) == 0)
-		vm_page_assert_locked(m);
+	KASSERT(queue < PQ_COUNT,
+	    ("vm_page_unwire: invalid queue %u request for page %p", queue, m));

-	unwired = vm_page_unwire_noq(m);
-	if (!unwired || (m->oflags & VPO_UNMANAGED) != 0 || m->object == NULL)
-		return (unwired);
+	if ((m->oflags & VPO_UNMANAGED) != 0) {
+		if (vm_page_unwire_noq(m) && m->ref_count == 0)
+			vm_page_free(m);
+		return;
+	}

-	if (vm_page_queue(m) == queue) {
-		if (queue == PQ_ACTIVE)
-			vm_page_reference(m);
-		else if (queue != PQ_NONE)
-			vm_page_requeue(m);
-	} else {
-		vm_page_dequeue(m);
-		if (queue != PQ_NONE) {
-			vm_page_enqueue(m, queue);
-			if (queue == PQ_ACTIVE)
-				/* Initialize act_count. */
-				vm_page_activate(m);
-		}
-	}
-	return (unwired);
+	vm_page_assert_locked(m);
+
+	/*
+	 * Update LRU state before releasing the wiring reference.
+	 * We only need to do this once since we hold the page lock.
+	 * Use a release store when updating the reference count to
+	 * synchronize with vm_page_free_prep().
+	 */
+	old = m->ref_count;
+	queued = false;
+	do {
+		KASSERT(VPRC_WIRE_COUNT(old) > 0,
+		    ("vm_page_unwire: wire count underflow for page %p", m));
+		if (!queued && VPRC_WIRE_COUNT(old) == 1) {
+			if (queue == PQ_ACTIVE && vm_page_queue(m) == PQ_ACTIVE)
+				vm_page_reference(m);
+			else
+				vm_page_mvqueue(m, queue);
+			queued = true;
+		}
+	} while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));

	[jeff] I think it should be possible to check whether you are dropping
	    the final reference and use that to only pick up the lock when
	    necessary.  For certain kinds of operations this may be common.
	[markj] Yes.  I will do that in a separate diff since this one is
	    already too big, and that proposal changes the KPI, which
	    currently requires the caller to do the locking.

+	if (VPRC_WIRE_COUNT(old) == 1) {
+		vm_wire_sub(1);
+		if (old == 1)
+			vm_page_free(m);
+	}
 }
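The "release store ... synchronize with vm_page_free_prep()" comment compresses a subtle protocol; restated as an ordering sketch (illustrative, using only the primitives from this patch):

/*
 * Illustrative restatement of the ordering protocol; not new code.
 *
 *	releasing thread (vm_page_unwire):
 *		vm_page_mvqueue(m, queue);		plain stores
 *		atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1);
 *
 *	freeing thread (vm_page_free_prep):
 *		atomic_thread_fence_acq();
 *		... reads of m->queue, m->aflags, etc. ...
 *
 * The release store orders the queue-state updates before the count
 * update; the acquire fence orders the freeing thread's reads after its
 * observation of the final count, so the LRU updates cannot be missed.
 */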
 /*
- *
- * vm_page_unwire_noq:
- *
  * Unwire a page without (re-)inserting it into a page queue.  It is up
  * to the caller to enqueue, requeue, or free the page as appropriate.
- * In most cases, vm_page_unwire() should be used instead.
+ * In most cases involving managed pages, vm_page_unwire() should be used
+ * instead.
  */
 bool
 vm_page_unwire_noq(vm_page_t m)
 {
+	u_int old;

-	if ((m->oflags & VPO_UNMANAGED) == 0)
-		vm_page_assert_locked(m);
-	if ((m->flags & PG_FICTITIOUS) != 0) {
-		KASSERT(m->wire_count == 1,
-		    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
-		return (false);
-	}
-	if (!vm_page_wired(m))
-		panic("vm_page_unwire: page %p's wire count is zero", m);
-	m->wire_count--;
-	if (m->wire_count == 0) {
-		vm_wire_sub(1);
-		return (true);
-	} else
-		return (false);
+	old = vm_page_drop(m, -1);
+	KASSERT(VPRC_WIRE_COUNT(old) != 0,
+	    ("vm_page_unref: counter underflow for page %p", m));
+	KASSERT((m->flags & PG_FICTITIOUS) == 0 || VPRC_WIRE_COUNT(old) > 1,
+	    ("vm_page_unref: missing ref on fictitious page %p", m));
+
+	if (VPRC_WIRE_COUNT(old) > 1)
+		return (false);
+	vm_wire_sub(1);
+	return (true);
 }
 /*
+ * Ensure that the page is in the specified page queue.  If the page is
+ * active or being moved to the active queue, ensure that its act_count is
+ * at least ACT_INIT but do not otherwise mess with it.  Otherwise, ensure that
+ * the page is at the tail of its page queue.
+ *
+ * The page may be wired.  The caller should release any wiring references
+ * before releasing the page lock, otherwise the page daemon may immediately
+ * dequeue the page.
+ */
+static __always_inline void
+vm_page_mvqueue(vm_page_t m, const int nqueue)
+{
+
+	vm_page_assert_locked(m);
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("vm_page_mvqueue: page %p is unmanaged", m));
+
+	if (vm_page_queue(m) != nqueue) {
+		vm_page_dequeue(m);
+		vm_page_enqueue(m, nqueue);
+	} else if (nqueue != PQ_ACTIVE) {
+		vm_page_requeue(m);
+	}
+
+	if (nqueue == PQ_ACTIVE && m->act_count < ACT_INIT)
+		m->act_count = ACT_INIT;
+}
+
+/*
+ * Put the specified page on the active list (if appropriate).
+ *
+ * A managed page must be locked.
+ */
+void
+vm_page_activate(vm_page_t m)
+{
+
+	if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
+		return;
+	vm_page_mvqueue(m, PQ_ACTIVE);
+}
 /*
  * Move the specified page to the tail of the inactive queue, or requeue
  * the page if it is already in the inactive queue.
  *
- * The page must be locked.
+ * A managed page must be locked.
  */
 void
 vm_page_deactivate(vm_page_t m)
 {

-	vm_page_assert_locked(m);
-	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
+	if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
 		return;

-	if (!vm_page_inactive(m)) {
-		vm_page_dequeue(m);
-		vm_page_enqueue(m, PQ_INACTIVE);
-	} else
-		vm_page_requeue(m);
+	vm_page_mvqueue(m, PQ_INACTIVE);
 }
 /*
  * Move the specified page close to the head of the inactive queue,
  * bypassing LRU.  A marker page is used to maintain FIFO ordering.
  * As with regular enqueues, we use a per-CPU batch queue to reduce
  * contention on the page queue lock.
- *
- * The page must be locked.
  */
-void
-vm_page_deactivate_noreuse(vm_page_t m)
+static void
+_vm_page_deactivate_noreuse(vm_page_t m)
 {

 	vm_page_assert_locked(m);
-	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0)
-		return;
 	if (!vm_page_inactive(m)) {
 		vm_page_dequeue(m);
 		m->queue = PQ_INACTIVE;
 	}
 	if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
 	vm_pqbatch_submit_page(m, PQ_INACTIVE);
 }

+void
+vm_page_deactivate_noreuse(vm_page_t m)
+{
+
+	KASSERT(m->object != NULL,
+	    ("vm_page_deactivate_noreuse: page %p has no object", m));
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_wired(m))
+		_vm_page_deactivate_noreuse(m);
+}
/* | /*
 * vm_page_launder |
 * |
 *	Put a page in the laundry, or requeue it if it is already there. | * Put a page in the laundry, or requeue it if it is already there.
 * |
 *	The page must be locked. |
 */ | */
void | void
vm_page_launder(vm_page_t m) | vm_page_launder(vm_page_t m)
{ | {
	vm_page_assert_locked(m); |
	if (vm_page_wired(m) || (m->oflags & VPO_UNMANAGED) != 0) | 	if ((m->oflags & VPO_UNMANAGED) != 0 || vm_page_wired(m))
		return; | 		return;
	if (vm_page_in_laundry(m)) | 	vm_page_mvqueue(m, PQ_LAUNDRY);
		vm_page_requeue(m); |
	else { |
		vm_page_dequeue(m); |
		vm_page_enqueue(m, PQ_LAUNDRY); |
	} |
} | }
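A sketch of the caller pattern this helper supports, assuming a hypothetical scan that must decide between normal inactive-queue reclamation of a clean page and laundering a dirty one:

/* Hypothetical caller; assumes <vm/vm.h>, <vm/vm_page.h>. */
static void
example_reclaim_or_launder(vm_page_t m)
{

	vm_page_lock(m);
	if (m->dirty != 0)
		vm_page_launder(m);	/* must be cleaned before reuse */
	else
		vm_page_deactivate(m);	/* clean: normal inactive LRU */
	vm_page_unlock(m);
}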
/* | /*
 * vm_page_unswappable |
 * |
 *	Put a page in the PQ_UNSWAPPABLE holding queue. | * Put a page in the PQ_UNSWAPPABLE holding queue.
 */ | */
void | void
vm_page_unswappable(vm_page_t m) | vm_page_unswappable(vm_page_t m)
{ | {
	vm_page_assert_locked(m); | 	vm_page_assert_locked(m);
	KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0, | 	KASSERT(!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0,
	    ("page %p already unswappable", m)); | 	    ("page %p already unswappable", m));

	vm_page_dequeue(m); | 	vm_page_dequeue(m);
	vm_page_enqueue(m, PQ_UNSWAPPABLE); | 	vm_page_enqueue(m, PQ_UNSWAPPABLE);
} | }
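For context (an assumption drawn from how PQ_UNSWAPPABLE is used elsewhere in the VM system, not something shown in this diff): a laundering path might park a dirty anonymous page here when no swap device can take it.

/* Hypothetical caller; assumes <vm/vm.h>, <vm/vm_page.h>. */
static void
example_swap_full(vm_page_t m)
{

	vm_page_lock(m);
	if (!vm_page_wired(m) && (m->oflags & VPO_UNMANAGED) == 0)
		vm_page_unswappable(m);	/* keep the laundry scan from revisiting it */
	vm_page_unlock(m);
}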
/* | /*
 * Attempt to free the page.  If it cannot be freed, do nothing.  Returns true | * Release a wired page to the page cache, and optionally attempt to free it.
 * if the page is freed and false otherwise. | * The page's object must be locked.  See the comment above vm_page_release().
 * | */
 * The page must be managed.  The page and its containing object must be |
 * locked. |
 */ |
bool | void
vm_page_try_to_free(vm_page_t m) | vm_page_release_locked(vm_page_t m, bool nocache)
{ | {
 | 	vm_object_t object;
 |
	vm_page_assert_locked(m); | 	object = m->object;
	VM_OBJECT_ASSERT_WLOCKED(m->object); | 	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m)); | 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 | 	    ("vm_page_release_locked: page %p is unmanaged", m));
	if (m->dirty != 0 || vm_page_wired(m) || vm_page_busied(m)) |
		return (false); | 	if (!vm_page_unwire_noq(m))
	if (m->object->ref_count != 0) { | 		return;
		pmap_remove_all(m); | 	if (m->valid == 0 || nocache) {
		if (m->dirty != 0) | 		if ((object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
			return (false); | 		    m->dirty == 0 && !vm_page_busied(m) && !vm_page_wired(m)) {
	} | 			vm_page_free(m);
	vm_page_free(m); | 		} else {
	return (true); | 			vm_page_lock(m);
} | 			vm_page_deactivate_noreuse(m);
 | 			vm_page_unlock(m);
 | 		}
 | 	} else {
 | 		vm_page_lock(m);
 | 		if (vm_page_active(m))
 | 			vm_page_reference(m);
 | 		else
 | 			vm_page_deactivate(m);
 | 		vm_page_unlock(m);
 | 	}
 | }
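A usage sketch for the locked variant, assuming a hypothetical consumer that already holds the object write lock while releasing a run of wired pages:

/* Hypothetical caller; assumes <vm/vm.h>, <vm/vm_object.h>, <vm/vm_page.h>. */
static void
example_release_run(vm_object_t object, vm_page_t *ma, int count, bool nocache)
{
	int i;

	VM_OBJECT_WLOCK(object);
	for (i = 0; i < count; i++)
		vm_page_release_locked(ma[i], nocache);
	VM_OBJECT_WUNLOCK(object);
}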
/*
 * Release a wired page to the page cache, and optionally attempt to free it.
 * If the caller wishes to attempt to free the page, and the page is mapped,
 * dirty, busy or wired, we do not free it but instead place it near the head
 * of the inactive queue to accelerate reclamation.
 */
void
vm_page_release(vm_page_t m, bool nocache)
{
	vm_object_t object;
	u_int old;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("vm_page_release: page %p is unmanaged", m));

	if (nocache) {
		/*
		 * Attempt to free the page.  The page may be renamed between
		 * objects so we must verify the page's object pointer after
		 * acquiring the lock and retry if they do not match.
		 */
		while ((object = m->object) != NULL) {
			if (!VM_OBJECT_TRYWLOCK(object)) {
				object = NULL;
				break;
			}
			if (m->object == object)
				break;
			VM_OBJECT_WUNLOCK(object);
		}
		if (object != NULL) {
			vm_page_release_locked(m, nocache);
			VM_OBJECT_WUNLOCK(object);
			return;
		}
	}

	/*
	 * Update LRU state before releasing the wiring reference.
	 * Use a release store when updating the reference count to
	 * synchronize with vm_page_free_prep().
	 */
	old = m->ref_count;
	do {
		if (VPRC_WIRE_COUNT(old) == 1) {
			vm_page_lock(m);

			/*
			 * Use a racy check of the valid bits to determine
			 * whether we can accelerate reclamation of the page.
			 * The valid bits will be stable unless the page is
			 * being mapped or is referenced by multiple buffers,
			 * and in those cases we expect races to be rare.  At
			 * worst we will either accelerate reclamation of a
			 * valid page and violate LRU, or unnecessarily defer
			 * reclamation of an invalid page.
			 */
			if (m->valid == 0 || nocache)
				_vm_page_deactivate_noreuse(m);
			else if (vm_page_active(m))
				vm_page_reference(m);
			else
				vm_page_mvqueue(m, PQ_INACTIVE);
			vm_page_unlock(m);
		}
	} while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));

	if (VPRC_WIRE_COUNT(old) == 1) {
		vm_wire_sub(1);
		if (old == 1)
			vm_page_free(m);
	}
}
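The unlocked variant's typical lifecycle, sketched with a hypothetical helper: wire the page around an I/O, then hand the wiring back, with nocache selecting accelerated reclamation.

/* Hypothetical caller; assumes <vm/vm.h>, <vm/vm_page.h>. */
static void
example_transient_io(vm_page_t m)
{

	vm_page_wire(m);		/* keep the page while I/O is in flight */
	/* ... perform the I/O on the page ... */
	vm_page_release(m, true);	/* data unlikely to be reused soon */
}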
/*
 * Attempt to invoke the requested operation while blocking new wirings of the
 * page.
 */
static bool
vm_page_try_blocked_op(vm_page_t m, void (*op)(vm_page_t))
{
	u_int old;

	vm_page_assert_locked(m);
	KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0,
	    ("vm_page_try_blocked_op: page %p has no object", m));
	KASSERT(!vm_page_busied(m),
	    ("vm_page_try_blocked_op: page %p is busy", m));
	VM_OBJECT_ASSERT_LOCKED(m->object);

	old = m->ref_count;
	do {
		KASSERT(old != 0,
		    ("vm_page_try_blocked_op: page %p has no references", m));
		if (VPRC_WIRE_COUNT(old) != 0)
			return (false);
	} while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED));

	(op)(m);

	atomic_clear_int(&m->ref_count, VPRC_BLOCKED);
	return (true);
}
bool
vm_page_try_remove_all(vm_page_t m)
{

	return (vm_page_try_blocked_op(m, pmap_remove_all));
}

bool
vm_page_try_remove_write(vm_page_t m)
{

	return (vm_page_try_blocked_op(m, pmap_remove_write));
}
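A sketch of how the try_remove helpers are meant to be called, using a hypothetical function; per the assertions above, the page and object locks are held, and a false return means a concurrent wiring won the race:

/* Hypothetical caller; assumes <vm/vm.h>, <vm/vm_page.h>. */
static bool
example_revoke_write(vm_page_t m)
{

	if (!vm_page_try_remove_write(m))
		return (false);		/* page was wired concurrently */
	/* All managed writeable mappings are now gone; safe to clean. */
	return (true);
}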
/* | /*
 * vm_page_advise | * vm_page_advise
 * | *

jeff: We would expect a failure of this mechanism to produce a page with additional references after the op.  Can you safely assert that this hasn't happened?
markj: Yes, I can strengthen the assertion below.

 * Apply the specified advice to the given page. | * Apply the specified advice to the given page.
 * | *
 * The object and page must be locked. | * The object and page must be locked.
 */ | */
void | void
vm_page_advise(vm_page_t m, int advice) | vm_page_advise(vm_page_t m, int advice)
{ | {
(75 lines elided)
		if (sleep) { | 		if (sleep) {
			vm_page_aflag_set(m, PGA_REFERENCED); | 			vm_page_aflag_set(m, PGA_REFERENCED);
			vm_page_lock(m); | 			vm_page_lock(m);
			VM_OBJECT_WUNLOCK(object); | 			VM_OBJECT_WUNLOCK(object);
			vm_page_busy_sleep(m, "pgrbwt", (allocflags & | 			vm_page_busy_sleep(m, "pgrbwt", (allocflags &
			    VM_ALLOC_IGN_SBUSY) != 0); | 			    VM_ALLOC_IGN_SBUSY) != 0);
			VM_OBJECT_WLOCK(object); | 			VM_OBJECT_WLOCK(object);
			goto retrylookup; | 			goto retrylookup;
		} else { | 		} else {
			if ((allocflags & VM_ALLOC_WIRED) != 0) { | 			if ((allocflags & VM_ALLOC_WIRED) != 0)
				vm_page_lock(m); |
				vm_page_wire(m); | 				vm_page_wire(m);
				vm_page_unlock(m); |
			} |
			if ((allocflags & | 			if ((allocflags &
			    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0) | 			    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
				vm_page_xbusy(m); | 				vm_page_xbusy(m);
			if ((allocflags & VM_ALLOC_SBUSY) != 0) | 			if ((allocflags & VM_ALLOC_SBUSY) != 0)
				vm_page_sbusy(m); | 				vm_page_sbusy(m);
			return (m); | 			return (m);
		} | 		}
	} | 	}
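With the page lock no longer needed around VM_ALLOC_WIRED, a grab caller only takes the object lock; a minimal sketch with a hypothetical wrapper:

/* Hypothetical caller; assumes <vm/vm.h>, <vm/vm_object.h>, <vm/vm_page.h>. */
static vm_page_t
example_grab_wired(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	m = vm_page_grab(object, pindex, VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
	VM_OBJECT_WUNLOCK(object);
	return (m);
}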
(81 lines elided)
	if (m != NULL) { | 	if (m != NULL) {
		vm_page_aflag_set(m, PGA_REFERENCED); | 		vm_page_aflag_set(m, PGA_REFERENCED);
		vm_page_lock(m); | 		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(object); | 		VM_OBJECT_WUNLOCK(object);
		vm_page_busy_sleep(m, "grbmaw", (allocflags & | 		vm_page_busy_sleep(m, "grbmaw", (allocflags &
		    VM_ALLOC_IGN_SBUSY) != 0); | 		    VM_ALLOC_IGN_SBUSY) != 0);
		VM_OBJECT_WLOCK(object); | 		VM_OBJECT_WLOCK(object);
		goto retrylookup; | 		goto retrylookup;
	} | 	}
	if ((allocflags & VM_ALLOC_WIRED) != 0) { | 	if ((allocflags & VM_ALLOC_WIRED) != 0)
		vm_page_lock(m); |
		vm_page_wire(m); | 		vm_page_wire(m);
		vm_page_unlock(m); |
	} |
	if ((allocflags & (VM_ALLOC_NOBUSY | | 	if ((allocflags & (VM_ALLOC_NOBUSY |
	    VM_ALLOC_SBUSY)) == 0) | 	    VM_ALLOC_SBUSY)) == 0)
		vm_page_xbusy(m); | 		vm_page_xbusy(m);
	if ((allocflags & VM_ALLOC_SBUSY) != 0) | 	if ((allocflags & VM_ALLOC_SBUSY) != 0)
		vm_page_sbusy(m); | 		vm_page_sbusy(m);
} else { | } else {
	m = vm_page_alloc_after(object, pindex + i, | 	m = vm_page_alloc_after(object, pindex + i,
	    pflags | VM_ALLOC_COUNT(count - i), mpred); | 	    pflags | VM_ALLOC_COUNT(count - i), mpred);
(512 lines elided)
DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo) | DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
	virt = strchr(modif, 'v') != NULL; | 	virt = strchr(modif, 'v') != NULL;
	if (virt) | 	if (virt)
		m = PHYS_TO_VM_PAGE(pmap_kextract(addr)); | 		m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
	else if (phys) | 	else if (phys)
		m = PHYS_TO_VM_PAGE(addr); | 		m = PHYS_TO_VM_PAGE(addr);
	else | 	else
		m = (vm_page_t)addr; | 		m = (vm_page_t)addr;
	db_printf( | 	db_printf(
	    "page %p obj %p pidx 0x%jx phys 0x%jx q %d wire %d\n" | 	    "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref %u\n"
	    " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n", | 	    " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr, | 	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
	    m->queue, m->wire_count, m->aflags, m->oflags, | 	    m->queue, m->ref_count, m->aflags, m->oflags,
	    m->flags, m->act_count, m->busy_lock, m->valid, m->dirty); | 	    m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
} | }
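For reference, the command is invoked from the ddb(4) prompt roughly as follows; the 'v' modifier appears in the code above, while the 'p' modifier is assumed from the elided phys check.

	db> show pginfo <vm_page_t address>
	db> show pginfo/p <physical address>
	db> show pginfo/v <kernel virtual address>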
#endif /* DDB */ | #endif /* DDB */ |
I assume the comment is not intentional?