head/sys/vm/vm_page.c
[... first 824 lines of vm_page.c not shown ...]
 void
 vm_page_reference(vm_page_t m)
 {

 	vm_page_aflag_set(m, PGA_REFERENCED);
 }
+/*
+ *	vm_page_trybusy
+ *
+ *	Helper routine for grab functions to trylock busy.
+ *
+ *	Returns true on success and false on failure.
+ */
 static bool
-vm_page_acquire_flags(vm_page_t m, int allocflags)
+vm_page_trybusy(vm_page_t m, int allocflags)
 {
-	bool locked;

 	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
-		locked = vm_page_trysbusy(m);
+		return (vm_page_trysbusy(m));
 	else
-		locked = vm_page_tryxbusy(m);
-	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
-		vm_page_wire(m);
-	return (locked);
+		return (vm_page_tryxbusy(m));
 }
 /*
- *	vm_page_busy_sleep_flags
+ *	vm_page_tryacquire
  *
- *	Sleep for busy according to VM_ALLOC_ parameters.  Returns true
- *	if the caller should retry and false otherwise.
+ *	Helper routine for grab functions to trylock busy and wire.
+ *
+ *	Returns true on success and false on failure.
  */
-static bool
-vm_page_busy_sleep_flags(vm_object_t object, vm_page_t m, const char *wmesg,
-    int allocflags)
+static inline bool
+vm_page_tryacquire(vm_page_t m, int allocflags)
 {
+	bool locked;

-	if ((allocflags & VM_ALLOC_NOWAIT) != 0)
-		return (false);
-
-	/*
-	 * Reference the page before unlocking and sleeping so that
-	 * the page daemon is less likely to reclaim it.
-	 */
-	if ((allocflags & VM_ALLOC_NOCREAT) == 0)
-		vm_page_reference(m);
-
-	if (_vm_page_busy_sleep(object, m, m->pindex, wmesg, allocflags, true))
-		VM_OBJECT_WLOCK(object);
-	if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
-		return (false);
-	return (true);
+	locked = vm_page_trybusy(m, allocflags);
+	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
+		vm_page_wire(m);
+	return (locked);
 }
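
The pair above splits what vm_page_acquire_flags() did in one step: vm_page_trybusy() picks the busy mode from the flags (VM_ALLOC_SBUSY or VM_ALLOC_IGN_SBUSY requests a shared-busy attempt, anything else an exclusive one), and vm_page_tryacquire() layers the optional wiring on top. A minimal caller sketch (editor's illustration, not part of this changeset; m is assumed to be a resident page the caller may access):

	/* One non-sleeping attempt: exclusive busy plus a wiring. */
	if (vm_page_tryacquire(m, VM_ALLOC_WIRED)) {
		/* m is xbusied and wired here. */
		vm_page_xunbusy(m);
		/* The wiring persists until a later vm_page_unwire(). */
	}

	/* A shared busy suffices for readers of an already-valid page. */
	if (vm_page_tryacquire(m, VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY))
		vm_page_sunbusy(m);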
 /*
  *	vm_page_busy_acquire:
  *
  *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
  *	and drop the object lock if necessary.
  */
 bool
 vm_page_busy_acquire(vm_page_t m, int allocflags)
 {
 	vm_object_t obj;
 	bool locked;

 	/*
 	 * The page-specific object must be cached because page
 	 * identity can change during the sleep, causing the
 	 * re-lock of a different object.
 	 * It is assumed that a reference to the object is already
 	 * held by the callers.
 	 */
 	obj = m->object;
 	for (;;) {
-		if (vm_page_acquire_flags(m, allocflags))
+		if (vm_page_tryacquire(m, allocflags))
 			return (true);
 		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 			return (false);
 		if (obj != NULL)
 			locked = VM_OBJECT_WOWNED(obj);
 		else
 			locked = false;
 		MPASS(locked || vm_page_wired(m));
[... 693 lines not shown; the diff resumes inside vm_page_object_remove() ...]
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
 	    ("page %p is missing its object ref", m));

 	/* Deferred free of swap space. */
 	if ((m->a.flags & PGA_SWAP_FREE) != 0)
 		vm_pager_page_unswapped(m);

+	m->object = NULL;
 	mrem = vm_radix_remove(&object->rtree, m->pindex);
 	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

 	/*
 	 * Now remove from the object's list of backed pages.
 	 */
 	TAILQ_REMOVE(&object->memq, m, listq);
[... 38 lines not shown ...]
  *	Removes the page but leaves the xbusy held.  Returns true if this
  *	removed the final ref and false otherwise.
  */
 bool
 vm_page_remove_xbusy(vm_page_t m)
 {

 	vm_page_object_remove(m);
-	m->object = NULL;
 	return (vm_page_drop(m, VPRC_OBJREF) == VPRC_OBJREF);
 }
 /*
  *	vm_page_lookup:
  *
  *	Returns the page associated with the object/offset
  *	pair specified; if none is found, NULL is returned.
  *
  *	The object must be locked.
  */
 vm_page_t
 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 {

 	VM_OBJECT_ASSERT_LOCKED(object);
 	return (vm_radix_lookup(&object->rtree, pindex));
 }
 /*
+ * This should only be used by lockless functions for releasing transient
+ * incorrect acquires.  The page may have been freed after we acquired a
+ * busy lock.  In this case busy_lock == VPB_FREED and we have nothing
+ * further to do.
+ */
+static void
+vm_page_busy_release(vm_page_t m)
+{
+	u_int x;
+
+	x = atomic_load_int(&m->busy_lock);
+	for (;;) {
+		if (x == VPB_FREED)
+			break;
+		if ((x & VPB_BIT_SHARED) != 0 && VPB_SHARERS(x) > 1) {
+			if (atomic_fcmpset_int(&m->busy_lock, &x,
+			    x - VPB_ONE_SHARER))
+				break;
+			continue;
+		}
+		KASSERT((x & VPB_BIT_SHARED) != 0 ||
+		    (x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
+		    ("vm_page_busy_release: %p xbusy not owned.", m));
+		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
+			continue;
+		if ((x & VPB_BIT_WAITERS) != 0)
+			wakeup(m);
+		break;
+	}
+}
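
vm_page_busy_release() exists because a lockless lookup can busy a page that was freed (busy_lock set to VPB_FREED) or recycled between the lookup and the busy acquisition; pairing it with an identity re-check gives the pattern the unlocked grab functions below rely on. A condensed sketch of that pattern (editor's illustration; the real loop is in vm_page_acquire_unlocked() further down):

	m = vm_radix_lookup_unlocked(&object->rtree, pindex);
	if (m != NULL && vm_page_trybusy(m, allocflags)) {
		if (m->object != object || m->pindex != pindex) {
			/*
			 * Lost a race: the page was freed or renamed after
			 * the lookup.  m->object is cleared before the radix
			 * removal (see vm_page_object_remove() above), so
			 * this check is sufficient.
			 */
			vm_page_busy_release(m);
			m = NULL;
		}
	}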
+
+/*
  *	vm_page_find_least:
  *
  *	Returns the page associated with the object with least pindex
  *	greater than or equal to the parameter pindex, or NULL.
  *
  *	The object must be locked.
  */
 vm_page_t
[... 1,993 lines not shown; the diff resumes in vm_page_free_prep(), inside its "if (m->object != NULL)" block ...]
 		 * The object reference can be released without an atomic
 		 * operation.
 		 */
 		KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
 		    m->ref_count == VPRC_OBJREF,
 		    ("vm_page_free_prep: page %p has unexpected ref_count %u",
 		    m, m->ref_count));
 		vm_page_object_remove(m);
-		m->object = NULL;
 		m->ref_count -= VPRC_OBJREF;
 	} else
 		vm_page_assert_unbusied(m);

 	vm_page_busy_free(m);
 	/*
 	 * If fictitious remove object association and
[... 540 lines not shown; the diff resumes at the tail of vm_page_advise() ...]
 	 * laundry are moved there.
 	 */
 	if (m->dirty == 0)
 		vm_page_deactivate_noreuse(m);
 	else if (!vm_page_in_laundry(m))
 		vm_page_launder(m);
 }
-static inline int
-vm_page_grab_pflags(int allocflags)
-{
-	int pflags;
-
+/*
+ *	vm_page_grab_release
+ *
+ *	Helper routine for grab functions to release busy on return.
+ */
+static inline void
+vm_page_grab_release(vm_page_t m, int allocflags)
+{
+
+	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
+		if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
+			vm_page_sunbusy(m);
+		else
+			vm_page_xunbusy(m);
+	}
+}
+
+/*
+ *	vm_page_grab_sleep
+ *
+ *	Sleep for busy according to VM_ALLOC_ parameters.  Returns true
+ *	if the caller should retry and false otherwise.
+ *
+ *	If the object is locked on entry the object will be unlocked with
+ *	false returns and still locked but possibly having been dropped
+ *	with true returns.
+ */
+static bool
+vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
+    const char *wmesg, int allocflags, bool locked)
+{
+
+	if ((allocflags & VM_ALLOC_NOWAIT) != 0)
+		return (false);
+
+	/*
+	 * Reference the page before unlocking and sleeping so that
+	 * the page daemon is less likely to reclaim it.
+	 */
+	if (locked && (allocflags & VM_ALLOC_NOCREAT) == 0)
+		vm_page_reference(m);
+
+	if (_vm_page_busy_sleep(object, m, m->pindex, wmesg, allocflags,
+	    locked) && locked)
+		VM_OBJECT_WLOCK(object);
+	if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
+		return (false);
+	return (true);
+}
+
+/*
+ * Assert that the grab flags are valid.
+ */
+static inline void
+vm_page_grab_check(int allocflags)
+{
+
 	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_WIRED) != 0,
-	    ("vm_page_grab_pflags: the pages must be busied or wired"));
+	    ("vm_page_grab*: the pages must be busied or wired"));
 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
-	    ("vm_page_grab_pflags: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
-	    "mismatch"));
+	    ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
+}
+
+/*
+ * Calculate the page allocation flags for grab.
+ */
+static inline int
+vm_page_grab_pflags(int allocflags)
+{
+	int pflags;
+
 	pflags = allocflags &
 	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
 	    VM_ALLOC_NOBUSY);
 	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
 		pflags |= VM_ALLOC_WAITFAIL;
 	if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
 		pflags |= VM_ALLOC_SBUSY;
[... 10 lines not shown ...]
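
A worked example of the flag translation (editor's illustration, derived only from the code above): a sleepable caller passing VM_ALLOC_WIRED | VM_ALLOC_NOBUSY satisfies vm_page_grab_check() because VM_ALLOC_NOBUSY is paired with VM_ALLOC_WIRED, and then:

	/* Input to vm_page_grab_pflags(): */
	allocflags = VM_ALLOC_WIRED | VM_ALLOC_NOBUSY;	/* sleepable by default */
	/* Result: VM_ALLOC_NOBUSY is stripped and VM_ALLOC_WAITFAIL is
	 * substituted, so vm_page_alloc() sleeps at most once and returns
	 * NULL to the grab loop rather than retrying internally. */
	pflags = VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL;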
  *
  * The object must be locked on entry.  The lock will, however, be released
  * and reacquired if the routine sleeps.
  */
 vm_page_t
 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
-	int pflags;

 	VM_OBJECT_ASSERT_WLOCKED(object);
-	pflags = vm_page_grab_pflags(allocflags);
+	vm_page_grab_check(allocflags);
+
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
-		if (!vm_page_acquire_flags(m, allocflags)) {
-			if (vm_page_busy_sleep_flags(object, m, "pgrbwt",
-			    allocflags))
+		if (!vm_page_tryacquire(m, allocflags)) {
+			if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
+			    allocflags, true))
 				goto retrylookup;
 			return (NULL);
 		}
 		goto out;
 	}
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 		return (NULL);
-	m = vm_page_alloc(object, pindex, pflags);
+	m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags));
 	if (m == NULL) {
 		if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
 			return (NULL);
 		goto retrylookup;
 	}
 	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);

 out:
-	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
-		if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
-			vm_page_sunbusy(m);
-		else
-			vm_page_xunbusy(m);
-	}
+	vm_page_grab_release(m, allocflags);
+
 	return (m);
 }
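
A typical locked-path caller of the new arrangement (editor's sketch, not from this changeset; obj and pindex are placeholders):

	vm_page_t m;

	VM_OBJECT_WLOCK(obj);
	m = vm_page_grab(obj, pindex, VM_ALLOC_NOWAIT | VM_ALLOC_WIRED);
	VM_OBJECT_WUNLOCK(obj);
	if (m != NULL) {
		/* m is xbusied and wired; NULL would mean the page was
		 * busy or the allocation failed, since VM_ALLOC_NOWAIT
		 * forbids sleeping. */
		vm_page_xunbusy(m);
	}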
+/*
+ * Locklessly attempt to acquire a page given a (object, pindex) tuple
+ * and an optional previous page to avoid the radix lookup.  The resulting
+ * page will be validated against the identity tuple and busied or wired
+ * as requested.  A NULL *mp return guarantees that the page was not in
+ * radix at the time of the call but callers must perform higher level
+ * synchronization or retry the operation under a lock if they require
+ * an atomic answer.  This is the only lock free validation routine,
+ * other routines can depend on the resulting page state.
+ *
+ * The return value indicates whether the operation failed due to caller
+ * flags.  The return is tri-state with mp:
+ *
+ * (true, *mp != NULL) - The operation was successful.
+ * (true, *mp == NULL) - The page was not found in tree.
+ * (false, *mp == NULL) - WAITFAIL or NOWAIT prevented acquisition.
+ */
+static bool
+vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex,
+    vm_page_t prev, vm_page_t *mp, int allocflags)
+{
+	vm_page_t m;
+
+	vm_page_grab_check(allocflags);
+	MPASS(prev == NULL || vm_page_busied(prev) || vm_page_wired(prev));
+
+	*mp = NULL;
+	for (;;) {
+		/*
+		 * We may see a false NULL here because the previous page
+		 * has been removed or just inserted and the list is loaded
+		 * without barriers.  Switch to radix to verify.
+		 */
+		if (prev == NULL || (m = TAILQ_NEXT(prev, listq)) == NULL ||
+		    m->pindex != pindex ||
+		    atomic_load_ptr(&m->object) != object) {
+			prev = NULL;
+			/*
+			 * This guarantees the result is instantaneously
+			 * correct.
+			 */
+			m = vm_radix_lookup_unlocked(&object->rtree, pindex);
+		}
+		if (m == NULL)
+			return (true);
+		if (vm_page_trybusy(m, allocflags)) {
+			if (m->object == object && m->pindex == pindex)
+				break;
+			/* relookup. */
+			vm_page_busy_release(m);
+			cpu_spinwait();
+			continue;
+		}
+		if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
+		    allocflags, false))
+			return (false);
+	}
+	if ((allocflags & VM_ALLOC_WIRED) != 0)
+		vm_page_wire(m);
+	vm_page_grab_release(m, allocflags);
+	*mp = m;
+	return (true);
+}
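
Handling the tri-state result (editor's sketch; the errno is a placeholder):

	if (!vm_page_acquire_unlocked(obj, pindex, NULL, &m, allocflags))
		return (EBUSY);	/* NOWAIT/WAITFAIL stopped the acquire. */
	if (m == NULL) {
		/* No page was resident at the instant of the lookup; take
		 * the object lock to allocate or to get an atomic answer. */
	} else {
		/* Success: m is busied and/or wired per allocflags. */
	}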
+
+/*
+ * Try to locklessly grab a page and fall back to the object lock if NOCREAT
+ * is not set.
+ */
+vm_page_t
+vm_page_grab_unlocked(vm_object_t object, vm_pindex_t pindex, int allocflags)
+{
+	vm_page_t m;
+
+	vm_page_grab_check(allocflags);
+
+	if (!vm_page_acquire_unlocked(object, pindex, NULL, &m, allocflags))
+		return (NULL);
+	if (m != NULL)
+		return (m);
+
+	/*
+	 * The radix lockless lookup should never return a false negative.
+	 * If the user specifies NOCREAT they are guaranteed there was no
+	 * page present at the instant of the call.  A NOCREAT caller must
+	 * handle create races gracefully.
+	 */
+	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
+		return (NULL);
+	VM_OBJECT_WLOCK(object);
+	m = vm_page_grab(object, pindex, allocflags);
+	VM_OBJECT_WUNLOCK(object);
+
+	return (m);
+}
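
Usage mirrors vm_page_grab() minus the object lock (editor's sketch; obj and pindex are placeholders):

	m = vm_page_grab_unlocked(obj, pindex,
	    VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
	/* If not NULL, m is wired and unbusied.  With VM_ALLOC_NOCREAT a
	 * NULL only proves no page was present at the instant of the
	 * lookup; a racing creator may insert one immediately after. */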
 /*
  * Grab a page and make it valid, paging in if necessary.  Pages missing from
  * their pager are zero filled and validated.  If a VM_ALLOC_COUNT is supplied
  * and the page is not valid, as many as VM_INITIAL_PAGEIN pages can be brought
  * in simultaneously.  Additional pages will be left on a paging queue but
  * will neither be wired nor busy regardless of allocflags.
  */
 int
 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
 {
 	vm_page_t m;
 	vm_page_t ma[VM_INITIAL_PAGEIN];
-	bool sleep, xbusy;
 	int after, i, pflags, rv;

 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 	    ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
 	KASSERT((allocflags &
 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
 	    ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY);
 	pflags |= VM_ALLOC_WAITFAIL;

 retrylookup:
-	xbusy = false;
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
 		/*
 		 * If the page is fully valid it can only become invalid
 		 * with the object lock held.  If it is not valid it can
 		 * become valid with the busy lock held.  Therefore, we
 		 * may unnecessarily lock the exclusive busy here if we
 		 * race with I/O completion not using the object lock.
 		 * However, we will not end up with an invalid page and a
 		 * shared lock.
 		 */
-		if (!vm_page_all_valid(m) ||
-		    (allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_SBUSY)) == 0) {
-			sleep = !vm_page_tryxbusy(m);
-			xbusy = true;
-		} else
-			sleep = !vm_page_trysbusy(m);
-		if (sleep) {
-			(void)vm_page_busy_sleep_flags(object, m, "pgrbwt",
-			    allocflags);
+		if (!vm_page_trybusy(m,
+		    vm_page_all_valid(m) ? allocflags : 0)) {
+			(void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
+			    allocflags, true);
 			goto retrylookup;
 		}
-		if ((allocflags & VM_ALLOC_NOCREAT) != 0 &&
-		    !vm_page_all_valid(m)) {
-			if (xbusy)
-				vm_page_xunbusy(m);
-			else
-				vm_page_sunbusy(m);
+		if (vm_page_all_valid(m))
+			goto out;
+		if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
+			vm_page_busy_release(m);
 			*mp = NULL;
 			return (VM_PAGER_FAIL);
 		}
-		if ((allocflags & VM_ALLOC_WIRED) != 0)
-			vm_page_wire(m);
-		if (vm_page_all_valid(m))
-			goto out;
 	} else if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
 		*mp = NULL;
 		return (VM_PAGER_FAIL);
-	} else if ((m = vm_page_alloc(object, pindex, pflags)) != NULL) {
-		xbusy = true;
-	} else {
+	} else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) {
 		goto retrylookup;
 	}
+
 	vm_page_assert_xbusied(m);
-	MPASS(xbusy);
 	if (vm_pager_has_page(object, pindex, NULL, &after)) {
 		after = MIN(after, VM_INITIAL_PAGEIN);
 		after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
 		after = MAX(after, 1);
 		ma[0] = m;
 		for (i = 1; i < after; i++) {
 			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
 				if (ma[i]->valid || !vm_page_tryxbusy(ma[i]))
[... 9 lines not shown, still inside the vm_pager_has_page() block ...]
 		vm_object_pip_add(object, after);
 		VM_OBJECT_WUNLOCK(object);
 		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
 		VM_OBJECT_WLOCK(object);
 		vm_object_pip_wakeupn(object, after);
 		/* Pager may have replaced a page. */
 		m = ma[0];
 		if (rv != VM_PAGER_OK) {
-			if ((allocflags & VM_ALLOC_WIRED) != 0)
-				vm_page_unwire_noq(m);
 			for (i = 0; i < after; i++) {
 				if (!vm_page_wired(ma[i]))
 					vm_page_free(ma[i]);
 				else
 					vm_page_xunbusy(ma[i]);
 			}
 			*mp = NULL;
 			return (rv);
 		}
 		for (i = 1; i < after; i++)
 			vm_page_readahead_finish(ma[i]);
 		MPASS(vm_page_all_valid(m));
 	} else {
 		vm_page_zero_invalid(m, TRUE);
 	}
 out:
-	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
-		if (xbusy)
-			vm_page_xunbusy(m);
-		else
-			vm_page_sunbusy(m);
-	}
-	if ((allocflags & VM_ALLOC_SBUSY) != 0 && xbusy)
+	if ((allocflags & VM_ALLOC_WIRED) != 0)
+		vm_page_wire(m);
+	if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
 		vm_page_busy_downgrade(m);
+	else if ((allocflags & VM_ALLOC_NOBUSY) != 0)
+		vm_page_busy_release(m);
 	*mp = m;
 	return (VM_PAGER_OK);
 }
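
A representative caller of the locked variant (editor's sketch; EIO is a placeholder errno):

	vm_page_t m;
	int rv;

	VM_OBJECT_WLOCK(obj);
	rv = vm_page_grab_valid(&m, obj, pindex, VM_ALLOC_WIRED);
	VM_OBJECT_WUNLOCK(obj);
	if (rv != VM_PAGER_OK)
		return (EIO);
	/* m is fully valid, wired, and still xbusied, since neither
	 * VM_ALLOC_NOBUSY nor VM_ALLOC_SBUSY was requested. */
	vm_page_xunbusy(m);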
 /*
+ * Locklessly grab a valid page.  If the page is not valid or not yet
+ * allocated this will fall back to the object lock method.
+ */
+int
+vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
+    vm_pindex_t pindex, int allocflags)
+{
+	vm_page_t m;
+	int flags;
+	int error;
+
+	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
+	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
+	    ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
+	    "mismatch"));
+	KASSERT((allocflags &
+	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
+	    ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags));
+
+	/*
+	 * Attempt a lockless lookup and busy.  We need at least an sbusy
+	 * before we can inspect the valid field and return a wired page.
+	 */
+	flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
+	if (!vm_page_acquire_unlocked(object, pindex, NULL, mp, flags))
+		return (VM_PAGER_FAIL);
+	if ((m = *mp) != NULL) {
+		if (vm_page_all_valid(m)) {
+			if ((allocflags & VM_ALLOC_WIRED) != 0)
+				vm_page_wire(m);
+			vm_page_grab_release(m, allocflags);
+			return (VM_PAGER_OK);
+		}
+		vm_page_busy_release(m);
+	}
+	VM_OBJECT_WLOCK(object);
+	error = vm_page_grab_valid(mp, object, pindex, allocflags);
+	VM_OBJECT_WUNLOCK(object);
+
+	return (error);
+}
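
The unlocked wrapper only skips the object lock when the page is already resident and fully valid; anything else falls back to vm_page_grab_valid() above. Editor's sketch (EIO is a placeholder errno):

	if (vm_page_grab_valid_unlocked(&m, obj, pindex,
	    VM_ALLOC_WIRED | VM_ALLOC_NOBUSY) != VM_PAGER_OK)
		return (EIO);
	/* Fast path: the page was looked up, busied, validated, wired,
	 * and unbusied without ever taking VM_OBJECT_WLOCK(). */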
+
+/*
  * Return the specified range of pages from the given object.  For each
  * page offset within the range, if a page already exists within the object
  * at that offset and it is busy, then wait for it to change state.  If,
  * instead, the page doesn't exist, then allocate it.
  *
  * The caller must always specify an allocation class.
  *
  * allocation classes:
[... 20 lines not shown ...]
 {
 	vm_page_t m, mpred;
 	int pflags;
 	int i;

 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
 	    ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
+	vm_page_grab_check(allocflags);
+
 	pflags = vm_page_grab_pflags(allocflags);
 	if (count == 0)
 		return (0);
+
 	i = 0;
 retrylookup:
 	m = vm_radix_lookup_le(&object->rtree, pindex + i);
 	if (m == NULL || m->pindex != pindex + i) {
 		mpred = m;
 		m = NULL;
 	} else
 		mpred = TAILQ_PREV(m, pglist, listq);
 	for (; i < count; i++) {
 		if (m != NULL) {
-			if (!vm_page_acquire_flags(m, allocflags)) {
-				if (vm_page_busy_sleep_flags(object, m,
-				    "grbmaw", allocflags))
+			if (!vm_page_tryacquire(m, allocflags)) {
+				if (vm_page_grab_sleep(object, m, pindex,
+				    "grbmaw", allocflags, true))
 					goto retrylookup;
 				break;
 			}
 		} else {
 			if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 				break;
 			m = vm_page_alloc_after(object, pindex + i,
 			    pflags | VM_ALLOC_COUNT(count - i), mpred);
 			if (m == NULL) {
 				if ((allocflags & (VM_ALLOC_NOWAIT |
 				    VM_ALLOC_WAITFAIL)) != 0)
 					break;
 				goto retrylookup;
 			}
 		}
 		if (vm_page_none_valid(m) &&
 		    (allocflags & VM_ALLOC_ZERO) != 0) {
 			if ((m->flags & PG_ZERO) == 0)
 				pmap_zero_page(m);
 			vm_page_valid(m);
 		}
-		if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
-			if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
-				vm_page_sunbusy(m);
-			else
-				vm_page_xunbusy(m);
-		}
+		vm_page_grab_release(m, allocflags);
 		ma[i] = mpred = m;
 		m = vm_page_next(m);
 	}
 	return (i);
 }
+
+/*
+ * Unlocked variant of vm_page_grab_pages().  This accepts the same flags
+ * and will fall back to the locked variant to handle allocation.
+ */
+int
+vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
+    int allocflags, vm_page_t *ma, int count)
+{
+	vm_page_t m, pred;
+	int flags;
+	int i;
+
+	vm_page_grab_check(allocflags);
+
+	/*
+	 * Modify flags for lockless acquire to hold the page until we
+	 * set it valid if necessary.
+	 */
+	flags = allocflags & ~VM_ALLOC_NOBUSY;
+	pred = NULL;
+	for (i = 0; i < count; i++, pindex++) {
+		if (!vm_page_acquire_unlocked(object, pindex, pred, &m, flags))
+			return (i);
+		if (m == NULL)
+			break;
+		if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
+			if ((m->flags & PG_ZERO) == 0)
+				pmap_zero_page(m);
+			vm_page_valid(m);
+		}
+		/* m will still be wired or busy according to flags. */
+		vm_page_grab_release(m, allocflags);
+		pred = ma[i] = m;
+	}
+	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
+		return (i);
+	count -= i;
+	VM_OBJECT_WLOCK(object);
+	i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count);
+	VM_OBJECT_WUNLOCK(object);
+
+	return (i);
+}
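
Batch usage (editor's sketch; the array size is arbitrary):

	vm_page_t ma[8];
	int got;

	got = vm_page_grab_pages_unlocked(obj, pindex,
	    VM_ALLOC_WIRED | VM_ALLOC_NOBUSY, ma, 8);
	/* ma[0 .. got - 1] are wired and unbusied.  With sleepable flags
	 * and no VM_ALLOC_NOCREAT the locked fallback fills the whole
	 * range, so got should equal 8 here. */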
 /*
  * Mapping function for valid or dirty bits in a page.
  *
  * Inputs are required to range within a page.
  */
[... remaining 668 lines of vm_page.c not shown ...]