sys/vm/vm_page.c
[... first 78 lines skipped ...]
 #include <sys/limits.h>
 #include <sys/linker.h>
 #include <sys/malloc.h>
 #include <sys/mman.h>
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/sleepqueue.h>
 #include <sys/sbuf.h>
 #include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/vmmeter.h>
 #include <sys/vnode.h>

 #include <vm/vm.h>
[... 776 lines skipped ...]
 /*
  * vm_page_busy_downgrade:
  *
  *      Downgrade an exclusive busy page into a single shared busy page.
  */
 void
 vm_page_busy_downgrade(vm_page_t m)
 {
         u_int x;
-        bool locked;

         vm_page_assert_xbusied(m);
-        locked = mtx_owned(vm_page_lockptr(m));

+        x = m->busy_lock;
         for (;;) {
-                x = m->busy_lock;
-                x &= VPB_BIT_WAITERS;
-                if (x != 0 && !locked)
-                        vm_page_lock(m);
-                if (atomic_cmpset_rel_int(&m->busy_lock,
-                    VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1)))
+                if (atomic_fcmpset_rel_int(&m->busy_lock,
+                    &x, VPB_SHARERS_WORD(1)))
                         break;
-                if (x != 0 && !locked)
-                        vm_page_unlock(m);
         }
-        if (x != 0) {
+        if ((x & VPB_BIT_WAITERS) != 0)
                 wakeup(m);
-                if (!locked)
-                        vm_page_unlock(m);
-        }
 }
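The cmpset-to-fcmpset conversion above is the recurring pattern in this change: on failure, atomic_fcmpset writes the value it observed back into *old, so the retry loop does not have to reload busy_lock by hand. A minimal user-space sketch of the same idiom using C11 atomic_compare_exchange_strong(), which has the same failure behavior; the VPB encoding below is invented for illustration, and the ordering is simplified to seq_cst rather than the kernel's _rel variant:

#include <stdatomic.h>
#include <stdio.h>

/* Invented encoding, for illustration only. */
#define VPB_SHARERS_WORD(x)     ((unsigned int)(x) << 2)
#define VPB_SINGLE_EXCLUSIVER   0x1u

static _Atomic unsigned int busy_lock = VPB_SINGLE_EXCLUSIVER;

static void
busy_downgrade(_Atomic unsigned int *lock)
{
        unsigned int x;

        x = atomic_load(lock);
        for (;;) {
                /*
                 * Like atomic_fcmpset_rel_int(): on failure the current
                 * value of *lock is stored into x, so the loop retries
                 * without an explicit re-read.
                 */
                if (atomic_compare_exchange_strong(lock, &x,
                    VPB_SHARERS_WORD(1)))
                        break;
        }
        printf("downgraded; previous word was %#x\n", x);
}

int
main(void)
{
        busy_downgrade(&busy_lock);
        return (0);
}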
 /*
  * vm_page_sbusied:
  *
  *      Return a positive value if the page is shared busied, 0 otherwise.
  */
 int
 vm_page_sbusied(vm_page_t m)

[... 9 lines skipped ...]
  *
  *      Shared unbusy a page.
  */
 void
 vm_page_sunbusy(vm_page_t m)
 {
         u_int x;

-        vm_page_lock_assert(m, MA_NOTOWNED);
         vm_page_assert_sbusied(m);

+        x = m->busy_lock;
         for (;;) {
-                x = m->busy_lock;
                 if (VPB_SHARERS(x) > 1) {
-                        if (atomic_cmpset_int(&m->busy_lock, x,
+                        if (atomic_fcmpset_int(&m->busy_lock, &x,
                             x - VPB_ONE_SHARER))
                                 break;
                         continue;
                 }
-                if ((x & VPB_BIT_WAITERS) == 0) {
-                        KASSERT(x == VPB_SHARERS_WORD(1),
-                            ("vm_page_sunbusy: invalid lock state"));
-                        if (atomic_cmpset_int(&m->busy_lock,
-                            VPB_SHARERS_WORD(1), VPB_UNBUSIED))
-                                break;
-                        continue;
-                }
-                KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
-                    ("vm_page_sunbusy: invalid lock state for waiters"));
-                vm_page_lock(m);
-                if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
-                        vm_page_unlock(m);
-                        continue;
-                }
+                KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
+                    ("vm_page_sunbusy: invalid lock state"));
+                if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
+                        continue;
+                if ((x & VPB_BIT_WAITERS) == 0)
+                        break;
                 wakeup(m);
-                vm_page_unlock(m);
                 break;
         }
 }

kib: I wonder if this needs atomic_cmpset_rel_int. Before, the vm_page_unlock() provided the release semantic; sunbusy acts as an unlock. vm_page_xunbusy does use _rel.
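On kib's ordering point: the last sharer's unbusy acts as a lock release, so stores made while the page was busied must become visible before busy_lock reads as unbusied, which is what the _rel variant supplies. A stand-alone C11 sketch of the acquire/release pairing (the struct and names are hypothetical, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical page: data is protected by the busy word. */
struct page {
        int data;
        _Atomic unsigned int busy;      /* 1 = busied, 0 = unbusied */
};

static void *
reader(void *arg)
{
        struct page *p = arg;

        /* Acquire side: pairs with the releasing unbusy below. */
        while (atomic_load_explicit(&p->busy, memory_order_acquire) != 0)
                ;
        printf("saw data = %d\n", p->data);     /* guaranteed to be 42 */
        return (NULL);
}

int
main(void)
{
        struct page p = { .data = 0, .busy = 1 };
        pthread_t t;

        pthread_create(&t, NULL, reader, &p);
        p.data = 42;                    /* store made while "busied" */
        /*
         * Release side: without memory_order_release the store to data
         * could be reordered after the unbusy and the reader could see
         * stale data.  This is the semantic atomic_fcmpset_rel_int()
         * provides in vm_page_sunbusy().
         */
        atomic_store_explicit(&p.busy, 0, memory_order_release);
        pthread_join(t, NULL);
        return (0);
}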
 /*
  * vm_page_busy_sleep:
  *
- *      Sleep and release the page lock, using the page pointer as wchan.
+ *      Sleep if the page is busy, using the page pointer as wchan.
  *      This is used to implement the hard-path of busying mechanism.
  *
- *      The given page must be locked.
- *
  *      If nonshared is true, sleep only if the page is xbusy.
+ *
+ *      The object lock must be held on entry and will be released on exit.
  */
 void
 vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
 {
+        vm_object_t obj;
         u_int x;

-        vm_page_assert_locked(m);
+        obj = m->object;
+        vm_page_lock_assert(m, MA_NOTOWNED);
+        VM_OBJECT_ASSERT_LOCKED(obj);

+        sleepq_lock(m);
         x = m->busy_lock;
         if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
             ((x & VPB_BIT_WAITERS) == 0 &&
             !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
-                vm_page_unlock(m);
+                VM_OBJECT_DROP(obj);
+                sleepq_release(m);
                 return;
         }
-        msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
+        VM_OBJECT_DROP(obj);
+        sleepq_add(m, NULL, wmesg, 0, 0);
+        sleepq_wait(m, PVM);
 }

kib: Then assert that the object ref count is > 0? I would prefer to convert the nonshared argument to flags, and add two flags, one for nonshared, another for obj_locked, instead of using VM_OBJECT_WOWNED().

jeff: I copied the comment from elsewhere. I'm not comfortable enough with all callers to add a new assert. This may be used in the object tear-down path, for example. We rely on object type stability elsewhere. I thought about the flag. It is a tradeoff. With a flag you are more explicit and so less likely to have bugs where the caller doesn't realize the object lock is dropped. However, you're then subject to bugs where the flag was just specified wrong. With the silent operation you won't have bugs with releasing/re-acquiring the lock, but the caller may not anticipate losing synchronization. If you follow all of my patch sets you will see that ultimately there are few cases that are called with any lock held, so I settled on this method.

markj: If the page lock isn't held and the object lock isn't held, what prevents m from being freed after the call to vm_page_busy_sleep() and before the dereference of m->object?

jeff: Nothing prevents it. I believe we handle the race gracefully. m->object could become NULL at any point after we drop the object lock. However, this means the busy holder is removing the page and will unbusy at some point, waking us up, or avoiding the sleep below. All vm_page_busy_sleep callers are basically doing what we call "WAITFAIL" elsewhere. They don't return with the busy lock held. They restart compound operations and look for the correct page again. It is possible that the page could be freed, reallocated, and we sleep for the wrong thing. However, I believe this is harmless and rare. It would be possible to treat the object lock as an interlock here with a slightly more complicated and expensive sleepq operation. Cases that don't hold the object lock are protected by a wire count.

markj: The wiring doesn't prevent the page from being removed from its object, in which case m->object will become NULL. See vm_object_terminate_pages() for instance.

jeff: Yes, I understand, but it does prevent the page from being reallocated to some other purpose, so there is no chance of a spurious sleep.

markj: We still need to check that obj is non-NULL before dereferencing it.
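jeff's "WAITFAIL" description corresponds to a caller pattern like the sketch below, a hypothetical caller written against the new interface rather than code from this diff: since vm_page_busy_sleep() drops the object lock and never returns holding the busy lock, the caller must redo the lookup from scratch after every sleep.

/*
 * Hypothetical caller: wait until the page at pindex can be exclusively
 * busied.  Every sleep invalidates the lookup, because the page identity
 * may change while the object lock is dropped.
 */
static vm_page_t
lookup_and_xbusy(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

retry:
        VM_OBJECT_WLOCK(object);
        m = vm_page_lookup(object, pindex);
        if (m == NULL) {
                VM_OBJECT_WUNLOCK(object);
                return (NULL);
        }
        if (vm_page_busied(m)) {
                /* Drops the object lock; restart the compound operation. */
                vm_page_busy_sleep(m, "pgbusy", false);
                goto retry;
        }
        vm_page_xbusy(m);
        VM_OBJECT_WUNLOCK(object);
        return (m);
}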
 /*
  * vm_page_trysbusy:
  *
  *      Try to shared busy a page.
  *      If the operation succeeds 1 is returned otherwise 0.
  *      The operation never sleeps.
  */
 int
 vm_page_trysbusy(vm_page_t m)
 {
         u_int x;

-        for (;;) {
-                x = m->busy_lock;
+        x = m->busy_lock;
+        for (;;) {
                 if ((x & VPB_BIT_SHARED) == 0)
                         return (0);
-                if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
+                if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
+                    x + VPB_ONE_SHARER))
                         return (1);
         }
 }
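A short usage note: a successful vm_page_trysbusy() acquires the busy word with acquire semantics and must be paired with vm_page_sunbusy(), whose final release is the subject of kib's comment above. A hypothetical caller sketch:

/*
 * Hypothetical caller: copy a field out of a resident page only if it
 * can be shared busied without sleeping.
 */
static int
try_read_dirty(vm_page_t m, vm_page_bits_t *out)
{

        if (!vm_page_trysbusy(m))
                return (0);
        *out = m->dirty;        /* stable while we hold the sbusy */
        vm_page_sunbusy(m);
        return (1);
}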
-static void
-vm_page_xunbusy_locked(vm_page_t m)
-{
-
-        vm_page_assert_xbusied(m);
-        vm_page_assert_locked(m);
-
-        atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
-        /* There is a waiter, do wakeup() instead of vm_page_flash(). */
-        wakeup(m);
-}
-
-void
-vm_page_xunbusy_maybelocked(vm_page_t m)
-{
-        bool lockacq;
-
-        vm_page_assert_xbusied(m);
-
-        /*
-         * Fast path for unbusy.  If it succeeds, we know that there
-         * are no waiters, so we do not need a wakeup.
-         */
-        if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER,
-            VPB_UNBUSIED))
-                return;
-
-        lockacq = !mtx_owned(vm_page_lockptr(m));
-        if (lockacq)
-                vm_page_lock(m);
-        vm_page_xunbusy_locked(m);
-        if (lockacq)
-                vm_page_unlock(m);
-}
-
 /*
  * vm_page_xunbusy_hard:
  *
- *      Called after the first try the exclusive unbusy of a page failed.
- *      It is assumed that the waiters bit is on.
+ *      Called when unbusy has failed because there is a waiter.
  */
 void
 vm_page_xunbusy_hard(vm_page_t m)
 {

         vm_page_assert_xbusied(m);
-        vm_page_lock(m);
-        vm_page_xunbusy_locked(m);
-        vm_page_unlock(m);
-}

-/*
- * vm_page_flash:
- *
- *      Wakeup anyone waiting for the page.
- *      The ownership bits do not change.
- *
- *      The given page must be locked.
- */
-void
-vm_page_flash(vm_page_t m)
-{
-        u_int x;
-
-        vm_page_lock_assert(m, MA_OWNED);
-
-        for (;;) {
-                x = m->busy_lock;
-                if ((x & VPB_BIT_WAITERS) == 0)
-                        return;
-                if (atomic_cmpset_int(&m->busy_lock, x,
-                    x & (~VPB_BIT_WAITERS)))
-                        break;
-        }
+        /*
+         * Wake the waiter.
+         */
+        atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
         wakeup(m);
 }
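For context on the split between the inline fast path and vm_page_xunbusy_hard(): vm_page_xunbusy() is a macro in vm_page.h, which at this point in the series reads approximately as below (quoted from memory, so treat the exact spelling as an assumption). The cmpset fast path can only fail when VPB_BIT_WAITERS is set, since an exclusive busier's word is otherwise exactly VPB_SINGLE_EXCLUSIVER; that is why the hard path may unconditionally store VPB_UNBUSIED and call wakeup().

/* Approximate vm_page.h definition, shown for reference. */
#define vm_page_xunbusy(m) do {                                         \
        if (!atomic_cmpset_rel_int(&(m)->busy_lock,                     \
            VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))                       \
                vm_page_xunbusy_hard(m);                                \
} while (0)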
 /*
  * Avoid releasing and reacquiring the same page lock.
  */
 void
 vm_page_change_lock(vm_page_t m, struct mtx **mtx)

[... 177 lines skipped; context resumes inside an "else" branch ...]

         else
                 vm_page_deactivate(m);
         vm_page_unlock(m);
         vm_page_xunbusy(m);
 }
 /*
  * vm_page_sleep_if_busy:
  *
- *      Sleep and release the page queues lock if the page is busied.
+ *      Sleep and release the object lock if the page is busied.
  *      Returns TRUE if the thread slept.
  *
  *      The given page must be unlocked and object containing it must
  *      be locked.
  */
 int
 vm_page_sleep_if_busy(vm_page_t m, const char *msg)
 {
         vm_object_t obj;

         vm_page_lock_assert(m, MA_NOTOWNED);
         VM_OBJECT_ASSERT_WLOCKED(m->object);

         if (vm_page_busied(m)) {
                 /*
                  * The page-specific object must be cached because page
                  * identity can change during the sleep, causing the
                  * re-lock of a different object.
                  * It is assumed that a reference to the object is already
                  * held by the callers.
                  */
                 obj = m->object;
-                vm_page_lock(m);
-                VM_OBJECT_WUNLOCK(obj);
                 vm_page_busy_sleep(m, msg, false);
                 VM_OBJECT_WLOCK(obj);
                 return (TRUE);
         }
         return (FALSE);
 }
+/*
+ * vm_page_sleep_if_xbusy:
+ *
+ *      Sleep and release the object lock if the page is xbusied.
+ *      Returns TRUE if the thread slept.
+ *
+ *      The given page must be unlocked and object containing it must
+ *      be locked.
+ */
+int
+vm_page_sleep_if_xbusy(vm_page_t m, const char *msg)
+{
+        vm_object_t obj;
+
+        vm_page_lock_assert(m, MA_NOTOWNED);
+        VM_OBJECT_ASSERT_WLOCKED(m->object);
+
+        if (vm_page_xbusied(m)) {
+                /*
+                 * The page-specific object must be cached because page
+                 * identity can change during the sleep, causing the
+                 * re-lock of a different object.
+                 * It is assumed that a reference to the object is already
+                 * held by the callers.
+                 */
+                obj = m->object;
+                vm_page_busy_sleep(m, msg, true);
+                VM_OBJECT_WLOCK(obj);
+                return (TRUE);
+        }
+        return (FALSE);
+}
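As with vm_page_sleep_if_busy(), a TRUE return means the object lock was dropped and reacquired, so the caller must revalidate anything derived from the object. A hypothetical caller sketch:

/*
 * Hypothetical caller: wait until no exclusive busier holds the page.
 * The object lock is held on entry and on return, but may have been
 * dropped in between, so the lookup is repeated after every sleep.
 */
static vm_page_t
wait_not_xbusy(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

        VM_OBJECT_ASSERT_WLOCKED(object);
        while ((m = vm_page_lookup(object, pindex)) != NULL &&
            vm_page_sleep_if_xbusy(m, "pgxbwt"))
                continue;
        return (m);
}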
 /*
  * vm_page_dirty_KBI:           [ internal use only ]
  *
  *      Set all bits in the page's dirty field.
  *
  *      The object containing the specified page must be locked if the
  *      call is made from the machine-independent layer.
  *
  *      See vm_page_clear_dirty_mask().

[... 141 lines skipped; next hunk is inside vm_page_remove() ...]
         vm_page_t mrem;

         object = m->object;
         if ((m->oflags & VPO_UNMANAGED) == 0)
                 vm_page_assert_locked(m);
         VM_OBJECT_ASSERT_WLOCKED(object);
         if (vm_page_xbusied(m))
-                vm_page_xunbusy_maybelocked(m);
+                vm_page_xunbusy(m);
         mrem = vm_radix_remove(&object->rtree, m->pindex);
         KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

         /*
          * Now remove from the object's list of backed pages.
          */
         TAILQ_REMOVE(&object->memq, m, listq);

[... 116 lines skipped; next hunk is inside vm_page_replace() ...]
         KASSERT(mold->queue == PQ_NONE,
             ("vm_page_replace: old page %p is on a paging queue", mold));

         /* Keep the resident page list in sorted order. */
         TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
         TAILQ_REMOVE(&object->memq, mold, listq);

         mold->object = NULL;
-        vm_page_xunbusy_maybelocked(mold);
+        vm_page_xunbusy(mold);

         /*
          * The object's resident_page_count does not change because we have
          * swapped one page for another, but OBJ_MIGHTBEDIRTY.
          */
         if (pmap_page_is_write_mapped(mnew))
                 vm_object_set_writeable_dirty(object);
         return (mold);

[... 2,317 lines skipped; next hunk is inside an "if (sleep) {" block ...]
                 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
                         return (NULL);
                 /*
                  * Reference the page before unlocking and
                  * sleeping so that the page daemon is less
                  * likely to reclaim it.
                  */
                 vm_page_aflag_set(m, PGA_REFERENCED);
-                vm_page_lock(m);
-                VM_OBJECT_WUNLOCK(object);
                 vm_page_busy_sleep(m, "pgrbwt", (allocflags &
                     VM_ALLOC_IGN_SBUSY) != 0);
                 VM_OBJECT_WLOCK(object);
                 goto retrylookup;
         } else {
                 if ((allocflags & VM_ALLOC_WIRED) != 0) {
                         vm_page_lock(m);
                         vm_page_wire(m);

[... 84 lines skipped; next hunk is inside an "if (m != NULL) {" block ...]
                 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
                         break;
                 /*
                  * Reference the page before unlocking and
                  * sleeping so that the page daemon is less
                  * likely to reclaim it.
                  */
                 vm_page_aflag_set(m, PGA_REFERENCED);
-                vm_page_lock(m);
-                VM_OBJECT_WUNLOCK(object);
                 vm_page_busy_sleep(m, "grbmaw", (allocflags &
                     VM_ALLOC_IGN_SBUSY) != 0);
                 VM_OBJECT_WLOCK(object);
                 goto retrylookup;
         }
         if ((allocflags & VM_ALLOC_WIRED) != 0) {
                 vm_page_lock(m);
                 vm_page_wire(m);
                 vm_page_unlock(m);
         }
         if ((allocflags & (VM_ALLOC_NOBUSY |
             VM_ALLOC_SBUSY)) == 0)

jeff: This is a merge bug and related to a later diff.

[... remaining 533 lines skipped ...]