Changeset View
Changeset View
Standalone View
Standalone View
head/sys/vm/vm_page.c
Show First 20 Lines • Show All 174 Lines • ▼ Show 20 Lines | |||||
static TAILQ_HEAD(, vm_page) blacklist_head; | static TAILQ_HEAD(, vm_page) blacklist_head; | ||||
static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS); | static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS); | ||||
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD | | SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD | | ||||
CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages"); | CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages"); | ||||
static uma_zone_t fakepg_zone; | static uma_zone_t fakepg_zone; | ||||
static void vm_page_alloc_check(vm_page_t m); | static void vm_page_alloc_check(vm_page_t m); | ||||
static void _vm_page_busy_sleep(vm_object_t obj, vm_page_t m, | |||||
const char *wmesg, bool nonshared, bool locked); | |||||
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits); | static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits); | ||||
static void vm_page_dequeue_complete(vm_page_t m); | static void vm_page_dequeue_complete(vm_page_t m); | ||||
static void vm_page_enqueue(vm_page_t m, uint8_t queue); | static void vm_page_enqueue(vm_page_t m, uint8_t queue); | ||||
static void vm_page_init(void *dummy); | static void vm_page_init(void *dummy); | ||||
static int vm_page_insert_after(vm_page_t m, vm_object_t object, | static int vm_page_insert_after(vm_page_t m, vm_object_t object, | ||||
vm_pindex_t pindex, vm_page_t mpred); | vm_pindex_t pindex, vm_page_t mpred); | ||||
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object, | static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object, | ||||
vm_page_t mpred); | vm_page_t mpred); | ||||
▲ Show 20 Lines • Show All 703 Lines • ▼ Show 20 Lines | |||||
 *
 *	Acquire the busy lock as described by VM_ALLOC_* flags.  Will loop
 *	and drop the object lock if necessary.
 */
int | int | ||||
vm_page_busy_acquire(vm_page_t m, int allocflags) | vm_page_busy_acquire(vm_page_t m, int allocflags) | ||||
{ | { | ||||
vm_object_t obj; | vm_object_t obj; | ||||
u_int x; | |||||
bool locked; | bool locked; | ||||
/* | /* | ||||
* The page-specific object must be cached because page | * The page-specific object must be cached because page | ||||
* identity can change during the sleep, causing the | * identity can change during the sleep, causing the | ||||
* re-lock of a different object. | * re-lock of a different object. | ||||
* It is assumed that a reference to the object is already | * It is assumed that a reference to the object is already | ||||
* held by the callers. | * held by the callers. | ||||
*/ | */ | ||||
obj = m->object; | obj = m->object; | ||||
for (;;) { | for (;;) { | ||||
if ((allocflags & VM_ALLOC_SBUSY) == 0) { | if ((allocflags & VM_ALLOC_SBUSY) == 0) { | ||||
if (vm_page_tryxbusy(m)) | if (vm_page_tryxbusy(m)) | ||||
return (TRUE); | return (TRUE); | ||||
} else { | } else { | ||||
if (vm_page_trysbusy(m)) | if (vm_page_trysbusy(m)) | ||||
return (TRUE); | return (TRUE); | ||||
} | } | ||||
if ((allocflags & VM_ALLOC_NOWAIT) != 0) | if ((allocflags & VM_ALLOC_NOWAIT) != 0) | ||||
return (FALSE); | return (FALSE); | ||||
if (obj != NULL) { | if (obj != NULL) | ||||
locked = VM_OBJECT_WOWNED(obj); | locked = VM_OBJECT_WOWNED(obj); | ||||
} else { | else | ||||
MPASS(vm_page_wired(m)); | |||||
locked = FALSE; | locked = FALSE; | ||||
} | MPASS(locked || vm_page_wired(m)); | ||||
sleepq_lock(m); | _vm_page_busy_sleep(obj, m, "vmpba", | ||||
x = m->busy_lock; | (allocflags & VM_ALLOC_SBUSY) != 0, locked); | ||||
if (x == VPB_UNBUSIED || | |||||
((allocflags & VM_ALLOC_SBUSY) != 0 && | |||||
(x & VPB_BIT_SHARED) != 0) || | |||||
((x & VPB_BIT_WAITERS) == 0 && | |||||
!atomic_cmpset_int(&m->busy_lock, x, | |||||
x | VPB_BIT_WAITERS))) { | |||||
sleepq_release(m); | |||||
continue; | |||||
} | |||||
if (locked) | if (locked) | ||||
VM_OBJECT_WUNLOCK(obj); | |||||
sleepq_add(m, NULL, "vmpba", 0, 0); | |||||
sleepq_wait(m, PVM); | |||||
if (locked) | |||||
VM_OBJECT_WLOCK(obj); | VM_OBJECT_WLOCK(obj); | ||||
MPASS(m->object == obj || m->object == NULL); | MPASS(m->object == obj || m->object == NULL); | ||||
if ((allocflags & VM_ALLOC_WAITFAIL) != 0) | if ((allocflags & VM_ALLOC_WAITFAIL) != 0) | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 98 Lines • ▼ Show 20 Lines | |||||
 *	If nonshared is true, sleep only if the page is xbusy.
 *
 *	The object lock must be held on entry and will be released on exit.
 */
void | void | ||||
vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared) | vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared) | ||||
{ | { | ||||
vm_object_t obj; | vm_object_t obj; | ||||
u_int x; | |||||
obj = m->object; | obj = m->object; | ||||
vm_page_lock_assert(m, MA_NOTOWNED); | |||||
VM_OBJECT_ASSERT_LOCKED(obj); | VM_OBJECT_ASSERT_LOCKED(obj); | ||||
vm_page_lock_assert(m, MA_NOTOWNED); | |||||
_vm_page_busy_sleep(obj, m, wmesg, nonshared, true); | |||||
} | |||||
static void | |||||
_vm_page_busy_sleep(vm_object_t obj, vm_page_t m, const char *wmesg, | |||||
bool nonshared, bool locked) | |||||
{ | |||||
u_int x; | |||||
/* | |||||
* If the object is busy we must wait for that to drain to zero | |||||
* before trying the page again. | |||||
*/ | |||||
if (obj != NULL && vm_object_busied(obj)) { | |||||
if (locked) | |||||
VM_OBJECT_DROP(obj); | |||||
vm_object_busy_wait(obj, wmesg); | |||||
return; | |||||
} | |||||
sleepq_lock(m); | sleepq_lock(m); | ||||
x = m->busy_lock; | x = m->busy_lock; | ||||
if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) || | if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) || | ||||
((x & VPB_BIT_WAITERS) == 0 && | ((x & VPB_BIT_WAITERS) == 0 && | ||||
!atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) { | !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) { | ||||
if (locked) | |||||
VM_OBJECT_DROP(obj); | VM_OBJECT_DROP(obj); | ||||
sleepq_release(m); | sleepq_release(m); | ||||
return; | return; | ||||
} | } | ||||
if (locked) | |||||
VM_OBJECT_DROP(obj); | VM_OBJECT_DROP(obj); | ||||
sleepq_add(m, NULL, wmesg, 0, 0); | sleepq_add(m, NULL, wmesg, 0, 0); | ||||
sleepq_wait(m, PVM); | sleepq_wait(m, PVM); | ||||
} | } | ||||
/*
 *	vm_page_trysbusy:
 *
 *	Try to shared busy a page.
 *	If the operation succeeds 1 is returned otherwise 0.
 *	The operation never sleeps.
 */
int | int | ||||
vm_page_trysbusy(vm_page_t m) | vm_page_trysbusy(vm_page_t m) | ||||
{ | { | ||||
vm_object_t obj; | |||||
u_int x; | u_int x; | ||||
obj = m->object; | |||||
x = m->busy_lock; | x = m->busy_lock; | ||||
for (;;) { | for (;;) { | ||||
if ((x & VPB_BIT_SHARED) == 0) | if ((x & VPB_BIT_SHARED) == 0) | ||||
return (0); | return (0); | ||||
/* | |||||
* Reduce the window for transient busies that will trigger | |||||
* false negatives in vm_page_ps_test(). | |||||
*/ | |||||
if (obj != NULL && vm_object_busied(obj)) | |||||
return (0); | |||||
if (atomic_fcmpset_acq_int(&m->busy_lock, &x, | if (atomic_fcmpset_acq_int(&m->busy_lock, &x, | ||||
x + VPB_ONE_SHARER)) | x + VPB_ONE_SHARER)) | ||||
break; | |||||
} | |||||
/* Refetch the object now that we're guaranteed that it is stable. */ | |||||
obj = m->object; | |||||
if (obj != NULL && vm_object_busied(obj)) { | |||||
vm_page_sunbusy(m); | |||||
return (0); | |||||
} | |||||
return (1); | return (1); | ||||
} | } | ||||
/* | |||||
* vm_page_tryxbusy: | |||||
* | |||||
* Try to exclusive busy a page. | |||||
* If the operation succeeds 1 is returned otherwise 0. | |||||
* The operation never sleeps. | |||||
*/ | |||||
int | |||||
vm_page_tryxbusy(vm_page_t m) | |||||
{ | |||||
vm_object_t obj; | |||||
if (atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED, | |||||
VPB_SINGLE_EXCLUSIVER) == 0) | |||||
return (0); | |||||
obj = m->object; | |||||
if (obj != NULL && vm_object_busied(obj)) { | |||||
vm_page_xunbusy(m); | |||||
return (0); | |||||
} | } | ||||
return (1); | |||||
} | |||||
/*
 *	vm_page_xunbusy_hard:
 *
 *	Called when unbusy has failed because there is a waiter.
 */
void | void | ||||
vm_page_xunbusy_hard(vm_page_t m) | vm_page_xunbusy_hard(vm_page_t m) | ||||
▲ Show 20 Lines • Show All 204 Lines • ▼ Show 20 Lines | |||||
int | int | ||||
vm_page_sleep_if_busy(vm_page_t m, const char *msg) | vm_page_sleep_if_busy(vm_page_t m, const char *msg) | ||||
{ | { | ||||
vm_object_t obj; | vm_object_t obj; | ||||
vm_page_lock_assert(m, MA_NOTOWNED); | vm_page_lock_assert(m, MA_NOTOWNED); | ||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | VM_OBJECT_ASSERT_WLOCKED(m->object); | ||||
if (vm_page_busied(m)) { | |||||
/* | /* | ||||
* The page-specific object must be cached because page | * The page-specific object must be cached because page | ||||
* identity can change during the sleep, causing the | * identity can change during the sleep, causing the | ||||
* re-lock of a different object. | * re-lock of a different object. | ||||
* It is assumed that a reference to the object is already | * It is assumed that a reference to the object is already | ||||
* held by the callers. | * held by the callers. | ||||
*/ | */ | ||||
obj = m->object; | obj = m->object; | ||||
if (vm_page_busied(m) || (obj != NULL && obj->busy)) { | |||||
vm_page_busy_sleep(m, msg, false); | vm_page_busy_sleep(m, msg, false); | ||||
VM_OBJECT_WLOCK(obj); | VM_OBJECT_WLOCK(obj); | ||||
return (TRUE); | return (TRUE); | ||||
} | } | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
/*
 *	vm_page_sleep_if_xbusy:
 *
 *	Sleep and release the object lock if the page is xbusied.
 *	Returns TRUE if the thread slept.
 *
 *	The given page must be unlocked and object containing it must
 *	be locked.
 */
int | int | ||||
vm_page_sleep_if_xbusy(vm_page_t m, const char *msg) | vm_page_sleep_if_xbusy(vm_page_t m, const char *msg) | ||||
{ | { | ||||
vm_object_t obj; | vm_object_t obj; | ||||
vm_page_lock_assert(m, MA_NOTOWNED); | vm_page_lock_assert(m, MA_NOTOWNED); | ||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | VM_OBJECT_ASSERT_WLOCKED(m->object); | ||||
if (vm_page_xbusied(m)) { | |||||
/* | /* | ||||
* The page-specific object must be cached because page | * The page-specific object must be cached because page | ||||
* identity can change during the sleep, causing the | * identity can change during the sleep, causing the | ||||
* re-lock of a different object. | * re-lock of a different object. | ||||
* It is assumed that a reference to the object is already | * It is assumed that a reference to the object is already | ||||
* held by the callers. | * held by the callers. | ||||
*/ | */ | ||||
obj = m->object; | obj = m->object; | ||||
if (vm_page_xbusied(m) || (obj != NULL && obj->busy)) { | |||||
vm_page_busy_sleep(m, msg, true); | vm_page_busy_sleep(m, msg, true); | ||||
VM_OBJECT_WLOCK(obj); | VM_OBJECT_WLOCK(obj); | ||||
return (TRUE); | return (TRUE); | ||||
} | } | ||||
return (FALSE); | return (FALSE); | ||||
} | } | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 3,508 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
mtx_assert_(vm_page_lockptr(m), a, file, line); | mtx_assert_(vm_page_lockptr(m), a, file, line); | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
void | void | ||||
vm_page_object_lock_assert(vm_page_t m) | vm_page_object_busy_assert(vm_page_t m) | ||||
{ | { | ||||
/* | /* | ||||
* Certain of the page's fields may only be modified by the | * Certain of the page's fields may only be modified by the | ||||
* holder of the containing object's lock or the exclusive busy. | * holder of a page or object busy. | ||||
* holder. Unfortunately, the holder of the write busy is | |||||
* not recorded, and thus cannot be checked here. | |||||
*/ | */ | ||||
if (m->object != NULL && !vm_page_xbusied(m)) | if (m->object != NULL && !vm_page_busied(m)) | ||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | VM_OBJECT_ASSERT_BUSY(m->object); | ||||
} | } | ||||
void | void | ||||
vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits) | vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits) | ||||
{ | { | ||||
if ((bits & PGA_WRITEABLE) == 0) | if ((bits & PGA_WRITEABLE) == 0) | ||||
return; | return; | ||||
/* | /* | ||||
* The PGA_WRITEABLE flag can only be set if the page is | * The PGA_WRITEABLE flag can only be set if the page is | ||||
* managed, is exclusively busied or the object is locked. | * managed, is exclusively busied or the object is locked. | ||||
* Currently, this flag is only set by pmap_enter(). | * Currently, this flag is only set by pmap_enter(). | ||||
*/ | */ | ||||
KASSERT((m->oflags & VPO_UNMANAGED) == 0, | KASSERT((m->oflags & VPO_UNMANAGED) == 0, | ||||
("PGA_WRITEABLE on unmanaged page")); | ("PGA_WRITEABLE on unmanaged page")); | ||||
if (!vm_page_xbusied(m)) | if (!vm_page_xbusied(m)) | ||||
VM_OBJECT_ASSERT_LOCKED(m->object); | VM_OBJECT_ASSERT_BUSY(m->object); | ||||
} | } | ||||
#endif | #endif | ||||
#include "opt_ddb.h" | #include "opt_ddb.h" | ||||
#ifdef DDB | #ifdef DDB | ||||
#include <sys/kernel.h> | #include <sys/kernel.h> | ||||
#include <ddb/ddb.h> | #include <ddb/ddb.h> | ||||
▲ Show 20 Lines • Show All 59 Lines • Show Last 20 Lines |