D21255.id60810.diff

Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -508,7 +508,7 @@
*m_hold = &m[i];
vm_page_wire(&m[i]);
}
- vm_page_xunbusy_maybelocked(&m[i]);
+ vm_page_xunbusy(&m[i]);
}
if (m_mtx != NULL)
mtx_unlock(m_mtx);
@@ -1009,7 +1009,7 @@
if (!vm_page_wired(fs.m))
vm_page_free(fs.m);
else
- vm_page_xunbusy_maybelocked(fs.m);
+ vm_page_xunbusy(fs.m);
vm_page_unlock(fs.m);
fs.m = NULL;
unlock_and_deallocate(&fs);
@@ -1032,7 +1032,7 @@
if (!vm_page_wired(fs.m))
vm_page_free(fs.m);
else
- vm_page_xunbusy_maybelocked(fs.m);
+ vm_page_xunbusy(fs.m);
vm_page_unlock(fs.m);
fs.m = NULL;
}
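
The vm_fault.c hunks above replace vm_page_xunbusy_maybelocked() with the plain vm_page_xunbusy(). For context, a rough sketch of the fast path as the vm_page.h macro of this era defines it (reproduced approximately, not part of this diff): the release compare-and-set covers the no-waiters case, and the slow path falls through to vm_page_xunbusy_hard(), which the vm_page.c hunks below rework so that it no longer needs the page lock, making the "maybelocked" wrapper redundant.

/*
 * Approximate sketch of the vm_page_xunbusy() fast path (not part of
 * this diff): the atomic release-CAS succeeds when no waiter is
 * recorded; otherwise vm_page_xunbusy_hard() performs the wakeup.
 */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)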
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -1226,6 +1226,7 @@
*/
vm_page_aflag_set(tm, PGA_REFERENCED);
}
+ vm_page_unlock(tm);
vm_page_busy_sleep(tm, "madvpo", false);
goto relookup;
}
@@ -1399,7 +1400,6 @@
*/
if (vm_page_busied(m)) {
VM_OBJECT_WUNLOCK(new_object);
- vm_page_lock(m);
VM_OBJECT_WUNLOCK(orig_object);
vm_page_busy_sleep(m, "spltwt", false);
VM_OBJECT_WLOCK(orig_object);
@@ -1469,8 +1469,6 @@
("invalid ownership %p %p %p", p, object, backing_object));
if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
return (next);
- if (p != NULL)
- vm_page_lock(p);
VM_OBJECT_WUNLOCK(object);
VM_OBJECT_WUNLOCK(backing_object);
/* The page is only NULL when rename fails. */
@@ -1930,6 +1928,7 @@
vm_page_change_lock(p, &mtx);
if (vm_page_xbusied(p)) {
VM_OBJECT_WUNLOCK(object);
+ mtx_unlock(mtx);
vm_page_busy_sleep(p, "vmopax", true);
VM_OBJECT_WLOCK(object);
goto again;
@@ -1946,6 +1945,7 @@
}
if (vm_page_busied(p)) {
VM_OBJECT_WUNLOCK(object);
+ mtx_unlock(mtx);
vm_page_busy_sleep(p, "vmopar", false);
VM_OBJECT_WLOCK(object);
goto again;
@@ -2250,7 +2250,6 @@
tm = m;
m = TAILQ_NEXT(m, listq);
}
- vm_page_lock(tm);
if (vm_page_xbusied(tm)) {
for (tobject = object; locked_depth >= 1;
locked_depth--) {
@@ -2261,6 +2260,7 @@
vm_page_busy_sleep(tm, "unwbo", true);
goto again;
}
+ vm_page_lock(tm);
vm_page_unwire(tm, queue);
vm_page_unlock(tm);
next_page:
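
The vm_object.c changes all follow one pattern: the page (or shared page-queue) mutex is no longer held across vm_page_busy_sleep(); it is either released before the sleep or simply not taken until after the busy check. A hypothetical caller illustrating the reworked convention, relying on the vm_page_busy_sleep() rework later in this diff, which drops and retakes the object lock internally (example_lookup_unbusied and the "exbusy" wmesg are illustrative only, not from the patch or the tree):

/*
 * Illustrative sketch only: a caller that finds a busied page no longer
 * takes the page lock around the sleep; vm_page_busy_sleep() drops and
 * reacquires the object lock itself, so the page must be looked up
 * again afterwards because its identity may have changed.
 */
static vm_page_t
example_lookup_unbusied(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
relookup:
	m = vm_page_lookup(object, pindex);
	if (m != NULL && vm_page_busied(m)) {
		vm_page_busy_sleep(m, "exbusy", false);
		goto relookup;
	}
	return (m);
}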
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -513,7 +513,6 @@
void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
-void vm_page_flash(vm_page_t m);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);
@@ -586,7 +585,6 @@
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
-void vm_page_xunbusy_maybelocked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -84,6 +84,7 @@
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
+#include <sys/sleepqueue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
@@ -869,27 +870,17 @@
vm_page_busy_downgrade(vm_page_t m)
{
u_int x;
- bool locked;
vm_page_assert_xbusied(m);
- locked = mtx_owned(vm_page_lockptr(m));
+ x = m->busy_lock;
for (;;) {
- x = m->busy_lock;
- x &= VPB_BIT_WAITERS;
- if (x != 0 && !locked)
- vm_page_lock(m);
- if (atomic_cmpset_rel_int(&m->busy_lock,
- VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1)))
+ if (atomic_fcmpset_rel_int(&m->busy_lock,
+ &x, VPB_SHARERS_WORD(1)))
break;
- if (x != 0 && !locked)
- vm_page_unlock(m);
}
- if (x != 0) {
+ if ((x & VPB_BIT_WAITERS) != 0)
wakeup(m);
- if (!locked)
- vm_page_unlock(m);
- }
}
/*
@@ -916,35 +907,23 @@
{
u_int x;
- vm_page_lock_assert(m, MA_NOTOWNED);
vm_page_assert_sbusied(m);
+ x = m->busy_lock;
for (;;) {
- x = m->busy_lock;
if (VPB_SHARERS(x) > 1) {
- if (atomic_cmpset_int(&m->busy_lock, x,
+ if (atomic_fcmpset_int(&m->busy_lock, &x,
x - VPB_ONE_SHARER))
break;
continue;
}
- if ((x & VPB_BIT_WAITERS) == 0) {
- KASSERT(x == VPB_SHARERS_WORD(1),
- ("vm_page_sunbusy: invalid lock state"));
- if (atomic_cmpset_int(&m->busy_lock,
- VPB_SHARERS_WORD(1), VPB_UNBUSIED))
- break;
+ KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
+ ("vm_page_sunbusy: invalid lock state"));
+ if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
continue;
- }
- KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
- ("vm_page_sunbusy: invalid lock state for waiters"));
-
- vm_page_lock(m);
- if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
- vm_page_unlock(m);
- continue;
- }
+ if ((x & VPB_BIT_WAITERS) == 0)
+ break;
wakeup(m);
- vm_page_unlock(m);
break;
}
}
@@ -955,25 +934,44 @@
* Sleep and release the page lock, using the page pointer as wchan.
* This is used to implement the hard-path of busying mechanism.
*
- * The given page must be locked.
- *
* If nonshared is true, sleep only if the page is xbusy.
*/
void
vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
{
+ vm_object_t obj;
+ bool locked;
u_int x;
- vm_page_assert_locked(m);
+ vm_page_lock_assert(m, MA_NOTOWNED);
+ /*
+ * The page-specific object must be cached because page
+ * identity can change during the sleep, causing the
+ * re-lock of a different object.
+ * It is assumed that a reference to the object is already
+ * held by the callers.
+ */
+ obj = m->object;
+ if (obj != NULL)
+ locked = VM_OBJECT_WOWNED(obj);
+ else
+ locked = FALSE;
+ if (locked)
+ VM_OBJECT_WUNLOCK(obj);
+ sleepq_lock(m);
x = m->busy_lock;
if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
((x & VPB_BIT_WAITERS) == 0 &&
!atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
- vm_page_unlock(m);
- return;
+ sleepq_release(m);
+ goto out;
}
- msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
+ sleepq_add(m, NULL, wmesg, 0, 0);
+ sleepq_wait(m, PVM);
+out:
+ if (locked)
+ VM_OBJECT_WLOCK(obj);
}
/*
@@ -988,55 +986,20 @@
{
u_int x;
+ x = m->busy_lock;
for (;;) {
- x = m->busy_lock;
if ((x & VPB_BIT_SHARED) == 0)
return (0);
- if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
+ if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
+ x + VPB_ONE_SHARER))
return (1);
}
}
-static void
-vm_page_xunbusy_locked(vm_page_t m)
-{
-
- vm_page_assert_xbusied(m);
- vm_page_assert_locked(m);
-
- atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
- /* There is a waiter, do wakeup() instead of vm_page_flash(). */
- wakeup(m);
-}
-
-void
-vm_page_xunbusy_maybelocked(vm_page_t m)
-{
- bool lockacq;
-
- vm_page_assert_xbusied(m);
-
- /*
- * Fast path for unbusy. If it succeeds, we know that there
- * are no waiters, so we do not need a wakeup.
- */
- if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER,
- VPB_UNBUSIED))
- return;
-
- lockacq = !mtx_owned(vm_page_lockptr(m));
- if (lockacq)
- vm_page_lock(m);
- vm_page_xunbusy_locked(m);
- if (lockacq)
- vm_page_unlock(m);
-}
-
/*
* vm_page_xunbusy_hard:
*
- * Called after the first try the exclusive unbusy of a page failed.
- * It is assumed that the waiters bit is on.
+ * Called when unbusy has failed because there is a waiter.
*/
void
vm_page_xunbusy_hard(vm_page_t m)
@@ -1044,34 +1007,10 @@
vm_page_assert_xbusied(m);
- vm_page_lock(m);
- vm_page_xunbusy_locked(m);
- vm_page_unlock(m);
-}
-
-/*
- * vm_page_flash:
- *
- * Wakeup anyone waiting for the page.
- * The ownership bits do not change.
- *
- * The given page must be locked.
- */
-void
-vm_page_flash(vm_page_t m)
-{
- u_int x;
-
- vm_page_lock_assert(m, MA_OWNED);
-
- for (;;) {
- x = m->busy_lock;
- if ((x & VPB_BIT_WAITERS) == 0)
- return;
- if (atomic_cmpset_int(&m->busy_lock, x,
- x & (~VPB_BIT_WAITERS)))
- break;
- }
+ /*
+ * Wake the waiter.
+ */
+ atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
wakeup(m);
}
@@ -1268,7 +1207,7 @@
* Sleep and release the page queues lock if the page is busied.
* Returns TRUE if the thread slept.
*
- * The given page must be unlocked and object containing it must
+ * The given page must be unlocked and object containing it may
* be locked.
*/
int
@@ -1277,21 +1216,10 @@
vm_object_t obj;
vm_page_lock_assert(m, MA_NOTOWNED);
- VM_OBJECT_ASSERT_WLOCKED(m->object);
+ obj = m->object;
if (vm_page_busied(m)) {
- /*
- * The page-specific object must be cached because page
- * identity can change during the sleep, causing the
- * re-lock of a different object.
- * It is assumed that a reference to the object is already
- * held by the callers.
- */
- obj = m->object;
- vm_page_lock(m);
- VM_OBJECT_WUNLOCK(obj);
vm_page_busy_sleep(m, msg, false);
- VM_OBJECT_WLOCK(obj);
return (TRUE);
}
return (FALSE);
@@ -1455,7 +1383,7 @@
vm_page_assert_locked(m);
VM_OBJECT_ASSERT_WLOCKED(object);
if (vm_page_xbusied(m))
- vm_page_xunbusy_maybelocked(m);
+ vm_page_xunbusy(m);
mrem = vm_radix_remove(&object->rtree, m->pindex);
KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
@@ -1588,7 +1516,7 @@
TAILQ_REMOVE(&object->memq, mold, listq);
mold->object = NULL;
- vm_page_xunbusy_maybelocked(mold);
+ vm_page_xunbusy(mold);
/*
* The object's resident_page_count does not change because we have
@@ -3922,11 +3850,8 @@
* likely to reclaim it.
*/
vm_page_aflag_set(m, PGA_REFERENCED);
- vm_page_lock(m);
- VM_OBJECT_WUNLOCK(object);
vm_page_busy_sleep(m, "pgrbwt", (allocflags &
VM_ALLOC_IGN_SBUSY) != 0);
- VM_OBJECT_WLOCK(object);
goto retrylookup;
} else {
if ((allocflags & VM_ALLOC_WIRED) != 0) {
@@ -4024,11 +3949,10 @@
* likely to reclaim it.
*/
vm_page_aflag_set(m, PGA_REFERENCED);
- vm_page_lock(m);
- VM_OBJECT_WUNLOCK(object);
vm_page_busy_sleep(m, "grbmaw", (allocflags &
VM_ALLOC_IGN_SBUSY) != 0);
- VM_OBJECT_WLOCK(object);
+ if ((allocflags & VM_ALLOC_WAITFAIL) != 0)
+ break;
goto retrylookup;
}
if ((allocflags & VM_ALLOC_WIRED) != 0) {
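
The final hunk (the "grbmaw" wait, which appears to be vm_page_grab_pages()) additionally breaks out of the retry loop after the busy-sleep when VM_ALLOC_WAITFAIL was requested, so such callers can come back with fewer pages than asked for. A minimal usage sketch, assuming the vm_page_grab_pages() signature of this era (example_grab is illustrative only, not from the patch):

/*
 * Illustrative sketch only: with VM_ALLOC_WAITFAIL the grab may stop
 * early after sleeping on a busy page, so the caller must handle a
 * short count rather than assuming all 'count' pages were returned.
 */
static int
example_grab(vm_object_t object, vm_pindex_t pindex, vm_page_t *ma, int count)
{
	int got;

	VM_OBJECT_WLOCK(object);
	got = vm_page_grab_pages(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL, ma, count);
	VM_OBJECT_WUNLOCK(object);
	return (got);
}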
