Index: sys/vm/vm_fault.c =================================================================== --- sys/vm/vm_fault.c +++ sys/vm/vm_fault.c @@ -1224,9 +1224,8 @@ } vm_object_pip_wakeup(fs->object); unlock_map(fs); - if (fs->m == vm_page_lookup(fs->object, fs->pindex)) - vm_page_busy_sleep(fs->m, "vmpfw", false); - else + if (fs->m != vm_page_lookup(fs->object, fs->pindex) || + !vm_page_busy_sleep(fs->m, "vmpfw", 0)) VM_OBJECT_WUNLOCK(fs->object); VM_CNT_INC(v_intrans); vm_object_deallocate(fs->first_object); Index: sys/vm/vm_object.c =================================================================== --- sys/vm/vm_object.c +++ sys/vm/vm_object.c @@ -1390,7 +1390,8 @@ */ vm_page_aflag_set(tm, PGA_REFERENCED); } - vm_page_busy_sleep(tm, "madvpo", false); + if (!vm_page_busy_sleep(tm, "madvpo", 0)) + VM_OBJECT_WUNLOCK(tobject); goto relookup; } vm_page_advise(tm, advice); @@ -1568,7 +1569,8 @@ */ if (vm_page_tryxbusy(m) == 0) { VM_OBJECT_WUNLOCK(new_object); - vm_page_sleep_if_busy(m, "spltwt"); + if (vm_page_busy_sleep(m, "spltwt", 0)) + VM_OBJECT_WLOCK(orig_object); VM_OBJECT_WLOCK(new_object); goto retry; } @@ -1646,14 +1648,17 @@ VM_OBJECT_WUNLOCK(object); VM_OBJECT_WUNLOCK(backing_object); vm_radix_wait(); + VM_OBJECT_WLOCK(object); + } else if (p->object == object) { + VM_OBJECT_WUNLOCK(backing_object); + if (vm_page_busy_sleep(p, "vmocol", 0)) + VM_OBJECT_WLOCK(object); } else { - if (p->object == object) + VM_OBJECT_WUNLOCK(object); + if (!vm_page_busy_sleep(p, "vmocol", 0)) VM_OBJECT_WUNLOCK(backing_object); - else - VM_OBJECT_WUNLOCK(object); - vm_page_busy_sleep(p, "vmocol", false); + VM_OBJECT_WLOCK(object); } - VM_OBJECT_WLOCK(object); VM_OBJECT_WLOCK(backing_object); return (TAILQ_FIRST(&backing_object->memq)); } @@ -2087,7 +2092,8 @@ * not specified. 
*/ if (vm_page_tryxbusy(p) == 0) { - vm_page_sleep_if_busy(p, "vmopar"); + if (vm_page_busy_sleep(p, "vmopar", 0)) + VM_OBJECT_WLOCK(object); goto again; } if (vm_page_wired(p)) { @@ -2392,7 +2398,9 @@ VM_OBJECT_RUNLOCK(tobject); tobject = t1object; } - vm_page_busy_sleep(tm, "unwbo", true); + tobject = tm->object; + if (!vm_page_busy_sleep(tm, "unwbo", VM_ALLOC_SBUSY)) + VM_OBJECT_RUNLOCK(tobject); goto again; } vm_page_unwire(tm, queue); Index: sys/vm/vm_page.h =================================================================== --- sys/vm/vm_page.h +++ sys/vm/vm_page.h @@ -117,7 +117,7 @@ * busy lock ordering rules are not verified, lock recursion is not * detected, and an attempt to xbusy a busy page or sbusy an xbusy page * results will trigger a panic rather than causing the thread to block. - * vm_page_sleep_if_busy() can be used to sleep until the page's busy + * vm_page_busy_sleep() can be used to sleep until the page's busy * state changes, after which the caller must re-lookup the page and * re-evaluate its state. vm_page_busy_acquire() will block until * the lock is acquired. 
@@ -590,12 +590,11 @@ bool vm_page_busy_acquire(vm_page_t m, int allocflags); void vm_page_busy_downgrade(vm_page_t m); int vm_page_busy_tryupgrade(vm_page_t m); -void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared); +bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags); void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, - vm_pindex_t pindex, const char *wmesg, bool nonshared); + vm_pindex_t pindex, const char *wmesg, int allocflags); void vm_page_free(vm_page_t m); void vm_page_free_zero(vm_page_t m); - void vm_page_activate (vm_page_t); void vm_page_advise(vm_page_t m, int advice); vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int); @@ -664,8 +663,6 @@ vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options); vm_page_bits_t vm_page_set_dirty(vm_page_t m); void vm_page_set_valid_range(vm_page_t m, int base, int size); -int vm_page_sleep_if_busy(vm_page_t m, const char *msg); -int vm_page_sleep_if_xbusy(vm_page_t m, const char *msg); vm_offset_t vm_page_startup(vm_offset_t vaddr); void vm_page_sunbusy(vm_page_t m); bool vm_page_try_remove_all(vm_page_t m); Index: sys/vm/vm_page.c =================================================================== --- sys/vm/vm_page.c +++ sys/vm/vm_page.c @@ -983,24 +983,23 @@ * vm_page_busy_sleep: * * Sleep if the page is busy, using the page pointer as wchan. - * This is used to implement the hard-path of busying mechanism. + * This is used to implement the hard-path of the busying mechanism. + * Returns true if it sleeps and drops the lock or false if there is + * no sleep and the lock is still held. + * - * If nonshared is true, sleep only if the page is xbusy. + * allocflags uses VM_ALLOC flags to specify the lock required. * * - * The object lock must be held on entry and will be released on exit. + * The object lock must be held on entry. 
*/ -void -vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared) +bool +vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags) { - vm_object_t obj; - obj = m->object; - VM_OBJECT_ASSERT_LOCKED(obj); + VM_OBJECT_ASSERT_LOCKED(m->object); vm_page_lock_assert(m, MA_NOTOWNED); - if (!_vm_page_busy_sleep(obj, m, m->pindex, wmesg, - nonshared ? VM_ALLOC_SBUSY : 0 , true)) - VM_OBJECT_DROP(obj); + return (_vm_page_busy_sleep(m->object, m, m->pindex, wmesg, allocflags, + true)); } /* @@ -1009,21 +1008,20 @@ * Sleep if the page is busy, using the page pointer as wchan. * This is used to implement the hard-path of busying mechanism. * - * If nonshared is true, sleep only if the page is xbusy. + * allocflags uses VM_ALLOC flags to specify the lock required. * * The object lock must not be held on entry. The operation will * return if the page changes identity. */ void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m, vm_pindex_t pindex, - const char *wmesg, bool nonshared) + const char *wmesg, int allocflags) { VM_OBJECT_ASSERT_UNLOCKED(obj); vm_page_lock_assert(m, MA_NOTOWNED); - _vm_page_busy_sleep(obj, m, pindex, wmesg, - nonshared ? VM_ALLOC_SBUSY : 0, false); + (void)_vm_page_busy_sleep(obj, m, pindex, wmesg, allocflags, false); } /* @@ -1348,71 +1346,6 @@ vm_page_xunbusy_unchecked(m); } -/* - * vm_page_sleep_if_busy: - * - * Sleep and release the object lock if the page is busied. - * Returns TRUE if the thread slept. - * - * The given page must be unlocked and object containing it must - * be locked. - */ -int -vm_page_sleep_if_busy(vm_page_t m, const char *wmesg) -{ - vm_object_t obj; - - vm_page_lock_assert(m, MA_NOTOWNED); - VM_OBJECT_ASSERT_WLOCKED(m->object); - - /* - * The page-specific object must be cached because page - * identity can change during the sleep, causing the - * re-lock of a different object. - * It is assumed that a reference to the object is already - * held by the callers. 
- */ - obj = m->object; - if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, 0, true)) { - VM_OBJECT_WLOCK(obj); - return (TRUE); - } - return (FALSE); -} - -/* - * vm_page_sleep_if_xbusy: - * - * Sleep and release the object lock if the page is xbusied. - * Returns TRUE if the thread slept. - * - * The given page must be unlocked and object containing it must - * be locked. - */ -int -vm_page_sleep_if_xbusy(vm_page_t m, const char *wmesg) -{ - vm_object_t obj; - - vm_page_lock_assert(m, MA_NOTOWNED); - VM_OBJECT_ASSERT_WLOCKED(m->object); - - /* - * The page-specific object must be cached because page - * identity can change during the sleep, causing the - * re-lock of a different object. - * It is assumed that a reference to the object is already - * held by the callers. - */ - obj = m->object; - if (_vm_page_busy_sleep(obj, m, m->pindex, wmesg, VM_ALLOC_SBUSY, - true)) { - VM_OBJECT_WLOCK(obj); - return (TRUE); - } - return (FALSE); -} - /* * vm_page_dirty_KBI: [ internal use only ] *