Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -508,7 +508,7 @@
                         *m_hold = &m[i];
                         vm_page_wire(&m[i]);
                 }
-                vm_page_xunbusy_maybelocked(&m[i]);
+                vm_page_xunbusy(&m[i]);
         }
         if (m_mtx != NULL)
                 mtx_unlock(m_mtx);
@@ -1009,7 +1009,7 @@
                         if (!vm_page_wired(fs.m))
                                 vm_page_free(fs.m);
                         else
-                                vm_page_xunbusy_maybelocked(fs.m);
+                                vm_page_xunbusy(fs.m);
                         vm_page_unlock(fs.m);
                         fs.m = NULL;
                         unlock_and_deallocate(&fs);
@@ -1032,7 +1032,7 @@
                         if (!vm_page_wired(fs.m))
                                 vm_page_free(fs.m);
                         else
-                                vm_page_xunbusy_maybelocked(fs.m);
+                                vm_page_xunbusy(fs.m);
                         vm_page_unlock(fs.m);
                         fs.m = NULL;
                 }
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -1226,11 +1226,13 @@
                                  */
                                 vm_page_aflag_set(tm, PGA_REFERENCED);
                         }
+                        vm_page_unlock(tm);
                         vm_page_busy_sleep(tm, "madvpo", false);
                         goto relookup;
                 }
                 vm_page_advise(tm, advice);
                 vm_page_unlock(tm);
+                vm_page_xunbusy(tm);
                 vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
 next_pindex:
                 if (tobject != object)
@@ -1399,7 +1401,6 @@
                  */
                 if (vm_page_busied(m)) {
                         VM_OBJECT_WUNLOCK(new_object);
-                        vm_page_lock(m);
                         VM_OBJECT_WUNLOCK(orig_object);
                         vm_page_busy_sleep(m, "spltwt", false);
                         VM_OBJECT_WLOCK(orig_object);
@@ -1469,8 +1470,6 @@
                     ("invalid ownership %p %p %p", p, object, backing_object));
                 if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
                         return (next);
-                if (p != NULL)
-                        vm_page_lock(p);
                 VM_OBJECT_WUNLOCK(object);
                 VM_OBJECT_WUNLOCK(backing_object);
                 /* The page is only NULL when rename fails. */
@@ -1930,6 +1929,7 @@
                 vm_page_change_lock(p, &mtx);
                 if (vm_page_xbusied(p)) {
                         VM_OBJECT_WUNLOCK(object);
+                        mtx_unlock(mtx);
                         vm_page_busy_sleep(p, "vmopax", true);
                         VM_OBJECT_WLOCK(object);
                         goto again;
@@ -1946,6 +1946,7 @@
                 }
                 if (vm_page_busied(p)) {
                         VM_OBJECT_WUNLOCK(object);
+                        mtx_unlock(mtx);
                         vm_page_busy_sleep(p, "vmopar", false);
                         VM_OBJECT_WLOCK(object);
                         goto again;
@@ -2250,7 +2251,6 @@
                         tm = m;
                         m = TAILQ_NEXT(m, listq);
                 }
-                vm_page_lock(tm);
                 if (vm_page_xbusied(tm)) {
                         for (tobject = object; locked_depth >= 1;
                             locked_depth--) {
@@ -2261,6 +2261,7 @@
                         vm_page_busy_sleep(tm, "unwbo", true);
                         goto again;
                 }
+                vm_page_lock(tm);
                 vm_page_unwire(tm, queue);
                 vm_page_unlock(tm);
 next_page:
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -513,7 +513,6 @@
 
 void vm_page_busy_downgrade(vm_page_t m);
 void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
-void vm_page_flash(vm_page_t m);
 void vm_page_free(vm_page_t m);
 void vm_page_free_zero(vm_page_t m);
 
@@ -586,7 +585,6 @@
 void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_wire (vm_page_t);
 void vm_page_xunbusy_hard(vm_page_t m);
-void vm_page_xunbusy_maybelocked(vm_page_t m);
 void vm_page_set_validclean (vm_page_t, int, int);
 void vm_page_clear_dirty (vm_page_t, int, int);
 void vm_page_set_invalid (vm_page_t, int, int);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -84,6 +84,7 @@
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/sleepqueue.h>
 #include <sys/sbuf.h>
 #include <sys/sched.h>
 #include <sys/smp.h>
@@ -869,27 +870,18 @@
 vm_page_busy_downgrade(vm_page_t m)
 {
         u_int x;
-        bool locked;
 
         vm_page_assert_xbusied(m);
-        locked = mtx_owned(vm_page_lockptr(m));
 
         for (;;) {
                 x = m->busy_lock;
                 x &= VPB_BIT_WAITERS;
-                if (x != 0 && !locked)
-                        vm_page_lock(m);
                 if (atomic_cmpset_rel_int(&m->busy_lock,
                     VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1)))
                         break;
-                if (x != 0 && !locked)
-                        vm_page_unlock(m);
         }
-        if (x != 0) {
+        if (x != 0)
                 wakeup(m);
-                if (!locked)
-                        vm_page_unlock(m);
-        }
 }
 
 /*
@@ -916,7 +908,6 @@
 {
         u_int x;
 
-        vm_page_lock_assert(m, MA_NOTOWNED);
         vm_page_assert_sbusied(m);
 
         for (;;) {
@@ -938,13 +929,9 @@
                 KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
                     ("vm_page_sunbusy: invalid lock state for waiters"));
-                vm_page_lock(m);
-                if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
-                        vm_page_unlock(m);
+                if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED))
                         continue;
-                }
                 wakeup(m);
-                vm_page_unlock(m);
                 break;
         }
 }
 
 /*
@@ -955,25 +942,41 @@
  * Sleep and release the page lock, using the page pointer as wchan.
  * This is used to implement the hard-path of busying mechanism.
  *
- * The given page must be locked.
- *
  * If nonshared is true, sleep only if the page is xbusy.
  */
 void
 vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
 {
+        vm_object_t obj;
+        bool locked;
         u_int x;
 
-        vm_page_assert_locked(m);
+        vm_page_lock_assert(m, MA_NOTOWNED);
 
+        /*
+         * The page-specific object must be cached because page
+         * identity can change during the sleep, causing the
+         * re-lock of a different object.
+         * It is assumed that a reference to the object is already
+         * held by the callers.
+         */
+        obj = m->object;
+        locked = VM_OBJECT_WOWNED(obj);
+        if (locked)
+                VM_OBJECT_WUNLOCK(obj);
+        sleepq_lock(m);
         x = m->busy_lock;
         if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
             ((x & VPB_BIT_WAITERS) == 0 &&
             !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
-                vm_page_unlock(m);
-                return;
+                sleepq_release(m);
+                goto out;
         }
-        msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
+        sleepq_add(m, NULL, wmesg, 0, 0);
+        sleepq_wait(m, PVM);
+out:
+        if (locked)
+                VM_OBJECT_WLOCK(obj);
 }
 
 /*
@@ -997,22 +1000,9 @@
         }
 }
 
-static void
-vm_page_xunbusy_locked(vm_page_t m)
-{
-
-        vm_page_assert_xbusied(m);
-        vm_page_assert_locked(m);
-
-        atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
-        /* There is a waiter, do wakeup() instead of vm_page_flash(). */
-        wakeup(m);
-}
-
 void
-vm_page_xunbusy_maybelocked(vm_page_t m)
+vm_page_xunbusy_hard(vm_page_t m)
 {
-        bool lockacq;
 
         vm_page_assert_xbusied(m);
 
@@ -1024,54 +1014,10 @@
             VPB_UNBUSIED))
                 return;
 
-        lockacq = !mtx_owned(vm_page_lockptr(m));
-        if (lockacq)
-                vm_page_lock(m);
-        vm_page_xunbusy_locked(m);
-        if (lockacq)
-                vm_page_unlock(m);
-}
-
-/*
- * vm_page_xunbusy_hard:
- *
- * Called after the first try the exclusive unbusy of a page failed.
- * It is assumed that the waiters bit is on.
- */
-void
-vm_page_xunbusy_hard(vm_page_t m)
-{
-
-        vm_page_assert_xbusied(m);
-
-        vm_page_lock(m);
-        vm_page_xunbusy_locked(m);
-        vm_page_unlock(m);
-}
-
-/*
- * vm_page_flash:
- *
- * Wakeup anyone waiting for the page.
- * The ownership bits do not change.
- *
- * The given page must be locked.
- */
-void
-vm_page_flash(vm_page_t m)
-{
-        u_int x;
-
-        vm_page_lock_assert(m, MA_OWNED);
-
-        for (;;) {
-                x = m->busy_lock;
-                if ((x & VPB_BIT_WAITERS) == 0)
-                        return;
-                if (atomic_cmpset_int(&m->busy_lock, x,
-                    x & (~VPB_BIT_WAITERS)))
-                        break;
-        }
+        /*
+         * Wake the waiter.
+         */
+        atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
         wakeup(m);
 }
 
 /*
@@ -1268,7 +1214,7 @@
  * Sleep and release the page queues lock if the page is busied.
  * Returns TRUE if the thread slept.
  *
- * The given page must be unlocked and object containing it must
+ * The given page must be unlocked and object containing it may
  * be locked.
  */
 int
@@ -1277,21 +1223,10 @@
         vm_object_t obj;
 
         vm_page_lock_assert(m, MA_NOTOWNED);
-        VM_OBJECT_ASSERT_WLOCKED(m->object);
+        obj = m->object;
 
         if (vm_page_busied(m)) {
-                /*
-                 * The page-specific object must be cached because page
-                 * identity can change during the sleep, causing the
-                 * re-lock of a different object.
-                 * It is assumed that a reference to the object is already
-                 * held by the callers.
-                 */
-                obj = m->object;
-                vm_page_lock(m);
-                VM_OBJECT_WUNLOCK(obj);
                 vm_page_busy_sleep(m, msg, false);
-                VM_OBJECT_WLOCK(obj);
                 return (TRUE);
         }
         return (FALSE);
@@ -1455,7 +1390,7 @@
         vm_page_assert_locked(m);
         VM_OBJECT_ASSERT_WLOCKED(object);
         if (vm_page_xbusied(m))
-                vm_page_xunbusy_maybelocked(m);
+                vm_page_xunbusy(m);
         mrem = vm_radix_remove(&object->rtree, m->pindex);
         KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
 
@@ -1588,7 +1523,7 @@
         TAILQ_REMOVE(&object->memq, mold, listq);
 
         mold->object = NULL;
-        vm_page_xunbusy_maybelocked(mold);
+        vm_page_xunbusy(mold);
 
         /*
          * The object's resident_page_count does not change because we have
@@ -3922,11 +3857,8 @@
                         * likely to reclaim it.
                         */
                        vm_page_aflag_set(m, PGA_REFERENCED);
-                       vm_page_lock(m);
-                       VM_OBJECT_WUNLOCK(object);
                        vm_page_busy_sleep(m, "pgrbwt", (allocflags &
                            VM_ALLOC_IGN_SBUSY) != 0);
-                       VM_OBJECT_WLOCK(object);
                        goto retrylookup;
                 } else {
                        if ((allocflags & VM_ALLOC_WIRED) != 0) {
@@ -4024,11 +3956,8 @@
                         * likely to reclaim it.
                         */
                        vm_page_aflag_set(m, PGA_REFERENCED);
-                       vm_page_lock(m);
-                       VM_OBJECT_WUNLOCK(object);
                        vm_page_busy_sleep(m, "grbmaw", (allocflags &
                            VM_ALLOC_IGN_SBUSY) != 0);
-                       VM_OBJECT_WLOCK(object);
                        goto retrylookup;
                 }
                 if ((allocflags & VM_ALLOC_WIRED) != 0) {
Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c
+++ sys/vm/vm_pageout.c
@@ -375,6 +375,7 @@
                 vm_page_lock(p);
                 if (vm_page_wired(p) || !vm_page_in_laundry(p)) {
                         vm_page_unlock(p);
+                        vm_page_xunbusy(p);
                         ib = 0;
                         break;
                 }
@@ -401,6 +402,7 @@
                 vm_page_lock(p);
                 if (vm_page_wired(p) || !vm_page_in_laundry(p)) {
                         vm_page_unlock(p);
+                        vm_page_xunbusy(p);
                         break;
                 }
                 pmap_remove_write(p);
@@ -601,6 +603,7 @@
          */
         if (object->type == OBJT_VNODE) {
                 vm_page_unlock(m);
+                vm_page_xunbusy(m);
                 vp = object->handle;
                 if (vp->v_type == VREG &&
                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {