Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -166,11 +166,9 @@
 }
 
 static void
-unlock_and_deallocate(struct faultstate *fs)
+unlock_and_deallocate2(struct faultstate *fs)
 {
 
-	vm_object_pip_wakeup(fs->object);
-	VM_OBJECT_WUNLOCK(fs->object);
 	if (fs->object != fs->first_object) {
 		VM_OBJECT_WLOCK(fs->first_object);
 		vm_page_lock(fs->first_m);
@@ -185,6 +183,15 @@
 	unlock_vp(fs);
 }
 
+static void
+unlock_and_deallocate(struct faultstate *fs)
+{
+
+	vm_object_pip_wakeup(fs->object);
+	VM_OBJECT_WUNLOCK(fs->object);
+	unlock_and_deallocate2(fs);
+}
+
 static void
 vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
     vm_prot_t fault_type, int fault_flags, bool set_wd)
@@ -1248,6 +1255,7 @@
 	 */
 	KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
 	    ("vm_fault: page %p partially invalid", fs.m));
+	vm_object_pip_wakeup(fs.object);
 	VM_OBJECT_WUNLOCK(fs.object);
 
 	/*
@@ -1263,7 +1271,7 @@
 		vm_fault_prefault(&fs, vaddr,
 		    faultcount > 0 ? behind : PFBAK,
 		    faultcount > 0 ? ahead : PFFOR, false);
-	VM_OBJECT_WLOCK(fs.object);
+
 	vm_page_lock(fs.m);
 
 	/*
@@ -1279,13 +1287,13 @@
 		*m_hold = fs.m;
 		vm_page_hold(fs.m);
 	}
+	vm_page_xunbusy_maybelocked(fs.m);
 	vm_page_unlock(fs.m);
-	vm_page_xunbusy(fs.m);
 
 	/*
 	 * Unlock everything, and return
 	 */
-	unlock_and_deallocate(&fs);
+	unlock_and_deallocate2(&fs);
 	if (hardfault) {
 		VM_CNT_INC(v_io_faults);
 		curthread->td_ru.ru_majflt++;
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -724,6 +724,7 @@
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
+restart:
 	mtx = NULL;
 
 	/*
@@ -733,13 +734,19 @@
 	 * the object, the page and object are reset to any empty state.
 	 */
 	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
-		vm_page_assert_unbusied(p);
 		if ((object->flags & OBJ_UNMANAGED) == 0)
 			/*
 			 * vm_page_free_prep() only needs the page
 			 * lock for managed pages.
 			 */
 			vm_page_change_lock(p, &mtx);
+		if (vm_page_busied(p)) {
+			vm_page_change_lock(p, &mtx);
+			VM_OBJECT_WUNLOCK(object);
+			vm_page_busy_sleep(p, "termbu", false);
+			VM_OBJECT_WLOCK(object);
+			goto restart;
+		}
 		p->object = NULL;
 		if (p->wire_count != 0)
 			continue;
@@ -1491,7 +1498,6 @@
 	backing_object = object->backing_object;
 	VM_OBJECT_ASSERT_WLOCKED(backing_object);
 
-	KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p));
 	KASSERT(p == NULL || p->object == object || p->object == backing_object,
 	    ("invalid ownership %p %p %p", p, object, backing_object));
 	if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -377,6 +377,7 @@
  * Page flags.  If changed at any other time than page allocation or
  * freeing, the modification must be protected by the vm_page lock.
  */
+#define	PG_FREEPREP	0x0001		/* vm_page_free_prep() completed */
 #define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
 #define	PG_ZERO		0x0008		/* page is zeroed */
 #define	PG_MARKER	0x0010		/* special queue marker page */
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -3434,6 +3434,8 @@
 		vm_page_lock_assert(m, MA_OWNED);
 		KASSERT(!pmap_page_is_mapped(m),
 		    ("vm_page_free_prep: freeing mapped page %p", m));
+		if ((m->flags & PG_FREEPREP) != 0)
+			return (true);
 	} else
 		KASSERT(m->queue == PQ_NONE,
 		    ("vm_page_free_prep: unmanaged page %p is queued", m));
@@ -3483,6 +3485,9 @@
 	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
 		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
 
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		m->flags |= PG_FREEPREP;
+
 #if VM_NRESERVLEVEL > 0
 	if (vm_reserv_free_page(m))
 		return (false);
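
Annotation (not part of the patch): the new PG_FREEPREP flag makes vm_page_free_prep() idempotent for managed pages. Since vm_object_terminate() can now drop the object lock, sleep on a busy page, and restart its scan of the memq, the same page may plausibly reach the free-prep path more than once; the latched flag turns any repeat into an early return. The fragment below is a minimal userspace sketch of that latch pattern, not kernel code: the struct, the flag value, and page_free_prep() are hypothetical stand-ins mirroring the patch, and the real one-time teardown is elided.

	#include <stdbool.h>
	#include <stdio.h>

	#define	PG_FREEPREP	0x0001	/* stand-in for the flag added in vm_page.h */

	struct page {
		unsigned int flags;
		int teardowns;		/* illustration only: counts real work */
	};

	/*
	 * Hypothetical analogue of vm_page_free_prep(): the first call does
	 * the one-time teardown and latches PG_FREEPREP; later calls see the
	 * flag and return immediately, so a restarted scan cannot repeat the
	 * work.
	 */
	static bool
	page_free_prep(struct page *p)
	{

		if ((p->flags & PG_FREEPREP) != 0)
			return (true);
		p->teardowns++;		/* one-time teardown would go here */
		p->flags |= PG_FREEPREP;
		return (true);
	}

	int
	main(void)
	{
		struct page p = { 0, 0 };

		page_free_prep(&p);	/* performs the teardown */
		page_free_prep(&p);	/* harmless repeat: flag is latched */
		printf("teardown ran %d time(s)\n", p.teardowns);	/* 1 */
		return (0);
	}

The same reading would explain why the vm_page.c hunks both guard on managed pages: PG_FREEPREP is set only when (m->oflags & VPO_UNMANAGED) == 0 and tested only in the managed branch, presumably because only managed pages can be revisited by the restarted terminate loop.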