Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -132,7 +132,7 @@
 static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
     int ahead);
 static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
-    int backward, int forward);
+    int backward, int forward, bool obj_locked);
 
 static inline void
 release_page(struct faultstate *fs)
@@ -166,11 +166,9 @@
 }
 
 static void
-unlock_and_deallocate(struct faultstate *fs)
+unlock_and_deallocate2(struct faultstate *fs)
 {
 
-	vm_object_pip_wakeup(fs->object);
-	VM_OBJECT_WUNLOCK(fs->object);
 	if (fs->object != fs->first_object) {
 		VM_OBJECT_WLOCK(fs->first_object);
 		vm_page_lock(fs->first_m);
@@ -185,6 +183,15 @@
 	unlock_vp(fs);
 }
 
+static void
+unlock_and_deallocate(struct faultstate *fs)
+{
+
+	vm_object_pip_wakeup(fs->object);
+	VM_OBJECT_WUNLOCK(fs->object);
+	unlock_and_deallocate2(fs);
+}
+
 static void
 vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
     vm_prot_t fault_type, int fault_flags, bool set_wd)
@@ -320,9 +327,9 @@
 		return (rv);
 	vm_fault_fill_hold(m_hold, m);
 	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
-	VM_OBJECT_RUNLOCK(fs->first_object);
 	if (psind == 0 && !wired)
-		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR);
+		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
+	VM_OBJECT_RUNLOCK(fs->first_object);
 	vm_map_lookup_done(fs->map, fs->entry);
 	curthread->td_ru.ru_minflt++;
 	return (KERN_SUCCESS);
@@ -1248,6 +1255,7 @@
 	 */
 	KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
 	    ("vm_fault: page %p partially invalid", fs.m));
+	vm_object_pip_wakeup(fs.object);
 	VM_OBJECT_WUNLOCK(fs.object);
 
 	/*
@@ -1262,8 +1270,8 @@
 	    wired == 0)
 		vm_fault_prefault(&fs, vaddr,
 		    faultcount > 0 ? behind : PFBAK,
-		    faultcount > 0 ? ahead : PFFOR);
-	VM_OBJECT_WLOCK(fs.object);
+		    faultcount > 0 ? ahead : PFFOR, false);
+
 	vm_page_lock(fs.m);
 
 	/*
@@ -1279,13 +1287,13 @@
 		*m_hold = fs.m;
 		vm_page_hold(fs.m);
 	}
+	vm_page_xunbusy_maybelocked(fs.m);
 	vm_page_unlock(fs.m);
-	vm_page_xunbusy(fs.m);
 
 	/*
 	 * Unlock everything, and return
 	 */
-	unlock_and_deallocate(&fs);
+	unlock_and_deallocate2(&fs);
 	if (hardfault) {
 		VM_CNT_INC(v_io_faults);
 		curthread->td_ru.ru_majflt++;
@@ -1395,7 +1403,7 @@
  */
 static void
 vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
-    int backward, int forward)
+    int backward, int forward, bool obj_locked)
 {
 	pmap_t pmap;
 	vm_map_entry_t entry;
@@ -1441,7 +1449,8 @@
 
 		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
 		lobject = entry->object.vm_object;
-		VM_OBJECT_RLOCK(lobject);
+		if (!obj_locked)
+			VM_OBJECT_RLOCK(lobject);
 		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
 		    lobject->type == OBJT_DEFAULT &&
 		    (backing_object = lobject->backing_object) != NULL) {
@@ -1449,17 +1458,20 @@
 			    0, ("vm_fault_prefault: unaligned object offset"));
 			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
 			VM_OBJECT_RLOCK(backing_object);
-			VM_OBJECT_RUNLOCK(lobject);
+			if (!obj_locked || lobject != entry->object.vm_object)
+				VM_OBJECT_RUNLOCK(lobject);
 			lobject = backing_object;
 		}
 		if (m == NULL) {
-			VM_OBJECT_RUNLOCK(lobject);
+			if (!obj_locked || lobject != entry->object.vm_object)
+				VM_OBJECT_RUNLOCK(lobject);
 			break;
 		}
 		if (m->valid == VM_PAGE_BITS_ALL &&
 		    (m->flags & PG_FICTITIOUS) == 0)
 			pmap_enter_quick(pmap, addr, m, entry->protection);
-		VM_OBJECT_RUNLOCK(lobject);
+		if (!obj_locked || lobject != entry->object.vm_object)
+			VM_OBJECT_RUNLOCK(lobject);
 	}
 }
 
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -726,6 +726,7 @@
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
+restart:
 	mtx = NULL;
 	pq = NULL;
 
@@ -736,7 +737,13 @@
 	 * the object, the page and object are reset to any empty state.
 	 */
 	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
-		vm_page_assert_unbusied(p);
+		if (vm_page_busied(p)) {
+			vm_page_lock(p);
+			VM_OBJECT_WUNLOCK(object);
+			vm_page_busy_sleep(p, "termbu", false);
+			VM_OBJECT_WLOCK(object);
+			goto restart;
+		}
 		if ((object->flags & OBJ_UNMANAGED) == 0) {
 			/*
 			 * vm_page_free_prep() only needs the page
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -323,6 +323,7 @@
  * Page flags.  If changed at any other time than page allocation or
  * freeing, the modification must be protected by the vm_page lock.
  */
+#define	PG_FREEPREP	0x0001		/* vm_page_free_prep() called */
 #define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
 #define	PG_ZERO		0x0008		/* page is zeroed */
 #define	PG_MARKER	0x0010		/* special queue marker page */
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -3207,6 +3207,8 @@
 		vm_page_lock_assert(m, MA_OWNED);
 		KASSERT(!pmap_page_is_mapped(m),
 		    ("vm_page_free_toq: freeing mapped page %p", m));
+		if ((m->flags & PG_FREEPREP) != 0)
+			return (true);
 	} else
 		KASSERT(m->queue == PQ_NONE,
 		    ("vm_page_free_toq: unmanaged page %p is queued", m));
@@ -3259,6 +3261,8 @@
 		return (false);
 #endif
 
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		m->flags |= PG_FREEPREP;
 	return (true);
 }
 
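Illustrative note (not part of the patch): the new obj_locked parameter of
vm_fault_prefault() lets the fast path, which already holds the first
object's read lock, prefault without dropping and retaking that lock; the
function then only unlocks objects it locked itself. Below is a minimal
userspace sketch of that conditional-locking pattern using POSIX rwlocks.
The names object_t and prefault_one are invented for the example and do not
exist in the kernel.

/*
 * Sketch of the "caller may already hold the lock" pattern introduced
 * by the obj_locked parameter.  Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct object {
	pthread_rwlock_t lock;
	int resident;		/* stand-in for the object's page count */
} object_t;

/* Stand-in for the per-page prefault work done under the object lock. */
static void
prefault_one(object_t *obj, int pindex)
{
	if (pindex < obj->resident)
		printf("prefaulting pindex %d\n", pindex);
}

static void
prefault(object_t *obj, int start, int n, bool obj_locked)
{
	/* Lock only if the caller does not already hold the lock ... */
	if (!obj_locked)
		pthread_rwlock_rdlock(&obj->lock);
	for (int i = 0; i < n; i++)
		prefault_one(obj, start + i);
	/* ... and, symmetrically, unlock only what we locked ourselves. */
	if (!obj_locked)
		pthread_rwlock_unlock(&obj->lock);
}

int
main(void)
{
	object_t obj;

	pthread_rwlock_init(&obj.lock, NULL);
	obj.resident = 4;

	/* Fast path: caller already holds the read lock (obj_locked=true). */
	pthread_rwlock_rdlock(&obj.lock);
	prefault(&obj, 0, 2, true);
	pthread_rwlock_unlock(&obj.lock);

	/* Slow path: let the helper do its own locking (obj_locked=false). */
	prefault(&obj, 2, 2, false);

	pthread_rwlock_destroy(&obj.lock);
	return (0);
}

The kernel function is more subtle than this sketch because it also walks
the backing-object chain: it skips the unlock only while lobject is still
entry->object.vm_object, the one lock the caller may own; any backing
object it locked itself is always released.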