Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -2361,9 +2361,7 @@
 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 ||
 	    object == NULL)
 		return;
-	VM_OBJECT_RLOCK(object);
 	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
-		VM_OBJECT_RUNLOCK(object);
 		VM_OBJECT_WLOCK(object);
 		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
 			pmap_object_init_pt(map->pmap, addr, object, pindex,
@@ -2372,7 +2370,8 @@
 			return;
 		}
 		VM_OBJECT_LOCK_DOWNGRADE(object);
-	}
+	} else
+		VM_OBJECT_RLOCK(object);
 
 	psize = atop(size);
 	if (psize + pindex > object->size) {
@@ -2539,6 +2538,8 @@
 			continue;
 		}
 
+		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP)
+			continue;
 		VM_OBJECT_WLOCK(obj);
 		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
 			VM_OBJECT_WUNLOCK(obj);
@@ -3725,14 +3726,14 @@
 
 /*
  *
- *	vm_map_copy_anon_object:
+ *	vm_map_copy_swap_object:
  *
- *	Copies an anonymous object from an existing map entry to a
+ *	Copies a swap-backed object from an existing map entry to a
  *	new one.  Carries forward the swap charge.  May change the
  *	src object on return.
  */
 static void
-vm_map_copy_anon_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry,
+vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry,
     vm_offset_t size, vm_ooffset_t *fork_charge)
 {
 	vm_object_t src_object;
@@ -3814,8 +3815,9 @@
 		 */
 		size = src_entry->end - src_entry->start;
 		if ((src_object = src_entry->object.vm_object) != NULL) {
-			if ((src_object->flags & OBJ_ANON) != 0) {
-				vm_map_copy_anon_object(src_entry, dst_entry,
+			if (src_object->type == OBJT_DEFAULT ||
+			    src_object->type == OBJT_SWAP) {
+				vm_map_copy_swap_object(src_entry, dst_entry,
 				    size, fork_charge);
 				/* May have split/collapsed, reload obj. */
 				src_object = src_entry->object.vm_object;
Index: sys/vm/vm_mmap.c
===================================================================
--- sys/vm/vm_mmap.c
+++ sys/vm/vm_mmap.c
@@ -1325,12 +1325,14 @@
 	} else {
 		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
 		    ("wrong object type"));
-		VM_OBJECT_WLOCK(obj);
-		vm_object_reference_locked(obj);
+		vm_object_reference(obj);
 #if VM_NRESERVLEVEL > 0
-		vm_object_color(obj, 0);
+		if ((obj->flags & OBJ_COLORED) == 0) {
+			VM_OBJECT_WLOCK(obj);
+			vm_object_color(obj, 0);
+			VM_OBJECT_WUNLOCK(obj);
+		}
 #endif
-		VM_OBJECT_WUNLOCK(obj);
 	}
 	*objp = obj;
 	*flagsp = flags;
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -520,15 +520,22 @@
 vm_object_vndeallocate(vm_object_t object)
 {
 	struct vnode *vp = (struct vnode *) object->handle;
+	bool last;
 
 	KASSERT(object->type == OBJT_VNODE,
 	    ("vm_object_vndeallocate: not a vnode object"));
 	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
 
+	/* Object lock to protect handle lookup. */
+	last = refcount_release(&object->ref_count);
+	VM_OBJECT_RUNLOCK(object);
+
+	if (!last)
+		return;
+
 	if (!umtx_shm_vnobj_persistent)
 		umtx_shm_object_terminated(object);
-	VM_OBJECT_WUNLOCK(object);
 
 	/* vrele may need the vnode lock. */
 	vrele(vp);
 }
@@ -548,7 +555,7 @@
 vm_object_deallocate(vm_object_t object)
 {
 	vm_object_t robject, temp;
-	bool last, released;
+	bool released;
 
 	while (object != NULL) {
 		/*
@@ -565,18 +572,22 @@
 		if (released)
 			return;
 
-		VM_OBJECT_WLOCK(object);
-		KASSERT(object->ref_count != 0,
-		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));
-
-		last = refcount_release(&object->ref_count);
 		if (object->type == OBJT_VNODE) {
-			if (last)
+			VM_OBJECT_RLOCK(object);
+			if (object->type == OBJT_VNODE) {
 				vm_object_vndeallocate(object);
-			else
-				VM_OBJECT_WUNLOCK(object);
-			return;
+				return;
+			}
+			VM_OBJECT_RUNLOCK(object);
 		}
+
+		VM_OBJECT_WLOCK(object);
+		KASSERT(object->ref_count > 0,
+		    ("vm_object_deallocate: object deallocated too many times: %d",
+		    object->type));
+
+		if (refcount_release(&object->ref_count))
+			goto doterm;
 		if (object->ref_count > 1) {
 			VM_OBJECT_WUNLOCK(object);
 			return;