Index: head/sys/vm/default_pager.c
===================================================================
--- head/sys/vm/default_pager.c
+++ head/sys/vm/default_pager.c
@@ -113,6 +113,7 @@
 	/*
 	 * OBJT_DEFAULT objects have no special resources allocated to them.
 	 */
+	object->type = OBJT_DEAD;
 }
 
 /*
Index: head/sys/vm/device_pager.c
===================================================================
--- head/sys/vm/device_pager.c
+++ head/sys/vm/device_pager.c
@@ -254,6 +254,8 @@
 		    != NULL)
 			dev_pager_free_page(object, m);
 	}
+	object->handle = NULL;
+	object->type = OBJT_DEAD;
 }
 
 static int
Index: head/sys/vm/phys_pager.c
===================================================================
--- head/sys/vm/phys_pager.c
+++ head/sys/vm/phys_pager.c
@@ -131,6 +131,8 @@
 		mtx_unlock(&phys_pager_mtx);
 		VM_OBJECT_WLOCK(object);
 	}
+	object->handle = NULL;
+	object->type = OBJT_DEAD;
 }
 
 /*
Index: head/sys/vm/sg_pager.c
===================================================================
--- head/sys/vm/sg_pager.c
+++ head/sys/vm/sg_pager.c
@@ -130,6 +130,8 @@
 
 	sg = object->handle;
 	sglist_free(sg);
+	object->handle = NULL;
+	object->type = OBJT_DEAD;
 }
 
 static int
Index: head/sys/vm/swap_pager.c
===================================================================
--- head/sys/vm/swap_pager.c
+++ head/sys/vm/swap_pager.c
@@ -697,6 +697,8 @@
 	 * if paging is still in progress on some objects.
 	 */
 	swp_pager_meta_free_all(object);
+	object->handle = NULL;
+	object->type = OBJT_DEAD;
 }
 
 /************************************************************************
Index: head/sys/vm/vm_meter.c
===================================================================
--- head/sys/vm/vm_meter.c
+++ head/sys/vm/vm_meter.c
@@ -111,14 +111,7 @@
 	 */
 	mtx_lock(&vm_object_list_mtx);
 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
-		if (!VM_OBJECT_TRYWLOCK(object)) {
-			/*
-			 * Avoid a lock-order reversal.  Consequently,
-			 * the reported number of active pages may be
-			 * greater than the actual number.
-			 */
-			continue;
-		}
+		VM_OBJECT_WLOCK(object);
 		vm_object_clear_flag(object, OBJ_ACTIVE);
 		VM_OBJECT_WUNLOCK(object);
 	}
@@ -196,10 +189,9 @@
 	mtx_lock(&vm_object_list_mtx);
 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
 		/*
-		 * Perform unsynchronized reads on the object to avoid
-		 * a lock-order reversal.  In this case, the lack of
-		 * synchronization should not impair the accuracy of
-		 * the reported statistics.
+		 * Perform unsynchronized reads on the object.  In
+		 * this case, the lack of synchronization should not
+		 * impair the accuracy of the reported statistics.
 		 */
 		if ((object->flags & OBJ_FICTITIOUS) != 0) {
 			/*
Index: head/sys/vm/vm_object.c
===================================================================
--- head/sys/vm/vm_object.c
+++ head/sys/vm/vm_object.c
@@ -166,6 +166,8 @@
 	vm_object_t object;
 
 	object = (vm_object_t)mem;
+	KASSERT(object->ref_count == 0,
+	    ("object %p ref_count = %d", object, object->ref_count));
 	KASSERT(TAILQ_EMPTY(&object->memq),
 	    ("object %p has resident pages in its memq", object));
 	KASSERT(vm_radix_is_empty(&object->rtree),
@@ -187,6 +189,9 @@
 	KASSERT(object->shadow_count == 0,
 	    ("object %p shadow_count = %d",
 	    object, object->shadow_count));
+	KASSERT(object->type == OBJT_DEAD,
+	    ("object %p has non-dead type %d",
+	    object, object->type));
 }
 #endif
 
@@ -199,6 +204,8 @@
 	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);
 
 	/* These are true for any object that has been freed */
+	object->type = OBJT_DEAD;
+	object->ref_count = 0;
 	object->rtree.rt_root = 0;
 	object->rtree.rt_flags = 0;
 	object->paging_in_progress = 0;
@@ -206,6 +213,10 @@
 	object->shadow_count = 0;
 	object->cache.rt_root = 0;
 	object->cache.rt_flags = 0;
+
+	mtx_lock(&vm_object_list_mtx);
+	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
+	mtx_unlock(&vm_object_list_mtx);
 	return (0);
 }
 
@@ -252,10 +263,6 @@
 #if VM_NRESERVLEVEL > 0
 	LIST_INIT(&object->rvq);
 #endif
-
-	mtx_lock(&vm_object_list_mtx);
-	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
-	mtx_unlock(&vm_object_list_mtx);
 }
 
 /*
@@ -671,19 +678,9 @@
 {
 
 	/*
-	 * Remove the object from the global object list.
-	 */
-	mtx_lock(&vm_object_list_mtx);
-	TAILQ_REMOVE(&vm_object_list, object, object_list);
-	mtx_unlock(&vm_object_list_mtx);
-
-	/*
 	 * Release the allocation charge.
 	 */
 	if (object->cred != NULL) {
-		KASSERT(object->type == OBJT_DEFAULT ||
-		    object->type == OBJT_SWAP,
-		    ("%s: non-swap obj %p has cred", __func__, object));
 		swap_release_by_cred(object->charge, object->cred);
 		object->charge = 0;
 		crfree(object->cred);
@@ -788,6 +785,10 @@
 	if (__predict_false(!vm_object_cache_is_empty(object)))
 		vm_page_cache_free(object, 0, 0);
 
+	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
+	    object->type == OBJT_SWAP,
+	    ("%s: non-swap obj %p has cred", __func__, object));
+
 	/*
 	 * Let the pager know object is dead.
 	 */
@@ -1803,6 +1804,8 @@
 		KASSERT(backing_object->ref_count == 1, (
 "backing_object %p was somehow re-referenced during collapse!",
 		    backing_object));
+		backing_object->type = OBJT_DEAD;
+		backing_object->ref_count = 0;
 		VM_OBJECT_WUNLOCK(backing_object);
 		vm_object_destroy(backing_object);