Index: share/man/man9/vm_page_wire.9
===================================================================
--- share/man/man9/vm_page_wire.9
+++ share/man/man9/vm_page_wire.9
@@ -51,7 +51,7 @@
 .Fn vm_page_wire
 and
 .Fn vm_page_wire_mapped
-function wire the page, prevent it from being reclaimed by the page
+functions wire the page, which prevents it from being reclaimed by the page
 daemon or when its containing object is destroyed.
 Both functions require that the page belong to an object.
 The
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -3064,10 +3064,8 @@
 {
 	pd_entry_t pde, *pdep;
 	pt_entry_t pte, PG_RW, PG_V;
-	vm_paddr_t pa;
 	vm_page_t m;
 
-	pa = 0;
 	m = NULL;
 	PG_RW = pmap_rw_bit(pmap);
 	PG_V = pmap_valid_bit(pmap);
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -784,8 +784,6 @@
 {
 	uint32_t *addr, nval, oval, qsmask;
 
-	vm_page_assert_locked(m);
-
 	fflags <<= VM_PAGE_AFLAG_SHIFT;
 	nflags <<= VM_PAGE_AFLAG_SHIFT;
 	newq <<= VM_PAGE_QUEUE_SHIFT;
@@ -905,13 +903,17 @@
 static inline u_int
 vm_page_drop(vm_page_t m, u_int val)
 {
+	u_int old;
 
 	/*
 	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
 	 * page structure are visible before it is freed.
 	 */
 	atomic_thread_fence_rel();
-	return (atomic_fetchadd_int(&m->ref_count, -val));
+	old = atomic_fetchadd_int(&m->ref_count, -val);
+	KASSERT(old != VPRC_BLOCKED,
+	    ("vm_page_drop: page %p has an invalid refcount value", m));
+	return (old);
 }
 
 /*
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -2583,16 +2583,23 @@
 				}
 
 				/*
-				 * Replace "m" with the new page.  For
-				 * vm_page_replace(), "m" must be busy
-				 * and dequeued.  Finally, change "m"
-				 * as if vm_page_free() was called.
+				 * Unmap the page and check for new
+				 * wirings that may have been acquired
+				 * through a pmap lookup.
 				 */
 				if (object->ref_count != 0 &&
 				    !vm_page_try_remove_all(m)) {
+					vm_page_free(m_new);
 					error = EBUSY;
 					goto unlock;
 				}
+
+				/*
+				 * Replace "m" with the new page.  For
+				 * vm_page_replace(), "m" must be busy
+				 * and dequeued.  Finally, change "m"
+				 * as if vm_page_free() was called.
+				 */
 				m_new->aflags = m->aflags &
 				    ~PGA_QUEUE_STATE_MASK;
 				KASSERT(m_new->oflags == VPO_UNMANAGED,
@@ -3294,13 +3301,18 @@
 
 	KASSERT(m->ref_count == 0, ("page %p has references", m));
 
-	if ((m->aflags & PGA_DEQUEUE) != 0)
-		return;
-	atomic_thread_fence_acq();
-	if ((queue = m->queue) == PQ_NONE)
-		return;
-	vm_page_aflag_set(m, PGA_DEQUEUE);
-	vm_page_pqbatch_submit(m, queue);
+	for (;;) {
+		if ((m->aflags & PGA_DEQUEUE) != 0)
+			return;
+		atomic_thread_fence_acq();
+		if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
+			return;
+		if (vm_page_pqstate_cmpset(m, queue, queue, PGA_DEQUEUE,
+		    PGA_DEQUEUE)) {
+			vm_page_pqbatch_submit(m, queue);
+			break;
+		}
+	}
 }
 
 /*
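
A side note on the vm_page_drop() hunk above: it pairs a release fence with an atomic fetch-and-add of -val so that all earlier stores to the page are visible before the reference is dropped, then asserts that the old count was not the reserved "blocked" sentinel. Below is a minimal userspace sketch of that pattern, using C11 atomics instead of the kernel's atomic(9) primitives; the REF_BLOCKED value, the struct page layout, and the function names are hypothetical stand-ins, not the kernel's definitions.

/*
 * Sketch of a lock-free reference drop: a release fence publishes all
 * earlier updates to the object before the count is decremented, and the
 * previous value is checked against a reserved sentinel.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define	REF_BLOCKED	0x40000000u	/* hypothetical stand-in for VPRC_BLOCKED */

struct page {
	_Atomic uint32_t ref_count;
};

static uint32_t
page_drop(struct page *p, uint32_t val)
{
	uint32_t old;

	/* Make all earlier stores to the page visible before the drop. */
	atomic_thread_fence(memory_order_release);
	old = atomic_fetch_sub_explicit(&p->ref_count, val,
	    memory_order_relaxed);
	assert(old != REF_BLOCKED);
	return (old);
}

int
main(void)
{
	struct page p = { .ref_count = 2 };

	/* The return value is the count held before the drop. */
	if (page_drop(&p, 1) == 1)
		printf("dropped the last reference\n");
	else
		printf("references remain: %u\n",
		    (unsigned)atomic_load(&p.ref_count));
	return (0);
}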