Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -351,8 +351,10 @@
  * queue, and cleared when the dequeue request is processed.  A page may
  * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
  * is requested after the page is scheduled to be enqueued but before it is
- * actually inserted into the page queue.  The page lock must be held to set
- * this flag, and the queue lock for the page must be held to clear it.
+ * actually inserted into the page queue.  For allocated pages, the page lock
+ * must be held to set this flag, but it may be set by vm_page_free_prep()
+ * without the page lock held.  The page queue lock must be held to clear the
+ * PGA_DEQUEUE flag.
  *
  * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
  * in its page queue.  The page lock must be held to set this flag, and the
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -3175,7 +3175,11 @@
 	struct vm_pagequeue *pq;
 	int domain;
 
-	vm_page_assert_locked(m);
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("page %p is unmanaged", m));
+	KASSERT(mtx_owned(vm_page_lockptr(m)) ||
+	    (m->object == NULL && (m->aflags & PGA_DEQUEUE) != 0),
+	    ("missing synchronization for page %p", m));
 	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
 
 	domain = vm_phys_domain(m);
@@ -3197,8 +3201,9 @@
 
 	/*
 	 * The page may have been logically dequeued before we acquired the
-	 * page queue lock.  In this case, the page lock prevents the page
-	 * from being logically enqueued elsewhere.
+	 * page queue lock.  In this case, since we either hold the page lock
+	 * or the page is being freed, a different thread cannot be concurrently
+	 * enqueuing the page.
 	 */
 	if (__predict_true(m->queue == queue))
 		vm_pqbatch_process_page(pq, m);
@@ -3289,6 +3294,30 @@
 	vm_pqbatch_submit_page(m, queue);
 }
 
+/*
+ * A variant of vm_page_dequeue_deferred() that does not assert the page
+ * lock and is only to be called from vm_page_free_prep().  It is just an
+ * open-coded implementation of vm_page_dequeue_deferred().  Because the
+ * page is being freed, we can assume that nothing else is scheduling queue
+ * operations on this page, so we get for free the mutual exclusion that
+ * is otherwise provided by the page lock.
+ */
+static void
+vm_page_dequeue_deferred_free(vm_page_t m)
+{
+	uint8_t queue;
+
+	KASSERT(m->object == NULL, ("page %p has an object reference", m));
+
+	if ((m->aflags & PGA_DEQUEUE) != 0)
+		return;
+	atomic_thread_fence_acq();
+	if ((queue = m->queue) == PQ_NONE)
+		return;
+	vm_page_aflag_set(m, PGA_DEQUEUE);
+	vm_pqbatch_submit_page(m, queue);
+}
+
 /*
  * vm_page_dequeue:
  *
@@ -3474,7 +3503,7 @@
 	 * dequeue.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0)
-		vm_page_dequeue_deferred(m);
+		vm_page_dequeue_deferred_free(m);
 
 	m->valid = 0;
 	vm_page_undirty(m);