Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -552,7 +552,6 @@
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
-struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
 vm_page_t vm_page_prev(vm_page_t m);
 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -3056,21 +3056,15 @@
 	mtx_unlock(&vm_domainset_lock);
 }
 
-struct vm_pagequeue *
+static struct vm_pagequeue *
 vm_page_pagequeue(vm_page_t m)
 {
-
-	return (&vm_pagequeue_domain(m)->vmd_pagequeues[m->queue]);
-}
-
-static struct mtx *
-vm_page_pagequeue_lockptr(vm_page_t m)
-{
 	uint8_t queue;
 
 	if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
 		return (NULL);
-	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue].pq_mutex);
+	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
 }
 
 static inline void
@@ -3093,10 +3087,8 @@
 	    m, pq, qflags));
 
 	if ((qflags & PGA_DEQUEUE) != 0) {
-		if (__predict_true((qflags & PGA_ENQUEUED) != 0)) {
-			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
-			vm_pagequeue_cnt_dec(pq);
-		}
+		if (__predict_true((qflags & PGA_ENQUEUED) != 0))
+			vm_pagequeue_remove(pq, m);
 		vm_page_dequeue_complete(m);
 	} else if ((qflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) {
 		if ((qflags & PGA_ENQUEUED) != 0)
@@ -3299,16 +3291,15 @@
 void
 vm_page_dequeue(vm_page_t m)
 {
-	struct mtx *lock, *lock1;
 	struct vm_pagequeue *pq;
 	uint8_t aflags;
 
-	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->order == VM_NFREEORDER,
+	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->object == NULL,
 	    ("page %p is allocated and unlocked", m));
 
 	for (;;) {
-		lock = vm_page_pagequeue_lockptr(m);
-		if (lock == NULL) {
+		pq = vm_page_pagequeue(m);
+		if (pq == NULL) {
 			/*
 			 * A thread may be concurrently executing
 			 * vm_page_dequeue_complete().  Ensure that all queue
@@ -3329,25 +3320,21 @@
 			cpu_spinwait();
 			continue;
 		}
-		mtx_lock(lock);
-		if ((lock1 = vm_page_pagequeue_lockptr(m)) == lock)
+		vm_pagequeue_lock(pq);
+		if (pq == vm_page_pagequeue(m))
 			break;
-		mtx_unlock(lock);
-		lock = lock1;
+		vm_pagequeue_unlock(pq);
 	}
-	KASSERT(lock == vm_page_pagequeue_lockptr(m),
+	KASSERT(pq == vm_page_pagequeue(m),
 	    ("%s: page %p migrated directly between queues", __func__, m));
 	KASSERT((m->aflags & PGA_DEQUEUE) != 0 ||
 	    mtx_owned(vm_page_lockptr(m)),
 	    ("%s: queued unlocked page %p", __func__, m));
 
-	if ((m->aflags & PGA_ENQUEUED) != 0) {
-		pq = vm_page_pagequeue(m);
-		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
-		vm_pagequeue_cnt_dec(pq);
-	}
+	if ((m->aflags & PGA_ENQUEUED) != 0)
+		vm_pagequeue_remove(pq, m);
 	vm_page_dequeue_complete(m);
-	mtx_unlock(lock);
+	vm_pagequeue_unlock(pq);
 }
 
 /*
Index: sys/vm/vm_pagequeue.h
===================================================================
--- sys/vm/vm_pagequeue.h
+++ sys/vm/vm_pagequeue.h
@@ -198,6 +198,14 @@
 #define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
 #define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
 
+static inline void
+vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
+{
+
+	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
+	vm_pagequeue_cnt_dec(pq);
+}
+
 static inline void
 vm_batchqueue_init(struct vm_batchqueue *bq)
 {
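
Note: the retry loop in vm_page_dequeue() above is a lock-and-revalidate
idiom. Because vm_page_pagequeue() can now return NULL and the page's queue
membership can change while the lock is being acquired, the caller loads the
queue, locks it, and re-checks that the page still belongs to that queue
before mutating it. Below is a minimal userspace sketch of the same pattern
using pthreads; the queue/item types and item_dequeue() are hypothetical
stand-ins for illustration only, not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical analog of a page queue: a lock protecting a member count. */
struct queue {
	pthread_mutex_t	lock;
	int		cnt;
};

struct item {
	_Atomic(struct queue *) queue;	/* NULL when not enqueued */
};

/*
 * Remove "it" from whatever queue it is on, if any.  The queue pointer can
 * change concurrently, so it must be re-checked after the lock is held,
 * just as vm_page_dequeue() re-checks vm_page_pagequeue(m).
 */
static void
item_dequeue(struct item *it)
{
	struct queue *q;

	for (;;) {
		q = atomic_load(&it->queue);
		if (q == NULL)
			return;		/* not on any queue */
		pthread_mutex_lock(&q->lock);
		if (q == atomic_load(&it->queue))
			break;		/* still on q; the lock pins it */
		/* The item migrated while we waited for the lock; retry. */
		pthread_mutex_unlock(&q->lock);
	}
	q->cnt--;			/* analog of vm_pagequeue_remove() */
	atomic_store(&it->queue, NULL);
	pthread_mutex_unlock(&q->lock);
}

Passing the validated queue pointer around (rather than re-deriving a lock
pointer, as the removed vm_page_pagequeue_lockptr() did) is what lets the
patch fold the TAILQ_REMOVE/count bookkeeping into one vm_pagequeue_remove()
helper.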