Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -229,7 +229,6 @@
 			void		*zone;
 		} uma;
 	} plinks;
-	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
 	vm_object_t object;		/* which object am I in (O) */
 	vm_pindex_t pindex;		/* offset into object (O,P) */
 	vm_paddr_t phys_addr;		/* physical address of page (C) */
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -341,7 +341,7 @@
 	vm_domain_free_unlock(vmd);
 	if (found) {
 		vm_domain_freecnt_inc(vmd, -1);
-		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
+		TAILQ_INSERT_TAIL(&blacklist_head, m, plinks.q);
 		if (verbose)
 			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
 	}
@@ -411,7 +411,7 @@
 	if (error != 0)
 		return (error);
 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
-	TAILQ_FOREACH(m, &blacklist_head, listq) {
+	TAILQ_FOREACH(m, &blacklist_head, plinks.q) {
 		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
 		    (uintmax_t)m->phys_addr);
 		first = 0;
@@ -2470,6 +2470,11 @@
 	}
 
 found:
+	/*
+	 * Suppose that the page comes from the free page cache.  Then, the
+	 * page could still be in a paging queue because vm_page_zone_import()
+	 * had to obtain pages from a different pool.
+	 */
 	vm_page_dequeue(m);
 	vm_page_alloc_check(m);
 
@@ -2536,17 +2541,18 @@
 			return (NULL);
 		}
 		m->ref_count = count - 1;
-		TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m, listq);
+		TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m, plinks.q);
 		VM_CNT_ADD(v_nofree_count, count);
 	}
 	m = TAILQ_FIRST(&vmd->vmd_nofreeq);
-	TAILQ_REMOVE(&vmd->vmd_nofreeq, m, listq);
+	TAILQ_REMOVE(&vmd->vmd_nofreeq, m, plinks.q);
 	if (m->ref_count > 0) {
 		vm_page_t m_next;
 
 		m_next = &m[1];
+		vm_page_dequeue(m_next);
 		m_next->ref_count = m->ref_count - 1;
-		TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m_next, listq);
+		TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m_next, plinks.q);
 		m->ref_count = 0;
 	}
 	vm_domain_free_unlock(vmd);
@@ -2566,7 +2572,7 @@
 {
 	vm_domain_free_lock(vmd);
 	MPASS(m->ref_count == 0);
-	TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m, listq);
+	TAILQ_INSERT_HEAD(&vmd->vmd_nofreeq, m, plinks.q);
 	vm_domain_free_unlock(vmd);
 	VM_CNT_ADD(v_nofree_count, 1);
 }
@@ -3971,7 +3977,7 @@
 
 	old = vm_page_astate_load(m);
 	do {
-		if (old.queue == PQ_NONE) {
+		if (__predict_true(old.queue == PQ_NONE)) {
 			KASSERT((old.flags & PGA_QUEUE_STATE_MASK) == 0,
 			    ("%s: page %p has unexpected queue state",
 			    __func__, m));
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -393,13 +393,14 @@
 vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int pool,
     int tail)
 {
-
+	if (__predict_false(vm_page_astate_load(m).queue != PQ_NONE))
+		vm_page_dequeue(m);
 	m->order = order;
 	m->pool = pool;
 	if (tail)
-		TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
+		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
 	else
-		TAILQ_INSERT_HEAD(&fl[order].pl, m, listq);
+		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
 	fl[order].lcnt++;
 }
 
@@ -407,7 +408,7 @@
 vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
 {
 
-	TAILQ_REMOVE(&fl[order].pl, m, listq);
+	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
 	fl[order].lcnt--;
 	m->order = VM_NFREEORDER;
 }
 
@@ -1582,7 +1583,7 @@
 	 * check if there are enough free blocks starting at a properly aligned
 	 * block.  Thus, no block is checked for free-ness more than twice.
 	 */
-	TAILQ_FOREACH(m, &fl[max_order].pl, listq) {
+	TAILQ_FOREACH(m, &fl[max_order].pl, plinks.q) {
 		/*
 		 * Skip m unless it is first in a sequence of free max page
 		 * blocks >= low in its segment.
@@ -1655,7 +1656,7 @@
 	for (oind = order; oind < VM_NFREEORDER; oind++) {
 		for (pind = vm_default_freepool; pind < VM_NFREEPOOL; pind++) {
 			fl = (*queues)[pind];
-			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
+			TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
 				/*
 				 * Determine if the address range starting at pa
 				 * is within the given range, satisfies the
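For readers outside the tree, the core idea of the patch: free-list, blacklist, and nofree-queue linkage moves from the dedicated listq entry onto plinks.q, the queue entry already inside vm_page's plinks union, so listq can be deleted and struct vm_page shrinks by one TAILQ_ENTRY (two pointers). Since plinks is shared, a page must not be using the union for another role while it sits on a freelist, which is why vm_freelist_add() and vm_page_alloc_nofree_domain() now call vm_page_dequeue() first. Below is a minimal standalone sketch of the union-linkage pattern; toy_page, toy_pglist, and freelist are hypothetical names for illustration, not FreeBSD code.

/*
 * Sketch only: mutually exclusive list memberships sharing a single
 * TAILQ_ENTRY inside a union, mirroring the patch's reuse of plinks.q.
 */
#include <sys/queue.h>
#include <stdio.h>

struct toy_page {
	union {
		TAILQ_ENTRY(toy_page) q; /* freelist OR paging queue, never both */
		void	*zone;		 /* stand-in for the union's other arms */
	} plinks;
	unsigned long phys_addr;
};

TAILQ_HEAD(toy_pglist, toy_page);

int
main(void)
{
	struct toy_pglist freelist = TAILQ_HEAD_INITIALIZER(freelist);
	struct toy_page pages[4];
	struct toy_page *m;

	for (int i = 0; i < 4; i++) {
		pages[i].phys_addr = 0x1000UL * (unsigned long)i;
		/* Same field the patch switches to: plinks.q. */
		TAILQ_INSERT_TAIL(&freelist, &pages[i], plinks.q);
	}
	TAILQ_FOREACH(m, &freelist, plinks.q)
		printf("free page at pa %#lx\n", m->phys_addr);
	return (0);
}

The pattern is sound exactly when the memberships are mutually exclusive in time; the vm_page_dequeue() calls added by the patch enforce that invariant before a page is threaded onto a freelist.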
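On the __predict_true()/__predict_false() annotations added in the vm_page.c and vm_phys.c hunks: these are FreeBSD's branch hints from sys/cdefs.h, built on the compiler's __builtin_expect(), and they change only code layout, not semantics. A simplified rendering of the idea follows; the my_predict_* macro names and queue_is_none variable are local stand-ins, and sys/cdefs.h has the authoritative definitions.

#include <stdio.h>

/*
 * Simplified stand-ins for FreeBSD's branch hints (see sys/cdefs.h);
 * __builtin_expect() tells GCC/Clang which way a branch usually goes,
 * so the common case is laid out as the fall-through path.
 */
#define my_predict_true(exp)	__builtin_expect((exp), 1)
#define my_predict_false(exp)	__builtin_expect((exp), 0)

int
main(void)
{
	int queue_is_none = 1;	/* stand-in for old.queue == PQ_NONE */

	if (my_predict_true(queue_is_none))
		printf("fast path: page is already dequeued\n");
	else
		printf("slow path: dequeue the page first\n");
	return (0);
}

In the patch, the expected case is that a page reaching vm_freelist_add() is already off the paging queues, so the dequeue check is marked unlikely and the PQ_NONE test in vm_page.c is marked likely.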