Index: sys/vm/vm_meter.c
===================================================================
--- sys/vm/vm_meter.c
+++ sys/vm/vm_meter.c
@@ -366,7 +366,6 @@
 VM_STATS_VM(v_intrans, "In transit page faults");
 VM_STATS_VM(v_reactivated, "Pages reactivated by pagedaemon");
 VM_STATS_VM(v_pdwakeups, "Pagedaemon wakeups");
-VM_STATS_VM(v_pdpages, "Pages analyzed by pagedaemon");
 VM_STATS_VM(v_pdshortfalls, "Page reclamation shortfalls");
 VM_STATS_VM(v_dfree, "Pages freed by pagedaemon");
 VM_STATS_VM(v_pfree, "Pages freed by exiting processes");
@@ -438,8 +437,7 @@
 	return (v);
 }
 
-static
-u_int
+static u_int
 vm_pagequeue_count(int pq)
 {
 	u_int v;
@@ -456,22 +454,41 @@
 vm_active_count(void)
 {
 
-	return vm_pagequeue_count(PQ_ACTIVE);
+	return (vm_pagequeue_count(PQ_ACTIVE));
 }
 
 u_int
 vm_inactive_count(void)
 {
 
-	return vm_pagequeue_count(PQ_INACTIVE);
+	return (vm_pagequeue_count(PQ_INACTIVE));
 }
 
 u_int
 vm_laundry_count(void)
 {
 
-	return vm_pagequeue_count(PQ_LAUNDRY);
+	return (vm_pagequeue_count(PQ_LAUNDRY));
+}
+
+static int
+sysctl_vm_pdpages(SYSCTL_HANDLER_ARGS)
+{
+	struct vm_pagequeue *pq;
+	uint64_t ret;
+	int dom, i;
+
+	ret = counter_u64_fetch(vm_cnt.v_pdpages);
+	for (dom = 0; dom < vm_ndomains; dom++)
+		for (i = 0; i < PQ_COUNT; i++) {
+			pq = &VM_DOMAIN(dom)->vmd_pagequeues[i];
+			ret += pq->pq_pdpages;
+		}
+	return (SYSCTL_OUT(req, &ret, sizeof(ret)));
 }
+SYSCTL_PROC(_vm_stats_vm, OID_AUTO, v_pdpages,
+    CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_vm_pdpages, "QU",
+    "Pages analyzed by pagedaemon");
 
 static void
 vm_domain_stats_init(struct vm_domain *vmd, struct sysctl_oid *parent)
@@ -488,15 +505,31 @@
 	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
 	    "active", CTLFLAG_RD, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_cnt, 0,
 	    "Active pages");
+	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
+	    "actpdpgs", CTLFLAG_RD,
+	    &vmd->vmd_pagequeues[PQ_ACTIVE].pq_pdpages, 0,
+	    "Active pages scanned by the page daemon");
 	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
 	    "inactive", CTLFLAG_RD, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt, 0,
 	    "Inactive pages");
+	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
+	    "inactpdpgs", CTLFLAG_RD,
+	    &vmd->vmd_pagequeues[PQ_INACTIVE].pq_pdpages, 0,
+	    "Inactive pages scanned by the page daemon");
 	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
 	    "laundry", CTLFLAG_RD, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt, 0,
 	    "laundry pages");
+	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
+	    "laundpdpgs", CTLFLAG_RD,
+	    &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_pdpages, 0,
+	    "Laundry pages scanned by the page daemon");
 	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
 	    "unswappable", CTLFLAG_RD,
 	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt, 0, "Unswappable pages");
+	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
+	    "unswppdpgs", CTLFLAG_RD,
+	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_pdpages, 0,
+	    "Unswappable pages scanned by the page daemon");
 	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
 	    "inactive_target", CTLFLAG_RD, &vmd->vmd_inactive_target, 0,
 	    "Target inactive pages");
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -428,6 +428,7 @@
 		TAILQ_INIT(&pq->pq_pl);
 		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
 		    MTX_DEF | MTX_DUPOK);
+		pq->pq_pdpages = 0;
 	}
 	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
 	snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c
+++ sys/vm/vm_pageout.c
@@ -731,13 +731,17 @@
 		KASSERT(queue_locked, ("unlocked laundry queue"));
 		KASSERT(vm_page_in_laundry(m), ("page %p has an inconsistent queue", m));
+
+		pq->pq_pdpages++;
 		next = TAILQ_NEXT(m, plinks.q);
 		if ((m->flags & PG_MARKER) != 0)
 			continue;
+
 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
 		    ("PG_FICTITIOUS page %p cannot be in laundry queue", m));
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 		    ("VPO_UNMANAGED page %p cannot be in laundry queue", m));
+
 		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
 			vm_page_unlock(m);
 			continue;
 		}
@@ -1170,13 +1174,9 @@
 		KASSERT(queue_locked, ("unlocked inactive queue"));
 		KASSERT(vm_page_inactive(m), ("Inactive queue %p", m));
 
-		VM_CNT_INC(v_pdpages);
+		pq->pq_pdpages++;
 		next = TAILQ_NEXT(m, plinks.q);
-
-		/*
-		 * skip marker pages
-		 */
-		if (m->flags & PG_MARKER)
+		if ((m->flags & PG_MARKER) != 0)
 			continue;
 
 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
@@ -1429,24 +1429,22 @@
 	    scanned++) {
 		KASSERT(m->queue == PQ_ACTIVE,
 		    ("vm_pageout_scan: page %p isn't active", m));
+
+		pq->pq_pdpages++;
 		next = TAILQ_NEXT(m, plinks.q);
 		if ((m->flags & PG_MARKER) != 0)
 			continue;
+
 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
 		    ("Fictitious page %p cannot be in active queue", m));
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 		    ("Unmanaged page %p cannot be in active queue", m));
+
 		if (!vm_pageout_page_lock(m, &next)) {
 			vm_page_unlock(m);
 			continue;
 		}
 
-		/*
-		 * The count for page daemon pages is updated after checking
-		 * the page for eligibility.
-		 */
-		VM_CNT_INC(v_pdpages);
-
 		/*
 		 * Wired pages are dequeued lazily.
 		 */
Index: sys/vm/vm_pagequeue.h
===================================================================
--- sys/vm/vm_pagequeue.h
+++ sys/vm/vm_pagequeue.h
@@ -71,6 +71,7 @@
 	struct pglist	pq_pl;
 	int		pq_cnt;
 	const char	* const pq_name;
+	uint64_t	pq_pdpages;
 } __aligned(CACHE_LINE_SIZE);
 
 #include
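Not part of the patch itself: the SYSCTL_PROC added above exports the aggregated counter as vm.stats.vm.v_pdpages, so a minimal userland sketch like the one below, assuming a kernel with this change applied, should print the sum of the old counter_u64 value and every queue's pq_pdpages. The program name and output wording are illustrative only.

/*
 * Sketch: read the aggregated pagedaemon scan counter exported by
 * sysctl_vm_pdpages().  The OID name comes from the SYSCTL_PROC
 * declaration in vm_meter.c above; everything else is illustrative.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t pdpages;
	size_t len;

	len = sizeof(pdpages);
	if (sysctlbyname("vm.stats.vm.v_pdpages", &pdpages, &len, NULL, 0) != 0)
		err(1, "sysctlbyname");
	printf("pages analyzed by the page daemon: %ju\n", (uintmax_t)pdpages);
	return (0);
}

The per-queue counters registered in vm_domain_stats_init() appear as actpdpgs, inactpdpgs, laundpdpgs, and unswppdpgs under the per-domain stats node created from the parent oid passed to that function; the diff does not show that parent path, so it is not hard-coded here.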