Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/vm_meter.c
Show First 20 Lines • Show All 47 Lines • ▼ Show 20 Lines | |||||
#include <sys/sx.h> | #include <sys/sx.h> | ||||
#include <sys/vmmeter.h> | #include <sys/vmmeter.h> | ||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <vm/vm.h> | #include <vm/vm.h> | ||||
#include <vm/vm_page.h> | #include <vm/vm_page.h> | ||||
#include <vm/vm_extern.h> | #include <vm/vm_extern.h> | ||||
#include <vm/vm_param.h> | #include <vm/vm_param.h> | ||||
#include <vm/vm_phys.h> | |||||
#include <vm/vm_pagequeue.h> | |||||
#include <vm/pmap.h> | #include <vm/pmap.h> | ||||
#include <vm/vm_map.h> | #include <vm/vm_map.h> | ||||
#include <vm/vm_object.h> | #include <vm/vm_object.h> | ||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
struct vmmeter __exclusive_cache_line vm_cnt = { | struct vmmeter __exclusive_cache_line vm_cnt = { | ||||
.v_swtch = EARLY_COUNTER, | .v_swtch = EARLY_COUNTER, | ||||
.v_trap = EARLY_COUNTER, | .v_trap = EARLY_COUNTER, | ||||
▲ Show 20 Lines • Show All 144 Lines • ▼ Show 20 Lines | if (p->p_state != PRS_NEW) { | ||||
case TDS_INHIBITED: | case TDS_INHIBITED: | ||||
if (TD_IS_SWAPPED(td)) | if (TD_IS_SWAPPED(td)) | ||||
total.t_sw++; | total.t_sw++; | ||||
else if (TD_IS_SLEEPING(td)) { | else if (TD_IS_SLEEPING(td)) { | ||||
if (td->td_priority <= PZERO) | if (td->td_priority <= PZERO) | ||||
total.t_dw++; | total.t_dw++; | ||||
else | else | ||||
total.t_sl++; | total.t_sl++; | ||||
if (td->td_wchan == | |||||
&vm_cnt.v_free_count) | |||||
total.t_pw++; | |||||
} | } | ||||
break; | break; | ||||
case TDS_CAN_RUN: | case TDS_CAN_RUN: | ||||
total.t_sw++; | total.t_sw++; | ||||
break; | break; | ||||
jeff: I forgot that I need to fix this. There are now many wait channels. I'm open to suggestions. | |||||
Done Inline ActionsIf we counted sleepers in vm_wait_domain() using a per-domain var, we could write t_pw as a sum of vm_min_waiters, vm_severe_waiters and the per-domain sleeper count. Such a count might be useful elsewhere (OOM killer logic perhaps). markj: If we counted sleepers in vm_wait_domain() using a per-domain var, we could write t_pw as a sum… | |||||
Done Inline ActionsYes, that's probably good. I'll make that change. jeff: Yes, that's probably good. I'll make that change. | |||||
case TDS_RUNQ: | case TDS_RUNQ: | ||||
case TDS_RUNNING: | case TDS_RUNNING: | ||||
total.t_rq++; | total.t_rq++; | ||||
break; | break; | ||||
default: | default: | ||||
break; | break; | ||||
} | } | ||||
thread_unlock(td); | thread_unlock(td); | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | if (object->shadow_count > 1) { | ||||
total.t_rmshr += object->resident_page_count; | total.t_rmshr += object->resident_page_count; | ||||
if (is_object_active(object)) { | if (is_object_active(object)) { | ||||
total.t_avmshr += object->size; | total.t_avmshr += object->size; | ||||
total.t_armshr += object->resident_page_count; | total.t_armshr += object->resident_page_count; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
mtx_unlock(&vm_object_list_mtx); | mtx_unlock(&vm_object_list_mtx); | ||||
total.t_free = vm_cnt.v_free_count; | total.t_pw = vm_wait_count(); | ||||
total.t_free = vm_free_count(); | |||||
#if defined(COMPAT_FREEBSD11) | #if defined(COMPAT_FREEBSD11) | ||||
/* sysctl(8) allocates twice as much memory as reported by sysctl(3) */ | /* sysctl(8) allocates twice as much memory as reported by sysctl(3) */ | ||||
if (curproc->p_osrel < P_OSREL_VMTOTAL64 && (req->oldlen == | if (curproc->p_osrel < P_OSREL_VMTOTAL64 && (req->oldlen == | ||||
sizeof(total11) || req->oldlen == 2 * sizeof(total11))) { | sizeof(total11) || req->oldlen == 2 * sizeof(total11))) { | ||||
bzero(&total11, sizeof(total11)); | bzero(&total11, sizeof(total11)); | ||||
total11.t_rq = total.t_rq; | total11.t_rq = total.t_rq; | ||||
total11.t_dw = total.t_dw; | total11.t_dw = total.t_dw; | ||||
total11.t_pw = total.t_pw; | total11.t_pw = total.t_pw; | ||||
Show All 39 Lines | if (req->oldlen == sizeof(val32)) { | ||||
return (SYSCTL_OUT(req, &val32, sizeof(val32))); | return (SYSCTL_OUT(req, &val32, sizeof(val32))); | ||||
} | } | ||||
#endif | #endif | ||||
return (SYSCTL_OUT(req, &val, sizeof(val))); | return (SYSCTL_OUT(req, &val, sizeof(val))); | ||||
} | } | ||||
#define VM_STATS(parent, var, descr) \ | #define VM_STATS(parent, var, descr) \ | ||||
SYSCTL_OID(parent, OID_AUTO, var, CTLTYPE_U64 | CTLFLAG_MPSAFE | \ | SYSCTL_OID(parent, OID_AUTO, var, CTLTYPE_U64 | CTLFLAG_MPSAFE | \ | ||||
CTLFLAG_RD, &vm_cnt.var, 0, sysctl_handle_vmstat, "QU", descr); | CTLFLAG_RD, &vm_cnt.var, 0, sysctl_handle_vmstat, "QU", descr) | ||||
#define VM_STATS_VM(var, descr) VM_STATS(_vm_stats_vm, var, descr) | #define VM_STATS_VM(var, descr) VM_STATS(_vm_stats_vm, var, descr) | ||||
#define VM_STATS_SYS(var, descr) VM_STATS(_vm_stats_sys, var, descr) | #define VM_STATS_SYS(var, descr) VM_STATS(_vm_stats_sys, var, descr) | ||||
VM_STATS_SYS(v_swtch, "Context switches"); | VM_STATS_SYS(v_swtch, "Context switches"); | ||||
VM_STATS_SYS(v_trap, "Traps"); | VM_STATS_SYS(v_trap, "Traps"); | ||||
VM_STATS_SYS(v_syscall, "System calls"); | VM_STATS_SYS(v_syscall, "System calls"); | ||||
VM_STATS_SYS(v_intr, "Device interrupts"); | VM_STATS_SYS(v_intr, "Device interrupts"); | ||||
VM_STATS_SYS(v_soft, "Software interrupts"); | VM_STATS_SYS(v_soft, "Software interrupts"); | ||||
Show All 23 Lines | |||||
VM_STATS_VM(v_vforks, "Number of vfork() calls"); | VM_STATS_VM(v_vforks, "Number of vfork() calls"); | ||||
VM_STATS_VM(v_rforks, "Number of rfork() calls"); | VM_STATS_VM(v_rforks, "Number of rfork() calls"); | ||||
VM_STATS_VM(v_kthreads, "Number of fork() calls by kernel"); | VM_STATS_VM(v_kthreads, "Number of fork() calls by kernel"); | ||||
VM_STATS_VM(v_forkpages, "VM pages affected by fork()"); | VM_STATS_VM(v_forkpages, "VM pages affected by fork()"); | ||||
VM_STATS_VM(v_vforkpages, "VM pages affected by vfork()"); | VM_STATS_VM(v_vforkpages, "VM pages affected by vfork()"); | ||||
VM_STATS_VM(v_rforkpages, "VM pages affected by rfork()"); | VM_STATS_VM(v_rforkpages, "VM pages affected by rfork()"); | ||||
VM_STATS_VM(v_kthreadpages, "VM pages affected by fork() by kernel"); | VM_STATS_VM(v_kthreadpages, "VM pages affected by fork() by kernel"); | ||||
static int | |||||
sysctl_handle_vmstat_proc(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
u_int (*fn)(void); | |||||
uint32_t val; | |||||
fn = arg1; | |||||
val = fn(); | |||||
return (SYSCTL_OUT(req, &val, sizeof(val))); | |||||
} | |||||
/*
 * Register a read-only vm.stats.vm node whose value comes from the
 * accessor "fn" (a u_int (*)(void)), invoked on each read by
 * sysctl_handle_vmstat_proc and reported as a uint32_t.
 */
#define VM_STATS_PROC(var, descr, fn) \
    SYSCTL_OID(_vm_stats_vm, OID_AUTO, var, CTLTYPE_U32 | CTLFLAG_MPSAFE | \
    CTLFLAG_RD, fn, 0, sysctl_handle_vmstat_proc, "IU", descr)
#define VM_STATS_UINT(var, descr) \ | #define VM_STATS_UINT(var, descr) \ | ||||
SYSCTL_UINT(_vm_stats_vm, OID_AUTO, var, CTLFLAG_RD, &vm_cnt.var, 0, descr) | SYSCTL_UINT(_vm_stats_vm, OID_AUTO, var, CTLFLAG_RD, &vm_cnt.var, 0, descr) | ||||
VM_STATS_UINT(v_page_size, "Page size in bytes"); | VM_STATS_UINT(v_page_size, "Page size in bytes"); | ||||
VM_STATS_UINT(v_page_count, "Total number of pages in system"); | VM_STATS_UINT(v_page_count, "Total number of pages in system"); | ||||
VM_STATS_UINT(v_free_reserved, "Pages reserved for deadlock"); | VM_STATS_UINT(v_free_reserved, "Pages reserved for deadlock"); | ||||
VM_STATS_UINT(v_free_target, "Pages desired free"); | VM_STATS_UINT(v_free_target, "Pages desired free"); | ||||
VM_STATS_UINT(v_free_min, "Minimum low-free-pages threshold"); | VM_STATS_UINT(v_free_min, "Minimum low-free-pages threshold"); | ||||
VM_STATS_UINT(v_free_count, "Free pages"); | VM_STATS_PROC(v_free_count, "Free pages", vm_free_count); | ||||
VM_STATS_UINT(v_wire_count, "Wired pages"); | VM_STATS_UINT(v_wire_count, "Wired pages"); | ||||
VM_STATS_UINT(v_active_count, "Active pages"); | VM_STATS_PROC(v_active_count, "Active pages", vm_active_count); | ||||
VM_STATS_UINT(v_inactive_target, "Desired inactive pages"); | VM_STATS_UINT(v_inactive_target, "Desired inactive pages"); | ||||
VM_STATS_UINT(v_inactive_count, "Inactive pages"); | VM_STATS_PROC(v_inactive_count, "Inactive pages", vm_inactive_count); | ||||
VM_STATS_UINT(v_laundry_count, "Pages eligible for laundering"); | VM_STATS_PROC(v_laundry_count, "Pages eligible for laundering", | ||||
vm_laundry_count); | |||||
VM_STATS_UINT(v_pageout_free_min, "Min pages reserved for kernel"); | VM_STATS_UINT(v_pageout_free_min, "Min pages reserved for kernel"); | ||||
VM_STATS_UINT(v_interrupt_free_min, "Reserved pages for interrupt code"); | VM_STATS_UINT(v_interrupt_free_min, "Reserved pages for interrupt code"); | ||||
VM_STATS_UINT(v_free_severe, "Severe page depletion point"); | VM_STATS_UINT(v_free_severe, "Severe page depletion point"); | ||||
#ifdef COMPAT_FREEBSD11 | #ifdef COMPAT_FREEBSD11 | ||||
/* | /* | ||||
* Provide compatibility sysctls for the benefit of old utilities which exit | * Provide compatibility sysctls for the benefit of old utilities which exit | ||||
* with an error if they cannot be found. | * with an error if they cannot be found. | ||||
*/ | */ | ||||
SYSCTL_UINT(_vm_stats_vm, OID_AUTO, v_cache_count, CTLFLAG_RD, | SYSCTL_UINT(_vm_stats_vm, OID_AUTO, v_cache_count, CTLFLAG_RD, | ||||
SYSCTL_NULL_UINT_PTR, 0, "Dummy for compatibility"); | SYSCTL_NULL_UINT_PTR, 0, "Dummy for compatibility"); | ||||
SYSCTL_UINT(_vm_stats_vm, OID_AUTO, v_tcached, CTLFLAG_RD, | SYSCTL_UINT(_vm_stats_vm, OID_AUTO, v_tcached, CTLFLAG_RD, | ||||
SYSCTL_NULL_UINT_PTR, 0, "Dummy for compatibility"); | SYSCTL_NULL_UINT_PTR, 0, "Dummy for compatibility"); | ||||
#endif | #endif | ||||
u_int | |||||
vm_free_count(void) | |||||
{ | |||||
u_int v; | |||||
int i; | |||||
v = 0; | |||||
for (i = 0; i < vm_ndomains; i++) | |||||
v += vm_dom[i].vmd_free_count; | |||||
return (v); | |||||
} | |||||
static | |||||
u_int | |||||
Done Inline ActionsThese should be consolidated into a single iterator. jeff: These should be consolidated into a single iterator. | |||||
Done Inline ActionsYou could make use of the "arg" parameter to SYSCTL_PROC. markj: You could make use of the "arg" parameter to SYSCTL_PROC. | |||||
vm_pagequeue_count(int pq) | |||||
{ | |||||
u_int v; | |||||
int i; | |||||
v = 0; | |||||
for (i = 0; i < vm_ndomains; i++) | |||||
v += vm_dom[i].vmd_pagequeues[pq].pq_cnt; | |||||
return (v); | |||||
} | |||||
u_int | |||||
vm_active_count(void) | |||||
{ | |||||
return vm_pagequeue_count(PQ_ACTIVE); | |||||
} | |||||
u_int | |||||
vm_inactive_count(void) | |||||
{ | |||||
return vm_pagequeue_count(PQ_INACTIVE); | |||||
} | |||||
u_int | |||||
vm_laundry_count(void) | |||||
{ | |||||
return vm_pagequeue_count(PQ_LAUNDRY); | |||||
} | |||||
I forgot that I need to fix this. There are now many wait channels. I'm open to suggestions.