diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -120,6 +120,7 @@
 	counter_u64_t v_rforkpages;	/* (p) pages affected by rfork() */
 	counter_u64_t v_kthreadpages;	/* (p) ... and by kernel fork() */
 	counter_u64_t v_wire_count;	/* (p) pages wired down */
+	counter_u64_t v_nofree_count;	/* (p) permanently allocated pages */
 #define	VM_METER_NCOUNTERS	\
 	(offsetof(struct vmmeter, v_page_size) / sizeof(counter_u64_t))
 /*
@@ -174,6 +175,13 @@
 	return (VM_CNT_FETCH(v_wire_count));
 }
 
+static inline u_int
+vm_nofree_count(void)
+{
+
+	return (VM_CNT_FETCH(v_nofree_count));
+}
+
 /*
  * Return TRUE if we are under our severe low-free-pages threshold
  *
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -90,6 +90,7 @@
 	.v_rforkpages = EARLY_COUNTER,
 	.v_kthreadpages = EARLY_COUNTER,
 	.v_wire_count = EARLY_COUNTER,
+	.v_nofree_count = EARLY_COUNTER,
 };
 
 u_long __exclusive_cache_line vm_user_wire_count;
@@ -386,6 +387,7 @@
 VM_STATS_UINT(v_free_min, "Minimum low-free-pages threshold");
 VM_STATS_PROC(v_free_count, "Free pages", vm_free_count);
 VM_STATS_PROC(v_wire_count, "Wired pages", vm_wire_count);
+VM_STATS_PROC(v_nofree_count, "Permanently allocated pages", vm_nofree_count);
 VM_STATS_PROC(v_active_count, "Active pages", vm_active_count);
 VM_STATS_UINT(v_inactive_target, "Desired inactive pages");
 VM_STATS_PROC(v_inactive_count, "Inactive pages", vm_inactive_count);
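
For reference, a minimal userland sketch of reading the new counter, assuming the VM_STATS_PROC entry above is exported under the vm.stats.vm sysctl tree like its neighbors (v_wire_count, v_free_count, etc.):

/*
 * Read vm.stats.vm.v_nofree_count via sysctl(3).
 * Hypothetical example; the sysctl name assumes the standard
 * vm.stats.vm prefix used by the other VM stats counters.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	u_int nofree;
	size_t len = sizeof(nofree);

	if (sysctlbyname("vm.stats.vm.v_nofree_count", &nofree, &len,
	    NULL, 0) != 0) {
		perror("sysctlbyname");
		return (EXIT_FAILURE);
	}
	printf("permanently allocated pages: %u\n", nofree);
	return (EXIT_SUCCESS);
}

The same value should then also be visible from the command line with sysctl vm.stats.vm.v_nofree_count.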