Index: vm/vm_fault.c
===================================================================
--- vm/vm_fault.c
+++ vm/vm_fault.c
@@ -132,7 +132,7 @@
 static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
     int ahead);
 static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
-    int backward, int forward, bool obj_locked);
+    int backward, int forward, bool obj_locked, int faultcount);
 
 static inline void
 release_page(struct faultstate *fs)
@@ -323,7 +323,7 @@
 	vm_fault_fill_hold(m_hold, m);
 	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
 	if (psind == 0 && !wired)
-		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
+		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true, 0);
 	VM_OBJECT_RUNLOCK(fs->first_object);
 	vm_map_lookup_done(fs->map, fs->entry);
 	curthread->td_ru.ru_minflt++;
@@ -1181,6 +1181,9 @@
 	 */
 	vm_object_pip_wakeup(fs.object);
 	VM_OBJECT_WUNLOCK(fs.object);
+	if (faultcount == 0)
+		faultcount = -1;	/* sentinel: tag this path for vm_fault_prefault() */
+
 	/*
 	 * Only use the new page below...
 	 */
@@ -1289,7 +1292,7 @@
 	    wired == 0)
 		vm_fault_prefault(&fs, vaddr,
 		    faultcount > 0 ? behind : PFBAK,
-		    faultcount > 0 ? ahead : PFFOR, false);
+		    faultcount > 0 ? ahead : PFFOR, false, faultcount);
 	VM_OBJECT_WLOCK(fs.object);
 	vm_page_lock(fs.m);
 
@@ -1413,6 +1416,17 @@
 	VM_OBJECT_WUNLOCK(first_object);
 }
 
+static SYSCTL_NODE(_debug, OID_AUTO, counters, CTLFLAG_RD, 0, "");
+
+static u_long counter_negati;	/* prefault-loop passes with faultcount == -1 */
+SYSCTL_ULONG(_debug_counters, OID_AUTO, negati, CTLFLAG_RD, &counter_negati, 0, "");
+
+static u_long counter_nonneg;	/* prefault-loop passes with faultcount != -1 */
+SYSCTL_ULONG(_debug_counters, OID_AUTO, nonneg, CTLFLAG_RD, &counter_nonneg, 0, "");
+
+static u_long counter_negmap;	/* pmap_enter_quick() calls with faultcount == -1 */
+SYSCTL_ULONG(_debug_counters, OID_AUTO, negmap, CTLFLAG_RD, &counter_negmap, 0, "");
+
 /*
  * vm_fault_prefault provides a quick way of clustering
  * pagefaults into a processes address space.  It is a "cousin"
@@ -1421,7 +1435,7 @@
  */
 static void
 vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
-    int backward, int forward, bool obj_locked)
+    int backward, int forward, bool obj_locked, int faultcount)
 {
 	pmap_t pmap;
 	vm_map_entry_t entry;
@@ -1462,6 +1476,10 @@
 		if (addr < starta || addr >= entry->end)
 			continue;
 
+		if (faultcount == -1)
+			atomic_add_long(&counter_negati, 1);
+		else
+			atomic_add_long(&counter_nonneg, 1);
 		if (!pmap_is_prefaultable(pmap, addr))
 			continue;
 
@@ -1486,8 +1504,11 @@
 			break;
 		}
 		if (m->valid == VM_PAGE_BITS_ALL &&
-		    (m->flags & PG_FICTITIOUS) == 0)
+		    (m->flags & PG_FICTITIOUS) == 0) {
 			pmap_enter_quick(pmap, addr, m, entry->protection);
+			if (faultcount == -1)
+				atomic_add_long(&counter_negmap, 1);
+		}
 		if (!obj_locked || lobject != entry->object.vm_object)
 			VM_OBJECT_RUNLOCK(lobject);
 	}