diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -287,7 +287,6 @@
 #define VDBATCH_SIZE 8
 struct vdbatch {
 	u_int index;
-	long freevnodes;
 	struct mtx lock;
 	struct vnode *tab[VDBATCH_SIZE];
 };
@@ -1418,48 +1417,62 @@
  * at any given moment can still exceed slop, but it should not be by significant
  * margin in practice.
  */
-#define VNLRU_FREEVNODES_SLOP 128
+#define VNLRU_FREEVNODES_SLOP 126
+
+static void __noinline
+vfs_freevnodes_rollup(int8_t *lfreevnodes)
+{
+
+	atomic_add_long(&freevnodes, *lfreevnodes);
+	*lfreevnodes = 0;
+	critical_exit();
+}
 
 static __inline void
 vfs_freevnodes_inc(void)
 {
-	struct vdbatch *vd;
+	int8_t *lfreevnodes;
 
 	critical_enter();
-	vd = DPCPU_PTR(vd);
-	vd->freevnodes++;
-	critical_exit();
+	lfreevnodes = PCPU_PTR(vfs_freevnodes);
+	(*lfreevnodes)++;
+	if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
+		vfs_freevnodes_rollup(lfreevnodes);
+	else
+		critical_exit();
 }
 
 static __inline void
 vfs_freevnodes_dec(void)
 {
-	struct vdbatch *vd;
+	int8_t *lfreevnodes;
 
 	critical_enter();
-	vd = DPCPU_PTR(vd);
-	vd->freevnodes--;
-	critical_exit();
+	lfreevnodes = PCPU_PTR(vfs_freevnodes);
+	(*lfreevnodes)--;
+	if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
+		vfs_freevnodes_rollup(lfreevnodes);
+	else
+		critical_exit();
 }
 
 static u_long
 vnlru_read_freevnodes(void)
 {
-	struct vdbatch *vd;
-	long slop;
+	long slop, rfreevnodes;
 	int cpu;
 
-	mtx_assert(&vnode_list_mtx, MA_OWNED);
-	if (freevnodes > freevnodes_old)
-		slop = freevnodes - freevnodes_old;
+	rfreevnodes = atomic_load_long(&freevnodes);
+
+	if (rfreevnodes > freevnodes_old)
+		slop = rfreevnodes - freevnodes_old;
 	else
-		slop = freevnodes_old - freevnodes;
+		slop = freevnodes_old - rfreevnodes;
 	if (slop < VNLRU_FREEVNODES_SLOP)
-		return (freevnodes >= 0 ? freevnodes : 0);
-	freevnodes_old = freevnodes;
+		return (rfreevnodes >= 0 ? rfreevnodes : 0);
+	freevnodes_old = rfreevnodes;
 	CPU_FOREACH(cpu) {
-		vd = DPCPU_ID_PTR((cpu), vd);
-		freevnodes_old += vd->freevnodes;
+		freevnodes_old += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
 	}
 	return (freevnodes_old >= 0 ? freevnodes_old : 0);
 }
@@ -3513,7 +3526,6 @@
 
 	mtx_lock(&vnode_list_mtx);
 	critical_enter();
-	freevnodes += vd->freevnodes;
 	for (i = 0; i < VDBATCH_SIZE; i++) {
 		vp = vd->tab[i];
 		TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
@@ -3522,7 +3534,6 @@
 		vp->v_dbatchcpu = NOCPU;
 	}
 	mtx_unlock(&vnode_list_mtx);
-	vd->freevnodes = 0;
 	bzero(vd->tab, sizeof(vd->tab));
 	vd->index = 0;
 	critical_exit();
diff --git a/sys/sys/pcpu.h b/sys/sys/pcpu.h
--- a/sys/sys/pcpu.h
+++ b/sys/sys/pcpu.h
@@ -189,7 +189,8 @@
 	long		pc_cp_time[CPUSTATES];	/* statclock ticks */
 	struct _device	*pc_device;		/* CPU device handle */
 	void		*pc_netisr;		/* netisr SWI cookie */
-	int		pc_unused1;		/* unused field */
+	int8_t		pc_vfs_freevnodes;	/* freevnodes counter */
+	char		pc_unused1[3];		/* unused pad */
 	int		pc_domain;		/* Memory domain. */
 	struct rm_queue	pc_rm_queue;		/* rmlock list of trackers */
 	uintptr_t	pc_dynamic;		/* Dynamic per-cpu data area */
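
For illustration, a minimal standalone sketch of the scheme the patch adopts: each CPU keeps a small signed delta of the free-vnode count and folds it into the global atomic only when the delta reaches the slop threshold, so the shared cacheline is touched roughly once per 126 operations rather than on every one. This is not part of the patch: C11 threads stand in for CPUs (so critical sections are omitted), a _Thread_local int8_t stands in for pc_vfs_freevnodes, and every name in it (SLOP, global_count, local_delta, rollup, count_inc, count_dec, count_read_fast) is a hypothetical stand-in rather than kernel API.

/*
 * Userspace model of the per-CPU batched counter above.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Rolling up at +/-126 keeps the delta strictly inside int8_t range. */
#define	SLOP	126

static atomic_long global_count;		/* models freevnodes */
static _Thread_local int8_t local_delta;	/* models pc_vfs_freevnodes */

/* Fold the local batch into the global counter; models vfs_freevnodes_rollup(). */
static void
rollup(void)
{

	atomic_fetch_add(&global_count, local_delta);
	local_delta = 0;
}

static void
count_inc(void)
{

	if (++local_delta == SLOP)
		rollup();
}

static void
count_dec(void)
{

	if (--local_delta == -SLOP)
		rollup();
}

/*
 * Cheap read: the global value alone, stale by at most SLOP per thread.
 * An exact read would also sum every thread's local_delta, the way
 * vnlru_read_freevnodes() walks all CPUs once the cached value has
 * drifted by a full slop.
 */
static long
count_read_fast(void)
{

	return (atomic_load(&global_count));
}

int
main(void)
{

	for (int i = 0; i < 1000; i++)
		count_inc();
	rollup();				/* flush the remainder for the demo */
	printf("%ld\n", count_read_fast());	/* prints 1000 */
	return (0);
}

This also shows why the slop drops from 128 to 126 alongside the counter shrinking from long to int8_t: triggering the rollup at +/-126 guarantees the per-CPU delta never reaches the type's limits of -128 and 127, while readers such as vnlru_read_freevnodes() stay tolerant of up to one slop of drift per CPU between exact resyncs.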