Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -1264,23 +1264,6 @@
 	vlowat = vhiwat / 2;
 }
 
-/* XXX some names and initialization are bad for limits and watermarks. */
-static int
-vspace(void)
-{
-	u_long rnumvnodes, rfreevnodes;
-	int space;
-
-	rnumvnodes = atomic_load_long(&numvnodes);
-	rfreevnodes = atomic_load_long(&freevnodes);
-	if (rnumvnodes > desiredvnodes)
-		return (0);
-	space = desiredvnodes - rnumvnodes;
-	if (freevnodes > wantfreevnodes)
-		space += rfreevnodes - wantfreevnodes;
-	return (space);
-}
-
 static void
 vnlru_return_batch_locked(struct mount *mp)
 {
@@ -1346,13 +1329,41 @@
 static struct proc *vnlruproc;
 static int vnlruproc_sig;
 
+static bool
+vnlru_under(u_long rnumvnodes, u_long limit)
+{
+	u_long rfreevnodes, space;
+
+	if (__predict_false(rnumvnodes > desiredvnodes))
+		return (true);
+
+	space = desiredvnodes - rnumvnodes;
+	if (space < limit) {
+		rfreevnodes = atomic_load_long(&freevnodes);
+		if (rfreevnodes > wantfreevnodes)
+			space += rfreevnodes - wantfreevnodes;
+	}
+	return (space < limit);
+}
+
+static void
+vnlru_kick(void)
+{
+
+	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
+	if (vnlruproc_sig == 0) {
+		vnlruproc_sig = 1;
+		wakeup(vnlruproc);
+	}
+}
+
 static void
 vnlru_proc(void)
 {
 	u_long rnumvnodes, rfreevnodes;
 	struct mount *mp, *nmp;
 	unsigned long onumvnodes;
-	int done, force, trigger, usevnodes, vsp;
+	int done, force, trigger, usevnodes;
 	bool reclaim_nc_src;
 
 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
@@ -1368,8 +1379,10 @@
 		 * adjusted using its sysctl, or emergency growth), first
 		 * try to reduce it by discarding from the free list.
 		 */
-		if (rnumvnodes > desiredvnodes)
+		if (rnumvnodes > desiredvnodes) {
 			vnlru_free_locked(rnumvnodes - desiredvnodes, NULL);
+			rnumvnodes = atomic_load_long(&numvnodes);
+		}
 		/*
 		 * Sleep if the vnode cache is in a good state. This is
 		 * when it is not over-full and has space for about a 4%
@@ -1381,8 +1394,7 @@
 			force = 1;
 			vstir = 0;
 		}
-		vsp = vspace();
-		if (vsp >= vlowat && force == 0) {
+		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
 			vnlruproc_sig = 0;
 			wakeup(&vnlruproc_sig);
 			msleep(vnlruproc, &vnode_free_list_mtx,
@@ -1452,8 +1464,7 @@
 		 * After becoming active to expand above low water, keep
 		 * active until above high water.
 		 */
-		vsp = vspace();
-		force = vsp < vhiwat;
+		force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
 	}
 }
 
@@ -1529,18 +1540,6 @@
 	return (0);
 }
 
-static void
-vcheckspace(void)
-{
-	int vsp;
-
-	vsp = vspace();
-	if (vsp < vlowat && vnlruproc_sig == 0) {
-		vnlruproc_sig = 1;
-		wakeup(vnlruproc);
-	}
-}
-
 /*
  * Wait if necessary for space for a new vnode.
  */
@@ -1574,11 +1573,19 @@
 static struct vnode *
 vn_alloc(struct mount *mp)
 {
+	u_long rnumvnodes;
 	static int cyclecount;
 	int error __unused;
 
+	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
+	if (__predict_true(!vnlru_under(rnumvnodes, vlowat)))
+		goto alloc;
+
+	atomic_subtract_long(&numvnodes, 1);
+
 	mtx_lock(&vnode_free_list_mtx);
-	if (numvnodes < desiredvnodes)
+	rnumvnodes = atomic_load_long(&numvnodes);
+	if (rnumvnodes < desiredvnodes)
 		cyclecount = 0;
 	else if (cyclecount++ >= freevnodes) {
 		cyclecount = 0;
@@ -1594,7 +1601,7 @@
 	 * should be chosen so that we never wait or even reclaim from
 	 * the free list to below its target minimum.
 	 */
-	if (numvnodes + 1 <= desiredvnodes)
+	if (rnumvnodes + 1 <= desiredvnodes)
 		;
 	else if (freevnodes > 0)
 		vnlru_free_locked(1, NULL);
@@ -1608,9 +1615,11 @@
 		}
 #endif
 	}
-	vcheckspace();
-	atomic_add_long(&numvnodes, 1);
+	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
+	if (vnlru_under(rnumvnodes, vlowat))
+		vnlru_kick();
 	mtx_unlock(&vnode_free_list_mtx);
+alloc:
 	return (uma_zalloc(vnode_zone, M_WAITOK));
 }
 
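
For readers following the vn_alloc() hunks, here is a minimal userland sketch (not part of the patch) of the pattern they introduce: speculatively bump the counter with an atomic fetch-add, take the lock-free fast path while there is comfortable headroom below the limit, and otherwise roll the increment back and fall into the locked slow path. All names and numbers here (obj_count, obj_limit, low_watermark, slow_lock) are illustrative, not kernel APIs.

/*
 * Illustrative userland sketch of the optimistic-increment fast path used by
 * vn_alloc() above; names and limits are made up for the example.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic unsigned long obj_count;			/* analogous to numvnodes */
static const unsigned long obj_limit = 1000;		/* analogous to desiredvnodes */
static const unsigned long low_watermark = 100;		/* analogous to vlowat */

static pthread_mutex_t slow_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the shape of vnlru_under(): is the remaining headroom below the mark? */
static bool
under_watermark(unsigned long count, unsigned long watermark)
{

	if (count > obj_limit)
		return (true);
	return (obj_limit - count < watermark);
}

static void *
alloc_object(void)
{
	unsigned long n;

	/* Fast path: bump the counter and allocate without taking the lock. */
	n = atomic_fetch_add(&obj_count, 1) + 1;
	if (!under_watermark(n, low_watermark))
		return (malloc(64));

	/* Slow path: undo the speculative bump, then do the locked dance. */
	atomic_fetch_sub(&obj_count, 1);
	pthread_mutex_lock(&slow_lock);
	/* ... reclaim / wait / re-check limits here, as vn_alloc() does ... */
	atomic_fetch_add(&obj_count, 1);
	pthread_mutex_unlock(&slow_lock);
	return (malloc(64));
}

int
main(void)
{
	void *p = alloc_object();

	printf("allocated %p, live objects: %lu\n", p, atomic_load(&obj_count));
	free(p);
	atomic_fetch_sub(&obj_count, 1);
	return (0);
}

The design point the sketch makes is the same one the patch relies on: the common case touches only an atomic counter, and the free-list mutex is taken only when the cache is near its watermark, with vnlru_kick() (here, the locked section) handling the pressure case.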