Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -6022,8 +6022,12 @@
 	 * Try-lock because this is the wrong lock order. If that does
 	 * not succeed, drop the mount vnode list lock and try to
 	 * reacquire it and the vnode interlock in the right order.
+	 *
+	 * We use VI_TRYLOCK_CONTENDED since with the sheer number of
+	 * vnodes to be inspected quite a few of them can already have
+	 * the interlock held if the machine is under load.
 	 */
-	if (!VI_TRYLOCK(vp) &&
+	if (!VI_TRYLOCK_CONTENDED(vp) &&
 	    !mnt_vnode_next_active_relock(*mvp, mp, vp))
 		goto restart;
 	KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
Index: sys/sys/vnode.h
===================================================================
--- sys/sys/vnode.h
+++ sys/sys/vnode.h
@@ -438,6 +438,19 @@
 #define	VI_LOCK(vp)		mtx_lock(&(vp)->v_interlock)
 #define	VI_LOCK_FLAGS(vp, flags)	mtx_lock_flags(&(vp)->v_interlock, (flags))
 #define	VI_TRYLOCK(vp)		mtx_trylock(&(vp)->v_interlock)
+/*
+ * To be used when the op is useful to do, but likely enough to fail that it
+ * makes sense to pre-read the lock first (and consequently avoid dirtying
+ * the cache line).
+ */
+#define	VI_TRYLOCK_CONTENDED(vp) ({					\
+	int _rv;							\
+	if (mtx_owner(VI_MTX(vp)) != NULL)				\
+		_rv = 0;						\
+	else								\
+		_rv = mtx_trylock(&(vp)->v_interlock);			\
+	_rv;								\
+})
 #define	VI_UNLOCK(vp)		mtx_unlock(&(vp)->v_interlock)
 #define	VI_MTX(vp)		(&(vp)->v_interlock)
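
For reference, here is a minimal userspace sketch of the same pre-read-before-trylock idea, written with C11 atomics instead of the kernel's mtx(9) primitives. It is only an illustration of the technique, not part of the patch; the names (demo_lock, contended_trylock) are made up for the example.

/*
 * Sketch of the VI_TRYLOCK_CONTENDED idea in plain C11: before attempting
 * the atomic acquisition, do a relaxed read of the lock word and bail out
 * if it already looks held.  The plain read keeps the cache line in shared
 * state, whereas a failed atomic RMW would have pulled it in exclusive and
 * dirtied it for nothing.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_lock {
	atomic_int	owner;		/* 0 == unowned, otherwise owner id */
};

static bool
contended_trylock(struct demo_lock *lk, int self)
{

	/* Pre-read: if someone already owns it, do not touch the line. */
	if (atomic_load_explicit(&lk->owner, memory_order_relaxed) != 0)
		return (false);

	/* Looks free; now attempt the real (write) acquisition. */
	int expected = 0;
	return (atomic_compare_exchange_strong_explicit(&lk->owner,
	    &expected, self, memory_order_acquire, memory_order_relaxed));
}

static void
demo_unlock(struct demo_lock *lk)
{

	atomic_store_explicit(&lk->owner, 0, memory_order_release);
}

int
main(void)
{
	struct demo_lock lk = { .owner = 0 };

	if (contended_trylock(&lk, 1)) {
		printf("acquired\n");
		/* A second attempt fails cheaply via the pre-read. */
		printf("retry while held: %d\n", contended_trylock(&lk, 2));
		demo_unlock(&lk);
	}
	return (0);
}

As in the patch, the pre-read only pays off on paths that are expected to fail often, such as the active-vnode scan; for a mostly uncontended trylock the extra load is pure overhead.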