Changeset View
Standalone View
sys/kern/vfs_subr.c
Show First 20 Lines • Show All 2,814 Lines • ▼ Show 20 Lines | v_decr_devcount(struct vnode *vp) | ||||
if (vp->v_type == VCHR && vp->v_rdev != NULL) { | if (vp->v_type == VCHR && vp->v_rdev != NULL) { | ||||
dev_lock(); | dev_lock(); | ||||
vp->v_rdev->si_usecount--; | vp->v_rdev->si_usecount--; | ||||
dev_unlock(); | dev_unlock(); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Try to perform inactive processing. | |||||
* | |||||
* We are called with hold count on the vnode. | |||||
* | |||||
* This can race with vref which will bump v_usecount without locking the vnode. | |||||
* There is no way to lock it out, making this best effort. | |||||
*/ | |||||
static int __noinline | |||||
vget_inactive(struct vnode *vp, int flags) | |||||
{ | |||||
bool upgraded; | |||||
ASSERT_VOP_LOCKED(vp, __func__); | |||||
/* | |||||
* There is nothing we can do if we raced with vref. | |||||
*/ | |||||
if (vp->v_usecount > 0) | |||||
return (0); | |||||
if (flags & LK_NOWAIT) { | |||||
VOP_UNLOCK(vp); | |||||
vdrop(vp); | |||||
return (EAGAIN); | |||||
} | |||||
upgraded = false; | |||||
if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { | |||||
VOP_LOCK(vp, LK_UPGRADE); | |||||
if (VN_IS_DOOMED(vp)) { | |||||
VOP_UNLOCK(vp); | |||||
kib: I think you should not unlock there, only downgrade, and even this is optional. I think it is… | |||||
mjgAuthorUnsubmitted Done Inline ActionsNow that I look at it it's a bug - completely ignores LK_RETRY. Should LK_RETRY be specified, there is nothing to do, otherwise we do need to unlock to remain compatible. mjg: Now that I look at it it's a bug - completely ignores LK_RETRY. Should LK_RETRY be specified… | |||||
kibUnsubmitted Not Done Inline ActionsIn fact I think you should not upgrade. If vget() sees the VIRF_OWEINACT flag, the locking mode should be changed to exclusive. kib: In fact I think you should not upgrade. If vget() sees the VIRF_OWEINACT flag, the locking… | |||||
vdrop(vp); | |||||
return (ENOENT); | |||||
} | |||||
upgraded = true; | |||||
} | |||||
/* | |||||
* By the time we get here v_usecount may be > 0 because of vref | |||||
* or some other vget caller managed to call vinactive. The first | |||||
* condition is a race which affects vputx anyway and the latter | |||||
* is harmless. | |||||
*/ | |||||
VI_LOCK(vp); | |||||
vinactive(vp); | |||||
VI_UNLOCK(vp); | |||||
if (upgraded) | |||||
VOP_LOCK(vp, LK_DOWNGRADE); | |||||
return (0); | |||||
} | |||||
/* | |||||
* Grab a particular vnode from the free list, increment its | * Grab a particular vnode from the free list, increment its | ||||
* reference count and lock it. VIRF_DOOMED is set if the vnode | * reference count and lock it. VIRF_DOOMED is set if the vnode | ||||
* is being destroyed. Only callers who specify LK_RETRY will | * is being destroyed. Only callers who specify LK_RETRY will | ||||
* see doomed vnodes. If inactive processing was delayed in | * see doomed vnodes. If inactive processing was delayed in | ||||
* vput try to do it here. | * vput try to do it here. | ||||
* | * | ||||
* usecount is manipulated using atomics without holding any locks. | * usecount is manipulated using atomics without holding any locks. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 85 Lines • ▼ Show 20 Lines | if (vs == VGET_USECOUNT) | ||||
vrele(vp); | vrele(vp); | ||||
else | else | ||||
vdrop(vp); | vdrop(vp); | ||||
CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, | CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, | ||||
vp); | vp); | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | |||||
 * There is no way to assert VIRF_OWEINACT is not set since a use | |||||
 * reference may be acquired because of vref. | |||||
kibUnsubmitted Not Done Inline ActionsI think there is some missed word in the sentence. kib: I think there is some missed word in the sentence. | |||||
mjgAuthorUnsubmitted Done Inline Actionsindeed mjg: indeed | |||||
*/ | |||||
if (vs == VGET_USECOUNT) { | if (vs == VGET_USECOUNT) { | ||||
return (0); | return (0); | ||||
} | } | ||||
if (__predict_false(vp->v_irflag & VIRF_OWEINACT)) { | |||||
error = vget_inactive(vp, flags); | |||||
if (error != 0) | |||||
return (error); | |||||
} | |||||
if (__predict_false(vp->v_type == VCHR)) | if (__predict_false(vp->v_type == VCHR)) | ||||
return (vget_finish_vchr(vp)); | return (vget_finish_vchr(vp)); | ||||
/* | /* | ||||
* We hold the vnode. If the usecount is 0 it will be utilized to keep | * We hold the vnode. If the usecount is 0 it will be utilized to keep | ||||
* the vnode around. Otherwise someone else lended their hold count and | * the vnode around. Otherwise someone else lended their hold count and | ||||
* we have to drop ours. | * we have to drop ours. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 164 Lines • ▼ Show 20 Lines | if (VN_IS_DOOMED(vp)) { | ||||
return; | return; | ||||
} | } | ||||
if (vp->v_iflag & VI_DEFINACT) { | if (vp->v_iflag & VI_DEFINACT) { | ||||
VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); | VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); | ||||
vdropl(vp); | vdropl(vp); | ||||
return; | return; | ||||
} | } | ||||
if (vp->v_usecount > 0) { | if (vp->v_usecount > 0) { | ||||
vp->v_iflag &= ~VI_OWEINACT; | vp->v_irflag &= ~VIRF_OWEINACT; | ||||
vdropl(vp); | vdropl(vp); | ||||
return; | return; | ||||
} | } | ||||
vlazy(vp); | vlazy(vp); | ||||
vp->v_iflag |= VI_DEFINACT; | vp->v_iflag |= VI_DEFINACT; | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
counter_u64_add(deferred_inact, 1); | counter_u64_add(deferred_inact, 1); | ||||
} | } | ||||
static void | static void | ||||
vdefer_inactive_unlocked(struct vnode *vp) | vdefer_inactive_unlocked(struct vnode *vp) | ||||
{ | { | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
if ((vp->v_iflag & VI_OWEINACT) == 0) { | if ((vp->v_irflag & VIRF_OWEINACT) == 0) { | ||||
vdropl(vp); | vdropl(vp); | ||||
return; | return; | ||||
} | } | ||||
vdefer_inactive(vp); | vdefer_inactive(vp); | ||||
} | } | ||||
enum vputx_op { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF }; | enum vputx_op { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF }; | ||||
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | if (__predict_false(VN_IS_DOOMED(vp)) || | ||||
vdropl(vp); | vdropl(vp); | ||||
return; | return; | ||||
} | } | ||||
/* | /* | ||||
* We must call VOP_INACTIVE with the node locked. Mark | * We must call VOP_INACTIVE with the node locked. Mark | ||||
* as VI_DOINGINACT to avoid recursion. | * as VI_DOINGINACT to avoid recursion. | ||||
*/ | */ | ||||
vp->v_iflag |= VI_OWEINACT; | vp->v_irflag |= VIRF_OWEINACT; | ||||
switch (func) { | switch (func) { | ||||
case VPUTX_VRELE: | case VPUTX_VRELE: | ||||
error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); | error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
break; | break; | ||||
case VPUTX_VPUT: | case VPUTX_VPUT: | ||||
error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT); | error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT); | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
▲ Show 20 Lines • Show All 217 Lines • ▼ Show 20 Lines | vdrop_deactivate(struct vnode *vp) | ||||
/* | /* | ||||
* Mark a vnode as free: remove it from its active list | * Mark a vnode as free: remove it from its active list | ||||
* and put it up for recycling on the freelist. | * and put it up for recycling on the freelist. | ||||
*/ | */ | ||||
VNASSERT(!VN_IS_DOOMED(vp), vp, | VNASSERT(!VN_IS_DOOMED(vp), vp, | ||||
("vdrop: returning doomed vnode")); | ("vdrop: returning doomed vnode")); | ||||
VNASSERT(vp->v_op != NULL, vp, | VNASSERT(vp->v_op != NULL, vp, | ||||
("vdrop: vnode already reclaimed.")); | ("vdrop: vnode already reclaimed.")); | ||||
VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, | VNASSERT((vp->v_irflag & VIRF_OWEINACT) == 0, vp, | ||||
("vnode with VI_OWEINACT set")); | ("vnode with VIRF_OWEINACT set")); | ||||
VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, | VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, | ||||
("vnode with VI_DEFINACT set")); | ("vnode with VI_DEFINACT set")); | ||||
if (vp->v_mflag & VMP_LAZYLIST) { | if (vp->v_mflag & VMP_LAZYLIST) { | ||||
mp = vp->v_mount; | mp = vp->v_mount; | ||||
mtx_lock(&mp->mnt_listmtx); | mtx_lock(&mp->mnt_listmtx); | ||||
VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST")); | VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST")); | ||||
/* | /* | ||||
* Don't remove the vnode from the lazy list if another thread | * Don't remove the vnode from the lazy list if another thread | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | vinactivef(struct vnode *vp) | ||||
struct vm_object *obj; | struct vm_object *obj; | ||||
ASSERT_VOP_ELOCKED(vp, "vinactive"); | ASSERT_VOP_ELOCKED(vp, "vinactive"); | ||||
ASSERT_VI_LOCKED(vp, "vinactive"); | ASSERT_VI_LOCKED(vp, "vinactive"); | ||||
VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, | VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, | ||||
("vinactive: recursed on VI_DOINGINACT")); | ("vinactive: recursed on VI_DOINGINACT")); | ||||
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | ||||
vp->v_iflag |= VI_DOINGINACT; | vp->v_iflag |= VI_DOINGINACT; | ||||
vp->v_iflag &= ~VI_OWEINACT; | vp->v_irflag &= ~VIRF_OWEINACT; | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
/* | /* | ||||
* Before moving off the active list, we must be sure that any | * Before moving off the active list, we must be sure that any | ||||
* modified pages are converted into the vnode's dirty | * modified pages are converted into the vnode's dirty | ||||
* buffers, since these will no longer be checked once the | * buffers, since these will no longer be checked once the | ||||
* vnode is on the inactive list. | * vnode is on the inactive list. | ||||
* | * | ||||
* The write-out of the dirty pages is asynchronous. At the | * The write-out of the dirty pages is asynchronous. At the | ||||
Show All 16 Lines | |||||
void | void | ||||
vinactive(struct vnode *vp) | vinactive(struct vnode *vp) | ||||
{ | { | ||||
ASSERT_VOP_ELOCKED(vp, "vinactive"); | ASSERT_VOP_ELOCKED(vp, "vinactive"); | ||||
ASSERT_VI_LOCKED(vp, "vinactive"); | ASSERT_VI_LOCKED(vp, "vinactive"); | ||||
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | ||||
if ((vp->v_iflag & VI_OWEINACT) == 0) | if ((vp->v_irflag & VIRF_OWEINACT) == 0) | ||||
return; | return; | ||||
if (vp->v_iflag & VI_DOINGINACT) | if (vp->v_iflag & VI_DOINGINACT) | ||||
return; | return; | ||||
if (vp->v_usecount > 0) { | if (vp->v_usecount > 0) { | ||||
vp->v_iflag &= ~VI_OWEINACT; | vp->v_irflag &= ~VIRF_OWEINACT; | ||||
kibUnsubmitted Not Done Inline ActionsI would not call this mandatory inactive processing. With unlocked vrefing, this is as uncertain as before. Since we own the excl lock, why not inactivate ? kib: I would not call this mandatory inactive processing. With unlocked vrefing, this is as… | |||||
mjgAuthorUnsubmitted Done Inline Actionswell mandatory only appears in the title, i should change it to "best effort" like i state in comments. If you are arguing for renaming the flag VIRF_OWEINACT i have no opinion. mjg: well mandatory only appears in the title, i should change it to "best effort" like i state in… | |||||
kibUnsubmitted Not Done Inline ActionsNo, I think this place should be changed to call vinactivef() if the thread sees the flag set. kib: No, I think this place should be changed to call vinactivef() if the thread sees the flag set. | |||||
return; | return; | ||||
} | } | ||||
vinactivef(vp); | vinactivef(vp); | ||||
} | } | ||||
/* | /* | ||||
* Remove any vnodes in the vnode table belonging to mount point mp. | * Remove any vnodes in the vnode table belonging to mount point mp. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 265 Lines • ▼ Show 20 Lines | if (vp->v_irflag & VIRF_DOOMED) | ||||
return; | return; | ||||
vp->v_irflag |= VIRF_DOOMED; | vp->v_irflag |= VIRF_DOOMED; | ||||
/* | /* | ||||
* Check to see if the vnode is in use. If so, we have to call | * Check to see if the vnode is in use. If so, we have to call | ||||
* VOP_CLOSE() and VOP_INACTIVE(). | * VOP_CLOSE() and VOP_INACTIVE(). | ||||
*/ | */ | ||||
active = vp->v_usecount > 0; | active = vp->v_usecount > 0; | ||||
oweinact = (vp->v_iflag & VI_OWEINACT) != 0; | oweinact = (vp->v_irflag & VIRF_OWEINACT) != 0; | ||||
/* | /* | ||||
* If we need to do inactive VI_OWEINACT will be set. | * If we need to do inactive VIRF_OWEINACT will be set. | ||||
*/ | */ | ||||
if (vp->v_iflag & VI_DEFINACT) { | if (vp->v_iflag & VI_DEFINACT) { | ||||
VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); | VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); | ||||
vp->v_iflag &= ~VI_DEFINACT; | vp->v_iflag &= ~VI_DEFINACT; | ||||
vdropl(vp); | vdropl(vp); | ||||
} else { | } else { | ||||
VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); | VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
▲ Show 20 Lines • Show All 133 Lines • ▼ Show 20 Lines | vn_printf(struct vnode *vp, const char *fmt, ...) | ||||
default: | default: | ||||
printf("\n"); | printf("\n"); | ||||
break; | break; | ||||
} | } | ||||
buf[0] = '\0'; | buf[0] = '\0'; | ||||
buf[1] = '\0'; | buf[1] = '\0'; | ||||
if (vp->v_irflag & VIRF_DOOMED) | if (vp->v_irflag & VIRF_DOOMED) | ||||
strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); | strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); | ||||
flags = vp->v_irflag & ~(VIRF_DOOMED); | if (vp->v_irflag & VIRF_OWEINACT) | ||||
strlcat(buf, "|VIRF_OWEINACT", sizeof(buf)); | |||||
flags = vp->v_irflag & ~(VIRF_DOOMED | VIRF_OWEINACT); | |||||
if (flags != 0) { | if (flags != 0) { | ||||
snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); | snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); | ||||
strlcat(buf, buf2, sizeof(buf)); | strlcat(buf, buf2, sizeof(buf)); | ||||
} | } | ||||
if (vp->v_vflag & VV_ROOT) | if (vp->v_vflag & VV_ROOT) | ||||
strlcat(buf, "|VV_ROOT", sizeof(buf)); | strlcat(buf, "|VV_ROOT", sizeof(buf)); | ||||
if (vp->v_vflag & VV_ISTTY) | if (vp->v_vflag & VV_ISTTY) | ||||
strlcat(buf, "|VV_ISTTY", sizeof(buf)); | strlcat(buf, "|VV_ISTTY", sizeof(buf)); | ||||
Show All 29 Lines | if (flags != 0) { | ||||
strlcat(buf, buf2, sizeof(buf)); | strlcat(buf, buf2, sizeof(buf)); | ||||
} | } | ||||
if (vp->v_iflag & VI_TEXT_REF) | if (vp->v_iflag & VI_TEXT_REF) | ||||
strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); | strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); | ||||
if (vp->v_iflag & VI_MOUNT) | if (vp->v_iflag & VI_MOUNT) | ||||
strlcat(buf, "|VI_MOUNT", sizeof(buf)); | strlcat(buf, "|VI_MOUNT", sizeof(buf)); | ||||
if (vp->v_iflag & VI_DOINGINACT) | if (vp->v_iflag & VI_DOINGINACT) | ||||
strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); | strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); | ||||
if (vp->v_iflag & VI_OWEINACT) | |||||
strlcat(buf, "|VI_OWEINACT", sizeof(buf)); | |||||
if (vp->v_iflag & VI_DEFINACT) | if (vp->v_iflag & VI_DEFINACT) | ||||
strlcat(buf, "|VI_DEFINACT", sizeof(buf)); | strlcat(buf, "|VI_DEFINACT", sizeof(buf)); | ||||
flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | | flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | | ||||
VI_OWEINACT | VI_DEFINACT); | VI_DEFINACT); | ||||
if (flags != 0) { | if (flags != 0) { | ||||
snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); | snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); | ||||
strlcat(buf, buf2, sizeof(buf)); | strlcat(buf, buf2, sizeof(buf)); | ||||
} | } | ||||
if (vp->v_mflag & VMP_LAZYLIST) | if (vp->v_mflag & VMP_LAZYLIST) | ||||
strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); | strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); | ||||
flags = vp->v_mflag & ~(VMP_LAZYLIST); | flags = vp->v_mflag & ~(VMP_LAZYLIST); | ||||
if (flags != 0) { | if (flags != 0) { | ||||
▲ Show 20 Lines • Show All 546 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
static void | static void | ||||
vfs_deferred_inactive(struct vnode *vp, int lkflags) | vfs_deferred_inactive(struct vnode *vp, int lkflags) | ||||
{ | { | ||||
ASSERT_VI_LOCKED(vp, __func__); | ASSERT_VI_LOCKED(vp, __func__); | ||||
VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); | VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); | ||||
if ((vp->v_iflag & VI_OWEINACT) == 0) { | if ((vp->v_irflag & VIRF_OWEINACT) == 0) { | ||||
vdropl(vp); | vdropl(vp); | ||||
return; | return; | ||||
} | } | ||||
if (vn_lock(vp, lkflags) == 0) { | if (vn_lock(vp, lkflags) == 0) { | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
vinactive(vp); | vinactive(vp); | ||||
VOP_UNLOCK(vp); | VOP_UNLOCK(vp); | ||||
vdropl(vp); | vdropl(vp); | ||||
▲ Show 20 Lines • Show All 1,806 Lines • Show Last 20 Lines |
I think you should not unlock there, only downgrade, and even this is optional. I think it is less work to keep the vnode exclusively locked there.