head/sys/kern/vfs_subr.c
[... first 3,082 lines not shown ...]
 }

 enum vputx_op { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF };

 /*
  * Decrement the use and hold counts for a vnode.
  *
  * See an explanation near vget() as to why atomic operation is safe.
+ *
+ * XXX Some filesystems pass in an exclusively locked vnode and strongly depend
+ * on the lock being held all the way until VOP_INACTIVE. This in particular
+ * happens with UFS which adds half-constructed vnodes to the hash, where they
+ * can be found by other code.
  */
 static void
 vputx(struct vnode *vp, enum vputx_op func)
 {
         int error;

         KASSERT(vp != NULL, ("vputx: null vp"));
         if (func == VPUTX_VUNREF)
                 ASSERT_VOP_LOCKED(vp, "vunref");
+        else if (func == VPUTX_VPUT)
+                ASSERT_VOP_LOCKED(vp, "vput");
         ASSERT_VI_UNLOCKED(vp, __func__);
         VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp,
             ("%s: wrong ref counts", __func__));

         CTR2(KTR_VFS, "%s: vp %p", __func__, vp);

         /*
          * We want to hold the vnode until the inactive finishes to
          * prevent vgone() races.  We drop the use count here and the
          * hold count below when we're done.
          *
          * If we release the last usecount we take ownership of the hold
          * count which provides liveness of the vnode, in which case we
          * have to vdrop.
          */
-        if (!refcount_release(&vp->v_usecount))
-                return;
+        if (!refcount_release(&vp->v_usecount)) {
+                if (func == VPUTX_VPUT)
+                        VOP_UNLOCK(vp);
+                return;
+        }
         VI_LOCK(vp);
         v_decr_devcount(vp);
         /*
          * By the time we got here someone else might have transitioned
          * the count back to > 0.
          */
-        if (vp->v_usecount > 0) {
-                vdropl(vp);
-                return;
-        }
-        if (vp->v_iflag & VI_DOINGINACT) {
-                vdropl(vp);
-                return;
-        }
+        if (vp->v_usecount > 0 || vp->v_iflag & VI_DOINGINACT)
+                goto out;

         /*
          * Check if the fs wants to perform inactive processing. Note we
          * may be only holding the interlock, in which case it is possible
          * someone else called vgone on the vnode and ->v_data is now NULL.
          * Since vgone performs inactive on its own there is nothing to do
          * here but to drop our hold count.
          */
         if (__predict_false(VN_IS_DOOMED(vp)) ||
-            VOP_NEED_INACTIVE(vp) == 0) {
-                vdropl(vp);
-                return;
-        }
+            VOP_NEED_INACTIVE(vp) == 0)
+                goto out;

         /*
          * We must call VOP_INACTIVE with the node locked. Mark
          * as VI_DOINGINACT to avoid recursion.
          */
         vp->v_iflag |= VI_OWEINACT;
         switch (func) {
         case VPUTX_VRELE:
                 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
                 VI_LOCK(vp);
                 break;
         case VPUTX_VPUT:
-                error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT);
-                VI_LOCK(vp);
+                error = 0;
+                if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
+                        error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
+                            LK_NOWAIT);
+                        VI_LOCK(vp);
+                }
                 break;
         case VPUTX_VUNREF:
                 error = 0;
                 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
                         error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
                         VI_LOCK(vp);
                 }
                 break;
         }
         VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp,
             ("vnode with usecount and VI_OWEINACT set"));
         if (error == 0) {
                 if (vp->v_iflag & VI_OWEINACT)
                         vinactive(vp);
                 if (func != VPUTX_VUNREF)
                         VOP_UNLOCK(vp);
                 vdropl(vp);
         } else if (vp->v_iflag & VI_OWEINACT) {
                 vdefer_inactive(vp);
         } else {
                 vdropl(vp);
         }
+        return;
+out:
+        if (func == VPUTX_VPUT)
+                VOP_UNLOCK(vp);
+        vdropl(vp);
 }
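Note: the comment in vputx() describes a two-counter liveness scheme: dropping the last use count transfers ownership of a hold count to the releasing thread, which must drop it once inactive processing is done. Below is a minimal userspace sketch of that hand-off, assuming C11 atomics; the names (struct obj, obj_rele, obj_drop) are hypothetical and the cleanup step stands in for VOP_INACTIVE. It illustrates the pattern and is not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
        atomic_int usecount;    /* active users of the object */
        atomic_int holdcnt;     /* liveness references; freed at zero */
};

/* Semantics of refcount_release(9): true if we dropped the last reference. */
static bool
release(atomic_int *cnt)
{
        return (atomic_fetch_sub(cnt, 1) == 1);
}

static void
obj_drop(struct obj *o)
{
        if (release(&o->holdcnt))
                free(o);        /* last hold count: the object goes away */
}

static void
obj_rele(struct obj *o)
{
        if (!release(&o->usecount))
                return;
        /*
         * We dropped the last use count and thereby own the hold count
         * that kept the object alive for its users.  Cleanup (the
         * analogue of VOP_INACTIVE) can safely run here before the drop.
         */
        obj_drop(o);
}

int
main(void)
{
        struct obj *o;

        o = malloc(sizeof(*o));
        atomic_init(&o->usecount, 1);
        atomic_init(&o->holdcnt, 1);    /* hold owned by the use count */
        obj_rele(o);                    /* drops both counts; frees o */
        return (0);
}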

 /*
  * Vnode put/release.
  * If count drops to zero, call inactive routine and return to freelist.
  */
 void
 vrele(struct vnode *vp)
 {

         vputx(vp, VPUTX_VRELE);
 }
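Usage note: vrele() is the variant for callers that do not hold the vnode lock; as the VPUTX_VRELE case above shows, vputx() takes the lock itself if inactive processing turns out to be needed. A hedged caller-side sketch follows; the helper and the cached-pointer convention are hypothetical, while vrele() and ASSERT_VOP_UNLOCKED() are existing KPIs.

/* Hypothetical helper: release a vnode reference cached in a pointer. */
static void
example_drop_cached_vnode(struct vnode **vpp)
{
        struct vnode *vp;

        vp = *vpp;
        *vpp = NULL;
        if (vp == NULL)
                return;
        ASSERT_VOP_UNLOCKED(vp, __func__);
        vrele(vp);      /* may vn_lock() internally for VOP_INACTIVE */
}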

 /*
  * Release an already locked vnode.  This gives the same effect as
  * unlock+vrele(), but takes less time and avoids releasing and
  * re-acquiring the lock (as vrele() acquires the lock internally.)
- *
- * It is an invariant that all VOP_* calls operate on a held vnode.
- * We may be only having an implicit hold stemming from our usecount,
- * which we are about to release. If we unlock the vnode afterwards we
- * open a time window where someone else dropped the last usecount and
- * proceeded to free the vnode before our unlock finished. For this
- * reason we unlock the vnode early. This is a little bit wasteful as
- * it may be the vnode is exclusively locked and inactive processing is
- * needed, in which case we are adding work.
  */
 void
 vput(struct vnode *vp)
 {

-        VOP_UNLOCK(vp);
         vputx(vp, VPUTX_VPUT);
 }
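With this change vput() is entered with the vnode still locked and vputx() performs the unlock itself, so VOP_INACTIVE can run under the lock the caller already owns instead of the lock being dropped early and reacquired. A hedged caller-side sketch, assuming a held and locked vnode on entry; the function itself is hypothetical, while VOP_GETATTR() and vput() are existing KPIs.

#include <sys/param.h>
#include <sys/vnode.h>

static int
example_getattr_and_put(struct vnode *vp, struct ucred *cred)
{
        struct vattr va;
        int error;

        ASSERT_VOP_LOCKED(vp, __func__);        /* vput() now asserts this */
        error = VOP_GETATTR(vp, &va, cred);
        vput(vp);       /* unlocks the vnode and drops the use count */
        return (error);
}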

 /*
  * Release an exclusively locked vnode. Do not unlock the vnode lock.
  */
 void
 vunref(struct vnode *vp)
[... last 3,122 lines not shown ...]
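Finally, vunref() (its body is truncated above) serves callers that must keep operating on the still-locked vnode after giving up their use count. Since the implicit hold tied to the use count is gone after the call, this sketch takes its own hold first; the helper is hypothetical, while vhold(), vunref(), vdrop() and ASSERT_VOP_ELOCKED() are existing KPIs.

static void
example_unref_keep_lock(struct vnode *vp)       /* locked, usecount held */
{
        ASSERT_VOP_ELOCKED(vp, __func__);
        vhold(vp);      /* our own liveness reference past the unref */
        vunref(vp);     /* drops the use count; the lock stays held */
        /* ... work that still requires the vnode lock ... */
        VOP_UNLOCK(vp);
        vdrop(vp);
}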