Changeset View
Standalone View
sys/kern/vfs_subr.c
Show First 20 Lines • Show All 2,835 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Decrement the use and hold counts for a vnode. | * Decrement the use and hold counts for a vnode. | ||||
* | * | ||||
* See an explanation near vget() as to why atomic operation is safe. | * See an explanation near vget() as to why atomic operation is safe. | ||||
*/ | */ | ||||
static void | static void | ||||
vputx(struct vnode *vp, int func) | vputx(struct vnode *vp, int func) | ||||
{ | { | ||||
struct vm_object *obj; | |||||
int error; | int error; | ||||
KASSERT(vp != NULL, ("vputx: null vp")); | KASSERT(vp != NULL, ("vputx: null vp")); | ||||
if (func == VPUTX_VUNREF) | if (func == VPUTX_VUNREF) | ||||
ASSERT_VOP_LOCKED(vp, "vunref"); | ASSERT_VOP_LOCKED(vp, "vunref"); | ||||
else if (func == VPUTX_VPUT) | else if (func == VPUTX_VPUT) | ||||
ASSERT_VOP_LOCKED(vp, "vput"); | ASSERT_VOP_LOCKED(vp, "vput"); | ||||
else | else | ||||
Show All 22 Lines | if (func == VPUTX_VPUT) | ||||
VOP_UNLOCK(vp, 0); | VOP_UNLOCK(vp, 0); | ||||
v_decr_devcount(vp); | v_decr_devcount(vp); | ||||
vdropl(vp); | vdropl(vp); | ||||
return; | return; | ||||
} | } | ||||
v_decr_devcount(vp); | v_decr_devcount(vp); | ||||
error = 0; | |||||
if (vp->v_usecount != 0) { | if (vp->v_usecount != 0) { | ||||
vn_printf(vp, "vputx: usecount not zero for vnode "); | vn_printf(vp, "vputx: usecount not zero for vnode "); | ||||
panic("vputx: usecount not zero"); | panic("vputx: usecount not zero"); | ||||
} | } | ||||
CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); | CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); | ||||
error = VOP_NEED_INACTIVE(vp); | |||||
if (error != 0) | |||||
panic("VOP_NEED_INACTIVE failed %d", error); | |||||
if ((vp->v_iflag & VI_OWEINACT) == 0) { | |||||
/* | /* | ||||
* We must call VOP_INACTIVE with the node locked. Mark | * v_object association is protected by the vnode lock which we don't | ||||
* as VI_DOINGINACT to avoid recursion. | * necessarily have. However, objects are type stable and this check | ||||
* is harmless if incorrect. | |||||
*/ | */ | ||||
if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && | |||||
(obj->flags & OBJ_MIGHTBEDIRTY) != 0) | |||||
vp->v_iflag |= VI_OWEINACT; | vp->v_iflag |= VI_OWEINACT; | ||||
kib: Why not to push this condition into stdneed_inactive() ? | |||||
kibUnsubmitted Not Done Inline ActionsOr rather, I do not understand the presence of this test at all, since stdneed_inactive() always sets VI_OWEINACT. kib: Or rather, I do not understand the presence of this test at all, since stdneed_inactive()… | |||||
mjgAuthorUnsubmitted Done Inline ActionsThen who is supposed to perform this check? vinactive performs the check and calls vm_object_page_clean(), that's outside of whatever happens in VOP_INACTIVE. Adding the check here preserves the behavior (of not requiring NEED_INACTIVE to look at it). I don't have a strong opinion one way or the other, just want to push optional inactive processing through. mjg: Then who is supposed to perform this check? vinactive performs the check and calls… | |||||
kibUnsubmitted Not Done Inline ActionsThis check and the action that it gates are required when moving the vnode from the active to the inactive list, because the syncer only flushes pages for active vnodes. Without the check, dirty pages for the inactivated vnode could remain unsynced for an arbitrarily long time, causing user data loss on crash or power failure. In the current patch, vinactive() is called always, except for the tmpfs vnodes. If a filesystem starts providing its own implementation of NEED_INACTIVE(), it should handle that as well (but e.g. if the filesystem is ro, there is no need). kib: This check and the action that it gates, are required when moving the vnode from active to… | |||||
} | |||||
if ((vp->v_iflag & VI_OWEINACT) == 0) { | |||||
if (func == VPUTX_VPUT) | |||||
VOP_UNLOCK(vp, 0); | |||||
vdropl(vp); | |||||
return; | |||||
} | |||||
switch (func) { | switch (func) { | ||||
case VPUTX_VRELE: | case VPUTX_VRELE: | ||||
error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); | error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
break; | break; | ||||
case VPUTX_VPUT: | case VPUTX_VPUT: | ||||
if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { | if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { | ||||
error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | | error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | | ||||
▲ Show 20 Lines • Show All 1,442 Lines • ▼ Show 20 Lines | |||||
static int sync_inactive(struct vop_inactive_args *); | static int sync_inactive(struct vop_inactive_args *); | ||||
static int sync_reclaim(struct vop_reclaim_args *); | static int sync_reclaim(struct vop_reclaim_args *); | ||||
static struct vop_vector sync_vnodeops = { | static struct vop_vector sync_vnodeops = { | ||||
.vop_bypass = VOP_EOPNOTSUPP, | .vop_bypass = VOP_EOPNOTSUPP, | ||||
.vop_close = sync_close, /* close */ | .vop_close = sync_close, /* close */ | ||||
.vop_fsync = sync_fsync, /* fsync */ | .vop_fsync = sync_fsync, /* fsync */ | ||||
.vop_inactive = sync_inactive, /* inactive */ | .vop_inactive = sync_inactive, /* inactive */ | ||||
.vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ | |||||
.vop_reclaim = sync_reclaim, /* reclaim */ | .vop_reclaim = sync_reclaim, /* reclaim */ | ||||
.vop_lock1 = vop_stdlock, /* lock */ | .vop_lock1 = vop_stdlock, /* lock */ | ||||
.vop_unlock = vop_stdunlock, /* unlock */ | .vop_unlock = vop_stdunlock, /* unlock */ | ||||
.vop_islocked = vop_stdislocked, /* islocked */ | .vop_islocked = vop_stdislocked, /* islocked */ | ||||
}; | }; | ||||
/* | /* | ||||
* Create a new filesystem syncer vnode for the specified mount point. | * Create a new filesystem syncer vnode for the specified mount point. | ||||
▲ Show 20 Lines • Show All 524 Lines • ▼ Show 20 Lines | |||||
void | void | ||||
vop_unlock_post(void *ap, int rc) | vop_unlock_post(void *ap, int rc) | ||||
{ | { | ||||
struct vop_unlock_args *a = ap; | struct vop_unlock_args *a = ap; | ||||
if (a->a_flags & LK_INTERLOCK) | if (a->a_flags & LK_INTERLOCK) | ||||
ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); | ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); | ||||
} | |||||
void | |||||
vop_need_inactive_pre(void *ap) | |||||
{ | |||||
struct vop_need_inactive_args *a = ap; | |||||
ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); | |||||
} | |||||
void | |||||
vop_need_inactive_post(void *ap, int rc) | |||||
{ | |||||
struct vop_need_inactive_args *a = ap; | |||||
ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); | |||||
} | } | ||||
#endif | #endif | ||||
void | void | ||||
vop_create_post(void *ap, int rc) | vop_create_post(void *ap, int rc) | ||||
{ | { | ||||
struct vop_create_args *a = ap; | struct vop_create_args *a = ap; | ||||
▲ Show 20 Lines • Show All 818 Lines • Show Last 20 Lines |
Why not to push this condition into stdneed_inactive() ?