Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/vfs_subr.c
Show First 20 Lines • Show All 1,153 Lines • ▼ Show 20 Lines | for (; count > 0; count--) { | ||||
* The clear of VI_FREE prevents activation of the | * The clear of VI_FREE prevents activation of the | ||||
* vnode. There is no sense in putting the vnode on | * vnode. There is no sense in putting the vnode on | ||||
* the mount point active list, only to remove it | * the mount point active list, only to remove it | ||||
* later during recycling. Inline the relevant part | * later during recycling. Inline the relevant part | ||||
* of vholdl(), to avoid triggering assertions or | * of vholdl(), to avoid triggering assertions or | ||||
* activating. | * activating. | ||||
*/ | */ | ||||
freevnodes--; | freevnodes--; | ||||
vp->v_iflag &= ~VI_FREE; | |||||
VNODE_REFCOUNT_FENCE_REL(); | |||||
refcount_acquire(&vp->v_holdcnt); | |||||
mtx_unlock(&vnode_free_list_mtx); | mtx_unlock(&vnode_free_list_mtx); | ||||
VI_UNLOCK(vp); | |||||
vtryrecycle(vp); | vtryrecycle(vp); | ||||
/* | /* | ||||
* If the recycle succeeded, this vdrop will actually free | * If recycle failed, the vnode is at the tail or in the batch. | ||||
* the vnode. If not, it will simply place it back on | |||||
* the free list. | |||||
*/ | */ | ||||
vdrop(vp); | |||||
mtx_lock(&vnode_free_list_mtx); | mtx_lock(&vnode_free_list_mtx); | ||||
} | } | ||||
} | } | ||||
void | void | ||||
vnlru_free(int count, struct vfsops *mnt_op) | vnlru_free(int count, struct vfsops *mnt_op) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 206 Lines • ▼ Show 20 Lines | |||||
* before we actually vgone(). This function must be called with the vnode | * before we actually vgone(). This function must be called with the vnode | ||||
* held to prevent the vnode from being returned to the free list midway | * held to prevent the vnode from being returned to the free list midway | ||||
* through vgone(). | * through vgone(). | ||||
*/ | */ | ||||
static int | static int | ||||
vtryrecycle(struct vnode *vp) | vtryrecycle(struct vnode *vp) | ||||
{ | { | ||||
struct mount *vnmp; | struct mount *vnmp; | ||||
int error; | |||||
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | ||||
VNASSERT(vp->v_holdcnt, vp, | |||||
("vtryrecycle: Recycling vp %p without a reference.", vp)); | ASSERT_VI_LOCKED(vp, __func__); | ||||
VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp, | |||||
("%s: vnode already doomed", __func__)); | |||||
VNASSERT(vp->v_holdcnt == 0 && vp->v_usecount == 0, vp, | |||||
("%s: vnode has non-zero counts", __func__)); | |||||
/* | /* | ||||
* This vnode may be found and locked via some other list, if so we | * This vnode may be found and locked via some other list, if so we | ||||
* can't recycle it yet. | * can't recycle it yet. | ||||
*/ | */ | ||||
if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { | if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_IGNORE_INTERLOCK) != 0) { | ||||
CTR2(KTR_VFS, | CTR2(KTR_VFS, | ||||
"%s: impossible to recycle, vp %p lock is already held", | "%s: impossible to recycle, vp %p lock is already held", | ||||
__func__, vp); | __func__, vp); | ||||
return (EWOULDBLOCK); | error = EWOULDBLOCK; | ||||
goto out_no_recycle; | |||||
} | } | ||||
/* | /* | ||||
* Don't recycle if its filesystem is being suspended. | * Don't recycle if its filesystem is being suspended. | ||||
*/ | */ | ||||
if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { | if (vn_start_write(vp, &vnmp, V_NONBLOCKING) != 0) { | ||||
VOP_UNLOCK(vp, 0); | VOP_UNLOCK(vp, 0); | ||||
CTR2(KTR_VFS, | CTR2(KTR_VFS, | ||||
"%s: impossible to recycle, cannot start the write for %p", | "%s: impossible to recycle, cannot start the write for %p", | ||||
__func__, vp); | __func__, vp); | ||||
return (EBUSY); | error = EBUSY; | ||||
goto out_no_recycle; | |||||
} | } | ||||
/* | /* | ||||
* If we got this far, we need to acquire the interlock and see if | * Fake enough of a hold count for vgone et al to pass. While this allows | ||||
* anyone picked up this vnode from another list. If not, we will | * other threads to bump it with atomics, they will see a VI_DOOMED | ||||
* mark it with DOOMED via vgonel() so that anyone who does find it | * vnode by the time they get the interlock. | ||||
* will skip over it. | |||||
*/ | */ | ||||
VI_LOCK(vp); | vp->v_iflag &= ~VI_FREE; | ||||
if (vp->v_usecount) { | VNODE_REFCOUNT_FENCE_REL(); | ||||
VOP_UNLOCK(vp, 0); | refcount_acquire(&vp->v_holdcnt); | ||||
VI_UNLOCK(vp); | |||||
vn_finished_write(vnmp); | |||||
CTR2(KTR_VFS, | |||||
"%s: impossible to recycle, %p is already referenced", | |||||
__func__, vp); | |||||
return (EBUSY); | |||||
} | |||||
if ((vp->v_iflag & VI_DOOMED) == 0) { | |||||
counter_u64_add(recycles_count, 1); | |||||
vgonel(vp); | vgonel(vp); | ||||
} | |||||
VOP_UNLOCK(vp, 0); | VOP_UNLOCK(vp, 0); | ||||
VI_UNLOCK(vp); | vdropl(vp); | ||||
vn_finished_write(vnmp); | vn_finished_write(vnmp); | ||||
counter_u64_add(recycles_count, 1); | |||||
return (0); | return (0); | ||||
out_no_recycle: | |||||
kib: There should be no space before label. | |||||
/* | |||||
* Put the vnode back on the free list. | |||||
*/ | |||||
mtx_lock(&vnode_free_list_mtx); | |||||
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); | |||||
freevnodes++; | |||||
mtx_unlock(&vnode_free_list_mtx); | |||||
VI_UNLOCK(vp); | |||||
if (error == EBUSY && vnmp != NULL) | |||||
vfs_rel(vnmp); | |||||
return (error); | |||||
} | } | ||||
static void | static void | ||||
vcheckspace(void) | vcheckspace(void) | ||||
{ | { | ||||
if (vspace() < vlowat && vnlruproc_sig == 0) { | if (vspace() < vlowat && vnlruproc_sig == 0) { | ||||
vnlruproc_sig = 1; | vnlruproc_sig = 1; | ||||
▲ Show 20 Lines • Show All 3,577 Lines • ▼ Show 20 Lines | vop_strategy_pre(void *ap) | ||||
} | } | ||||
} | } | ||||
void | void | ||||
vop_lock_pre(void *ap) | vop_lock_pre(void *ap) | ||||
{ | { | ||||
struct vop_lock1_args *a = ap; | struct vop_lock1_args *a = ap; | ||||
if ((a->a_flags & LK_IGNORE_INTERLOCK) != 0) { | |||||
KASSERT(((a->a_flags & LK_NOWAIT) != 0), | |||||
("passing LK_IGNORE_INTERLOCK requires LK_NOWAIT")); | |||||
return; | |||||
} | |||||
if ((a->a_flags & LK_INTERLOCK) == 0) | if ((a->a_flags & LK_INTERLOCK) == 0) | ||||
ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); | ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); | ||||
else | else | ||||
ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); | ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); | ||||
} | } | ||||
void | void | ||||
vop_lock_post(void *ap, int rc) | vop_lock_post(void *ap, int rc) | ||||
{ | { | ||||
struct vop_lock1_args *a = ap; | struct vop_lock1_args *a = ap; | ||||
if ((a->a_flags & LK_IGNORE_INTERLOCK) == 0) | |||||
ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); | ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); | ||||
if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) | if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) | ||||
ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); | ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); | ||||
} | } | ||||
void | void | ||||
vop_unlock_pre(void *ap) | vop_unlock_pre(void *ap) | ||||
{ | { | ||||
struct vop_unlock_args *a = ap; | struct vop_unlock_args *a = ap; | ||||
▲ Show 20 Lines • Show All 855 Lines • Show Last 20 Lines |
There should be no space before label.