Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/vfs_subr.c
Show First 20 Lines • Show All 131 Lines • ▼ Show 20 Lines | |||||
#define VNODE_REFCOUNT_FENCE_REL() atomic_thread_fence_rel() | #define VNODE_REFCOUNT_FENCE_REL() atomic_thread_fence_rel() | ||||
#else | #else | ||||
#define VNODE_REFCOUNT_FENCE_ACQ() | #define VNODE_REFCOUNT_FENCE_ACQ() | ||||
#define VNODE_REFCOUNT_FENCE_REL() | #define VNODE_REFCOUNT_FENCE_REL() | ||||
#endif | #endif | ||||
/* | /* | ||||
* Number of vnodes in existence. Increased whenever getnewvnode() | * Number of vnodes in existence. Increased whenever getnewvnode() | ||||
* allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode. | * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. | ||||
*/ | */ | ||||
static unsigned long numvnodes; | static unsigned long numvnodes; | ||||
SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, | SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, | ||||
"Number of vnodes in existence"); | "Number of vnodes in existence"); | ||||
static counter_u64_t vnodes_created; | static counter_u64_t vnodes_created; | ||||
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, | SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, | ||||
▲ Show 20 Lines • Show All 895 Lines • ▼ Show 20 Lines | if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) { | ||||
vdrop(vp); | vdrop(vp); | ||||
goto next_iter_mntunlocked; | goto next_iter_mntunlocked; | ||||
} | } | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
/* | /* | ||||
* v_usecount may have been bumped after VOP_LOCK() dropped | * v_usecount may have been bumped after VOP_LOCK() dropped | ||||
* the vnode interlock and before it was locked again. | * the vnode interlock and before it was locked again. | ||||
* | * | ||||
* It is not necessary to recheck VI_DOOMED because it can | * It is not necessary to recheck VIRF_DOOMED because it can | ||||
* only be set by another thread that holds both the vnode | * only be set by another thread that holds both the vnode | ||||
* lock and vnode interlock. If another thread has the | * lock and vnode interlock. If another thread has the | ||||
* vnode lock before we get to VOP_LOCK() and obtains the | * vnode lock before we get to VOP_LOCK() and obtains the | ||||
* vnode interlock after VOP_LOCK() drops the vnode | * vnode interlock after VOP_LOCK() drops the vnode | ||||
* interlock, the other thread will be unable to drop the | * interlock, the other thread will be unable to drop the | ||||
* vnode lock before our VOP_LOCK() call fails. | * vnode lock before our VOP_LOCK() call fails. | ||||
*/ | */ | ||||
if (vp->v_usecount || | if (vp->v_usecount || | ||||
(!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || | (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || | ||||
(vp->v_iflag & VI_FREE) != 0 || | (vp->v_iflag & VI_FREE) != 0 || | ||||
(vp->v_object != NULL && | (vp->v_object != NULL && | ||||
vp->v_object->resident_page_count > trigger)) { | vp->v_object->resident_page_count > trigger)) { | ||||
VOP_UNLOCK(vp, 0); | VOP_UNLOCK(vp, 0); | ||||
vdropl(vp); | vdropl(vp); | ||||
goto next_iter_mntunlocked; | goto next_iter_mntunlocked; | ||||
} | } | ||||
KASSERT((vp->v_iflag & VI_DOOMED) == 0, | KASSERT(!VN_IS_DOOMED(vp), | ||||
("VI_DOOMED unexpectedly detected in vlrureclaim()")); | ("VIRF_DOOMED unexpectedly detected in vlrureclaim()")); | ||||
counter_u64_add(recycles_count, 1); | counter_u64_add(recycles_count, 1); | ||||
vgonel(vp); | vgonel(vp); | ||||
VOP_UNLOCK(vp, 0); | VOP_UNLOCK(vp, 0); | ||||
vdropl(vp); | vdropl(vp); | ||||
done++; | done++; | ||||
next_iter_mntunlocked: | next_iter_mntunlocked: | ||||
if (!should_yield()) | if (!should_yield()) | ||||
goto relock_mnt; | goto relock_mnt; | ||||
▲ Show 20 Lines • Show All 1,612 Lines • ▼ Show 20 Lines | if (vp->v_type == VCHR && vp->v_rdev != NULL) { | ||||
dev_lock(); | dev_lock(); | ||||
vp->v_rdev->si_usecount--; | vp->v_rdev->si_usecount--; | ||||
dev_unlock(); | dev_unlock(); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Grab a particular vnode from the free list, increment its | * Grab a particular vnode from the free list, increment its | ||||
* reference count and lock it. VI_DOOMED is set if the vnode | * reference count and lock it. VIRF_DOOMED is set if the vnode | ||||
* is being destroyed. Only callers who specify LK_RETRY will | * is being destroyed. Only callers who specify LK_RETRY will | ||||
* see doomed vnodes. If inactive processing was delayed in | * see doomed vnodes. If inactive processing was delayed in | ||||
* vput try to do it here. | * vput try to do it here. | ||||
* | * | ||||
* Both holdcnt and usecount can be manipulated using atomics without holding | * Both holdcnt and usecount can be manipulated using atomics without holding | ||||
* any locks except in these cases which require the vnode interlock: | * any locks except in these cases which require the vnode interlock: | ||||
* holdcnt: 1->0 and 0->1 | * holdcnt: 1->0 and 0->1 | ||||
* usecount: 0->1 | * usecount: 0->1 | ||||
▲ Show 20 Lines • Show All 430 Lines • ▼ Show 20 Lines | |||||
#else | #else | ||||
atomic_add_int(&vp->v_holdcnt, 1); | atomic_add_int(&vp->v_holdcnt, 1); | ||||
#endif | #endif | ||||
} | } | ||||
/* | /* | ||||
* Drop the hold count of the vnode. If this is the last reference to | * Drop the hold count of the vnode. If this is the last reference to | ||||
* the vnode we place it on the free list unless it has been vgone'd | * the vnode we place it on the free list unless it has been vgone'd | ||||
* (marked VI_DOOMED) in which case we will free it. | * (marked VIRF_DOOMED) in which case we will free it. | ||||
* | * | ||||
* Because the vnode vm object keeps a hold reference on the vnode if | * Because the vnode vm object keeps a hold reference on the vnode if | ||||
* there is at least one resident non-cached page, the vnode cannot | * there is at least one resident non-cached page, the vnode cannot | ||||
* leave the active list without the page cleanup done. | * leave the active list without the page cleanup done. | ||||
*/ | */ | ||||
void | void | ||||
_vdrop(struct vnode *vp, bool locked) | _vdrop(struct vnode *vp, bool locked) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 109 Lines • ▼ Show 20 Lines | #ifdef INVARIANTS | ||||
/* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ | /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ | ||||
vp->v_op = NULL; | vp->v_op = NULL; | ||||
#endif | #endif | ||||
vp->v_mountedhere = NULL; | vp->v_mountedhere = NULL; | ||||
vp->v_unpcb = NULL; | vp->v_unpcb = NULL; | ||||
vp->v_rdev = NULL; | vp->v_rdev = NULL; | ||||
vp->v_fifoinfo = NULL; | vp->v_fifoinfo = NULL; | ||||
vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; | vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; | ||||
vp->v_irflag = 0; | |||||
vp->v_iflag = 0; | vp->v_iflag = 0; | ||||
vp->v_vflag = 0; | vp->v_vflag = 0; | ||||
bo->bo_flag = 0; | bo->bo_flag = 0; | ||||
uma_zfree(vnode_zone, vp); | uma_zfree(vnode_zone, vp); | ||||
} | } | ||||
/* | /* | ||||
* Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT | * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT | ||||
▲ Show 20 Lines • Show All 299 Lines • ▼ Show 20 Lines | vgonel(struct vnode *vp) | ||||
VNASSERT(vp->v_holdcnt, vp, | VNASSERT(vp->v_holdcnt, vp, | ||||
("vgonel: vp %p has no reference.", vp)); | ("vgonel: vp %p has no reference.", vp)); | ||||
CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | CTR2(KTR_VFS, "%s: vp %p", __func__, vp); | ||||
td = curthread; | td = curthread; | ||||
/* | /* | ||||
* Don't vgonel if we're already doomed. | * Don't vgonel if we're already doomed. | ||||
*/ | */ | ||||
if (vp->v_iflag & VI_DOOMED) | if (vp->v_irflag & VIRF_DOOMED) | ||||
rpokala: `VN_IS_DOOMED(vp)`? | |||||
mjg (author): I think it's cleaner, given the way the flag is set below, to not use the macro in here. | mjg: I think it's cleaner given the way the flag is set below to not use the macro in here. | |||||
return; | return; | ||||
vp->v_iflag |= VI_DOOMED; | vp->v_irflag |= VIRF_DOOMED; | ||||
/* | /* | ||||
* Check to see if the vnode is in use. If so, we have to call | * Check to see if the vnode is in use. If so, we have to call | ||||
* VOP_CLOSE() and VOP_INACTIVE(). | * VOP_CLOSE() and VOP_INACTIVE(). | ||||
*/ | */ | ||||
active = vp->v_usecount > 0; | active = vp->v_usecount > 0; | ||||
oweinact = (vp->v_iflag & VI_OWEINACT) != 0; | oweinact = (vp->v_iflag & VI_OWEINACT) != 0; | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
▲ Show 20 Lines • Show All 165 Lines • ▼ Show 20 Lines | flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | | ||||
VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | | VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | | ||||
VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); | VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); | ||||
if (flags != 0) { | if (flags != 0) { | ||||
snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); | snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); | ||||
strlcat(buf, buf2, sizeof(buf)); | strlcat(buf, buf2, sizeof(buf)); | ||||
} | } | ||||
if (vp->v_iflag & VI_MOUNT) | if (vp->v_iflag & VI_MOUNT) | ||||
strlcat(buf, "|VI_MOUNT", sizeof(buf)); | strlcat(buf, "|VI_MOUNT", sizeof(buf)); | ||||
if (vp->v_iflag & VI_DOOMED) | |||||
strlcat(buf, "|VI_DOOMED", sizeof(buf)); | |||||
if (vp->v_iflag & VI_FREE) | if (vp->v_iflag & VI_FREE) | ||||
strlcat(buf, "|VI_FREE", sizeof(buf)); | strlcat(buf, "|VI_FREE", sizeof(buf)); | ||||
if (vp->v_iflag & VI_ACTIVE) | if (vp->v_iflag & VI_ACTIVE) | ||||
strlcat(buf, "|VI_ACTIVE", sizeof(buf)); | strlcat(buf, "|VI_ACTIVE", sizeof(buf)); | ||||
if (vp->v_iflag & VI_DOINGINACT) | if (vp->v_iflag & VI_DOINGINACT) | ||||
strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); | strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); | ||||
if (vp->v_iflag & VI_OWEINACT) | if (vp->v_iflag & VI_OWEINACT) | ||||
strlcat(buf, "|VI_OWEINACT", sizeof(buf)); | strlcat(buf, "|VI_OWEINACT", sizeof(buf)); | ||||
if (vp->v_iflag & VI_TEXT_REF) | if (vp->v_iflag & VI_TEXT_REF) | ||||
strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); | strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); | ||||
flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE | | flags = vp->v_iflag & ~(VI_MOUNT | VI_FREE | VI_ACTIVE | VI_DOINGINACT | | ||||
VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT | VI_TEXT_REF); | VI_OWEINACT | VI_TEXT_REF); | ||||
if (flags != 0) { | if (flags != 0) { | ||||
snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); | snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); | ||||
strlcat(buf, buf2, sizeof(buf)); | strlcat(buf, buf2, sizeof(buf)); | ||||
} | } | ||||
if (vp->v_irflag & VIRF_DOOMED) | |||||
rpokala: `VN_IS_DOOMED(vp)`? | rpokala: `VN_IS_DOOMED(vp)`? | |||||
mjg (author): This dumps flags, so it should not. | mjg: This dumps flags so it should not. | |||||
strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); | |||||
flags = vp->v_irflag & ~(VIRF_DOOMED); | |||||
if (flags != 0) { | |||||
snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); | |||||
strlcat(buf, buf2, sizeof(buf)); | |||||
} | |||||
printf(" flags (%s)\n", buf + 1); | printf(" flags (%s)\n", buf + 1); | ||||
if (mtx_owned(VI_MTX(vp))) | if (mtx_owned(VI_MTX(vp))) | ||||
printf(" VI_LOCKed"); | printf(" VI_LOCKed"); | ||||
if (vp->v_object != NULL) | if (vp->v_object != NULL) | ||||
printf(" v_object %p ref %d pages %d " | printf(" v_object %p ref %d pages %d " | ||||
"cleanbuf %d dirtybuf %d\n", | "cleanbuf %d dirtybuf %d\n", | ||||
vp->v_object, vp->v_object->ref_count, | vp->v_object, vp->v_object->ref_count, | ||||
vp->v_object->resident_page_count, | vp->v_object->resident_page_count, | ||||
▲ Show 20 Lines • Show All 1,986 Lines • ▼ Show 20 Lines | __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) | ||||
struct vnode *vp; | struct vnode *vp; | ||||
if (should_yield()) | if (should_yield()) | ||||
kern_yield(PRI_USER); | kern_yield(PRI_USER); | ||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); | KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); | ||||
for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; | for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; | ||||
vp = TAILQ_NEXT(vp, v_nmntvnodes)) { | vp = TAILQ_NEXT(vp, v_nmntvnodes)) { | ||||
/* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ | /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ | ||||
if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) | if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) | ||||
continue; | continue; | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
if ((vp->v_iflag & VI_DOOMED) != 0) { | if ((vp->v_iflag & VI_DOOMED) != 0) { | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
continue; | continue; | ||||
} | } | ||||
break; | break; | ||||
} | } | ||||
Show All 16 Lines | __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) | ||||
*mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); | *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); | ||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
MNT_REF(mp); | MNT_REF(mp); | ||||
(*mvp)->v_mount = mp; | (*mvp)->v_mount = mp; | ||||
(*mvp)->v_type = VMARKER; | (*mvp)->v_type = VMARKER; | ||||
TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { | TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { | ||||
/* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ | /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ | ||||
if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) | if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) | ||||
continue; | continue; | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
if ((vp->v_iflag & VI_DOOMED) != 0) { | if ((vp->v_iflag & VI_DOOMED) != 0) { | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
continue; | continue; | ||||
} | } | ||||
break; | break; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 217 Lines • Show Last 20 Lines |
VN_IS_DOOMED(vp)?