sys/kern/vfs_subr.c
(1,778 lines skipped)
	KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
	    ("Active vnode list size %d > Vnode list size %d",
	    mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
	if (vp->v_iflag & VI_ACTIVE) {
		vp->v_iflag &= ~VI_ACTIVE;
		mtx_lock(&mp->mnt_listmtx);
		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
		mp->mnt_activevnodelistsize--;
		mtx_unlock(&mp->mnt_listmtx);
	}
	if (vp->v_mflag & VMP_DIRTYLIST) {
		mtx_lock(&mp->mnt_listmtx);
		if (vp->v_mflag & VMP_DIRTYLIST) {
			vp->v_mflag &= ~VMP_DIRTYLIST;
			TAILQ_REMOVE(&mp->mnt_dirtyvnodelist, vp, v_dirtylist);
			mp->mnt_dirtyvnodelistsize--;
		}
		mtx_unlock(&mp->mnt_listmtx);
	}
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
(1,211 lines skipped)
 */
int
vrefcnt(struct vnode *vp)
{

	return (vp->v_usecount);
}
void
vdirty(struct vnode *vp)
{
	struct mount *mp;

	VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
	/*
	 * Unlocked fast path: if the flag is already set there is nothing
	 * to do.  The flag is re-tested below with the list lock held,
	 * which makes the check authoritative before inserting.
	 */
	if ((vp->v_mflag & VMP_DIRTYLIST) != 0)
		return;
	mp = vp->v_mount;
	mtx_lock(&mp->mnt_listmtx);
	if ((vp->v_mflag & VMP_DIRTYLIST) == 0) {
		vp->v_mflag |= VMP_DIRTYLIST;
		TAILQ_INSERT_TAIL(&mp->mnt_dirtyvnodelist, vp, v_dirtylist);
		mp->mnt_dirtyvnodelistsize++;
	}
	mtx_unlock(&mp->mnt_listmtx);
}
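
vdirty() tests VMP_DIRTYLIST once without the lock to skip locking in the common case, then again under mnt_listmtx before inserting. The unlocked test is safe here because the caller is required to hold the vnode, and the flag is only cleared when the hold count reaches zero or the vnode leaves the mount, so a set flag stays set for the duration of the call. A minimal user-space sketch of the same pattern; the names (mark_dirty(), struct node, list_lock) are illustrative and not part of this change:

```
#include <pthread.h>
#include <stdbool.h>

struct node {
	bool on_list;		/* writers must hold list_lock */
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static void
mark_dirty(struct node *n)
{
	if (n->on_list)		/* unlocked fast path; may be stale */
		return;
	pthread_mutex_lock(&list_lock);
	if (!n->on_list) {	/* authoritative re-check under the lock */
		n->on_list = true;
		n->next = list_head;
		list_head = n;
	}
	pthread_mutex_unlock(&list_lock);
}
```
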
static void
vdefer_inactive(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNASSERT(vp->v_iflag & VI_OWEINACT, vp,
	    ("%s: vnode without VI_OWEINACT", __func__));
	VNASSERT(!VN_IS_DOOMED(vp), vp,
	    ("%s: doomed vnode", __func__));
	if (vp->v_iflag & VI_DEFINACT) {
		VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count"));
		vdropl(vp);
		return;
	}
	/* Queue on the dirty list so the periodic walk will find us. */
	vdirty(vp);
	vp->v_iflag |= VI_DEFINACT;
	VI_UNLOCK(vp);
	counter_u64_add(deferred_inact, 1);
}
static void
vdefer_inactive_cond(struct vnode *vp)
{
(94 lines skipped)
	VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp,
	    ("vnode with usecount and VI_OWEINACT set"));
	if (error == 0) {
		if (vp->v_iflag & VI_OWEINACT)
			vinactive(vp);
		if (func != VPUTX_VUNREF)
			VOP_UNLOCK(vp);
		vdropl(vp);
	} else if (vp->v_iflag & VI_OWEINACT) {
		vdefer_inactive(vp);
kib (inline): You do not need a nested block there.

    } else if (vp->v_iflag & VI_OWEINACT) {…

mjg: ok
	} else {
		vdropl(vp);
	}
}

/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
(149 lines skipped; in vdrop_deactivate())
	VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
	    ("vnode with VI_OWEINACT set"));
	VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp,
	    ("vnode with VI_DEFINACT set"));
	VNASSERT(vp->v_holdcnt == 0, vp,
	    ("vdrop: freeing when we shouldn't"));
	mp = vp->v_mount;
	mtx_lock(&mp->mnt_listmtx);
	if (vp->v_mflag & VMP_DIRTYLIST) {
		vp->v_mflag &= ~VMP_DIRTYLIST;
		TAILQ_REMOVE(&mp->mnt_dirtyvnodelist, vp, v_dirtylist);
		mp->mnt_dirtyvnodelistsize--;
	}
	if (vp->v_iflag & VI_ACTIVE) {
		vp->v_iflag &= ~VI_ACTIVE;
		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
		mp->mnt_activevnodelistsize--;
	}
	TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist);
	mp->mnt_tmpfreevnodelistsize++;
	vp->v_iflag |= VI_FREE;
(561 lines skipped; in vn_printf())
	flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_FREE | VI_ACTIVE |
	    VI_DOINGINACT | VI_OWEINACT | VI_DEFINACT);
	if (flags != 0) {
		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
		strlcat(buf, buf2, sizeof(buf));
	}
	if (vp->v_mflag & VMP_TMPMNTFREELIST)
		strlcat(buf, "|VMP_TMPMNTFREELIST", sizeof(buf));
	if (vp->v_mflag & VMP_DIRTYLIST)
		strlcat(buf, "|VMP_DIRTYLIST", sizeof(buf));
	flags = vp->v_mflag & ~(VMP_TMPMNTFREELIST | VMP_DIRTYLIST);
	if (flags != 0) {
		snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags);
		strlcat(buf, buf2, sizeof(buf));
	}
	printf("    flags (%s)\n", buf + 1);
	if (mtx_owned(VI_MTX(vp)))
		printf(" VI_LOCKed");
	if (vp->v_object != NULL)
(203 lines skipped)
	if (jailed(mp->mnt_cred))
		db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
	db_printf(" }\n");
	db_printf("    mnt_ref = %d (with %d in the struct)\n",
	    vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref);
	db_printf("    mnt_gen = %d\n", mp->mnt_gen);
	db_printf("    mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
	db_printf("    mnt_activevnodelistsize = %d\n",
	    mp->mnt_activevnodelistsize);
	db_printf("    mnt_dirtyvnodelistsize = %d\n",
	    mp->mnt_dirtyvnodelistsize);
	db_printf("    mnt_writeopcount = %d (with %d in the struct)\n",
	    vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount);
	db_printf("    mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
	db_printf("    mnt_iosize_max = %d\n", mp->mnt_iosize_max);
	db_printf("    mnt_hashseed = %u\n", mp->mnt_hashseed);
	db_printf("    mnt_lockref = %d (with %d in the struct)\n",
	    vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref);
	db_printf("    mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
(335 lines skipped)
		if ((vp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == VI_OWEINACT)
			vinactive(vp);
		VOP_UNLOCK(vp);
		vdropl(vp);
		return;
	}
	vdefer_inactive_cond(vp);
}
static int
vfs_periodic_inactive_filter(struct vnode *vp, void *arg)
{

	return (vp->v_iflag & VI_DEFINACT);
}

static void __noinline
vfs_periodic_inactive(struct mount *mp, int flags)
{
	struct vnode *vp, *mvp;
	int lkflags;

	lkflags = LK_EXCLUSIVE | LK_INTERLOCK;
	if (flags != MNT_WAIT)
		lkflags |= LK_NOWAIT;

	MNT_VNODE_FOREACH_DIRTY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) {
		/* The filter ran unlocked; re-check under the interlock. */
		if ((vp->v_iflag & VI_DEFINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		vp->v_iflag &= ~VI_DEFINACT;
		vfs_deferred_inactive(vp, lkflags);
	}
}
static inline bool
vfs_want_msync(struct vnode *vp)
{
	struct vm_object *obj;

	/*
	 * This test may be performed without any locks held.
	 * We rely on vm_object's type stability.
	 */
	if (vp->v_vflag & VV_NOSYNC)
		return (false);
	obj = vp->v_object;
	return (obj != NULL && vm_object_mightbedirty(obj));
}

kib (inline): It is strange that you do not check VV_NOSYNC there. Once the flag is set, it is not cleared. And I do not like VOP_ISLOCKED() there, as before. It allows a rare, and then especially hard to notice, race where a vnode's pages could be left unsynced for at least one syncer run. I do not see why not use trylock.
mjg: The check in the filter is just a copy-paste of what msync was doing in the loop. I can add VV_NOSYNC, no problem. Similarly, VOP_ISLOCKED was there. I think it's rather iffy and I'm more than happy to just remove it.

static int
vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused)
{

	if (vp->v_vflag & VV_NOSYNC)
		return (false);
	if (vp->v_iflag & VI_DEFINACT)
		return (true);
	return (vfs_want_msync(vp));
}

static void __noinline
vfs_periodic_msync_inactive(struct mount *mp, int flags)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;
	struct thread *td;
	int lkflags, objflags;
	bool seen_defer;

	td = curthread;

	lkflags = LK_EXCLUSIVE | LK_INTERLOCK;
	if (flags != MNT_WAIT) {
		lkflags |= LK_NOWAIT;
		objflags = OBJPC_NOSYNC;
	} else {
		objflags = OBJPC_SYNC;
	}

	MNT_VNODE_FOREACH_DIRTY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) {
		seen_defer = false;
		if (vp->v_iflag & VI_DEFINACT) {
			vp->v_iflag &= ~VI_DEFINACT;
			seen_defer = true;
		}
		if (!vfs_want_msync(vp)) {
			if (seen_defer)
				vfs_deferred_inactive(vp, lkflags);
(1,510 lines skipped; in __mnt_vnode_markerfree_all())
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * These are helper functions for filesystems to traverse their
 * dirty vnodes.  See MNT_VNODE_FOREACH_DIRTY() in sys/mount.h
 */
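
The sys/mount.h side of the change is not part of this file. Modeled on the existing MNT_VNODE_FOREACH_ACTIVE() macros and on the __mnt_vnode_*_dirty() entry points below, the header additions would look roughly like this; treat it as an inferred sketch rather than the exact header diff (the ABORT macro in particular is assumed by analogy with MNT_VNODE_FOREACH_ACTIVE_ABORT()):

```
/* Filter callback: return non-zero if the vnode should be visited. */
typedef int mnt_dirty_cb_t(struct vnode *, void *);

#define MNT_VNODE_FOREACH_DIRTY(vp, mp, mvp, cb, cbarg)			\
	for (vp = __mnt_vnode_first_dirty(&(mvp), (mp), (cb), (cbarg));	\
	    (vp) != NULL;						\
	    vp = __mnt_vnode_next_dirty(&(mvp), (mp), (cb), (cbarg)))

#define MNT_VNODE_FOREACH_DIRTY_ABORT(mp, mvp)				\
	__mnt_vnode_markerfree_dirty(&(mvp), (mp))
```
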
static void
mnt_vnode_markerfree_dirty(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	free(*mvp, M_VNODE_MARKER);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_dirty iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired.  On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_dirty_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{
	const struct vnode *tmp;
	bool held, ret;

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_dirtylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	ret = false;

	TAILQ_REMOVE(&mp->mnt_dirtyvnodelist, mvp, v_dirtylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_dirtylist);

	/*
	 * Use a hold to prevent vp from disappearing while the mount vnode
	 * list lock is dropped and reacquired.  Normally a hold would be
	 * acquired with vhold(), but that might try to acquire the vnode
	 * interlock, which would be a LOR with the mount vnode list lock.
	 */
	held = refcount_acquire_if_not_zero(&vp->v_holdcnt);
	mtx_unlock(&mp->mnt_listmtx);
	if (!held)
		goto abort;
	VI_LOCK(vp);
	if (!refcount_release_if_not_last(&vp->v_holdcnt)) {
		vdropl(vp);
		goto abort;
	}
	mtx_lock(&mp->mnt_listmtx);

	/*
	 * Determine whether the vnode is still the next one after the marker,
	 * excepting any other markers.  If the vnode has not been doomed by
	 * vgone() then the hold should have ensured that it remained on the
	 * dirty list.  If it has been doomed but is still on the dirty list,
	 * don't abort, but rather skip over it (avoid spinning on doomed
	 * vnodes).
	 */
	tmp = mvp;
	do {
		tmp = TAILQ_NEXT(tmp, v_dirtylist);
	} while (tmp != NULL && tmp->v_type == VMARKER);
	if (tmp != vp) {
		mtx_unlock(&mp->mnt_listmtx);
		VI_UNLOCK(vp);
		goto abort;
	}
	ret = true;
	goto out;
abort:
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
out:
	if (ret)
		ASSERT_VI_LOCKED(vp, __func__);
	else
		ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	return (ret);
}

static struct vnode *
mnt_vnode_next_dirty(struct vnode **mvp, struct mount *mp, mnt_dirty_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp, *nvp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
restart:
	vp = TAILQ_NEXT(*mvp, v_dirtylist);
	while (vp != NULL) {
		if (vp->v_type == VMARKER) {
			vp = TAILQ_NEXT(vp, v_dirtylist);
			continue;
		}
		/*
		 * See if we want to process the vnode.  Note we may encounter
		 * a long string of vnodes we don't care about and hog the
		 * list as a result.  Check for it and requeue the marker.
		 */
		if (VN_IS_DOOMED(vp) || !cb(vp, cbarg)) {
			if (!should_yield()) {
				vp = TAILQ_NEXT(vp, v_dirtylist);
				continue;
			}
			TAILQ_REMOVE(&mp->mnt_dirtyvnodelist, *mvp,
			    v_dirtylist);
			TAILQ_INSERT_AFTER(&mp->mnt_dirtyvnodelist, vp, *mvp,
			    v_dirtylist);
			mtx_unlock(&mp->mnt_listmtx);
			kern_yield(PRI_USER);
			mtx_lock(&mp->mnt_listmtx);
			goto restart;
		}
		/*
		 * Try-lock because this is the wrong lock order.  If that does
		 * not succeed, drop the mount vnode list lock and try to
		 * reacquire it and the vnode interlock in the right order.
		 */
		if (!VI_TRYLOCK(vp) &&
		    !mnt_vnode_next_dirty_relock(*mvp, mp, vp))
			goto restart;
		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
		    ("alien vnode on the dirty list %p %p", vp, mp));
		if (vp->v_mount == mp && !VN_IS_DOOMED(vp))
			break;
		nvp = TAILQ_NEXT(vp, v_dirtylist);
		VI_UNLOCK(vp);
		vp = nvp;
	}
	TAILQ_REMOVE(&mp->mnt_dirtyvnodelist, *mvp, v_dirtylist);

	/* Check if we are done */
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_dirty(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_dirtyvnodelist, vp, *mvp, v_dirtylist);
	mtx_unlock(&mp->mnt_listmtx);
	ASSERT_VI_LOCKED(vp, "dirty iter");
	KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp));
	return (vp);
}

struct vnode *
__mnt_vnode_next_dirty(struct vnode **mvp, struct mount *mp,
    mnt_dirty_cb_t *cb, void *cbarg)
{

	if (should_yield())
		kern_yield(PRI_USER);
	mtx_lock(&mp->mnt_listmtx);
	return (mnt_vnode_next_dirty(mvp, mp, cb, cbarg));
}

struct vnode *
__mnt_vnode_first_dirty(struct vnode **mvp, struct mount *mp,
    mnt_dirty_cb_t *cb, void *cbarg)
{
	struct vnode *vp;

	*mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);
	(*mvp)->v_type = VMARKER;
	(*mvp)->v_mount = mp;

	mtx_lock(&mp->mnt_listmtx);
	vp = TAILQ_FIRST(&mp->mnt_dirtyvnodelist);
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_dirty(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_BEFORE(vp, *mvp, v_dirtylist);
	return (mnt_vnode_next_dirty(mvp, mp, cb, cbarg));
}

void
__mnt_vnode_markerfree_dirty(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_dirtyvnodelist, *mvp, v_dirtylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_dirty(mvp, mp);
}
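
For illustration, a hypothetical consumer of the iterator; my_filter() and count_msync_candidates() are made-up names, not part of this change. The iterator hands each vnode back with its interlock held, so the loop body is responsible for releasing it, just as vfs_periodic_inactive() does above:

```
static int
my_filter(struct vnode *vp, void *arg __unused)
{

	return (vfs_want_msync(vp));
}

static int
count_msync_candidates(struct mount *mp)
{
	struct vnode *vp, *mvp;
	int n;

	n = 0;
	MNT_VNODE_FOREACH_DIRTY(vp, mp, mvp, my_filter, NULL) {
		n++;
		VI_UNLOCK(vp);	/* each vp comes back interlocked */
	}
	return (n);
}
```
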