Changeset View
Changeset View
Standalone View
Standalone View
sys/ufs/ffs/ffs_vnops.c
Show First 20 Lines • Show All 247 Lines • ▼ Show 20 Lines | |||||
ffs_syncvnode(struct vnode *vp, int waitfor, int flags) | ffs_syncvnode(struct vnode *vp, int waitfor, int flags) | ||||
{ | { | ||||
struct inode *ip; | struct inode *ip; | ||||
struct bufobj *bo; | struct bufobj *bo; | ||||
struct ufsmount *ump; | struct ufsmount *ump; | ||||
struct buf *bp, *nbp; | struct buf *bp, *nbp; | ||||
ufs_lbn_t lbn; | ufs_lbn_t lbn; | ||||
int error, passes; | int error, passes; | ||||
bool still_dirty, wait; | bool still_dirty, unlocked, wait; | ||||
ip = VTOI(vp); | ip = VTOI(vp); | ||||
ip->i_flag &= ~IN_NEEDSYNC; | ip->i_flag &= ~IN_NEEDSYNC; | ||||
bo = &vp->v_bufobj; | bo = &vp->v_bufobj; | ||||
ump = VFSTOUFS(vp->v_mount); | ump = VFSTOUFS(vp->v_mount); | ||||
/* | /* | ||||
* When doing MNT_WAIT we must first flush all dependencies | * When doing MNT_WAIT we must first flush all dependencies | ||||
* on the inode. | * on the inode. | ||||
*/ | */ | ||||
if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT && | if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT && | ||||
(error = softdep_sync_metadata(vp)) != 0) { | (error = softdep_sync_metadata(vp)) != 0) { | ||||
if (ffs_fsfail_cleanup(ump, error)) | if (ffs_fsfail_cleanup(ump, error)) | ||||
error = 0; | error = 0; | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | /* | ||||
* Flush all dirty buffers associated with a vnode. | * Flush all dirty buffers associated with a vnode. | ||||
*/ | */ | ||||
error = 0; | error = 0; | ||||
passes = 0; | passes = 0; | ||||
wait = false; /* Always do an async pass first. */ | wait = false; /* Always do an async pass first. */ | ||||
unlocked = false; | |||||
lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1)); | lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1)); | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
loop: | loop: | ||||
TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) | TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) | ||||
bp->b_vflags &= ~BV_SCANNED; | bp->b_vflags &= ~BV_SCANNED; | ||||
TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { | TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { | ||||
/* | /* | ||||
* Reasons to skip this buffer: it has already been considered | * Reasons to skip this buffer: it has already been considered | ||||
Show All 32 Lines | TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { | ||||
if ((bp->b_flags & B_DELWRI) == 0) | if ((bp->b_flags & B_DELWRI) == 0) | ||||
panic("ffs_fsync: not dirty"); | panic("ffs_fsync: not dirty"); | ||||
/* | /* | ||||
* Check for dependencies and potentially complete them. | * Check for dependencies and potentially complete them. | ||||
*/ | */ | ||||
if (!LIST_EMPTY(&bp->b_dep) && | if (!LIST_EMPTY(&bp->b_dep) && | ||||
(error = softdep_sync_buf(vp, bp, | (error = softdep_sync_buf(vp, bp, | ||||
wait ? MNT_WAIT : MNT_NOWAIT)) != 0) { | wait ? MNT_WAIT : MNT_NOWAIT)) != 0) { | ||||
/* | |||||
* Lock order conflict, buffer was already unlocked, | |||||
* and vnode possibly unlocked. | |||||
*/ | |||||
if (error == ERELOOKUP) { | |||||
if (vp->v_data == NULL) | |||||
return (EBADF); | |||||
unlocked = true; | |||||
if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT && | |||||
(error = softdep_sync_metadata(vp)) != 0) { | |||||
if (ffs_fsfail_cleanup(ump, error)) | |||||
error = 0; | |||||
return (unlocked && error == 0 ? | |||||
ERELOOKUP : error); | |||||
} | |||||
/* Re-evaluate inode size */ | |||||
lbn = lblkno(ITOFS(ip), (ip->i_size + | |||||
ITOFS(ip)->fs_bsize - 1)); | |||||
goto next; | |||||
} | |||||
/* I/O error. */ | /* I/O error. */ | ||||
if (error != EBUSY) { | if (error != EBUSY) { | ||||
BUF_UNLOCK(bp); | BUF_UNLOCK(bp); | ||||
return (error); | return (error); | ||||
} | } | ||||
/* If we deferred once, don't defer again. */ | /* If we deferred once, don't defer again. */ | ||||
if ((bp->b_flags & B_DEFERRED) == 0) { | if ((bp->b_flags & B_DEFERRED) == 0) { | ||||
bp->b_flags |= B_DEFERRED; | bp->b_flags |= B_DEFERRED; | ||||
Show All 20 Lines | next: | ||||
* to start from a known point. | * to start from a known point. | ||||
*/ | */ | ||||
BO_LOCK(bo); | BO_LOCK(bo); | ||||
nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd); | nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd); | ||||
} | } | ||||
if (waitfor != MNT_WAIT) { | if (waitfor != MNT_WAIT) { | ||||
BO_UNLOCK(bo); | BO_UNLOCK(bo); | ||||
if ((flags & NO_INO_UPDT) != 0) | if ((flags & NO_INO_UPDT) != 0) | ||||
return (0); | return (unlocked ? ERELOOKUP : 0); | ||||
else | error = ffs_update(vp, 0); | ||||
return (ffs_update(vp, 0)); | if (error == 0 && unlocked) | ||||
error = ERELOOKUP; | |||||
return (error); | |||||
} | } | ||||
/* Drain IO to see if we're done. */ | /* Drain IO to see if we're done. */ | ||||
bufobj_wwait(bo, 0, 0); | bufobj_wwait(bo, 0, 0); | ||||
/* | /* | ||||
* Block devices associated with filesystems may have new I/O | * Block devices associated with filesystems may have new I/O | ||||
* requests posted for them even if the vnode is locked, so no | * requests posted for them even if the vnode is locked, so no | ||||
* amount of trying will get them clean. We make several passes | * amount of trying will get them clean. We make several passes | ||||
* as a best effort. | * as a best effort. | ||||
Show All 39 Lines | next: | ||||
if ((flags & DATA_ONLY) == 0) { | if ((flags & DATA_ONLY) == 0) { | ||||
if ((flags & NO_INO_UPDT) == 0) | if ((flags & NO_INO_UPDT) == 0) | ||||
error = ffs_update(vp, 1); | error = ffs_update(vp, 1); | ||||
if (DOINGSUJ(vp)) | if (DOINGSUJ(vp)) | ||||
softdep_journal_fsync(VTOI(vp)); | softdep_journal_fsync(VTOI(vp)); | ||||
} else if ((ip->i_flags & (IN_SIZEMOD | IN_IBLKDATA)) != 0) { | } else if ((ip->i_flags & (IN_SIZEMOD | IN_IBLKDATA)) != 0) { | ||||
error = ffs_update(vp, 1); | error = ffs_update(vp, 1); | ||||
} | } | ||||
if (error == 0 && unlocked) | |||||
error = ERELOOKUP; | |||||
return (error); | return (error); | ||||
} | } | ||||
/*
 * VOP_FDATASYNC(9) implementation for FFS.
 *
 * Synchronously (MNT_WAIT) flushes the vnode's dirty buffers via
 * ffs_syncvnode().  The DATA_ONLY flag limits the inode update to the
 * cases where retrieving the data depends on it (see the
 * IN_SIZEMOD/IN_IBLKDATA handling in ffs_syncvnode), which is the
 * fdatasync() contract: data plus only the metadata needed to read it
 * back.
 *
 * Returns 0 on success or an errno from ffs_syncvnode().
 */
static int
ffs_fdatasync(struct vop_fdatasync_args *ap)
{

	return (ffs_syncvnode(ap->a_vp, MNT_WAIT, DATA_ONLY));
}
static int | static int | ||||
ffs_lock(ap) | ffs_lock(ap) | ||||
struct vop_lock1_args /* { | struct vop_lock1_args /* { | ||||
struct vnode *a_vp; | struct vnode *a_vp; | ||||
int a_flags; | int a_flags; | ||||
struct thread *a_td; | |||||
char *file; | char *file; | ||||
int line; | int line; | ||||
} */ *ap; | } */ *ap; | ||||
{ | { | ||||
struct vnode *vp = ap->a_vp; | |||||
#ifdef DIAGNOSTIC | |||||
struct inode *ip; | |||||
#endif | |||||
int result; | |||||
#ifndef NO_FFS_SNAPSHOT | #ifndef NO_FFS_SNAPSHOT | ||||
struct vnode *vp; | |||||
int flags; | int flags; | ||||
struct lock *lkp; | struct lock *lkp; | ||||
int result; | |||||
/* | /* | ||||
* Adaptive spinning mixed with SU leads to trouble. use a giant hammer | * Adaptive spinning mixed with SU leads to trouble. use a giant hammer | ||||
* and only use it when LK_NODDLKTREAT is set. Currently this means it | * and only use it when LK_NODDLKTREAT is set. Currently this means it | ||||
* is only used during path lookup. | * is only used during path lookup. | ||||
*/ | */ | ||||
if ((ap->a_flags & LK_NODDLKTREAT) != 0) | if ((ap->a_flags & LK_NODDLKTREAT) != 0) | ||||
ap->a_flags |= LK_ADAPTIVE; | ap->a_flags |= LK_ADAPTIVE; | ||||
switch (ap->a_flags & LK_TYPE_MASK) { | switch (ap->a_flags & LK_TYPE_MASK) { | ||||
case LK_SHARED: | case LK_SHARED: | ||||
case LK_UPGRADE: | case LK_UPGRADE: | ||||
case LK_EXCLUSIVE: | case LK_EXCLUSIVE: | ||||
vp = ap->a_vp; | |||||
flags = ap->a_flags; | flags = ap->a_flags; | ||||
for (;;) { | for (;;) { | ||||
#ifdef DEBUG_VFS_LOCKS | #ifdef DEBUG_VFS_LOCKS | ||||
VNPASS(vp->v_holdcnt != 0, vp); | VNPASS(vp->v_holdcnt != 0, vp); | ||||
#endif | #endif | ||||
lkp = vp->v_vnlock; | lkp = vp->v_vnlock; | ||||
result = lockmgr_lock_flags(lkp, flags, | result = lockmgr_lock_flags(lkp, flags, | ||||
&VI_MTX(vp)->lock_object, ap->a_file, ap->a_line); | &VI_MTX(vp)->lock_object, ap->a_file, ap->a_line); | ||||
Show All 10 Lines | #endif | ||||
lockmgr_unlock(lkp); | lockmgr_unlock(lkp); | ||||
if ((flags & (LK_INTERLOCK | LK_NOWAIT)) == | if ((flags & (LK_INTERLOCK | LK_NOWAIT)) == | ||||
(LK_INTERLOCK | LK_NOWAIT)) | (LK_INTERLOCK | LK_NOWAIT)) | ||||
return (EBUSY); | return (EBUSY); | ||||
if ((flags & LK_TYPE_MASK) == LK_UPGRADE) | if ((flags & LK_TYPE_MASK) == LK_UPGRADE) | ||||
flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE; | flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE; | ||||
flags &= ~LK_INTERLOCK; | flags &= ~LK_INTERLOCK; | ||||
} | } | ||||
#ifdef DIAGNOSTIC | |||||
switch (ap->a_flags & LK_TYPE_MASK) { | |||||
case LK_UPGRADE: | |||||
case LK_EXCLUSIVE: | |||||
if (result == 0 && vp->v_vnlock->lk_recurse == 0) { | |||||
ip = VTOI(vp); | |||||
if (ip != NULL) | |||||
ip->i_lock_gen++; | |||||
} | |||||
} | |||||
#endif | |||||
break; | break; | ||||
default: | default: | ||||
#ifdef DIAGNOSTIC | |||||
if ((ap->a_flags & LK_TYPE_MASK) == LK_DOWNGRADE) { | |||||
ip = VTOI(vp); | |||||
if (ip != NULL) | |||||
ufs_unlock_tracker(ip); | |||||
} | |||||
#endif | |||||
result = VOP_LOCK1_APV(&ufs_vnodeops, ap); | result = VOP_LOCK1_APV(&ufs_vnodeops, ap); | ||||
break; | |||||
} | } | ||||
return (result); | |||||
#else | #else | ||||
/* | /* | ||||
* See above for an explanation. | * See above for an explanation. | ||||
*/ | */ | ||||
if ((ap->a_flags & LK_NODDLKTREAT) != 0) | if ((ap->a_flags & LK_NODDLKTREAT) != 0) | ||||
ap->a_flags |= LK_ADAPTIVE; | ap->a_flags |= LK_ADAPTIVE; | ||||
return (VOP_LOCK1_APV(&ufs_vnodeops, ap)); | #ifdef DIAGNOSTIC | ||||
if ((ap->a_flags & LK_TYPE_MASK) == LK_DOWNGRADE) { | |||||
ip = VTOI(vp); | |||||
if (ip != NULL) | |||||
ufs_unlock_tracker(ip); | |||||
} | |||||
#endif | #endif | ||||
result = VOP_LOCK1_APV(&ufs_vnodeops, ap); | |||||
#endif | |||||
#ifdef DIAGNOSTIC | |||||
switch (ap->a_flags & LK_TYPE_MASK) { | |||||
case LK_UPGRADE: | |||||
case LK_EXCLUSIVE: | |||||
if (result == 0 && vp->v_vnlock->lk_recurse == 0) { | |||||
ip = VTOI(vp); | |||||
if (ip != NULL) | |||||
ip->i_lock_gen++; | |||||
} | } | ||||
} | |||||
#endif | |||||
return (result); | |||||
} | |||||
#ifdef INVARIANTS | #ifdef INVARIANTS | ||||
static int | static int | ||||
ffs_unlock_debug(struct vop_unlock_args *ap) | ffs_unlock_debug(struct vop_unlock_args *ap) | ||||
{ | { | ||||
struct vnode *vp = ap->a_vp; | struct vnode *vp; | ||||
struct inode *ip = VTOI(vp); | struct inode *ip; | ||||
vp = ap->a_vp; | |||||
ip = VTOI(vp); | |||||
if (ip->i_flag & UFS_INODE_FLAG_LAZY_MASK_ASSERTABLE) { | if (ip->i_flag & UFS_INODE_FLAG_LAZY_MASK_ASSERTABLE) { | ||||
if ((vp->v_mflag & VMP_LAZYLIST) == 0) { | if ((vp->v_mflag & VMP_LAZYLIST) == 0) { | ||||
VI_LOCK(vp); | VI_LOCK(vp); | ||||
VNASSERT((vp->v_mflag & VMP_LAZYLIST), vp, | VNASSERT((vp->v_mflag & VMP_LAZYLIST), vp, | ||||
("%s: modified vnode (%x) not on lazy list", | ("%s: modified vnode (%x) not on lazy list", | ||||
__func__, ip->i_flag)); | __func__, ip->i_flag)); | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
} | } | ||||
} | } | ||||
#ifdef DIAGNOSTIC | |||||
if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && ip != NULL && | |||||
vp->v_vnlock->lk_recurse == 0) | |||||
ufs_unlock_tracker(ip); | |||||
#endif | |||||
return (VOP_UNLOCK_APV(&ufs_vnodeops, ap)); | return (VOP_UNLOCK_APV(&ufs_vnodeops, ap)); | ||||
} | } | ||||
#endif | #endif | ||||
static int | static int | ||||
ffs_read_hole(struct uio *uio, long xfersize, long *size) | ffs_read_hole(struct uio *uio, long xfersize, long *size) | ||||
{ | { | ||||
ssize_t saved_resid, tlen; | ssize_t saved_resid, tlen; | ||||
▲ Show 20 Lines • Show All 1,319 Lines • Show Last 20 Lines |