diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c
--- a/sys/fs/nullfs/null_subr.c
+++ b/sys/fs/nullfs/null_subr.c
@@ -263,16 +263,14 @@
 	 * some time after construction, which is typical case.
 	 * null_open rechecks.
 	 */
-	if ((lowervp->v_irflag & VIRF_PGREAD) != 0) {
+	if ((vn_irflag_read(lowervp) & VIRF_PGREAD) != 0) {
 		MPASS(lowervp->v_object != NULL);
-		if ((vp->v_irflag & VIRF_PGREAD) == 0) {
+		if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
 			if (vp->v_object == NULL)
 				vp->v_object = lowervp->v_object;
 			else
 				MPASS(vp->v_object == lowervp->v_object);
-			VI_LOCK(vp);
-			vp->v_irflag |= VIRF_PGREAD;
-			VI_UNLOCK(vp);
+			vn_irflag_set_cond(vp, VIRF_PGREAD);
 		} else {
 			MPASS(vp->v_object != NULL);
 		}
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -458,12 +458,10 @@
 	retval = null_bypass(&ap->a_gen);
 	if (retval == 0) {
 		vp->v_object = ldvp->v_object;
-		if ((ldvp->v_irflag & VIRF_PGREAD) != 0) {
+		if ((vn_irflag_read(ldvp) & VIRF_PGREAD) != 0) {
 			MPASS(vp->v_object != NULL);
-			if ((vp->v_irflag & VIRF_PGREAD) == 0) {
-				VI_LOCK(vp);
-				vp->v_irflag |= VIRF_PGREAD;
-				VI_UNLOCK(vp);
+			if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
+				vn_irflag_set_cond(vp, VIRF_PGREAD);
 			}
 		}
 	}
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -698,7 +698,7 @@
 		vp->v_object = object;
 		object->un_pager.swp.swp_tmpfs = vp;
 		vm_object_set_flag(object, OBJ_TMPFS);
-		vp->v_irflag |= VIRF_PGREAD;
+		vn_irflag_set_locked(vp, VIRF_PGREAD);
 		VI_UNLOCK(vp);
 		VM_OBJECT_WUNLOCK(object);
 		break;
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -600,7 +600,7 @@
 	int error;
 
 	vp = v->a_vp;
-	VNPASS((vp->v_irflag & VIRF_PGREAD) != 0, vp);
+	VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);
 
 	if (v->a_uio->uio_offset < 0)
 		return (EINVAL);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1717,6 +1717,7 @@
 	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
 	vp->v_type = VNON;
 	vp->v_op = vops;
+	vp->v_irflag = 0;
 	v_init_counters(vp);
 	vp->v_bufobj.bo_ops = &buf_ops_bio;
 #ifdef DIAGNOSTIC
@@ -1821,7 +1822,6 @@
 	vp->v_rdev = NULL;
 	vp->v_fifoinfo = NULL;
 	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
-	vp->v_irflag = 0;
 	vp->v_iflag = 0;
 	vp->v_vflag = 0;
 	bo->bo_flag = 0;
@@ -3868,14 +3868,14 @@
 	/*
 	 * Don't vgonel if we're already doomed.
 	 */
-	if (vp->v_irflag & VIRF_DOOMED)
+	if (VN_IS_DOOMED(vp))
 		return;
 	/*
 	 * Paired with freevnode.
 	 */
 	vn_seqc_write_begin_locked(vp);
 	vunlazy_gone(vp);
-	vp->v_irflag |= VIRF_DOOMED;
+	vn_irflag_set_locked(vp, VIRF_DOOMED);
 
 	/*
 	 * Check to see if the vnode is in use.  If so, we have to
@@ -4001,6 +4001,7 @@
 	char buf[256], buf2[16];
 	u_long flags;
 	u_int holdcnt;
+	short irflag;
 
 	va_start(ap, fmt);
 	vprintf(fmt, ap);
@@ -4036,11 +4037,12 @@
 
 	buf[0] = '\0';
 	buf[1] = '\0';
-	if (vp->v_irflag & VIRF_DOOMED)
+	irflag = vn_irflag_read(vp);
+	if (irflag & VIRF_DOOMED)
 		strlcat(buf, "|VIRF_DOOMED", sizeof(buf));
-	if (vp->v_irflag & VIRF_PGREAD)
+	if (irflag & VIRF_PGREAD)
 		strlcat(buf, "|VIRF_PGREAD", sizeof(buf));
-	flags = vp->v_irflag & ~(VIRF_DOOMED | VIRF_PGREAD);
+	flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD);
 	if (flags != 0) {
 		snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags);
 		strlcat(buf, buf2, sizeof(buf));
@@ -6794,3 +6796,66 @@
 	vn_seqc_write_end_locked(vp);
 	VI_UNLOCK(vp);
 }
+
+void
+vn_irflag_set_locked(struct vnode *vp, short toset)
+{
+	short flags;
+
+	ASSERT_VI_LOCKED(vp, __func__);
+	flags = vn_irflag_read(vp);
+	VNASSERT((flags & toset) == 0, vp,
+	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
+	    __func__, flags, toset));
+	atomic_store_short(&vp->v_irflag, flags | toset);
+}
+
+void
+vn_irflag_set(struct vnode *vp, short toset)
+{
+
+	VI_LOCK(vp);
+	vn_irflag_set_locked(vp, toset);
+	VI_UNLOCK(vp);
+}
+
+void
+vn_irflag_set_cond_locked(struct vnode *vp, short toset)
+{
+	short flags;
+
+	ASSERT_VI_LOCKED(vp, __func__);
+	flags = vn_irflag_read(vp);
+	atomic_store_short(&vp->v_irflag, flags | toset);
+}
+
+void
+vn_irflag_set_cond(struct vnode *vp, short toset)
+{
+
+	VI_LOCK(vp);
+	vn_irflag_set_cond_locked(vp, toset);
+	VI_UNLOCK(vp);
+}
+
+void
+vn_irflag_unset_locked(struct vnode *vp, short tounset)
+{
+	short flags;
+
+	ASSERT_VI_LOCKED(vp, __func__);
+	flags = vn_irflag_read(vp);
+	VNASSERT((flags & tounset) == tounset, vp,
+	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
+	    __func__, flags, tounset));
+	atomic_store_short(&vp->v_irflag, flags & ~tounset);
+}
+
+void
+vn_irflag_unset(struct vnode *vp, short tounset)
+{
+
+	VI_LOCK(vp);
+	vn_irflag_unset_locked(vp, tounset);
+	VI_UNLOCK(vp);
+}
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -991,7 +991,7 @@
 	 * allows us to avoid unneeded work outright.
 	 */
 	if (vn_io_pgcache_read_enable && !mac_vnode_check_read_enabled() &&
-	    (vp->v_irflag & (VIRF_DOOMED | VIRF_PGREAD)) == VIRF_PGREAD) {
+	    (vn_irflag_read(vp) & (VIRF_DOOMED | VIRF_PGREAD)) == VIRF_PGREAD) {
 		error = VOP_READ_PGCACHE(vp, uio, ioflag, fp->f_cred);
 		if (error == 0) {
 			fp->f_nextoff[UIO_READ] = uio->uio_offset;
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -797,6 +797,14 @@
 #define	vn_rangelock_trywlock(vp, start, end)				\
 	rangelock_trywlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
 
+#define	vn_irflag_read(vp)	atomic_load_short(&(vp)->v_irflag)
+void vn_irflag_set_locked(struct vnode *vp, short toset);
+void vn_irflag_set(struct vnode *vp, short toset);
+void vn_irflag_set_cond_locked(struct vnode *vp, short toset);
+void vn_irflag_set_cond(struct vnode *vp, short toset);
+void vn_irflag_unset_locked(struct vnode *vp, short tounset);
+void vn_irflag_unset(struct vnode *vp, short tounset);
+
 int	vfs_cache_lookup(struct vop_lookup_args *ap);
 int	vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp);
 void	vfs_timestamp(struct timespec *);
@@ -979,7 +987,7 @@
 #define	VOP_UNSET_TEXT_CHECKED(vp)	VOP_UNSET_TEXT((vp))
 #endif
 
-#define	VN_IS_DOOMED(vp)	__predict_false((vp)->v_irflag & VIRF_DOOMED)
+#define	VN_IS_DOOMED(vp)	__predict_false((vn_irflag_read(vp) & VIRF_DOOMED) != 0)
 
 void	vput(struct vnode *vp);
 void	vrele(struct vnode *vp);
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -283,10 +283,8 @@
 
 	ip = VTOI(vp);
 	vnode_create_vobject(vp, DIP(ip, i_size), ap->a_td);
-	if (vp->v_type == VREG && (vp->v_irflag & VIRF_PGREAD) == 0) {
-		VI_LOCK(vp);
-		vp->v_irflag |= VIRF_PGREAD;
-		VI_UNLOCK(vp);
+	if (vp->v_type == VREG && (vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
+		vn_irflag_set_cond(vp, VIRF_PGREAD);
 	}
 
 	/*
@@ -2947,7 +2945,7 @@
 
 	uio = ap->a_uio;
 	vp = ap->a_vp;
-	VNPASS((vp->v_irflag & VIRF_PGREAD) != 0, vp);
+	VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);
 
 	if (uio->uio_resid > ptoa(io_hold_cnt) || uio->uio_offset < 0 ||
 	    (ap->a_ioflag & IO_DIRECT) != 0)
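
A minimal usage sketch of the accessors introduced by this diff, modeled on the ufs_open() hunk above. The enclosing example_enable_pgread() helper is a hypothetical name used only for illustration and is not part of the patch:

	/*
	 * Illustrative sketch only (not part of the patch): mark a regular
	 * vnode as eligible for VOP_READ_PGCACHE using the new accessors.
	 */
	static void
	example_enable_pgread(struct vnode *vp)
	{

		/* Lockless read of v_irflag; cheap check before any locking. */
		if (vp->v_type == VREG &&
		    (vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
			/*
			 * The _cond variant tolerates the flag having been set
			 * by a racing thread between the check above and the
			 * interlock acquisition inside the helper, whereas
			 * vn_irflag_set() asserts the flag is still clear.
			 */
			vn_irflag_set_cond(vp, VIRF_PGREAD);
		}
	}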