Index: sys/fs/tmpfs/tmpfs.h
===================================================================
--- sys/fs/tmpfs/tmpfs.h
+++ sys/fs/tmpfs/tmpfs.h
@@ -316,10 +316,11 @@
 #define TMPFS_ASSERT_LOCKED(node) (void)0
 #endif
 
-#define	TMPFS_VNODE_ALLOCATING	1
-#define	TMPFS_VNODE_WANT	2
-#define	TMPFS_VNODE_DOOMED	4
-#define	TMPFS_VNODE_WRECLAIM	8
+#define	TMPFS_VNODE_ALLOCATING	0x01
+#define	TMPFS_VNODE_WANT	0x02
+#define	TMPFS_VNODE_DOOMED	0x04
+#define	TMPFS_VNODE_WRECLAIM	0x08
+#define	TMPFS_VNODE_MMAPPED	0x10
 
 /*
  * Internal representation of a tmpfs mount point.
Index: sys/fs/tmpfs/tmpfs_vfsops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_vfsops.c
+++ sys/fs/tmpfs/tmpfs_vfsops.c
@@ -98,15 +98,53 @@
 	"from", "export", "nomtime", "size", NULL
 };
 
-/*
- * Handle updates of time from writes to mmaped regions, if allowed.
- * Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
- * unmap of the tmpfs-backed vnode does not call vinactive(), due to
- * vm object type is OBJT_SWAP.  If lazy, only handle delayed update
- * of mtime due to the writes to mapped files.
- */
+static int
+tmpfs_update_mtime_lazy_filter(struct vnode *vp, void *arg)
+{
+	struct vm_object *obj;
+
+	if (vp->v_type != VREG)
+		return (0);
+
+	if (vp->v_writecount <= 0)
+		return (1);
+
+	obj = atomic_load_ptr(&vp->v_object);
+	if (obj == NULL)
+		return (0);
+
+	return (obj->generation != obj->cleangeneration);
+}
+
+static void
+tmpfs_update_mtime_lazy(struct mount *mp)
+{
+	struct vnode *vp, *mvp;
+	struct tmpfs_node *node;
+
+	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, tmpfs_update_mtime_lazy_filter, NULL) {
+		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) != 0)
+			continue;
+		tmpfs_check_mtime(vp);
+		if (vp->v_writecount <= 0) {
+			/*
+			 * The vnode is no longer mmapped for writing.
+			 * Remove the extra hold we have on it.
+			 */
+			node = VP_TO_TMPFS_NODE(vp);
+			TMPFS_NODE_LOCK(node);
+			if ((node->tn_vpstate & TMPFS_VNODE_MMAPPED) != 0) {
+				node->tn_vpstate &= ~TMPFS_VNODE_MMAPPED;
+				vdrop(vp);
+			}
+			TMPFS_NODE_UNLOCK(node);
+		}
+		vput(vp);
+	}
+}
+
 static void
-tmpfs_update_mtime(struct mount *mp, bool lazy)
+tmpfs_update_mtime_all(struct mount *mp)
 {
 	struct vnode *vp, *mvp;
 	struct vm_object *obj;
@@ -121,26 +159,11 @@
 		obj = vp->v_object;
 		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
 		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
-
-		/*
-		 * In lazy case, do unlocked read, avoid taking vnode
-		 * lock if not needed.  Lost update will be handled on
-		 * the next call.
-		 * For non-lazy case, we must flush all pending
-		 * metadata changes now.
-		 */
-		if (!lazy || obj->generation != obj->cleangeneration) {
-			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
-			    curthread) != 0)
-				continue;
-			tmpfs_check_mtime(vp);
-			if (!lazy)
-				tmpfs_update(vp);
-			vput(vp);
-		} else {
-			VI_UNLOCK(vp);
+		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) != 0)
 			continue;
-		}
+		tmpfs_check_mtime(vp);
+		tmpfs_update(vp);
+		vput(vp);
 	}
 }
 
@@ -302,7 +325,7 @@
 		MNT_IUNLOCK(mp);
 		for (;;) {
 			tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
-			tmpfs_update_mtime(mp, false);
+			tmpfs_update_mtime_all(mp);
 			error = vflush(mp, 0, flags, curthread);
 			if (error != 0) {
 				VFS_TO_TMPFS(mp)->tm_ronly = 0;
@@ -654,7 +677,7 @@
 		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
 		MNT_IUNLOCK(mp);
 	} else if (waitfor == MNT_LAZY) {
-		tmpfs_update_mtime(mp, true);
+		tmpfs_update_mtime_lazy(mp);
 	}
 	return (0);
 }
Index: sys/fs/tmpfs/tmpfs_vnops.h
===================================================================
--- sys/fs/tmpfs/tmpfs_vnops.h
+++ sys/fs/tmpfs/tmpfs_vnops.h
@@ -56,5 +56,6 @@
 vop_pathconf_t	tmpfs_pathconf;
 vop_print_t	tmpfs_print;
 vop_reclaim_t	tmpfs_reclaim;
+vop_mmapped_t	tmpfs_mmapped;
 
 #endif /* _FS_TMPFS_TMPFS_VNOPS_H_ */
Index: sys/fs/tmpfs/tmpfs_vnops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_vnops.c
+++ sys/fs/tmpfs/tmpfs_vnops.c
@@ -1443,6 +1443,10 @@
 	cache_purge(vp);
 
 	TMPFS_NODE_LOCK(node);
+	if ((node->tn_vpstate & TMPFS_VNODE_MMAPPED) != 0) {
+		node->tn_vpstate &= ~TMPFS_VNODE_MMAPPED;
+		vdrop(vp);
+	}
 	tmpfs_free_vp(vp);
 
 	/* If the node referenced by this vnode was deleted by the user,
@@ -1460,6 +1464,33 @@
 	return 0;
 }
 
+int
+tmpfs_mmapped(struct vop_mmapped_args *v)
+{
+	struct vnode *vp = v->a_vp;
+	struct tmpfs_node *node;
+
+	node = VP_TO_TMPFS_NODE(vp);
+
+	/*
+	 * Exec mapping: v_writecount is negative, no mtime to track.
+	 */
+	if (vp->v_writecount < 0)
+		return (0);
+
+	if ((node->tn_vpstate & TMPFS_VNODE_MMAPPED) != 0)
+		return (0);
+
+	vlazy(vp);
+	TMPFS_NODE_LOCK(node);
+	if ((node->tn_vpstate & TMPFS_VNODE_MMAPPED) == 0) {
+		node->tn_vpstate |= TMPFS_VNODE_MMAPPED;
+		vholdnz(vp);
+	}
+	TMPFS_NODE_UNLOCK(node);
+	return (0);
+}
+
 int
 tmpfs_print(struct vop_print_args *v)
 {
@@ -1732,6 +1763,7 @@
 	.vop_remove =			tmpfs_remove,
 	.vop_link =			tmpfs_link,
 	.vop_rename =			tmpfs_rename,
+	.vop_mmapped =			tmpfs_mmapped,
 	.vop_mkdir =			tmpfs_mkdir,
 	.vop_rmdir =			tmpfs_rmdir,
 	.vop_symlink =			tmpfs_symlink,
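
Note on the lifecycle (reviewer's summary, not part of the patch): the
first time a regular-file vnode is mmapped, tmpfs_mmapped() places it on
the mount's lazy vnode list with vlazy() and takes an extra hold with
vholdnz(), recording that in tn_vpstate as TMPFS_VNODE_MMAPPED.  The
MNT_LAZY sync path then walks only the lazy list; it drops the hold once
v_writecount shows no writable mappings remain, and tmpfs_reclaim()
drops it if the vnode is reclaimed first.  The extra hold matters
because, as the deleted comment notes, unmapping a tmpfs-backed vnode
does not call vinactive(), so a pending mtime update could otherwise be
lost once the last reference goes away.

The user-visible behavior being preserved can be exercised from
userland.  A minimal sketch follows, assuming the file lives on a
tmpfs-backed /tmp and the default 30-second syncer interval; the path
name is made up for illustration.

/*
 * Illustrative check, not part of the patch: dirty a tmpfs file only
 * through a shared mapping and wait for the syncer to surface the
 * mtime update.
 */
#include <sys/mman.h>
#include <sys/stat.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/tmp/tmpfs-mtime-test";	/* assumed tmpfs */
	struct stat before, after;
	size_t len;
	char *p;
	int fd, i;

	len = getpagesize();
	if ((fd = open(path, O_RDWR | O_CREAT, 0600)) == -1)
		err(1, "open");
	if (ftruncate(fd, len) == -1)
		err(1, "ftruncate");
	if (fstat(fd, &before) == -1)
		err(1, "fstat");

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	sleep(1);
	p[0] = 'x';		/* dirty the page through the mapping only */
	if (munmap(p, len) == -1)
		err(1, "munmap");

	/*
	 * Unmapping alone does not update the timestamp (no vinactive()
	 * for tmpfs); poll until the syncer has called
	 * tmpfs_check_mtime() for this vnode.
	 */
	for (i = 0; i < 40; i++) {
		if (fstat(fd, &after) == -1)
			err(1, "fstat");
		if (after.st_mtim.tv_sec != before.st_mtim.tv_sec ||
		    after.st_mtim.tv_nsec != before.st_mtim.tv_nsec)
			break;
		sleep(1);
	}
	printf("mtime %s\n", i < 40 ? "updated" : "not updated");

	close(fd);
	unlink(path);
	return (0);
}

With the patch applied, the program should report "updated" within one
syncer pass, while the MNT_LAZY scan touches only lazily-listed vnodes
instead of every vnode on the mount.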