Changeset View
Changeset View
Standalone View
Standalone View
sys/fs/tmpfs/tmpfs_vfsops.c
/*
 * Mount options accepted by tmpfs at mount time; NULL-terminated for
 * vfs_filteropt()-style scanning.
 */
static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", "nomtime", NULL
};

/* Subset of options that may be changed on an MNT_UPDATE remount. */
static const char *tmpfs_updateopts[] = {
	"from", "export", "nomtime", "size", NULL
};
/* | static int | ||||
* Handle updates of time from writes to mmaped regions, if allowed. | tmpfs_update_mtime_lazy_filter(struct vnode *vp, void *arg) | ||||
* Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since | { | ||||
* unmap of the tmpfs-backed vnode does not call vinactive(), due to | struct vm_object *obj; | ||||
* vm object type is basically OBJT_SWAP. If lazy, only handle | |||||
* delayed update of mtime due to the writes to mapped files. | if (vp->v_type != VREG) | ||||
*/ | return (0); | ||||
obj = atomic_load_ptr(&vp->v_object); | |||||
if (obj == NULL) | |||||
return (0); | |||||
return (vm_object_mightbedirty_(obj)); | |||||
kib: This should be spelled as vm_object_mightbedirty(obj) or vm_object_mightbedirty_(obj). | |||||
} | |||||
static void | static void | ||||
tmpfs_update_mtime(struct mount *mp, bool lazy) | tmpfs_update_mtime_lazy(struct mount *mp) | ||||
{ | { | ||||
struct vnode *vp, *mvp; | struct vnode *vp, *mvp; | ||||
struct vm_object *obj; | |||||
MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, tmpfs_update_mtime_lazy_filter, NULL) { | |||||
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) | |||||
continue; | |||||
tmpfs_check_mtime(vp); | |||||
vput(vp); | |||||
} | |||||
} | |||||
static void | |||||
tmpfs_update_mtime_all(struct mount *mp) | |||||
{ | |||||
struct vnode *vp, *mvp; | |||||
if (VFS_TO_TMPFS(mp)->tm_nomtime) | if (VFS_TO_TMPFS(mp)->tm_nomtime) | ||||
return; | return; | ||||
MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { | MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { | ||||
if (vp->v_type != VREG) { | if (vp->v_type != VREG) { | ||||
VI_UNLOCK(vp); | VI_UNLOCK(vp); | ||||
continue; | continue; | ||||
} | } | ||||
obj = vp->v_object; | |||||
MPASS(obj->type == tmpfs_pager_type); | |||||
MPASS((obj->flags & OBJ_TMPFS) != 0); | |||||
/* | |||||
* In lazy case, do unlocked read, avoid taking vnode | |||||
* lock if not needed. Lost update will be handled on | |||||
* the next call. | |||||
* For non-lazy case, we must flush all pending | |||||
* metadata changes now. | |||||
*/ | |||||
if (!lazy || obj->generation != obj->cleangeneration) { | |||||
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) | if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) | ||||
Not Done Inline ActionsYou did checked for obj == NULL in tmpfs_update_mtime_lazy_filter(), but not there. tmpfs_reclaim() only owns the vnode lock when clearing v_object to NULL. So either the vnode cannot appear on lazy list while being reclaimed (I doubt it), or this place needs the check. kib: You did checked for obj == NULL in tmpfs_update_mtime_lazy_filter(), but not there. | |||||
continue; | continue; | ||||
tmpfs_check_mtime(vp); | tmpfs_check_mtime(vp); | ||||
if (!lazy) | |||||
tmpfs_update(vp); | tmpfs_update(vp); | ||||
vput(vp); | vput(vp); | ||||
} else { | |||||
VI_UNLOCK(vp); | |||||
continue; | |||||
} | } | ||||
} | } | ||||
} | |||||
/*
 * Out-parameter block for tmpfs_check_rw_maps_cb(): the callback reports
 * through 'found' whether a writable mapping was seen.
 */
struct tmpfs_check_rw_maps_arg {
	bool	found;
};
static bool | static bool | ||||
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused, | tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused, | ||||
vm_map_entry_t entry __unused, void *arg) | vm_map_entry_t entry __unused, void *arg) | ||||
▲ Show 20 Lines • Show All 141 Lines • ▼ Show 20 Lines | if (!forced && tmpfs_check_rw_maps(mp)) { | ||||
goto out; | goto out; | ||||
} | } | ||||
VFS_TO_TMPFS(mp)->tm_ronly = 1; | VFS_TO_TMPFS(mp)->tm_ronly = 1; | ||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
mp->mnt_flag |= MNT_RDONLY; | mp->mnt_flag |= MNT_RDONLY; | ||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
for (;;) { | for (;;) { | ||||
tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL); | tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL); | ||||
tmpfs_update_mtime(mp, false); | tmpfs_update_mtime_all(mp); | ||||
error = vflush(mp, 0, flags, curthread); | error = vflush(mp, 0, flags, curthread); | ||||
if (error != 0) { | if (error != 0) { | ||||
VFS_TO_TMPFS(mp)->tm_ronly = 0; | VFS_TO_TMPFS(mp)->tm_ronly = 0; | ||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
mp->mnt_flag &= ~MNT_RDONLY; | mp->mnt_flag &= ~MNT_RDONLY; | ||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
goto out; | goto out; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 336 Lines • ▼ Show 20 Lines | |||||
tmpfs_sync(struct mount *mp, int waitfor) | tmpfs_sync(struct mount *mp, int waitfor) | ||||
{ | { | ||||
if (waitfor == MNT_SUSPEND) { | if (waitfor == MNT_SUSPEND) { | ||||
MNT_ILOCK(mp); | MNT_ILOCK(mp); | ||||
mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; | mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; | ||||
MNT_IUNLOCK(mp); | MNT_IUNLOCK(mp); | ||||
} else if (waitfor == MNT_LAZY) { | } else if (waitfor == MNT_LAZY) { | ||||
tmpfs_update_mtime(mp, true); | tmpfs_update_mtime_lazy(mp); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
tmpfs_init(struct vfsconf *conf) | tmpfs_init(struct vfsconf *conf) | ||||
{ | { | ||||
int res; | int res; | ||||
Show All 31 Lines |
This should be spelled as vm_object_mightbedirty(obj) or vm_object_mightbedirty_(obj).