Index: sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
===================================================================
--- sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
+++ sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
@@ -28,6 +28,7 @@
 __FBSDID("$FreeBSD$");
 
 #include
+#include
 #include
 #include
 
Index: sys/compat/cloudabi/cloudabi_vdso.c
===================================================================
--- sys/compat/cloudabi/cloudabi_vdso.c
+++ sys/compat/cloudabi/cloudabi_vdso.c
@@ -26,7 +26,8 @@
 #include
 __FBSDID("$FreeBSD$");
 
-#include
+#include
+#include
 #include
 #include
 #include
Index: sys/fs/tmpfs/tmpfs.h
===================================================================
--- sys/fs/tmpfs/tmpfs.h
+++ sys/fs/tmpfs/tmpfs.h
@@ -382,6 +382,8 @@
 	bool tm_ronly;
 	/* Do not use namecache. */
 	bool tm_nonc;
+	/* Do not update mtime. */
+	bool tm_nomtime;
 };
 #define TMPFS_LOCK(tm) mtx_lock(&(tm)->tm_allnode_lock)
 #define TMPFS_UNLOCK(tm) mtx_unlock(&(tm)->tm_allnode_lock)
Index: sys/fs/tmpfs/tmpfs_vfsops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_vfsops.c
+++ sys/fs/tmpfs/tmpfs_vfsops.c
@@ -92,20 +92,19 @@
 static const char *tmpfs_opts[] = {
 	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
-	"union", "nonc", NULL
+	"union", "nonc", "nomtime", NULL
 };
 
 static const char *tmpfs_updateopts[] = {
-	"from", "export", "size", NULL
+	"from", "export", "nomtime", "size", NULL
 };
 
 /*
- * Handle updates of time from writes to mmaped regions. Use
- * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
+ * Handle updates of time from writes to mmaped regions, if allowed.
+ * Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
  * unmap of the tmpfs-backed vnode does not call vinactive(), due to
- * vm object type is OBJT_SWAP.
- * If lazy, only handle delayed update of mtime due to the writes to
- * mapped files.
+ * vm object type is OBJT_SWAP. If lazy, only handle delayed update
+ * of mtime due to the writes to mapped files.
  */
 static void
 tmpfs_update_mtime(struct mount *mp, bool lazy)
 {
@@ -113,6 +112,8 @@
 	struct vnode *vp, *mvp;
 	struct vm_object *obj;
 
+	if (VFS_TO_TMPFS(mp)->tm_nomtime)
+		return;
 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		if (vp->v_type != VREG) {
 			VI_UNLOCK(vp);
@@ -327,7 +328,7 @@
 	struct tmpfs_mount *tmp;
 	struct tmpfs_node *root;
 	int error;
-	bool nonc;
+	bool nomtime, nonc;
 	/* Size counters. */
 	u_quad_t pages;
 	off_t nodes_max, size_max, maxfilesize;
@@ -346,6 +347,7 @@
 		/* Only support update mounts for certain options. */
 		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
 			return (EOPNOTSUPP);
+		tmp = VFS_TO_TMPFS(mp);
 		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
 			/*
 			 * On-the-fly resizing is not supported (yet). We still
@@ -354,21 +356,23 @@
 			 * parameter, say trying to change rw to ro or vice
 			 * versa, would cause vfs_filteropt() to bail.
 			 */
-			if (size_max != VFS_TO_TMPFS(mp)->tm_size_max)
+			if (size_max != tmp->tm_size_max)
 				return (EOPNOTSUPP);
 		}
 		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
-		    !(VFS_TO_TMPFS(mp)->tm_ronly)) {
+		    !tmp->tm_ronly) {
			/* RW -> RO */
 			return (tmpfs_rw_to_ro(mp));
 		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
-		    VFS_TO_TMPFS(mp)->tm_ronly) {
+		    tmp->tm_ronly) {
 			/* RO -> RW */
-			VFS_TO_TMPFS(mp)->tm_ronly = 0;
+			tmp->tm_ronly = 0;
 			MNT_ILOCK(mp);
 			mp->mnt_flag &= ~MNT_RDONLY;
 			MNT_IUNLOCK(mp);
 		}
+		tmp->tm_nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL,
+		    NULL) == 0;
 		return (0);
 	}
 
@@ -394,6 +398,7 @@
 	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
 		maxfilesize = 0;
 	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
+	nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0;
 
 	/* Do not allow mounts if we do not have enough memory to preserve
 	 * the minimum reserved pages. */
@@ -440,6 +445,7 @@
 	new_unrhdr64(&tmp->tm_ino_unr, 2);
 	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
 	tmp->tm_nonc = nonc;
+	tmp->tm_nomtime = nomtime;
 
 	/* Allocate the root node. */
 	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
Index: sys/vm/vm_object.h
===================================================================
--- sys/vm/vm_object.h
+++ sys/vm/vm_object.h
@@ -322,8 +322,15 @@
 vm_object_mightbedirty(vm_object_t object)
 {
 
-	return (object->type == OBJT_VNODE &&
-	    object->generation != object->cleangeneration);
+	if (object->type != OBJT_VNODE) {
+		if ((object->flags & OBJ_TMPFS_NODE) == 0)
+			return (false);
+#if defined(_KERNEL) && defined(KASSERT)
+		KASSERT(object->type == OBJT_SWAP,
+		    ("TMPFS_NODE obj %p is not swap", object));
+#endif
+	}
+	return (object->generation != object->cleangeneration);
 }
 
 void vm_object_clear_flag(vm_object_t object, u_short bits);
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -1017,6 +1017,10 @@
  * write out pages with PGA_NOSYNC set (originally comes from MAP_NOSYNC),
  * leaving the object dirty.
  *
+ * For swap objects backing tmpfs regular files, do not flush anything,
+ * but remove write protection on the mapped pages to update mtime through
+ * mmaped writes.
+ *
  * When stuffing pages asynchronously, allow clustering. XXX we need a
  * synchronous clustering mode implementation.
  *
@@ -1038,8 +1042,7 @@
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
-	if (object->type != OBJT_VNODE || !vm_object_mightbedirty(object) ||
-	    object->resident_page_count == 0)
+	if (!vm_object_mightbedirty(object) || object->resident_page_count == 0)
 		return (TRUE);
 
 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
@@ -1072,32 +1075,36 @@
 			vm_page_xunbusy(p);
 			continue;
 		}
+		if (object->type == OBJT_VNODE) {
+			n = vm_object_page_collect_flush(object, p, pagerflags,
+			    flags, &allclean, &eio);
+			if (eio) {
+				res = FALSE;
+				allclean = FALSE;
+			}
+			if (object->generation != curgeneration &&
+			    (flags & OBJPC_SYNC) != 0)
+				goto rescan;
-
-		n = vm_object_page_collect_flush(object, p, pagerflags,
-		    flags, &allclean, &eio);
-		if (eio) {
-			res = FALSE;
-			allclean = FALSE;
-		}
-		if (object->generation != curgeneration &&
-		    (flags & OBJPC_SYNC) != 0)
-			goto rescan;
-
-		/*
-		 * If the VOP_PUTPAGES() did a truncated write, so
-		 * that even the first page of the run is not fully
-		 * written, vm_pageout_flush() returns 0 as the run
-		 * length. Since the condition that caused truncated
-		 * write may be permanent, e.g. exhausted free space,
-		 * accepting n == 0 would cause an infinite loop.
-		 *
-		 * Forwarding the iterator leaves the unwritten page
-		 * behind, but there is not much we can do there if
-		 * filesystem refuses to write it.
-		 */
-		if (n == 0) {
+
+			/*
+			 * If the VOP_PUTPAGES() did a truncated write, so
+			 * that even the first page of the run is not fully
+			 * written, vm_pageout_flush() returns 0 as the run
+			 * length. Since the condition that caused truncated
+			 * write may be permanent, e.g. exhausted free space,
+			 * accepting n == 0 would cause an infinite loop.
+			 *
+			 * Forwarding the iterator leaves the unwritten page
+			 * behind, but there is not much we can do there if
+			 * filesystem refuses to write it.
+			 */
+			if (n == 0) {
+				n = 1;
+				allclean = FALSE;
+			}
+		} else {
 			n = 1;
-			allclean = FALSE;
+			vm_page_xunbusy(p);
 		}
 		np = vm_page_find_least(object, pi + n);
 	}
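
Not part of the patch: below is a minimal user-level sketch for exercising the behavior the change is about, namely that a store done purely through a MAP_SHARED mapping of a tmpfs file eventually shows up in st_mtime. The file path, size, and timing are assumptions for illustration only, and the new timestamp may become visible only after the msync() or after the next periodic filesystem sync. On a tmpfs mounted with the new "nomtime" option (for example something like "mount -t tmpfs -o nomtime tmpfs /mnt", assuming mount(8) passes the option through to nmount(2)), the mtime is expected to stay unchanged.

/*
 * Sketch only: dirty a tmpfs-backed file through a shared mapping,
 * without any write(2), and compare st_mtime before and after.
 * The path /tmp/mtime_test assumes /tmp is a tmpfs mount.
 */
#include <sys/mman.h>
#include <sys/stat.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct stat sb1, sb2;
	size_t len;
	char *p;
	int fd;

	len = getpagesize();
	fd = open("/tmp/mtime_test", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd == -1)
		err(1, "open");
	if (ftruncate(fd, len) == -1)
		err(1, "ftruncate");
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	if (fstat(fd, &sb1) == -1)
		err(1, "fstat");

	sleep(2);		/* Let the clock move past the old mtime. */
	p[0] = 'x';		/* The only write, done through the mapping. */
	if (msync(p, len, MS_SYNC) == -1)
		err(1, "msync");

	if (fstat(fd, &sb2) == -1)
		err(1, "fstat");
	printf("mtime %jd -> %jd\n", (intmax_t)sb1.st_mtime,
	    (intmax_t)sb2.st_mtime);
	return (0);
}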