Index: sys/fs/tmpfs/tmpfs.h
===================================================================
--- sys/fs/tmpfs/tmpfs.h
+++ sys/fs/tmpfs/tmpfs.h
@@ -382,6 +382,8 @@
 	bool			tm_ronly;
 	/* Do not use namecache. */
 	bool			tm_nonc;
+	/* Do not update mtime. */
+	bool			tm_nomtime;
 };
 #define	TMPFS_LOCK(tm) mtx_lock(&(tm)->tm_allnode_lock)
 #define	TMPFS_UNLOCK(tm) mtx_unlock(&(tm)->tm_allnode_lock)
Index: sys/fs/tmpfs/tmpfs_vfsops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_vfsops.c
+++ sys/fs/tmpfs/tmpfs_vfsops.c
@@ -113,6 +113,8 @@
 	struct vnode *vp, *mvp;
 	struct vm_object *obj;
 
+	if (VFS_TO_TMPFS(mp)->tm_nomtime)
+		return;
 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 		if (vp->v_type != VREG) {
 			VI_UNLOCK(vp);
@@ -327,7 +329,7 @@
 	struct tmpfs_mount *tmp;
 	struct tmpfs_node *root;
 	int error;
-	bool nonc;
+	bool nomtime, nonc;
 	/* Size counters. */
 	u_quad_t pages;
 	off_t nodes_max, size_max, maxfilesize;
@@ -394,6 +396,7 @@
 	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
 		maxfilesize = 0;
 	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
+	nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0;
 
 	/* Do not allow mounts if we do not have enough memory to preserve
 	 * the minimum reserved pages. */
@@ -440,6 +443,7 @@
 	new_unrhdr64(&tmp->tm_ino_unr, 2);
 	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
 	tmp->tm_nonc = nonc;
+	tmp->tm_nomtime = nomtime;
 
 	/* Allocate the root node. */
 	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
Index: sys/vm/vm_object.h
===================================================================
--- sys/vm/vm_object.h
+++ sys/vm/vm_object.h
@@ -322,7 +322,8 @@
 vm_object_mightbedirty(vm_object_t object)
 {
 
-	return (object->type == OBJT_VNODE &&
+	return ((object->type == OBJT_VNODE || (object->type == OBJT_SWAP &&
+	    (object->flags & OBJ_TMPFS_NODE) != 0)) &&
 	    object->generation != object->cleangeneration);
 }
 
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -1017,6 +1017,10 @@
  * write out pages with PGA_NOSYNC set (originally comes from MAP_NOSYNC),
  * leaving the object dirty.
  *
+ * For swap objects backing tmpfs regular files, do not flush anything,
+ * but remove write protection on the mapped pages to update mtime through
+ * mmaped writes.
+ *
  * When stuffing pages asynchronously, allow clustering. XXX we need a
  * synchronous clustering mode implementation.
  *
@@ -1038,8 +1042,7 @@
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
-	if (object->type != OBJT_VNODE || !vm_object_mightbedirty(object) ||
-	    object->resident_page_count == 0)
+	if (!vm_object_mightbedirty(object) || object->resident_page_count == 0)
 		return (TRUE);
 
 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
@@ -1149,7 +1152,13 @@
 
 	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
 		ma[i] = tp;
-	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
+	if (object->type == OBJT_VNODE) {
+		vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
+	} else {
+		for (i = 0; i < count; i++)
+			vm_page_xunbusy(ma[i]);
+		runlen = count - mreq + 1;
+	}
 	return (runlen);
 }
 
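Note for reviewers (not part of the patch): below is a minimal sketch of how the new value-less "nomtime" option can be handed to the kernel from userland via nmount(2). The mount point path and the set_iov() helper are hypothetical; the option carries no value, which matches the presence-only vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0 check in tmpfs_mount() above.

/*
 * Illustrative sketch: mount tmpfs with the proposed "nomtime" option.
 * "/mnt/obj" and set_iov() are hypothetical, chosen only for the example.
 */
#include <sys/param.h>
#include <sys/uio.h>
#include <sys/mount.h>

#include <err.h>
#include <string.h>

static void
set_iov(struct iovec *iov, const char *name, const char *val)
{
	/* Each option is a name/value pair of iovecs; a NULL value with
	 * zero length denotes a flag-style (value-less) option. */
	iov[0].iov_base = __DECONST(char *, name);
	iov[0].iov_len = strlen(name) + 1;
	iov[1].iov_base = __DECONST(char *, val);
	iov[1].iov_len = val != NULL ? strlen(val) + 1 : 0;
}

int
main(void)
{
	struct iovec iov[8];

	set_iov(&iov[0], "fstype", "tmpfs");
	set_iov(&iov[2], "fspath", "/mnt/obj");	/* hypothetical mount point */
	set_iov(&iov[4], "from", "tmpfs");
	set_iov(&iov[6], "nomtime", NULL);	/* presence-only flag */

	if (nmount(iov, nitems(iov), 0) != 0)
		err(1, "nmount");
	return (0);
}

Since mount(8) forwards -o options through nmount(2), "mount -t tmpfs -o nomtime tmpfs /mnt/obj" should have the same effect.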