Page Menu · Home · FreeBSD

D30065.id88452.diff
No One · Temporary

D30065.id88452.diff

Index: sys/fs/tmpfs/tmpfs_subr.c
===================================================================
--- sys/fs/tmpfs/tmpfs_subr.c
+++ sys/fs/tmpfs/tmpfs_subr.c
@@ -588,6 +588,8 @@
VM_OBJECT_WLOCK(obj);
VI_LOCK(vp);
+ VNASSERT((obj->flags & OBJ_TMPFS_VREF) == 0, vp,
+ ("referenced vnode when it should not be"));
vm_object_clear_flag(obj, OBJ_TMPFS);
obj->un_pager.swp.swp_tmpfs = NULL;
if (vp->v_writecount < 0)
Index: sys/fs/tmpfs/tmpfs_vfsops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_vfsops.c
+++ sys/fs/tmpfs/tmpfs_vfsops.c
@@ -99,15 +99,36 @@
"from", "export", "nomtime", "size", NULL
};
-/*
- * Handle updates of time from writes to mmaped regions, if allowed.
- * Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
- * unmap of the tmpfs-backed vnode does not call vinactive(), due to
- * vm object type is OBJT_SWAP. If lazy, only handle delayed update
- * of mtime due to the writes to mapped files.
- */
+static int
+tmpfs_update_mtime_lazy_filter(struct vnode *vp, void *arg)
+{
+ struct vm_object *obj;
+
+ if (vp->v_type != VREG)
+ return (0);
+
+ obj = atomic_load_ptr(&vp->v_object);
+ if (obj == NULL)
+ return (0);
+
+ return (obj->generation != obj->cleangeneration);
+}
+
static void
-tmpfs_update_mtime(struct mount *mp, bool lazy)
+tmpfs_update_mtime_lazy(struct mount *mp)
+{
+ struct vnode *vp, *mvp;
+
+ MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, tmpfs_update_mtime_lazy_filter, NULL) {
+ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
+ continue;
+ tmpfs_check_mtime(vp);
+ vput(vp);
+ }
+}
+
+static void
+tmpfs_update_mtime_all(struct mount *mp)
{
struct vnode *vp, *mvp;
struct vm_object *obj;
@@ -122,25 +143,11 @@
obj = vp->v_object;
KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
(OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
-
- /*
- * In lazy case, do unlocked read, avoid taking vnode
- * lock if not needed. Lost update will be handled on
- * the next call.
- * For non-lazy case, we must flush all pending
- * metadata changes now.
- */
- if (!lazy || obj->generation != obj->cleangeneration) {
- if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
- continue;
- tmpfs_check_mtime(vp);
- if (!lazy)
- tmpfs_update(vp);
- vput(vp);
- } else {
- VI_UNLOCK(vp);
+ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
continue;
- }
+ tmpfs_check_mtime(vp);
+ tmpfs_update(vp);
+ vput(vp);
}
}
@@ -302,7 +309,7 @@
MNT_IUNLOCK(mp);
for (;;) {
tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
- tmpfs_update_mtime(mp, false);
+ tmpfs_update_mtime_all(mp);
error = vflush(mp, 0, flags, curthread);
if (error != 0) {
VFS_TO_TMPFS(mp)->tm_ronly = 0;
@@ -655,7 +662,7 @@
mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
MNT_IUNLOCK(mp);
} else if (waitfor == MNT_LAZY) {
- tmpfs_update_mtime(mp, true);
+ tmpfs_update_mtime_lazy(mp);
}
return (0);
}
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c
+++ sys/vm/swap_pager.c
@@ -3104,22 +3104,67 @@
swap_pager_update_writecount(vm_object_t object, vm_offset_t start,
vm_offset_t end)
{
+ struct vnode *vp;
VM_OBJECT_WLOCK(object);
KASSERT((object->flags & OBJ_ANON) == 0,
("Splittable object with writecount"));
+ if ((object->flags & OBJ_TMPFS) != 0) {
+ if (object->un_pager.swp.writemappings != 0) {
+ KASSERT((object->flags & OBJ_TMPFS_VREF) != 0,
+ ("Unheld vnode with writable mappings"));
+ }
+ }
object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
- VM_OBJECT_WUNLOCK(object);
+ if ((object->flags & OBJ_TMPFS) == 0) {
+ VM_OBJECT_WUNLOCK(object);
+ return;
+ }
+ vp = object->un_pager.swp.swp_tmpfs;
+ if (object->un_pager.swp.writemappings == 0) {
+ vm_object_clear_flag(object, OBJ_TMPFS_VREF);
+ VM_OBJECT_WUNLOCK(object);
+ vrele(vp);
+ } else {
+ if ((object->flags & OBJ_TMPFS_VREF) == 0) {
+ vref(vp);
+ /*
+ * Put the vnode on the lazy list so that
+ * tmpfs_update_mtime_lazy can find it.
+ */
+ vlazy(vp);
+ vm_object_set_flag(object, OBJ_TMPFS_VREF);
+ }
+ VM_OBJECT_WUNLOCK(object);
+ }
}
static void
swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
vm_offset_t end)
{
+ struct vnode *vp;
VM_OBJECT_WLOCK(object);
KASSERT((object->flags & OBJ_ANON) == 0,
("Splittable object with writecount"));
+ if ((object->flags & OBJ_TMPFS) != 0) {
+ if (object->un_pager.swp.writemappings != 0) {
+ KASSERT((object->flags & OBJ_TMPFS_VREF) != 0,
+ ("Unheld vnode with writable mappings"));
+ }
+ }
object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
- VM_OBJECT_WUNLOCK(object);
+ if ((object->flags & OBJ_TMPFS) == 0) {
+ VM_OBJECT_WUNLOCK(object);
+ return;
+ }
+ if (object->un_pager.swp.writemappings == 0) {
+ vp = object->un_pager.swp.swp_tmpfs;
+ vm_object_clear_flag(object, OBJ_TMPFS_VREF);
+ VM_OBJECT_WUNLOCK(object);
+ vrele(vp);
+ } else {
+ VM_OBJECT_WUNLOCK(object);
+ }
}
Index: sys/vm/vm_object.h
===================================================================
--- sys/vm/vm_object.h
+++ sys/vm/vm_object.h
@@ -201,6 +201,7 @@
#define OBJ_UMTXDEAD 0x0020 /* umtx pshared was terminated */
#define OBJ_SIZEVNLOCK 0x0040 /* lock vnode to check obj size */
#define OBJ_PG_DTOR 0x0080 /* dont reset object, leave that for dtor */
+#define OBJ_TMPFS_VREF 0x0100 /* tmpfs vnode is referenced */
#define OBJ_TMPFS_NODE 0x0200 /* object belongs to tmpfs VREG node */
#define OBJ_SPLIT 0x0400 /* object is being split */
#define OBJ_COLLAPSING 0x0800 /* Parent of collapse. */

File Metadata

Mime Type
text/plain
Expires
Tue, Apr 28, 12:47 PM (16 h, 42 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
32278574
Default Alt Text
D30065.id88452.diff (5 KB)

Event Timeline