Page Menu
Home
FreeBSD
Search
Configure Global Search
Log In
Files
F136597284
D30065.id.diff
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
6 KB
Referenced Files
None
Subscribers
None
D30065.id.diff
View Options
diff --git a/sys/fs/tmpfs/tmpfs.h b/sys/fs/tmpfs/tmpfs.h
--- a/sys/fs/tmpfs/tmpfs.h
+++ b/sys/fs/tmpfs/tmpfs.h
@@ -46,6 +46,7 @@
#endif
#define OBJ_TMPFS OBJ_PAGERPRIV1 /* has tmpfs vnode allocated */
+#define OBJ_TMPFS_VREF OBJ_PAGERPRIV2 /* vnode is referenced */
/*
* Internal representation of a tmpfs directory entry.
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -99,6 +99,92 @@
return (object);
}
+/*
+ * Make sure tmpfs vnodes with writable mappings can be found on the lazy list.
+ *
+ * This allows for periodic mtime updates while only scanning vnodes which are
+ * plausibly dirty, see tmpfs_update_mtime_lazy.
+ */
+static void
+tmpfs_pager_writecount_recalc(vm_object_t object, vm_offset_t old,
+ vm_offset_t new)
+{
+ struct vnode *vp;
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+
+ vp = object->un_pager.swp.swp_tmpfs;
+
+ /*
+ * The vnode may already have been torn off the object by a forced unmount.
+ */
+ if (vp == NULL) {
+ KASSERT((object->flags & OBJ_TMPFS_VREF) == 0,
+ ("object %p with OBJ_TMPFS_VREF but without vnode", object));
+ VM_OBJECT_WUNLOCK(object);
+ return;
+ }
+
+ if (old == 0) {
+ VNASSERT((object->flags & OBJ_TMPFS_VREF) == 0, vp,
+ ("object without writable mappings has a reference"));
+ VNPASS(vp->v_usecount > 0, vp);
+ } else {
+ VNASSERT((object->flags & OBJ_TMPFS_VREF) != 0, vp,
+ ("object with writable mappings does not have a reference"));
+ }
+
+ if (old == new) {
+ VM_OBJECT_WUNLOCK(object);
+ return;
+ }
+
+ if (new == 0) {
+ vm_object_clear_flag(object, OBJ_TMPFS_VREF);
+ VM_OBJECT_WUNLOCK(object);
+ vrele(vp);
+ } else {
+ if ((object->flags & OBJ_TMPFS_VREF) == 0) {
+ vref(vp);
+ vlazy(vp);
+ vm_object_set_flag(object, OBJ_TMPFS_VREF);
+ }
+ VM_OBJECT_WUNLOCK(object);
+ }
+}
+
+static void
+tmpfs_pager_update_writecount(vm_object_t object, vm_offset_t start,
+ vm_offset_t end)
+{
+ vm_offset_t new, old;
+
+ VM_OBJECT_WLOCK(object);
+ KASSERT((object->flags & OBJ_ANON) == 0,
+ ("%s: object %p with OBJ_ANON", __func__, object));
+ old = object->un_pager.swp.writemappings;
+ object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
+ new = object->un_pager.swp.writemappings;
+ tmpfs_pager_writecount_recalc(object, old, new);
+ VM_OBJECT_ASSERT_UNLOCKED(object);
+}
+
+static void
+tmpfs_pager_release_writecount(vm_object_t object, vm_offset_t start,
+ vm_offset_t end)
+{
+ vm_offset_t new, old;
+
+ VM_OBJECT_WLOCK(object);
+ KASSERT((object->flags & OBJ_ANON) == 0,
+ ("%s: object %p with OBJ_ANON", __func__, object));
+ old = object->un_pager.swp.writemappings;
+ object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
+ new = object->un_pager.swp.writemappings;
+ tmpfs_pager_writecount_recalc(object, old, new);
+ VM_OBJECT_ASSERT_UNLOCKED(object);
+}
+
static void
tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
@@ -131,6 +217,8 @@
.pgo_kvme_type = KVME_TYPE_VNODE,
.pgo_alloc = tmpfs_pager_alloc,
.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
+ .pgo_update_writecount = tmpfs_pager_update_writecount,
+ .pgo_release_writecount = tmpfs_pager_release_writecount,
.pgo_mightbedirty = vm_object_mightbedirty_,
.pgo_getvp = tmpfs_pager_getvp,
};
@@ -643,6 +731,7 @@
void
tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj)
{
+ bool want_vrele;
ASSERT_VOP_ELOCKED(vp, "tmpfs_destroy_vobject");
if (vp->v_type != VREG || obj == NULL)
@@ -650,12 +739,24 @@
VM_OBJECT_WLOCK(obj);
VI_LOCK(vp);
+ /*
+ * OBJ_TMPFS_VREF may already be clear if a forced unmount is in progress.
+ */
+ want_vrele = false;
+ if ((obj->flags & OBJ_TMPFS_VREF) != 0) {
+ vm_object_clear_flag(obj, OBJ_TMPFS_VREF);
+ want_vrele = true;
+ }
+
vm_object_clear_flag(obj, OBJ_TMPFS);
obj->un_pager.swp.swp_tmpfs = NULL;
if (vp->v_writecount < 0)
vp->v_writecount = 0;
VI_UNLOCK(vp);
VM_OBJECT_WUNLOCK(obj);
+ if (want_vrele) {
+ vrele(vp);
+ }
}
/*
@@ -792,6 +893,12 @@
case VREG:
object = node->tn_reg.tn_aobj;
VM_OBJECT_WLOCK(object);
+ KASSERT((object->flags & OBJ_TMPFS_VREF) == 0,
+ ("%s: object %p with OBJ_TMPFS_VREF but without vnode",
+ __func__, object));
+ KASSERT(object->un_pager.swp.writemappings == 0,
+ ("%s: object %p has writemappings",
+ __func__, object));
VI_LOCK(vp);
KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
vp->v_object = object;
diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c
--- a/sys/fs/tmpfs/tmpfs_vfsops.c
+++ b/sys/fs/tmpfs/tmpfs_vfsops.c
@@ -99,18 +99,38 @@
"from", "export", "nomtime", "size", NULL
};
-/*
- * Handle updates of time from writes to mmaped regions, if allowed.
- * Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
- * unmap of the tmpfs-backed vnode does not call vinactive(), due to
- * vm object type is basically OBJT_SWAP. If lazy, only handle
- * delayed update of mtime due to the writes to mapped files.
- */
+static int
+tmpfs_update_mtime_lazy_filter(struct vnode *vp, void *arg)
+{
+ struct vm_object *obj;
+
+ if (vp->v_type != VREG)
+ return (0);
+
+ obj = atomic_load_ptr(&vp->v_object);
+ if (obj == NULL)
+ return (0);
+
+ return (vm_object_mightbedirty_(obj));
+}
+
static void
-tmpfs_update_mtime(struct mount *mp, bool lazy)
+tmpfs_update_mtime_lazy(struct mount *mp)
+{
+ struct vnode *vp, *mvp;
+
+ MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, tmpfs_update_mtime_lazy_filter, NULL) {
+ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
+ continue;
+ tmpfs_check_mtime(vp);
+ vput(vp);
+ }
+}
+
+static void
+tmpfs_update_mtime_all(struct mount *mp)
{
struct vnode *vp, *mvp;
- struct vm_object *obj;
if (VFS_TO_TMPFS(mp)->tm_nomtime)
return;
@@ -119,28 +139,11 @@
VI_UNLOCK(vp);
continue;
}
- obj = vp->v_object;
- MPASS(obj->type == tmpfs_pager_type);
- MPASS((obj->flags & OBJ_TMPFS) != 0);
-
- /*
- * In lazy case, do unlocked read, avoid taking vnode
- * lock if not needed. Lost update will be handled on
- * the next call.
- * For non-lazy case, we must flush all pending
- * metadata changes now.
- */
- if (!lazy || obj->generation != obj->cleangeneration) {
- if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
- continue;
- tmpfs_check_mtime(vp);
- if (!lazy)
- tmpfs_update(vp);
- vput(vp);
- } else {
- VI_UNLOCK(vp);
+ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
continue;
- }
+ tmpfs_check_mtime(vp);
+ tmpfs_update(vp);
+ vput(vp);
}
}
@@ -300,7 +303,7 @@
MNT_IUNLOCK(mp);
for (;;) {
tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
- tmpfs_update_mtime(mp, false);
+ tmpfs_update_mtime_all(mp);
error = vflush(mp, 0, flags, curthread);
if (error != 0) {
VFS_TO_TMPFS(mp)->tm_ronly = 0;
@@ -653,7 +656,7 @@
mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
MNT_IUNLOCK(mp);
} else if (waitfor == MNT_LAZY) {
- tmpfs_update_mtime(mp, true);
+ tmpfs_update_mtime_lazy(mp);
}
return (0);
}
File Metadata
Details
Attached
Mime Type
text/plain
Expires
Wed, Nov 19, 12:43 PM (13 h, 38 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
25632811
Default Alt Text
D30065.id.diff (6 KB)
Attached To
Mode
D30065: Extend swap pager to put tmpfs vnodes on lazy list.
Attached
Detach File
Event Timeline
Log In to Comment