Index: sys/fs/tmpfs/tmpfs.h
===================================================================
--- sys/fs/tmpfs/tmpfs.h
+++ sys/fs/tmpfs/tmpfs.h
@@ -437,8 +437,8 @@
 struct tmpfs_dirent *	tmpfs_dir_lookup(struct tmpfs_node *node,
 			    struct tmpfs_node *f,
 			    struct componentname *cnp);
-int	tmpfs_dir_getdents(struct tmpfs_node *, struct uio *, int,
-	    u_long *, int *);
+int	tmpfs_dir_getdents(struct tmpfs_mount *, struct tmpfs_node *,
+	    struct uio *, int, u_long *, int *);
 int	tmpfs_dir_whiteout_add(struct vnode *, struct componentname *);
 void	tmpfs_dir_whiteout_remove(struct vnode *, struct componentname *);
 int	tmpfs_reg_resize(struct vnode *, off_t, boolean_t);
@@ -452,7 +452,8 @@
 void	tmpfs_itimes(struct vnode *, const struct timespec *,
 	    const struct timespec *);
 
-void	tmpfs_set_status(struct tmpfs_node *node, int status);
+void	tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node,
+	    int status);
 void	tmpfs_update(struct vnode *);
 int	tmpfs_truncate(struct vnode *, off_t);
 struct tmpfs_dirent *tmpfs_dir_first(struct tmpfs_node *dnode,
Index: sys/fs/tmpfs/tmpfs_fifoops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_fifoops.c
+++ sys/fs/tmpfs/tmpfs_fifoops.c
@@ -56,7 +56,8 @@
 	struct tmpfs_node *node;
 
 	node = VP_TO_TMPFS_NODE(v->a_vp);
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(VFS_TO_TMPFS(v->a_vp->v_mount), node,
+	    TMPFS_NODE_ACCESSED);
 	tmpfs_update(v->a_vp);
 	return (fifo_specops.vop_close(v));
 }
Index: sys/fs/tmpfs/tmpfs_subr.c
===================================================================
--- sys/fs/tmpfs/tmpfs_subr.c
+++ sys/fs/tmpfs/tmpfs_subr.c
@@ -218,6 +218,8 @@
 		 */
 		return (EBUSY);
 	}
+	if ((mp->mnt_flag & MNT_RDONLY) != 0)
+		return (EROFS);
 
 	nnode = (struct tmpfs_node *)uma_zalloc_arg(tmp->tm_node_pool, tmp,
 	    M_WAITOK);
@@ -1108,7 +1110,8 @@
  * error happens.
  */
 static int
-tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
+tmpfs_dir_getdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
+    struct uio *uio)
 {
 	int error;
 	struct dirent dent;
@@ -1128,7 +1131,7 @@
 	else
 		error = uiomove(&dent, dent.d_reclen, uio);
 
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(tm, node, TMPFS_NODE_ACCESSED);
 
 	return (error);
 }
@@ -1141,7 +1144,8 @@
  * error happens.
  */
 static int
-tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
+tmpfs_dir_getdotdotdent(struct tmpfs_mount *tm, struct tmpfs_node *node,
+    struct uio *uio)
 {
 	int error;
 	struct dirent dent;
@@ -1172,7 +1176,7 @@
 	else
 		error = uiomove(&dent, dent.d_reclen, uio);
 
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(tm, node, TMPFS_NODE_ACCESSED);
 
 	return (error);
 }
@@ -1185,8 +1189,8 @@
  * error code if another error happens.
  */
 int
-tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, int maxcookies,
-    u_long *cookies, int *ncookies)
+tmpfs_dir_getdents(struct tmpfs_mount *tm, struct tmpfs_node *node,
+    struct uio *uio, int maxcookies, u_long *cookies, int *ncookies)
 {
 	struct tmpfs_dir_cursor dc;
 	struct tmpfs_dirent *de;
@@ -1207,7 +1211,7 @@
 	 */
 	switch (uio->uio_offset) {
 	case TMPFS_DIRCOOKIE_DOT:
-		error = tmpfs_dir_getdotdent(node, uio);
+		error = tmpfs_dir_getdotdent(tm, node, uio);
 		if (error != 0)
 			return (error);
 		uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
@@ -1215,7 +1219,7 @@
 			cookies[(*ncookies)++] = off = uio->uio_offset;
 		/* FALLTHROUGH */
 	case TMPFS_DIRCOOKIE_DOTDOT:
-		error = tmpfs_dir_getdotdotdent(node, uio);
+		error = tmpfs_dir_getdotdotdent(tm, node, uio);
 		if (error != 0)
 			return (error);
 		de = tmpfs_dir_first(node, &dc);
@@ -1317,7 +1321,7 @@
 	node->tn_dir.tn_readdir_lastn = off;
 	node->tn_dir.tn_readdir_lastp = de;
 
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(tm, node, TMPFS_NODE_ACCESSED);
 	return error;
 }
@@ -1772,10 +1776,10 @@
 }
 
 void
-tmpfs_set_status(struct tmpfs_node *node, int status)
+tmpfs_set_status(struct tmpfs_mount *tm, struct tmpfs_node *node, int status)
 {
 
-	if ((node->tn_status & status) == status)
+	if ((node->tn_status & status) == status || tm->tm_ronly)
 		return;
 	TMPFS_NODE_LOCK(node);
 	node->tn_status |= status;
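The tm_ronly short-circuit above is what lets the rw->ro conversion converge: once the mount is marked read-only, tmpfs_set_status() becomes a no-op, so tn_status can no longer accumulate dirty bits behind the back of the flush loop in tmpfs_rw_to_ro() below. The cost is that every caller now has to supply the tmpfs_mount. A minimal sketch of the pattern the converted call sites follow (example_mark_accessed() is a hypothetical helper, not part of the patch):

static void
example_mark_accessed(struct vnode *vp)
{
	struct tmpfs_node *node;

	/*
	 * Hypothetical helper: derive the tmpfs_mount from the vnode,
	 * the same way the call sites converted by this patch do.
	 */
	node = VP_TO_TMPFS_NODE(vp);
	tmpfs_set_status(VFS_TO_TMPFS(vp->v_mount), node,
	    TMPFS_NODE_ACCESSED);
}
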
Index: sys/fs/tmpfs/tmpfs_vfsops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_vfsops.c
+++ sys/fs/tmpfs/tmpfs_vfsops.c
@@ -60,10 +60,15 @@
 #include <sys/kernel.h>
 #include <sys/rwlock.h>
 #include <sys/stat.h>
+#include <sys/sx.h>
 #include <sys/sysctl.h>
 #include <sys/vnode.h>
 
 #include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
 #include <vm/vm_object.h>
 #include <vm/vm_param.h>
 
@@ -137,6 +142,227 @@
 	mtx_destroy(&node->tn_interlock);
 }
 
+/*
+ * Handle updates of file times from writes to mmapped regions.  Use
+ * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
+ * unmapping a tmpfs-backed vnode does not call vinactive(), because
+ * the vm object type is OBJT_SWAP.
+ * If lazy, only handle the delayed mtime updates caused by writes to
+ * mapped files.
+ */
+static void
+tmpfs_update_mtime(struct mount *mp, bool lazy)
+{
+	struct vnode *vp, *mvp;
+	struct vm_object *obj;
+
+	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
+		if (vp->v_type != VREG) {
+			VI_UNLOCK(vp);
+			continue;
+		}
+		obj = vp->v_object;
+		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
+		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
+
+		/*
+		 * In the lazy case, do an unlocked read to avoid
+		 * taking the vnode lock if not needed.  A lost update
+		 * will be handled on the next call.
+		 * In the non-lazy case, all pending metadata changes
+		 * must be flushed now.
+		 */
+		if (!lazy || (obj->flags & OBJ_TMPFS_DIRTY) != 0) {
+			if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
+			    curthread) != 0)
+				continue;
+			tmpfs_check_mtime(vp);
+			if (!lazy)
+				tmpfs_update(vp);
+			vput(vp);
+		} else {
+			VI_UNLOCK(vp);
+			continue;
+		}
+	}
+}
+
+struct tmpfs_check_rw_maps_arg {
+	bool found;
+};
+
+static bool
+tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
+    vm_map_entry_t entry __unused, void *arg)
+{
+	struct tmpfs_check_rw_maps_arg *a;
+
+	a = arg;
+	a->found = true;
+	return (true);
+}
+
+/*
+ * Revoke write permissions from all mappings of regular files
+ * belonging to the specified tmpfs mount.
+ */
+static bool
+tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
+    vm_map_entry_t entry, void *arg __unused)
+{
+
+	/*
+	 * XXXKIB: might it be better to invalidate
+	 * the mapping instead?  The process is not
+	 * going to be happy in any case.
+	 */
+	entry->max_protection &= ~VM_PROT_WRITE;
+	if ((entry->protection & VM_PROT_WRITE) != 0) {
+		entry->protection &= ~VM_PROT_WRITE;
+		pmap_protect(map->pmap, entry->start, entry->end,
+		    entry->protection);
+	}
+	return (false);
+}
+
+static void
+tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
+    vm_map_entry_t, void *), void *cb_arg)
+{
+	struct proc *p;
+	struct vmspace *vm;
+	vm_map_t map;
+	vm_map_entry_t entry;
+	vm_object_t object;
+	struct vnode *vp;
+	int gen;
+	bool terminate;
+
+	terminate = false;
+	sx_slock(&allproc_lock);
+again:
+	gen = allproc_gen;
+	FOREACH_PROC_IN_SYSTEM(p) {
+		PROC_LOCK(p);
+		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
+		    P_SYSTEM | P_WEXIT)) != 0) {
+			PROC_UNLOCK(p);
+			continue;
+		}
+		vm = vmspace_acquire_ref(p);
+		_PHOLD_LITE(p);
+		PROC_UNLOCK(p);
+		if (vm == NULL) {
+			PRELE(p);
+			continue;
+		}
+		sx_sunlock(&allproc_lock);
+		map = &vm->vm_map;
+
+		vm_map_lock(map);
+		if (map->busy)
+			vm_map_wait_busy(map);
+		for (entry = map->header.next; entry != &map->header;
+		    entry = entry->next) {
+			if ((entry->eflags & (MAP_ENTRY_GUARD |
+			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
+			    (entry->max_protection & VM_PROT_WRITE) == 0)
+				continue;
+			object = entry->object.vm_object;
+			if (object == NULL || object->type != OBJT_SWAP ||
+			    (object->flags & OBJ_TMPFS_NODE) == 0)
+				continue;
+			/*
+			 * No need to dig into the shadow chain; the
+			 * mapping of a non-top object is read-only.
+			 */
+
+			VM_OBJECT_RLOCK(object);
+			if (object->type == OBJT_DEAD) {
+				VM_OBJECT_RUNLOCK(object);
+				continue;
+			}
+			MPASS(object->ref_count > 1);
+			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
+			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
+				VM_OBJECT_RUNLOCK(object);
+				continue;
+			}
+			vp = object->un_pager.swp.swp_tmpfs;
+			if (vp->v_mount != mp) {
+				VM_OBJECT_RUNLOCK(object);
+				continue;
+			}
+
+			terminate = cb(mp, map, entry, cb_arg);
+			VM_OBJECT_RUNLOCK(object);
+			if (terminate)
+				break;
+		}
+		vm_map_unlock(map);
+
+		vmspace_free(vm);
+		sx_slock(&allproc_lock);
+		PRELE(p);
+		if (terminate)
+			break;
+	}
+	if (!terminate && gen != allproc_gen)
+		goto again;
+	sx_sunlock(&allproc_lock);
+}
+
+static bool
+tmpfs_check_rw_maps(struct mount *mp)
+{
+	struct tmpfs_check_rw_maps_arg ca;
+
+	ca.found = false;
+	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
+	return (ca.found);
+}
+
+static int
+tmpfs_rw_to_ro(struct mount *mp)
+{
+	int error, flags;
+	bool forced;
+
+	forced = (mp->mnt_flag & MNT_FORCE) != 0;
+	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);
+
+	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
+		return (error);
+	error = vfs_write_suspend_umnt(mp);
+	if (error != 0)
+		return (error);
+	if (!forced && tmpfs_check_rw_maps(mp)) {
+		error = EBUSY;
+		goto out;
+	}
+	VFS_TO_TMPFS(mp)->tm_ronly = 1;
+	MNT_ILOCK(mp);
+	mp->mnt_flag |= MNT_RDONLY;
+	MNT_IUNLOCK(mp);
+	for (;;) {
+		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
+		tmpfs_update_mtime(mp, false);
+		error = vflush(mp, 0, flags, curthread);
+		if (error != 0) {
+			VFS_TO_TMPFS(mp)->tm_ronly = 0;
+			MNT_ILOCK(mp);
+			mp->mnt_flag &= ~MNT_RDONLY;
+			MNT_IUNLOCK(mp);
+			goto out;
+		}
+		if (!tmpfs_check_rw_maps(mp))
+			break;
+	}
+out:
+	vfs_write_resume(mp, 0);
+	return (error);
+}
+
 static int
 tmpfs_mount(struct mount *mp)
 {
@@ -144,7 +370,7 @@
 	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
 	struct tmpfs_mount *tmp;
 	struct tmpfs_node *root;
-	int error, flags;
+	int error;
 	bool nonc;
 	/* Size counters. */
 	u_quad_t pages;
@@ -178,19 +404,7 @@
 	if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
 	    !(VFS_TO_TMPFS(mp)->tm_ronly)) {
 		/* RW -> RO */
-		error = VFS_SYNC(mp, MNT_WAIT);
-		if (error)
-			return (error);
-		flags = WRITECLOSE;
-		if (mp->mnt_flag & MNT_FORCE)
-			flags |= FORCECLOSE;
-		error = vflush(mp, 0, flags, curthread);
-		if (error)
-			return (error);
-		VFS_TO_TMPFS(mp)->tm_ronly = 1;
-		MNT_ILOCK(mp);
-		mp->mnt_flag |= MNT_RDONLY;
-		MNT_IUNLOCK(mp);
+		return (tmpfs_rw_to_ro(mp));
 	} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
 	    VFS_TO_TMPFS(mp)->tm_ronly) {
 		/* RO -> RW */
@@ -469,45 +683,13 @@
 static int
 tmpfs_sync(struct mount *mp, int waitfor)
 {
-	struct vnode *vp, *mvp;
-	struct vm_object *obj;
 
 	if (waitfor == MNT_SUSPEND) {
 		MNT_ILOCK(mp);
 		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
 		MNT_IUNLOCK(mp);
 	} else if (waitfor == MNT_LAZY) {
-		/*
-		 * Handle lazy updates of mtime from writes to mmaped
-		 * regions. Use MNT_VNODE_FOREACH_ALL instead of
-		 * MNT_VNODE_FOREACH_ACTIVE, since unmap of the
-		 * tmpfs-backed vnode does not call vinactive(), due
-		 * to vm object type is OBJT_SWAP.
-		 */
-		MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
-			if (vp->v_type != VREG) {
-				VI_UNLOCK(vp);
-				continue;
-			}
-			obj = vp->v_object;
-			KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
-			    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
-
-			/*
-			 * Unlocked read, avoid taking vnode lock if
-			 * not needed. Lost update will be handled on
-			 * the next call.
-			 */
-			if ((obj->flags & OBJ_TMPFS_DIRTY) == 0) {
-				VI_UNLOCK(vp);
-				continue;
-			}
-			if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
-			    curthread) != 0)
-				continue;
-			tmpfs_check_mtime(vp);
-			vput(vp);
-		}
+		tmpfs_update_mtime(mp, true);
 	}
 	return (0);
 }
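tmpfs_all_rw_maps() drives both the EBUSY check and the revocation pass through the same process/map walk; the callback contract is that returning true terminates the walk early (tmpfs_check_rw_maps_cb() does this after the first hit), while returning false visits every remaining entry (tmpfs_revoke_rw_maps_cb() relies on this to downgrade all mappings). A sketch of a third, hypothetical callback that merely counts writable tmpfs mappings, to illustrate the contract (tmpfs_count_rw_maps_cb() and its argument structure are not part of the patch):

struct tmpfs_count_rw_maps_arg {
	int count;		/* writable tmpfs mappings seen so far */
};

static bool
tmpfs_count_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_count_rw_maps_arg *a;

	a = arg;
	a->count++;
	return (false);		/* false: keep walking all entries */
}
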
Index: sys/fs/tmpfs/tmpfs_vnops.c
===================================================================
--- sys/fs/tmpfs/tmpfs_vnops.c
+++ sys/fs/tmpfs/tmpfs_vnops.c
@@ -481,7 +481,7 @@
 	if (uio->uio_offset < 0)
 		return (EINVAL);
 	node = VP_TO_TMPFS_NODE(vp);
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(VFS_TO_TMPFS(vp->v_mount), node, TMPFS_NODE_ACCESSED);
 	return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
 }
@@ -1188,18 +1188,22 @@
 }
 
 static int
-tmpfs_readdir(struct vop_readdir_args *v)
+tmpfs_readdir(struct vop_readdir_args *va)
 {
-	struct vnode *vp = v->a_vp;
-	struct uio *uio = v->a_uio;
-	int *eofflag = v->a_eofflag;
-	u_long **cookies = v->a_cookies;
-	int *ncookies = v->a_ncookies;
-
-	int error;
-	ssize_t startresid;
-	int maxcookies;
+	struct vnode *vp;
+	struct uio *uio;
+	struct tmpfs_mount *tm;
 	struct tmpfs_node *node;
+	u_long **cookies;
+	int *eofflag, *ncookies;
+	ssize_t startresid;
+	int error, maxcookies;
+
+	vp = va->a_vp;
+	uio = va->a_uio;
+	eofflag = va->a_eofflag;
+	cookies = va->a_cookies;
+	ncookies = va->a_ncookies;
 
 	/* This operation only makes sense on directory nodes. */
 	if (vp->v_type != VDIR)
@@ -1207,6 +1211,7 @@
 
 	maxcookies = 0;
 	node = VP_TO_TMPFS_DIR(vp);
+	tm = VFS_TO_TMPFS(vp->v_mount);
 
 	startresid = uio->uio_resid;
@@ -1220,9 +1225,9 @@
 	}
 
 	if (cookies == NULL)
-		error = tmpfs_dir_getdents(node, uio, 0, NULL, NULL);
+		error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL);
 	else
-		error = tmpfs_dir_getdents(node, uio, maxcookies, *cookies,
+		error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies,
 		    ncookies);
 
 	/* Buffer was filled without hitting EOF. */
@@ -1258,7 +1263,7 @@
 
 	error = uiomove(node->tn_link,
 	    MIN(node->tn_size, uio->uio_resid), uio);
-	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
+	tmpfs_set_status(VFS_TO_TMPFS(vp->v_mount), node, TMPFS_NODE_ACCESSED);
 
 	return (error);
 }
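With the patch applied, a plain rw->ro update mount fails with EBUSY while any process holds a writable mapping of a file on the mount, and mount(8)'s -f flag forces the downgrade by revoking write access from those mappings. A minimal userland sketch to exercise both behaviors; the mount point /mnt/tmp and the file name are assumptions, and the exact signal delivered by a store through the revoked mapping (SIGSEGV vs. SIGBUS) is left unverified here:

#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char *p;
	int fd;

	/* Assumes a tmpfs is mounted read-write at /mnt/tmp. */
	fd = open("/mnt/tmp/f", O_RDWR | O_CREAT, 0644);
	if (fd == -1)
		err(1, "open");
	if (ftruncate(fd, getpagesize()) == -1)
		err(1, "ftruncate");
	p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED,
	    fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	p[0] = 1;	/* fine while the mount is still read-write */
	printf("mapping held; now run: mount -u -o ro /mnt/tmp\n");
	/*
	 * While this process sleeps, a plain "mount -u -o ro /mnt/tmp"
	 * should fail with EBUSY; "mount -f -u -o ro /mnt/tmp" should
	 * succeed and revoke write access, making the store below fault.
	 */
	pause();
	p[0] = 2;
	return (0);
}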