diff --git a/sys/fs/procfs/procfs_map.c b/sys/fs/procfs/procfs_map.c
--- a/sys/fs/procfs/procfs_map.c
+++ b/sys/fs/procfs/procfs_map.c
@@ -166,16 +166,15 @@
 			vp = lobj->handle;
 			vref(vp);
 			break;
-		case OBJT_SWAP:
-			if ((lobj->flags & OBJ_TMPFS_NODE) != 0) {
-				type = "vnode";
-				if ((lobj->flags & OBJ_TMPFS) != 0) {
-					vp = lobj->un_pager.swp.swp_tmpfs;
-					vref(vp);
-				}
-			} else {
-				type = "swap";
+		case OBJT_SWAP_TMPFS:
+			type = "vnode";
+			if ((lobj->flags & OBJ_TMPFS) != 0) {
+				vp = lobj->un_pager.swp.swp_tmpfs;
+				vref(vp);
 			}
+			break;
+		case OBJT_SWAP:
+			type = "swap";
 			break;
 		case OBJT_SG:
 		case OBJT_DEVICE:
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -364,12 +364,10 @@
 	case VREG:
 		obj = nnode->tn_reg.tn_aobj =
-		    vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0,
+		    vm_pager_allocate(OBJT_SWAP_TMPFS, NULL, 0,
+			VM_PROT_DEFAULT, 0,
 			NULL /* XXXKIB - tmpfs needs swap reservation */);
-		VM_OBJECT_WLOCK(obj);
 		/* OBJ_TMPFS is set together with the setting of vp->v_object */
-		vm_object_set_flag(obj, OBJ_TMPFS_NODE);
-		VM_OBJECT_WUNLOCK(obj);
 		nnode->tn_reg.tn_tmp = tmp;
 		break;
@@ -1590,8 +1588,9 @@
 	if (vp->v_type != VREG)
 		return;
 	obj = vp->v_object;
-	KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
-	    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
+	KASSERT(obj->type == OBJT_SWAP_TMPFS &&
+	    (obj->flags & (OBJ_SWAP | OBJ_TMPFS)) ==
+	    (OBJ_SWAP | OBJ_TMPFS), ("non-tmpfs obj"));
 	/* unlocked read */
 	if (obj->generation != obj->cleangeneration) {
 		VM_OBJECT_WLOCK(obj);
diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c
--- a/sys/fs/tmpfs/tmpfs_vfsops.c
+++ b/sys/fs/tmpfs/tmpfs_vfsops.c
@@ -120,8 +120,8 @@
 			continue;
 		}
 		obj = vp->v_object;
-		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
-		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
+		MPASS(obj->type == OBJT_SWAP_TMPFS);
+		MPASS((obj->flags & OBJ_TMPFS) != 0);
 
 		/*
 		 * In lazy case, do unlocked read, avoid taking vnode
@@ -225,8 +225,7 @@
 		    (entry->max_protection & VM_PROT_WRITE) == 0)
 			continue;
 		object = entry->object.vm_object;
-		if (object == NULL || object->type != OBJT_SWAP ||
-		    (object->flags & OBJ_TMPFS_NODE) == 0)
+		if (object == NULL || object->type != OBJT_SWAP_TMPFS)
 			continue;
 		/*
 		 * No need to dig into shadow chain, mapping
@@ -239,8 +238,7 @@
 			continue;
 		}
 		MPASS(object->ref_count > 1);
-		if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
-		    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
+		if ((object->flags & OBJ_TMPFS) == 0) {
 			VM_OBJECT_RUNLOCK(object);
 			continue;
 		}
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -623,8 +623,9 @@
 	if (object == NULL)
 		goto out_smr;
-	MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_TMPFS_NODE)) ==
-	    OBJ_TMPFS_NODE);
+	MPASS(object->type == OBJT_SWAP_TMPFS);
+	MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
+	    OBJ_SWAP);
 	if (!VN_IS_DOOMED(vp)) {
 		/* size cannot become shorter due to rangelock. */
 		size = node->tn_size;
diff --git a/sys/vm/swap_pager.h b/sys/vm/swap_pager.h
--- a/sys/vm/swap_pager.h
+++ b/sys/vm/swap_pager.h
@@ -76,7 +76,6 @@
 int swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len);
 void swap_pager_copy(vm_object_t, vm_object_t, vm_pindex_t, int);
 vm_pindex_t swap_pager_find_least(vm_object_t object, vm_pindex_t pindex);
-void swap_pager_freespace(vm_object_t, vm_pindex_t, vm_size_t);
 void swap_pager_swap_init(void);
 int swap_pager_nswapdev(void);
 int swap_pager_reserve(vm_object_t, vm_pindex_t, vm_size_t);
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -418,6 +418,9 @@
 static vm_object_t
 		swap_pager_alloc(void *handle, vm_ooffset_t size,
 		    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
+static vm_object_t
+		swap_tmpfs_pager_alloc(void *handle, vm_ooffset_t size,
+		    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
 static void	swap_pager_dealloc(vm_object_t object);
 static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
 		    int *);
@@ -433,18 +436,39 @@
 		    vm_offset_t start, vm_offset_t end);
 static void	swap_pager_release_writecount(vm_object_t object,
 		    vm_offset_t start, vm_offset_t end);
+static void	swap_tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp,
+		    bool *vp_heldp);
+static void	swap_pager_freespace(vm_object_t object, vm_pindex_t start,
+		    vm_size_t size);
 
 struct pagerops swappagerops = {
 	.pgo_init =	swap_pager_init,	/* early system initialization of pager */
-	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object */
-	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
-	.pgo_getpages =	swap_pager_getpages,	/* pagein */
-	.pgo_getpages_async = swap_pager_getpages_async, /* pagein (async) */
-	.pgo_putpages =	swap_pager_putpages,	/* pageout */
-	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page */
-	.pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
+	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object */
+	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
+	.pgo_getpages =	swap_pager_getpages,	/* pagein */
+	.pgo_getpages_async = swap_pager_getpages_async, /* pagein (async) */
+	.pgo_putpages =	swap_pager_putpages,	/* pageout */
+	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page */
+	.pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
 	.pgo_update_writecount = swap_pager_update_writecount,
 	.pgo_release_writecount = swap_pager_release_writecount,
+	.pgo_freespace = swap_pager_freespace,
+};
+
+struct pagerops swaptmpfspagerops = {
+	.pgo_alloc = swap_tmpfs_pager_alloc,
+	.pgo_dealloc = swap_pager_dealloc,
+	.pgo_getpages = swap_pager_getpages,
+	.pgo_getpages_async = swap_pager_getpages_async,
+	.pgo_putpages = swap_pager_putpages,
+	.pgo_haspage = swap_pager_haspage,
+	.pgo_pageunswapped = swap_pager_unswapped,
+	.pgo_update_writecount = swap_pager_update_writecount,
+	.pgo_release_writecount = swap_pager_release_writecount,
+	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
+	.pgo_mightbedirty = vm_object_mightbedirty_,
+	.pgo_getvp = swap_tmpfs_pager_getvp,
+	.pgo_freespace = swap_pager_freespace,
 };
 
 /*
@@ -655,8 +679,8 @@
 }
 
 static vm_object_t
-swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
-    vm_ooffset_t offset)
+swap_pager_alloc_init(objtype_t otype, void *handle, struct ucred *cred,
+    vm_ooffset_t size, vm_ooffset_t offset)
 {
 	vm_object_t object;
 
@@ -671,7 +695,7 @@
 	 * vm_object_allocate() to ensure the correct order of
 	 * visibility to other threads.
 	 */
-	object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
+	object = vm_object_allocate(otype, OFF_TO_IDX(offset +
 	    PAGE_MASK + size));
 	object->un_pager.swp.writemappings = 0;
 
@@ -710,8 +734,8 @@
 		sx_xlock(&sw_alloc_sx);
 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
 		if (object == NULL) {
-			object = swap_pager_alloc_init(handle, cred, size,
-			    offset);
+			object = swap_pager_alloc_init(OBJT_SWAP, handle, cred,
+			    size, offset);
 			if (object != NULL) {
 				TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
 				    object, pager_object_list);
@@ -719,11 +743,24 @@
 		}
 		sx_xunlock(&sw_alloc_sx);
 	} else {
-		object = swap_pager_alloc_init(handle, cred, size, offset);
+		object = swap_pager_alloc_init(OBJT_SWAP, handle, cred,
+		    size, offset);
 	}
 	return (object);
 }
 
+static vm_object_t
+swap_tmpfs_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
+    vm_ooffset_t offset, struct ucred *cred)
+{
+	vm_object_t object;
+
+	MPASS(handle == NULL);
+	object = swap_pager_alloc_init(OBJT_SWAP_TMPFS, handle, cred,
+	    size, offset);
+	return (object);
+}
+
 /*
  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
@@ -765,6 +802,7 @@
 	swp_pager_meta_free_all(object);
 	object->handle = NULL;
 	object->type = OBJT_DEAD;
+	vm_object_clear_flag(object, OBJ_SWAP);
 }
 
 /************************************************************************
@@ -935,8 +973,6 @@
 *	SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
- *	This is a globally accessible routine.
- *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
@@ -945,7 +981,7 @@
 *
 *	The object must be locked.
 */
-void
+static void
 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
 {
@@ -995,9 +1031,9 @@
 {
 	daddr_t dstaddr;
-	KASSERT(srcobject->type == OBJT_SWAP,
+	KASSERT((srcobject->flags & OBJ_SWAP) != 0,
 	    ("%s: Srcobject not swappable", __func__));
-	if (dstobject->type == OBJT_SWAP &&
+	if ((dstobject->flags & OBJ_SWAP) != 0 &&
 	    swp_pager_meta_lookup(dstobject, pindex) != SWAPBLK_NONE) {
 		/* Caller should destroy the source block. */
 		return (false);
 	}
@@ -1078,6 +1114,7 @@
 		 * queues.
 		 */
 		srcobject->type = OBJT_DEFAULT;
+		vm_object_clear_flag(srcobject, OBJ_SWAP);
 	}
 }
 
@@ -1099,7 +1136,7 @@
 	int i;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
-	KASSERT(object->type == OBJT_SWAP,
+	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("%s: object not swappable", __func__));
 
 	/*
@@ -1195,7 +1232,7 @@
 	 * The meta data only exists if the object is OBJT_SWAP
 	 * and even then might not be allocated yet.
 	 */
-	KASSERT(m->object->type == OBJT_SWAP,
+	KASSERT((m->object->flags & OBJ_SWAP) != 0,
 	    ("Free object not swappable"));
 
 	sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks,
@@ -1233,7 +1270,7 @@
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	reqcount = count;
 
-	KASSERT(object->type == OBJT_SWAP,
+	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("%s: object not swappable", __func__));
 	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead)) {
 		VM_OBJECT_WUNLOCK(object);
@@ -1467,7 +1504,7 @@
 	 *
 	 * Turn object into OBJT_SWAP. Force sync if not a pageout process.
 	 */
-	if (object->type != OBJT_SWAP) {
+	if ((object->flags & OBJ_SWAP) == 0) {
 		addr = swp_pager_meta_build(object, 0, SWAPBLK_NONE);
 		KASSERT(addr == SWAPBLK_NONE,
 		    ("unexpected object swap block"));
@@ -1772,7 +1809,7 @@
 	int i;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
-	if (object->type != OBJT_SWAP)
+	if ((object->flags & OBJ_SWAP) == 0)
 		return (0);
 
 	for (res = 0, pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
@@ -1801,7 +1838,7 @@
 	daddr_t blk;
 	int i, nv, rahead, rv;
 
-	KASSERT(object->type == OBJT_SWAP,
+	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("%s: Object not swappable", __func__));
 
 	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
@@ -1909,7 +1946,7 @@
 full_rescan:
 	mtx_lock(&vm_object_list_mtx);
 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
-		if (object->type != OBJT_SWAP)
+		if ((object->flags & OBJ_SWAP) == 0)
 			continue;
 		mtx_unlock(&vm_object_list_mtx);
 		/* Depends on type-stability. */
@@ -1928,7 +1965,7 @@
 		 * dead.
 		 */
 		atomic_thread_fence_acq();
-		if (object->type != OBJT_SWAP)
+		if ((object->flags & OBJ_SWAP) == 0)
 			goto next_obj;
 
 		swap_pager_swapoff_object(sp, object);
@@ -2023,7 +2060,7 @@
 	/*
 	 * Convert default object to swap object if necessary
 	 */
-	if (object->type != OBJT_SWAP) {
+	if ((object->flags & OBJ_SWAP) == 0) {
 		pctrie_init(&object->un_pager.swp.swp_blks);
 
 		/*
@@ -2033,6 +2070,7 @@
 		atomic_thread_fence_rel();
 		object->type = OBJT_SWAP;
+		vm_object_set_flag(object, OBJ_SWAP);
 		object->un_pager.swp.writemappings = 0;
 		KASSERT((object->flags & OBJ_ANON) != 0 ||
 		    object->handle == NULL,
@@ -2141,7 +2179,7 @@
 	int i, limit, start;
 
 	VM_OBJECT_ASSERT_WLOCKED(srcobject);
-	if (srcobject->type != OBJT_SWAP || count == 0)
+	if ((srcobject->flags & OBJ_SWAP) == 0 || count == 0)
 		return;
 
 	swp_pager_init_freerange(&s_free, &n_free);
@@ -2208,7 +2246,7 @@
 	int i;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
-	if (object->type != OBJT_SWAP)
+	if ((object->flags & OBJ_SWAP) == 0)
 		return;
 
 	swp_pager_init_freerange(&s_free, &n_free);
@@ -2248,7 +2286,7 @@
 	 * The meta data only exists if the object is OBJT_SWAP
 	 * and even then might not be allocated yet.
 	 */
-	KASSERT(object->type == OBJT_SWAP,
+	KASSERT((object->flags & OBJ_SWAP) != 0,
 	    ("Lookup object not swappable"));
 
 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
@@ -2272,7 +2310,7 @@
 	int i;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
-	if (object->type != OBJT_SWAP)
+	if ((object->flags & OBJ_SWAP) == 0)
 		return (object->size);
 
 	sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
@@ -2727,10 +2765,10 @@
 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
 			continue;
 		object = cur->object.vm_object;
-		if (object == NULL || object->type != OBJT_SWAP)
+		if (object == NULL || (object->flags & OBJ_SWAP) == 0)
 			continue;
 		VM_OBJECT_RLOCK(object);
-		if (object->type != OBJT_SWAP)
+		if ((object->flags & OBJ_SWAP) == 0)
 			goto unlock;
 		pi = OFF_TO_IDX(cur->offset);
 		e = pi + OFF_TO_IDX(cur->end - cur->start);
@@ -3123,3 +3161,31 @@
 	object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
 	VM_OBJECT_WUNLOCK(object);
 }
+
+static void
+swap_tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
+{
+	struct vnode *vp;
+
+	/*
+	 * Tmpfs VREG node, which was reclaimed, has OBJT_SWAP_TMPFS
+	 * type, but not OBJ_TMPFS flag.  In this case there is no
+	 * v_writecount to adjust.
+	 */
+	if (vp_heldp != NULL)
+		VM_OBJECT_RLOCK(object);
+	else
+		VM_OBJECT_ASSERT_LOCKED(object);
+	if ((object->flags & OBJ_TMPFS) != 0) {
+		vp = object->un_pager.swp.swp_tmpfs;
+		if (vp != NULL) {
+			*vpp = vp;
+			if (vp_heldp != NULL) {
+				vhold(vp);
+				*vp_heldp = true;
+			}
+		}
+	}
+	if (vp_heldp != NULL)
+		VM_OBJECT_RUNLOCK(object);
+}
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -88,8 +88,17 @@
 #define	VM_PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)
 #define	VM_PROT_DEFAULT		VM_PROT_ALL
 
-enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_PHYS,
-		OBJT_DEAD, OBJT_SG, OBJT_MGTDEVICE };
+enum obj_type {
+	OBJT_DEFAULT,
+	OBJT_SWAP,
+	OBJT_VNODE,
+	OBJT_DEVICE,
+	OBJT_PHYS,
+	OBJT_DEAD,
+	OBJT_SG,
+	OBJT_MGTDEVICE,
+	OBJT_SWAP_TMPFS,
+};
 typedef u_char objtype_t;
 
 union vm_map_object;
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1947,7 +1947,7 @@
 		crhold(dst_object->cred);
 		*fork_charge += dst_object->charge;
 	} else if ((dst_object->type == OBJT_DEFAULT ||
-	    dst_object->type == OBJT_SWAP) &&
+	    (dst_object->flags & OBJ_SWAP) != 0) &&
 	    dst_object->cred == NULL) {
 		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
 		    dst_entry));
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -561,38 +561,7 @@
 	 * referenced by the entry we are processing, so it cannot go
 	 * away.
 	 */
-	vp = NULL;
-	vp_held = false;
-	if (object->type == OBJT_DEAD) {
-		/*
-		 * For OBJT_DEAD objects, v_writecount was handled in
-		 * vnode_pager_dealloc().
-		 */
-	} else if (object->type == OBJT_VNODE) {
-		vp = object->handle;
-	} else if (object->type == OBJT_SWAP) {
-		KASSERT((object->flags & OBJ_TMPFS_NODE) != 0,
-		    ("vm_map_entry_set_vnode_text: swap and !TMPFS "
-		    "entry %p, object %p, add %d", entry, object, add));
-		/*
-		 * Tmpfs VREG node, which was reclaimed, has
-		 * OBJ_TMPFS_NODE flag set, but not OBJ_TMPFS.  In
-		 * this case there is no v_writecount to adjust.
-		 */
-		VM_OBJECT_RLOCK(object);
-		if ((object->flags & OBJ_TMPFS) != 0) {
-			vp = object->un_pager.swp.swp_tmpfs;
-			if (vp != NULL) {
-				vhold(vp);
-				vp_held = true;
-			}
-		}
-		VM_OBJECT_RUNLOCK(object);
-	} else {
-		KASSERT(0,
-		    ("vm_map_entry_set_vnode_text: wrong object type, "
-		    "entry %p, object %p, add %d", entry, object, add));
-	}
+	vm_pager_getvp(object, &vp, &vp_held);
 	if (vp != NULL) {
 		if (add) {
 			VOP_SET_TEXT_CHECKED(vp);
@@ -2857,10 +2826,12 @@
 			continue;
 		}
-		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP)
+		if (obj->type != OBJT_DEFAULT &&
+		    (obj->flags & OBJ_SWAP) == 0)
 			continue;
 		VM_OBJECT_WLOCK(obj);
-		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
+		if (obj->type != OBJT_DEFAULT &&
+		    (obj->flags & OBJ_SWAP) == 0) {
 			VM_OBJECT_WUNLOCK(obj);
 			continue;
 		}
@@ -4171,7 +4142,7 @@
 	size = src_entry->end - src_entry->start;
 	if ((src_object = src_entry->object.vm_object) != NULL) {
 		if (src_object->type == OBJT_DEFAULT ||
-		    src_object->type == OBJT_SWAP) {
+		    (src_object->flags & OBJ_SWAP) != 0) {
 			vm_map_copy_swap_object(src_entry, dst_entry,
 			    size, fork_charge);
 			/* May have split/collapsed, reload obj. */
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -930,7 +930,7 @@
 			VM_OBJECT_WLOCK(object);
 		}
 		if (object->type == OBJT_DEFAULT ||
-		    object->type == OBJT_SWAP ||
+		    (object->flags & OBJ_SWAP) != 0 ||
 		    object->type == OBJT_VNODE) {
 			pindex = OFF_TO_IDX(current->offset +
 			    (addr - current->start));
@@ -1357,7 +1357,8 @@
 			goto done;
 		}
 	} else {
-		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
+		KASSERT(obj->type == OBJT_DEFAULT ||
+		    (obj->flags & OBJ_SWAP) != 0,
 		    ("wrong object type"));
 		vm_object_reference(obj);
 #if VM_NRESERVLEVEL > 0
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -201,7 +201,7 @@
 #define	OBJ_UMTXDEAD	0x0020		/* umtx pshared was terminated */
 #define	OBJ_SIZEVNLOCK	0x0040		/* lock vnode to check obj size */
 #define	OBJ_PG_DTOR	0x0080		/* dont reset object, leave that for dtor */
-#define	OBJ_TMPFS_NODE	0x0200		/* object belongs to tmpfs VREG node */
+#define	OBJ_SWAP	0x0200		/* object swaps */
 #define	OBJ_SPLIT	0x0400		/* object is being split */
 #define	OBJ_COLLAPSING	0x0800		/* Parent of collapse. */
 #define	OBJ_COLORED	0x1000		/* pg_color is defined */
@@ -330,21 +330,6 @@
 	return (false);
 }
 
-static __inline bool
-vm_object_mightbedirty(vm_object_t object)
-{
-
-	if (object->type != OBJT_VNODE) {
-		if ((object->flags & OBJ_TMPFS_NODE) == 0)
-			return (false);
-#ifdef KASSERT
-		KASSERT(object->type == OBJT_SWAP,
-		    ("TMPFS_NODE obj %p is not swap", object));
-#endif
-	}
-	return (object->generation != object->cleangeneration);
-}
-
 void vm_object_clear_flag(vm_object_t object, u_short bits);
 void vm_object_pip_add(vm_object_t object, short i);
 void vm_object_pip_wakeup(vm_object_t object);
@@ -378,6 +363,9 @@
 void vm_object_destroy (vm_object_t);
 void vm_object_terminate (vm_object_t);
 void vm_object_set_writeable_dirty (vm_object_t);
+void vm_object_set_writeable_dirty_(vm_object_t object);
+bool vm_object_mightbedirty(vm_object_t object);
+bool vm_object_mightbedirty_(vm_object_t object);
 void vm_object_init (void);
 int  vm_object_kvme_type(vm_object_t object, struct vnode **vpp);
 void vm_object_madvise(vm_object_t, vm_pindex_t, vm_pindex_t, int);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -240,7 +240,7 @@
 	LIST_INIT(&object->shadow_head);
 
 	object->type = type;
-	if (type == OBJT_SWAP)
+	if ((flags & OBJ_SWAP) != 0)
 		pctrie_init(&object->un_pager.swp.swp_blks);
 
 	/*
@@ -337,6 +337,7 @@
 	case OBJT_PHYS:
 	case OBJT_SG:
 	case OBJT_SWAP:
+	case OBJT_SWAP_TMPFS:
 	case OBJT_VNODE:
 		if (!TAILQ_EMPTY(&object->memq))
 			return (KERN_FAILURE);
@@ -421,9 +422,12 @@
 	case OBJT_DEAD:
 		panic("vm_object_allocate: can't create OBJT_DEAD");
 	case OBJT_DEFAULT:
-	case OBJT_SWAP:
 		flags = OBJ_COLORED;
 		break;
+	case OBJT_SWAP:
+	case OBJT_SWAP_TMPFS:
+		flags = OBJ_COLORED | OBJ_SWAP;
+		break;
 	case OBJT_DEVICE:
 	case OBJT_SG:
 		flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
@@ -573,7 +577,7 @@
 	KASSERT(object != NULL && backing_object->shadow_count == 1,
 	    ("vm_object_anon_deallocate: ref_count: %d, shadow_count: %d",
 	    backing_object->ref_count, backing_object->shadow_count));
-	KASSERT((object->flags & (OBJ_TMPFS_NODE | OBJ_ANON)) == OBJ_ANON,
+	KASSERT((object->flags & OBJ_ANON) != 0,
 	    ("invalid shadow object %p", object));
 
 	if (!VM_OBJECT_TRYWLOCK(object)) {
@@ -677,7 +681,7 @@
 	umtx_shm_object_terminated(object);
 	temp = object->backing_object;
 	if (temp != NULL) {
-		KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
+		KASSERT(object->type != OBJT_SWAP_TMPFS,
 		    ("shadowed tmpfs v_object 2 %p", object));
 		vm_object_backing_remove(object);
 	}
@@ -958,7 +962,7 @@
 #endif
 	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
-	    object->type == OBJT_SWAP,
+	    object->type == OBJT_SWAP || object->type == OBJT_SWAP_TMPFS,
 	    ("%s: non-swap obj %p has cred", __func__, object));
 
 	/*
@@ -1277,8 +1281,8 @@
     vm_size_t size)
 {
 
-	if (advice == MADV_FREE && object->type == OBJT_SWAP)
-		swap_pager_freespace(object, pindex, size);
+	if (advice == MADV_FREE)
+		vm_pager_freespace(object, pindex, size);
 }
 
 /*
@@ -1627,7 +1631,7 @@
 		else if (m_busy == NULL)
 			m_busy = m;
 	}
-	if (orig_object->type == OBJT_SWAP) {
+	if ((orig_object->flags & OBJ_SWAP) != 0) {
 		/*
 		 * swap_pager_copy() can sleep, in which case the orig_object's
 		 * and new_object's locks are released and reacquired.
@@ -1798,9 +1802,7 @@
 		if (p->pindex < backing_offset_index ||
 		    new_pindex >= object->size) {
-			if (backing_object->type == OBJT_SWAP)
-				swap_pager_freespace(backing_object, p->pindex,
-				    1);
+			vm_pager_freespace(backing_object, p->pindex, 1);
 
 			KASSERT(!pmap_page_is_mapped(p),
 			    ("freeing mapped page %p", p));
@@ -1849,9 +1851,7 @@
 			 * page alone.  Destroy the original page from the
 			 * backing object.
 			 */
-			if (backing_object->type == OBJT_SWAP)
-				swap_pager_freespace(backing_object, p->pindex,
-				    1);
+			vm_pager_freespace(backing_object, p->pindex, 1);
 			KASSERT(!pmap_page_is_mapped(p),
 			    ("freeing mapped page %p", p));
 			if (vm_page_remove(p))
@@ -1875,9 +1875,8 @@
 		}
 
 		/* Use the old pindex to free the right page. */
-		if (backing_object->type == OBJT_SWAP)
-			swap_pager_freespace(backing_object,
-			    new_pindex + backing_offset_index, 1);
+		vm_pager_freespace(backing_object, new_pindex +
+		    backing_offset_index, 1);
 
 #if VM_NRESERVLEVEL > 0
 		/*
@@ -1960,7 +1959,7 @@
 		/*
 		 * Move the pager from backing_object to object.
 		 */
-		if (backing_object->type == OBJT_SWAP) {
+		if ((backing_object->flags & OBJ_SWAP) != 0) {
 			/*
 			 * swap_pager_copy() can sleep, in which case
 			 * the backing_object's and object's locks are
@@ -2138,11 +2137,8 @@
 	}
 	vm_object_pip_wakeup(object);
-	if (object->type == OBJT_SWAP) {
-		if (end == 0)
-			end = object->size;
-		swap_pager_freespace(object, start, end - start);
-	}
+	vm_pager_freespace(object, start, (end == 0 ? object->size : end) -
+	    start);
 }
 
 /*
@@ -2332,16 +2328,17 @@
 }
 
 void
-vm_object_set_writeable_dirty(vm_object_t object)
+vm_object_set_writeable_dirty_(vm_object_t object)
 {
-
-	/* Only set for vnodes & tmpfs */
-	if (object->type != OBJT_VNODE &&
-	    (object->flags & OBJ_TMPFS_NODE) == 0)
-		return;
 	atomic_add_int(&object->generation, 1);
 }
 
+bool
+vm_object_mightbedirty_(vm_object_t object)
+{
+	return (object->generation != object->cleangeneration);
+}
+
 /*
  *	vm_object_unwire:
  *
@@ -2436,16 +2433,7 @@
 	struct vnode *vp;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
-	if (object->type == OBJT_VNODE) {
-		vp = object->handle;
-		KASSERT(vp != NULL, ("%s: OBJT_VNODE has no vnode", __func__));
-	} else if (object->type == OBJT_SWAP &&
-	    (object->flags & OBJ_TMPFS) != 0) {
-		vp = object->un_pager.swp.swp_tmpfs;
-		KASSERT(vp != NULL, ("%s: OBJT_TMPFS has no vnode", __func__));
-	} else {
-		vp = NULL;
-	}
+	vm_pager_getvp(object, &vp, NULL);
 	return (vp);
 }
@@ -2498,9 +2486,9 @@
 	case OBJT_VNODE:
 		return (KVME_TYPE_VNODE);
 	case OBJT_SWAP:
-		if ((object->flags & OBJ_TMPFS_NODE) != 0)
-			return (KVME_TYPE_VNODE);
 		return (KVME_TYPE_SWAP);
+	case OBJT_SWAP_TMPFS:
+		return (KVME_TYPE_VNODE);
 	case OBJT_DEVICE:
 		return (KVME_TYPE_DEVICE);
 	case OBJT_PHYS:
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2639,7 +2639,7 @@
 		}
 		/* Don't care: PG_NODUMP, PG_ZERO. */
 		if (object->type != OBJT_DEFAULT &&
-		    object->type != OBJT_SWAP &&
+		    (object->flags & OBJ_SWAP) == 0 &&
 		    object->type != OBJT_VNODE) {
 			run_ext = 0;
 #if VM_NRESERVLEVEL > 0
@@ -2777,7 +2777,7 @@
 		/* Don't care: PG_NODUMP, PG_ZERO. */
 		if (m->object != object ||
 		    (object->type != OBJT_DEFAULT &&
-		    object->type != OBJT_SWAP &&
+		    (object->flags & OBJ_SWAP) == 0 &&
 		    object->type != OBJT_VNODE))
 			error = EINVAL;
 		else if (object->memattr != VM_MEMATTR_DEFAULT)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -545,7 +545,7 @@
 		 * clog the laundry and inactive queues.  (We will try
 		 * paging it out again later.)
 		 */
-		if (object->type == OBJT_SWAP &&
+		if ((object->flags & OBJ_SWAP) != 0 &&
 		    pageout_status[i] == VM_PAGER_FAIL) {
 			vm_page_unswappable(mt);
 			numpagedout++;
@@ -897,7 +897,7 @@
 			vm_page_free(m);
 			VM_CNT_INC(v_dfree);
 		} else if ((object->flags & OBJ_DEAD) == 0) {
-			if (object->type != OBJT_SWAP &&
+			if ((object->flags & OBJ_SWAP) == 0 &&
 			    object->type != OBJT_DEFAULT)
 				pageout_ok = true;
 			else if (disable_swap_pageouts)
@@ -1890,6 +1890,7 @@
 		switch (obj->type) {
 		case OBJT_DEFAULT:
 		case OBJT_SWAP:
+		case OBJT_SWAP_TMPFS:
 		case OBJT_PHYS:
 		case OBJT_VNODE:
 			res += obj->resident_page_count;
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -47,6 +47,7 @@
 #include <sys/queue.h>
 
 TAILQ_HEAD(pagerlst, vm_object);
+struct vnode;
 
 typedef void pgo_init_t(void);
 typedef vm_object_t pgo_alloc_t(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t,
@@ -62,6 +63,12 @@
     vm_pindex_t *, vm_pindex_t *);
 typedef void pgo_pageunswapped_t(vm_page_t);
 typedef void pgo_writecount_t(vm_object_t, vm_offset_t, vm_offset_t);
+typedef void pgo_set_writeable_dirty_t(vm_object_t);
+typedef bool pgo_mightbedirty_t(vm_object_t);
+typedef void pgo_getvp_t(vm_object_t object, struct vnode **vpp,
+    bool *vp_heldp);
+typedef void pgo_freespace_t(vm_object_t object, vm_pindex_t start,
+    vm_size_t size);
 
 struct pagerops {
 	pgo_init_t	*pgo_init;		/* Initialize pager. */
@@ -73,9 +80,12 @@
 	pgo_haspage_t	*pgo_haspage;		/* Query page. */
 	pgo_populate_t	*pgo_populate;		/* Bulk spec pagein. */
 	pgo_pageunswapped_t *pgo_pageunswapped;
-	/* Operations for specialized writecount handling */
 	pgo_writecount_t *pgo_update_writecount;
 	pgo_writecount_t *pgo_release_writecount;
+	pgo_set_writeable_dirty_t *pgo_set_writeable_dirty;
+	pgo_mightbedirty_t *pgo_mightbedirty;
+	pgo_getvp_t *pgo_getvp;
+	pgo_freespace_t *pgo_freespace;
 };
 
 extern struct pagerops defaultpagerops;
@@ -85,6 +95,7 @@
 extern struct pagerops physpagerops;
 extern struct pagerops sgpagerops;
 extern struct pagerops mgtdevicepagerops;
+extern struct pagerops swaptmpfspagerops;
 
 /*
  * get/put return values
@@ -129,13 +140,9 @@
 vm_object_t vm_pager_object_lookup(struct pagerlst *, void *);
 
 static __inline void
-vm_pager_put_pages(
-	vm_object_t object,
-	vm_page_t *m,
-	int count,
-	int flags,
-	int *rtvals
-) {
+vm_pager_put_pages(vm_object_t object, vm_page_t *m, int count, int flags,
+    int *rtvals)
+{
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	(*pagertab[object->type]->pgo_putpages)
 	    (object, m, count, flags, rtvals);
@@ -152,12 +159,9 @@
 * The object must be locked.
 */
 static __inline boolean_t
-vm_pager_has_page(
-	vm_object_t object,
-	vm_pindex_t offset,
-	int *before,
-	int *after
-) {
+vm_pager_has_page(vm_object_t object, vm_pindex_t offset, int *before,
+    int *after)
+{
 	boolean_t ret;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
@@ -191,29 +195,57 @@
 static __inline void
 vm_pager_page_unswapped(vm_page_t m)
 {
+	pgo_pageunswapped_t *method;
 
-	if (pagertab[m->object->type]->pgo_pageunswapped)
-		(*pagertab[m->object->type]->pgo_pageunswapped)(m);
+	method = pagertab[m->object->type]->pgo_pageunswapped;
+	if (method != NULL)
+		method(m);
 }
 
 static __inline void
 vm_pager_update_writecount(vm_object_t object, vm_offset_t start,
     vm_offset_t end)
 {
+	pgo_writecount_t *method;
 
-	if (pagertab[object->type]->pgo_update_writecount)
-		pagertab[object->type]->pgo_update_writecount(object, start,
-		    end);
+	method = pagertab[object->type]->pgo_update_writecount;
+	if (method != NULL)
+		method(object, start, end);
 }
 
 static __inline void
 vm_pager_release_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
 {
+	pgo_writecount_t *method;
+
+	method = pagertab[object->type]->pgo_release_writecount;
+	if (method != NULL)
+		method(object, start, end);
+}
+
+static __inline void
+vm_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
+{
+	pgo_getvp_t *method;
+
+	*vpp = NULL;
+	if (vp_heldp != NULL)
+		*vp_heldp = false;
+	method = pagertab[object->type]->pgo_getvp;
+	if (method != NULL)
+		method(object, vpp, vp_heldp);
+}
+
+static __inline void
+vm_pager_freespace(vm_object_t object, vm_pindex_t start,
+    vm_size_t size)
+{
+	pgo_freespace_t *method;
 
-	if (pagertab[object->type]->pgo_release_writecount)
-		pagertab[object->type]->pgo_release_writecount(object, start,
-		    end);
+	method = pagertab[object->type]->pgo_freespace;
+	if (method != NULL)
+		method(object, start, size);
 }
 
 struct cdev_pager_ops {
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -100,6 +100,7 @@
 static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
 static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
 static void dead_pager_dealloc(vm_object_t);
+static void dead_pager_getvp(vm_object_t, struct vnode **, bool *);
 
 static int
 dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
@@ -144,23 +145,34 @@
 }
 
+static void
+dead_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
+{
+	/*
+	 * For OBJT_DEAD objects, v_writecount was handled in
+	 * vnode_pager_dealloc().
+	 */
+}
+
 static struct pagerops deadpagerops = {
 	.pgo_alloc = 	dead_pager_alloc,
 	.pgo_dealloc =	dead_pager_dealloc,
 	.pgo_getpages =	dead_pager_getpages,
 	.pgo_putpages =	dead_pager_putpages,
 	.pgo_haspage =	dead_pager_haspage,
+	.pgo_getvp =	dead_pager_getvp,
 };
 
 struct pagerops *pagertab[] = {
-	&defaultpagerops,	/* OBJT_DEFAULT */
-	&swappagerops,		/* OBJT_SWAP */
-	&vnodepagerops,		/* OBJT_VNODE */
-	&devicepagerops,	/* OBJT_DEVICE */
-	&physpagerops,		/* OBJT_PHYS */
-	&deadpagerops,		/* OBJT_DEAD */
-	&sgpagerops,		/* OBJT_SG */
-	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
+	[OBJT_DEFAULT] = &defaultpagerops,
+	[OBJT_SWAP] = &swappagerops,
+	[OBJT_VNODE] = &vnodepagerops,
+	[OBJT_DEVICE] = &devicepagerops,
+	[OBJT_PHYS] = &physpagerops,
+	[OBJT_DEAD] = &deadpagerops,
+	[OBJT_SG] = &sgpagerops,
+	[OBJT_MGTDEVICE] = &mgtdevicepagerops,
+	[OBJT_SWAP_TMPFS] = &swaptmpfspagerops,
 };
 
 void
@@ -497,3 +509,24 @@
 	bp->b_bufobj = NULL;
 	bp->b_flags &= ~B_PAGING;
 }
+
+void
+vm_object_set_writeable_dirty(vm_object_t object)
+{
+	pgo_set_writeable_dirty_t *method;
+
+	method = pagertab[object->type]->pgo_set_writeable_dirty;
+	if (method != NULL)
+		method(object);
+}
+
+bool
+vm_object_mightbedirty(vm_object_t object)
+{
+	pgo_mightbedirty_t *method;
+
+	method = pagertab[object->type]->pgo_mightbedirty;
+	if (method == NULL)
+		return (false);
+	return (method(object));
+}
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -105,6 +105,7 @@
     vm_offset_t);
 static void vnode_pager_release_writecount(vm_object_t, vm_offset_t,
     vm_offset_t);
+static void vnode_pager_getvp(vm_object_t, struct vnode **, bool *);
 
 struct pagerops vnodepagerops = {
 	.pgo_alloc =	vnode_pager_alloc,
@@ -115,6 +116,9 @@
 	.pgo_haspage =	vnode_pager_haspage,
 	.pgo_update_writecount = vnode_pager_update_writecount,
 	.pgo_release_writecount = vnode_pager_release_writecount,
+	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
+	.pgo_mightbedirty = vm_object_mightbedirty_,
+	.pgo_getvp = vnode_pager_getvp,
 };
 
 static struct domainset *vnode_domainset = NULL;
@@ -1600,3 +1604,9 @@
 	if (mp != NULL)
 		vn_finished_write(mp);
 }
+
+static void
+vnode_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
+{
+	*vpp = object->handle;
+}
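
Illustration of the dispatch pattern the patch introduces: open-coded checks on object->type and OBJ_TMPFS_NODE are replaced by optional per-type methods looked up in pagertab[], with a NULL slot meaning "nothing to do", and defaults (*vpp = NULL, *vp_heldp = false) applied before dispatch. The stand-alone user-space C sketch below only models that mechanism; the toy_* names and types are invented for illustration and are not part of the kernel tree.

/*
 * Minimal model of the pagertab[] method dispatch: a table of optional
 * function pointers indexed by object type, NULL meaning "no-op".
 * Toy types only; not kernel code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum toy_objtype { TOY_DEFAULT, TOY_SWAP, TOY_VNODE, TOY_SWAP_TMPFS, TOY_NTYPES };

struct toy_object {
	enum toy_objtype type;
	unsigned generation;
	unsigned cleangeneration;
	const char *vnode_name;		/* stands in for swp_tmpfs / handle */
};

typedef void toy_getvp_t(struct toy_object *, const char **vpp, bool *heldp);
typedef bool toy_mightbedirty_t(struct toy_object *);

struct toy_pagerops {
	toy_getvp_t *pgo_getvp;			/* NULL: object has no vnode */
	toy_mightbedirty_t *pgo_mightbedirty;	/* NULL: never dirty */
};

static void
vnode_like_getvp(struct toy_object *obj, const char **vpp, bool *heldp)
{
	*vpp = obj->vnode_name;
	if (heldp != NULL)
		*heldp = true;	/* pretend a hold reference was taken */
}

static bool
generation_mightbedirty(struct toy_object *obj)
{
	return (obj->generation != obj->cleangeneration);
}

/* Indexed by object type, like pagertab[] in vm_pager.c. */
static const struct toy_pagerops toy_pagertab[TOY_NTYPES] = {
	[TOY_DEFAULT]	 = { NULL, NULL },
	[TOY_SWAP]	 = { NULL, NULL },
	[TOY_VNODE]	 = { vnode_like_getvp, generation_mightbedirty },
	[TOY_SWAP_TMPFS] = { vnode_like_getvp, generation_mightbedirty },
};

/* Defaults first, then the optional per-type method, as in vm_pager_getvp(). */
static void
toy_pager_getvp(struct toy_object *obj, const char **vpp, bool *heldp)
{
	toy_getvp_t *method;

	*vpp = NULL;
	if (heldp != NULL)
		*heldp = false;
	method = toy_pagertab[obj->type].pgo_getvp;
	if (method != NULL)
		method(obj, vpp, heldp);
}

/* A NULL method means "never dirty", mirroring vm_object_mightbedirty(). */
static bool
toy_object_mightbedirty(struct toy_object *obj)
{
	toy_mightbedirty_t *method;

	method = toy_pagertab[obj->type].pgo_mightbedirty;
	return (method != NULL && method(obj));
}

int
main(void)
{
	struct toy_object tmpfs = { TOY_SWAP_TMPFS, 2, 1, "tmpfs-vnode" };
	struct toy_object anon = { TOY_SWAP, 5, 5, NULL };
	const char *vp;
	bool held;

	toy_pager_getvp(&tmpfs, &vp, &held);
	printf("tmpfs: vp=%s held=%d dirty=%d\n", vp, held,
	    toy_object_mightbedirty(&tmpfs));
	toy_pager_getvp(&anon, &vp, &held);
	printf("anon: vp=%s held=%d dirty=%d\n", vp ? vp : "(null)", held,
	    toy_object_mightbedirty(&anon));
	return (0);
}

Treating a missing method as a no-op keeps pagers that have nothing to do (device, phys, sg) from carrying stub implementations, which is the same design choice the patch makes for pgo_getvp, pgo_freespace, pgo_set_writeable_dirty and pgo_mightbedirty.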