diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -6009,7 +6009,7 @@
 	 * If the page table page is not leftover from an earlier promotion,
 	 * initialize it.
 	 */
-	if (mpte->valid == 0)
+	if (vm_page_none_valid(mpte))
 		pmap_fill_ptp(firstpte, newpte);
 
 	pmap_demote_pde_check(firstpte, newpte);
@@ -6085,7 +6085,7 @@
 	 * If this page table page was unmapped by a promotion, then it
 	 * contains valid mappings. Zero it to invalidate those mappings.
 	 */
-	if (mpte->valid != 0)
+	if (vm_page_any_valid(mpte))
 		pagezero((void *)PHYS_TO_DMAP(mptepa));
 
 	/*
@@ -6151,7 +6151,7 @@
 	} else {
 		mpte = pmap_remove_pt_page(pmap, sva);
 		if (mpte != NULL) {
-			KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
+			KASSERT(vm_page_all_valid(mpte),
 			    ("pmap_remove_pde: pte page not promoted"));
 			pmap_pt_page_count_adj(pmap, -1);
 			KASSERT(mpte->ref_count == NPTEPG,
@@ -7678,7 +7678,7 @@
 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
 			return;
 		p = vm_page_lookup(object, pindex);
-		KASSERT(p->valid == VM_PAGE_BITS_ALL,
+		KASSERT(vm_page_all_valid(p),
 		    ("pmap_object_init_pt: invalid page %p", p));
 		pat_mode = p->md.pat_mode;
 
@@ -7698,7 +7698,7 @@
 		p = TAILQ_NEXT(p, listq);
 		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 		    pa += PAGE_SIZE) {
-			KASSERT(p->valid == VM_PAGE_BITS_ALL,
+			KASSERT(vm_page_all_valid(p),
 			    ("pmap_object_init_pt: invalid page %p", p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
 			    pat_mode != p->md.pat_mode)
@@ -8445,7 +8445,7 @@
 				}
 				mpte = pmap_remove_pt_page(pmap, pv->pv_va);
 				if (mpte != NULL) {
-					KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
+					KASSERT(vm_page_all_valid(mpte),
 					    ("pmap_remove_pages: pte page not promoted"));
 					pmap_pt_page_count_adj(pmap, -1);
 					KASSERT(mpte->ref_count == NPTEPG,
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -350,17 +350,17 @@
 	return (meta_pages + tmp->tm_pages_used);
 }
 
-static size_t
+static bool
 tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
 {
 	if (tmpfs_mem_avail() < req_pages)
-		return (0);
+		return (false);
 
 	if (tmp->tm_pages_max != ULONG_MAX &&
 	    tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
-		return (0);
+		return (false);
 
-	return (1);
+	return (true);
 }
 
 static int
@@ -468,7 +468,7 @@
 	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
 		return (ENOSPC);
 
-	if (tmpfs_pages_check_avail(tmp, 1) == 0)
+	if (!tmpfs_pages_check_avail(tmp, 1))
 		return (ENOSPC);
 
 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
@@ -1737,7 +1737,7 @@
 	}
 
 	if (newpages > oldpages &&
-	    tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
+	    !tmpfs_pages_check_avail(tmp, newpages - oldpages))
 		return (ENOSPC);
 
 	VM_OBJECT_WLOCK(uobj);
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -52,17 +53,20 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
+#include
+#include
 
 #include
 #include
@@ -1630,6 +1634,10 @@
 		*retval = 64;
 		break;
 
+	case _PC_MIN_HOLE_SIZE:
+		*retval = PAGE_SIZE;
+		break;
+
 	default:
 		error = vop_stdpathconf(v);
 	}
@@ -1820,6 +1828,123 @@
 	return (ENOENT);
 }
 
+static off_t
+tmpfs_seek_data_locked(vm_object_t obj, off_t noff)
+{
+	vm_page_t m;
+	vm_pindex_t p, p_m, p_swp;
+
+	p = OFF_TO_IDX(noff);
+	m = vm_page_find_least(obj, p);
+
+	/*
+	 * Microoptimize most common case for SEEK_DATA, where there
+	 * is no hole and the page is resident.
+	 */
+	if (m != NULL && vm_page_any_valid(m) && m->pindex == p)
+		return (noff);
+
+	p_swp = swap_pager_find_least(obj, p);
+	if (p_swp == p)
+		return (noff);
+
+	p_m = m == NULL ? obj->size : m->pindex;
+	return (IDX_TO_OFF(MIN(p_m, p_swp)));
+}
+
+static off_t
+tmpfs_seek_next(off_t noff)
+{
+	return (noff + PAGE_SIZE - (noff & PAGE_MASK));
+}
+
+static off_t
+tmpfs_seek_hole_locked(vm_object_t obj, off_t noff)
+{
+	vm_page_t m;
+	vm_pindex_t p, p_swp;
+
+	for (;; noff = tmpfs_seek_next(noff)) {
+		/*
+		 * Walk over the largest sequential run of the valid pages.
+		 */
+		for (m = vm_page_lookup(obj, OFF_TO_IDX(noff));
+		    m != NULL && vm_page_any_valid(m);
+		    m = vm_page_next(m), noff = tmpfs_seek_next(noff))
+			;
+
+		/*
+		 * Found a hole in the object's page queue. Check if
+		 * there is a hole in the swap at the same place.
+		 */
+		p = OFF_TO_IDX(noff);
+		p_swp = swap_pager_find_least(obj, p);
+		if (p_swp != p) {
+			noff = IDX_TO_OFF(p);
+			break;
+		}
+	}
+	return (noff);
+}
+
+static int
+tmpfs_seek_datahole(struct vnode *vp, off_t *off, bool seekdata)
+{
+	struct tmpfs_node *tn;
+	vm_object_t obj;
+	off_t noff;
+	int error;
+
+	if (vp->v_type != VREG)
+		return (ENOTTY);
+	tn = VP_TO_TMPFS_NODE(vp);
+	noff = *off;
+	if (noff < 0 || noff >= tn->tn_size)
+		return (ENXIO);
+	obj = tn->tn_reg.tn_aobj;
+
+	VM_OBJECT_RLOCK(obj);
+	noff = seekdata ? tmpfs_seek_data_locked(obj, noff) :
+	    tmpfs_seek_hole_locked(obj, noff);
+	VM_OBJECT_RUNLOCK(obj);
+
+	error = 0;
+	if (noff >= tn->tn_size) {
+		if (seekdata)
+			error = ENXIO;
+		else
+			noff = tn->tn_size;
+	}
+	if (error == 0)
+		*off = noff;
+	return (error);
+}
+
+static int
+tmpfs_ioctl(struct vop_ioctl_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	int error = 0;
+
+	switch (ap->a_command) {
+	case FIOSEEKDATA:
+	case FIOSEEKHOLE:
+		error = vn_lock(vp, LK_SHARED);
+		if (error != 0) {
+			error = EBADF;
+			break;
+		}
+		error = tmpfs_seek_datahole(vp, (off_t *)ap->a_data,
+		    ap->a_command == FIOSEEKDATA);
+		VOP_UNLOCK(vp);
+		break;
+	default:
+		error = ENOTTY;
+		break;
+	}
+	return (error);
+}
+
 /*
  * Vnode operations vector used for files stored in a tmpfs file system.
  */
@@ -1863,6 +1988,7 @@
 	.vop_unlock =			vop_unlock,
 	.vop_islocked =			vop_islocked,
 	.vop_add_writecount =		vop_stdadd_writecount_nomsync,
+	.vop_ioctl =			tmpfs_ioctl,
 };
 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_entries);
 
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2847,7 +2847,7 @@
 	 * If the page table page is not leftover from an earlier promotion,
 	 * initialize it.
 	 */
-	if (mpte->valid == 0)
+	if (vm_page_none_valid(mpte))
 		pmap_fill_ptp(firstpte, newpte);
 
 	KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
@@ -2922,7 +2922,7 @@
 	 * If this page table page was unmapped by a promotion, then it
 	 * contains valid mappings. Zero it to invalidate those mappings.
 	 */
-	if (mpte->valid != 0)
+	if (vm_page_any_valid(mpte))
 		pagezero((void *)&KPTmap[i386_btop(trunc_4mpage(va))]);
 
 	/*
@@ -2986,7 +2986,7 @@
 	} else {
 		mpte = pmap_remove_pt_page(pmap, sva);
 		if (mpte != NULL) {
-			KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
+			KASSERT(vm_page_all_valid(mpte),
 			    ("pmap_remove_pde: pte page not promoted"));
 			pmap->pm_stats.resident_count--;
 			KASSERT(mpte->ref_count == NPTEPG,
@@ -4209,7 +4209,7 @@
 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
 			return;
 		p = vm_page_lookup(object, pindex);
-		KASSERT(p->valid == VM_PAGE_BITS_ALL,
+		KASSERT(vm_page_all_valid(p),
 		    ("pmap_object_init_pt: invalid page %p", p));
 		pat_mode = p->md.pat_mode;
 
@@ -4229,7 +4229,7 @@
 		p = TAILQ_NEXT(p, listq);
 		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 		    pa += PAGE_SIZE) {
-			KASSERT(p->valid == VM_PAGE_BITS_ALL,
+			KASSERT(vm_page_all_valid(p),
 			    ("pmap_object_init_pt: invalid page %p", p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
 			    pat_mode != p->md.pat_mode)
@@ -4837,7 +4837,7 @@
 				}
 				mpte = pmap_remove_pt_page(pmap, pv->pv_va);
 				if (mpte != NULL) {
-					KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
+					KASSERT(vm_page_all_valid(mpte),
 					    ("pmap_remove_pages: pte page not promoted"));
 					pmap->pm_stats.resident_count--;
 					KASSERT(mpte->ref_count == NPTEPG,
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -2576,7 +2576,7 @@
 	if (error != 0)
 		goto out;
 	noff = *off;
-	if (noff >= va.va_size) {
+	if (noff < 0 || noff >= va.va_size) {
 		error = ENXIO;
 		goto out;
 	}
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -2259,6 +2259,7 @@
 	int i;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
+	MPASS((object->flags & OBJ_SWAP) != 0);
 	if (pctrie_is_empty(&object->un_pager.swp.swp_blks))
 		return (object->size);
 
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -993,6 +993,13 @@
 	return (m->valid == VM_PAGE_BITS_ALL);
 }
 
+static inline bool
+vm_page_any_valid(vm_page_t m)
+{
+
+	return (m->valid != 0);
+}
+
 static inline bool
 vm_page_none_valid(vm_page_t m)
 {
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2548,7 +2548,7 @@
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
 	    ("page %p has unexpected memattr %d", m,
 	    pmap_page_get_memattr(m)));
-	KASSERT(m->valid == 0, ("free page %p is valid", m));
+	KASSERT(vm_page_none_valid(m), ("free page %p is valid", m));
 	pmap_vm_page_alloc_check(m);
 }
 
@@ -4226,7 +4226,7 @@
 	 * If we were asked to not cache the page, place it near the head of the
 	 * inactive queue so that is reclaimed sooner.
 	 */
-	if (noreuse || m->valid == 0) {
+	if (noreuse || vm_page_none_valid(m)) {
 		nqueue = PQ_INACTIVE;
 		nflag = PGA_REQUEUE_HEAD;
 	} else {
@@ -4704,7 +4704,8 @@
 	ma[0] = m;
 	for (i = 1; i < after; i++) {
 		if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
-			if (ma[i]->valid || !vm_page_tryxbusy(ma[i]))
+			if (vm_page_any_valid(ma[i]) ||
+			    !vm_page_tryxbusy(ma[i]))
 				break;
 		} else {
 			ma[i] = vm_page_alloc(object, m->pindex + i,
@@ -5392,7 +5393,7 @@
 	vm_page_bits_t bits;
 
 	bits = vm_page_bits(base, size);
-	return (m->valid != 0 && (m->valid & bits) == bits);
+	return (vm_page_any_valid(m) && (m->valid & bits) == bits);
 }
 
 /*
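
For reference, the new behavior can be exercised from userspace with a small
test program. This is not part of the patch; it is a minimal sketch that
assumes the file is created on a tmpfs mount running a kernel with this change.
lseek(2) implements SEEK_DATA/SEEK_HOLE in terms of the FIOSEEKDATA/FIOSEEKHOLE
ioctls, so on tmpfs these requests should now be answered by the new
tmpfs_ioctl() handler rather than the generic bmap-based fallback in
vfs_vnops.c.

#include <sys/types.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	char path[] = "/tmp/sparse.XXXXXX";	/* assumes /tmp is tmpfs */
	off_t data, hole;
	long pgsz;
	int fd;

	pgsz = sysconf(_SC_PAGESIZE);
	fd = mkstemp(path);
	if (fd == -1)
		err(1, "mkstemp");

	/* One page of data, a one-page hole, then more data. */
	if (pwrite(fd, "x", 1, 0) != 1 ||
	    pwrite(fd, "x", 1, 2 * pgsz) != 1)
		err(1, "pwrite");

	/* Query the first data offset and the first hole offset. */
	data = lseek(fd, 0, SEEK_DATA);
	hole = lseek(fd, 0, SEEK_HOLE);
	printf("first data at %jd, first hole at %jd\n",
	    (intmax_t)data, (intmax_t)hole);

	(void)unlink(path);
	(void)close(fd);
	return (0);
}

On a patched kernel this should typically report the first data at offset 0
and the first hole at one page, matching the PAGE_SIZE hole granularity
advertised via _PC_MIN_HOLE_SIZE above.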