Index: head/sys/kern/vfs_bio.c
===================================================================
--- head/sys/kern/vfs_bio.c
+++ head/sys/kern/vfs_bio.c
@@ -154,7 +154,6 @@
 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
 struct proc *bufdaemonproc;
 
-static int inmem(struct vnode *vp, daddr_t blkno);
 static void vm_hold_free_pages(struct buf *bp, int newbsize);
 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
 		vm_offset_t to);
@@ -3585,48 +3584,54 @@
  * associated VM object.  This is like incore except
  * it also hunts around in the VM system for the data.
  */
-
-static int
+bool
 inmem(struct vnode * vp, daddr_t blkno)
 {
 	vm_object_t obj;
 	vm_offset_t toff, tinc, size;
-	vm_page_t m;
+	vm_page_t m, n;
 	vm_ooffset_t off;
+	int valid;
 
 	ASSERT_VOP_LOCKED(vp, "inmem");
 
 	if (incore(&vp->v_bufobj, blkno))
-		return 1;
+		return (true);
 	if (vp->v_mount == NULL)
-		return 0;
+		return (false);
 	obj = vp->v_object;
 	if (obj == NULL)
-		return (0);
+		return (false);
 
 	size = PAGE_SIZE;
 	if (size > vp->v_mount->mnt_stat.f_iosize)
 		size = vp->v_mount->mnt_stat.f_iosize;
 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 
-	VM_OBJECT_RLOCK(obj);
 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
-		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
-		if (!m)
-			goto notinmem;
+		m = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
+recheck:
+		if (m == NULL)
+			return (false);
+
 		tinc = size;
 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
-		if (vm_page_is_valid(m,
-		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
-			goto notinmem;
+		/*
+		 * Consider page validity only if page mapping didn't change
+		 * during the check.
+		 */
+		valid = vm_page_is_valid(m,
+		    (vm_offset_t)((toff + off) & PAGE_MASK), tinc);
+		n = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
+		if (m != n) {
+			m = n;
+			goto recheck;
+		}
+		if (!valid)
+			return (false);
 	}
-	VM_OBJECT_RUNLOCK(obj);
-	return 1;
-
-notinmem:
-	VM_OBJECT_RUNLOCK(obj);
-	return (0);
+	return (true);
 }
 
 /*
Index: head/sys/sys/buf.h
===================================================================
--- head/sys/sys/buf.h
+++ head/sys/sys/buf.h
@@ -549,6 +549,7 @@
 void	vfs_busy_pages_acquire(struct buf *bp);
 void	vfs_busy_pages_release(struct buf *bp);
 struct	buf *incore(struct bufobj *, daddr_t);
+bool	inmem(struct vnode *, daddr_t);
 struct	buf *gbincore(struct bufobj *, daddr_t);
 struct	buf *gbincore_unlocked(struct bufobj *, daddr_t);
 struct	buf *getblk(struct vnode *, daddr_t, int, int, int, int);
Index: head/sys/vm/vm_page.h
===================================================================
--- head/sys/vm/vm_page.h
+++ head/sys/vm/vm_page.h
@@ -700,6 +700,7 @@
 void vm_page_invalid(vm_page_t m);
 void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
+vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 void vm_page_pqbatch_drain(void);
 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
Index: head/sys/vm/vm_page.c
===================================================================
--- head/sys/vm/vm_page.c
+++ head/sys/vm/vm_page.c
@@ -1698,6 +1698,21 @@
 }
 
 /*
+ * vm_page_lookup_unlocked:
+ *
+ *	Returns the page associated with the object/offset pair specified;
+ *	if none is found, NULL is returned.  The page may no longer be
+ *	present in the object at the time that this function returns.  Only
+ *	useful for opportunistic checks such as inmem().
+ */
+vm_page_t
+vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
+{
+
+	return (vm_radix_lookup_unlocked(&object->rtree, pindex));
+}
+
+/*
  * vm_page_relookup:
  *
  *	Returns a page that must already have been busied by
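The core of the change is an optimistic lookup-and-revalidate pattern: inmem() now looks the page up without taking the VM object lock, runs the validity test, then repeats the lookup and only trusts the result if both lookups return the same page (the unlocked radix lookup itself is safe because the tree is reclaimed via SMR). Below is a minimal userland C sketch of that pattern, not kernel code: the atomic slot stands in for the object's radix tree, the names slot_lookup_unlocked and page_is_valid_stable are hypothetical, and page reclamation, which SMR handles in the kernel, is ignored here.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct page {
	bool valid;
};

/* Stand-in for the object's radix tree: one atomic object/page slot. */
static _Atomic(struct page *) slot;

/* Hypothetical analogue of vm_page_lookup_unlocked(). */
static struct page *
slot_lookup_unlocked(void)
{
	return (atomic_load_explicit(&slot, memory_order_acquire));
}

/*
 * Analogue of the m/n recheck in the new inmem(): trust the validity
 * test only if a second lookup still returns the same page.
 */
static bool
page_is_valid_stable(void)
{
	struct page *m, *n;
	bool valid;

	m = slot_lookup_unlocked();
	for (;;) {
		if (m == NULL)
			return (false);
		valid = m->valid;
		n = slot_lookup_unlocked();
		if (m == n)
			return (valid);
		/* The mapping changed during the check; retry. */
		m = n;
	}
}

int
main(void)
{
	static struct page p = { .valid = true };

	atomic_store_explicit(&slot, &p, memory_order_release);
	return (page_is_valid_stable() ? 0 : 1);
}

The sketch uses a retry loop where the kernel code uses goto recheck, but the logic is the same: a validity result is discarded whenever the object/page mapping changes underneath the check, so a stale page can never be reported as resident.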