Index: sys/kern/uipc_shm.c
===================================================================
--- sys/kern/uipc_shm.c
+++ sys/kern/uipc_shm.c
@@ -176,23 +176,25 @@
 	offset = uio->uio_offset & PAGE_MASK;
 	tlen = MIN(PAGE_SIZE - offset, len);
 
-	VM_OBJECT_WLOCK(obj);
+	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
+	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
+	if (rv == VM_PAGER_OK)
+		goto found;
 
 	/*
 	 * Read I/O without either a corresponding resident page or swap
 	 * page: use zero_region. This is intended to avoid instantiating
 	 * pages on read from a sparse region.
 	 */
-	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
+	VM_OBJECT_WLOCK(obj);
+	m = vm_page_lookup(obj, idx);
+	if (uio->uio_rw == UIO_READ && m == NULL &&
 	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
 		VM_OBJECT_WUNLOCK(obj);
 		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
 	}
 
 	/*
-	 * Parallel reads of the page content from disk are prevented
-	 * by exclusive busy.
-	 *
 	 * Although the tmpfs vnode lock is held here, it is
 	 * nonetheless safe to sleep waiting for a free page. The
 	 * pageout daemon does not need to acquire the tmpfs vnode
@@ -208,11 +210,13 @@
 		return (EIO);
 	}
 	VM_OBJECT_WUNLOCK(obj);
+
+found:
 	error = uiomove_fromphys(&m, offset, tlen, uio);
 	if (uio->uio_rw == UIO_WRITE && error == 0)
 		vm_page_set_dirty(m);
-	vm_page_activate(m);
 	vm_page_sunbusy(m);
+	vm_page_activate(m);
 
 	return (error);
 }
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -4657,6 +4657,10 @@
 		}
 		vm_page_busy_release(m);
 	}
+	if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
+		*mp = NULL;
+		return (VM_PAGER_FAIL);
+	}
 	VM_OBJECT_WLOCK(object);
 	error = vm_page_grab_valid(mp, object, pindex, allocflags);
 	VM_OBJECT_WUNLOCK(object);
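
For reviewers who want the shape of the change in isolation: the patch converts
the uiomove path in uipc_shm.c to try an unlocked, shared-busy grab of an
already-valid resident page first, and to take the object write lock only when
that fast path fails. A minimal standalone sketch of that pattern follows;
read_page() is a hypothetical wrapper invented for illustration, while the grab
functions and the flag combinations are the ones used in the patch.

#include <sys/param.h>
#include <sys/errno.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/*
 * Hypothetical illustration only, not part of the patch: return the page
 * at "idx" in "obj" shared-busied and valid, preferring the unlocked fast
 * path that this change enables.
 */
static int
read_page(vm_object_t obj, vm_pindex_t idx, vm_page_t *mp)
{
	vm_page_t m;
	int rv;

	/*
	 * Fast path: no object lock. VM_ALLOC_NOCREAT makes the unlocked
	 * grab return VM_PAGER_FAIL (the vm_page.c hunk above) instead of
	 * falling back to the locked grab itself, so the caller stays in
	 * control of the slow path.
	 */
	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
	if (rv == VM_PAGER_OK) {
		*mp = m;
		return (0);
	}

	/*
	 * Slow path: the page is absent or not yet valid; take the object
	 * write lock and let the locked grab allocate and page it in.
	 */
	VM_OBJECT_WLOCK(obj);
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	VM_OBJECT_WUNLOCK(obj);
	if (rv != VM_PAGER_OK)
		return (EIO);
	*mp = m;
	return (0);
}

The vm_page.c hunk is what makes this split possible: without the early
return, vm_page_grab_valid_unlocked() would acquire the object lock and call
vm_page_grab_valid() on a miss even when VM_ALLOC_NOCREAT was passed, so a
caller could not use it purely as a lockless fast path.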