Index: sys/dev/md/md.c
===================================================================
--- sys/dev/md/md.c
+++ sys/dev/md/md.c
@@ -1062,12 +1062,14 @@
 	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
 		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
 		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
+		VM_OBJECT_WUNLOCK(sc->object);
 		if (bp->bio_cmd == BIO_READ) {
 			if (vm_page_all_valid(m))
 				rv = VM_PAGER_OK;
 			else
 				rv = vm_pager_get_pages(sc->object, &m, 1,
 				    NULL, NULL);
+			VM_OBJECT_WLOCK(sc->object);
 			if (rv == VM_PAGER_ERROR) {
 				vm_page_free(m);
 				break;
@@ -1098,6 +1100,7 @@
 			else
 				rv = vm_pager_get_pages(sc->object, &m, 1,
 				    NULL, NULL);
+			VM_OBJECT_WLOCK(sc->object);
 			if (rv == VM_PAGER_ERROR) {
 				vm_page_free(m);
 				break;
@@ -1122,6 +1125,7 @@
 			else
 				rv = vm_pager_get_pages(sc->object, &m, 1,
 				    NULL, NULL);
+			VM_OBJECT_WLOCK(sc->object);
 			if (rv == VM_PAGER_ERROR) {
 				vm_page_free(m);
 				break;
@@ -1139,7 +1143,8 @@
 					m = NULL;
 				}
 			}
-		}
+		} else
+			VM_OBJECT_WLOCK(sc->object);
 		if (m != NULL) {
 			vm_page_xunbusy(m);
Index: sys/fs/tmpfs/tmpfs_subr.c
===================================================================
--- sys/fs/tmpfs/tmpfs_subr.c
+++ sys/fs/tmpfs/tmpfs_subr.c
@@ -1479,8 +1479,10 @@
 			    VM_ALLOC_WAITFAIL);
 			if (m == NULL)
 				goto retry;
+			VM_OBJECT_WUNLOCK(uobj);
 			rv = vm_pager_get_pages(uobj, &m, 1, NULL,
 			    NULL);
+			VM_OBJECT_WLOCK(uobj);
 			if (rv == VM_PAGER_OK) {
 				/*
 				 * Since the page was not resident,
Index: sys/kern/kern_sendfile.c
===================================================================
--- sys/kern/kern_sendfile.c
+++ sys/kern/kern_sendfile.c
@@ -421,9 +421,11 @@
 		}
 
 		refcount_acquire(&sfio->nios);
+		VM_OBJECT_WUNLOCK(obj);
 		rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
 		    i + count == npages ? &rhpages : NULL,
 		    &sendfile_iodone, sfio);
+		VM_OBJECT_WLOCK(obj);
 		if (__predict_false(rv != VM_PAGER_OK)) {
 			/*
 			 * Perform full pages recovery before returning EIO.
Index: sys/kern/uipc_shm.c
===================================================================
--- sys/kern/uipc_shm.c
+++ sys/kern/uipc_shm.c
@@ -502,8 +502,10 @@
 			    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
 			if (m == NULL)
 				goto retry;
+			VM_OBJECT_WUNLOCK(object);
 			rv = vm_pager_get_pages(object, &m, 1, NULL,
 			    NULL);
+			VM_OBJECT_WLOCK(object);
 			if (rv == VM_PAGER_OK) {
 				/*
 				 * Since the page was not resident,
Index: sys/vm/device_pager.c
===================================================================
--- sys/vm/device_pager.c
+++ sys/vm/device_pager.c
@@ -289,9 +289,9 @@
 	/* Since our haspage reports zero after/before, the count is 1. */
 	KASSERT(count == 1, ("%s: count %d", __func__, count));
-	VM_OBJECT_ASSERT_WLOCKED(object);
 	if (object->un_pager.devp.ops->cdev_pg_fault == NULL)
 		return (VM_PAGER_FAIL);
+	VM_OBJECT_WLOCK(object);
 	error = object->un_pager.devp.ops->cdev_pg_fault(object,
 	    IDX_TO_OFF(ma[0]->pindex), PROT_READ, &ma[0]);
@@ -312,6 +312,7 @@
 		if (rahead)
 			*rahead = 0;
 	}
+	VM_OBJECT_WUNLOCK(object);
 
 	return (error);
 }
Index: sys/vm/phys_pager.c
===================================================================
--- sys/vm/phys_pager.c
+++ sys/vm/phys_pager.c
@@ -143,7 +143,6 @@
 {
 	int i;
 
-	VM_OBJECT_ASSERT_WLOCKED(object);
 	for (i = 0; i < count; i++) {
 		if (vm_page_none_valid(m[i])) {
 			if ((m[i]->flags & PG_ZERO) == 0)
Index: sys/vm/sg_pager.c
===================================================================
--- sys/vm/sg_pager.c
+++ sys/vm/sg_pager.c
@@ -155,10 +155,9 @@
 	/* Since our haspage reports zero after/before, the count is 1. */
 	KASSERT(count == 1, ("%s: count %d", __func__, count));
-	VM_OBJECT_ASSERT_WLOCKED(object);
+	/* Handle is stable while paging is in progress. */
 	sg = object->handle;
 	memattr = object->memattr;
-	VM_OBJECT_WUNLOCK(object);
 	offset = m[0]->pindex;
 
 	/*
@@ -196,6 +195,7 @@
 	VM_OBJECT_WLOCK(object);
 	TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, plinks.q);
 	vm_page_replace(page, object, offset, m[0]);
+	VM_OBJECT_WUNLOCK(object);
 	m[0] = page;
 	vm_page_valid(page);
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c
+++ sys/vm/swap_pager.c
@@ -1197,12 +1197,15 @@
 	daddr_t blk;
 	int i, maxahead, maxbehind, reqcount;
 
+	VM_OBJECT_WLOCK(object);
 	reqcount = count;
 	KASSERT(object->type == OBJT_SWAP,
 	    ("%s: object not swappable", __func__));
-	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead))
+	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead)) {
+		VM_OBJECT_WUNLOCK(object);
 		return (VM_PAGER_FAIL);
+	}
 
 	KASSERT(reqcount - 1 <= maxahead,
 	    ("page count %d extends beyond swap block", reqcount));
@@ -1319,6 +1322,7 @@
 	 * is set in the metadata for each page in the request.
 	 */
 	VM_OBJECT_WLOCK(object);
+	/* This could be implemented more efficiently with aflags */
 	while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
 		ma[0]->oflags |= VPO_SWAPSLEEP;
 		VM_CNT_INC(v_intrans);
@@ -1329,6 +1333,7 @@
 			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
 		}
 	}
+	VM_OBJECT_WUNLOCK(object);
 
 	/*
 	 * If we had an unrecoverable read error pages will not be valid.
@@ -1360,7 +1365,6 @@
 	int r, error;
 
 	r = swap_pager_getpages(object, ma, count, rbehind, rahead);
-	VM_OBJECT_WUNLOCK(object);
 	switch (r) {
 	case VM_PAGER_OK:
 		error = 0;
@@ -1375,7 +1379,6 @@
 		panic("unhandled swap_pager_getpages() error %d", r);
 	}
 	(iodone)(arg, ma, count, error);
-	VM_OBJECT_WLOCK(object);
 
 	return (r);
 }
@@ -1756,10 +1759,12 @@
 		if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL)
 			continue;
 		if (j < i) {
+			VM_OBJECT_WUNLOCK(object);
 			/* Page-in nonresident pages. Mark for laundering. */
 			if (swap_pager_getpages(object, &ma[j], i - j, NULL,
 			    NULL) != VM_PAGER_OK)
 				panic("%s: read from swap failed", __func__);
+			VM_OBJECT_WLOCK(object);
 			do {
 				swp_pager_force_launder(ma[j]);
 			} while (++j < i);
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -1078,8 +1078,10 @@
 			}
 			ahead = ulmin(ahead, atop(e_end - vaddr) - 1);
 		}
+		VM_OBJECT_WUNLOCK(fs.object);
 		rv = vm_pager_get_pages(fs.object, &fs.m, 1,
 		    &behind, &ahead);
+		VM_OBJECT_WLOCK(fs.object);
 		if (rv == VM_PAGER_OK) {
 			faultcount = behind + 1 + ahead;
 			hardfault = true;
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -4440,7 +4440,9 @@
 			}
 		}
 		after = i;
+		VM_OBJECT_WUNLOCK(object);
 		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
+		VM_OBJECT_WLOCK(object);
 		/* Pager may have replaced a page. */
 		m = ma[0];
 		if (rv != VM_PAGER_OK) {
Index: sys/vm/vm_pager.c
===================================================================
--- sys/vm/vm_pager.c
+++ sys/vm/vm_pager.c
@@ -263,7 +263,7 @@
 	 * bogus page, but the first and last pages must be a real ones.
 	 */
 
-	VM_OBJECT_ASSERT_WLOCKED(object);
+	VM_OBJECT_ASSERT_UNLOCKED(object);
 	KASSERT(count > 0, ("%s: 0 count", __func__));
 	for (int i = 0 ; i < count; i++) {
 		if (m[i] == bogus_page) {
@@ -311,9 +311,13 @@
 		 * If pager has replaced a page, assert that it had
 		 * updated the array.
 		 */
+#ifdef INVARIANTS
+		VM_OBJECT_RLOCK(object);
 		KASSERT(m[i] == vm_page_lookup(object, pindex++),
 		    ("%s: mismatch page %p pindex %ju", __func__,
 		    m[i], (uintmax_t )pindex - 1));
+		VM_OBJECT_RUNLOCK(object);
+#endif
 		/*
 		 * Zero out partially filled data.
 		 */
Index: sys/vm/vm_swapout.c
===================================================================
--- sys/vm/vm_swapout.c
+++ sys/vm/vm_swapout.c
@@ -560,6 +560,7 @@
 	VM_OBJECT_WLOCK(ksobj);
 	(void)vm_page_grab_pages(ksobj, 0, oom_alloc | VM_ALLOC_WIRED, ma,
 	    pages);
+	VM_OBJECT_WUNLOCK(ksobj);
 	for (i = 0; i < pages;) {
 		vm_page_assert_xbusied(ma[i]);
 		if (vm_page_all_valid(ma[i])) {
@@ -571,7 +572,9 @@
 		for (j = i + 1; j < pages; j++)
 			if (vm_page_all_valid(ma[j]))
 				break;
+		VM_OBJECT_WLOCK(ksobj);
 		rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
+		VM_OBJECT_WUNLOCK(ksobj);
 		KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
 		count = min(a + 1, j - i);
 		rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
@@ -582,7 +585,6 @@
 			vm_page_xunbusy(ma[j]);
 		i += count;
 	}
-	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(td->td_kstack, ma, pages);
 	cpu_thread_swapin(td);
 }
Index: sys/vm/vnode_pager.c
===================================================================
--- sys/vm/vnode_pager.c
+++ sys/vm/vnode_pager.c
@@ -735,12 +735,11 @@
 	struct vnode *vp;
 	int rtval;
 
+	/* Handle is stable with paging in progress. */
 	vp = object->handle;
-	VM_OBJECT_WUNLOCK(object);
 	rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
 	KASSERT(rtval != EOPNOTSUPP,
 	    ("vnode_pager: FS getpages not implemented\n"));
-	VM_OBJECT_WLOCK(object);
 	return rtval;
 }
@@ -752,11 +751,9 @@
 	int rtval;
 
 	vp = object->handle;
-	VM_OBJECT_WUNLOCK(object);
 	rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead,
 	    iodone, arg);
 	KASSERT(rtval != EOPNOTSUPP,
 	    ("vnode_pager: FS getpages_async not implemented\n"));
-	VM_OBJECT_WLOCK(object);
 	return (rtval);
 }
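
Note for reviewers: below is a minimal sketch, not part of the patch, of the caller-side protocol the hunks above converge on: busy the page under the VM object write lock, drop the lock across vm_pager_get_pages(), and retake it before acting on the result, the same shape as the md(4), tmpfs and shm paths. The helper name example_pagein() and its error handling are illustrative only.

	/* Illustrative only; not part of the patch. */
	#include <sys/param.h>
	#include <vm/vm.h>
	#include <vm/vm_object.h>
	#include <vm/vm_page.h>
	#include <vm/vm_pager.h>

	static int
	example_pagein(vm_object_t obj, vm_pindex_t pindex, vm_page_t *mp)
	{
		vm_page_t m;
		int rv;

		/* Busy the page while the object write lock is held. */
		VM_OBJECT_WLOCK(obj);
		m = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL);
		VM_OBJECT_WUNLOCK(obj);

		if (vm_page_all_valid(m))
			rv = VM_PAGER_OK;
		else
			/* The pager is now entered without the object lock. */
			rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);

		/* Retake the lock before acting on the result. */
		VM_OBJECT_WLOCK(obj);
		if (rv != VM_PAGER_OK) {
			vm_page_free(m);
			VM_OBJECT_WUNLOCK(obj);
			return (rv);
		}
		VM_OBJECT_WUNLOCK(obj);

		/* Page is returned valid and exclusive-busied. */
		*mp = m;
		return (VM_PAGER_OK);
	}

The sketch passes &m and re-reads it after the call because the pager may replace a page in the array, as the "Pager may have replaced a page." comment in the vm_page.c hunk notes.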