Index: head/sys/dev/md/md.c
===================================================================
--- head/sys/dev/md/md.c
+++ head/sys/dev/md/md.c
@@ -1057,11 +1057,12 @@
 	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
 
 	rv = VM_PAGER_OK;
-	VM_OBJECT_WLOCK(sc->object);
 	vm_object_pip_add(sc->object, 1);
 	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
 		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
+		VM_OBJECT_WLOCK(sc->object);
 		m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
+		VM_OBJECT_WUNLOCK(sc->object);
 		if (bp->bio_cmd == BIO_READ) {
 			if (vm_page_all_valid(m))
 				rv = VM_PAGER_OK;
@@ -1069,7 +1070,9 @@
 				rv = vm_pager_get_pages(sc->object, &m, 1,
 				    NULL, NULL);
 				if (rv == VM_PAGER_ERROR) {
+					VM_OBJECT_WLOCK(sc->object);
 					vm_page_free(m);
+					VM_OBJECT_WUNLOCK(sc->object);
 					break;
 				} else if (rv == VM_PAGER_FAIL) {
 					/*
@@ -1099,7 +1102,9 @@
 				rv = vm_pager_get_pages(sc->object, &m, 1,
 				    NULL, NULL);
 				if (rv == VM_PAGER_ERROR) {
+					VM_OBJECT_WLOCK(sc->object);
 					vm_page_free(m);
+					VM_OBJECT_WUNLOCK(sc->object);
 					break;
 				} else if (rv == VM_PAGER_FAIL)
 					pmap_zero_page(m);
@@ -1122,8 +1127,10 @@
 			else
 				rv = vm_pager_get_pages(sc->object, &m, 1,
 				    NULL, NULL);
+			VM_OBJECT_WLOCK(sc->object);
 			if (rv == VM_PAGER_ERROR) {
 				vm_page_free(m);
+				VM_OBJECT_WUNLOCK(sc->object);
 				break;
 			} else if (rv == VM_PAGER_FAIL) {
 				vm_page_free(m);
@@ -1139,6 +1146,7 @@
 					m = NULL;
 				}
 			}
+			VM_OBJECT_WUNLOCK(sc->object);
 		}
 		if (m != NULL) {
 			vm_page_xunbusy(m);
@@ -1160,7 +1168,6 @@
 		ma_offs += len;
 	}
 	vm_object_pip_wakeup(sc->object);
-	VM_OBJECT_WUNLOCK(sc->object);
 
 	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
 }
Index: head/sys/fs/tmpfs/tmpfs_subr.c
===================================================================
--- head/sys/fs/tmpfs/tmpfs_subr.c
+++ head/sys/fs/tmpfs/tmpfs_subr.c
@@ -1480,8 +1480,12 @@
 				    VM_ALLOC_WAITFAIL);
 				if (m == NULL)
 					goto retry;
+				vm_object_pip_add(uobj, 1);
+				VM_OBJECT_WUNLOCK(uobj);
 				rv = vm_pager_get_pages(uobj, &m, 1, NULL,
 				    NULL);
+				VM_OBJECT_WLOCK(uobj);
+				vm_object_pip_wakeup(uobj);
 				if (rv == VM_PAGER_OK) {
 					/*
 					 * Since the page was not resident,
Index: head/sys/kern/kern_sendfile.c
===================================================================
--- head/sys/kern/kern_sendfile.c
+++ head/sys/kern/kern_sendfile.c
@@ -89,6 +89,7 @@
 	int		npages;
 	struct socket	*so;
 	struct mbuf	*m;
+	vm_object_t	obj;
 #ifdef KERN_TLS
 	struct ktls_session *tls;
 #endif
@@ -269,6 +270,8 @@
 	if (!refcount_release(&sfio->nios))
 		return;
 
+	vm_object_pip_wakeup(sfio->obj);
+
 	if (__predict_false(sfio->error && sfio->m == NULL)) {
 		/*
 		 * I/O operation failed, but pru_send hadn't been executed -
@@ -421,9 +424,11 @@
 		}
 
 		refcount_acquire(&sfio->nios);
+		VM_OBJECT_WUNLOCK(obj);
 		rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
 		    i + count == npages ? &rhpages : NULL,
 		    &sendfile_iodone, sfio);
+		VM_OBJECT_WLOCK(obj);
 		if (__predict_false(rv != VM_PAGER_OK)) {
 			/*
 			 * Perform full pages recovery before returning EIO.
@@ -815,7 +820,9 @@
 	    npages * sizeof(vm_page_t), M_TEMP, M_WAITOK);
 	refcount_init(&sfio->nios, 1);
 	sfio->so = so;
+	sfio->obj = obj;
 	sfio->error = 0;
+	vm_object_pip_add(obj, 1);
 
 #ifdef KERN_TLS
 	/*
@@ -1053,6 +1060,7 @@
 		 * we can send data right now without the
 		 * PRUS_NOTREADY flag.
 		 */
+		vm_object_pip_wakeup(sfio->obj);
 		free(sfio, M_TEMP);
 #ifdef KERN_TLS
 		if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) {
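The kern_sendfile.c hunks above extend the new discipline to asynchronous paging: the paging-in-progress reference is taken when the request structure is set up and dropped in the I/O completion callback, so it spans the whole async operation while the object lock does not. Below is a minimal sketch of that shape, detached from sendfile. The names xfer_start() and xfer_iodone() are illustrative only, and, mirroring the vnode pager's contract where a synchronous failure does not invoke the callback, the error path releases the reference by hand.

#include <sys/param.h>
#include <sys/errno.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static void
xfer_iodone(void *arg, vm_page_t *ma, int count, int error)
{
	vm_object_t obj;

	obj = arg;
	/* Drop the pip reference taken in xfer_start(). */
	vm_object_pip_wakeup(obj);
}

static int
xfer_start(vm_object_t obj, vm_page_t *ma, int count)
{
	int rv;

	/*
	 * The pip reference keeps the object's type and handle stable
	 * until the callback runs, without holding the object lock
	 * across driver and filesystem code.
	 */
	VM_OBJECT_WLOCK(obj);
	vm_object_pip_add(obj, 1);
	VM_OBJECT_WUNLOCK(obj);

	rv = vm_pager_get_pages_async(obj, ma, count, NULL, NULL,
	    xfer_iodone, obj);
	if (rv != VM_PAGER_OK)
		xfer_iodone(obj, ma, count, EIO);
	return (rv);
}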
Index: head/sys/kern/uipc_shm.c
===================================================================
--- head/sys/kern/uipc_shm.c
+++ head/sys/kern/uipc_shm.c
@@ -504,8 +504,12 @@
 			    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
 			if (m == NULL)
 				goto retry;
+			vm_object_pip_add(object, 1);
+			VM_OBJECT_WUNLOCK(object);
 			rv = vm_pager_get_pages(object, &m, 1, NULL,
 			    NULL);
+			VM_OBJECT_WLOCK(object);
+			vm_object_pip_wakeup(object);
 			if (rv == VM_PAGER_OK) {
 				/*
 				 * Since the page was not resident,
Index: head/sys/vm/device_pager.c
===================================================================
--- head/sys/vm/device_pager.c
+++ head/sys/vm/device_pager.c
@@ -289,9 +289,9 @@
 	/* Since our haspage reports zero after/before, the count is 1. */
 	KASSERT(count == 1, ("%s: count %d", __func__, count));
 
-	VM_OBJECT_ASSERT_WLOCKED(object);
 	if (object->un_pager.devp.ops->cdev_pg_fault == NULL)
 		return (VM_PAGER_FAIL);
 
+	VM_OBJECT_WLOCK(object);
 	error = object->un_pager.devp.ops->cdev_pg_fault(object,
 	    IDX_TO_OFF(ma[0]->pindex), PROT_READ, &ma[0]);
@@ -312,6 +312,7 @@
 		if (rahead)
 			*rahead = 0;
 	}
+	VM_OBJECT_WUNLOCK(object);
 
 	return (error);
 }
Index: head/sys/vm/phys_pager.c
===================================================================
--- head/sys/vm/phys_pager.c
+++ head/sys/vm/phys_pager.c
@@ -143,7 +143,6 @@
 {
 	int i;
 
-	VM_OBJECT_ASSERT_WLOCKED(object);
 	for (i = 0; i < count; i++) {
 		if (vm_page_none_valid(m[i])) {
 			if ((m[i]->flags & PG_ZERO) == 0)
Index: head/sys/vm/sg_pager.c
===================================================================
--- head/sys/vm/sg_pager.c
+++ head/sys/vm/sg_pager.c
@@ -155,10 +155,9 @@
 	/* Since our haspage reports zero after/before, the count is 1. */
 	KASSERT(count == 1, ("%s: count %d", __func__, count));
 
-	VM_OBJECT_ASSERT_WLOCKED(object);
+	/* Handle is stable while paging is in progress. */
 	sg = object->handle;
 	memattr = object->memattr;
-	VM_OBJECT_WUNLOCK(object);
 	offset = m[0]->pindex;
 
 	/*
@@ -196,6 +195,7 @@
 	VM_OBJECT_WLOCK(object);
 	TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, plinks.q);
 	vm_page_replace(page, object, offset, m[0]);
+	VM_OBJECT_WUNLOCK(object);
 	m[0] = page;
 	vm_page_valid(page);
 
Index: head/sys/vm/swap_pager.c
===================================================================
--- head/sys/vm/swap_pager.c
+++ head/sys/vm/swap_pager.c
@@ -1197,12 +1197,15 @@
 	daddr_t blk;
 	int i, maxahead, maxbehind, reqcount;
 
+	VM_OBJECT_WLOCK(object);
 	reqcount = count;
 	KASSERT(object->type == OBJT_SWAP,
 	    ("%s: object not swappable", __func__));
 
-	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead))
+	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead)) {
+		VM_OBJECT_WUNLOCK(object);
 		return (VM_PAGER_FAIL);
+	}
 
 	KASSERT(reqcount - 1 <= maxahead,
 	    ("page count %d extends beyond swap block", reqcount));
@@ -1319,6 +1322,7 @@
 	 * is set in the metadata for each page in the request.
 	 */
 	VM_OBJECT_WLOCK(object);
+	/* This could be implemented more efficiently with aflags */
 	while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
 		ma[0]->oflags |= VPO_SWAPSLEEP;
 		VM_CNT_INC(v_intrans);
@@ -1329,6 +1333,7 @@
 			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
 		}
 	}
+	VM_OBJECT_WUNLOCK(object);
 
 	/*
 	 * If we had an unrecoverable read error pages will not be valid.
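The tmpfs_subr.c, uipc_shm.c, and (below) vm_page.c hunks all repeat the same caller-side protocol: take a pip reference, drop the object lock, call the pager, relock, release the reference. A minimal sketch of that protocol in isolation follows; pagein_one() is a made-up name, and "obj" is assumed to be a referenced object whose page at "pindex" the caller wants resident.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static int
pagein_one(vm_object_t obj, vm_pindex_t pindex)
{
	vm_page_t m;
	int rv;

	VM_OBJECT_WLOCK(obj);
	m = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL);
	if (vm_page_all_valid(m)) {
		VM_OBJECT_WUNLOCK(obj);
		vm_page_xunbusy(m);
		return (VM_PAGER_OK);
	}

	/*
	 * Take a paging-in-progress reference before dropping the
	 * lock; it keeps the object's type and handle stable while
	 * the pager runs unlocked.
	 */
	vm_object_pip_add(obj, 1);
	VM_OBJECT_WUNLOCK(obj);
	rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
	VM_OBJECT_WLOCK(obj);
	vm_object_pip_wakeup(obj);

	if (rv != VM_PAGER_OK) {
		/* Freeing a page still requires the object lock. */
		vm_page_free(m);
		VM_OBJECT_WUNLOCK(obj);
		return (rv);
	}
	VM_OBJECT_WUNLOCK(obj);
	/* The pager may have replaced the page; "m" was updated. */
	vm_page_xunbusy(m);
	return (VM_PAGER_OK);
}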
@@ -1360,7 +1365,6 @@
 	int r, error;
 
 	r = swap_pager_getpages(object, ma, count, rbehind, rahead);
-	VM_OBJECT_WUNLOCK(object);
 	switch (r) {
 	case VM_PAGER_OK:
 		error = 0;
@@ -1375,7 +1379,6 @@
 		panic("unhandled swap_pager_getpages() error %d", r);
 	}
 	(iodone)(arg, ma, count, error);
-	VM_OBJECT_WLOCK(object);
 
 	return (r);
 }
@@ -1756,10 +1759,12 @@
 		if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL)
 			continue;
 		if (j < i) {
+			VM_OBJECT_WUNLOCK(object);
 			/* Page-in nonresident pages. Mark for laundering. */
 			if (swap_pager_getpages(object, &ma[j], i - j, NULL,
 			    NULL) != VM_PAGER_OK)
 				panic("%s: read from swap failed", __func__);
+			VM_OBJECT_WLOCK(object);
 			do {
 				swp_pager_force_launder(ma[j]);
 			} while (++j < i);
Index: head/sys/vm/vm_fault.c
===================================================================
--- head/sys/vm/vm_fault.c
+++ head/sys/vm/vm_fault.c
@@ -1080,8 +1080,10 @@
 			}
 			ahead = ulmin(ahead, atop(e_end - vaddr) - 1);
 		}
+		VM_OBJECT_WUNLOCK(fs.object);
 		rv = vm_pager_get_pages(fs.object, &fs.m, 1,
 		    &behind, &ahead);
+		VM_OBJECT_WLOCK(fs.object);
 		if (rv == VM_PAGER_OK) {
 			faultcount = behind + 1 + ahead;
 			hardfault = true;
Index: head/sys/vm/vm_object.h
===================================================================
--- head/sys/vm/vm_object.h
+++ head/sys/vm/vm_object.h
@@ -264,6 +264,13 @@
 #define	VM_OBJECT_PICKUP(object, state)				\
 	lock_class_rw.lc_lock(&(object)->lock.lock_object, (state))
 
+#define	VM_OBJECT_ASSERT_PAGING(object)				\
+	KASSERT((object)->paging_in_progress != 0,		\
+	    ("vm_object %p is not paging", object))
+#define	VM_OBJECT_ASSERT_REFERENCE(object)			\
+	KASSERT((object)->reference_count != 0,			\
+	    ("vm_object %p is not referenced", object))
+
 struct vnode;
 
 /*
Index: head/sys/vm/vm_page.c
===================================================================
--- head/sys/vm/vm_page.c
+++ head/sys/vm/vm_page.c
@@ -4398,7 +4398,11 @@
 			}
 		}
 		after = i;
+		vm_object_pip_add(object, after);
+		VM_OBJECT_WUNLOCK(object);
 		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
+		VM_OBJECT_WLOCK(object);
+		vm_object_pip_wakeupn(object, after);
 		/* Pager may have replaced a page. */
 		m = ma[0];
 		if (rv != VM_PAGER_OK) {
Index: head/sys/vm/vm_pager.c
===================================================================
--- head/sys/vm/vm_pager.c
+++ head/sys/vm/vm_pager.c
@@ -263,7 +263,8 @@
 	 * bogus page, but the first and last pages must be a real ones.
 	 */
 
-	VM_OBJECT_ASSERT_WLOCKED(object);
+	VM_OBJECT_ASSERT_UNLOCKED(object);
+	VM_OBJECT_ASSERT_PAGING(object);
 	KASSERT(count > 0, ("%s: 0 count", __func__));
 	for (int i = 0 ; i < count; i++) {
 		if (m[i] == bogus_page) {
@@ -311,9 +312,13 @@
 		 * If pager has replaced a page, assert that it had
 		 * updated the array.
 		 */
+#ifdef INVARIANTS
+		VM_OBJECT_RLOCK(object);
 		KASSERT(m[i] == vm_page_lookup(object, pindex++),
 		    ("%s: mismatch page %p pindex %ju", __func__,
 		    m[i], (uintmax_t )pindex - 1));
+		VM_OBJECT_RUNLOCK(object);
+#endif
 		/*
 		 * Zero out partially filled data.
 		 */
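The vm_pager.c and vm_object.h hunks are the heart of the change: vm_pager_get_pages() now asserts that the object is unlocked and that the caller holds a paging-in-progress reference, instead of asserting the write lock. Pagers that still need the lock (device, sg, swap) take it themselves for the shortest necessary span. A sketch of what a pager method looks like under the new contract, modeled on the zero-fill behavior of phys_pager above; toy_pager_getpages() is a made-up example, not an existing pager.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/pmap.h>

static int
toy_pager_getpages(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead)
{
	int i;

	/* The new entry contract checked by vm_pager_get_pages(). */
	VM_OBJECT_ASSERT_UNLOCKED(object);
	VM_OBJECT_ASSERT_PAGING(object);

	/*
	 * Zero-fill the requested pages; the pages arrive exclusive
	 * busied, so neither their contents nor their validity bits
	 * need the object lock.
	 */
	for (i = 0; i < count; i++) {
		if ((m[i]->flags & PG_ZERO) == 0)
			pmap_zero_page(m[i]);
		vm_page_valid(m[i]);
	}
	if (rbehind != NULL)
		*rbehind = 0;
	if (rahead != NULL)
		*rahead = 0;
	return (VM_PAGER_OK);
}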
Index: head/sys/vm/vm_swapout.c
===================================================================
--- head/sys/vm/vm_swapout.c
+++ head/sys/vm/vm_swapout.c
@@ -560,6 +560,7 @@
 	VM_OBJECT_WLOCK(ksobj);
 	(void)vm_page_grab_pages(ksobj, 0, oom_alloc | VM_ALLOC_WIRED,
 	    ma, pages);
+	VM_OBJECT_WUNLOCK(ksobj);
 	for (i = 0; i < pages;) {
 		vm_page_assert_xbusied(ma[i]);
 		if (vm_page_all_valid(ma[i])) {
@@ -571,7 +572,9 @@
 		for (j = i + 1; j < pages; j++)
 			if (vm_page_all_valid(ma[j]))
 				break;
+		VM_OBJECT_WLOCK(ksobj);
 		rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
+		VM_OBJECT_WUNLOCK(ksobj);
 		KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
 		count = min(a + 1, j - i);
 		rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
@@ -582,7 +585,6 @@
 			vm_page_xunbusy(ma[j]);
 		i += count;
 	}
-	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(td->td_kstack, ma, pages);
 	cpu_thread_swapin(td);
 }
Index: head/sys/vm/vnode_pager.c
===================================================================
--- head/sys/vm/vnode_pager.c
+++ head/sys/vm/vnode_pager.c
@@ -735,12 +735,11 @@
 	struct vnode *vp;
 	int rtval;
 
+	/* Handle is stable with paging in progress. */
 	vp = object->handle;
-	VM_OBJECT_WUNLOCK(object);
 	rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
 	KASSERT(rtval != EOPNOTSUPP,
 	    ("vnode_pager: FS getpages not implemented\n"));
-	VM_OBJECT_WLOCK(object);
 	return rtval;
 }
 
@@ -752,11 +751,9 @@
 	int rtval;
 
 	vp = object->handle;
-	VM_OBJECT_WUNLOCK(object);
 	rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
 	KASSERT(rtval != EOPNOTSUPP,
 	    ("vnode_pager: FS getpages_async not implemented\n"));
-	VM_OBJECT_WLOCK(object);
 	return (rtval);
 }
 
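The "handle is stable with paging in progress" comments in vnode_pager.c and sg_pager.c rest on the draining side of the pip count: object teardown waits for paging_in_progress to reach zero before the object or its handle can go away, so a pager that entered with a pip reference may read object->handle unlocked. A sketch of that waiting side, using the existing vm_object_pip_wait() (which expects the object lock held and drops it while sleeping); wait_for_pagers() is an illustrative name, and the kernel's own instance of this wait is in vm_object_terminate().

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

static void
wait_for_pagers(vm_object_t object)
{
	VM_OBJECT_WLOCK(object);
	/* Sleep until paging_in_progress drains to zero. */
	vm_object_pip_wait(object, "pipwt");
	/*
	 * No pager call is now between pip_add() and pip_wakeup(),
	 * so fields such as the handle may be changed safely.
	 */
	VM_OBJECT_WUNLOCK(object);
}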