Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -644,6 +644,7 @@
 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);
 void vm_page_readahead_finish(vm_page_t m);
+void vm_page_readahead_free(vm_page_t m);
 bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -1362,6 +1362,29 @@
 }
 
 /*
+ * Destroy the identity of read ahead/behind pages after read failures.
+ */
+void
+vm_page_readahead_free(vm_page_t m)
+{
+
+	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
+	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
+	vm_page_assert_xbusied(m);
+	VM_OBJECT_ASSERT_WLOCKED(m->object);
+
+	/*
+	 * If someone has wired this page while the object lock
+	 * was not held, then the thread that unwires is responsible
+	 * for freeing the page.  Otherwise just free the page now.
+	 * The wire count of this unmapped page cannot change while
+	 * we have the page xbusy and the page's object wlocked.
+	 */
+	if (vm_page_remove(m))
+		vm_page_free(m);
+}
+
+/*
  * vm_page_sleep_if_busy:
  *
  *	Sleep and release the object lock if the page is busied.
@@ -4592,12 +4615,8 @@
 		/* Pager may have replaced a page. */
 		m = ma[0];
 		if (rv != VM_PAGER_OK) {
-			for (i = 0; i < after; i++) {
-				if (!vm_page_wired(ma[i]))
-					vm_page_free(ma[i]);
-				else
-					vm_page_xunbusy(ma[i]);
-			}
+			for (i = 0; i < after; i++)
+				vm_page_readahead_free(ma[i]);
 			*mp = NULL;
 			return (rv);
 		}
Index: sys/vm/vnode_pager.c
===================================================================
--- sys/vm/vnode_pager.c
+++ sys/vm/vnode_pager.c
@@ -1139,6 +1139,21 @@
 		bp->b_data = unmapped_buf;
 	}
 
+	/*
+	 * If the read failed, we must free any read ahead/behind pages here.
+	 * The requested pages are freed by the caller (for sync requests)
+	 * or by the bp->b_pgiodone callback (for async requests).
+	 */
+	if (error != 0) {
+		VM_OBJECT_WLOCK(object);
+		for (i = 0; i < bp->b_pgbefore; i++)
+			vm_page_readahead_free(bp->b_pages[i]);
+		for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++)
+			vm_page_readahead_free(bp->b_pages[i]);
+		VM_OBJECT_WUNLOCK(object);
+		return (error);
+	}
+
 	/* Read lock to protect size. */
 	VM_OBJECT_RLOCK(object);
 	for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
@@ -1150,30 +1165,28 @@
 		if (mt == bogus_page)
 			continue;
 
-		if (error == 0) {
-			if (nextoff <= object->un_pager.vnp.vnp_size) {
-				/*
-				 * Read filled up entire page.
-				 */
-				vm_page_valid(mt);
-				KASSERT(mt->dirty == 0,
-				    ("%s: page %p is dirty", __func__, mt));
-				KASSERT(!pmap_page_is_mapped(mt),
-				    ("%s: page %p is mapped", __func__, mt));
-			} else {
-				/*
-				 * Read did not fill up entire page.
-				 *
-				 * Currently we do not set the entire page
-				 * valid, we just try to clear the piece that
-				 * we couldn't read.
-				 */
-				vm_page_set_valid_range(mt, 0,
-				    object->un_pager.vnp.vnp_size - tfoff);
-				KASSERT((mt->dirty & vm_page_bits(0,
-				    object->un_pager.vnp.vnp_size - tfoff)) ==
-				    0, ("%s: page %p is dirty", __func__, mt));
-			}
+		if (nextoff <= object->un_pager.vnp.vnp_size) {
+			/*
+			 * Read filled up entire page.
+			 */
+			vm_page_valid(mt);
+			KASSERT(mt->dirty == 0,
+			    ("%s: page %p is dirty", __func__, mt));
+			KASSERT(!pmap_page_is_mapped(mt),
+			    ("%s: page %p is mapped", __func__, mt));
+		} else {
+			/*
+			 * Read did not fill up entire page.
+			 *
+			 * Currently we do not set the entire page valid,
+			 * we just try to clear the piece that we couldn't
+			 * read.
+			 */
+			vm_page_set_valid_range(mt, 0,
+			    object->un_pager.vnp.vnp_size - tfoff);
+			KASSERT((mt->dirty & vm_page_bits(0,
+			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
+			    ("%s: page %p is dirty", __func__, mt));
 		}
 
 		if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter)