Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -629,6 +629,7 @@
 void vm_page_dequeue(vm_page_t m);
 void vm_page_dequeue_deferred(vm_page_t m);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
+void vm_page_free_invalid(vm_page_t);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -1362,6 +1362,31 @@
 }
 
 /*
+ * Destroy the identity of an invalid page and free it if possible.
+ * This is intended to be used when reading a page from backing store fails.
+ */
+void
+vm_page_free_invalid(vm_page_t m)
+{
+
+	KASSERT(vm_page_none_valid(m), ("page %p is valid", m));
+	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
+	vm_page_assert_xbusied(m);
+	KASSERT(m->object != NULL, ("page %p has no object", m));
+	VM_OBJECT_ASSERT_WLOCKED(m->object);
+
+	/*
+	 * If someone has wired this page while the object lock
+	 * was not held, then the thread that unwires is responsible
+	 * for freeing the page. Otherwise just free the page now.
+	 * The wire count of this unmapped page cannot change while
+	 * we have the page xbusy and the page's object wlocked.
+	 */
+	if (vm_page_remove(m))
+		vm_page_free(m);
+}
+
+/*
  * vm_page_sleep_if_busy:
  *
  *	Sleep and release the object lock if the page is busied.
Index: sys/vm/vnode_pager.c
===================================================================
--- sys/vm/vnode_pager.c
+++ sys/vm/vnode_pager.c
@@ -1139,6 +1139,21 @@
 		bp->b_data = unmapped_buf;
 	}
 
+	/*
+	 * If the read failed, we must free any read ahead/behind pages here.
+	 * The requested pages are freed by the caller (for sync requests)
+	 * or by the bp->b_pgiodone callback (for async requests).
+	 */
+	if (error != 0) {
+		VM_OBJECT_WLOCK(object);
+		for (i = 0; i < bp->b_pgbefore; i++)
+			vm_page_free_invalid(bp->b_pages[i]);
+		for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++)
+			vm_page_free_invalid(bp->b_pages[i]);
+		VM_OBJECT_WUNLOCK(object);
+		return (error);
+	}
+
 	/* Read lock to protect size. */
 	VM_OBJECT_RLOCK(object);
 	for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
@@ -1150,30 +1165,28 @@
 		if (mt == bogus_page)
 			continue;
 
-		if (error == 0) {
-			if (nextoff <= object->un_pager.vnp.vnp_size) {
-				/*
-				 * Read filled up entire page.
-				 */
-				vm_page_valid(mt);
-				KASSERT(mt->dirty == 0,
-				    ("%s: page %p is dirty", __func__, mt));
-				KASSERT(!pmap_page_is_mapped(mt),
-				    ("%s: page %p is mapped", __func__, mt));
-			} else {
-				/*
-				 * Read did not fill up entire page.
-				 *
-				 * Currently we do not set the entire page
-				 * valid, we just try to clear the piece that
-				 * we couldn't read.
-				 */
-				vm_page_set_valid_range(mt, 0,
-				    object->un_pager.vnp.vnp_size - tfoff);
-				KASSERT((mt->dirty & vm_page_bits(0,
-				    object->un_pager.vnp.vnp_size - tfoff)) ==
-				    0, ("%s: page %p is dirty", __func__, mt));
-			}
+		if (nextoff <= object->un_pager.vnp.vnp_size) {
+			/*
+			 * Read filled up entire page.
+			 */
+			vm_page_valid(mt);
+			KASSERT(mt->dirty == 0,
+			    ("%s: page %p is dirty", __func__, mt));
+			KASSERT(!pmap_page_is_mapped(mt),
+			    ("%s: page %p is mapped", __func__, mt));
+		} else {
+			/*
+			 * Read did not fill up entire page.
+			 *
+			 * Currently we do not set the entire page valid,
+			 * we just try to clear the piece that we couldn't
+			 * read.
+			 */
+			vm_page_set_valid_range(mt, 0,
+			    object->un_pager.vnp.vnp_size - tfoff);
+			KASSERT((mt->dirty & vm_page_bits(0,
+			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
+			    ("%s: page %p is dirty", __func__, mt));
 		}
 		if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter)
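
Aside, not part of the patch: the comment above vm_page_remove() in the new
vm_page_free_invalid() describes an ownership handoff. vm_page_remove() tells
the caller whether it holds the last reference (and so must free the page);
if a wiring raced in while the object lock was dropped, whoever later unwires
the page inherits the duty to free it. The standalone C sketch below models
that handoff; fake_page, page_remove(), and page_unwire() are illustrative
names, not the FreeBSD API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Toy stand-in for vm_page: a wire count plus a flag recording that the
 * page's identity was destroyed (what vm_page_remove() does).
 */
struct fake_page {
	int	wire_count;
	bool	removed;
};

/*
 * Model of vm_page_remove(): strip the page's identity and report
 * whether the caller holds the last reference and must free the page.
 */
static bool
page_remove(struct fake_page *p)
{

	p->removed = true;
	return (p->wire_count == 0);
}

/*
 * Model of unwiring: the thread dropping the last wiring frees a page
 * that was already removed from its object.
 */
static void
page_unwire(struct fake_page *p)
{

	if (--p->wire_count == 0 && p->removed) {
		printf("unwiring thread frees page %p\n", (void *)p);
		free(p);
	}
}

int
main(void)
{
	struct fake_page *p;

	p = calloc(1, sizeof(*p));
	p->wire_count = 1;		/* a wiring raced with removal */
	if (page_remove(p))
		free(p);		/* unwired: we hold the last ref */
	else
		page_unwire(p);		/* otherwise the unwirer frees it */
	return (0);
}

The same reasoning is why the error path in vnode_pager can safely call
vm_page_free_invalid() on read ahead/behind pages: any wiring that slipped
in while the object lock was not held simply shifts the freeing duty to the
unwiring thread.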