Index: sys/vm/vnode_pager.c
===================================================================
--- sys/vm/vnode_pager.c
+++ sys/vm/vnode_pager.c
@@ -1144,39 +1144,44 @@
 	for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
 	    i < bp->b_npages; i++, tfoff = nextoff) {
 		vm_page_t mt;
+		bool rab = i < bp->b_pgbefore ||
+		    i >= bp->b_npages - bp->b_pgafter;
 
 		nextoff = tfoff + PAGE_SIZE;
 		mt = bp->b_pages[i];
+		if (error != 0) {
+			if (rab)
+				vm_page_xunbusy_unchecked(mt);
+			continue;
+		}
 		if (mt == bogus_page)
 			continue;
 
-		if (error == 0) {
-			if (nextoff <= object->un_pager.vnp.vnp_size) {
-				/*
-				 * Read filled up entire page.
-				 */
-				vm_page_valid(mt);
-				KASSERT(mt->dirty == 0,
-				    ("%s: page %p is dirty", __func__, mt));
-				KASSERT(!pmap_page_is_mapped(mt),
-				    ("%s: page %p is mapped", __func__, mt));
-			} else {
-				/*
-				 * Read did not fill up entire page.
-				 *
-				 * Currently we do not set the entire page
-				 * valid, we just try to clear the piece that
-				 * we couldn't read.
-				 */
-				vm_page_set_valid_range(mt, 0,
-				    object->un_pager.vnp.vnp_size - tfoff);
-				KASSERT((mt->dirty & vm_page_bits(0,
-				    object->un_pager.vnp.vnp_size - tfoff)) ==
-				    0, ("%s: page %p is dirty", __func__, mt));
-			}
+		if (nextoff <= object->un_pager.vnp.vnp_size) {
+			/*
+			 * Read filled up entire page.
+			 */
+			vm_page_valid(mt);
+			KASSERT(mt->dirty == 0,
+			    ("%s: page %p is dirty", __func__, mt));
+			KASSERT(!pmap_page_is_mapped(mt),
+			    ("%s: page %p is mapped", __func__, mt));
+		} else {
+			/*
+			 * Read did not fill up entire page.
+			 *
+			 * Currently we do not set the entire page valid,
+			 * we just try to clear the piece that we couldn't
+			 * read.
+			 */
+			vm_page_set_valid_range(mt, 0,
+			    object->un_pager.vnp.vnp_size - tfoff);
+			KASSERT((mt->dirty & vm_page_bits(0,
+			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
+			    ("%s: page %p is dirty", __func__, mt));
 		}
-		if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter)
+		if (rab)
 			vm_page_readahead_finish(mt);
 	}
 	VM_OBJECT_RUNLOCK(object);
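
For illustration, a minimal stand-alone sketch of the control flow the patch establishes: on a failed read, read-ahead/behind pages (the "rab" case) are only unbusied, while on success they take the readahead-finish path after validation. The struct and helper names below are invented stand-ins, not the kernel's vm_page(9) interfaces.

/*
 * Hypothetical model of the loop's per-page decision logic; struct
 * buf_model and the printed actions are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct buf_model {
	int npages;	/* pages attached to the buffer */
	int pgbefore;	/* read-behind pages at the front */
	int pgafter;	/* read-ahead pages at the back */
};

static void
classify(const struct buf_model *bp, int error)
{
	for (int i = 0; i < bp->npages; i++) {
		/* Same predicate as the patch's "rab" flag. */
		bool rab = i < bp->pgbefore ||
		    i >= bp->npages - bp->pgafter;

		if (error != 0) {
			/* On error, only ra/rb pages are unbusied. */
			printf("page %d: %s\n", i,
			    rab ? "xunbusy (error)" : "skip (error)");
			continue;
		}
		/* On success, ra/rb pages also finish readahead. */
		printf("page %d: %s\n", i,
		    rab ? "validate + readahead_finish" : "validate");
	}
}

int
main(void)
{
	struct buf_model bp = { .npages = 6, .pgbefore = 1, .pgafter = 2 };

	puts("successful read:");
	classify(&bp, 0);
	puts("failed read:");
	classify(&bp, 5);	/* EIO-style error */
	return (0);
}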