Changeset View
Standalone View
sys/vm/vnode_pager.c
Show First 20 Lines • Show All 1,133 Lines • ▼ Show 20 Lines | if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) { | ||||
bzero(bp->b_data + bp->b_bcount, | bzero(bp->b_data + bp->b_bcount, | ||||
PAGE_SIZE * bp->b_npages - bp->b_bcount); | PAGE_SIZE * bp->b_npages - bp->b_bcount); | ||||
} | } | ||||
if (buf_mapped(bp)) { | if (buf_mapped(bp)) { | ||||
pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); | pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); | ||||
bp->b_data = unmapped_buf; | bp->b_data = unmapped_buf; | ||||
} | } | ||||
/* | |||||
* If the read failed, we must free any read ahead/behind pages here. | |||||
* The requested pages are freed by the caller (for sync requests) | |||||
* or by the bp->b_pgiodone callback (for async requests). | |||||
*/ | |||||
if (error != 0) { | |||||
VM_OBJECT_WLOCK(object); | |||||
for (i = 0; i < bp->b_pgbefore; i++) | |||||
vm_page_free(bp->b_pages[i]); | |||||
markj: Per my comments below, we should check for wirings before freeing. Again, the wire count will… | |||||
for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++) | |||||
vm_page_free(bp->b_pages[i]); | |||||
VM_OBJECT_WUNLOCK(object); | |||||
return (error); | |||||
} | |||||
/* Read lock to protect size. */ | /* Read lock to protect size. */ | ||||
VM_OBJECT_RLOCK(object); | VM_OBJECT_RLOCK(object); | ||||
for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex); | for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex); | ||||
i < bp->b_npages; i++, tfoff = nextoff) { | i < bp->b_npages; i++, tfoff = nextoff) { | ||||
vm_page_t mt; | vm_page_t mt; | ||||
nextoff = tfoff + PAGE_SIZE; | nextoff = tfoff + PAGE_SIZE; | ||||
mt = bp->b_pages[i]; | mt = bp->b_pages[i]; | ||||
if (mt == bogus_page) | if (mt == bogus_page) | ||||
continue; | continue; | ||||
if (error == 0) { | |||||
if (nextoff <= object->un_pager.vnp.vnp_size) { | if (nextoff <= object->un_pager.vnp.vnp_size) { | ||||
/* | /* | ||||
* Read filled up entire page. | * Read filled up entire page. | ||||
*/ | */ | ||||
vm_page_valid(mt); | vm_page_valid(mt); | ||||
KASSERT(mt->dirty == 0, | KASSERT(mt->dirty == 0, | ||||
("%s: page %p is dirty", __func__, mt)); | ("%s: page %p is dirty", __func__, mt)); | ||||
KASSERT(!pmap_page_is_mapped(mt), | KASSERT(!pmap_page_is_mapped(mt), | ||||
("%s: page %p is mapped", __func__, mt)); | ("%s: page %p is mapped", __func__, mt)); | ||||
} else { | } else { | ||||
/* | /* | ||||
* Read did not fill up entire page. | * Read did not fill up entire page. | ||||
* | * | ||||
* Currently we do not set the entire page | * Currently we do not set the entire page valid, | ||||
* valid, we just try to clear the piece that | * we just try to clear the piece that we couldn't | ||||
* we couldn't read. | * read. | ||||
*/ | */ | ||||
vm_page_set_valid_range(mt, 0, | vm_page_set_valid_range(mt, 0, | ||||
object->un_pager.vnp.vnp_size - tfoff); | object->un_pager.vnp.vnp_size - tfoff); | ||||
KASSERT((mt->dirty & vm_page_bits(0, | KASSERT((mt->dirty & vm_page_bits(0, | ||||
object->un_pager.vnp.vnp_size - tfoff)) == | object->un_pager.vnp.vnp_size - tfoff)) == 0, | ||||
0, ("%s: page %p is dirty", __func__, mt)); | ("%s: page %p is dirty", __func__, mt)); | ||||
} | |||||
} | } | ||||
if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter) | if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter) | ||||
vm_page_readahead_finish(mt); | vm_page_readahead_finish(mt); | ||||
} | } | ||||
VM_OBJECT_RUNLOCK(object); | VM_OBJECT_RUNLOCK(object); | ||||
return (error); | return (error); | ||||
▲ Show 20 Lines • Show All 406 Lines • Show Last 20 Lines |
Per my comments below, we should check for wirings before freeing. Again, the wire count will not increase while the object write lock is held, so checking it under that lock is sufficient. (NOTE: the original comment was truncated at "while:"; this completion is inferred from the surrounding discussion — confirm against the review thread.)
Since this is a read-ahead page, it was allocated by the getpages method and must be invalid, which implies that it is unmapped. We could reasonably assert that these pages are invalid as well.