Index: sys/vm/vm_extern.h
===================================================================
--- sys/vm/vm_extern.h
+++ sys/vm/vm_extern.h
@@ -120,6 +120,7 @@
 void vmspace_exitfree(struct proc *);
 void vmspace_switch_aio(struct vmspace *);
 void vnode_pager_setsize(struct vnode *, vm_ooffset_t);
+void vnode_pager_purge_range(struct vnode *, vm_ooffset_t, vm_ooffset_t);
 int vslock(void *, size_t);
 void vsunlock(void *, size_t);
 struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
Index: sys/vm/vnode_pager.c
===================================================================
--- sys/vm/vnode_pager.c
+++ sys/vm/vnode_pager.c
@@ -528,6 +528,106 @@
 	VM_OBJECT_WUNLOCK(object);
 }
 
+/*
+ * Lets the VM system know about the purged range [startoff, endoff) of a
+ * file.  Any cached pages in the associated object that lie entirely inside
+ * the range are tossed away.  Content in partially covered pages is zeroed
+ * rather than discarded, and the dirty bits of DEV_BSIZE-sized blocks that
+ * are completely purged are cleared so that those blocks are not flushed,
+ * while blocks that are only partially purged keep their dirty bits.
+ *
+ * The write lock on the vnode's VM object is acquired and released here.
+ */
+void
+vnode_pager_purge_range(struct vnode *vp, vm_ooffset_t startoff,
+    vm_ooffset_t endoff)
+{
+	struct vm_page *m;
+	struct vm_object *object;
+	vm_pindex_t pistart, piend;
+	bool same_page;
+	int base, end;
+
+	object = vp->v_object;
+	if (object == NULL || endoff <= startoff)
+		return;
+
+	pistart = OFF_TO_IDX(startoff);
+	piend = OFF_TO_IDX(endoff);
+	same_page = pistart == piend;
+
+	VM_OBJECT_WLOCK(object);
+
+	/*
+	 * Only pages lying entirely inside the range may be removed; round
+	 * the start index up so that a partially covered first page is kept
+	 * for the zeroing below.
+	 */
+	if (OFF_TO_IDX(startoff + PAGE_MASK) < piend)
+		vm_object_page_remove(object,
+		    OFF_TO_IDX(startoff + PAGE_MASK), piend, 0);
+
+	if ((startoff & PAGE_MASK) != 0) {
+		base = (int)startoff & PAGE_MASK;
+		end = same_page ? (int)endoff & PAGE_MASK : PAGE_SIZE;
+		m = vm_page_grab(object, pistart, VM_ALLOC_NOCREAT);
+		if (m != NULL) {
+			if (!vm_page_none_valid(m)) {
+				/*
+				 * Clear out partial-page garbage in case
+				 * the page has been mapped.
+				 */
+				pmap_zero_page_area(m, base, end - base);
+
+				/*
+				 * Update the valid bits to reflect the blocks
+				 * that have been zeroed.  Some of these valid
+				 * bits may have already been set.
+				 */
+				vm_page_set_valid_range(m, base, end - base);
+
+				/*
+				 * Round "base" up and "end" down to block
+				 * boundaries so that the dirty bit of a
+				 * partially zeroed block is not cleared.
+				 */
+				base = roundup2(base, DEV_BSIZE);
+				end = rounddown2(end, DEV_BSIZE);
+
+				/*
+				 * Clear out partial-page dirty bits.
+				 *
+				 * Note that we do not clear out the valid
+				 * bits.  This would prevent bogus_page
+				 * replacement from working properly.
+				 */
+				if (base < end)
+					vm_page_clear_dirty(m, base,
+					    end - base);
+			}
+			vm_page_xunbusy(m);
+		}
+		if (same_page)
+			goto out;
+	}
+	if ((endoff & PAGE_MASK) != 0) {
+		end = (int)endoff & PAGE_MASK;
+		m = vm_page_grab(object, piend, VM_ALLOC_NOCREAT);
+		if (m != NULL) {
+			if (!vm_page_none_valid(m)) {
+				/*
+				 * Clear out partial-page garbage in case
+				 * the page has been mapped.
+				 */
+				pmap_zero_page_area(m, 0, end);
+
+				/*
+				 * Update the valid bits to reflect the blocks
+				 * that have been zeroed.  Some of these valid
+				 * bits may have already been set.
+				 */
+				vm_page_set_valid_range(m, 0, end);
+
+				/*
+				 * Round "end" down to the previous block
+				 * boundary so that the dirty bit of a
+				 * partially zeroed block is not cleared.
+				 */
+				end = rounddown2(end, DEV_BSIZE);
+
+				/*
+				 * Clear out partial-page dirty bits.
+				 *
+				 * Note that we do not clear out the valid
+				 * bits.  This would prevent bogus_page
+				 * replacement from working properly.
+				 */
+				vm_page_clear_dirty(m, 0, end);
+			}
+			vm_page_xunbusy(m);
+		}
+	}
+out:
+	VM_OBJECT_WUNLOCK(object);
+}
+
 /*
  * calculate the linear (byte) disk address of specified virtual
  * file address
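
As an illustration of the intended use, a hole-punching path in a filesystem might drive the new interface as sketched below. This is only a sketch and not part of the patch: my_fs_punch_hole() and my_fs_deallocate_blocks() are hypothetical placeholders for the filesystem-specific pieces, and the usual <sys/param.h>, <sys/vnode.h>, and <vm/vm_extern.h> includes are assumed.

/* Hypothetical filesystem-specific block deallocation; not a real KPI. */
static int my_fs_deallocate_blocks(struct vnode *vp, off_t start, off_t len);

/*
 * Sketch of a hole-punch handler: purge the affected byte range from the
 * page cache so that later reads do not see stale cached data, then
 * deallocate the backing blocks on disk.
 */
static int
my_fs_punch_hole(struct vnode *vp, off_t start, off_t len)
{

	/* The vnode is expected to be locked exclusively by the caller. */
	ASSERT_VOP_ELOCKED(vp, "my_fs_punch_hole");

	/* Toss or zero cached pages covering [start, start + len). */
	vnode_pager_purge_range(vp, start, start + len);

	/* Filesystem-specific on-disk deallocation (placeholder). */
	return (my_fs_deallocate_blocks(vp, start, len));
}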