Changeset View
Standalone View
sys/vm/vnode_pager.c
Show First 20 Lines • Show All 421 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
/* | /* | ||||
* Lets the VM system know about a change in size for a file. | * Lets the VM system know about a change in size for a file. | ||||
* We adjust our own internal size and flush any cached pages in | * We adjust our own internal size and flush any cached pages in | ||||
* the associated object that are affected by the size change. | * the associated object that are affected by the size change. | ||||
* | * | ||||
* Note: this routine may be invoked as a result of a pager put | * Note: this routine may be invoked as a result of a pager put | ||||
* operation (possibly at object termination time), so we must be careful. | * operation (possibly at object termination time), so we must be careful. | ||||
kib: Assert that end > base, and that end <= PAGE_SIZE? | |||||
*/ | */ | ||||
void | void | ||||
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize) | vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize) | ||||
{ | { | ||||
vm_object_t object; | vm_object_t object; | ||||
vm_page_t m; | vm_page_t m; | ||||
vm_pindex_t nobjsize; | vm_pindex_t nobjsize; | ||||
▲ Show 20 Lines • Show All 85 Lines • ▼ Show 20 Lines | |||||
#else | #else | ||||
atomic_store_64(&object->un_pager.vnp.vnp_size, nsize); | atomic_store_64(&object->un_pager.vnp.vnp_size, nsize); | ||||
#endif | #endif | ||||
object->size = nobjsize; | object->size = nobjsize; | ||||
VM_OBJECT_WUNLOCK(object); | VM_OBJECT_WUNLOCK(object); | ||||
} | } | ||||
/*
 * Lets the VM system know about the purged range for a file.  We toss away
 * any cached pages in the associated object that fall entirely within the
 * purged range.  Content in a partial start or end page is zeroed rather
 * than discarded, and dirty DEV_BSIZE-sized blocks that are only partially
 * covered by the range are not flushed or cleaned.
 *
 * The caller must hold the vnode lock; the write lock on the vnode's
 * v_object is acquired and released internally.
 */
void | |||||
vnode_pager_purge_range( | |||||
struct vnode *vp, vm_ooffset_t startoff, vm_ooffset_t endoff) | |||||
{ | |||||
struct vm_page *m; | |||||
struct vm_object *object; | |||||
object = vp->v_object; | |||||
Not Done Inline ActionsI wonder if you should require exclusive lock on the vnode. kib: I wonder if you should require exclusive lock on the vnode. | |||||
Done Inline ActionsIt is for file systems allowing MNTK_SHARED_WRITES, such as zfs. khng: It is for file systems allowing MNTK_SHARED_WRITES, such as zfs. | |||||
if (object == NULL) | |||||
return; | |||||
VM_OBJECT_WLOCK(object); | |||||
Done Inline ActionsCould this line be unwrapped? kib: Could this line be unwrapped? | |||||
Done Inline ActionsLonger than 80/81 cols for this one to be unwrapped. khng: Longer than 80/81 cols for this one to be unwrapped. | |||||
vm_object_page_remove( | |||||
object, OFF_TO_IDX(startoff), OFF_TO_IDX(endoff), 0); | |||||
Done Inline Actions() is excessive. kib: () is excessive. | |||||
if ((startoff & PAGE_MASK) != 0) { | |||||
m = vm_page_grab( | |||||
vp->v_object, OFF_TO_IDX(startoff), VM_ALLOC_NOCREAT); | |||||
Done Inline ActionsSuppose (startoff & PAGE_MASK) != 0. Note that OFF_TO_IDX(startoff) == OFF_TO_IDX(startoff & ~PAGE_MASK). Finally suppose that the page at OFF_TO_IDX(startoff) is resident in the object. vm_object_page_remove() will free the page at this index, so we will throw away data that is outside the requested range. In other words, I believe this vm_page_grab() call will always return NULL. markj: Suppose `(startoff & PAGE_MASK) != 0`. Note that `OFF_TO_IDX(startoff) == OFF_TO_IDX(startoff &… | |||||
if (m != NULL && !vm_page_none_valid(m)) { | |||||
int base = (int)startoff & PAGE_MASK; | |||||
int size = PAGE_SIZE - base; | |||||
/* | |||||
* Clear out partial-page garbage in case | |||||
* the page has been mapped. | |||||
*/ | |||||
pmap_zero_page_area(m, base, size); | |||||
/* | |||||
Done Inline ActionsAgain, as with shm case, if there is no resident page but file does not have a hole in the partial page' range, you need to clear the data. If this is supposed to be handled by the caller (ie. by fs), then documentation must be very clear about it. kib: Again, as with shm case, if there is no resident page but file does not have a hole in the… | |||||
Done Inline ActionsThis is supposed to be handled by the caller. khng: This is supposed to be handled by the caller. | |||||
* Update the valid bits to reflect the blocks | |||||
* that have been zeroed. Some of these valid | |||||
* bits may have already been set. | |||||
*/ | |||||
vm_page_set_valid_range(m, base, size); | |||||
/* | |||||
* Round up "base" to the next block boundary so that | |||||
* the dirty bit for a partially zeroed block is not | |||||
* cleared. | |||||
*/ | |||||
base = roundup2(base, DEV_BSIZE); | |||||
/* | |||||
* Clear out partial-page dirty bits. | |||||
* | |||||
* note that we do not clear out the valid | |||||
* bits. This would prevent bogus_page | |||||
* replacement from working properly. | |||||
*/ | |||||
vm_page_clear_dirty(m, base, PAGE_SIZE - base); | |||||
} | |||||
vm_page_xunbusy(m); | |||||
} | |||||
if ((endoff & PAGE_MASK) != 0) { | |||||
m = vm_page_grab( | |||||
vp->v_object, OFF_TO_IDX(endoff), VM_ALLOC_NOCREAT); | |||||
if (m != NULL && !vm_page_none_valid(m)) { | |||||
int end = (int)endoff & PAGE_MASK; | |||||
/* | |||||
* Clear out partial-page garbage in case | |||||
* the page has been mapped. | |||||
*/ | |||||
pmap_zero_page_area(m, 0, end); | |||||
/* | |||||
* Update the valid bits to reflect the blocks | |||||
* that have been zeroed. Some of these valid | |||||
* bits may have already been set. | |||||
*/ | |||||
vm_page_set_valid_range(m, 0, end); | |||||
/* | |||||
* Round down "end" to the previous block boundary so | |||||
* that the dirty bit for a partially zeroed block is | |||||
* not cleared. | |||||
*/ | |||||
end = rounddown2(end, DEV_BSIZE); | |||||
/* | |||||
* Clear out partial-page dirty bits. | |||||
* | |||||
* note that we do not clear out the valid | |||||
* bits. This would prevent bogus_page | |||||
* replacement from working properly. | |||||
*/ | |||||
vm_page_clear_dirty(m, 0, end); | |||||
Done Inline ActionsI think this block of code is asking to be moved into a subroutine, otherwise it is duplicated three times in this file. markj: I think this block of code is asking to be moved into a subroutine, otherwise it is duplicated… | |||||
} | |||||
vm_page_xunbusy(m); | |||||
} | |||||
VM_OBJECT_WUNLOCK(object); | |||||
} | |||||
/* | |||||
* calculate the linear (byte) disk address of specified virtual | * calculate the linear (byte) disk address of specified virtual | ||||
* file address | * file address | ||||
*/ | */ | ||||
static int | static int | ||||
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress, | vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress, | ||||
int *run) | int *run) | ||||
{ | { | ||||
int bsize; | int bsize; | ||||
▲ Show 20 Lines • Show All 1,065 Lines • Show Last 20 Lines |
Assert that end > base, and that end <= PAGE_SIZE?