Changeset View
Standalone View
sys/vm/vnode_pager.c
Show First 20 Lines • Show All 416 Lines • ▼ Show 20 Lines | if (pagesperblock > 0) { | ||||
if (after) { | if (after) { | ||||
*after /= blocksperpage; | *after /= blocksperpage; | ||||
} | } | ||||
} | } | ||||
return TRUE; | return TRUE; | ||||
} | } | ||||
/* | /* | ||||
* Internal routine clearing partial-page content | |||||
*/ | |||||
static void | |||||
vnode_pager_subpage_purge(struct vm_page *m, int base, int end) | |||||
{ | |||||
int size = end - base; | |||||
kib: Assert that end > base, and that end <= PAGE_SIZE? | |||||
/* | |||||
* Clear out partial-page garbage in case | |||||
* the page has been mapped. | |||||
*/ | |||||
pmap_zero_page_area(m, base, size); | |||||
/* | |||||
* Update the valid bits to reflect the blocks | |||||
* that have been zeroed. Some of these valid | |||||
* bits may have already been set. | |||||
*/ | |||||
vm_page_set_valid_range(m, base, size); | |||||
/* | |||||
* Round up "base" to the next block boundary so | |||||
* that the dirty bit for a partially zeroed | |||||
* block is not cleared. | |||||
*/ | |||||
base = roundup2(base, DEV_BSIZE); | |||||
end = rounddown2(end, DEV_BSIZE); | |||||
if (end > base) { | |||||
/* | |||||
* Clear out partial-page dirty bits. | |||||
* | |||||
* note that we do not clear out the | |||||
* valid bits. This would prevent | |||||
* bogus_page replacement from working | |||||
* properly. | |||||
*/ | |||||
vm_page_clear_dirty(m, base, end - base); | |||||
} | |||||
} | |||||
/* | |||||
* Lets the VM system know about a change in size for a file. | * Lets the VM system know about a change in size for a file. | ||||
* We adjust our own internal size and flush any cached pages in | * We adjust our own internal size and flush any cached pages in | ||||
* the associated object that are affected by the size change. | * the associated object that are affected by the size change. | ||||
* | * | ||||
* Note: this routine may be invoked as a result of a pager put | * Note: this routine may be invoked as a result of a pager put | ||||
* operation (possibly at object termination time), so we must be careful. | * operation (possibly at object termination time), so we must be careful. | ||||
*/ | */ | ||||
void | void | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | if (nsize < object->un_pager.vnp.vnp_size) { | ||||
* completely invalid page and mark it partially valid | * completely invalid page and mark it partially valid | ||||
* it can screw up NFS reads, so we don't allow the case. | * it can screw up NFS reads, so we don't allow the case. | ||||
*/ | */ | ||||
if (!(nsize & PAGE_MASK)) | if (!(nsize & PAGE_MASK)) | ||||
goto out; | goto out; | ||||
m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT); | m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT); | ||||
if (m == NULL) | if (m == NULL) | ||||
goto out; | goto out; | ||||
if (!vm_page_none_valid(m)) { | if (!vm_page_none_valid(m)) | ||||
int base = (int)nsize & PAGE_MASK; | vnode_pager_subpage_purge(m, (int)nsize & PAGE_MASK, | ||||
int size = PAGE_SIZE - base; | PAGE_SIZE); | ||||
vm_page_xunbusy(m); | |||||
} | |||||
out: | |||||
#if defined(__powerpc__) && !defined(__powerpc64__) | |||||
object->un_pager.vnp.vnp_size = nsize; | |||||
#else | |||||
atomic_store_64(&object->un_pager.vnp.vnp_size, nsize); | |||||
#endif | |||||
object->size = nobjsize; | |||||
VM_OBJECT_WUNLOCK(object); | |||||
} | |||||
/* | /* | ||||
* Clear out partial-page garbage in case | * Lets the VM system know about the purged range for a file. We toss away any | ||||
* the page has been mapped. | * cached pages in the associated object that are affected by the purge | ||||
* operation. Partial-page area not aligned to page boundaries will be zeroed | |||||
* and the dirty blocks in DEV_BSIZE unit within a page will not be flushed. | |||||
* | |||||
* Write lock of the VM object in vnode will be held. | |||||
Done Inline Actions'VM object in vnode' is usually written as v_object. OTOH, I expect that caller is required to own at least the vnode lock. This should be expressed by an assert. kib: 'VM object in vnode' is usually written as v_object.
The lock is not held, it is taken… | |||||
*/ | */ | ||||
pmap_zero_page_area(m, base, size); | void | ||||
vnode_pager_purge_range( | |||||
struct vnode *vp, vm_ooffset_t startoff, vm_ooffset_t endoff) | |||||
{ | |||||
struct vm_page *m; | |||||
struct vm_object *object; | |||||
vm_pindex_t pgrmidx; | |||||
bool same_page; | |||||
Not Done Inline ActionsI wonder if you should require exclusive lock on the vnode. kib: I wonder if you should require exclusive lock on the vnode. | |||||
Done Inline ActionsIt is for file systems allowing MNTK_SHARED_WRITES, such as zfs. khng: It is for file systems allowing MNTK_SHARED_WRITES, such as zfs. | |||||
/* | object = vp->v_object; | ||||
* Update the valid bits to reflect the blocks that | pgrmidx = OFF_TO_IDX(startoff + PAGE_MASK); | ||||
* have been zeroed. Some of these valid bits may | same_page = OFF_TO_IDX(startoff) == OFF_TO_IDX(endoff); | ||||
Done Inline ActionsCould this line be unwrapped? kib: Could this line be unwrapped? | |||||
Done Inline ActionsLonger than 80/81 cols for this one to be unwrapped. khng: Longer than 80/81 cols for this one to be unwrapped. | |||||
* have already been set. | if (object == NULL || endoff <= startoff) | ||||
*/ | return; | ||||
Done Inline Actions() is excessive. kib: () is excessive. | |||||
vm_page_set_valid_range(m, base, size); | |||||
/* | VM_OBJECT_WLOCK(object); | ||||
* Round "base" to the next block boundary so that the | |||||
* dirty bit for a partially zeroed block is not | |||||
* cleared. | |||||
*/ | |||||
base = roundup2(base, DEV_BSIZE); | |||||
Done Inline ActionsSuppose (startoff & PAGE_MASK) != 0. Note that OFF_TO_IDX(startoff) == OFF_TO_IDX(startoff & ~PAGE_MASK). Finally suppose that the page at OFF_TO_IDX(startoff) is resident in the object. vm_object_page_remove() will free the page at this index, so we will throw away data that is outside the requested range. In other words, I believe this vm_page_grab() call will always return NULL. markj: Suppose `(startoff & PAGE_MASK) != 0`. Note that `OFF_TO_IDX(startoff) == OFF_TO_IDX(startoff &… | |||||
/* | if (pgrmidx < OFF_TO_IDX(endoff)) | ||||
* Clear out partial-page dirty bits. | vm_object_page_remove(object, pgrmidx, | ||||
* | OFF_TO_IDX(endoff), 0); | ||||
* note that we do not clear out the valid | |||||
* bits. This would prevent bogus_page | if ((startoff & PAGE_MASK) != 0) { | ||||
* replacement from working properly. | int base = (int)startoff & PAGE_MASK; | ||||
*/ | int end = same_page ? (int)endoff & PAGE_MASK : PAGE_SIZE; | ||||
vm_page_clear_dirty(m, base, PAGE_SIZE - base); | |||||
m = vm_page_grab(vp->v_object, OFF_TO_IDX(startoff), | |||||
VM_ALLOC_NOCREAT); | |||||
if (m != NULL) { | |||||
Done Inline ActionsAgain, as with shm case, if there is no resident page but file does not have a hole in the partial page' range, you need to clear the data. If this is supposed to be handled by the caller (ie. by fs), then documentation must be very clear about it. kib: Again, as with shm case, if there is no resident page but file does not have a hole in the… | |||||
Done Inline ActionsThis is supposed to be handled by the caller. khng: This is supposed to be handled by the caller. | |||||
if (!vm_page_none_valid(m)) | |||||
vnode_pager_subpage_purge(m, base, end); | |||||
vm_page_xunbusy(m); | |||||
} | } | ||||
if (same_page) | |||||
goto out; | |||||
} | |||||
if ((endoff & PAGE_MASK) != 0) { | |||||
int base = same_page ? (int)startoff & PAGE_MASK : 0 ; | |||||
int end = (int)endoff & PAGE_MASK; | |||||
m = vm_page_grab(vp->v_object, OFF_TO_IDX(endoff), | |||||
VM_ALLOC_NOCREAT); | |||||
if (m != NULL) { | |||||
if (!vm_page_none_valid(m)) | |||||
vnode_pager_subpage_purge(m, base, end); | |||||
vm_page_xunbusy(m); | vm_page_xunbusy(m); | ||||
} | } | ||||
} | |||||
out: | out: | ||||
#if defined(__powerpc__) && !defined(__powerpc64__) | |||||
object->un_pager.vnp.vnp_size = nsize; | |||||
#else | |||||
atomic_store_64(&object->un_pager.vnp.vnp_size, nsize); | |||||
#endif | |||||
object->size = nobjsize; | |||||
VM_OBJECT_WUNLOCK(object); | VM_OBJECT_WUNLOCK(object); | ||||
} | } | ||||
/* | /* | ||||
* calculate the linear (byte) disk address of specified virtual | * calculate the linear (byte) disk address of specified virtual | ||||
* file address | * file address | ||||
*/ | */ | ||||
static int | static int | ||||
Show All 20 Lines | if (run) { | ||||
*run += 1; | *run += 1; | ||||
*run *= bsize / PAGE_SIZE; | *run *= bsize / PAGE_SIZE; | ||||
*run -= voffset / PAGE_SIZE; | *run -= voffset / PAGE_SIZE; | ||||
} | } | ||||
} | } | ||||
return (err); | return (err); | ||||
} | } | ||||
Done Inline ActionsI think this block of code is asking to be moved into a subroutine, otherwise it is duplicated three times in this file. markj: I think this block of code is asking to be moved into a subroutine, otherwise it is duplicated… | |||||
/* | /* | ||||
* small block filesystem vnode pager input | * small block filesystem vnode pager input | ||||
*/ | */ | ||||
static int | static int | ||||
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m) | vnode_pager_input_smlfs(vm_object_t object, vm_page_t m) | ||||
{ | { | ||||
struct vnode *vp; | struct vnode *vp; | ||||
struct bufobj *bo; | struct bufobj *bo; | ||||
▲ Show 20 Lines • Show All 1,032 Lines • Show Last 20 Lines |
Assert that end > base, and that end <= PAGE_SIZE?