sys/kern/vfs_bio.c
[first 148 lines of the file are unchanged]
 static struct buf *buf;		/* buffer header pool */
 extern struct buf *swbuf;	/* Swap buffer header pool. */
 caddr_t __read_mostly unmapped_buf;

 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
 struct proc *bufdaemonproc;

-static int inmem(struct vnode *vp, daddr_t blkno);
 static void vm_hold_free_pages(struct buf *bp, int newbsize);
 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
 		vm_offset_t to);
 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
 		vm_page_t m);
 static void vfs_clean_pages_dirty_buf(struct buf *bp);
 static void vfs_setdirty_range(struct buf *bp);
[3,414 unchanged lines not shown; the next hunk begins at the end of incore(struct bufobj *bo, daddr_t blkno)]

 	return (gbincore_unlocked(bo, blkno));
 }
 /*
  * Returns true if no I/O is needed to access the
  * associated VM object.  This is like incore except
  * it also hunts around in the VM system for the data.
  */
-static int
+bool
 inmem(struct vnode * vp, daddr_t blkno)
 {
 	vm_object_t obj;
 	vm_offset_t toff, tinc, size;
-	vm_page_t m;
+	vm_page_t m, n;
 	vm_ooffset_t off;
+	int valid;

 	ASSERT_VOP_LOCKED(vp, "inmem");

 	if (incore(&vp->v_bufobj, blkno))
-		return 1;
+		return (true);
 	if (vp->v_mount == NULL)
-		return 0;
+		return (false);
 	obj = vp->v_object;
 	if (obj == NULL)
-		return (0);
+		return (false);

 	size = PAGE_SIZE;
 	if (size > vp->v_mount->mnt_stat.f_iosize)
 		size = vp->v_mount->mnt_stat.f_iosize;
 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;

-	VM_OBJECT_RLOCK(obj);
 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
-		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
-		if (!m)
-			goto notinmem;
+		m = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
+recheck:
+		if (m == NULL)
+			return (false);
+
 		tinc = size;
 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
-		if (vm_page_is_valid(m,
-		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
-			goto notinmem;
+		/*
+		 * Consider page validity only if page mapping didn't change
+		 * during the check.
+		 */
+		valid = vm_page_is_valid(m,
+		    (vm_offset_t)((toff + off) & PAGE_MASK), tinc);
+		n = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
+		if (m != n) {
+			m = n;
+			goto recheck;
+		}
+		if (!valid)
+			return (false);
 	}
-	VM_OBJECT_RUNLOCK(obj);
-	return 1;
-
-notinmem:
-	VM_OBJECT_RUNLOCK(obj);
-	return (0);
+	return (true);
 }
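The new loop is an optimistic lookup/recheck: the page is found without taking the VM object lock, its validity is sampled, and the lookup is then repeated; the sample is trusted only if both lookups return the same page, otherwise the iteration restarts from the fresh result. Below is a minimal userspace sketch of the same idea, assuming a single C11 atomic slot stands in for the object's page tree. All identifiers (struct page, slot, page_in_slot) are hypothetical, and the sketch sidesteps memory reclamation entirely by never freeing pages; keeping a page safe to inspect after an unlocked lookup is a problem the kernel's page-lookup code has to solve and this sketch does not model.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct page {
	bool	valid;			/* stands in for vm_page_is_valid() */
};

/* One published slot stands in for the object's page tree; never freed. */
static _Atomic(struct page *) slot;

static bool
page_in_slot(void)
{
	struct page *m, *n;
	bool valid;

	m = atomic_load(&slot);		/* unlocked lookup */
recheck:
	if (m == NULL)
		return (false);
	valid = m->valid;		/* sample the page without a lock */
	/*
	 * Keep the sample ordered before the confirming lookup, then trust
	 * it only if the slot still names the same page; a concurrent
	 * writer may have replaced or removed the page in the meantime.
	 */
	atomic_thread_fence(memory_order_acquire);
	n = atomic_load(&slot);
	if (m != n) {
		m = n;
		goto recheck;
	}
	return (valid);
}

int
main(void)
{
	static struct page p = { .valid = true };

	atomic_store(&slot, &p);	/* publish a valid page */
	return (page_in_slot() ? 0 : 1);
}

The payoff of the second lookup is that the common path stays lock-free: the VM object read lock disappears from inmem() entirely, and a retry happens only when a concurrent insertion or removal is actually observed at that index.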
 /*
  * Set the dirty range for a buffer based on the status of the dirty
  * bits in the pages comprising the buffer.  The range is limited
  * to the size of the buffer.
  *
  * Tell the VM system that the pages associated with this buffer
[the remaining 1,837 lines of the file are unchanged]