sys/kern/vfs_bio.c
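Throughout this diff, direct uses of &vp->v_bufobj are replaced by vp2bo(vp), and bp->b_bufobj->bo_object by bp->b_vp->v_object. A minimal sketch of the accessor this presumably relies on (hypothetical; the real vp2bo() definition is introduced elsewhere in the patch series and is not shown in this changeset):

/*
 * Hypothetical sketch only: vp2bo() is assumed to be a trivial
 * accessor from a vnode to its buffer object, so that callers no
 * longer touch the v_bufobj member directly.
 */
static inline struct bufobj *
vp2bo(struct vnode *vp)
{
	return (&vp->v_bufobj);
}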
Show First 20 Lines • Show All 2,884 Lines • ▼ Show 20 Lines
{
vm_ooffset_t foff;
vm_page_t m;
vm_object_t obj;
struct vnode *vp __unused;
int i, iosize, resid;
bool bogus;
obj = bp->b_bufobj->bo_object; | vp = bp->b_vp;
| obj = vp->v_object;
KASSERT(blockcount_read(&obj->paging_in_progress) >= bp->b_npages,
("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
blockcount_read(&obj->paging_in_progress), bp->b_npages));
vp = bp->b_vp; |
VNPASS(vp->v_holdcnt > 0, vp);
VNPASS(vp->v_object != NULL, vp);
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_vmio_iodone: bp %p has no buffer offset", bp));
bogus = false;
▲ Show 20 Lines • Show All 64 Lines • ▼ Show 20 Lines | vfs_vmio_invalidate(struct buf *bp)
*
* block sizes less than DEV_BSIZE (usually 512) are not
* supported due to the page granularity bits (m->valid,
* m->dirty, etc...).
*
* See man buf(9) for more information
*/
flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
obj = bp->b_bufobj->bo_object; | obj = bp->b_vp->v_object;
resid = bp->b_bufsize;
poffset = bp->b_offset & PAGE_MASK;
VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page)
panic("vfs_vmio_invalidate: Unexpected bogus page.");
bp->b_pages[i] = NULL;
Show All 34 Lines | if (buf_mapped(bp)) {
BUF_CHECK_UNMAPPED(bp);
/*
* The object lock is needed only if we will attempt to free pages.
*/
flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
if ((bp->b_flags & B_DIRECT) != 0) {
flags |= VPR_TRYFREE;
obj = bp->b_bufobj->bo_object; | obj = bp->b_vp->v_object;
VM_OBJECT_WLOCK(obj);
} else {
obj = NULL;
}
for (i = desiredpages; i < bp->b_npages; i++) {
m = bp->b_pages[i];
KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
bp->b_pages[i] = NULL;
Show All 22 Lines | vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
vm_offset_t tinc;
vm_page_t m;
/*
* Step 1, bring in the VM pages from the object, allocating
* them if necessary. We must clear B_CACHE if these pages
* are not valid for the range covered by the buffer.
*/
obj = bp->b_bufobj->bo_object; | obj = bp->b_vp->v_object;
if (bp->b_npages < desiredpages) {
KASSERT(desiredpages <= atop(maxbcachebuf),
("vfs_vmio_extend past maxbcachebuf %p %d %u",
bp, desiredpages, maxbcachebuf));
/*
* We must allocate system pages since blocking
* here could interfere with paging I/O, no
▲ Show 20 Lines • Show All 57 Lines • ▼ Show 20 Lines
vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
{
struct buf *bpa;
int match;
match = 0;
/* If the buf isn't in core skip it */
if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL) | if ((bpa = gbincore(vp2bo(vp), lblkno)) == NULL)
return (0);
/* If the buf is busy we don't want to wait for it */
if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
return (0);
/* Only cluster with valid clusterable delayed write buffers */
if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
Show All 31 Lines | vfs_bio_awrite(struct buf *bp)
daddr_t lblkno = bp->b_lblkno;
struct vnode *vp = bp->b_vp;
int ncl;
int nwritten;
int size;
int maxcl;
int gbflags;
bo = &vp->v_bufobj; | bo = vp2bo(vp);
gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
/*
* right now we support clustered writing only to regular files. If
* we find a clusterable block we could be in the middle of a cluster
* rather than at the beginning.
*/
if ((vp->v_type == VREG) &&
(vp->v_mount != 0) && /* Only on nodes that have the size info */
▲ Show 20 Lines • Show All 91 Lines • ▼ Show 20 Lines | getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
vp->v_type == VCHR)
metadata = true;
else
metadata = false;
if (vp == NULL)
bd = &bdomain[0];
else
bd = &bdomain[vp->v_bufobj.bo_domain]; | bd = &bdomain[vp2bo(vp)->bo_domain];
counter_u64_add(getnewbufcalls, 1);
reserved = false;
do {
if (reserved == false &&
bufspace_reserve(bd, maxsize, metadata) != 0) {
counter_u64_add(getnewbufrestarts, 1);
continue;
▲ Show 20 Lines • Show All 318 Lines • ▼ Show 20 Lines | inmem(struct vnode * vp, daddr_t blkno)
vm_object_t obj;
vm_offset_t toff, tinc, size;
vm_page_t m, n;
vm_ooffset_t off;
int valid;
ASSERT_VOP_LOCKED(vp, "inmem");
if (incore(&vp->v_bufobj, blkno)) | if (incore(vp2bo(vp), blkno))
return (true);
if (vp->v_mount == NULL)
return (false);
obj = vp->v_object;
if (obj == NULL)
return (false);
size = PAGE_SIZE;
▲ Show 20 Lines • Show All 251 Lines • ▼ Show 20 Lines | KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
("GB_KVAALLOC only makes sense with GB_UNMAPPED")); | ("GB_KVAALLOC only makes sense with GB_UNMAPPED")); | ||||
ASSERT_VOP_LOCKED(vp, "getblk"); | ASSERT_VOP_LOCKED(vp, "getblk"); | ||||
if (size > maxbcachebuf) | if (size > maxbcachebuf) | ||||
panic("getblk: size(%d) > maxbcachebuf(%d)\n", size, | panic("getblk: size(%d) > maxbcachebuf(%d)\n", size, | ||||
maxbcachebuf); | maxbcachebuf); | ||||
if (!unmapped_buf_allowed) | if (!unmapped_buf_allowed) | ||||
flags &= ~(GB_UNMAPPED | GB_KVAALLOC); | flags &= ~(GB_UNMAPPED | GB_KVAALLOC); | ||||
bo = &vp->v_bufobj; | bo = vp2bo(vp); | ||||
d_blkno = dblkno; | d_blkno = dblkno; | ||||
/* Attempt lockless lookup first. */ | /* Attempt lockless lookup first. */ | ||||
bp = gbincore_unlocked(bo, blkno); | bp = gbincore_unlocked(bo, blkno); | ||||
if (bp == NULL) { | if (bp == NULL) { | ||||
/* | /* | ||||
* With GB_NOCREAT we must be sure about not finding the buffer | * With GB_NOCREAT we must be sure about not finding the buffer | ||||
* as it may have been reassigned during unlocked lookup. | * as it may have been reassigned during unlocked lookup. | ||||
▲ Show 20 Lines • Show All 231 Lines • ▼ Show 20 Lines | newbuf_unlocked: | ||||
/*
* set B_VMIO bit. allocbuf() the buffer bigger. Since the
* buffer size starts out as 0, B_CACHE will be set by
* allocbuf() for the VMIO case prior to it testing the
* backing store for validity.
*/
if (vmio) {
bp->b_flags |= B_VMIO;
mckusick: Why are the following KASSERTs no longer relevant?
kib (Author): I believe these asserts are not too relevant before the patch either. They are tautological: before this patch, v_object is defined as v_bufobj->bo_object, and vmio is evaluated as v_object != NULL. After v_object becomes a proper member of struct vnode instead of a define, I cannot even formulate the tautological condition for the first (vmio == true) case. It might be bp->b_bufobj == vp2bo(vp), but that is trivial after bgetvp(). For the second assert (!vmio), it is the tautological check vp->v_object == NULL.
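A minimal userland sketch of the tautology kib describes (hypothetical stub types only; v_object_of() stands in for the old v_object define, and this models the pre-patch aliasing, not the kernel code itself):

#include <assert.h>

/* Hypothetical stubs: just enough structure to model the aliasing. */
struct vm_object { int unused; };

struct bufobj {
	struct vm_object *bo_object;
};

struct vnode {
	struct bufobj v_bufobj;
};

struct buf {
	struct vnode *b_vp;
	struct bufobj *b_bufobj;
};

/* Pre-patch view: v_object was a define expanding to v_bufobj.bo_object. */
#define	v_object_of(vp)	((vp)->v_bufobj.bo_object)

int
main(void)
{
	struct vm_object obj;
	struct vnode vn = { .v_bufobj = { .bo_object = &obj } };
	struct buf bp = { .b_vp = &vn, .b_bufobj = &vn.v_bufobj };

	/*
	 * The removed KASSERT with the alias spelled out: once
	 * bgetvp() has set b_bufobj = &vp->v_bufobj, both sides name
	 * the same field, so the check can never fire.
	 */
	assert(v_object_of(bp.b_vp) == bp.b_bufobj->bo_object);
	return (0);
}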
KASSERT(vp->v_object == bp->b_bufobj->bo_object, |
("ARGH! different b_bufobj->bo_object %p %p %p\n", |
bp, vp->v_object, bp->b_bufobj->bo_object)); |
} else {
bp->b_flags &= ~B_VMIO;
KASSERT(bp->b_bufobj->bo_object == NULL, |
("ARGH! has b_bufobj->bo_object %p %p\n", |
bp, bp->b_bufobj->bo_object)); |
BUF_CHECK_MAPPED(bp);
}
allocbuf(bp, size);
bufspace_release(bufdomain(bp), maxsize);
bp->b_flags &= ~B_DONE;
}
CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
▲ Show 20 Lines • Show All 366 Lines • ▼ Show 20 Lines | vfs_unbusy_pages(struct buf *bp)
int i;
vm_object_t obj;
vm_page_t m;
runningbufwakeup(bp);
if (!(bp->b_flags & B_VMIO))
return;
obj = bp->b_bufobj->bo_object; | obj = bp->b_vp->v_object;
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page) {
m = vm_page_relookup(obj, OFF_TO_IDX(bp->b_offset) + i);
if (!m)
panic("vfs_unbusy_pages: page missing\n");
bp->b_pages[i] = m;
if (buf_mapped(bp)) {
▲ Show 20 Lines • Show All 114 Lines • ▼ Show 20 Lines | vfs_busy_pages(struct buf *bp, int clear_modify)
vm_ooffset_t foff;
vm_page_t m;
int i;
bool bogus;
if (!(bp->b_flags & B_VMIO))
return;
obj = bp->b_bufobj->bo_object; | obj = bp->b_vp->v_object;
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));
if ((bp->b_flags & B_CLUSTER) == 0) {
vm_object_pip_add(obj, bp->b_npages);
vfs_busy_pages_acquire(bp);
}
if (bp->b_bufsize != 0)
▲ Show 20 Lines • Show All 808 Lines • ▼ Show 20 Lines | DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
struct buf *bp;
if (!have_addr) {
db_printf("usage: show vnodebufs <addr>\n");
return;
}
vp = (struct vnode *)addr;
db_printf("Clean buffers:\n");
TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) { | TAILQ_FOREACH(bp, &vp2bo(vp)->bo_clean.bv_hd, b_bobufs) {
db_show_buffer((uintptr_t)bp, 1, 0, NULL);
db_printf("\n");
}
db_printf("Dirty buffers:\n");
TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) { | TAILQ_FOREACH(bp, &vp2bo(vp)->bo_dirty.bv_hd, b_bobufs) {
db_show_buffer((uintptr_t)bp, 1, 0, NULL);
db_printf("\n");
}
}
DB_COMMAND(countfreebufs, db_coundfreebufs)
{
struct buf *bp;
Show All 20 Lines