Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/vfs_bio.c
Show First 20 Lines • Show All 3,843 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
int | int | ||||
getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size, int slpflag, | getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size, int slpflag, | ||||
int slptimeo, int flags, struct buf **bpp) | int slptimeo, int flags, struct buf **bpp) | ||||
{ | { | ||||
struct buf *bp; | struct buf *bp; | ||||
struct bufobj *bo; | struct bufobj *bo; | ||||
daddr_t d_blkno; | daddr_t d_blkno; | ||||
int bsize, error, maxsize, vmio; | int bsize, error, maxsize, vmio, lockflags; | ||||
off_t offset; | off_t offset; | ||||
CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size); | CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size); | ||||
KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC, | KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC, | ||||
("GB_KVAALLOC only makes sense with GB_UNMAPPED")); | ("GB_KVAALLOC only makes sense with GB_UNMAPPED")); | ||||
ASSERT_VOP_LOCKED(vp, "getblk"); | ASSERT_VOP_LOCKED(vp, "getblk"); | ||||
if (size > maxbcachebuf) | if (size > maxbcachebuf) | ||||
panic("getblk: size(%d) > maxbcachebuf(%d)\n", size, | panic("getblk: size(%d) > maxbcachebuf(%d)\n", size, | ||||
maxbcachebuf); | maxbcachebuf); | ||||
if (!unmapped_buf_allowed) | if (!unmapped_buf_allowed) | ||||
flags &= ~(GB_UNMAPPED | GB_KVAALLOC); | flags &= ~(GB_UNMAPPED | GB_KVAALLOC); | ||||
bo = &vp->v_bufobj; | bo = &vp->v_bufobj; | ||||
d_blkno = dblkno; | d_blkno = dblkno; | ||||
/* Attempt lockless lookup first. */ | |||||
bp = gbincore_unlocked(bo, blkno); | |||||
if (bp == NULL) | |||||
goto newbuf_unlocked; | |||||
kib: The blocking there is not needed. | |||||
Done Inline Actionswill fix cem: will fix | |||||
lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | | |||||
((flags & GB_LOCK_NOWAIT) ? LK_NOWAIT : 0); | |||||
error = BUF_TIMELOCK(bp, lockflags, NULL, "getblku", slpflag, | |||||
slptimeo); | |||||
if (error == EINTR || error == ERESTART) | |||||
return (error); | |||||
else if (error != 0) | |||||
goto loop; | |||||
/* Verify buf identity has not changed since lookup. */ | |||||
if (bp->b_bufobj != bo || bp->b_lblkno != blkno) { | |||||
Not Done Inline ActionsI think the rule should be that any failure on the lockless path for buffer lookup results in a locked retry. Imagine that the buffer was reclaimed in the meantime; then you would not reach the check for bufobj/blkno at all. kib: I think the rule should be that any failure on lockless path for buffer lookup results in… | |||||
Done Inline ActionsWe can abort without retry for EINTR/ERESTART, right? Retrying in all other cases makes sense to me. cem: We can abort without retry for EINTR/ERESTART, right? Retrying in all other cases makes sense… | |||||
/* It changed, fall back to locked lookup. */ | |||||
/* XXX: Do we need to handle bremfreef()? */ | |||||
BUF_UNLOCK(bp); | |||||
Done Inline ActionsIf you invert the condition, the control flow can be simplified a bit: if (bp->b_bufobj == bo && bp->b_lblkno == blkno) goto foundbuf_fastpath; /* It changed, fall back to locked lookup. */ BUF_UNLOCK_RAW(bp); loop: markj: If you invert the condition, the control flow can be simplified a bit:
```
if (bp->b_bufobj ==… | |||||
Done Inline ActionsSure, will do. cem: Sure, will do. | |||||
goto loop; | |||||
} | |||||
Done Inline ActionsWhy do you think it should be handled there ? kib: Why do you think it should be handled there ? | |||||
Done Inline Actions#define BUF_UNLOCK(bp) do { \ KASSERT(((bp)->b_flags & B_REMFREE) == 0, \ ("BUF_UNLOCK %p while B_REMFREE is still set.", (bp))); \ \ (void)_lockmgr_args(&(bp)->b_lock, LK_RELEASE, NULL, \ LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, \ LOCK_FILE, LOCK_LINE); \ cem: ```
#define BUF_UNLOCK(bp) do { \
KASSERT… | |||||
Done Inline ActionsHuh. I think you should use lockmgr() directly there, to not touch buffer we do not intend to operate on, at all. kib: Huh. I think you should use lockmgr() directly there, to not touch buffer we do not intend to… | |||||
goto existingbuf_unlocked; | |||||
loop: | loop: | ||||
BO_RLOCK(bo); | BO_RLOCK(bo); | ||||
bp = gbincore(bo, blkno); | bp = gbincore(bo, blkno); | ||||
if (bp != NULL) { | if (bp != NULL) { | ||||
int lockflags; | |||||
/* | /* | ||||
* Buffer is in-core. If the buffer is not busy nor managed, | * Buffer is in-core. If the buffer is not busy nor managed, | ||||
* it must be on a queue. | * it must be on a queue. | ||||
*/ | */ | ||||
lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK; | lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK; | ||||
if ((flags & GB_LOCK_NOWAIT) != 0) | if ((flags & GB_LOCK_NOWAIT) != 0) | ||||
lockflags |= LK_NOWAIT; | lockflags |= LK_NOWAIT; | ||||
error = BUF_TIMELOCK(bp, lockflags, | error = BUF_TIMELOCK(bp, lockflags, | ||||
BO_LOCKPTR(bo), "getblk", slpflag, slptimeo); | BO_LOCKPTR(bo), "getblk", slpflag, slptimeo); | ||||
/* | /* | ||||
* If we slept and got the lock we have to restart in case | * If we slept and got the lock we have to restart in case | ||||
* the buffer changed identities. | * the buffer changed identities. | ||||
*/ | */ | ||||
if (error == ENOLCK) | if (error == ENOLCK) | ||||
goto loop; | goto loop; | ||||
/* We timed out or were interrupted. */ | /* We timed out or were interrupted. */ | ||||
else if (error != 0) | else if (error != 0) | ||||
return (error); | return (error); | ||||
existingbuf_unlocked: | |||||
Done Inline ActionsI must admit that 'unlocked' suffix is misleading. kib: I must admit that 'unlocked' suffix is misleading. | |||||
Done Inline ActionsWill change. cem: Will change. | |||||
/* If recursed, assume caller knows the rules. */ | /* If recursed, assume caller knows the rules. */ | ||||
else if (BUF_LOCKRECURSED(bp)) | if (BUF_LOCKRECURSED(bp)) | ||||
goto end; | goto end; | ||||
/* | /* | ||||
* The buffer is locked. B_CACHE is cleared if the buffer is | * The buffer is locked. B_CACHE is cleared if the buffer is | ||||
* invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set | * invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set | ||||
* and for a VMIO buffer B_CACHE is adjusted according to the | * and for a VMIO buffer B_CACHE is adjusted according to the | ||||
* backing VM cache. | * backing VM cache. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 81 Lines • ▼ Show 20 Lines | existingbuf_unlocked: | ||||
bp->b_flags &= ~B_DONE; | bp->b_flags &= ~B_DONE; | ||||
} else { | } else { | ||||
/* | /* | ||||
* Buffer is not in-core, create new buffer. The buffer | * Buffer is not in-core, create new buffer. The buffer | ||||
* returned by getnewbuf() is locked. Note that the returned | * returned by getnewbuf() is locked. Note that the returned | ||||
* buffer is also considered valid (not marked B_INVAL). | * buffer is also considered valid (not marked B_INVAL). | ||||
*/ | */ | ||||
BO_RUNLOCK(bo); | BO_RUNLOCK(bo); | ||||
newbuf_unlocked: | |||||
/* | /* | ||||
* If the user does not want us to create the buffer, bail out | * If the user does not want us to create the buffer, bail out | ||||
* here. | * here. | ||||
*/ | */ | ||||
if (flags & GB_NOCREAT) | if (flags & GB_NOCREAT) | ||||
return (EEXIST); | return (EEXIST); | ||||
bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize; | bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize; | ||||
▲ Show 20 Lines • Show All 1,459 Lines • Show Last 20 Lines |
The blocking there is not needed.