Changeset View
Changeset View
Standalone View
Standalone View
sys/fs/nfsclient/nfs_clbio.c
Show First 20 Lines • Show All 84 Lines • ▼ Show 20 Lines | |||||
/*
 * Tunable/runtime knob: when non-zero, reads go through the generic
 * buffer pager (vfs_bio_getpages()) instead of issuing a direct
 * read RPC per page-in request.
 */
SYSCTL_INT(_vfs_nfs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN,
    &use_buf_pager, 0,
    "Use buffer pager instead of direct readrpc call");
static daddr_t | static daddr_t | ||||
ncl_gbp_getblkno(struct vnode *vp, vm_ooffset_t off) | ncl_gbp_getblkno(struct vnode *vp, vm_ooffset_t off) | ||||
{ | { | ||||
return (off / vp->v_bufobj.bo_bsize); | return (off / vp2bo(vp)->bo_bsize); | ||||
} | } | ||||
static int | static int | ||||
ncl_gbp_getblksz(struct vnode *vp, daddr_t lbn) | ncl_gbp_getblksz(struct vnode *vp, daddr_t lbn) | ||||
{ | { | ||||
struct nfsnode *np; | struct nfsnode *np; | ||||
u_quad_t nsize; | u_quad_t nsize; | ||||
int biosize, bcount; | int biosize, bcount; | ||||
np = VTONFS(vp); | np = VTONFS(vp); | ||||
NFSLOCKNODE(np); | NFSLOCKNODE(np); | ||||
nsize = np->n_size; | nsize = np->n_size; | ||||
NFSUNLOCKNODE(np); | NFSUNLOCKNODE(np); | ||||
biosize = vp->v_bufobj.bo_bsize; | biosize = vp2bo(vp)->bo_bsize; | ||||
bcount = biosize; | bcount = biosize; | ||||
if ((off_t)lbn * biosize >= nsize) | if ((off_t)lbn * biosize >= nsize) | ||||
bcount = 0; | bcount = 0; | ||||
else if ((off_t)(lbn + 1) * biosize > nsize) | else if ((off_t)(lbn + 1) * biosize > nsize) | ||||
bcount = nsize - (off_t)lbn * biosize; | bcount = nsize - (off_t)lbn * biosize; | ||||
return (bcount); | return (bcount); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 341 Lines • ▼ Show 20 Lines | ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred) | ||||
mtx_unlock(&nmp->nm_mtx); | mtx_unlock(&nmp->nm_mtx); | ||||
if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG)) | if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG)) | ||||
/* No caching/ no readaheads. Just read data into the user buffer */ | /* No caching/ no readaheads. Just read data into the user buffer */ | ||||
return ncl_readrpc(vp, uio, cred); | return ncl_readrpc(vp, uio, cred); | ||||
n = 0; | n = 0; | ||||
on = 0; | on = 0; | ||||
biosize = vp->v_bufobj.bo_bsize; | biosize = vp2bo(vp)->bo_bsize; | ||||
seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE); | seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE); | ||||
error = nfs_bioread_check_cons(vp, td, cred); | error = nfs_bioread_check_cons(vp, td, cred); | ||||
if (error) | if (error) | ||||
return error; | return error; | ||||
save2 = curthread_pflags2_set(TDP2_SBPAGES); | save2 = curthread_pflags2_set(TDP2_SBPAGES); | ||||
do { | do { | ||||
Show All 11 Lines | do { | ||||
/* | /* | ||||
* Start the read ahead(s), as required. | * Start the read ahead(s), as required. | ||||
*/ | */ | ||||
if (nmp->nm_readahead > 0) { | if (nmp->nm_readahead > 0) { | ||||
for (nra = 0; nra < nmp->nm_readahead && nra < seqcount && | for (nra = 0; nra < nmp->nm_readahead && nra < seqcount && | ||||
(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) { | (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) { | ||||
rabn = lbn + 1 + nra; | rabn = lbn + 1 + nra; | ||||
if (incore(&vp->v_bufobj, rabn) == NULL) { | if (incore(vp2bo(vp), rabn) == NULL) { | ||||
rabp = nfs_getcacheblk(vp, rabn, biosize, td); | rabp = nfs_getcacheblk(vp, rabn, biosize, td); | ||||
if (!rabp) { | if (!rabp) { | ||||
error = newnfs_sigintr(nmp, td); | error = newnfs_sigintr(nmp, td); | ||||
if (error == 0) | if (error == 0) | ||||
error = EINTR; | error = EINTR; | ||||
goto out; | goto out; | ||||
} | } | ||||
if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { | if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { | ||||
▲ Show 20 Lines • Show All 163 Lines • ▼ Show 20 Lines | do { | ||||
* If not eof and read aheads are enabled, start one. | * If not eof and read aheads are enabled, start one. | ||||
* (You need the current block first, so that you have the | * (You need the current block first, so that you have the | ||||
* directory offset cookie of the next block.) | * directory offset cookie of the next block.) | ||||
*/ | */ | ||||
if (nmp->nm_readahead > 0 && | if (nmp->nm_readahead > 0 && | ||||
(bp->b_flags & B_INVAL) == 0 && | (bp->b_flags & B_INVAL) == 0 && | ||||
(np->n_direofoffset == 0 || | (np->n_direofoffset == 0 || | ||||
(lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) && | (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) && | ||||
incore(&vp->v_bufobj, lbn + 1) == NULL) { | incore(vp2bo(vp), lbn + 1) == NULL) { | ||||
rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td); | rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td); | ||||
if (rabp) { | if (rabp) { | ||||
if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { | if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) { | ||||
rabp->b_flags |= B_ASYNC; | rabp->b_flags |= B_ASYNC; | ||||
rabp->b_iocmd = BIO_READ; | rabp->b_iocmd = BIO_READ; | ||||
vfs_busy_pages(rabp, 0); | vfs_busy_pages(rabp, 0); | ||||
if (ncl_asyncio(nmp, rabp, cred, td)) { | if (ncl_asyncio(nmp, rabp, cred, td)) { | ||||
rabp->b_flags |= B_INVAL; | rabp->b_flags |= B_INVAL; | ||||
▲ Show 20 Lines • Show All 298 Lines • ▼ Show 20 Lines | #endif | ||||
/* | /* | ||||
* Maybe this should be above the vnode op call, but so long as | * Maybe this should be above the vnode op call, but so long as | ||||
* file servers have no limits, i don't think it matters | * file servers have no limits, i don't think it matters | ||||
*/ | */ | ||||
if (vn_rlimit_fsize(vp, uio, td)) | if (vn_rlimit_fsize(vp, uio, td)) | ||||
return (EFBIG); | return (EFBIG); | ||||
save2 = curthread_pflags2_set(TDP2_SBPAGES); | save2 = curthread_pflags2_set(TDP2_SBPAGES); | ||||
biosize = vp->v_bufobj.bo_bsize; | biosize = vp2bo(vp)->bo_bsize; | ||||
/* | /* | ||||
* Find all of this file's B_NEEDCOMMIT buffers. If our writes | * Find all of this file's B_NEEDCOMMIT buffers. If our writes | ||||
* would exceed the local maximum per-file write commit size when | * would exceed the local maximum per-file write commit size when | ||||
* combined with those, we must decide whether to flush, | * combined with those, we must decide whether to flush, | ||||
* go synchronous, or return error. We don't bother checking | * go synchronous, or return error. We don't bother checking | ||||
* IO_UNIT -- we just make all writes atomic anyway, as there's | * IO_UNIT -- we just make all writes atomic anyway, as there's | ||||
* no point optimizing for something that really won't ever happen. | * no point optimizing for something that really won't ever happen. | ||||
*/ | */ | ||||
wouldcommit = 0; | wouldcommit = 0; | ||||
if (!(ioflag & IO_SYNC)) { | if (!(ioflag & IO_SYNC)) { | ||||
int nflag; | int nflag; | ||||
NFSLOCKNODE(np); | NFSLOCKNODE(np); | ||||
nflag = np->n_flag; | nflag = np->n_flag; | ||||
NFSUNLOCKNODE(np); | NFSUNLOCKNODE(np); | ||||
if (nflag & NMODIFIED) { | if (nflag & NMODIFIED) { | ||||
BO_LOCK(&vp->v_bufobj); | BO_LOCK(vp2bo(vp)); | ||||
if (vp->v_bufobj.bo_dirty.bv_cnt != 0) { | if (vp2bo(vp)->bo_dirty.bv_cnt != 0) { | ||||
TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, | TAILQ_FOREACH(bp, &vp2bo(vp)->bo_dirty.bv_hd, | ||||
b_bobufs) { | b_bobufs) { | ||||
if (bp->b_flags & B_NEEDCOMMIT) | if (bp->b_flags & B_NEEDCOMMIT) | ||||
wouldcommit += bp->b_bcount; | wouldcommit += bp->b_bcount; | ||||
} | } | ||||
} | } | ||||
BO_UNLOCK(&vp->v_bufobj); | BO_UNLOCK(vp2bo(vp)); | ||||
} | } | ||||
} | } | ||||
do { | do { | ||||
if (!(ioflag & IO_SYNC)) { | if (!(ioflag & IO_SYNC)) { | ||||
wouldcommit += biosize; | wouldcommit += biosize; | ||||
if (wouldcommit > nmp->nm_wcommitsize) { | if (wouldcommit > nmp->nm_wcommitsize) { | ||||
np->n_attrstamp = 0; | np->n_attrstamp = 0; | ||||
▲ Show 20 Lines • Show All 302 Lines • ▼ Show 20 Lines | while (bp == NULL) { | ||||
return (NULL); | return (NULL); | ||||
bp = getblk(vp, bn, size, 0, 2 * hz, 0); | bp = getblk(vp, bn, size, 0, 2 * hz, 0); | ||||
} | } | ||||
} else { | } else { | ||||
bp = getblk(vp, bn, size, 0, 0, 0); | bp = getblk(vp, bn, size, 0, 0, 0); | ||||
} | } | ||||
if (vp->v_type == VREG) | if (vp->v_type == VREG) | ||||
bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE); | bp->b_blkno = bn * (vp2bo(vp)->bo_bsize / DEV_BSIZE); | ||||
return (bp); | return (bp); | ||||
} | } | ||||
/* | /* | ||||
* Flush and invalidate all dirty buffers. If another process is already | * Flush and invalidate all dirty buffers. If another process is already | ||||
* doing the flush, just wait for completion. | * doing the flush, just wait for completion. | ||||
*/ | */ | ||||
int | int | ||||
Show All 21 Lines | ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg) | ||||
old_lock = ncl_excl_start(vp); | old_lock = ncl_excl_start(vp); | ||||
if (old_lock) | if (old_lock) | ||||
flags |= V_ALLOWCLEAN; | flags |= V_ALLOWCLEAN; | ||||
/* | /* | ||||
* Now, flush as required. | * Now, flush as required. | ||||
*/ | */ | ||||
if ((flags & (V_SAVE | V_VMIO)) == V_SAVE && | if ((flags & (V_SAVE | V_VMIO)) == V_SAVE && | ||||
vp->v_bufobj.bo_object != NULL) { | vp->v_object != NULL) { | ||||
VM_OBJECT_WLOCK(vp->v_bufobj.bo_object); | VM_OBJECT_WLOCK(vp->v_object); | ||||
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC); | vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC); | ||||
VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object); | VM_OBJECT_WUNLOCK(vp->v_object); | ||||
/* | /* | ||||
* If the page clean was interrupted, fail the invalidation. | * If the page clean was interrupted, fail the invalidation. | ||||
* Not doing so, we run the risk of losing dirty pages in the | * Not doing so, we run the risk of losing dirty pages in the | ||||
* vinvalbuf() call below. | * vinvalbuf() call below. | ||||
*/ | */ | ||||
if (intrflg && (error = newnfs_sigintr(nmp, td))) | if (intrflg && (error = newnfs_sigintr(nmp, td))) | ||||
goto out; | goto out; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 472 Lines • ▼ Show 20 Lines | |||||
* that straddle the truncation point. | * that straddle the truncation point. | ||||
*/ | */ | ||||
int | int | ||||
ncl_meta_setsize(struct vnode *vp, struct thread *td, u_quad_t nsize) | ncl_meta_setsize(struct vnode *vp, struct thread *td, u_quad_t nsize) | ||||
{ | { | ||||
struct nfsnode *np = VTONFS(vp); | struct nfsnode *np = VTONFS(vp); | ||||
u_quad_t tsize; | u_quad_t tsize; | ||||
int biosize = vp->v_bufobj.bo_bsize; | int biosize = vp2bo(vp)->bo_bsize; | ||||
int error = 0; | int error = 0; | ||||
NFSLOCKNODE(np); | NFSLOCKNODE(np); | ||||
tsize = np->n_size; | tsize = np->n_size; | ||||
np->n_size = nsize; | np->n_size = nsize; | ||||
NFSUNLOCKNODE(np); | NFSUNLOCKNODE(np); | ||||
if (nsize < tsize) { | if (nsize < tsize) { | ||||
Show All 26 Lines |