Changeset View
Standalone View
sys/contrib/openzfs/module/zfs/zfs_vnops.c
[... first 181 lines folded ...]
  *
  * RETURN: 0 on success, error code on failure.
  *
  * Side Effects:
  * inode - atime updated if byte count > 0
  */
 /* ARGSUSED */
 int
-zfs_read(struct znode *zp, uio_t *uio, int ioflag, cred_t *cr)
+zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 {
         int error = 0;
         boolean_t frsync = B_FALSE;
         zfsvfs_t *zfsvfs = ZTOZSB(zp);
         ZFS_ENTER(zfsvfs);
         ZFS_VERIFY_ZP(zp);
         if (zp->z_pflags & ZFS_AV_QUARANTINED) {
                 ZFS_EXIT(zfsvfs);
                 return (SET_ERROR(EACCES));
         }
         /* We don't copy out anything useful for directories. */
         if (Z_ISDIR(ZTOTYPE(zp))) {
                 ZFS_EXIT(zfsvfs);
                 return (SET_ERROR(EISDIR));
         }
         /*
          * Validate file offset
          */
-        if (uio->uio_loffset < (offset_t)0) {
+        if (zfs_uio_offset(uio) < (offset_t)0) {
                 ZFS_EXIT(zfsvfs);
                 return (SET_ERROR(EINVAL));
         }
         /*
          * Fasttrack empty reads
          */
-        if (uio->uio_resid == 0) {
+        if (zfs_uio_resid(uio) == 0) {
                 ZFS_EXIT(zfsvfs);
                 return (0);
         }
 #ifdef FRSYNC
         /*
          * If we're in FRSYNC mode, sync out this znode before reading it.
          * Only do this for non-snapshots.
          *
          * Some platforms do not support FRSYNC and instead map it
          * to O_SYNC, which results in unnecessary calls to zil_commit. We
          * only honor FRSYNC requests on platforms which support it.
          */
         frsync = !!(ioflag & FRSYNC);
 #endif
         if (zfsvfs->z_log &&
             (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
                 zil_commit(zfsvfs->z_log, zp->z_id);
         /*
          * Lock the range against changes.
          */
         zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
-            uio->uio_loffset, uio->uio_resid, RL_READER);
+            zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);
         /*
          * If we are reading past end-of-file we can skip
          * to the end; but we might still need to set atime.
          */
-        if (uio->uio_loffset >= zp->z_size) {
+        if (zfs_uio_offset(uio) >= zp->z_size) {
                 error = 0;
                 goto out;
         }
-        ASSERT(uio->uio_loffset < zp->z_size);
-        ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
+        ASSERT(zfs_uio_offset(uio) < zp->z_size);
+        ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
         ssize_t start_resid = n;
         while (n > 0) {
                 ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
-                    P2PHASE(uio->uio_loffset, zfs_vnops_read_chunk_size));
+                    P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
 #ifdef UIO_NOCOPY
-                if (uio->uio_segflg == UIO_NOCOPY)
+                if (zfs_uio_segflg(uio) == UIO_NOCOPY)
                         error = mappedread_sf(zp, nbytes, uio);
                 else
 #endif
                 if (zn_has_cached_data(zp) && !(ioflag & O_DIRECT)) {
                         error = mappedread(zp, nbytes, uio);
                 } else {
                         error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
                             uio, nbytes);
[... 36 lines folded ...]
  * error code if failure
  *
  * Timestamps:
  * ip - ctime|mtime updated if byte count > 0
  */
 /* ARGSUSED */
 int
-zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr)
+zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 {
         int error = 0;
-        ssize_t start_resid = uio->uio_resid;
+        ssize_t start_resid = zfs_uio_resid(uio);
         /*
          * Fasttrack empty write
          */
         ssize_t n = start_resid;
         if (n == 0)
                 return (0);
[... 20 lines folded; context: if (zfs_is_readonly(zfsvfs)) { ...]
                 return (SET_ERROR(EROFS));
         }
         /*
          * If immutable or not appending then return EPERM
          */
         if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
             ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
-            (uio->uio_loffset < zp->z_size))) {
+            (zfs_uio_offset(uio) < zp->z_size))) {
                 ZFS_EXIT(zfsvfs);
                 return (SET_ERROR(EPERM));
         }
         /*
          * Validate file offset
          */
-        offset_t woff = ioflag & O_APPEND ? zp->z_size : uio->uio_loffset;
+        offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
         if (woff < 0) {
                 ZFS_EXIT(zfsvfs);
                 return (SET_ERROR(EINVAL));
         }
         const uint64_t max_blksz = zfsvfs->z_max_blksz;
         /*
          * Pre-fault the pages to ensure slow (eg NFS) pages
          * don't hold up txg.
          * Skip this if uio contains loaned arc_buf.
          */
-        if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
+        if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
                 ZFS_EXIT(zfsvfs);
                 return (SET_ERROR(EFAULT));
         }
         /*
          * If in append mode, set the io offset pointer to eof.
          */
         zfs_locked_range_t *lr;
         if (ioflag & O_APPEND) {
                 /*
                  * Obtain an appending range lock to guarantee file append
                  * semantics. We reset the write offset once we have the lock.
                  */
                 lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
                 woff = lr->lr_offset;
                 if (lr->lr_length == UINT64_MAX) {
                         /*
                          * We overlocked the file because this write will cause
                          * the file block size to increase.
                          * Note that zp_size cannot change with this lock held.
                          */
                         woff = zp->z_size;
                 }
-                uio->uio_loffset = woff;
+                zfs_uio_setoffset(uio, woff);
         } else {
                 /*
                  * Note that if the file block size will change as a result of
                  * this write, then this range lock will lock the entire file
                  * so that we can re-write the block safely.
                  */
                 lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
         }
-        if (zn_rlimit_fsize(zp, uio, uio->uio_td)) {
+        if (zn_rlimit_fsize(zp, uio)) {
                 zfs_rangelock_exit(lr);
                 ZFS_EXIT(zfsvfs);
                 return (SET_ERROR(EFBIG));
         }
         const rlim64_t limit = MAXOFFSET_T;
         if (woff >= limit) {
[... 13 lines folded; context: zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr) ...]
         const uint64_t projid = zp->z_projid;
         /*
          * Write the file in reasonable size chunks. Each chunk is written
          * in a separate transaction; this keeps the intent log records small
          * and allows us to do more fine-grained space accounting.
          */
         while (n > 0) {
-                woff = uio->uio_loffset;
+                woff = zfs_uio_offset(uio);
                 if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
                     zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
                     (projid != ZFS_DEFAULT_PROJID &&
                     zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
                     projid))) {
                         error = SET_ERROR(EDQUOT);
                         break;
[... 11 lines folded; context: if (n >= max_blksz && woff >= zp->z_size && ...]
                          * up on a pagefault (e.g., from an NFS server mapping).
                          */
                         size_t cbytes;
                         abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
                             max_blksz);
                         ASSERT(abuf != NULL);
                         ASSERT(arc_buf_size(abuf) == max_blksz);
-                        if ((error = uiocopy(abuf->b_data, max_blksz,
+                        if ((error = zfs_uiocopy(abuf->b_data, max_blksz,
                             UIO_WRITE, uio, &cbytes))) {
                                 dmu_return_arcbuf(abuf);
                                 break;
                         }
                         ASSERT3S(cbytes, ==, max_blksz);
                 }
                 /*
[... 44 lines folded; context: while (n > 0) { ...]
                  * XXX - should we really limit each write to z_max_blksz?
                  * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
                  */
                 const ssize_t nbytes =
                     MIN(n, max_blksz - P2PHASE(woff, max_blksz));
                 ssize_t tx_bytes;
                 if (abuf == NULL) {
-                        tx_bytes = uio->uio_resid;
-                        uio_fault_disable(uio, B_TRUE);
+                        tx_bytes = zfs_uio_resid(uio);
+                        zfs_uio_fault_disable(uio, B_TRUE);
                         error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
                             uio, nbytes, tx);
-                        uio_fault_disable(uio, B_FALSE);
+                        zfs_uio_fault_disable(uio, B_FALSE);
 #ifdef __linux__
                         if (error == EFAULT) {
                                 dmu_tx_commit(tx);
                                 /*
                                  * Account for partial writes before
                                  * continuing the loop.
                                  * Update needs to occur before the next
-                                 * uio_prefaultpages, or prefaultpages may
+                                 * zfs_uio_prefaultpages, or prefaultpages may
                                  * error, and we may break the loop early.
                                  */
-                                if (tx_bytes != uio->uio_resid)
-                                        n -= tx_bytes - uio->uio_resid;
-                                if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
+                                if (tx_bytes != zfs_uio_resid(uio))
+                                        n -= tx_bytes - zfs_uio_resid(uio);
+                                if (zfs_uio_prefaultpages(MIN(n, max_blksz),
+                                    uio)) {
                                         break;
                                 }
                                 continue;
                         }
 #endif
                         if (error != 0) {
                                 dmu_tx_commit(tx);
                                 break;
                         }
-                        tx_bytes -= uio->uio_resid;
+                        tx_bytes -= zfs_uio_resid(uio);
                 } else {
                         /* Implied by abuf != NULL: */
                         ASSERT3S(n, >=, max_blksz);
                         ASSERT0(P2PHASE(woff, max_blksz));
                         /*
                          * We can simplify nbytes to MIN(n, max_blksz) since
                          * P2PHASE(woff, max_blksz) is 0, and knowing
                          * n >= max_blksz lets us simplify further:
                          */
                         ASSERT3S(nbytes, ==, max_blksz);
                         /*
                          * Thus, we're writing a full block at a block-aligned
                          * offset and extending the file past EOF.
                          *
                          * dmu_assign_arcbuf_by_dbuf() will directly assign the
                          * arc buffer to a dbuf.
                          */
                         error = dmu_assign_arcbuf_by_dbuf(
                             sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
                         if (error != 0) {
                                 dmu_return_arcbuf(abuf);
                                 dmu_tx_commit(tx);
                                 break;
                         }
-                        ASSERT3S(nbytes, <=, uio->uio_resid);
-                        uioskip(uio, nbytes);
+                        ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
+                        zfs_uioskip(uio, nbytes);
                         tx_bytes = nbytes;
                 }
                 if (tx_bytes && zn_has_cached_data(zp) &&
                     !(ioflag & O_DIRECT)) {
                         update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
                 }
                 /*
[... 34 lines folded; context: #endif ...]
                 mutex_exit(&zp->z_acl_lock);
                 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
                 /*
                  * Update the file size (zp_size) if it has changed;
                  * account for possible concurrent updates.
                  */
-                while ((end_size = zp->z_size) < uio->uio_loffset) {
+                while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
                         (void) atomic_cas_64(&zp->z_size, end_size,
-                            uio->uio_loffset);
+                            zfs_uio_offset(uio));
                         ASSERT(error == 0);
                 }
                 /*
                  * If we are replaying and eof is non zero then force
                  * the file size to the specified eof. Note, there's no
                  * concurrency during replay.
                  */
                 if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
                         zp->z_size = zfsvfs->z_replay_eof;
                 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
                 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
                     NULL, NULL);
                 dmu_tx_commit(tx);
                 if (error != 0)
                         break;
                 ASSERT3S(tx_bytes, ==, nbytes);
                 n -= nbytes;
                 if (n > 0) {
-                        if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
+                        if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
                                 error = SET_ERROR(EFAULT);
                                 break;
                         }
                 }
         }
-        zfs_inode_update(zp);
+        zfs_znode_update_vfs(zp);
         zfs_rangelock_exit(lr);
         /*
          * If we're in replay mode, or we made no progress, or the
          * uio data is inaccessible return an error. Otherwise, it's
          * at least a partial write, so it's successful.
          */
-        if (zfsvfs->z_replay || uio->uio_resid == start_resid ||
+        if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
             error == EFAULT) {
                 ZFS_EXIT(zfsvfs);
                 return (error);
         }
         if (ioflag & (O_SYNC | O_DSYNC) ||
             zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
                 zil_commit(zilog, zp->z_id);
-        const int64_t nwritten = start_resid - uio->uio_resid;
+        const int64_t nwritten = start_resid - zfs_uio_resid(uio);
         dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
         task_io_account_write(nwritten);
         ZFS_EXIT(zfsvfs);
         return (0);
 }
 /*ARGSUSED*/
[... 203 lines folded through the end of the file ...]
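The pattern in this changeset is mechanical: every direct dereference of the platform uio structure (uio->uio_loffset, uio->uio_resid, uio->uio_segflg, uio->uio_td) is replaced by the zfs_uio_t type and its accessor calls (zfs_uio_offset(), zfs_uio_resid(), zfs_uio_segflg(), zfs_uio_setoffset(), zfs_uio_prefaultpages(), zfs_uiocopy(), zfs_uioskip(), zfs_uio_fault_disable()), so the shared zfs_read()/zfs_write() code no longer reaches into an OS-specific uio layout. As a rough, illustrative sketch of that accessor shape only, with assumed field names and types rather than the real per-platform zfs_uio headers:

/*
 * Illustrative sketch only (assumed names and types, not the actual
 * OpenZFS zfs_uio headers): thin inline wrappers so common code never
 * touches the platform uio layout directly.
 */
#include <stdint.h>
#include <stddef.h>

typedef struct zfs_uio {
        int64_t uio_loffset;    /* current file offset (assumed field) */
        size_t  uio_resid;      /* bytes left to transfer (assumed field) */
        int     uio_segflg;     /* segment type flag (assumed field) */
} zfs_uio_t;

static inline int64_t
zfs_uio_offset(zfs_uio_t *uio)
{
        return (uio->uio_loffset);
}

static inline size_t
zfs_uio_resid(zfs_uio_t *uio)
{
        return (uio->uio_resid);
}

static inline int
zfs_uio_segflg(zfs_uio_t *uio)
{
        return (uio->uio_segflg);
}

static inline void
zfs_uio_setoffset(zfs_uio_t *uio, int64_t off)
{
        uio->uio_loffset = off;
}

Callers then write zfs_uio_offset(uio) where they previously wrote uio->uio_loffset, which is the substitution repeated throughout the hunks above; the real accessors are supplied per platform so the FreeBSD and Linux uio representations stay hidden behind the same interface.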