Changeset View
Changeset View
Standalone View
Standalone View
sys/contrib/openzfs/module/os/linux/zfs/zfs_uio.c
- This file was moved from sys/contrib/openzfs/module/zcommon/zfs_uio.c.
Show First 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* Move "n" bytes at byte address "p"; "rw" indicates the direction | * Move "n" bytes at byte address "p"; "rw" indicates the direction | ||||
* of the move, and the I/O parameters are provided in "uio", which is | * of the move, and the I/O parameters are provided in "uio", which is | ||||
* update to reflect the data which was moved. Returns 0 on success or | * update to reflect the data which was moved. Returns 0 on success or | ||||
* a non-zero errno on failure. | * a non-zero errno on failure. | ||||
*/ | */ | ||||
static int | static int | ||||
uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio) | zfs_uiomove_iov(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio) | ||||
{ | { | ||||
const struct iovec *iov = uio->uio_iov; | const struct iovec *iov = uio->uio_iov; | ||||
size_t skip = uio->uio_skip; | size_t skip = uio->uio_skip; | ||||
ulong_t cnt; | ulong_t cnt; | ||||
while (n && uio->uio_resid) { | while (n && uio->uio_resid) { | ||||
cnt = MIN(iov->iov_len - skip, n); | cnt = MIN(iov->iov_len - skip, n); | ||||
switch (uio->uio_segflg) { | switch (uio->uio_segflg) { | ||||
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | while (n && uio->uio_resid) { | ||||
uio->uio_loffset += cnt; | uio->uio_loffset += cnt; | ||||
p = (caddr_t)p + cnt; | p = (caddr_t)p + cnt; | ||||
n -= cnt; | n -= cnt; | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
uiomove_bvec(void *p, size_t n, enum uio_rw rw, struct uio *uio) | zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio) | ||||
{ | { | ||||
const struct bio_vec *bv = uio->uio_bvec; | const struct bio_vec *bv = uio->uio_bvec; | ||||
size_t skip = uio->uio_skip; | size_t skip = uio->uio_skip; | ||||
ulong_t cnt; | ulong_t cnt; | ||||
while (n && uio->uio_resid) { | while (n && uio->uio_resid) { | ||||
void *paddr; | void *paddr; | ||||
cnt = MIN(bv->bv_len - skip, n); | cnt = MIN(bv->bv_len - skip, n); | ||||
Show All 17 Lines | while (n && uio->uio_resid) { | ||||
p = (caddr_t)p + cnt; | p = (caddr_t)p + cnt; | ||||
n -= cnt; | n -= cnt; | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
#if defined(HAVE_VFS_IOV_ITER)
/*
 * Move up to "n" bytes between "p" and the iov_iter attached to "uio"
 * (UIO_ITER segment).  UIO_READ copies out of "p" into the iterator,
 * UIO_WRITE copies from the iterator into "p".  On success the uio's
 * residual count and logical offset are advanced by the bytes moved.
 *
 * "revert" is passed as B_TRUE by zfs_uiocopy() so the underlying
 * iov_iter cursor is rewound after the copy, leaving the caller's
 * iterator unconsumed.
 *
 * Returns 0 on success or EFAULT if no bytes could be processed.
 */
static int
zfs_uiomove_iter(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio,
    boolean_t revert)
{
	size_t cnt = MIN(n, uio->uio_resid);

	/* Honor any leading bytes the caller asked us to skip. */
	if (uio->uio_skip)
		iov_iter_advance(uio->uio_iter, uio->uio_skip);

	if (rw == UIO_READ)
		cnt = copy_to_iter(p, cnt, uio->uio_iter);
	else
		cnt = copy_from_iter(p, cnt, uio->uio_iter);

	/*
	 * When operating on a full pipe no bytes are processed.
	 * In which case return EFAULT which is converted to EAGAIN
	 * by the kernel's generic_file_splice_read() function.
	 */
	if (cnt == 0)
		return (EFAULT);

	/*
	 * Revert advancing the uio_iter. This is set by zfs_uiocopy()
	 * to avoid consuming the uio and its iov_iter structure.
	 */
	if (revert)
		iov_iter_revert(uio->uio_iter, cnt);

	uio->uio_resid -= cnt;
	uio->uio_loffset += cnt;

	return (0);
}
#endif
/*
 * Move "n" bytes at byte address "p"; "rw" indicates the direction of
 * the move.  Dispatches on uio_segflg to the bvec, iov_iter, or iovec
 * implementation.  Returns 0 on success or a non-zero errno on failure;
 * "uio" is updated to reflect the data moved.
 */
int
zfs_uiomove(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{
	if (uio->uio_segflg == UIO_BVEC)
		return (zfs_uiomove_bvec(p, n, rw, uio));
#if defined(HAVE_VFS_IOV_ITER)
	else if (uio->uio_segflg == UIO_ITER)
		return (zfs_uiomove_iter(p, n, rw, uio, B_FALSE));
#endif
	else
		return (zfs_uiomove_iov(p, n, rw, uio));
}
EXPORT_SYMBOL(zfs_uiomove);
/*
 * Fault in the pages of the first n bytes specified by the uio structure.
 * 1 byte in each page is touched and the uio struct is unmodified. Any
 * error will terminate the process as this is only a best attempt to get
 * the pages resident.
 *
 * Returns 0 on success or EFAULT if a user page could not be faulted in.
 */
int
zfs_uio_prefaultpages(ssize_t n, zfs_uio_t *uio)
{
	if (uio->uio_segflg == UIO_SYSSPACE || uio->uio_segflg == UIO_BVEC) {
		/* There's never a need to fault in kernel pages */
		return (0);
#if defined(HAVE_VFS_IOV_ITER)
	} else if (uio->uio_segflg == UIO_ITER) {
		/*
		 * At least a Linux 4.9 kernel, iov_iter_fault_in_readable()
		 * can be relied on to fault in user pages when referenced.
		 */
		if (iov_iter_fault_in_readable(uio->uio_iter, n))
			return (EFAULT);
#endif
	} else {
		/* Fault in all user pages */
		ASSERT3S(uio->uio_segflg, ==, UIO_USERSPACE);
		const struct iovec *iov = uio->uio_iov;
		int iovcnt = uio->uio_iovcnt;
		size_t skip = uio->uio_skip;
		uint8_t tmp;
		caddr_t p;

		/* skip applies only to the first iovec; reset thereafter */
		for (; n > 0 && iovcnt > 0; iov++, iovcnt--, skip = 0) {
			ulong_t cnt = MIN(iov->iov_len - skip, n);
			/* empty iov */
			if (cnt == 0)
				continue;
			n -= cnt;
			/* touch each page in this segment. */
			p = iov->iov_base + skip;
			while (cnt) {
				if (get_user(tmp, (uint8_t *)p))
					return (EFAULT);
				ulong_t incr = MIN(cnt, PAGESIZE);
				p += incr;
				cnt -= incr;
			}
			/* touch the last byte in case it straddles a page. */
			p--;
			if (get_user(tmp, (uint8_t *)p))
				return (EFAULT);
		}
	}

	return (0);
}
EXPORT_SYMBOL(zfs_uio_prefaultpages);
/* | /* | ||||
* The same as uiomove() but doesn't modify uio structure. | * The same as zfs_uiomove() but doesn't modify uio structure. | ||||
* return in cbytes how many bytes were copied. | * return in cbytes how many bytes were copied. | ||||
*/ | */ | ||||
int | int | ||||
uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes) | zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes) | ||||
{ | { | ||||
struct uio uio_copy; | zfs_uio_t uio_copy; | ||||
int ret; | int ret; | ||||
bcopy(uio, &uio_copy, sizeof (struct uio)); | bcopy(uio, &uio_copy, sizeof (zfs_uio_t)); | ||||
if (uio->uio_segflg == UIO_BVEC) | if (uio->uio_segflg == UIO_BVEC) | ||||
ret = uiomove_bvec(p, n, rw, &uio_copy); | ret = zfs_uiomove_bvec(p, n, rw, &uio_copy); | ||||
#if defined(HAVE_VFS_IOV_ITER) | #if defined(HAVE_VFS_IOV_ITER) | ||||
else if (uio->uio_segflg == UIO_ITER) | else if (uio->uio_segflg == UIO_ITER) | ||||
ret = uiomove_iter(p, n, rw, &uio_copy, B_TRUE); | ret = zfs_uiomove_iter(p, n, rw, &uio_copy, B_TRUE); | ||||
#endif | #endif | ||||
else | else | ||||
ret = uiomove_iov(p, n, rw, &uio_copy); | ret = zfs_uiomove_iov(p, n, rw, &uio_copy); | ||||
*cbytes = uio->uio_resid - uio_copy.uio_resid; | *cbytes = uio->uio_resid - uio_copy.uio_resid; | ||||
return (ret); | return (ret); | ||||
} | } | ||||
EXPORT_SYMBOL(uiocopy); | EXPORT_SYMBOL(zfs_uiocopy); | ||||
/* | /* | ||||
* Drop the next n chars out of *uio. | * Drop the next n chars out of *uio. | ||||
*/ | */ | ||||
void | void | ||||
uioskip(uio_t *uio, size_t n) | zfs_uioskip(zfs_uio_t *uio, size_t n) | ||||
{ | { | ||||
if (n > uio->uio_resid) | if (n > uio->uio_resid) | ||||
return; | return; | ||||
if (uio->uio_segflg == UIO_BVEC) { | if (uio->uio_segflg == UIO_BVEC) { | ||||
uio->uio_skip += n; | uio->uio_skip += n; | ||||
while (uio->uio_iovcnt && | while (uio->uio_iovcnt && | ||||
uio->uio_skip >= uio->uio_bvec->bv_len) { | uio->uio_skip >= uio->uio_bvec->bv_len) { | ||||
Show All 12 Lines | while (uio->uio_iovcnt && | ||||
uio->uio_skip -= uio->uio_iov->iov_len; | uio->uio_skip -= uio->uio_iov->iov_len; | ||||
uio->uio_iov++; | uio->uio_iov++; | ||||
uio->uio_iovcnt--; | uio->uio_iovcnt--; | ||||
} | } | ||||
} | } | ||||
uio->uio_loffset += n; | uio->uio_loffset += n; | ||||
uio->uio_resid -= n; | uio->uio_resid -= n; | ||||
} | } | ||||
EXPORT_SYMBOL(uioskip); | EXPORT_SYMBOL(zfs_uioskip); | ||||
#endif /* _KERNEL */ | #endif /* _KERNEL */ |