Changeset View
Changeset View
Standalone View
Standalone View
sys/kern/vfs_default.c
Show First 20 Lines • Show All 86 Lines • ▼ Show 20 Lines | |||||
static int vop_stdis_text(struct vop_is_text_args *ap); | static int vop_stdis_text(struct vop_is_text_args *ap); | ||||
static int vop_stdunset_text(struct vop_unset_text_args *ap); | static int vop_stdunset_text(struct vop_unset_text_args *ap); | ||||
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap); | static int vop_stdadd_writecount(struct vop_add_writecount_args *ap); | ||||
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap); | static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap); | ||||
static int vop_stdfdatasync(struct vop_fdatasync_args *ap); | static int vop_stdfdatasync(struct vop_fdatasync_args *ap); | ||||
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap); | static int vop_stdgetpages_async(struct vop_getpages_async_args *ap); | ||||
static int vop_stdread_pgcache(struct vop_read_pgcache_args *ap); | static int vop_stdread_pgcache(struct vop_read_pgcache_args *ap); | ||||
static int vop_stdstat(struct vop_stat_args *ap); | static int vop_stdstat(struct vop_stat_args *ap); | ||||
static int vop_stddeallocate(struct vop_deallocate_args *ap); | |||||
/* | /* | ||||
* This vnode table stores what we want to do if the filesystem doesn't | * This vnode table stores what we want to do if the filesystem doesn't | ||||
* implement a particular VOP. | * implement a particular VOP. | ||||
* | * | ||||
* If there is no specific entry here, we will return EOPNOTSUPP. | * If there is no specific entry here, we will return EOPNOTSUPP. | ||||
* | * | ||||
* Note that every filesystem has to implement either vop_access | * Note that every filesystem has to implement either vop_access | ||||
* or vop_accessx; failing to do so will result in immediate crash | * or vop_accessx; failing to do so will result in immediate crash | ||||
* due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(), | * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(), | ||||
* which calls vop_stdaccess() etc. | * which calls vop_stdaccess() etc. | ||||
*/ | */ | ||||
struct vop_vector default_vnodeops = { | struct vop_vector default_vnodeops = { | ||||
.vop_default = NULL, | .vop_default = NULL, | ||||
.vop_bypass = VOP_EOPNOTSUPP, | .vop_bypass = VOP_EOPNOTSUPP, | ||||
.vop_access = vop_stdaccess, | .vop_access = vop_stdaccess, | ||||
.vop_accessx = vop_stdaccessx, | .vop_accessx = vop_stdaccessx, | ||||
.vop_advise = vop_stdadvise, | .vop_advise = vop_stdadvise, | ||||
.vop_advlock = vop_stdadvlock, | .vop_advlock = vop_stdadvlock, | ||||
.vop_advlockasync = vop_stdadvlockasync, | .vop_advlockasync = vop_stdadvlockasync, | ||||
.vop_advlockpurge = vop_stdadvlockpurge, | .vop_advlockpurge = vop_stdadvlockpurge, | ||||
.vop_allocate = vop_stdallocate, | .vop_allocate = vop_stdallocate, | ||||
.vop_deallocate = vop_stddeallocate, | |||||
.vop_bmap = vop_stdbmap, | .vop_bmap = vop_stdbmap, | ||||
.vop_close = VOP_NULL, | .vop_close = VOP_NULL, | ||||
.vop_fsync = VOP_NULL, | .vop_fsync = VOP_NULL, | ||||
.vop_stat = vop_stdstat, | .vop_stat = vop_stdstat, | ||||
.vop_fdatasync = vop_stdfdatasync, | .vop_fdatasync = vop_stdfdatasync, | ||||
.vop_getpages = vop_stdgetpages, | .vop_getpages = vop_stdgetpages, | ||||
.vop_getpages_async = vop_stdgetpages_async, | .vop_getpages_async = vop_stdgetpages_async, | ||||
.vop_getwritemount = vop_stdgetwritemount, | .vop_getwritemount = vop_stdgetwritemount, | ||||
▲ Show 20 Lines • Show All 369 Lines • ▼ Show 20 Lines | case _PC_ASYNC_IO: | ||||
*ap->a_retval = _POSIX_ASYNCHRONOUS_IO; | *ap->a_retval = _POSIX_ASYNCHRONOUS_IO; | ||||
return (0); | return (0); | ||||
case _PC_PATH_MAX: | case _PC_PATH_MAX: | ||||
*ap->a_retval = PATH_MAX; | *ap->a_retval = PATH_MAX; | ||||
return (0); | return (0); | ||||
case _PC_ACL_EXTENDED: | case _PC_ACL_EXTENDED: | ||||
case _PC_ACL_NFS4: | case _PC_ACL_NFS4: | ||||
case _PC_CAP_PRESENT: | case _PC_CAP_PRESENT: | ||||
case _PC_FDEALLOC_PRESENT: | |||||
case _PC_INF_PRESENT: | case _PC_INF_PRESENT: | ||||
case _PC_MAC_PRESENT: | case _PC_MAC_PRESENT: | ||||
*ap->a_retval = 0; | *ap->a_retval = 0; | ||||
return (0); | return (0); | ||||
default: | default: | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
/* NOTREACHED */ | /* NOTREACHED */ | ||||
▲ Show 20 Lines • Show All 555 Lines • ▼ Show 20 Lines | for (;;) { | ||||
if (should_yield()) | if (should_yield()) | ||||
break; | break; | ||||
} | } | ||||
out: | out: | ||||
*ap->a_len = len; | *ap->a_len = len; | ||||
*ap->a_offset = offset; | *ap->a_offset = offset; | ||||
free(buf, M_TEMP); | free(buf, M_TEMP); | ||||
return (error); | |||||
} | |||||
static int | |||||
vp_zerofill(struct vnode *vp, struct vattr *vap, off_t offset, off_t len, | |||||
off_t *residp, struct ucred *cred) | |||||
{ | |||||
int iosize; | |||||
int error = 0; | |||||
void *buf = NULL; | |||||
struct iovec aiov; | |||||
struct uio auio; | |||||
struct thread *td; | |||||
iosize = vap->va_blocksize; | |||||
td = curthread; | |||||
if (iosize == 0) | |||||
iosize = BLKDEV_IOSIZE; | |||||
if (iosize > MAXPHYS) | |||||
iosize = MAXPHYS; | |||||
kib: I think you should try to set iosize to multiple of blocksize that fits into zero_region. | |||||
buf = malloc(iosize, M_TEMP, M_ZERO | M_WAITOK); | |||||
while (len > 0) { | |||||
int xfersize = iosize; | |||||
if (offset % iosize != 0) | |||||
xfersize -= offset % iosize; | |||||
if (xfersize > len) | |||||
xfersize = len; | |||||
if (error != EOPNOTSUPP) | |||||
break; | |||||
aiov.iov_base = buf; | |||||
aiov.iov_len = xfersize; | |||||
auio.uio_iov = &aiov; | |||||
auio.uio_iovcnt = 1; | |||||
auio.uio_offset = offset; | |||||
auio.uio_resid = xfersize; | |||||
auio.uio_segflg = UIO_SYSSPACE; | |||||
auio.uio_rw = UIO_WRITE; | |||||
auio.uio_td = td; | |||||
error = VOP_WRITE(vp, &auio, 0, cred); | |||||
if (error != 0) { | |||||
len -= xfersize - auio.uio_resid; | |||||
break; | |||||
} | |||||
Done Inline ActionsYielding with the vnode locked might be not the best proposition. kib: Yielding with the vnode locked might be not the best proposition. | |||||
len -= xfersize; | |||||
offset += xfersize; | |||||
} | |||||
free(buf, M_TEMP); | |||||
*residp = len; | |||||
return (error); | |||||
} | |||||
/*
 * Default VOP_DEALLOCATE() implementation: emulate hole punching over
 * [*a_offset, *a_offset + *a_len) by locating existing data regions
 * with FIOSEEKDATA/FIOSEEKHOLE and overwriting them with zeroes via
 * vp_zerofill().  Filesystems with native hole punching should supply
 * their own method.
 *
 * On return, *a_offset and *a_len are advanced past the processed
 * bytes when any progress was made; *a_len is the amount left undone
 * (0 on full success).  NOTE(review): if no progress is made (e.g.
 * the range starts at or beyond EOF) the out-parameters are left
 * untouched — confirm callers expect that.
 */
static int
vop_stddeallocate(struct vop_deallocate_args *ap)
{
	struct vnode *vp;
	off_t offset, resid;
	struct ucred *cred;
	int error;
	struct vattr va;

	vp = ap->a_vp;
	offset = *ap->a_offset;
	resid = *ap->a_len;
	cred = ap->a_cred;

	/* Reject negative ranges and any flag we do not understand. */
	if ((offset < 0 || resid < 0) ||
	    (ap->a_flags & ~SPACECTL_F_SUPPORTED))
		return (EINVAL);
	/* This zero-fill emulation is inherently non-atomic. */
	if (ap->a_flags & SPACECTL_F_ATOMIC)
		return (EOPNOTSUPP);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);
	/* Clamp the request to the current end of file. */
	if ((uint64_t)offset + resid > va.va_size)
		resid = va.va_size - offset;

	while (resid > 0) {
		off_t noff;
		off_t xfersize;
		off_t rem;

		/* Find the next data region at or after offset. */
		noff = offset;
		error = vn_bmap_seekhole_locked(vp, FIOSEEKDATA, &noff, cred);
		if (error) {
			if (error == ENXIO) {
				/* No more data region to be filled */
				/*
				 * NOTE(review): vn_truncate_locked() changes
				 * the file size; VOP_DEALLOCATE() is commonly
				 * expected to be size-preserving — confirm
				 * this shrink-to-(offset + resid) is intended.
				 */
				error = vn_truncate_locked(
				    vp, offset + resid, false, cred);
				if (error)
					goto out;
				offset += resid;
				resid = 0;
				break;
			}
			/* XXX: Is it okay to fallback further? */
			goto out;
		}
		KASSERT(noff >= offset, ("FIOSEEKDATA going backward"));
		if (noff != offset) {
			/* [offset, noff) is already a hole; skip past it. */
			xfersize = omin(noff - offset, resid);
			resid -= xfersize;
			offset += xfersize;
			continue;
		}

		/* offset sits on data; find where this region ends. */
		error = vn_bmap_seekhole_locked(vp, FIOSEEKHOLE, &noff, cred);
		if (error)
			goto out;
		xfersize = noff - offset;
		if (xfersize > resid)
			xfersize = resid;

		/* Fill zeroes */
		error = vp_zerofill(vp, &va, offset, xfersize, &rem, cred);
		if (error) {
			/* Credit the bytes zeroed before the failure. */
			resid -= xfersize - rem;
			offset += xfersize - rem;
			goto out;
		}
		resid -= xfersize;
		offset += xfersize;
	}
out:
	if (*ap->a_offset != offset) {
		*ap->a_offset = offset;
		*ap->a_len = resid;
	}
	return (error);
}
int | int | ||||
vop_stdadvise(struct vop_advise_args *ap) | vop_stdadvise(struct vop_advise_args *ap) | ||||
{ | { | ||||
struct vnode *vp; | struct vnode *vp; | ||||
struct bufobj *bo; | struct bufobj *bo; | ||||
▲ Show 20 Lines • Show All 512 Lines • Show Last 20 Lines |
I think you should try to set iosize to a multiple of the blocksize that fits into zero_region.