diff --git a/sys/compat/linux/linux_file.c b/sys/compat/linux/linux_file.c index 86834a7ecea8..a4be5313aa96 100644 --- a/sys/compat/linux/linux_file.c +++ b/sys/compat/linux/linux_file.c @@ -1,2000 +1,2000 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1994-1995 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef COMPAT_LINUX32 #include #include #include #include #else #include #include #endif #include #include #include static int linux_common_open(struct thread *, int, const char *, int, int, enum uio_seg); static int linux_do_accessat(struct thread *t, int, const char *, int, int); static int linux_getdents_error(struct thread *, int, int); static struct bsd_to_linux_bitmap seal_bitmap[] = { BITMAP_1t1_LINUX(F_SEAL_SEAL), BITMAP_1t1_LINUX(F_SEAL_SHRINK), BITMAP_1t1_LINUX(F_SEAL_GROW), BITMAP_1t1_LINUX(F_SEAL_WRITE), }; #define MFD_HUGETLB_ENTRY(_size) \ { \ .bsd_value = MFD_HUGE_##_size, \ .linux_value = LINUX_HUGETLB_FLAG_ENCODE_##_size \ } static struct bsd_to_linux_bitmap mfd_bitmap[] = { BITMAP_1t1_LINUX(MFD_CLOEXEC), BITMAP_1t1_LINUX(MFD_ALLOW_SEALING), BITMAP_1t1_LINUX(MFD_HUGETLB), MFD_HUGETLB_ENTRY(64KB), MFD_HUGETLB_ENTRY(512KB), MFD_HUGETLB_ENTRY(1MB), MFD_HUGETLB_ENTRY(2MB), MFD_HUGETLB_ENTRY(8MB), MFD_HUGETLB_ENTRY(16MB), MFD_HUGETLB_ENTRY(32MB), MFD_HUGETLB_ENTRY(256MB), MFD_HUGETLB_ENTRY(512MB), MFD_HUGETLB_ENTRY(1GB), MFD_HUGETLB_ENTRY(2GB), MFD_HUGETLB_ENTRY(16GB), }; #undef MFD_HUGETLB_ENTRY #ifdef LINUX_LEGACY_SYSCALLS int linux_creat(struct thread *td, struct linux_creat_args *args) { return (kern_openat(td, AT_FDCWD, args->path, UIO_USERSPACE, O_WRONLY | O_CREAT | O_TRUNC, args->mode)); } #endif int linux_common_openflags(int l_flags) { int bsd_flags; bsd_flags = 0; switch (l_flags & LINUX_O_ACCMODE) { case LINUX_O_WRONLY: bsd_flags |= O_WRONLY; break; case LINUX_O_RDWR: bsd_flags |= O_RDWR; break; default: bsd_flags |= O_RDONLY; } if (l_flags & LINUX_O_NDELAY) bsd_flags |= O_NONBLOCK; if (l_flags & LINUX_O_APPEND) bsd_flags |= O_APPEND; if (l_flags & LINUX_O_SYNC) bsd_flags |= O_FSYNC; if (l_flags & LINUX_O_CLOEXEC) bsd_flags |= O_CLOEXEC; if 
(l_flags & LINUX_O_NONBLOCK) bsd_flags |= O_NONBLOCK; if (l_flags & LINUX_O_ASYNC) bsd_flags |= O_ASYNC; if (l_flags & LINUX_O_CREAT) bsd_flags |= O_CREAT; if (l_flags & LINUX_O_TRUNC) bsd_flags |= O_TRUNC; if (l_flags & LINUX_O_EXCL) bsd_flags |= O_EXCL; if (l_flags & LINUX_O_NOCTTY) bsd_flags |= O_NOCTTY; if (l_flags & LINUX_O_DIRECT) bsd_flags |= O_DIRECT; if (l_flags & LINUX_O_NOFOLLOW) bsd_flags |= O_NOFOLLOW; if (l_flags & LINUX_O_DIRECTORY) bsd_flags |= O_DIRECTORY; if (l_flags & LINUX_O_PATH) bsd_flags |= O_PATH; /* XXX LINUX_O_NOATIME: unable to be easily implemented. */ return (bsd_flags); } static int linux_common_open(struct thread *td, int dirfd, const char *path, int l_flags, int mode, enum uio_seg seg) { struct proc *p = td->td_proc; struct file *fp; int fd; int bsd_flags, error; bsd_flags = linux_common_openflags(l_flags); error = kern_openat(td, dirfd, path, seg, bsd_flags, mode); if (error != 0) { if (error == EMLINK) error = ELOOP; goto done; } if (p->p_flag & P_CONTROLT) goto done; if (bsd_flags & O_NOCTTY) goto done; /* * XXX In between kern_openat() and fget(), another process * having the same filedesc could use that fd without * checking below. */ fd = td->td_retval[0]; if (fget(td, fd, &cap_ioctl_rights, &fp) == 0) { if (fp->f_type != DTYPE_VNODE) { fdrop(fp, td); goto done; } sx_slock(&proctree_lock); PROC_LOCK(p); if (SESS_LEADER(p) && !(p->p_flag & P_CONTROLT)) { PROC_UNLOCK(p); sx_sunlock(&proctree_lock); /* XXXPJD: Verify if TIOCSCTTY is allowed. */ (void) fo_ioctl(fp, TIOCSCTTY, (caddr_t) 0, td->td_ucred, td); } else { PROC_UNLOCK(p); sx_sunlock(&proctree_lock); } fdrop(fp, td); } done: return (error); } int linux_openat(struct thread *td, struct linux_openat_args *args) { int dfd; dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; return (linux_common_open(td, dfd, args->filename, args->flags, args->mode, UIO_USERSPACE)); } #ifdef LINUX_LEGACY_SYSCALLS int linux_open(struct thread *td, struct linux_open_args *args) { return (linux_common_open(td, AT_FDCWD, args->path, args->flags, args->mode, UIO_USERSPACE)); } #endif int linux_name_to_handle_at(struct thread *td, struct linux_name_to_handle_at_args *args) { static const l_int valid_flags = (LINUX_AT_SYMLINK_FOLLOW | LINUX_AT_EMPTY_PATH); static const l_uint fh_size = sizeof(fhandle_t); fhandle_t fh; l_uint fh_bytes; l_int mount_id; int error, fd, bsd_flags; if (args->flags & ~valid_flags) return (EINVAL); fd = args->dirfd; if (fd == LINUX_AT_FDCWD) fd = AT_FDCWD; bsd_flags = 0; if (!(args->flags & LINUX_AT_SYMLINK_FOLLOW)) bsd_flags |= AT_SYMLINK_NOFOLLOW; if ((args->flags & LINUX_AT_EMPTY_PATH) != 0) bsd_flags |= AT_EMPTY_PATH; error = kern_getfhat(td, bsd_flags, fd, args->name, UIO_USERSPACE, &fh, UIO_SYSSPACE); if (error != 0) return (error); /* Emit mount_id -- required before EOVERFLOW case. */ mount_id = (fh.fh_fsid.val[0] ^ fh.fh_fsid.val[1]); error = copyout(&mount_id, args->mnt_id, sizeof(mount_id)); if (error != 0) return (error); /* Check if there is room for handle. */ error = copyin(&args->handle->handle_bytes, &fh_bytes, sizeof(fh_bytes)); if (error != 0) return (error); if (fh_bytes < fh_size) { error = copyout(&fh_size, &args->handle->handle_bytes, sizeof(fh_size)); if (error == 0) error = EOVERFLOW; return (error); } /* Emit handle. */ mount_id = 0; /* * We don't use handle_type for anything yet, but initialize a known * value. 
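 * (For reference, the Linux-side buffer filled by the copyouts below
 * is, per name_to_handle_at(2), roughly:
 *     struct file_handle {
 *             unsigned int    handle_bytes;
 *             int             handle_type;
 *             unsigned char   f_handle[];
 *     };
 * handle_type gets 0 and f_handle receives the FreeBSD fhandle_t,
 * handle_bytes having been validated above.)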
*/ error = copyout(&mount_id, &args->handle->handle_type, sizeof(mount_id)); if (error != 0) return (error); error = copyout(&fh, &args->handle->f_handle, sizeof(fh)); return (error); } int linux_open_by_handle_at(struct thread *td, struct linux_open_by_handle_at_args *args) { l_uint fh_bytes; int bsd_flags, error; error = copyin(&args->handle->handle_bytes, &fh_bytes, sizeof(fh_bytes)); if (error != 0) return (error); if (fh_bytes < sizeof(fhandle_t)) return (EINVAL); bsd_flags = linux_common_openflags(args->flags); return (kern_fhopen(td, (void *)&args->handle->f_handle, bsd_flags)); } int linux_lseek(struct thread *td, struct linux_lseek_args *args) { return (kern_lseek(td, args->fdes, args->off, args->whence)); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) int linux_llseek(struct thread *td, struct linux_llseek_args *args) { int error; off_t off; off = (args->olow) | (((off_t) args->ohigh) << 32); error = kern_lseek(td, args->fd, off, args->whence); if (error != 0) return (error); error = copyout(td->td_retval, args->res, sizeof(off_t)); if (error != 0) return (error); td->td_retval[0] = 0; return (0); } #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ /* * Note that linux_getdents(2) and linux_getdents64(2) have the same * arguments. They only differ in the definition of struct dirent they * operate on. * Note that linux_readdir(2) is a special case of linux_getdents(2) * where count is always equals 1, meaning that the buffer is one * dirent-structure in size and that the code can't handle more anyway. * Note that linux_readdir(2) can't be implemented by means of linux_getdents(2) * as in case when the *dent buffer size is equal to 1 linux_getdents(2) will * trash user stack. */ static int linux_getdents_error(struct thread *td, int fd, int err) { struct vnode *vp; struct file *fp; int error; /* Linux return ENOTDIR in case when fd is not a directory. */ error = getvnode(td, fd, &cap_read_rights, &fp); if (error != 0) return (error); vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return (ENOTDIR); } fdrop(fp, td); return (err); } struct l_dirent { l_ulong d_ino; l_off_t d_off; l_ushort d_reclen; char d_name[LINUX_NAME_MAX + 1]; }; struct l_dirent64 { uint64_t d_ino; int64_t d_off; l_ushort d_reclen; u_char d_type; char d_name[LINUX_NAME_MAX + 1]; }; /* * Linux uses the last byte in the dirent buffer to store d_type, * at least glibc-2.7 requires it. That is why l_dirent is padded with 2 bytes. 
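 * Roughly, a record emitted by the getdents code below is laid out as
 *     d_ino | d_off | d_reclen | d_name ... '\0' | (pad) | d_type
 * i.e. the name is NUL-terminated, the length is rounded up to an
 * l_ulong boundary, and the very last byte of the record is d_type.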
*/ #define LINUX_RECLEN(namlen) \ roundup(offsetof(struct l_dirent, d_name) + (namlen) + 2, sizeof(l_ulong)) #define LINUX_RECLEN64(namlen) \ roundup(offsetof(struct l_dirent64, d_name) + (namlen) + 1, \ sizeof(uint64_t)) #ifdef LINUX_LEGACY_SYSCALLS int linux_getdents(struct thread *td, struct linux_getdents_args *args) { struct dirent *bdp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* Linux-format */ int resid, linuxreclen; /* Linux-format */ caddr_t lbuf; /* Linux-format */ off_t base; struct l_dirent *linux_dirent; int buflen, error; size_t retval; buflen = min(args->count, MAXBSIZE); buf = malloc(buflen, M_LINUX, M_WAITOK); error = kern_getdirentries(td, args->fd, buf, buflen, &base, NULL, UIO_SYSSPACE); if (error != 0) { error = linux_getdents_error(td, args->fd, error); goto out1; } lbuf = malloc(LINUX_RECLEN(LINUX_NAME_MAX), M_LINUX, M_WAITOK | M_ZERO); len = td->td_retval[0]; inp = buf; outp = (caddr_t)args->dent; resid = args->count; retval = 0; while (len > 0) { bdp = (struct dirent *) inp; reclen = bdp->d_reclen; linuxreclen = LINUX_RECLEN(bdp->d_namlen); /* * No more space in the user supplied dirent buffer. * Return EINVAL. */ if (resid < linuxreclen) { error = EINVAL; goto out; } linux_dirent = (struct l_dirent*)lbuf; linux_dirent->d_ino = bdp->d_fileno; linux_dirent->d_off = bdp->d_off; linux_dirent->d_reclen = linuxreclen; /* * Copy d_type to last byte of l_dirent buffer */ lbuf[linuxreclen - 1] = bdp->d_type; strlcpy(linux_dirent->d_name, bdp->d_name, linuxreclen - offsetof(struct l_dirent, d_name)-1); error = copyout(linux_dirent, outp, linuxreclen); if (error != 0) goto out; inp += reclen; base += reclen; len -= reclen; retval += linuxreclen; outp += linuxreclen; resid -= linuxreclen; } td->td_retval[0] = retval; out: free(lbuf, M_LINUX); out1: free(buf, M_LINUX); return (error); } #endif int linux_getdents64(struct thread *td, struct linux_getdents64_args *args) { struct dirent *bdp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* Linux-format */ int resid, linuxreclen; /* Linux-format */ off_t base; struct l_dirent64 *linux_dirent64; int buflen, error; size_t retval; buflen = min(args->count, MAXBSIZE); buf = malloc(buflen, M_LINUX, M_WAITOK); error = kern_getdirentries(td, args->fd, buf, buflen, &base, NULL, UIO_SYSSPACE); if (error != 0) { error = linux_getdents_error(td, args->fd, error); goto out1; } linux_dirent64 = malloc(LINUX_RECLEN64(LINUX_NAME_MAX), M_LINUX, M_WAITOK | M_ZERO); len = td->td_retval[0]; inp = buf; outp = (caddr_t)args->dirent; resid = args->count; retval = 0; while (len > 0) { bdp = (struct dirent *) inp; reclen = bdp->d_reclen; linuxreclen = LINUX_RECLEN64(bdp->d_namlen); /* * No more space in the user supplied dirent buffer. * Return EINVAL. 
*/ if (resid < linuxreclen) { error = EINVAL; goto out; } linux_dirent64->d_ino = bdp->d_fileno; linux_dirent64->d_off = bdp->d_off; linux_dirent64->d_reclen = linuxreclen; linux_dirent64->d_type = bdp->d_type; strlcpy(linux_dirent64->d_name, bdp->d_name, linuxreclen - offsetof(struct l_dirent64, d_name)); error = copyout(linux_dirent64, outp, linuxreclen); if (error != 0) goto out; inp += reclen; base += reclen; len -= reclen; retval += linuxreclen; outp += linuxreclen; resid -= linuxreclen; } td->td_retval[0] = retval; out: free(linux_dirent64, M_LINUX); out1: free(buf, M_LINUX); return (error); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) int linux_readdir(struct thread *td, struct linux_readdir_args *args) { struct dirent *bdp; caddr_t buf; /* BSD-format */ int linuxreclen; /* Linux-format */ off_t base; struct l_dirent *linux_dirent; /* Linux-format */ int buflen, error; buflen = sizeof(*bdp); buf = malloc(buflen, M_LINUX, M_WAITOK); error = kern_getdirentries(td, args->fd, buf, buflen, &base, NULL, UIO_SYSSPACE); if (error != 0) { error = linux_getdents_error(td, args->fd, error); goto out; } if (td->td_retval[0] == 0) goto out; linux_dirent = malloc(LINUX_RECLEN(LINUX_NAME_MAX), M_LINUX, M_WAITOK | M_ZERO); bdp = (struct dirent *) buf; linuxreclen = LINUX_RECLEN(bdp->d_namlen); linux_dirent->d_ino = bdp->d_fileno; linux_dirent->d_off = bdp->d_off; linux_dirent->d_reclen = bdp->d_namlen; strlcpy(linux_dirent->d_name, bdp->d_name, linuxreclen - offsetof(struct l_dirent, d_name)); error = copyout(linux_dirent, args->dent, linuxreclen); if (error == 0) td->td_retval[0] = linuxreclen; free(linux_dirent, M_LINUX); out: free(buf, M_LINUX); return (error); } #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ /* * These exist mainly for hooks for doing /compat/linux translation. */ #ifdef LINUX_LEGACY_SYSCALLS int linux_access(struct thread *td, struct linux_access_args *args) { /* Linux convention. */ if (args->amode & ~(F_OK | X_OK | W_OK | R_OK)) return (EINVAL); return (kern_accessat(td, AT_FDCWD, args->path, UIO_USERSPACE, 0, args->amode)); } #endif static int linux_do_accessat(struct thread *td, int ldfd, const char *filename, int amode, int flags) { int dfd; /* Linux convention. */ if (amode & ~(F_OK | X_OK | W_OK | R_OK)) return (EINVAL); dfd = (ldfd == LINUX_AT_FDCWD) ? AT_FDCWD : ldfd; return (kern_accessat(td, dfd, filename, UIO_USERSPACE, flags, amode)); } int linux_faccessat(struct thread *td, struct linux_faccessat_args *args) { return (linux_do_accessat(td, args->dfd, args->filename, args->amode, 0)); } int linux_faccessat2(struct thread *td, struct linux_faccessat2_args *args) { int flags, unsupported; unsupported = args->flags & ~(LINUX_AT_EACCESS | LINUX_AT_EMPTY_PATH | LINUX_AT_SYMLINK_NOFOLLOW); if (unsupported != 0) { linux_msg(td, "faccessat2 unsupported flag 0x%x", unsupported); return (EINVAL); } flags = (args->flags & LINUX_AT_EACCESS) == 0 ? 0 : AT_EACCESS; flags |= (args->flags & LINUX_AT_EMPTY_PATH) == 0 ? 0 : AT_EMPTY_PATH; flags |= (args->flags & LINUX_AT_SYMLINK_NOFOLLOW) == 0 ? 
0 : AT_SYMLINK_NOFOLLOW; return (linux_do_accessat(td, args->dfd, args->filename, args->amode, flags)); } #ifdef LINUX_LEGACY_SYSCALLS int linux_unlink(struct thread *td, struct linux_unlink_args *args) { int error; struct stat st; error = kern_funlinkat(td, AT_FDCWD, args->path, FD_NONE, UIO_USERSPACE, 0, 0); if (error == EPERM) { /* Introduce POSIX noncompliant behaviour of Linux */ if (kern_statat(td, 0, AT_FDCWD, args->path, UIO_USERSPACE, &st) == 0) { if (S_ISDIR(st.st_mode)) error = EISDIR; } } return (error); } #endif static int linux_unlinkat_impl(struct thread *td, enum uio_seg pathseg, const char *path, int dfd, struct linux_unlinkat_args *args) { struct stat st; int error; if (args->flag & LINUX_AT_REMOVEDIR) error = kern_frmdirat(td, dfd, path, FD_NONE, pathseg, 0); else error = kern_funlinkat(td, dfd, path, FD_NONE, pathseg, 0, 0); if (error == EPERM && !(args->flag & LINUX_AT_REMOVEDIR)) { /* Introduce POSIX noncompliant behaviour of Linux */ if (kern_statat(td, AT_SYMLINK_NOFOLLOW, dfd, path, pathseg, &st) == 0 && S_ISDIR(st.st_mode)) error = EISDIR; } return (error); } int linux_unlinkat(struct thread *td, struct linux_unlinkat_args *args) { int dfd; if (args->flag & ~LINUX_AT_REMOVEDIR) return (EINVAL); dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; return (linux_unlinkat_impl(td, UIO_USERSPACE, args->pathname, dfd, args)); } int linux_chdir(struct thread *td, struct linux_chdir_args *args) { return (kern_chdir(td, args->path, UIO_USERSPACE)); } #ifdef LINUX_LEGACY_SYSCALLS int linux_chmod(struct thread *td, struct linux_chmod_args *args) { return (kern_fchmodat(td, AT_FDCWD, args->path, UIO_USERSPACE, args->mode, 0)); } #endif int linux_fchmodat(struct thread *td, struct linux_fchmodat_args *args) { int dfd; dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; return (kern_fchmodat(td, dfd, args->filename, UIO_USERSPACE, args->mode, 0)); } #ifdef LINUX_LEGACY_SYSCALLS int linux_mkdir(struct thread *td, struct linux_mkdir_args *args) { return (kern_mkdirat(td, AT_FDCWD, args->path, UIO_USERSPACE, args->mode)); } #endif int linux_mkdirat(struct thread *td, struct linux_mkdirat_args *args) { int dfd; dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; return (kern_mkdirat(td, dfd, args->pathname, UIO_USERSPACE, args->mode)); } #ifdef LINUX_LEGACY_SYSCALLS int linux_rmdir(struct thread *td, struct linux_rmdir_args *args) { return (kern_frmdirat(td, AT_FDCWD, args->path, FD_NONE, UIO_USERSPACE, 0)); } int linux_rename(struct thread *td, struct linux_rename_args *args) { return (kern_renameat(td, AT_FDCWD, args->from, AT_FDCWD, args->to, UIO_USERSPACE)); } #endif int linux_renameat(struct thread *td, struct linux_renameat_args *args) { struct linux_renameat2_args renameat2_args = { .olddfd = args->olddfd, .oldname = args->oldname, .newdfd = args->newdfd, .newname = args->newname, .flags = 0 }; return (linux_renameat2(td, &renameat2_args)); } int linux_renameat2(struct thread *td, struct linux_renameat2_args *args) { int olddfd, newdfd; if (args->flags != 0) { if (args->flags & ~(LINUX_RENAME_EXCHANGE | LINUX_RENAME_NOREPLACE | LINUX_RENAME_WHITEOUT)) return (EINVAL); if (args->flags & LINUX_RENAME_EXCHANGE && args->flags & (LINUX_RENAME_NOREPLACE | LINUX_RENAME_WHITEOUT)) return (EINVAL); #if 0 /* * This spams the console on Ubuntu Focal. * * What's needed here is a general mechanism to let users know * about missing features without hogging the system. 
*/ linux_msg(td, "renameat2 unsupported flags 0x%x", args->flags); #endif return (EINVAL); } olddfd = (args->olddfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->olddfd; newdfd = (args->newdfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->newdfd; return (kern_renameat(td, olddfd, args->oldname, newdfd, args->newname, UIO_USERSPACE)); } #ifdef LINUX_LEGACY_SYSCALLS int linux_symlink(struct thread *td, struct linux_symlink_args *args) { return (kern_symlinkat(td, args->path, AT_FDCWD, args->to, UIO_USERSPACE)); } #endif int linux_symlinkat(struct thread *td, struct linux_symlinkat_args *args) { int dfd; dfd = (args->newdfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->newdfd; return (kern_symlinkat(td, args->oldname, dfd, args->newname, UIO_USERSPACE)); } #ifdef LINUX_LEGACY_SYSCALLS int linux_readlink(struct thread *td, struct linux_readlink_args *args) { if (args->count <= 0) return (EINVAL); return (kern_readlinkat(td, AT_FDCWD, args->name, UIO_USERSPACE, args->buf, UIO_USERSPACE, args->count)); } #endif int linux_readlinkat(struct thread *td, struct linux_readlinkat_args *args) { int dfd; if (args->bufsiz <= 0) return (EINVAL); dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd; return (kern_readlinkat(td, dfd, args->path, UIO_USERSPACE, args->buf, UIO_USERSPACE, args->bufsiz)); } int linux_truncate(struct thread *td, struct linux_truncate_args *args) { return (kern_truncate(td, args->path, UIO_USERSPACE, args->length)); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) int linux_truncate64(struct thread *td, struct linux_truncate64_args *args) { off_t length; #if defined(__amd64__) && defined(COMPAT_LINUX32) length = PAIR32TO64(off_t, args->length); #else length = args->length; #endif return (kern_truncate(td, args->path, UIO_USERSPACE, length)); } #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ int linux_ftruncate(struct thread *td, struct linux_ftruncate_args *args) { return (kern_ftruncate(td, args->fd, args->length)); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) int linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args) { off_t length; #if defined(__amd64__) && defined(COMPAT_LINUX32) length = PAIR32TO64(off_t, args->length); #else length = args->length; #endif return (kern_ftruncate(td, args->fd, length)); } #endif #ifdef LINUX_LEGACY_SYSCALLS int linux_link(struct thread *td, struct linux_link_args *args) { return (kern_linkat(td, AT_FDCWD, AT_FDCWD, args->path, args->to, UIO_USERSPACE, AT_SYMLINK_FOLLOW)); } #endif int linux_linkat(struct thread *td, struct linux_linkat_args *args) { int olddfd, newdfd, flag; if (args->flag & ~(LINUX_AT_SYMLINK_FOLLOW | LINUX_AT_EMPTY_PATH)) return (EINVAL); flag = (args->flag & LINUX_AT_SYMLINK_FOLLOW) != 0 ? AT_SYMLINK_FOLLOW : 0; flag |= (args->flag & LINUX_AT_EMPTY_PATH) != 0 ? AT_EMPTY_PATH : 0; olddfd = (args->olddfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->olddfd; newdfd = (args->newdfd == LINUX_AT_FDCWD) ? 
AT_FDCWD : args->newdfd; return (kern_linkat(td, olddfd, newdfd, args->oldname, args->newname, UIO_USERSPACE, flag)); } int linux_fdatasync(struct thread *td, struct linux_fdatasync_args *uap) { return (kern_fsync(td, uap->fd, false)); } int linux_sync_file_range(struct thread *td, struct linux_sync_file_range_args *uap) { off_t nbytes, offset; #if defined(__amd64__) && defined(COMPAT_LINUX32) nbytes = PAIR32TO64(off_t, uap->nbytes); offset = PAIR32TO64(off_t, uap->offset); #else nbytes = uap->nbytes; offset = uap->offset; #endif if (offset < 0 || nbytes < 0 || (uap->flags & ~(LINUX_SYNC_FILE_RANGE_WAIT_BEFORE | LINUX_SYNC_FILE_RANGE_WRITE | LINUX_SYNC_FILE_RANGE_WAIT_AFTER)) != 0) { return (EINVAL); } return (kern_fsync(td, uap->fd, false)); } int linux_pread(struct thread *td, struct linux_pread_args *uap) { struct vnode *vp; off_t offset; int error; #if defined(__amd64__) && defined(COMPAT_LINUX32) offset = PAIR32TO64(off_t, uap->offset); #else offset = uap->offset; #endif error = kern_pread(td, uap->fd, uap->buf, uap->nbyte, offset); if (error == 0) { /* This seems to violate POSIX but Linux does it. */ error = fgetvp(td, uap->fd, &cap_pread_rights, &vp); if (error != 0) return (error); if (vp->v_type == VDIR) error = EISDIR; vrele(vp); } return (error); } int linux_pwrite(struct thread *td, struct linux_pwrite_args *uap) { off_t offset; #if defined(__amd64__) && defined(COMPAT_LINUX32) offset = PAIR32TO64(off_t, uap->offset); #else offset = uap->offset; #endif return (linux_enobufs2eagain(td, uap->fd, kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, offset))); } #define HALF_LONG_BITS ((sizeof(l_long) * NBBY / 2)) static inline off_t pos_from_hilo(unsigned long high, unsigned long low) { return (((off_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low; } int linux_preadv(struct thread *td, struct linux_preadv_args *uap) { struct uio *auio; int error; off_t offset; /* * According http://man7.org/linux/man-pages/man2/preadv.2.html#NOTES * pos_l and pos_h, respectively, contain the * low order and high order 32 bits of offset. */ offset = pos_from_hilo(uap->pos_h, uap->pos_l); if (offset < 0) return (EINVAL); #ifdef COMPAT_LINUX32 error = freebsd32_copyinuio(PTRIN(uap->vec), uap->vlen, &auio); #else error = copyinuio(uap->vec, uap->vlen, &auio); #endif if (error != 0) return (error); error = kern_preadv(td, uap->fd, auio, offset); freeuio(auio); return (error); } int linux_pwritev(struct thread *td, struct linux_pwritev_args *uap) { struct uio *auio; int error; off_t offset; /* * According http://man7.org/linux/man-pages/man2/pwritev.2.html#NOTES * pos_l and pos_h, respectively, contain the * low order and high order 32 bits of offset. 
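 * As a worked example with a 32-bit l_long (COMPAT_LINUX32, where
 * HALF_LONG_BITS is 16): pos_h = 1, pos_l = 0 gives pos_from_hilo()
 * == (off_t)1 << 32, i.e. a 4 GiB offset.  The shift is split into two
 * halves because a single shift by the full width of l_long would be
 * undefined when l_long is 64 bits wide.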
*/ offset = pos_from_hilo(uap->pos_h, uap->pos_l); if (offset < 0) return (EINVAL); #ifdef COMPAT_LINUX32 error = freebsd32_copyinuio(PTRIN(uap->vec), uap->vlen, &auio); #else error = copyinuio(uap->vec, uap->vlen, &auio); #endif if (error != 0) return (error); error = kern_pwritev(td, uap->fd, auio, offset); freeuio(auio); return (linux_enobufs2eagain(td, uap->fd, error)); } int linux_mount(struct thread *td, struct linux_mount_args *args) { struct mntarg *ma = NULL; char *fstypename, *mntonname, *mntfromname, *data; int error, fsflags; fstypename = malloc(MNAMELEN, M_TEMP, M_WAITOK); mntonname = malloc(MNAMELEN, M_TEMP, M_WAITOK); mntfromname = malloc(MNAMELEN, M_TEMP, M_WAITOK); data = NULL; error = copyinstr(args->filesystemtype, fstypename, MNAMELEN - 1, NULL); if (error != 0) goto out; if (args->specialfile != NULL) { error = copyinstr(args->specialfile, mntfromname, MNAMELEN - 1, NULL); if (error != 0) goto out; } else { mntfromname[0] = '\0'; } error = copyinstr(args->dir, mntonname, MNAMELEN - 1, NULL); if (error != 0) goto out; if (strcmp(fstypename, "ext2") == 0) { strcpy(fstypename, "ext2fs"); } else if (strcmp(fstypename, "proc") == 0) { strcpy(fstypename, "linprocfs"); } else if (strcmp(fstypename, "vfat") == 0) { strcpy(fstypename, "msdosfs"); } else if (strcmp(fstypename, "fuse") == 0 || strncmp(fstypename, "fuse.", 5) == 0) { char *fuse_options, *fuse_option, *fuse_name; strcpy(mntfromname, "/dev/fuse"); strcpy(fstypename, "fusefs"); data = malloc(MNAMELEN, M_TEMP, M_WAITOK); error = copyinstr(args->data, data, MNAMELEN - 1, NULL); if (error != 0) goto out; fuse_options = data; while ((fuse_option = strsep(&fuse_options, ",")) != NULL) { fuse_name = strsep(&fuse_option, "="); if (fuse_name == NULL || fuse_option == NULL) goto out; ma = mount_arg(ma, fuse_name, fuse_option, -1); } /* * The FUSE server uses Linux errno values instead of FreeBSD * ones; add a flag to tell fuse(4) to do errno translation. */ ma = mount_arg(ma, "linux_errnos", "1", -1); } fsflags = 0; /* * Linux SYNC flag is not included; the closest equivalent * FreeBSD has is !ASYNC, which is our default. 
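 * For example, a Linux mount("none", "/proc", "proc",
 * MS_RDONLY | MS_NOEXEC, NULL) ends up as a kernel_mount() of
 * "linprocfs" with MNT_RDONLY | MNT_NOEXEC, given the fstype
 * renaming above and the rwflag translation below.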
*/ if (args->rwflag & LINUX_MS_RDONLY) fsflags |= MNT_RDONLY; if (args->rwflag & LINUX_MS_NOSUID) fsflags |= MNT_NOSUID; if (args->rwflag & LINUX_MS_NOEXEC) fsflags |= MNT_NOEXEC; if (args->rwflag & LINUX_MS_REMOUNT) fsflags |= MNT_UPDATE; ma = mount_arg(ma, "fstype", fstypename, -1); ma = mount_arg(ma, "fspath", mntonname, -1); ma = mount_arg(ma, "from", mntfromname, -1); error = kernel_mount(ma, fsflags); out: free(fstypename, M_TEMP); free(mntonname, M_TEMP); free(mntfromname, M_TEMP); free(data, M_TEMP); return (error); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) int linux_oldumount(struct thread *td, struct linux_oldumount_args *args) { return (kern_unmount(td, args->path, 0)); } #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ #ifdef LINUX_LEGACY_SYSCALLS int linux_umount(struct thread *td, struct linux_umount_args *args) { int flags; flags = 0; if ((args->flags & LINUX_MNT_FORCE) != 0) { args->flags &= ~LINUX_MNT_FORCE; flags |= MNT_FORCE; } if (args->flags != 0) { linux_msg(td, "unsupported umount2 flags %#x", args->flags); return (EINVAL); } return (kern_unmount(td, args->path, flags)); } #endif /* * fcntl family of syscalls */ struct l_flock { l_short l_type; l_short l_whence; l_off_t l_start; l_off_t l_len; l_pid_t l_pid; } #if defined(__amd64__) && defined(COMPAT_LINUX32) __packed #endif ; static void linux_to_bsd_flock(struct l_flock *linux_flock, struct flock *bsd_flock) { switch (linux_flock->l_type) { case LINUX_F_RDLCK: bsd_flock->l_type = F_RDLCK; break; case LINUX_F_WRLCK: bsd_flock->l_type = F_WRLCK; break; case LINUX_F_UNLCK: bsd_flock->l_type = F_UNLCK; break; default: bsd_flock->l_type = -1; break; } bsd_flock->l_whence = linux_flock->l_whence; bsd_flock->l_start = (off_t)linux_flock->l_start; bsd_flock->l_len = (off_t)linux_flock->l_len; bsd_flock->l_pid = (pid_t)linux_flock->l_pid; bsd_flock->l_sysid = 0; } static void bsd_to_linux_flock(struct flock *bsd_flock, struct l_flock *linux_flock) { switch (bsd_flock->l_type) { case F_RDLCK: linux_flock->l_type = LINUX_F_RDLCK; break; case F_WRLCK: linux_flock->l_type = LINUX_F_WRLCK; break; case F_UNLCK: linux_flock->l_type = LINUX_F_UNLCK; break; } linux_flock->l_whence = bsd_flock->l_whence; linux_flock->l_start = (l_off_t)bsd_flock->l_start; linux_flock->l_len = (l_off_t)bsd_flock->l_len; linux_flock->l_pid = (l_pid_t)bsd_flock->l_pid; } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) struct l_flock64 { l_short l_type; l_short l_whence; l_loff_t l_start; l_loff_t l_len; l_pid_t l_pid; } #if defined(__amd64__) && defined(COMPAT_LINUX32) __packed #endif ; static void linux_to_bsd_flock64(struct l_flock64 *linux_flock, struct flock *bsd_flock) { switch (linux_flock->l_type) { case LINUX_F_RDLCK: bsd_flock->l_type = F_RDLCK; break; case LINUX_F_WRLCK: bsd_flock->l_type = F_WRLCK; break; case LINUX_F_UNLCK: bsd_flock->l_type = F_UNLCK; break; default: bsd_flock->l_type = -1; break; } bsd_flock->l_whence = linux_flock->l_whence; bsd_flock->l_start = (off_t)linux_flock->l_start; bsd_flock->l_len = (off_t)linux_flock->l_len; bsd_flock->l_pid = (pid_t)linux_flock->l_pid; bsd_flock->l_sysid = 0; } static void bsd_to_linux_flock64(struct flock *bsd_flock, struct l_flock64 *linux_flock) { switch (bsd_flock->l_type) { case F_RDLCK: linux_flock->l_type = LINUX_F_RDLCK; break; case F_WRLCK: linux_flock->l_type = LINUX_F_WRLCK; break; case F_UNLCK: linux_flock->l_type = LINUX_F_UNLCK; break; } linux_flock->l_whence = bsd_flock->l_whence; linux_flock->l_start = 
(l_loff_t)bsd_flock->l_start; linux_flock->l_len = (l_loff_t)bsd_flock->l_len; linux_flock->l_pid = (l_pid_t)bsd_flock->l_pid; } #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ static int fcntl_common(struct thread *td, struct linux_fcntl_args *args) { struct l_flock linux_flock; struct flock bsd_flock; struct pipe *fpipe; struct file *fp; long arg; int error, result; switch (args->cmd) { case LINUX_F_DUPFD: return (kern_fcntl(td, args->fd, F_DUPFD, args->arg)); case LINUX_F_GETFD: return (kern_fcntl(td, args->fd, F_GETFD, 0)); case LINUX_F_SETFD: return (kern_fcntl(td, args->fd, F_SETFD, args->arg)); case LINUX_F_GETFL: error = kern_fcntl(td, args->fd, F_GETFL, 0); result = td->td_retval[0]; td->td_retval[0] = 0; if (result & O_RDONLY) td->td_retval[0] |= LINUX_O_RDONLY; if (result & O_WRONLY) td->td_retval[0] |= LINUX_O_WRONLY; if (result & O_RDWR) td->td_retval[0] |= LINUX_O_RDWR; if (result & O_NDELAY) td->td_retval[0] |= LINUX_O_NONBLOCK; if (result & O_APPEND) td->td_retval[0] |= LINUX_O_APPEND; if (result & O_FSYNC) td->td_retval[0] |= LINUX_O_SYNC; if (result & O_ASYNC) td->td_retval[0] |= LINUX_O_ASYNC; #ifdef LINUX_O_NOFOLLOW if (result & O_NOFOLLOW) td->td_retval[0] |= LINUX_O_NOFOLLOW; #endif #ifdef LINUX_O_DIRECT if (result & O_DIRECT) td->td_retval[0] |= LINUX_O_DIRECT; #endif return (error); case LINUX_F_SETFL: arg = 0; if (args->arg & LINUX_O_NDELAY) arg |= O_NONBLOCK; if (args->arg & LINUX_O_APPEND) arg |= O_APPEND; if (args->arg & LINUX_O_SYNC) arg |= O_FSYNC; if (args->arg & LINUX_O_ASYNC) arg |= O_ASYNC; #ifdef LINUX_O_NOFOLLOW if (args->arg & LINUX_O_NOFOLLOW) arg |= O_NOFOLLOW; #endif #ifdef LINUX_O_DIRECT if (args->arg & LINUX_O_DIRECT) arg |= O_DIRECT; #endif return (kern_fcntl(td, args->fd, F_SETFL, arg)); case LINUX_F_GETLK: error = copyin((void *)args->arg, &linux_flock, sizeof(linux_flock)); if (error) return (error); linux_to_bsd_flock(&linux_flock, &bsd_flock); error = kern_fcntl(td, args->fd, F_GETLK, (intptr_t)&bsd_flock); if (error) return (error); bsd_to_linux_flock(&bsd_flock, &linux_flock); return (copyout(&linux_flock, (void *)args->arg, sizeof(linux_flock))); case LINUX_F_SETLK: error = copyin((void *)args->arg, &linux_flock, sizeof(linux_flock)); if (error) return (error); linux_to_bsd_flock(&linux_flock, &bsd_flock); return (kern_fcntl(td, args->fd, F_SETLK, (intptr_t)&bsd_flock)); case LINUX_F_SETLKW: error = copyin((void *)args->arg, &linux_flock, sizeof(linux_flock)); if (error) return (error); linux_to_bsd_flock(&linux_flock, &bsd_flock); return (kern_fcntl(td, args->fd, F_SETLKW, (intptr_t)&bsd_flock)); case LINUX_F_GETOWN: return (kern_fcntl(td, args->fd, F_GETOWN, 0)); case LINUX_F_SETOWN: /* * XXX some Linux applications depend on F_SETOWN having no * significant effect for pipes (SIGIO is not delivered for * pipes under Linux-2.2.35 at least). */ error = fget(td, args->fd, &cap_fcntl_rights, &fp); if (error) return (error); if (fp->f_type == DTYPE_PIPE) { fdrop(fp, td); return (EINVAL); } fdrop(fp, td); return (kern_fcntl(td, args->fd, F_SETOWN, args->arg)); case LINUX_F_DUPFD_CLOEXEC: return (kern_fcntl(td, args->fd, F_DUPFD_CLOEXEC, args->arg)); /* * Our F_SEAL_* values match Linux one for maximum compatibility. So we * only needed to account for different values for fcntl(2) commands. 
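 * For example, a Linux fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW)
 * arrives as LINUX_F_ADD_SEALS below and the seal bits pass through
 * seal_bitmap unchanged, since they are defined identically on both sides.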
*/ case LINUX_F_GET_SEALS: error = kern_fcntl(td, args->fd, F_GET_SEALS, 0); if (error != 0) return (error); td->td_retval[0] = bsd_to_linux_bits(td->td_retval[0], seal_bitmap, 0); return (0); case LINUX_F_ADD_SEALS: return (kern_fcntl(td, args->fd, F_ADD_SEALS, linux_to_bsd_bits(args->arg, seal_bitmap, 0))); case LINUX_F_GETPIPE_SZ: error = fget(td, args->fd, &cap_fcntl_rights, &fp); if (error != 0) return (error); if (fp->f_type != DTYPE_PIPE) { fdrop(fp, td); return (EINVAL); } fpipe = fp->f_data; td->td_retval[0] = fpipe->pipe_buffer.size; fdrop(fp, td); return (0); default: linux_msg(td, "unsupported fcntl cmd %d", args->cmd); return (EINVAL); } } int linux_fcntl(struct thread *td, struct linux_fcntl_args *args) { return (fcntl_common(td, args)); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) int linux_fcntl64(struct thread *td, struct linux_fcntl64_args *args) { struct l_flock64 linux_flock; struct flock bsd_flock; struct linux_fcntl_args fcntl_args; int error; switch (args->cmd) { case LINUX_F_GETLK64: error = copyin((void *)args->arg, &linux_flock, sizeof(linux_flock)); if (error) return (error); linux_to_bsd_flock64(&linux_flock, &bsd_flock); error = kern_fcntl(td, args->fd, F_GETLK, (intptr_t)&bsd_flock); if (error) return (error); bsd_to_linux_flock64(&bsd_flock, &linux_flock); return (copyout(&linux_flock, (void *)args->arg, sizeof(linux_flock))); case LINUX_F_SETLK64: error = copyin((void *)args->arg, &linux_flock, sizeof(linux_flock)); if (error) return (error); linux_to_bsd_flock64(&linux_flock, &bsd_flock); return (kern_fcntl(td, args->fd, F_SETLK, (intptr_t)&bsd_flock)); case LINUX_F_SETLKW64: error = copyin((void *)args->arg, &linux_flock, sizeof(linux_flock)); if (error) return (error); linux_to_bsd_flock64(&linux_flock, &bsd_flock); return (kern_fcntl(td, args->fd, F_SETLKW, (intptr_t)&bsd_flock)); } fcntl_args.fd = args->fd; fcntl_args.cmd = args->cmd; fcntl_args.arg = args->arg; return (fcntl_common(td, &fcntl_args)); } #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ #ifdef LINUX_LEGACY_SYSCALLS int linux_chown(struct thread *td, struct linux_chown_args *args) { return (kern_fchownat(td, AT_FDCWD, args->path, UIO_USERSPACE, args->uid, args->gid, 0)); } #endif int linux_fchownat(struct thread *td, struct linux_fchownat_args *args) { int dfd, flag, unsupported; unsupported = args->flag & ~(LINUX_AT_SYMLINK_NOFOLLOW | LINUX_AT_EMPTY_PATH); if (unsupported != 0) { linux_msg(td, "fchownat unsupported flag 0x%x", unsupported); return (EINVAL); } flag = (args->flag & LINUX_AT_SYMLINK_NOFOLLOW) == 0 ? 0 : AT_SYMLINK_NOFOLLOW; flag |= (args->flag & LINUX_AT_EMPTY_PATH) == 0 ? 0 : AT_EMPTY_PATH; dfd = (args->dfd == LINUX_AT_FDCWD) ? 
AT_FDCWD : args->dfd; return (kern_fchownat(td, dfd, args->filename, UIO_USERSPACE, args->uid, args->gid, flag)); } #ifdef LINUX_LEGACY_SYSCALLS int linux_lchown(struct thread *td, struct linux_lchown_args *args) { return (kern_fchownat(td, AT_FDCWD, args->path, UIO_USERSPACE, args->uid, args->gid, AT_SYMLINK_NOFOLLOW)); } #endif static int convert_fadvice(int advice) { switch (advice) { case LINUX_POSIX_FADV_NORMAL: return (POSIX_FADV_NORMAL); case LINUX_POSIX_FADV_RANDOM: return (POSIX_FADV_RANDOM); case LINUX_POSIX_FADV_SEQUENTIAL: return (POSIX_FADV_SEQUENTIAL); case LINUX_POSIX_FADV_WILLNEED: return (POSIX_FADV_WILLNEED); case LINUX_POSIX_FADV_DONTNEED: return (POSIX_FADV_DONTNEED); case LINUX_POSIX_FADV_NOREUSE: return (POSIX_FADV_NOREUSE); default: return (-1); } } int linux_fadvise64(struct thread *td, struct linux_fadvise64_args *args) { off_t offset; int advice; #if defined(__amd64__) && defined(COMPAT_LINUX32) offset = PAIR32TO64(off_t, args->offset); #else offset = args->offset; #endif advice = convert_fadvice(args->advice); if (advice == -1) return (EINVAL); return (kern_posix_fadvise(td, args->fd, offset, args->len, advice)); } #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32)) int linux_fadvise64_64(struct thread *td, struct linux_fadvise64_64_args *args) { off_t len, offset; int advice; #if defined(__amd64__) && defined(COMPAT_LINUX32) len = PAIR32TO64(off_t, args->len); offset = PAIR32TO64(off_t, args->offset); #else len = args->len; offset = args->offset; #endif advice = convert_fadvice(args->advice); if (advice == -1) return (EINVAL); return (kern_posix_fadvise(td, args->fd, offset, len, advice)); } #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */ #ifdef LINUX_LEGACY_SYSCALLS int linux_pipe(struct thread *td, struct linux_pipe_args *args) { int fildes[2]; int error; error = kern_pipe(td, fildes, 0, NULL, NULL); if (error != 0) return (error); error = copyout(fildes, args->pipefds, sizeof(fildes)); if (error != 0) { (void)kern_close(td, fildes[0]); (void)kern_close(td, fildes[1]); } return (error); } #endif int linux_pipe2(struct thread *td, struct linux_pipe2_args *args) { int fildes[2]; int error, flags; if ((args->flags & ~(LINUX_O_NONBLOCK | LINUX_O_CLOEXEC)) != 0) return (EINVAL); flags = 0; if ((args->flags & LINUX_O_NONBLOCK) != 0) flags |= O_NONBLOCK; if ((args->flags & LINUX_O_CLOEXEC) != 0) flags |= O_CLOEXEC; error = kern_pipe(td, fildes, flags, NULL, NULL); if (error != 0) return (error); error = copyout(fildes, args->pipefds, sizeof(fildes)); if (error != 0) { (void)kern_close(td, fildes[0]); (void)kern_close(td, fildes[1]); } return (error); } int linux_dup3(struct thread *td, struct linux_dup3_args *args) { int cmd; intptr_t newfd; if (args->oldfd == args->newfd) return (EINVAL); if ((args->flags & ~LINUX_O_CLOEXEC) != 0) return (EINVAL); if (args->flags & LINUX_O_CLOEXEC) cmd = F_DUP2FD_CLOEXEC; else cmd = F_DUP2FD; newfd = args->newfd; return (kern_fcntl(td, args->oldfd, cmd, newfd)); } int linux_fallocate(struct thread *td, struct linux_fallocate_args *args) { off_t len, offset; /* * We emulate only posix_fallocate system call for which * mode should be 0. 
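 * Any nonzero Linux mode bits (e.g. FALLOC_FL_KEEP_SIZE or
 * FALLOC_FL_PUNCH_HOLE) therefore fail with EOPNOTSUPP below.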
*/ if (args->mode != 0) return (EOPNOTSUPP); #if defined(__amd64__) && defined(COMPAT_LINUX32) len = PAIR32TO64(off_t, args->len); offset = PAIR32TO64(off_t, args->offset); #else len = args->len; offset = args->offset; #endif return (kern_posix_fallocate(td, args->fd, offset, len)); } int linux_copy_file_range(struct thread *td, struct linux_copy_file_range_args *args) { l_loff_t inoff, outoff, *inoffp, *outoffp; int error, flags; /* * copy_file_range(2) on Linux doesn't define any flags (yet), so is * the native implementation. Enforce it. */ if (args->flags != 0) { linux_msg(td, "copy_file_range unsupported flags 0x%x", args->flags); return (EINVAL); } flags = 0; inoffp = outoffp = NULL; if (args->off_in != NULL) { error = copyin(args->off_in, &inoff, sizeof(l_loff_t)); if (error != 0) return (error); inoffp = &inoff; } if (args->off_out != NULL) { error = copyin(args->off_out, &outoff, sizeof(l_loff_t)); if (error != 0) return (error); outoffp = &outoff; } error = kern_copy_file_range(td, args->fd_in, inoffp, args->fd_out, outoffp, args->len, flags); if (error == 0 && args->off_in != NULL) error = copyout(inoffp, args->off_in, sizeof(l_loff_t)); if (error == 0 && args->off_out != NULL) error = copyout(outoffp, args->off_out, sizeof(l_loff_t)); return (error); } #define LINUX_MEMFD_PREFIX "memfd:" int linux_memfd_create(struct thread *td, struct linux_memfd_create_args *args) { char memfd_name[LINUX_NAME_MAX + 1]; int error, flags, shmflags, oflags; /* * This is our clever trick to avoid the heap allocation to copy in the * uname. We don't really need to go this far out of our way, but it * does keep the rest of this function fairly clean as they don't have * to worry about cleanup on the way out. */ error = copyinstr(args->uname_ptr, memfd_name + sizeof(LINUX_MEMFD_PREFIX) - 1, LINUX_NAME_MAX - sizeof(LINUX_MEMFD_PREFIX) - 1, NULL); if (error != 0) { if (error == ENAMETOOLONG) error = EINVAL; return (error); } memcpy(memfd_name, LINUX_MEMFD_PREFIX, sizeof(LINUX_MEMFD_PREFIX) - 1); flags = linux_to_bsd_bits(args->flags, mfd_bitmap, 0); if ((flags & ~(MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB | MFD_HUGE_MASK)) != 0) return (EINVAL); /* Size specified but no HUGETLB. */ if ((flags & MFD_HUGE_MASK) != 0 && (flags & MFD_HUGETLB) == 0) return (EINVAL); /* We don't actually support HUGETLB. */ if ((flags & MFD_HUGETLB) != 0) return (ENOSYS); oflags = O_RDWR; shmflags = SHM_GROW_ON_WRITE; if ((flags & MFD_CLOEXEC) != 0) oflags |= O_CLOEXEC; if ((flags & MFD_ALLOW_SEALING) != 0) shmflags |= SHM_ALLOW_SEALING; return (kern_shm_open2(td, SHM_ANON, oflags, 0, shmflags, NULL, - memfd_name)); + memfd_name, NULL)); } int linux_splice(struct thread *td, struct linux_splice_args *args) { linux_msg(td, "syscall splice not really implemented"); /* * splice(2) is documented to return EINVAL in various circumstances; * returning it instead of ENOSYS should hint the caller to use fallback * instead. */ return (EINVAL); } int linux_close_range(struct thread *td, struct linux_close_range_args *args) { u_int flags = 0; /* * Implementing close_range(CLOSE_RANGE_UNSHARE) allows Linux to * unshare filedesc table of the calling thread from others threads * in a thread group (i.e., process in the FreeBSD) or others processes, * which shares the same table, before closing the files. FreeBSD does * not have compatible unsharing mechanism due to the fact that sharing * process resources, including filedesc table, is at thread level in the * Linux, while in the FreeBSD it is at the process level. 
* Return EINVAL for now if the CLOSE_RANGE_UNSHARE flag is specified * until this new Linux API stabilizes. */ if ((args->flags & ~(LINUX_CLOSE_RANGE_CLOEXEC)) != 0) return (EINVAL); if (args->first > args->last) return (EINVAL); if ((args->flags & LINUX_CLOSE_RANGE_CLOEXEC) != 0) flags |= CLOSE_RANGE_CLOEXEC; return (kern_close_range(td, flags, args->first, args->last)); } int linux_enobufs2eagain(struct thread *td, int fd, int error) { struct file *fp; if (error != ENOBUFS) return (error); if (fget(td, fd, &cap_no_rights, &fp) != 0) return (error); if (fp->f_type == DTYPE_SOCKET && (fp->f_flag & FNONBLOCK) != 0) error = EAGAIN; fdrop(fp, td); return (error); } int linux_write(struct thread *td, struct linux_write_args *args) { struct write_args bargs = { .fd = args->fd, .buf = args->buf, .nbyte = args->nbyte, }; return (linux_enobufs2eagain(td, args->fd, sys_write(td, &bargs))); } int linux_writev(struct thread *td, struct linux_writev_args *args) { struct uio *auio; int error; #ifdef COMPAT_LINUX32 error = freebsd32_copyinuio(PTRIN(args->iovp), args->iovcnt, &auio); #else error = copyinuio(args->iovp, args->iovcnt, &auio); #endif if (error != 0) return (error); error = kern_writev(td, args->fd, auio); freeuio(auio); return (linux_enobufs2eagain(td, args->fd, error)); } static int linux_inotify_init_flags(int l_flags) { int bsd_flags; if ((l_flags & ~(LINUX_IN_CLOEXEC | LINUX_IN_NONBLOCK)) != 0) linux_msg(NULL, "inotify_init1 unsupported flags 0x%x", l_flags); bsd_flags = 0; if ((l_flags & LINUX_IN_CLOEXEC) != 0) bsd_flags |= O_CLOEXEC; if ((l_flags & LINUX_IN_NONBLOCK) != 0) bsd_flags |= O_NONBLOCK; return (bsd_flags); } static int inotify_init_common(struct thread *td, int flags) { struct specialfd_inotify si; si.flags = linux_inotify_init_flags(flags); return (kern_specialfd(td, SPECIALFD_INOTIFY, &si)); } #if defined(__i386__) || defined(__amd64__) int linux_inotify_init(struct thread *td, struct linux_inotify_init_args *args) { return (inotify_init_common(td, 0)); } #endif int linux_inotify_init1(struct thread *td, struct linux_inotify_init1_args *args) { return (inotify_init_common(td, args->flags)); } /* * The native implementation uses the same values for inotify events as * libinotify, which gives us binary compatibility with Linux. This simplifies * the shim implementation a lot, as otherwise we would have to handle read(2) * calls on inotify descriptors and translate events to Linux's ABI. 
*/ _Static_assert(LINUX_IN_ACCESS == IN_ACCESS, "IN_ACCESS mismatch"); _Static_assert(LINUX_IN_MODIFY == IN_MODIFY, "IN_MODIFY mismatch"); _Static_assert(LINUX_IN_ATTRIB == IN_ATTRIB, "IN_ATTRIB mismatch"); _Static_assert(LINUX_IN_CLOSE_WRITE == IN_CLOSE_WRITE, "IN_CLOSE_WRITE mismatch"); _Static_assert(LINUX_IN_CLOSE_NOWRITE == IN_CLOSE_NOWRITE, "IN_CLOSE_NOWRITE mismatch"); _Static_assert(LINUX_IN_OPEN == IN_OPEN, "IN_OPEN mismatch"); _Static_assert(LINUX_IN_MOVED_FROM == IN_MOVED_FROM, "IN_MOVED_FROM mismatch"); _Static_assert(LINUX_IN_MOVED_TO == IN_MOVED_TO, "IN_MOVED_TO mismatch"); _Static_assert(LINUX_IN_CREATE == IN_CREATE, "IN_CREATE mismatch"); _Static_assert(LINUX_IN_DELETE == IN_DELETE, "IN_DELETE mismatch"); _Static_assert(LINUX_IN_DELETE_SELF == IN_DELETE_SELF, "IN_DELETE_SELF mismatch"); _Static_assert(LINUX_IN_MOVE_SELF == IN_MOVE_SELF, "IN_MOVE_SELF mismatch"); _Static_assert(LINUX_IN_UNMOUNT == IN_UNMOUNT, "IN_UNMOUNT mismatch"); _Static_assert(LINUX_IN_Q_OVERFLOW == IN_Q_OVERFLOW, "IN_Q_OVERFLOW mismatch"); _Static_assert(LINUX_IN_IGNORED == IN_IGNORED, "IN_IGNORED mismatch"); _Static_assert(LINUX_IN_ISDIR == IN_ISDIR, "IN_ISDIR mismatch"); _Static_assert(LINUX_IN_ONLYDIR == IN_ONLYDIR, "IN_ONLYDIR mismatch"); _Static_assert(LINUX_IN_DONT_FOLLOW == IN_DONT_FOLLOW, "IN_DONT_FOLLOW mismatch"); _Static_assert(LINUX_IN_MASK_CREATE == IN_MASK_CREATE, "IN_MASK_CREATE mismatch"); _Static_assert(LINUX_IN_MASK_ADD == IN_MASK_ADD, "IN_MASK_ADD mismatch"); _Static_assert(LINUX_IN_ONESHOT == IN_ONESHOT, "IN_ONESHOT mismatch"); _Static_assert(LINUX_IN_EXCL_UNLINK == IN_EXCL_UNLINK, "IN_EXCL_UNLINK mismatch"); static int linux_inotify_watch_flags(int l_flags) { if ((l_flags & ~(LINUX_IN_ALL_EVENTS | LINUX_IN_ALL_FLAGS)) != 0) { linux_msg(NULL, "inotify_add_watch unsupported flags 0x%x", l_flags); } return (l_flags); } int linux_inotify_add_watch(struct thread *td, struct linux_inotify_add_watch_args *args) { return (kern_inotify_add_watch(args->fd, AT_FDCWD, args->pathname, linux_inotify_watch_flags(args->mask), td)); } int linux_inotify_rm_watch(struct thread *td, struct linux_inotify_rm_watch_args *args) { return (kern_inotify_rm_watch(args->fd, args->wd, td)); } diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c index 6f83b875a6b6..85fe48ddd466 100644 --- a/sys/kern/uipc_shm.c +++ b/sys/kern/uipc_shm.c @@ -1,2226 +1,2244 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson * Copyright 2020 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by BAE Systems, the University of * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent * Computing (TC) research program. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Support for shared swap-backed anonymous memory objects via * shm_open(2), shm_rename(2), and shm_unlink(2). * While most of the implementation is here, vm_mmap.c contains * mapping logic changes. * * posixshmcontrol(1) allows users to inspect the state of the memory * objects. Per-uid swap resource limit controls total amount of * memory that user can consume for anonymous objects, including * shared. */ #include #include "opt_capsicum.h" #include "opt_ktrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct shm_mapping { char *sm_path; Fnv32_t sm_fnv; struct shmfd *sm_shmfd; LIST_ENTRY(shm_mapping) sm_link; }; static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor"); static LIST_HEAD(, shm_mapping) *shm_dictionary; static struct sx shm_dict_lock; static struct mtx shm_timestamp_lock; static u_long shm_hash; static struct unrhdr64 shm_ino_unr; static dev_t shm_dev_ino; #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash]) static void shm_init(void *arg); static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd); static struct shmfd *shm_lookup(char *path, Fnv32_t fnv); static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred); static void shm_doremove(struct shm_mapping *map); static int shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie); static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie); static int shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out); static int shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags); static fo_rdwr_t shm_read; static fo_rdwr_t shm_write; static fo_truncate_t shm_truncate; static fo_ioctl_t shm_ioctl; static fo_stat_t shm_stat; static fo_close_t shm_close; static fo_chmod_t shm_chmod; static fo_chown_t shm_chown; static fo_seek_t shm_seek; static fo_fill_kinfo_t shm_fill_kinfo; static fo_mmap_t shm_mmap; static fo_get_seals_t shm_get_seals; static fo_add_seals_t shm_add_seals; static fo_fallocate_t shm_fallocate; static fo_fspacectl_t shm_fspacectl; /* File descriptor operations. 
*/ const struct fileops shm_ops = { .fo_read = shm_read, .fo_write = shm_write, .fo_truncate = shm_truncate, .fo_ioctl = shm_ioctl, .fo_poll = invfo_poll, .fo_kqfilter = invfo_kqfilter, .fo_stat = shm_stat, .fo_close = shm_close, .fo_chmod = shm_chmod, .fo_chown = shm_chown, .fo_sendfile = vn_sendfile, .fo_seek = shm_seek, .fo_fill_kinfo = shm_fill_kinfo, .fo_mmap = shm_mmap, .fo_get_seals = shm_get_seals, .fo_add_seals = shm_add_seals, .fo_fallocate = shm_fallocate, .fo_fspacectl = shm_fspacectl, .fo_cmp = file_kcmp_generic, .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE, }; FEATURE(posix_shm, "POSIX shared memory"); static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); static int largepage_reclaim_tries = 1; SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries, CTLFLAG_RWTUN, &largepage_reclaim_tries, 0, "Number of contig reclaims before giving up for default alloc policy"); #define shm_rangelock_unlock(shmfd, cookie) \ rangelock_unlock(&(shmfd)->shm_rl, (cookie)) #define shm_rangelock_rlock(shmfd, start, end) \ rangelock_rlock(&(shmfd)->shm_rl, (start), (end)) #define shm_rangelock_tryrlock(shmfd, start, end) \ rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end)) #define shm_rangelock_wlock(shmfd, start, end) \ rangelock_wlock(&(shmfd)->shm_rl, (start), (end)) static int uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio) { struct pctrie_iter pages; vm_page_t m; vm_pindex_t idx; size_t tlen; int error, offset, rv; idx = OFF_TO_IDX(uio->uio_offset); offset = uio->uio_offset & PAGE_MASK; tlen = MIN(PAGE_SIZE - offset, len); rv = vm_page_grab_valid_unlocked(&m, obj, idx, VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT); if (rv == VM_PAGER_OK) goto found; /* * Read I/O without either a corresponding resident page or swap * page: use zero_region. This is intended to avoid instantiating * pages on read from a sparse region. */ vm_page_iter_init(&pages, obj); VM_OBJECT_WLOCK(obj); m = vm_radix_iter_lookup(&pages, idx); if (uio->uio_rw == UIO_READ && m == NULL && !vm_pager_has_page(obj, idx, NULL, NULL)) { VM_OBJECT_WUNLOCK(obj); return (uiomove(__DECONST(void *, zero_region), tlen, uio)); } /* * Although the tmpfs vnode lock is held here, it is * nonetheless safe to sleep waiting for a free page. The * pageout daemon does not need to acquire the tmpfs vnode * lock to page out tobj's pages because tobj is a OBJT_SWAP * type object. */ rv = vm_page_grab_valid_iter(&m, obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY, &pages); if (rv != VM_PAGER_OK) { VM_OBJECT_WUNLOCK(obj); if (bootverbose) { printf("uiomove_object: vm_obj %p idx %jd " "pager error %d\n", obj, idx, rv); } return (rv == VM_PAGER_AGAIN ? 
ENOSPC : EIO); } VM_OBJECT_WUNLOCK(obj); found: error = uiomove_fromphys(&m, offset, tlen, uio); if (uio->uio_rw == UIO_WRITE && error == 0) vm_page_set_dirty(m); vm_page_activate(m); vm_page_sunbusy(m); return (error); } int uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio) { ssize_t resid; size_t len; int error; error = 0; while ((resid = uio->uio_resid) > 0) { if (obj_size <= uio->uio_offset) break; len = MIN(obj_size - uio->uio_offset, resid); if (len == 0) break; error = uiomove_object_page(obj, len, uio); if (error != 0 || resid == uio->uio_resid) break; } return (error); } static u_long count_largepages[MAXPAGESIZES]; static int shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx, int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) { vm_page_t m __diagused; int psind; psind = object->un_pager.phys.data_val; if (psind == 0 || pidx >= object->size) return (VM_PAGER_FAIL); *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE); /* * We only busy the first page in the superpage run. It is * useless to busy whole run since we only remove full * superpage, and it takes too long to busy e.g. 512 * 512 == * 262144 pages constituing 1G amd64 superage. */ m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT); MPASS(m != NULL); *last = *first + atop(pagesizes[psind]) - 1; return (VM_PAGER_OK); } static boolean_t shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after) { int psind; psind = object->un_pager.phys.data_val; if (psind == 0 || pindex >= object->size) return (FALSE); if (before != NULL) { *before = pindex - rounddown2(pindex, pagesizes[psind] / PAGE_SIZE); } if (after != NULL) { *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) - pindex; } return (TRUE); } static void shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred) { } static void shm_largepage_phys_dtor(vm_object_t object) { int psind; psind = object->un_pager.phys.data_val; if (psind != 0) { atomic_subtract_long(&count_largepages[psind], object->size / (pagesizes[psind] / PAGE_SIZE)); vm_wire_sub(object->size); } else { KASSERT(object->size == 0, ("largepage phys obj %p not initialized bit size %#jx > 0", object, (uintmax_t)object->size)); } } static const struct phys_pager_ops shm_largepage_phys_ops = { .phys_pg_populate = shm_largepage_phys_populate, .phys_pg_haspage = shm_largepage_phys_haspage, .phys_pg_ctor = shm_largepage_phys_ctor, .phys_pg_dtor = shm_largepage_phys_dtor, }; bool shm_largepage(struct shmfd *shmfd) { return (shmfd->shm_object->type == OBJT_PHYS); } static void shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size) { struct shmfd *shm; vm_size_t c; swap_pager_freespace(obj, start, size, &c); if (c == 0) return; shm = obj->un_pager.swp.swp_priv; if (shm == NULL) return; KASSERT(shm->shm_pages >= c, ("shm %p pages %jd free %jd", shm, (uintmax_t)shm->shm_pages, (uintmax_t)c)); shm->shm_pages -= c; } static void shm_page_inserted(vm_object_t obj, vm_page_t m) { struct shmfd *shm; shm = obj->un_pager.swp.swp_priv; if (shm == NULL) return; if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) shm->shm_pages += 1; } static void shm_page_removed(vm_object_t obj, vm_page_t m) { struct shmfd *shm; shm = obj->un_pager.swp.swp_priv; if (shm == NULL) return; if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) { KASSERT(shm->shm_pages >= 1, ("shm %p pages %jd free 1", shm, (uintmax_t)shm->shm_pages)); shm->shm_pages -= 1; } } static struct pagerops 
shm_swap_pager_ops = { .pgo_kvme_type = KVME_TYPE_SWAP, .pgo_freespace = shm_pager_freespace, .pgo_page_inserted = shm_page_inserted, .pgo_page_removed = shm_page_removed, }; static int shmfd_pager_type = -1; static int shm_seek(struct file *fp, off_t offset, int whence, struct thread *td) { struct shmfd *shmfd; off_t foffset; int error; shmfd = fp->f_data; foffset = foffset_lock(fp, 0); error = 0; switch (whence) { case L_INCR: if (foffset < 0 || (offset > 0 && foffset > OFF_MAX - offset)) { error = EOVERFLOW; break; } offset += foffset; break; case L_XTND: if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) { error = EOVERFLOW; break; } offset += shmfd->shm_size; break; case L_SET: break; default: error = EINVAL; } if (error == 0) { if (offset < 0 || offset > shmfd->shm_size) error = EINVAL; else td->td_uretoff.tdu_off = offset; } foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0); return (error); } static int shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct shmfd *shmfd; void *rl_cookie; int error; shmfd = fp->f_data; #ifdef MAC error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd); if (error) return (error); #endif foffset_lock_uio(fp, uio, flags); rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset, uio->uio_offset + uio->uio_resid); error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); shm_rangelock_unlock(shmfd, rl_cookie); foffset_unlock_uio(fp, uio, flags); return (error); } static int shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct shmfd *shmfd; void *rl_cookie; int error; off_t newsize; KASSERT((flags & FOF_OFFSET) == 0 || uio->uio_offset >= 0, ("%s: negative offset", __func__)); shmfd = fp->f_data; #ifdef MAC error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd); if (error) return (error); #endif if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0) return (EINVAL); foffset_lock_uio(fp, uio, flags); if (uio->uio_resid > OFF_MAX - uio->uio_offset) { /* * Overflow is only an error if we're supposed to expand on * write. Otherwise, we'll just truncate the write to the * size of the file, which can only grow up to OFF_MAX. 
*/ if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) { foffset_unlock_uio(fp, uio, flags); return (EFBIG); } newsize = atomic_load_64(&shmfd->shm_size); } else { newsize = uio->uio_offset + uio->uio_resid; } if ((flags & FOF_OFFSET) == 0) rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX); else rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, MAX(newsize, uio->uio_offset)); if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) { error = EPERM; } else { error = 0; if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 && newsize > shmfd->shm_size) { error = shm_dotruncate_cookie(shmfd, newsize, rl_cookie); } if (error == 0) error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); } shm_rangelock_unlock(shmfd, rl_cookie); foffset_unlock_uio(fp, uio, flags); return (error); } static int shm_truncate(struct file *fp, off_t length, struct ucred *active_cred, struct thread *td) { struct shmfd *shmfd; #ifdef MAC int error; #endif shmfd = fp->f_data; #ifdef MAC error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd); if (error) return (error); #endif return (shm_dotruncate(shmfd, length)); } int shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, struct thread *td) { struct shmfd *shmfd; struct shm_largepage_conf *conf; void *rl_cookie; shmfd = fp->f_data; switch (com) { case FIONBIO: case FIOASYNC: /* * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, * just like it would on an unlinked regular file */ return (0); case FIOSSHMLPGCNF: if (!shm_largepage(shmfd)) return (ENOTTY); conf = data; if (shmfd->shm_lp_psind != 0 && conf->psind != shmfd->shm_lp_psind) return (EINVAL); if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES || pagesizes[conf->psind] == 0) return (EINVAL); if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT && conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT && conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD) return (EINVAL); rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX); shmfd->shm_lp_psind = conf->psind; shmfd->shm_lp_alloc_policy = conf->alloc_policy; shmfd->shm_object->un_pager.phys.data_val = conf->psind; shm_rangelock_unlock(shmfd, rl_cookie); return (0); case FIOGSHMLPGCNF: if (!shm_largepage(shmfd)) return (ENOTTY); conf = data; rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX); conf->psind = shmfd->shm_lp_psind; conf->alloc_policy = shmfd->shm_lp_alloc_policy; shm_rangelock_unlock(shmfd, rl_cookie); return (0); default: return (ENOTTY); } } static int shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) { struct shmfd *shmfd; #ifdef MAC int error; #endif shmfd = fp->f_data; #ifdef MAC error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd); if (error) return (error); #endif /* * Attempt to return sanish values for fstat() on a memory file * descriptor. 
*/ bzero(sb, sizeof(*sb)); sb->st_blksize = PAGE_SIZE; sb->st_size = shmfd->shm_size; mtx_lock(&shm_timestamp_lock); sb->st_atim = shmfd->shm_atime; sb->st_ctim = shmfd->shm_ctime; sb->st_mtim = shmfd->shm_mtime; sb->st_birthtim = shmfd->shm_birthtime; sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */ sb->st_uid = shmfd->shm_uid; sb->st_gid = shmfd->shm_gid; mtx_unlock(&shm_timestamp_lock); sb->st_dev = shm_dev_ino; sb->st_ino = shmfd->shm_ino; sb->st_nlink = shmfd->shm_object->ref_count; if (shm_largepage(shmfd)) { sb->st_blocks = shmfd->shm_object->size / (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT); } else { sb->st_blocks = shmfd->shm_pages; } return (0); } static int shm_close(struct file *fp, struct thread *td) { struct shmfd *shmfd; shmfd = fp->f_data; fp->f_data = NULL; shm_drop(shmfd); return (0); } static int shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out) { int error; char *path; const char *pr_path; size_t pr_pathlen; path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK); pr_path = td->td_ucred->cr_prison->pr_path; /* Construct a full pathname for jailed callers. */ pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN); error = copyinstr(userpath_in, path + pr_pathlen, MAXPATHLEN - pr_pathlen, NULL); if (error != 0) goto out; #ifdef KTRACE if (KTRPOINT(curthread, KTR_NAMEI)) ktrnamei(path); #endif /* Require paths to start with a '/' character. */ if (path[pr_pathlen] != '/') { error = EINVAL; goto out; } *path_out = path; out: if (error != 0) free(path, M_SHMFD); return (error); } static int shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base, int end) { int error; error = vm_page_grab_zero_partial(object, idx, base, end); if (error == EIO) VM_OBJECT_WUNLOCK(object); return (error); } static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie) { vm_object_t object; vm_pindex_t nobjsize; vm_ooffset_t delta; int base, error; KASSERT(length >= 0, ("shm_dotruncate: length < 0")); object = shmfd->shm_object; VM_OBJECT_ASSERT_WLOCKED(object); rangelock_cookie_assert(rl_cookie, RA_WLOCKED); if (length == shmfd->shm_size) return (0); nobjsize = OFF_TO_IDX(length + PAGE_MASK); /* Are we shrinking? If so, trim the end. */ if (length < shmfd->shm_size) { if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0) return (EPERM); /* * Disallow any requests to shrink the size if this * object is mapped into the kernel. */ if (shmfd->shm_kmappings > 0) return (EBUSY); /* * Zero the truncated part of the last page. */ base = length & PAGE_MASK; if (base != 0) { error = shm_partial_page_invalidate(object, OFF_TO_IDX(length), base, PAGE_SIZE); if (error) return (error); } delta = IDX_TO_OFF(object->size - nobjsize); if (nobjsize < object->size) vm_object_page_remove(object, nobjsize, object->size, 0); /* Free the swap accounted for shm */ swap_release_by_cred(delta, object->cred); object->charge -= delta; } else { if ((shmfd->shm_seals & F_SEAL_GROW) != 0) return (EPERM); /* Try to reserve additional swap space. 
*/ delta = IDX_TO_OFF(nobjsize - object->size); if (!swap_reserve_by_cred(delta, object->cred)) return (ENOMEM); object->charge += delta; } shmfd->shm_size = length; mtx_lock(&shm_timestamp_lock); vfs_timestamp(&shmfd->shm_ctime); shmfd->shm_mtime = shmfd->shm_ctime; mtx_unlock(&shm_timestamp_lock); object->size = nobjsize; return (0); } static int shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie) { vm_object_t object; vm_page_t m; vm_pindex_t newobjsz; vm_pindex_t oldobjsz __unused; int aflags, error, i, psind, try; KASSERT(length >= 0, ("shm_dotruncate: length < 0")); object = shmfd->shm_object; VM_OBJECT_ASSERT_WLOCKED(object); rangelock_cookie_assert(rl_cookie, RA_WLOCKED); oldobjsz = object->size; newobjsz = OFF_TO_IDX(length); if (length == shmfd->shm_size) return (0); psind = shmfd->shm_lp_psind; if (psind == 0 && length != 0) return (EINVAL); if ((length & (pagesizes[psind] - 1)) != 0) return (EINVAL); if (length < shmfd->shm_size) { if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0) return (EPERM); if (shmfd->shm_kmappings > 0) return (EBUSY); return (ENOTSUP); /* Pages are unmanaged. */ #if 0 vm_object_page_remove(object, newobjsz, oldobjsz, 0); object->size = newobjsz; shmfd->shm_size = length; return (0); #endif } if ((shmfd->shm_seals & F_SEAL_GROW) != 0) return (EPERM); aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO; if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT) aflags |= VM_ALLOC_WAITFAIL; try = 0; /* * Extend shmfd and object, keeping all already fully * allocated large pages intact even on error, because the dropped * object lock might have allowed them to be mapped. */ while (object->size < newobjsz) { m = vm_page_alloc_contig(object, object->size, aflags, pagesizes[psind] / PAGE_SIZE, 0, ~0, pagesizes[psind], 0, VM_MEMATTR_DEFAULT); if (m == NULL) { VM_OBJECT_WUNLOCK(object); if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT || (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_DEFAULT && try >= largepage_reclaim_tries)) { VM_OBJECT_WLOCK(object); return (ENOMEM); } error = vm_page_reclaim_contig(aflags, pagesizes[psind] / PAGE_SIZE, 0, ~0, pagesizes[psind], 0); if (error == ENOMEM) error = vm_wait_intr(object); if (error != 0) { VM_OBJECT_WLOCK(object); return (error); } try++; VM_OBJECT_WLOCK(object); continue; } try = 0; for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) { if ((m[i].flags & PG_ZERO) == 0) pmap_zero_page(&m[i]); vm_page_valid(&m[i]); vm_page_xunbusy(&m[i]); } object->size += OFF_TO_IDX(pagesizes[psind]); shmfd->shm_size += pagesizes[psind]; atomic_add_long(&count_largepages[psind], 1); vm_wire_add(atop(pagesizes[psind])); } return (0); } static int shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie) { int error; VM_OBJECT_WLOCK(shmfd->shm_object); error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd, length, rl_cookie) : shm_dotruncate_locked(shmfd, length, rl_cookie); VM_OBJECT_WUNLOCK(shmfd->shm_object); return (error); } int shm_dotruncate(struct shmfd *shmfd, off_t length) { void *rl_cookie; int error; rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX); error = shm_dotruncate_cookie(shmfd, length, rl_cookie); shm_rangelock_unlock(shmfd, rl_cookie); return (error); } /* * shmfd object management including creation and reference counting * routines.
*/ struct shmfd * shm_alloc(struct ucred *ucred, mode_t mode, bool largepage) { struct shmfd *shmfd; vm_object_t obj; if (largepage) { obj = phys_pager_allocate(NULL, &shm_largepage_phys_ops, NULL, 0, VM_PROT_DEFAULT, 0, ucred); } else { obj = vm_pager_allocate(shmfd_pager_type, NULL, 0, VM_PROT_DEFAULT, 0, ucred); } if (obj == NULL) { /* * swap reservation limits can cause object allocation * to fail. */ return (NULL); } shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO); shmfd->shm_uid = ucred->cr_uid; shmfd->shm_gid = ucred->cr_gid; shmfd->shm_mode = mode; if (largepage) { obj->un_pager.phys.phys_priv = shmfd; shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT; } else { obj->un_pager.swp.swp_priv = shmfd; } VM_OBJECT_WLOCK(obj); vm_object_set_flag(obj, OBJ_POSIXSHM); VM_OBJECT_WUNLOCK(obj); shmfd->shm_object = obj; vfs_timestamp(&shmfd->shm_birthtime); shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime = shmfd->shm_birthtime; shmfd->shm_ino = alloc_unr64(&shm_ino_unr); refcount_init(&shmfd->shm_refs, 1); mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF); rangelock_init(&shmfd->shm_rl); #ifdef MAC mac_posixshm_init(shmfd); mac_posixshm_create(ucred, shmfd); #endif return (shmfd); } struct shmfd * shm_hold(struct shmfd *shmfd) { refcount_acquire(&shmfd->shm_refs); return (shmfd); } void shm_drop(struct shmfd *shmfd) { vm_object_t obj; if (refcount_release(&shmfd->shm_refs)) { #ifdef MAC mac_posixshm_destroy(shmfd); #endif rangelock_destroy(&shmfd->shm_rl); mtx_destroy(&shmfd->shm_mtx); obj = shmfd->shm_object; VM_OBJECT_WLOCK(obj); if (shm_largepage(shmfd)) obj->un_pager.phys.phys_priv = NULL; else obj->un_pager.swp.swp_priv = NULL; VM_OBJECT_WUNLOCK(obj); vm_object_deallocate(obj); free(shmfd, M_SHMFD); } } /* * Determine if the credentials have sufficient permissions for a * specified combination of FREAD and FWRITE. */ int shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags) { accmode_t accmode; int error; accmode = 0; if (flags & FREAD) accmode |= VREAD; if (flags & FWRITE) accmode |= VWRITE; mtx_lock(&shm_timestamp_lock); error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, accmode, ucred); mtx_unlock(&shm_timestamp_lock); return (error); } static void shm_init(void *arg) { char name[32]; int i; mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF); sx_init(&shm_dict_lock, "shm dictionary"); shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash); new_unrhdr64(&shm_ino_unr, 1); shm_dev_ino = devfs_alloc_cdp_inode(); KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized")); shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops, OBJT_SWAP); MPASS(shmfd_pager_type != -1); for (i = 1; i < MAXPAGESIZES; i++) { if (pagesizes[i] == 0) break; #define M (1024 * 1024) #define G (1024 * M) if (pagesizes[i] >= G) snprintf(name, sizeof(name), "%luG", pagesizes[i] / G); else if (pagesizes[i] >= M) snprintf(name, sizeof(name), "%luM", pagesizes[i] / M); else snprintf(name, sizeof(name), "%lu", pagesizes[i]); #undef G #undef M SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages), OID_AUTO, name, CTLFLAG_RD, &count_largepages[i], "number of non-transient largepages allocated"); } } SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL); /* * Remove all shared memory objects that belong to a prison. 
*/ void shm_remove_prison(struct prison *pr) { struct shm_mapping *shmm, *tshmm; u_long i; sx_xlock(&shm_dict_lock); for (i = 0; i < shm_hash + 1; i++) { LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) { if (shmm->sm_shmfd->shm_object->cred && shmm->sm_shmfd->shm_object->cred->cr_prison == pr) shm_doremove(shmm); } } sx_xunlock(&shm_dict_lock); } /* * Dictionary management. We maintain an in-kernel dictionary to map * paths to shmfd objects. We use the FNV hash on the path to store * the mappings in a hash table. */ static struct shmfd * shm_lookup(char *path, Fnv32_t fnv) { struct shm_mapping *map; LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { if (map->sm_fnv != fnv) continue; if (strcmp(map->sm_path, path) == 0) return (map->sm_shmfd); } return (NULL); } static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd) { struct shm_mapping *map; map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK); map->sm_path = path; map->sm_fnv = fnv; map->sm_shmfd = shm_hold(shmfd); shmfd->shm_path = path; LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link); } static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred) { struct shm_mapping *map; int error; LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { if (map->sm_fnv != fnv) continue; if (strcmp(map->sm_path, path) == 0) { #ifdef MAC error = mac_posixshm_check_unlink(ucred, map->sm_shmfd); if (error) return (error); #endif error = shm_access(map->sm_shmfd, ucred, FREAD | FWRITE); if (error) return (error); shm_doremove(map); return (0); } } return (ENOENT); } static void shm_doremove(struct shm_mapping *map) { map->sm_shmfd->shm_path = NULL; LIST_REMOVE(map, sm_link); shm_drop(map->sm_shmfd); free(map->sm_path, M_SHMFD); free(map, M_SHMFD); } int kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode, - int shmflags, struct filecaps *fcaps, const char *name __unused) + int shmflags, struct filecaps *fcaps, const char *name __unused, + struct shmfd *shmfd) { struct pwddesc *pdp; - struct shmfd *shmfd; struct file *fp; char *path; void *rl_cookie; Fnv32_t fnv; mode_t cmode; int error, fd, initial_seals; bool largepage; if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE | SHM_LARGEPAGE)) != 0) return (EINVAL); initial_seals = F_SEAL_SEAL; if ((shmflags & SHM_ALLOW_SEALING) != 0) initial_seals &= ~F_SEAL_SEAL; AUDIT_ARG_FFLAGS(flags); AUDIT_ARG_MODE(mode); if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR) return (EINVAL); if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0) return (EINVAL); largepage = (shmflags & SHM_LARGEPAGE) != 0; if (largepage && !PMAP_HAS_LARGEPAGES) return (ENOTTY); /* * Currently only F_SEAL_SEAL may be set when creating or opening shmfd. * If the decision is made later to allow additional seals, care must be * taken below to ensure that the seals are properly set if the shmfd * already existed -- this currently assumes that only F_SEAL_SEAL can * be set and doesn't take further precautions to ensure the validity of * the seals being added with respect to current mappings. */ if ((initial_seals & ~F_SEAL_SEAL) != 0) return (EINVAL); if (userpath != SHM_ANON) { error = shm_copyin_path(td, userpath, &path); if (error != 0) return (error); #ifdef CAPABILITY_MODE /* * shm_open(2) is only allowed for anonymous objects. 
*/ if (CAP_TRACING(td)) ktrcapfail(CAPFAIL_NAMEI, path); if (IN_CAPABILITY_MODE(td)) { error = ECAPMODE; goto outnofp; } #endif AUDIT_ARG_UPATH1_CANON(path); } else { path = NULL; } pdp = td->td_proc->p_pd; cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS; /* * shm_open(2) created shm should always have O_CLOEXEC set, as mandated * by POSIX. We allow it to be unset here so that an in-kernel * interface may be written as a thin layer around shm, optionally not * setting CLOEXEC. For shm_open(2), O_CLOEXEC is set unconditionally * in sys_shm_open() to keep this implementation compliant. */ error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps); if (error != 0) goto outnofp; - /* A SHM_ANON path pointer creates an anonymous object. */ + /* + * A SHM_ANON path pointer creates an anonymous object. We allow other + * parts of the kernel to pre-populate a shmfd and then materialize an + * fd for it here as a means to pass data back up to userland. This + * doesn't really make sense for named shm objects, but it makes plenty + * of sense for anonymous objects. + */ if (userpath == SHM_ANON) { - /* A read-only anonymous object is pointless. */ - if ((flags & O_ACCMODE) == O_RDONLY) { - error = EINVAL; - goto out; - } - shmfd = shm_alloc(td->td_ucred, cmode, largepage); - if (shmfd == NULL) { - error = ENOMEM; - goto out; + if (shmfd != NULL) { + shm_hold(shmfd); + } else { + /* + * A read-only anonymous object is pointless, unless it + * was pre-populated by the kernel with the expectation + * that a shmfd would later be created for userland to + * access it through. + */ + if ((flags & O_ACCMODE) == O_RDONLY) { + error = EINVAL; + goto out; + } + shmfd = shm_alloc(td->td_ucred, cmode, largepage); + if (shmfd == NULL) { + error = ENOMEM; + goto out; + } + + shmfd->shm_seals = initial_seals; + shmfd->shm_flags = shmflags; } - shmfd->shm_seals = initial_seals; - shmfd->shm_flags = shmflags; } else { fnv = fnv_32_str(path, FNV1_32_INIT); sx_xlock(&shm_dict_lock); + + MPASS(shmfd == NULL); shmfd = shm_lookup(path, fnv); if (shmfd == NULL) { /* Object does not yet exist, create it if requested. */ if (flags & O_CREAT) { #ifdef MAC error = mac_posixshm_check_create(td->td_ucred, path); if (error == 0) { #endif shmfd = shm_alloc(td->td_ucred, cmode, largepage); if (shmfd == NULL) { error = ENOMEM; } else { shmfd->shm_seals = initial_seals; shmfd->shm_flags = shmflags; shm_insert(path, fnv, shmfd); path = NULL; } #ifdef MAC } #endif } else { error = ENOENT; } } else { /* * Object already exists, obtain a new reference if * requested and permitted. */ rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX); /* * kern_shm_open() likely shouldn't ever error out on * trying to set a seal that already exists, unlike * F_ADD_SEALS. This would break terribly as * shm_open(2) actually sets F_SEAL_SEAL to maintain * historical behavior where the underlying file could * not be sealed. */ initial_seals &= ~shmfd->shm_seals; /* * initial_seals can't set additional seals if we've * already been set F_SEAL_SEAL. If F_SEAL_SEAL is set, * then we've already removed that one from * initial_seals. This is currently redundant as we * only allow setting F_SEAL_SEAL at creation time, but * it's cheap to check and decreases the effort required * to allow additional seals. 
*/ if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 && initial_seals != 0) error = EPERM; else if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) error = EEXIST; else if (shmflags != 0 && shmflags != shmfd->shm_flags) error = EINVAL; else { #ifdef MAC error = mac_posixshm_check_open(td->td_ucred, shmfd, FFLAGS(flags & O_ACCMODE)); if (error == 0) #endif error = shm_access(shmfd, td->td_ucred, FFLAGS(flags & O_ACCMODE)); } /* * Truncate the file back to zero length if * O_TRUNC was specified and the object was * opened with read/write. */ if (error == 0 && (flags & (O_ACCMODE | O_TRUNC)) == (O_RDWR | O_TRUNC)) { VM_OBJECT_WLOCK(shmfd->shm_object); #ifdef MAC error = mac_posixshm_check_truncate( td->td_ucred, fp->f_cred, shmfd); if (error == 0) #endif error = shm_dotruncate_locked(shmfd, 0, rl_cookie); VM_OBJECT_WUNLOCK(shmfd->shm_object); } if (error == 0) { /* * Currently we only allow F_SEAL_SEAL to be * set initially. As noted above, this would * need to be reworked should that change. */ shmfd->shm_seals |= initial_seals; shm_hold(shmfd); } shm_rangelock_unlock(shmfd, rl_cookie); } sx_xunlock(&shm_dict_lock); if (error != 0) goto out; } finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops); td->td_retval[0] = fd; fdrop(fp, td); free(path, M_SHMFD); return (0); out: fdclose(td, fp, fd); fdrop(fp, td); outnofp: free(path, M_SHMFD); return (error); } /* System calls. */ #ifdef COMPAT_FREEBSD12 int freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap) { return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, uap->mode, NULL)); } #endif int sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap) { char *path; Fnv32_t fnv; int error; error = shm_copyin_path(td, uap->path, &path); if (error != 0) return (error); AUDIT_ARG_UPATH1_CANON(path); fnv = fnv_32_str(path, FNV1_32_INIT); sx_xlock(&shm_dict_lock); error = shm_remove(path, fnv, td->td_ucred); sx_xunlock(&shm_dict_lock); free(path, M_SHMFD); return (error); } int sys_shm_rename(struct thread *td, struct shm_rename_args *uap) { char *path_from = NULL, *path_to = NULL; Fnv32_t fnv_from, fnv_to; struct shmfd *fd_from; struct shmfd *fd_to; int error; int flags; flags = uap->flags; AUDIT_ARG_FFLAGS(flags); /* * Make sure the user passed only valid flags. * If you add a new flag, please add a new term here. */ if ((flags & ~( SHM_RENAME_NOREPLACE | SHM_RENAME_EXCHANGE )) != 0) { error = EINVAL; goto out; } /* * EXCHANGE and NOREPLACE don't quite make sense together. Let's * force the user to choose one or the other. 
*/ if ((flags & SHM_RENAME_NOREPLACE) != 0 && (flags & SHM_RENAME_EXCHANGE) != 0) { error = EINVAL; goto out; } /* Renaming to or from anonymous makes no sense */ if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) { error = EINVAL; goto out; } error = shm_copyin_path(td, uap->path_from, &path_from); if (error != 0) goto out; error = shm_copyin_path(td, uap->path_to, &path_to); if (error != 0) goto out; AUDIT_ARG_UPATH1_CANON(path_from); AUDIT_ARG_UPATH2_CANON(path_to); /* Rename with from/to equal is a no-op */ if (strcmp(path_from, path_to) == 0) goto out; fnv_from = fnv_32_str(path_from, FNV1_32_INIT); fnv_to = fnv_32_str(path_to, FNV1_32_INIT); sx_xlock(&shm_dict_lock); fd_from = shm_lookup(path_from, fnv_from); if (fd_from == NULL) { error = ENOENT; goto out_locked; } fd_to = shm_lookup(path_to, fnv_to); if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) { error = EEXIST; goto out_locked; } /* * Unconditionally prevents shm_remove from invalidating the 'from' * shm's state. */ shm_hold(fd_from); error = shm_remove(path_from, fnv_from, td->td_ucred); /* * One of my assumptions failed if ENOENT (e.g. locking didn't * protect us) */ KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s", path_from)); if (error != 0) { shm_drop(fd_from); goto out_locked; } /* * If we are exchanging, we need to ensure the shm_remove below * doesn't invalidate the dest shm's state. */ if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) shm_hold(fd_to); /* * NOTE: if path_to is not already in the hash, c'est la vie; * it simply means we have nothing already at path_to to unlink. * That is the ENOENT case. * * If we somehow don't have access to unlink this guy, but * did for the shm at path_from, then relink the shm to path_from * and abort with EACCES. * * All other errors: that is weird; let's relink and abort the * operation. 
*/ error = shm_remove(path_to, fnv_to, td->td_ucred); if (error != 0 && error != ENOENT) { shm_insert(path_from, fnv_from, fd_from); shm_drop(fd_from); /* Don't free path_from now, since the hash references it */ path_from = NULL; goto out_locked; } error = 0; shm_insert(path_to, fnv_to, fd_from); /* Don't free path_to now, since the hash references it */ path_to = NULL; /* We kept a ref when we removed, and incremented again in insert */ shm_drop(fd_from); KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n", fd_from->shm_refs)); if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) { shm_insert(path_from, fnv_from, fd_to); path_from = NULL; shm_drop(fd_to); KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n", fd_to->shm_refs)); } out_locked: sx_xunlock(&shm_dict_lock); out: free(path_from, M_SHMFD); free(path_to, M_SHMFD); return (error); } static int shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags, vm_ooffset_t foff, struct thread *td) { struct vmspace *vms; vm_map_entry_t next_entry, prev_entry; vm_offset_t align, mask, maxaddr; int docow, error, rv, try; bool curmap; if (shmfd->shm_lp_psind == 0) return (EINVAL); /* MAP_PRIVATE is disabled */ if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL | MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0) return (EINVAL); vms = td->td_proc->p_vmspace; curmap = map == &vms->vm_map; if (curmap) { error = kern_mmap_racct_check(td, map, size); if (error != 0) return (error); } docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT; docow |= MAP_INHERIT_SHARE; if ((flags & MAP_NOCORE) != 0) docow |= MAP_DISABLE_COREDUMP; mask = pagesizes[shmfd->shm_lp_psind] - 1; if ((foff & mask) != 0) return (EINVAL); maxaddr = vm_map_max(map); if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR) maxaddr = MAP_32BIT_MAX_ADDR; if (size == 0 || (size & mask) != 0 || (*addr != 0 && ((*addr & mask) != 0 || *addr + size < *addr || *addr + size > maxaddr))) return (EINVAL); align = flags & MAP_ALIGNMENT_MASK; if (align == 0) { align = pagesizes[shmfd->shm_lp_psind]; } else if (align == MAP_ALIGNED_SUPER) { /* * MAP_ALIGNED_SUPER is only supported on superpage sizes, * i.e., [1, VM_NRESERVLEVEL]. shmfd->shm_lp_psind < 1 is * handled above. */ if ( #if VM_NRESERVLEVEL > 0 shmfd->shm_lp_psind > VM_NRESERVLEVEL #else shmfd->shm_lp_psind > 1 #endif ) return (EINVAL); align = pagesizes[shmfd->shm_lp_psind]; } else { align >>= MAP_ALIGNMENT_SHIFT; align = 1ULL << align; /* Also handles overflow. 
*/ if (align < pagesizes[shmfd->shm_lp_psind]) return (EINVAL); } vm_map_lock(map); if ((flags & MAP_FIXED) == 0) { try = 1; if (curmap && (*addr == 0 || (*addr >= round_page((vm_offset_t)vms->vm_taddr) && *addr < round_page((vm_offset_t)vms->vm_daddr + lim_max(td, RLIMIT_DATA))))) { *addr = roundup2((vm_offset_t)vms->vm_daddr + lim_max(td, RLIMIT_DATA), pagesizes[shmfd->shm_lp_psind]); } again: rv = vm_map_find_aligned(map, addr, size, maxaddr, align); if (rv != KERN_SUCCESS) { if (try == 1) { try = 2; *addr = vm_map_min(map); if ((*addr & mask) != 0) *addr = (*addr + mask) & mask; goto again; } goto fail1; } } else if ((flags & MAP_EXCL) == 0) { rv = vm_map_delete(map, *addr, *addr + size); if (rv != KERN_SUCCESS) goto fail1; } else { error = ENOSPC; if (vm_map_lookup_entry(map, *addr, &prev_entry)) goto fail; next_entry = vm_map_entry_succ(prev_entry); if (next_entry->start < *addr + size) goto fail; } rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size, prot, max_prot, docow); fail1: error = vm_mmap_to_errno(rv); fail: vm_map_unlock(map); return (error); } static int shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize, vm_prot_t prot, vm_prot_t max_maxprot, int flags, vm_ooffset_t foff, struct thread *td) { struct shmfd *shmfd; vm_prot_t maxprot; int error; bool writecnt; void *rl_cookie; shmfd = fp->f_data; maxprot = VM_PROT_NONE; rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize); /* FREAD should always be set. */ if ((fp->f_flag & FREAD) != 0) maxprot |= VM_PROT_EXECUTE | VM_PROT_READ; /* * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared * mapping with a write seal applied. Private mappings are always * writeable. */ if ((flags & MAP_SHARED) == 0) { if ((max_maxprot & VM_PROT_WRITE) != 0) maxprot |= VM_PROT_WRITE; writecnt = false; } else { if ((fp->f_flag & FWRITE) != 0 && (shmfd->shm_seals & F_SEAL_WRITE) == 0) maxprot |= VM_PROT_WRITE; /* * Any mappings from a writable descriptor may be upgraded to * VM_PROT_WRITE with mprotect(2), unless a write-seal was * applied between the open and subsequent mmap(2). We want to * reject application of a write seal as long as any such * mapping exists so that the seal cannot be trivially bypassed. */ writecnt = (maxprot & VM_PROT_WRITE) != 0; if (!writecnt && (prot & VM_PROT_WRITE) != 0) { error = EACCES; goto out; } } maxprot &= max_maxprot; /* See comment in vn_mmap(). */ if ( #ifdef _LP64 objsize > OFF_MAX || #endif foff > OFF_MAX - objsize) { error = EINVAL; goto out; } #ifdef MAC error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags); if (error != 0) goto out; #endif mtx_lock(&shm_timestamp_lock); vfs_timestamp(&shmfd->shm_atime); mtx_unlock(&shm_timestamp_lock); vm_object_reference(shmfd->shm_object); if (shm_largepage(shmfd)) { writecnt = false; error = shm_mmap_large(shmfd, map, addr, objsize, prot, maxprot, flags, foff, td); } else { if (writecnt) { vm_pager_update_writecount(shmfd->shm_object, 0, objsize); } error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags, shmfd->shm_object, foff, writecnt, td); } if (error != 0) { if (writecnt) vm_pager_release_writecount(shmfd->shm_object, 0, objsize); vm_object_deallocate(shmfd->shm_object); } out: shm_rangelock_unlock(shmfd, rl_cookie); return (error); } static int shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td) { struct shmfd *shmfd; int error; error = 0; shmfd = fp->f_data; mtx_lock(&shm_timestamp_lock); /* * SUSv4 says that x bits of permission need not be affected. 
* Be consistent with our shm_open there. */ #ifdef MAC error = mac_posixshm_check_setmode(active_cred, shmfd, mode); if (error != 0) goto out; #endif error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, VADMIN, active_cred); if (error != 0) goto out; shmfd->shm_mode = mode & ACCESSPERMS; out: mtx_unlock(&shm_timestamp_lock); return (error); } static int shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, struct thread *td) { struct shmfd *shmfd; int error; error = 0; shmfd = fp->f_data; mtx_lock(&shm_timestamp_lock); #ifdef MAC error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid); if (error != 0) goto out; #endif if (uid == (uid_t)-1) uid = shmfd->shm_uid; if (gid == (gid_t)-1) gid = shmfd->shm_gid; if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) || (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) && (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN))) goto out; shmfd->shm_uid = uid; shmfd->shm_gid = gid; out: mtx_unlock(&shm_timestamp_lock); return (error); } /* * Helper routines to allow the backing object of a shared memory file * descriptor to be mapped in the kernel. */ int shm_map(struct file *fp, size_t size, off_t offset, void **memp) { struct shmfd *shmfd; vm_offset_t kva, ofs; vm_object_t obj; int rv; if (fp->f_type != DTYPE_SHM) return (EINVAL); shmfd = fp->f_data; obj = shmfd->shm_object; VM_OBJECT_WLOCK(obj); /* * XXXRW: This validation is probably insufficient, and subject to * sign errors. It should be fixed. */ if (offset >= shmfd->shm_size || offset + size > round_page(shmfd->shm_size)) { VM_OBJECT_WUNLOCK(obj); return (EINVAL); } shmfd->shm_kmappings++; vm_object_reference_locked(obj); VM_OBJECT_WUNLOCK(obj); /* Map the object into the kernel_map and wire it. */ kva = vm_map_min(kernel_map); ofs = offset & PAGE_MASK; offset = trunc_page(offset); size = round_page(size + ofs); rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0, VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0); if (rv == KERN_SUCCESS) { rv = vm_map_wire(kernel_map, kva, kva + size, VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); if (rv == KERN_SUCCESS) { *memp = (void *)(kva + ofs); return (0); } vm_map_remove(kernel_map, kva, kva + size); } else vm_object_deallocate(obj); /* On failure, drop our mapping reference. */ VM_OBJECT_WLOCK(obj); shmfd->shm_kmappings--; VM_OBJECT_WUNLOCK(obj); return (vm_mmap_to_errno(rv)); } /* * We require the caller to unmap the entire entry. This allows us to * safely decrement shm_kmappings when a mapping is removed. 
*/ int shm_unmap(struct file *fp, void *mem, size_t size) { struct shmfd *shmfd; vm_map_entry_t entry; vm_offset_t kva, ofs; vm_object_t obj; vm_pindex_t pindex; vm_prot_t prot; boolean_t wired; vm_map_t map; int rv; if (fp->f_type != DTYPE_SHM) return (EINVAL); shmfd = fp->f_data; kva = (vm_offset_t)mem; ofs = kva & PAGE_MASK; kva = trunc_page(kva); size = round_page(size + ofs); map = kernel_map; rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry, &obj, &pindex, &prot, &wired); if (rv != KERN_SUCCESS) return (EINVAL); if (entry->start != kva || entry->end != kva + size) { vm_map_lookup_done(map, entry); return (EINVAL); } vm_map_lookup_done(map, entry); if (obj != shmfd->shm_object) return (EINVAL); vm_map_remove(map, kva, kva + size); VM_OBJECT_WLOCK(obj); KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped")); shmfd->shm_kmappings--; VM_OBJECT_WUNLOCK(obj); return (0); } static int shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list) { const char *path, *pr_path; size_t pr_pathlen; bool visible; sx_assert(&shm_dict_lock, SA_LOCKED); kif->kf_type = KF_TYPE_SHM; kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode; kif->kf_un.kf_file.kf_file_size = shmfd->shm_size; if (shmfd->shm_path != NULL) { path = shmfd->shm_path; pr_path = curthread->td_ucred->cr_prison->pr_path; if (strcmp(pr_path, "/") != 0) { /* Return the jail-rooted pathname. */ pr_pathlen = strlen(pr_path); visible = strncmp(path, pr_path, pr_pathlen) == 0 && path[pr_pathlen] == '/'; if (list && !visible) return (EPERM); if (visible) path += pr_pathlen; } strlcpy(kif->kf_path, path, sizeof(kif->kf_path)); } return (0); } static int shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp __unused) { int res; sx_slock(&shm_dict_lock); res = shm_fill_kinfo_locked(fp->f_data, kif, false); sx_sunlock(&shm_dict_lock); return (res); } static int shm_add_seals(struct file *fp, int seals) { struct shmfd *shmfd; void *rl_cookie; vm_ooffset_t writemappings; int error, nseals; error = 0; shmfd = fp->f_data; rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX); /* Even already-set seals should result in EPERM. */ if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) { error = EPERM; goto out; } nseals = seals & ~shmfd->shm_seals; if ((nseals & F_SEAL_WRITE) != 0) { if (shm_largepage(shmfd)) { error = ENOTSUP; goto out; } /* * The rangelock above prevents writable mappings from being * added after we've started applying seals. The RLOCK here * is to avoid torn reads on ILP32 arches as unmapping/reducing * writemappings will be done without a rangelock. 
*/ VM_OBJECT_RLOCK(shmfd->shm_object); writemappings = shmfd->shm_object->un_pager.swp.writemappings; VM_OBJECT_RUNLOCK(shmfd->shm_object); /* kmappings are also writable */ if (writemappings > 0) { error = EBUSY; goto out; } } shmfd->shm_seals |= nseals; out: shm_rangelock_unlock(shmfd, rl_cookie); return (error); } static int shm_get_seals(struct file *fp, int *seals) { struct shmfd *shmfd; shmfd = fp->f_data; *seals = shmfd->shm_seals; return (0); } static int shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags) { vm_object_t object; vm_pindex_t pistart, pi, piend; vm_ooffset_t off, len; int startofs, endofs, end; int error; off = *offset; len = *length; KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows")); if (off + len > shmfd->shm_size) len = shmfd->shm_size - off; object = shmfd->shm_object; startofs = off & PAGE_MASK; endofs = (off + len) & PAGE_MASK; pistart = OFF_TO_IDX(off); piend = OFF_TO_IDX(off + len); pi = OFF_TO_IDX(off + PAGE_MASK); error = 0; /* Handle the case when offset is on or beyond shm size. */ if ((off_t)len <= 0) { *length = 0; return (0); } VM_OBJECT_WLOCK(object); if (startofs != 0) { end = pistart != piend ? PAGE_SIZE : endofs; error = shm_partial_page_invalidate(object, pistart, startofs, end); if (error) goto out; off += end - startofs; len -= end - startofs; } if (pi < piend) { vm_object_page_remove(object, pi, piend, 0); off += IDX_TO_OFF(piend - pi); len -= IDX_TO_OFF(piend - pi); } if (endofs != 0 && pistart != piend) { error = shm_partial_page_invalidate(object, piend, 0, endofs); if (error) goto out; off += endofs; len -= endofs; } out: VM_OBJECT_WUNLOCK(shmfd->shm_object); *offset = off; *length = len; return (error); } static int shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags, struct ucred *active_cred, struct thread *td) { void *rl_cookie; struct shmfd *shmfd; off_t off, len; int error; KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd")); KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0, ("shm_fspacectl: non-zero flags")); KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset, ("shm_fspacectl: offset/length overflow or underflow")); error = EINVAL; shmfd = fp->f_data; off = *offset; len = *length; rl_cookie = shm_rangelock_wlock(shmfd, off, off + len); switch (cmd) { case SPACECTL_DEALLOC: if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) { error = EPERM; break; } error = shm_deallocate(shmfd, &off, &len, flags); *offset = off; *length = len; break; default: __assert_unreachable(); } shm_rangelock_unlock(shmfd, rl_cookie); return (error); } static int shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td) { void *rl_cookie; struct shmfd *shmfd; size_t size; int error; /* This assumes that the caller already checked for overflow. */ error = 0; shmfd = fp->f_data; size = offset + len; /* * Just grab the rangelock for the range that we may be attempting to * grow, rather than blocking read/write for regions we won't be * touching while this (potential) resize is in progress. Other * attempts to resize the shmfd will have to take a write lock from 0 to * OFF_MAX, so this being potentially beyond the current usable range of * the shmfd is not necessarily a concern. If other mechanisms are * added to grow a shmfd, this may need to be re-evaluated. 
*/ rl_cookie = shm_rangelock_wlock(shmfd, offset, size); if (size > shmfd->shm_size) error = shm_dotruncate_cookie(shmfd, size, rl_cookie); shm_rangelock_unlock(shmfd, rl_cookie); /* Translate to posix_fallocate(2) return value as needed. */ if (error == ENOMEM) error = ENOSPC; return (error); } static int sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS) { struct shm_mapping *shmm; struct sbuf sb; struct kinfo_file kif; u_long i; int error, error2; sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); error = 0; sx_slock(&shm_dict_lock); for (i = 0; i < shm_hash + 1; i++) { LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) { error = shm_fill_kinfo_locked(shmm->sm_shmfd, &kif, true); if (error == EPERM) { error = 0; continue; } if (error != 0) break; pack_kinfo(&kif); error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ? 0 : ENOMEM; if (error != 0) break; } } sx_sunlock(&shm_dict_lock); error2 = sbuf_finish(&sb); sbuf_delete(&sb); return (error != 0 ? error : error2); } SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list, CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE, NULL, 0, sysctl_posix_shm_list, "", "POSIX SHM list"); int kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode, struct filecaps *caps) { - return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL)); + return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL, NULL)); } /* * This version of the shm_open() interface leaves CLOEXEC behavior up to the * caller, and libc will enforce it for the traditional shm_open() call. This * allows other consumers, like memfd_create(), to opt-in for CLOEXEC. This * interface also includes a 'name' argument that is currently unused, but could * potentially be exported later via some interface for debugging purposes. * From the kernel's perspective, it is optional. Individual consumers like * memfd_create() may require it in order to be compatible with other systems * implementing the same function. */ int sys_shm_open2(struct thread *td, struct shm_open2_args *uap) { return (kern_shm_open2(td, uap->path, uap->flags, uap->mode, - uap->shmflags, NULL, uap->name)); + uap->shmflags, NULL, uap->name, NULL)); } int shm_get_path(struct vm_object *obj, char *path, size_t sz) { struct shmfd *shmfd; int error; error = 0; shmfd = NULL; sx_slock(&shm_dict_lock); VM_OBJECT_RLOCK(obj); if ((obj->flags & OBJ_POSIXSHM) == 0) { error = EINVAL; } else { if (obj->type == shmfd_pager_type) shmfd = obj->un_pager.swp.swp_priv; else if (obj->type == OBJT_PHYS) shmfd = obj->un_pager.phys.phys_priv; if (shmfd == NULL) { error = ENXIO; } else { strlcpy(path, shmfd->shm_path == NULL ? "anon" : shmfd->shm_path, sz); } } if (error != 0) path[0] = '\0'; VM_OBJECT_RUNLOCK(obj); sx_sunlock(&shm_dict_lock); return (error); } diff --git a/sys/sys/syscallsubr.h b/sys/sys/syscallsubr.h index fd183ffbc7a4..8237165b84ce 100644 --- a/sys/sys/syscallsubr.h +++ b/sys/sys/syscallsubr.h @@ -1,412 +1,413 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2002 Ian Dowse. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_SYSCALLSUBR_H_ #define _SYS_SYSCALLSUBR_H_ #include #include #include #include #include #include #include #include struct __wrusage; struct cpuset_copy_cb; struct file; struct filecaps; enum idtype; struct itimerval; struct image_args; struct jail; struct kevent; struct kevent_copyops; struct kld_file_stat; struct ksiginfo; struct mbuf; struct mq_attr; struct msghdr; struct msqid_ds; struct pollfd; struct ogetdirentries_args; struct rlimit; struct rusage; struct sched_param; struct sembuf; union semun; +struct shmfd; struct sockaddr; struct spacectl_range; struct stat; struct thr_param; struct timex; struct uio; struct vm_map; struct vmspace; typedef int (*mmap_check_fp_fn)(struct file *, int, int, int); struct mmap_req { vm_offset_t mr_hint; vm_size_t mr_len; int mr_prot; int mr_flags; int mr_fd; off_t mr_pos; mmap_check_fp_fn mr_check_fp_fn; }; uint64_t at2cnpflags(u_int at_flags, u_int mask); int kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, size_t buflen, size_t path_max); int kern_abort2(struct thread *td, const char *why, int nargs, void **uargs); int kern_accept(struct thread *td, int s, struct sockaddr *sa, struct file **fp); int kern_accept4(struct thread *td, int s, struct sockaddr *sa, int flags, struct file **fp); int kern_accessat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, int flags, int mode); int kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta); int kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa); int kern_break(struct thread *td, uintptr_t *addr); int kern_cap_ioctls_limit(struct thread *td, int fd, u_long *cmds, size_t ncmds); int kern_cap_rights_limit(struct thread *td, int fd, cap_rights_t *rights); int kern_chdir(struct thread *td, const char *path, enum uio_seg pathseg); int kern_clock_getcpuclockid2(struct thread *td, id_t id, int which, clockid_t *clk_id); int kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts); int kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats); int kern_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); int kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats); void kern_thread_cputime(struct thread *targettd, struct timespec *ats); void kern_process_cputime(struct proc *targetp, struct timespec *ats); int kern_close_range(struct thread *td, int flags, u_int lowfd, u_int highfd); int kern_close(struct thread *td, int fd); int 
kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa); int kern_copy_file_range(struct thread *td, int infd, off_t *inoffp, int outfd, off_t *outoffp, size_t len, unsigned int flags); int user_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *maskp, const struct cpuset_copy_cb *cb); int kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); int kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, cpuset_t *maskp); int user_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *maskp, const struct cpuset_copy_cb *cb); int kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp, const struct cpuset_copy_cb *cb); int kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, const domainset_t *maskp, int policy, const struct cpuset_copy_cb *cb); int kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); int kern_cpuset_setid(struct thread *td, cpuwhich_t which, id_t id, cpusetid_t setid); int kern_dup(struct thread *td, u_int mode, int flags, int old, int new); int kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p, struct vmspace *oldvmspace); int kern_extattr_delete_fd(struct thread *td, int fd, int attrnamespace, const char *attrname); int kern_extattr_delete_path(struct thread *td, const char *path, int attrnamespace, const char *attrname, int follow, enum uio_seg pathseg); int kern_extattr_get_fd(struct thread *td, int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); int kern_extattr_get_path(struct thread *td, const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes, int follow, enum uio_seg pathseg); int kern_extattr_list_fd(struct thread *td, int fd, int attrnamespace, struct uio *auiop); int kern_extattr_list_path(struct thread *td, const char *path, int attrnamespace, struct uio *auiop, int follow, enum uio_seg pathseg); int kern_extattr_set_fd(struct thread *td, int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); int kern_extattr_set_path(struct thread *td, const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes, int follow, enum uio_seg pathseg); int kern_fchmodat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, mode_t mode, int flag); int kern_fchownat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, int uid, int gid, int flag); int kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg); int kern_fcntl_freebsd(struct thread *td, int fd, int cmd, intptr_t arg); int kern_fhopen(struct thread *td, const struct fhandle *u_fhp, int flags); int kern_fhstat(struct thread *td, fhandle_t fh, struct stat *buf); int kern_fhstatfs(struct thread *td, fhandle_t fh, struct statfs *buf); int kern_fpathconf(struct thread *td, int fd, int name, long *valuep); int kern_freebsd11_getfsstat(struct thread *td, struct freebsd11_statfs *ubuf, long bufsize, int mode); int kern_fstat(struct thread *td, int fd, struct stat *sbp); int kern_fstatfs(struct thread *td, int fd, struct statfs *buf); int kern_fsync(struct thread *td, int fd, bool fullsync); int kern_ftruncate(struct thread *td, int fd, off_t length); int 
kern_futimes(struct thread *td, int fd, const struct timeval *tptr, enum uio_seg tptrseg); int kern_futimens(struct thread *td, int fd, const struct timespec *tptr, enum uio_seg tptrseg); int kern_getdirentries(struct thread *td, int fd, char *buf, size_t count, off_t *basep, ssize_t *residp, enum uio_seg bufseg); int kern_getfhat(struct thread *td, int flags, int fd, const char *path, enum uio_seg pathseg, fhandle_t *fhp, enum uio_seg fhseg); int kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize, size_t *countp, enum uio_seg bufseg, int mode); int kern_getitimer(struct thread *, u_int, struct itimerval *); int kern_getppid(struct thread *); int kern_getpeername(struct thread *td, int fd, struct sockaddr *sa); int kern_getpriority(struct thread *td, int which, int who); int kern_getrusage(struct thread *td, int who, struct rusage *rup); int kern_getsid(struct thread *td, pid_t pid); int kern_getsockname(struct thread *td, int fd, struct sockaddr *sa); int kern_getsockopt(struct thread *td, int s, int level, int name, void *optval, enum uio_seg valseg, socklen_t *valsize); int kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data); int kern_jail(struct thread *td, struct jail *j); int kern_jail_get(struct thread *td, struct uio *options, int flags); int kern_jail_set(struct thread *td, struct uio *options, int flags); int kern_kcmp(struct thread *td, pid_t pid1, pid_t pid2, int type, uintptr_t idx1, uintptr_t idx2); int kern_kevent(struct thread *td, int fd, int nchanges, int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout); int kern_kevent_anonymous(struct thread *td, int nevents, struct kevent_copyops *k_ops); int kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout); int kern_kill(struct thread *td, pid_t pid, int signum); int kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps); int kern_kldload(struct thread *td, const char *file, int *fileid); int kern_kldstat(struct thread *td, int fileid, struct kld_file_stat *stat); int kern_kldunload(struct thread *td, int fileid, int flags); int kern_kmq_notify(struct thread *, int, struct sigevent *); int kern_kmq_open(struct thread *, const char *, int, mode_t, const struct mq_attr *); int kern_kmq_setattr(struct thread *, int, const struct mq_attr *, struct mq_attr *); int kern_kmq_timedreceive(struct thread *, int, char *, size_t, unsigned int *, const struct timespec *); int kern_kmq_timedsend(struct thread *td, int, const char *, size_t, unsigned int, const struct timespec *); int kern_linkat(struct thread *td, int fd1, int fd2, const char *path1, const char *path2, enum uio_seg segflg, int flag); int kern_listen(struct thread *td, int s, int backlog); int kern_lseek(struct thread *td, int fd, off_t offset, int whence); int kern_lutimes(struct thread *td, const char *path, enum uio_seg pathseg, const struct timeval *tptr, enum uio_seg tptrseg); int kern_madvise(struct thread *td, uintptr_t addr, size_t len, int behav); int kern_membarrier(struct thread *td, int cmd, unsigned flags, int cpu_id); int kern_mincore(struct thread *td, uintptr_t addr, size_t len, char *vec); int kern_minherit(struct thread *td, uintptr_t addr, size_t len, int inherit); int kern_mkdirat(struct thread *td, int fd, const char *path, enum uio_seg segflg, int mode); int kern_mkfifoat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, int mode); int kern_mknodat(struct thread *td, int fd, const char *path, 
    enum uio_seg pathseg, int mode, dev_t dev);
int kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr, size_t len);
int kern_mmap(struct thread *td, const struct mmap_req *mrp);
int kern_mmap_racct_check(struct thread *td, struct vm_map *map, vm_size_t size);
int kern_mmap_maxprot(struct proc *p, int prot);
int kern_mprotect(struct thread *td, uintptr_t addr, size_t size, int prot, int flags);
int kern_msgctl(struct thread *, int, int, struct msqid_ds *);
int kern_msgrcv(struct thread *, int, void *, size_t, long, int, long *);
int kern_msgsnd(struct thread *, int, const void *, size_t, int, long);
int kern_msync(struct thread *td, uintptr_t addr, size_t size, int flags);
int kern_munlock(struct thread *td, uintptr_t addr, size_t size);
int kern_munmap(struct thread *td, uintptr_t addr, size_t size);
int kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt);
int kern_nosys(struct thread *td, int dummy);
int kern_ntp_adjtime(struct thread *td, struct timex *ntv, int *retvalp);
int kern_ogetdirentries(struct thread *td, struct ogetdirentries_args *uap, long *ploff);
int kern_ommap(struct thread *td, uintptr_t hint, int len, int oprot, int oflags, int fd, long pos);
int kern_openat(struct thread *td, int dirfd, const char *path, enum uio_seg pathseg, int flags, int mode);
int kern_openatfp(struct thread *td, int dirfd, const char *path, enum uio_seg pathseg, int flags, int mode, struct file **fpp);
int kern_pathconf(struct thread *td, const char *path, enum uio_seg pathseg, int name, u_long flags, long *valuep);
int kern_pipe(struct thread *td, int fildes[2], int flags, struct filecaps *fcaps1, struct filecaps *fcaps2);
int kern_poll(struct thread *td, struct pollfd *fds, u_int nfds, struct timespec *tsp, sigset_t *uset);
int kern_poll_kfds(struct thread *td, struct pollfd *fds, u_int nfds, struct timespec *tsp, sigset_t *uset);
bool kern_poll_maxfds(u_int nfds);
int kern_posix_error(struct thread *td, int error);
int kern_posix_fadvise(struct thread *td, int fd, off_t offset, off_t len, int advice);
int kern_posix_fallocate(struct thread *td, int fd, off_t offset, off_t len);
int kern_fspacectl(struct thread *td, int fd, int cmd, const struct spacectl_range *, int flags, struct spacectl_range *);
int kern_procctl(struct thread *td, enum idtype idtype, id_t id, int com, void *data);
int kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset);
int kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset);
int kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tvp, sigset_t *uset, int abi_nfdbits);
int kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data);
int kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte, off_t offset);
int kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset);
int kern_readlinkat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, char *buf, enum uio_seg bufseg, size_t count);
int kern_readv(struct thread *td, int fd, struct uio *auio);
int kern_recvit(struct thread *td, int s, struct msghdr *mp, enum uio_seg fromseg, struct mbuf **controlp);
int kern_renameat(struct thread *td, int oldfd, const char *old, int newfd, const char *new, enum uio_seg pathseg);
int kern_frmdirat(struct thread *td, int dfd, const char *path, int fd, enum uio_seg pathseg, int flag);
int kern_sched_getparam(struct thread *td, struct thread *targettd, struct sched_param *param);
int kern_sched_getscheduler(struct thread *td, struct thread *targettd, int *policy);
int kern_sched_setparam(struct thread *td, struct thread *targettd, struct sched_param *param);
int kern_sched_setscheduler(struct thread *td, struct thread *targettd, int policy, struct sched_param *param);
int kern_sched_rr_get_interval(struct thread *td, pid_t pid, struct timespec *ts);
int kern_sched_rr_get_interval_td(struct thread *td, struct thread *targettd, struct timespec *ts);
int kern_semctl(struct thread *td, int semid, int semnum, int cmd, union semun *arg, register_t *rval);
int kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou, fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits);
int kern_sendit(struct thread *td, int s, struct msghdr *mp, int flags, struct mbuf *control, enum uio_seg segflg);
int kern_setcred(struct thread *const td, const u_int flags, struct setcred *const wcred, gid_t *preallocated_groups);
int kern_setgroups(struct thread *td, int *ngrpp, gid_t *groups);
int kern_setitimer(struct thread *, u_int, struct itimerval *, struct itimerval *);
int kern_setpriority(struct thread *td, int which, int who, int prio);
int kern_setrlimit(struct thread *, u_int, struct rlimit *);
int kern_setsockopt(struct thread *td, int s, int level, int name, const void *optval, enum uio_seg valseg, socklen_t valsize);
int kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp);
int kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode, struct filecaps *fcaps);
int kern_shm_open2(struct thread *td, const char *path, int flags, mode_t mode, int shmflags, struct filecaps *fcaps,
-    const char *name);
+    const char *name, struct shmfd *shmfd);
int kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg);
int kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz);
int kern_shutdown(struct thread *td, int s, int how);
int kern_sigaction(struct thread *td, int sig, const struct sigaction *act, struct sigaction *oact, int flags);
int kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss);
int kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset, int flags);
int kern_sigsuspend(struct thread *td, sigset_t mask);
int kern_sigtimedwait(struct thread *td, sigset_t waitset, struct ksiginfo *ksi, struct timespec *timeout);
int kern_sigqueue(struct thread *td, pid_t pid, int signum, union sigval *value);
int kern_socket(struct thread *td, int domain, int type, int protocol);
int kern_statat(struct thread *td, int flag, int fd, const char *path, enum uio_seg pathseg, struct stat *sbp);
int kern_specialfd(struct thread *td, int type, void *arg);
int kern_statfs(struct thread *td, const char *path, enum uio_seg pathseg, struct statfs *buf);
int kern_symlinkat(struct thread *td, const char *path1, int fd, const char *path2, enum uio_seg segflg);
int kern_sync(struct thread *td);
int kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp, int *timerid, int preset_id);
int kern_ktimer_delete(struct thread *, int);
int kern_ktimer_settime(struct thread *td, int timer_id, int flags, struct itimerspec *val, struct itimerspec *oval);
int kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val);
int kern_ktimer_getoverrun(struct thread *td, int timer_id);
int kern_semop(struct thread *td, int usemid, struct sembuf *usops, size_t nsops, struct timespec *timeout);
int kern_thr_alloc(struct proc *, int pages, struct thread **);
int kern_thr_exit(struct thread *td);
int kern_thr_new(struct thread *td, struct thr_param *param);
int kern_thr_suspend(struct thread *td, struct timespec *tsp);
int kern_timerfd_create(struct thread *td, int clockid, int flags);
int kern_timerfd_gettime(struct thread *td, int fd, struct itimerspec *curr_value);
int kern_timerfd_settime(struct thread *td, int fd, int flags, const struct itimerspec *new_value, struct itimerspec *old_value);
int kern_truncate(struct thread *td, const char *path, enum uio_seg pathseg, off_t length);
int kern_funlinkat(struct thread *td, int dfd, const char *path, int fd, enum uio_seg pathseg, int flag, ino_t oldinum);
int kern_utimesat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, const struct timeval *tptr, enum uio_seg tptrseg);
int kern_utimensat(struct thread *td, int fd, const char *path, enum uio_seg pathseg, const struct timespec *tptr, enum uio_seg tptrseg, int flag);
int kern_wait(struct thread *td, pid_t pid, int *status, int options, struct rusage *rup);
int kern_wait6(struct thread *td, enum idtype idtype, id_t id, int *status, int options, struct __wrusage *wrup, siginfo_t *sip);
int kern_writev(struct thread *td, int fd, struct uio *auio);
int kern_socketpair(struct thread *td, int domain, int type, int protocol, int *rsv);
int kern_unmount(struct thread *td, const char *path, int flags);

/* flags for kern_sigaction */
#define KSA_OSIGSET	0x0001	/* uses osigact_t */
#define KSA_FREEBSD4	0x0002	/* uses ucontext4 */

struct freebsd11_dirent;
int freebsd11_kern_getdirentries(struct thread *td, int fd, char *ubuf, u_int count, long *basep, void (*func)(struct freebsd11_dirent *));

#endif /* !_SYS_SYSCALLSUBR_H_ */
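Editor's note on the one functional change in this hunk: kern_shm_open2() gains a trailing struct shmfd * argument. The sketch below shows how a hypothetical in-kernel caller that does not supply its own shmfd might invoke the updated prototype. It is a minimal sketch only, not code from this patch: the function name example_shm_open is invented, and it assumes (this hunk alone does not confirm it) that NULL remains acceptable for fcaps and name and that passing NULL for the new shmfd argument preserves the pre-change behaviour.

/*
 * Hypothetical caller of the updated kern_shm_open2() prototype (kernel
 * context).  Assumes NULL is accepted for fcaps, name, and the new shmfd
 * argument; verify against the actual implementation before relying on it.
 */
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <sys/syscallsubr.h>

static int
example_shm_open(struct thread *td)
{
	/*
	 * Anonymous shared memory object, read/write, no shmflags, no
	 * capability restrictions, no name, and no caller-supplied shmfd.
	 * The file descriptor is returned via td->td_retval[0] on success.
	 */
	return (kern_shm_open2(td, SHM_ANON, O_RDWR, 0600, 0,
	    NULL, NULL, NULL));
}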