Index: head/sys/kern/uipc_shm.c =================================================================== --- head/sys/kern/uipc_shm.c (revision 356511) +++ head/sys/kern/uipc_shm.c (revision 356512) @@ -1,1509 +1,1537 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson * All rights reserved. * * Portions of this software were developed by BAE Systems, the University of * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent * Computing (TC) research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Support for shared swap-backed anonymous memory objects via * shm_open(2), shm_rename(2), and shm_unlink(2). * While most of the implementation is here, vm_mmap.c contains * mapping logic changes. * * posixshmcontrol(1) allows users to inspect the state of the memory * objects. The per-uid swap resource limit controls the total amount of * memory that a user can consume for anonymous objects, including * shared ones.
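 *
 * A minimal userland sketch of these interfaces (error handling omitted;
 * the regression tests further below exercise the same paths):
 *
 *	fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	ftruncate(fd, getpagesize());
 *	p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);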
*/ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include "opt_ktrace.h" #include <...> /* sys/, security/audit, security/mac, and vm/ header names elided */ struct shm_mapping { char *sm_path; Fnv32_t sm_fnv; struct shmfd *sm_shmfd; LIST_ENTRY(shm_mapping) sm_link; }; static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor"); static LIST_HEAD(, shm_mapping) *shm_dictionary; static struct sx shm_dict_lock; static struct mtx shm_timestamp_lock; static u_long shm_hash; static struct unrhdr64 shm_ino_unr; static dev_t shm_dev_ino; #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash]) static void shm_init(void *arg); static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd); static struct shmfd *shm_lookup(char *path, Fnv32_t fnv); static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred); static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie); static int shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out); static fo_rdwr_t shm_read; static fo_rdwr_t shm_write; static fo_truncate_t shm_truncate; static fo_ioctl_t shm_ioctl; static fo_stat_t shm_stat; static fo_close_t shm_close; static fo_chmod_t shm_chmod; static fo_chown_t shm_chown; static fo_seek_t shm_seek; static fo_fill_kinfo_t shm_fill_kinfo; static fo_mmap_t shm_mmap; static fo_get_seals_t shm_get_seals; static fo_add_seals_t shm_add_seals; +static fo_fallocate_t shm_fallocate; /* File descriptor operations. */ struct fileops shm_ops = { .fo_read = shm_read, .fo_write = shm_write, .fo_truncate = shm_truncate, .fo_ioctl = shm_ioctl, .fo_poll = invfo_poll, .fo_kqfilter = invfo_kqfilter, .fo_stat = shm_stat, .fo_close = shm_close, .fo_chmod = shm_chmod, .fo_chown = shm_chown, .fo_sendfile = vn_sendfile, .fo_seek = shm_seek, .fo_fill_kinfo = shm_fill_kinfo, .fo_mmap = shm_mmap, .fo_get_seals = shm_get_seals, .fo_add_seals = shm_add_seals, + .fo_fallocate = shm_fallocate, .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE }; FEATURE(posix_shm, "POSIX shared memory"); static int uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio) { vm_page_t m; vm_pindex_t idx; size_t tlen; int error, offset, rv; idx = OFF_TO_IDX(uio->uio_offset); offset = uio->uio_offset & PAGE_MASK; tlen = MIN(PAGE_SIZE - offset, len); VM_OBJECT_WLOCK(obj); /* * Read I/O without either a corresponding resident page or swap * page: use zero_region. This is intended to avoid instantiating * pages on read from a sparse region. */ if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL && !vm_pager_has_page(obj, idx, NULL, NULL)) { VM_OBJECT_WUNLOCK(obj); return (uiomove(__DECONST(void *, zero_region), tlen, uio)); } /* * Parallel reads of the page content from disk are prevented * by exclusive busy. * * Although the tmpfs vnode lock is held here, it is * nonetheless safe to sleep waiting for a free page. The * pageout daemon does not need to acquire the tmpfs vnode * lock to page out tobj's pages because tobj is an OBJT_SWAP * type object.
*/ rv = vm_page_grab_valid(&m, obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY); if (rv != VM_PAGER_OK) { VM_OBJECT_WUNLOCK(obj); printf("uiomove_object: vm_obj %p idx %jd pager error %d\n", obj, idx, rv); return (EIO); } VM_OBJECT_WUNLOCK(obj); error = uiomove_fromphys(&m, offset, tlen, uio); if (uio->uio_rw == UIO_WRITE && error == 0) vm_page_set_dirty(m); vm_page_activate(m); vm_page_sunbusy(m); return (error); } int uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio) { ssize_t resid; size_t len; int error; error = 0; while ((resid = uio->uio_resid) > 0) { if (obj_size <= uio->uio_offset) break; len = MIN(obj_size - uio->uio_offset, resid); if (len == 0) break; error = uiomove_object_page(obj, len, uio); if (error != 0 || resid == uio->uio_resid) break; } return (error); } static int shm_seek(struct file *fp, off_t offset, int whence, struct thread *td) { struct shmfd *shmfd; off_t foffset; int error; shmfd = fp->f_data; foffset = foffset_lock(fp, 0); error = 0; switch (whence) { case L_INCR: if (foffset < 0 || (offset > 0 && foffset > OFF_MAX - offset)) { error = EOVERFLOW; break; } offset += foffset; break; case L_XTND: if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) { error = EOVERFLOW; break; } offset += shmfd->shm_size; break; case L_SET: break; default: error = EINVAL; } if (error == 0) { if (offset < 0 || offset > shmfd->shm_size) error = EINVAL; else td->td_uretoff.tdu_off = offset; } foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0); return (error); } static int shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct shmfd *shmfd; void *rl_cookie; int error; shmfd = fp->f_data; #ifdef MAC error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd); if (error) return (error); #endif foffset_lock_uio(fp, uio, flags); rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset, uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx); error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); foffset_unlock_uio(fp, uio, flags); return (error); } static int shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct shmfd *shmfd; void *rl_cookie; int error; shmfd = fp->f_data; #ifdef MAC error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd); if (error) return (error); #endif foffset_lock_uio(fp, uio, flags); if ((flags & FOF_OFFSET) == 0) { rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, &shmfd->shm_mtx); } else { rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset, uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx); } if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) error = EPERM; else error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio); rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); foffset_unlock_uio(fp, uio, flags); return (error); } static int shm_truncate(struct file *fp, off_t length, struct ucred *active_cred, struct thread *td) { struct shmfd *shmfd; #ifdef MAC int error; #endif shmfd = fp->f_data; #ifdef MAC error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd); if (error) return (error); #endif return (shm_dotruncate(shmfd, length)); } int shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, struct thread *td) { switch (com) { case FIONBIO: case FIOASYNC: /* * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, * just like it would on an unlinked regular file */ return (0); default: 
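/* Unknown requests get the conventional ENOTTY, as on a regular file. */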
return (ENOTTY); } } static int shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) { struct shmfd *shmfd; #ifdef MAC int error; #endif shmfd = fp->f_data; #ifdef MAC error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd); if (error) return (error); #endif /* * Attempt to return sanish values for fstat() on a memory file * descriptor. */ bzero(sb, sizeof(*sb)); sb->st_blksize = PAGE_SIZE; sb->st_size = shmfd->shm_size; sb->st_blocks = howmany(sb->st_size, sb->st_blksize); mtx_lock(&shm_timestamp_lock); sb->st_atim = shmfd->shm_atime; sb->st_ctim = shmfd->shm_ctime; sb->st_mtim = shmfd->shm_mtime; sb->st_birthtim = shmfd->shm_birthtime; sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */ sb->st_uid = shmfd->shm_uid; sb->st_gid = shmfd->shm_gid; mtx_unlock(&shm_timestamp_lock); sb->st_dev = shm_dev_ino; sb->st_ino = shmfd->shm_ino; sb->st_nlink = shmfd->shm_object->ref_count; return (0); } static int shm_close(struct file *fp, struct thread *td) { struct shmfd *shmfd; shmfd = fp->f_data; fp->f_data = NULL; shm_drop(shmfd); return (0); } static int shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out) { int error; char *path; const char *pr_path; size_t pr_pathlen; path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK); pr_path = td->td_ucred->cr_prison->pr_path; /* Construct a full pathname for jailed callers. */ pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN); error = copyinstr(userpath_in, path + pr_pathlen, MAXPATHLEN - pr_pathlen, NULL); if (error != 0) goto out; #ifdef KTRACE if (KTRPOINT(curthread, KTR_NAMEI)) ktrnamei(path); #endif /* Require paths to start with a '/' character. */ if (path[pr_pathlen] != '/') { error = EINVAL; goto out; } *path_out = path; out: if (error != 0) free(path, M_SHMFD); return (error); } static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie) { vm_object_t object; vm_page_t m; vm_pindex_t idx, nobjsize; vm_ooffset_t delta; int base, rv; KASSERT(length >= 0, ("shm_dotruncate: length < 0")); object = shmfd->shm_object; VM_OBJECT_ASSERT_WLOCKED(object); rangelock_cookie_assert(rl_cookie, RA_WLOCKED); if (length == shmfd->shm_size) return (0); nobjsize = OFF_TO_IDX(length + PAGE_MASK); /* Are we shrinking? If so, trim the end. */ if (length < shmfd->shm_size) { if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0) return (EPERM); /* * Disallow any requests to shrink the size if this * object is mapped into the kernel. */ if (shmfd->shm_kmappings > 0) return (EBUSY); /* * Zero the truncated part of the last page. */ base = length & PAGE_MASK; if (base != 0) { idx = OFF_TO_IDX(length); retry: m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT); if (m != NULL) { MPASS(vm_page_all_valid(m)); } else if (vm_pager_has_page(object, idx, NULL, NULL)) { m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL); if (m == NULL) goto retry; rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); if (rv == VM_PAGER_OK) { /* * Since the page was not resident, * and therefore not recently * accessed, immediately enqueue it * for asynchronous laundering. The * current operation is not regarded * as an access. */ vm_page_launder(m); } else { vm_page_free(m); VM_OBJECT_WUNLOCK(object); return (EIO); } } if (m != NULL) { pmap_zero_page_area(m, base, PAGE_SIZE - base); KASSERT(vm_page_all_valid(m), ("shm_dotruncate: page %p is invalid", m)); vm_page_set_dirty(m); vm_page_xunbusy(m); } } delta = IDX_TO_OFF(object->size - nobjsize); /* Toss in memory pages. 
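 *
 * To make the arithmetic concrete (a sketch assuming PAGE_SIZE = 4096):
 * shrinking a 3-page object to length 5000 gives base = 5000 & PAGE_MASK
 * = 904, so bytes 904..4095 of the page at idx = OFF_TO_IDX(5000) = 1
 * were zeroed above; nobjsize = OFF_TO_IDX(5000 + PAGE_MASK) = 2, so the
 * page at index 2 is removed below and delta = IDX_TO_OFF(3 - 2) = 4096
 * bytes of swap charge are released.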
*/ if (nobjsize < object->size) vm_object_page_remove(object, nobjsize, object->size, 0); /* Toss pages from swap. */ if (object->type == OBJT_SWAP) swap_pager_freespace(object, nobjsize, delta); /* Free the swap accounted for shm */ swap_release_by_cred(delta, object->cred); object->charge -= delta; } else { if ((shmfd->shm_seals & F_SEAL_GROW) != 0) return (EPERM); /* Try to reserve additional swap space. */ delta = IDX_TO_OFF(nobjsize - object->size); if (!swap_reserve_by_cred(delta, object->cred)) return (ENOMEM); object->charge += delta; } shmfd->shm_size = length; mtx_lock(&shm_timestamp_lock); vfs_timestamp(&shmfd->shm_ctime); shmfd->shm_mtime = shmfd->shm_ctime; mtx_unlock(&shm_timestamp_lock); object->size = nobjsize; return (0); } int shm_dotruncate(struct shmfd *shmfd, off_t length) { void *rl_cookie; int error; rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, &shmfd->shm_mtx); VM_OBJECT_WLOCK(shmfd->shm_object); error = shm_dotruncate_locked(shmfd, length, rl_cookie); VM_OBJECT_WUNLOCK(shmfd->shm_object); rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); return (error); } /* * shmfd object management including creation and reference counting * routines. */ struct shmfd * shm_alloc(struct ucred *ucred, mode_t mode) { struct shmfd *shmfd; shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO); shmfd->shm_size = 0; shmfd->shm_uid = ucred->cr_uid; shmfd->shm_gid = ucred->cr_gid; shmfd->shm_mode = mode; shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL, shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred); KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate")); vfs_timestamp(&shmfd->shm_birthtime); shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime = shmfd->shm_birthtime; shmfd->shm_ino = alloc_unr64(&shm_ino_unr); refcount_init(&shmfd->shm_refs, 1); mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF); rangelock_init(&shmfd->shm_rl); #ifdef MAC mac_posixshm_init(shmfd); mac_posixshm_create(ucred, shmfd); #endif return (shmfd); } struct shmfd * shm_hold(struct shmfd *shmfd) { refcount_acquire(&shmfd->shm_refs); return (shmfd); } void shm_drop(struct shmfd *shmfd) { if (refcount_release(&shmfd->shm_refs)) { #ifdef MAC mac_posixshm_destroy(shmfd); #endif rangelock_destroy(&shmfd->shm_rl); mtx_destroy(&shmfd->shm_mtx); vm_object_deallocate(shmfd->shm_object); free(shmfd, M_SHMFD); } } /* * Determine if the credentials have sufficient permissions for a * specified combination of FREAD and FWRITE. */ int shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags) { accmode_t accmode; int error; accmode = 0; if (flags & FREAD) accmode |= VREAD; if (flags & FWRITE) accmode |= VWRITE; mtx_lock(&shm_timestamp_lock); error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, accmode, ucred, NULL); mtx_unlock(&shm_timestamp_lock); return (error); } /* * Dictionary management. We maintain an in-kernel dictionary to map * paths to shmfd objects. We use the FNV hash on the path to store * the mappings in a hash table. 
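 *
 * A lookup therefore reduces to the following sketch (this is what
 * kern_shm_open2() does under the lock):
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);	(scans the SHM_HASH(fnv) bucket)
 *	sx_xunlock(&shm_dict_lock);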
*/ static void shm_init(void *arg) { mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF); sx_init(&shm_dict_lock, "shm dictionary"); shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash); new_unrhdr64(&shm_ino_unr, 1); shm_dev_ino = devfs_alloc_cdp_inode(); KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized")); } SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL); static struct shmfd * shm_lookup(char *path, Fnv32_t fnv) { struct shm_mapping *map; LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { if (map->sm_fnv != fnv) continue; if (strcmp(map->sm_path, path) == 0) return (map->sm_shmfd); } return (NULL); } static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd) { struct shm_mapping *map; map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK); map->sm_path = path; map->sm_fnv = fnv; map->sm_shmfd = shm_hold(shmfd); shmfd->shm_path = path; LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link); } static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred) { struct shm_mapping *map; int error; LIST_FOREACH(map, SHM_HASH(fnv), sm_link) { if (map->sm_fnv != fnv) continue; if (strcmp(map->sm_path, path) == 0) { #ifdef MAC error = mac_posixshm_check_unlink(ucred, map->sm_shmfd); if (error) return (error); #endif error = shm_access(map->sm_shmfd, ucred, FREAD | FWRITE); if (error) return (error); map->sm_shmfd->shm_path = NULL; LIST_REMOVE(map, sm_link); shm_drop(map->sm_shmfd); free(map->sm_path, M_SHMFD); free(map, M_SHMFD); return (0); } } return (ENOENT); } int kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode, int shmflags, struct filecaps *fcaps, const char *name __unused) { struct filedesc *fdp; struct shmfd *shmfd; struct file *fp; char *path; void *rl_cookie; Fnv32_t fnv; mode_t cmode; int error, fd, initial_seals; if ((shmflags & ~SHM_ALLOW_SEALING) != 0) return (EINVAL); initial_seals = F_SEAL_SEAL; if ((shmflags & SHM_ALLOW_SEALING) != 0) initial_seals &= ~F_SEAL_SEAL; #ifdef CAPABILITY_MODE /* * shm_open(2) is only allowed for anonymous objects. */ if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON)) return (ECAPMODE); #endif AUDIT_ARG_FFLAGS(flags); AUDIT_ARG_MODE(mode); if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR) return (EINVAL); if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0) return (EINVAL); /* * Currently only F_SEAL_SEAL may be set when creating or opening shmfd. * If the decision is made later to allow additional seals, care must be * taken below to ensure that the seals are properly set if the shmfd * already existed -- this currently assumes that only F_SEAL_SEAL can * be set and doesn't take further precautions to ensure the validity of * the seals being added with respect to current mappings. */ if ((initial_seals & ~F_SEAL_SEAL) != 0) return (EINVAL); fdp = td->td_proc->p_fd; cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS; /* * shm_open(2) created shm should always have O_CLOEXEC set, as mandated * by POSIX. We allow it to be unset here so that an in-kernel * interface may be written as a thin layer around shm, optionally not * setting CLOEXEC. For shm_open(2), O_CLOEXEC is set unconditionally * in sys_shm_open() to keep this implementation compliant. */ error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps); if (error) return (error); /* A SHM_ANON path pointer creates an anonymous object. */ if (userpath == SHM_ANON) { /* A read-only anonymous object is pointless. 
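 * shm_open(SHM_ANON, O_RDONLY, ...) therefore fails with EINVAL, as the
 * open_anon_readonly test below verifies.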
*/ if ((flags & O_ACCMODE) == O_RDONLY) { fdclose(td, fp, fd); fdrop(fp, td); return (EINVAL); } shmfd = shm_alloc(td->td_ucred, cmode); shmfd->shm_seals = initial_seals; } else { error = shm_copyin_path(td, userpath, &path); if (error != 0) { fdclose(td, fp, fd); fdrop(fp, td); return (error); } AUDIT_ARG_UPATH1_CANON(path); fnv = fnv_32_str(path, FNV1_32_INIT); sx_xlock(&shm_dict_lock); shmfd = shm_lookup(path, fnv); if (shmfd == NULL) { /* Object does not yet exist, create it if requested. */ if (flags & O_CREAT) { #ifdef MAC error = mac_posixshm_check_create(td->td_ucred, path); if (error == 0) { #endif shmfd = shm_alloc(td->td_ucred, cmode); shmfd->shm_seals = initial_seals; shm_insert(path, fnv, shmfd); #ifdef MAC } #endif } else { free(path, M_SHMFD); error = ENOENT; } } else { rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, &shmfd->shm_mtx); /* * kern_shm_open() likely shouldn't ever error out on * trying to set a seal that already exists, unlike * F_ADD_SEALS. This would break terribly as * shm_open(2) actually sets F_SEAL_SEAL to maintain * historical behavior where the underlying file could * not be sealed. */ initial_seals &= ~shmfd->shm_seals; /* * Object already exists, obtain a new * reference if requested and permitted. */ free(path, M_SHMFD); /* * initial_seals can't set additional seals if we've * already been set F_SEAL_SEAL. If F_SEAL_SEAL is set, * then we've already removed that one from * initial_seals. This is currently redundant as we * only allow setting F_SEAL_SEAL at creation time, but * it's cheap to check and decreases the effort required * to allow additional seals. */ if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 && initial_seals != 0) error = EPERM; else if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) error = EEXIST; else { #ifdef MAC error = mac_posixshm_check_open(td->td_ucred, shmfd, FFLAGS(flags & O_ACCMODE)); if (error == 0) #endif error = shm_access(shmfd, td->td_ucred, FFLAGS(flags & O_ACCMODE)); } /* * Truncate the file back to zero length if * O_TRUNC was specified and the object was * opened with read/write. */ if (error == 0 && (flags & (O_ACCMODE | O_TRUNC)) == (O_RDWR | O_TRUNC)) { VM_OBJECT_WLOCK(shmfd->shm_object); #ifdef MAC error = mac_posixshm_check_truncate( td->td_ucred, fp->f_cred, shmfd); if (error == 0) #endif error = shm_dotruncate_locked(shmfd, 0, rl_cookie); VM_OBJECT_WUNLOCK(shmfd->shm_object); } if (error == 0) { /* * Currently we only allow F_SEAL_SEAL to be * set initially. As noted above, this would * need to be reworked should that change. */ shmfd->shm_seals |= initial_seals; shm_hold(shmfd); } rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); } sx_xunlock(&shm_dict_lock); if (error) { fdclose(td, fp, fd); fdrop(fp, td); return (error); } } finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops); td->td_retval[0] = fd; fdrop(fp, td); return (0); } /* System calls. 
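 *
 * The sealing support above is reachable from userland through
 * shm_open2(2) and wrappers such as memfd_create(3); a hedged sketch
 * (error handling omitted):
 *
 *	fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	ftruncate(fd, len);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	(subsequent resize attempts on fd now fail with EPERM)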
*/ #ifdef COMPAT_FREEBSD12 int freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap) { return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, uap->mode, NULL)); } #endif int sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap) { char *path; Fnv32_t fnv; int error; error = shm_copyin_path(td, uap->path, &path); if (error != 0) return (error); AUDIT_ARG_UPATH1_CANON(path); fnv = fnv_32_str(path, FNV1_32_INIT); sx_xlock(&shm_dict_lock); error = shm_remove(path, fnv, td->td_ucred); sx_xunlock(&shm_dict_lock); free(path, M_SHMFD); return (error); } int sys_shm_rename(struct thread *td, struct shm_rename_args *uap) { char *path_from = NULL, *path_to = NULL; Fnv32_t fnv_from, fnv_to; struct shmfd *fd_from; struct shmfd *fd_to; int error; int flags; flags = uap->flags; AUDIT_ARG_FFLAGS(flags); /* * Make sure the user passed only valid flags. * If you add a new flag, please add a new term here. */ if ((flags & ~( SHM_RENAME_NOREPLACE | SHM_RENAME_EXCHANGE )) != 0) { error = EINVAL; goto out; } /* * EXCHANGE and NOREPLACE don't quite make sense together. Let's * force the user to choose one or the other. */ if ((flags & SHM_RENAME_NOREPLACE) != 0 && (flags & SHM_RENAME_EXCHANGE) != 0) { error = EINVAL; goto out; } /* Renaming to or from anonymous makes no sense */ if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) { error = EINVAL; goto out; } error = shm_copyin_path(td, uap->path_from, &path_from); if (error != 0) goto out; error = shm_copyin_path(td, uap->path_to, &path_to); if (error != 0) goto out; AUDIT_ARG_UPATH1_CANON(path_from); AUDIT_ARG_UPATH2_CANON(path_to); /* Rename with from/to equal is a no-op */ if (strcmp(path_from, path_to) == 0) goto out; fnv_from = fnv_32_str(path_from, FNV1_32_INIT); fnv_to = fnv_32_str(path_to, FNV1_32_INIT); sx_xlock(&shm_dict_lock); fd_from = shm_lookup(path_from, fnv_from); if (fd_from == NULL) { error = ENOENT; goto out_locked; } fd_to = shm_lookup(path_to, fnv_to); if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) { error = EEXIST; goto out_locked; } /* * Unconditionally prevents shm_remove from invalidating the 'from' * shm's state. */ shm_hold(fd_from); error = shm_remove(path_from, fnv_from, td->td_ucred); /* * One of my assumptions failed if ENOENT (e.g. locking didn't * protect us) */ KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s", path_from)); if (error != 0) { shm_drop(fd_from); goto out_locked; } /* * If we are exchanging, we need to ensure the shm_remove below * doesn't invalidate the dest shm's state. */ if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) shm_hold(fd_to); /* * NOTE: if path_to is not already in the hash, c'est la vie; * it simply means we have nothing already at path_to to unlink. * That is the ENOENT case. * * If we somehow don't have access to unlink this guy, but * did for the shm at path_from, then relink the shm to path_from * and abort with EACCES. * * All other errors: that is weird; let's relink and abort the * operation.
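 *
 * In userland terms, with both "/a" and "/b" existing, the flag matrix
 * is (sketch; the rename_to_* tests below exercise each case):
 *
 *	shm_rename("/a", "/b", 0);			("/b" is replaced)
 *	shm_rename("/a", "/b", SHM_RENAME_NOREPLACE);	(fails with EEXIST)
 *	shm_rename("/a", "/b", SHM_RENAME_EXCHANGE);	(contents swap)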
*/ error = shm_remove(path_to, fnv_to, td->td_ucred); if (error != 0 && error != ENOENT) { shm_insert(path_from, fnv_from, fd_from); shm_drop(fd_from); /* Don't free path_from now, since the hash references it */ path_from = NULL; goto out_locked; } error = 0; shm_insert(path_to, fnv_to, fd_from); /* Don't free path_to now, since the hash references it */ path_to = NULL; /* We kept a ref when we removed, and incremented again in insert */ shm_drop(fd_from); KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n", fd_from->shm_refs)); if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) { shm_insert(path_from, fnv_from, fd_to); path_from = NULL; shm_drop(fd_to); KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n", fd_to->shm_refs)); } out_locked: sx_xunlock(&shm_dict_lock); out: free(path_from, M_SHMFD); free(path_to, M_SHMFD); return (error); } int shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize, vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, struct thread *td) { struct shmfd *shmfd; vm_prot_t maxprot; int error; bool writecnt; void *rl_cookie; shmfd = fp->f_data; maxprot = VM_PROT_NONE; rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize, &shmfd->shm_mtx); /* FREAD should always be set. */ if ((fp->f_flag & FREAD) != 0) maxprot |= VM_PROT_EXECUTE | VM_PROT_READ; /* * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared * mapping with a write seal applied. */ if ((fp->f_flag & FWRITE) != 0 && ((flags & MAP_SHARED) == 0 || (shmfd->shm_seals & F_SEAL_WRITE) == 0)) maxprot |= VM_PROT_WRITE; writecnt = (flags & MAP_SHARED) != 0 && (prot & VM_PROT_WRITE) != 0; if (writecnt && (shmfd->shm_seals & F_SEAL_WRITE) != 0) { error = EPERM; goto out; } /* Don't permit shared writable mappings on read-only descriptors. */ if (writecnt && (maxprot & VM_PROT_WRITE) == 0) { error = EACCES; goto out; } maxprot &= cap_maxprot; /* See comment in vn_mmap(). */ if ( #ifdef _LP64 objsize > OFF_MAX || #endif foff < 0 || foff > OFF_MAX - objsize) { error = EINVAL; goto out; } #ifdef MAC error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags); if (error != 0) goto out; #endif mtx_lock(&shm_timestamp_lock); vfs_timestamp(&shmfd->shm_atime); mtx_unlock(&shm_timestamp_lock); vm_object_reference(shmfd->shm_object); if (writecnt) vm_pager_update_writecount(shmfd->shm_object, 0, objsize); error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags, shmfd->shm_object, foff, writecnt, td); if (error != 0) { if (writecnt) vm_pager_release_writecount(shmfd->shm_object, 0, objsize); vm_object_deallocate(shmfd->shm_object); } out: rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); return (error); } static int shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td) { struct shmfd *shmfd; int error; error = 0; shmfd = fp->f_data; mtx_lock(&shm_timestamp_lock); /* * SUSv4 says that x bits of permission need not be affected. * Be consistent with our shm_open there. 
*/ #ifdef MAC error = mac_posixshm_check_setmode(active_cred, shmfd, mode); if (error != 0) goto out; #endif error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid, VADMIN, active_cred, NULL); if (error != 0) goto out; shmfd->shm_mode = mode & ACCESSPERMS; out: mtx_unlock(&shm_timestamp_lock); return (error); } static int shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, struct thread *td) { struct shmfd *shmfd; int error; error = 0; shmfd = fp->f_data; mtx_lock(&shm_timestamp_lock); #ifdef MAC error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid); if (error != 0) goto out; #endif if (uid == (uid_t)-1) uid = shmfd->shm_uid; if (gid == (gid_t)-1) gid = shmfd->shm_gid; if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) || (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) && (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN))) goto out; shmfd->shm_uid = uid; shmfd->shm_gid = gid; out: mtx_unlock(&shm_timestamp_lock); return (error); } /* * Helper routines to allow the backing object of a shared memory file * descriptor to be mapped in the kernel. */ int shm_map(struct file *fp, size_t size, off_t offset, void **memp) { struct shmfd *shmfd; vm_offset_t kva, ofs; vm_object_t obj; int rv; if (fp->f_type != DTYPE_SHM) return (EINVAL); shmfd = fp->f_data; obj = shmfd->shm_object; VM_OBJECT_WLOCK(obj); /* * XXXRW: This validation is probably insufficient, and subject to * sign errors. It should be fixed. */ if (offset >= shmfd->shm_size || offset + size > round_page(shmfd->shm_size)) { VM_OBJECT_WUNLOCK(obj); return (EINVAL); } shmfd->shm_kmappings++; vm_object_reference_locked(obj); VM_OBJECT_WUNLOCK(obj); /* Map the object into the kernel_map and wire it. */ kva = vm_map_min(kernel_map); ofs = offset & PAGE_MASK; offset = trunc_page(offset); size = round_page(size + ofs); rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0, VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0); if (rv == KERN_SUCCESS) { rv = vm_map_wire(kernel_map, kva, kva + size, VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); if (rv == KERN_SUCCESS) { *memp = (void *)(kva + ofs); return (0); } vm_map_remove(kernel_map, kva, kva + size); } else vm_object_deallocate(obj); /* On failure, drop our mapping reference. */ VM_OBJECT_WLOCK(obj); shmfd->shm_kmappings--; VM_OBJECT_WUNLOCK(obj); return (vm_mmap_to_errno(rv)); } /* * We require the caller to unmap the entire entry. This allows us to * safely decrement shm_kmappings when a mapping is removed. 
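 *
 * A sketch of the intended in-kernel pairing (error handling omitted):
 *
 *	void *mem;
 *	error = shm_map(fp, size, offset, &mem);	(wires the pages)
 *	... use mem ...
 *	error = shm_unmap(fp, mem, size);	(must cover the whole mapping)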
*/ int shm_unmap(struct file *fp, void *mem, size_t size) { struct shmfd *shmfd; vm_map_entry_t entry; vm_offset_t kva, ofs; vm_object_t obj; vm_pindex_t pindex; vm_prot_t prot; boolean_t wired; vm_map_t map; int rv; if (fp->f_type != DTYPE_SHM) return (EINVAL); shmfd = fp->f_data; kva = (vm_offset_t)mem; ofs = kva & PAGE_MASK; kva = trunc_page(kva); size = round_page(size + ofs); map = kernel_map; rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry, &obj, &pindex, &prot, &wired); if (rv != KERN_SUCCESS) return (EINVAL); if (entry->start != kva || entry->end != kva + size) { vm_map_lookup_done(map, entry); return (EINVAL); } vm_map_lookup_done(map, entry); if (obj != shmfd->shm_object) return (EINVAL); vm_map_remove(map, kva, kva + size); VM_OBJECT_WLOCK(obj); KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped")); shmfd->shm_kmappings--; VM_OBJECT_WUNLOCK(obj); return (0); } static int shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list) { const char *path, *pr_path; size_t pr_pathlen; bool visible; sx_assert(&shm_dict_lock, SA_LOCKED); kif->kf_type = KF_TYPE_SHM; kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode; kif->kf_un.kf_file.kf_file_size = shmfd->shm_size; if (shmfd->shm_path != NULL) { path = shmfd->shm_path; pr_path = curthread->td_ucred->cr_prison->pr_path; if (strcmp(pr_path, "/") != 0) { /* Return the jail-rooted pathname. */ pr_pathlen = strlen(pr_path); visible = strncmp(path, pr_path, pr_pathlen) == 0 && path[pr_pathlen] == '/'; if (list && !visible) return (EPERM); if (visible) path += pr_pathlen; } strlcpy(kif->kf_path, path, sizeof(kif->kf_path)); } return (0); } static int shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp __unused) { int res; sx_slock(&shm_dict_lock); res = shm_fill_kinfo_locked(fp->f_data, kif, false); sx_sunlock(&shm_dict_lock); return (res); } static int shm_add_seals(struct file *fp, int seals) { struct shmfd *shmfd; void *rl_cookie; vm_ooffset_t writemappings; int error, nseals; error = 0; shmfd = fp->f_data; rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, &shmfd->shm_mtx); /* Even already-set seals should result in EPERM. */ if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) { error = EPERM; goto out; } nseals = seals & ~shmfd->shm_seals; if ((nseals & F_SEAL_WRITE) != 0) { /* * The rangelock above prevents writable mappings from being * added after we've started applying seals. The RLOCK here * is to avoid torn reads on ILP32 arches as unmapping/reducing * writemappings will be done without a rangelock. */ VM_OBJECT_RLOCK(shmfd->shm_object); writemappings = shmfd->shm_object->un_pager.swp.writemappings; VM_OBJECT_RUNLOCK(shmfd->shm_object); /* kmappings are also writable */ if (writemappings > 0) { error = EBUSY; goto out; } } shmfd->shm_seals |= nseals; out: rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); return (error); } static int shm_get_seals(struct file *fp, int *seals) { struct shmfd *shmfd; shmfd = fp->f_data; *seals = shmfd->shm_seals; return (0); +} + +static int +shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td) +{ + void *rl_cookie; + struct shmfd *shmfd; + size_t size; + int error; + + /* This assumes that the caller already checked for overflow.
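+ * posix_fallocate(2) performs that validation in kern_posix_fallocate()
+ * before this fo_fallocate hook is reached. A sketch of the resulting
+ * userland semantics, for an fd obtained from shm_open(2):
+ *
+ *	posix_fallocate(fd, 0, 2 * size);	(grows the object as needed)
+ *	posix_fallocate(fd, 0, size / 2);	(succeeds; never shrinks)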
*/ + error = 0; + shmfd = fp->f_data; + size = offset + len; + rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX, + &shmfd->shm_mtx); + if (size > shmfd->shm_size) { + VM_OBJECT_WLOCK(shmfd->shm_object); + error = shm_dotruncate_locked(shmfd, size, rl_cookie); + VM_OBJECT_WUNLOCK(shmfd->shm_object); + } + rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx); + /* Translate to posix_fallocate(2) return value as needed. */ + if (error == ENOMEM) + error = ENOSPC; + return (error); } static int sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS) { struct shm_mapping *shmm; struct sbuf sb; struct kinfo_file kif; u_long i; ssize_t curlen; int error, error2; sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); curlen = 0; error = 0; sx_slock(&shm_dict_lock); for (i = 0; i < shm_hash + 1; i++) { LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) { error = shm_fill_kinfo_locked(shmm->sm_shmfd, &kif, true); if (error == EPERM) continue; if (error != 0) break; pack_kinfo(&kif); if (req->oldptr != NULL && kif.kf_structsize + curlen > req->oldlen) break; error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ? 0 : ENOMEM; if (error != 0) break; curlen += kif.kf_structsize; } } sx_sunlock(&shm_dict_lock); error2 = sbuf_finish(&sb); sbuf_delete(&sb); return (error != 0 ? error : error2); } SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE, NULL, 0, sysctl_posix_shm_list, "", "POSIX SHM list"); int kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode, struct filecaps *caps) { return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL)); } /* * This version of the shm_open() interface leaves CLOEXEC behavior up to the * caller, and libc will enforce it for the traditional shm_open() call. This * allows other consumers, like memfd_create(), to opt-in for CLOEXEC. This * interface also includes a 'name' argument that is currently unused, but could * potentially be exported later via some interface for debugging purposes. * From the kernel's perspective, it is optional. Individual consumers like * memfd_create() may require it in order to be compatible with other systems * implementing the same function. */ int sys_shm_open2(struct thread *td, struct shm_open2_args *uap) { return (kern_shm_open2(td, uap->path, uap->flags, uap->mode, uap->shmflags, NULL, uap->name)); } Index: head/tests/sys/posixshm/posixshm_test.c =================================================================== --- head/tests/sys/posixshm/posixshm_test.c (revision 356511) +++ head/tests/sys/posixshm/posixshm_test.c (revision 356512) @@ -1,956 +1,995 @@ /*- * Copyright (c) 2006 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <...> /* sys/, libc, and atf-c.h header names elided */ #define TEST_PATH_LEN 256 static char test_path[TEST_PATH_LEN]; static char test_path2[TEST_PATH_LEN]; static unsigned int test_path_idx = 0; static void gen_a_test_path(char *path) { snprintf(path, TEST_PATH_LEN, "/%s/tmp.XXXXXX%d", getenv("TMPDIR") == NULL ? "/tmp" : getenv("TMPDIR"), test_path_idx); test_path_idx++; ATF_REQUIRE_MSG(mkstemp(path) != -1, "mkstemp failed; errno=%d", errno); ATF_REQUIRE_MSG(unlink(path) == 0, "unlink failed; errno=%d", errno); } static void gen_test_path(void) { gen_a_test_path(test_path); } static void gen_test_path2(void) { gen_a_test_path(test_path2); } /* * Attempt a shm_open() that should fail with an expected error of 'error'. */ static void shm_open_should_fail(const char *path, int flags, mode_t mode, int error) { int fd; fd = shm_open(path, flags, mode); ATF_CHECK_MSG(fd == -1, "shm_open didn't fail"); ATF_CHECK_MSG(error == errno, "shm_open didn't fail with expected errno; errno=%d; expected " "errno=%d", errno, error); } /* * Attempt a shm_unlink() that should fail with an expected error of 'error'. */ static void shm_unlink_should_fail(const char *path, int error) { ATF_CHECK_MSG(shm_unlink(path) == -1, "shm_unlink didn't fail"); ATF_CHECK_MSG(error == errno, "shm_unlink didn't fail with expected errno; errno=%d; expected " "errno=%d", errno, error); } /* * Open the test object and write a value to the first byte. Returns valid fd * on success and -1 on failure. */ static int scribble_object(const char *path, char value) { char *page; int fd, pagesize; ATF_REQUIRE(0 < (pagesize = getpagesize())); fd = shm_open(path, O_CREAT|O_EXCL|O_RDWR, 0777); if (fd < 0 && errno == EEXIST) { if (shm_unlink(path) < 0) atf_tc_fail("shm_unlink"); fd = shm_open(path, O_CREAT | O_EXCL | O_RDWR, 0777); } if (fd < 0) atf_tc_fail("shm_open failed; errno=%d", errno); if (ftruncate(fd, pagesize) < 0) atf_tc_fail("ftruncate failed; errno=%d", errno); page = mmap(0, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (page == MAP_FAILED) atf_tc_fail("mmap failed; errno=%d", errno); page[0] = value; ATF_REQUIRE_MSG(munmap(page, pagesize) == 0, "munmap failed; errno=%d", errno); return (fd); } /* * Fail the test case if the 'path' does not refer to an shm whose first byte * is equal to expected_value */ static void verify_object(const char *path, char expected_value) { int fd; int pagesize; char *page; ATF_REQUIRE(0 < (pagesize = getpagesize())); fd = shm_open(path, O_RDONLY, 0777); if (fd < 0) atf_tc_fail("shm_open failed in verify_object; errno=%d, path=%s", errno, path); page = mmap(0, pagesize, PROT_READ, MAP_SHARED, fd, 0); if (page == MAP_FAILED) atf_tc_fail("mmap(1)"); if (page[0] != expected_value) atf_tc_fail("Renamed object has incorrect value; has " "%d (0x%x, '%c'), expected %d (0x%x, '%c')\n", page[0], page[0], isprint(page[0]) ?
page[0] : ' ', expected_value, expected_value, isprint(expected_value) ? expected_value : ' '); ATF_REQUIRE_MSG(munmap(page, pagesize) == 0, "munmap failed; errno=%d", errno); close(fd); } ATF_TC_WITHOUT_HEAD(remap_object); ATF_TC_BODY(remap_object, tc) { char *page; int fd, pagesize; ATF_REQUIRE(0 < (pagesize = getpagesize())); gen_test_path(); fd = scribble_object(test_path, '1'); page = mmap(0, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (page == MAP_FAILED) atf_tc_fail("mmap(2) failed; errno=%d", errno); if (page[0] != '1') atf_tc_fail("missing data ('%c' != '1')", page[0]); close(fd); ATF_REQUIRE_MSG(munmap(page, pagesize) == 0, "munmap failed; errno=%d", errno); ATF_REQUIRE_MSG(shm_unlink(test_path) != -1, "shm_unlink failed; errno=%d", errno); } ATF_TC_WITHOUT_HEAD(rename_from_anon); ATF_TC_BODY(rename_from_anon, tc) { int rc; gen_test_path(); rc = shm_rename(SHM_ANON, test_path, 0); if (rc != -1) atf_tc_fail("shm_rename from SHM_ANON succeeded unexpectedly"); } ATF_TC_WITHOUT_HEAD(rename_bad_path_pointer); ATF_TC_BODY(rename_bad_path_pointer, tc) { const char *bad_path; int rc; bad_path = (const char *)0x1; gen_test_path(); rc = shm_rename(test_path, bad_path, 0); if (rc != -1) atf_tc_fail("shm_rename of nonexisting shm succeeded unexpectedly"); rc = shm_rename(bad_path, test_path, 0); if (rc != -1) atf_tc_fail("shm_rename of nonexisting shm succeeded unexpectedly"); } ATF_TC_WITHOUT_HEAD(rename_from_nonexisting); ATF_TC_BODY(rename_from_nonexisting, tc) { int rc; gen_test_path(); gen_test_path2(); rc = shm_rename(test_path, test_path2, 0); if (rc != -1) atf_tc_fail("shm_rename of nonexisting shm succeeded unexpectedly"); if (errno != ENOENT) atf_tc_fail("Expected ENOENT to rename of nonexistent shm; got %d", errno); } ATF_TC_WITHOUT_HEAD(rename_to_anon); ATF_TC_BODY(rename_to_anon, tc) { int rc; gen_test_path(); rc = shm_rename(test_path, SHM_ANON, 0); if (rc != -1) atf_tc_fail("shm_rename to SHM_ANON succeeded unexpectedly"); } ATF_TC_WITHOUT_HEAD(rename_to_replace); ATF_TC_BODY(rename_to_replace, tc) { char expected_value; int fd; int fd2; // Some contents we can verify later expected_value = 'g'; gen_test_path(); fd = scribble_object(test_path, expected_value); close(fd); // Give the other some different value so we can detect success gen_test_path2(); fd2 = scribble_object(test_path2, 'h'); close(fd2); ATF_REQUIRE_MSG(shm_rename(test_path, test_path2, 0) == 0, "shm_rename failed; errno=%d", errno); // Read back renamed; verify contents verify_object(test_path2, expected_value); } ATF_TC_WITHOUT_HEAD(rename_to_noreplace); ATF_TC_BODY(rename_to_noreplace, tc) { char expected_value_from; char expected_value_to; int fd_from; int fd_to; int rc; // Some contents we can verify later expected_value_from = 'g'; gen_test_path(); fd_from = scribble_object(test_path, expected_value_from); close(fd_from); // Give the other some different value so we can detect success expected_value_to = 'h'; gen_test_path2(); fd_to = scribble_object(test_path2, expected_value_to); close(fd_to); rc = shm_rename(test_path, test_path2, SHM_RENAME_NOREPLACE); ATF_REQUIRE_MSG((rc == -1) && (errno == EEXIST), "shm_rename didn't fail as expected; errno: %d; return: %d", errno, rc); // Read back renamed; verify contents verify_object(test_path2, expected_value_to); } ATF_TC_WITHOUT_HEAD(rename_to_exchange); ATF_TC_BODY(rename_to_exchange, tc) { char expected_value_from; char expected_value_to; int fd_from; int fd_to; // Some contents we can verify later expected_value_from = 'g'; gen_test_path(); 
fd_from = scribble_object(test_path, expected_value_from); close(fd_from); // Give the other some different value so we can detect success expected_value_to = 'h'; gen_test_path2(); fd_to = scribble_object(test_path2, expected_value_to); close(fd_to); ATF_REQUIRE_MSG(shm_rename(test_path, test_path2, SHM_RENAME_EXCHANGE) == 0, "shm_rename failed; errno=%d", errno); // Read back renamed; verify contents verify_object(test_path, expected_value_to); verify_object(test_path2, expected_value_from); } ATF_TC_WITHOUT_HEAD(rename_to_exchange_nonexisting); ATF_TC_BODY(rename_to_exchange_nonexisting, tc) { char expected_value_from; int fd_from; // Some contents we can verify later expected_value_from = 'g'; gen_test_path(); fd_from = scribble_object(test_path, expected_value_from); close(fd_from); gen_test_path2(); ATF_REQUIRE_MSG(shm_rename(test_path, test_path2, SHM_RENAME_EXCHANGE) == 0, "shm_rename failed; errno=%d", errno); // Read back renamed; verify contents verify_object(test_path2, expected_value_from); } ATF_TC_WITHOUT_HEAD(rename_to_self); ATF_TC_BODY(rename_to_self, tc) { int fd; char expected_value; expected_value = 't'; gen_test_path(); fd = scribble_object(test_path, expected_value); close(fd); ATF_REQUIRE_MSG(shm_rename(test_path, test_path, 0) == 0, "shm_rename failed; errno=%d", errno); verify_object(test_path, expected_value); } ATF_TC_WITHOUT_HEAD(rename_bad_flag); ATF_TC_BODY(rename_bad_flag, tc) { int fd; int rc; /* Make sure we don't fail out due to ENOENT */ gen_test_path(); gen_test_path2(); fd = scribble_object(test_path, 'd'); close(fd); fd = scribble_object(test_path2, 'd'); close(fd); /* * Note: if we end up with enough flags that we use all the bits, * then remove this test completely. */ rc = shm_rename(test_path, test_path2, INT_MIN); ATF_REQUIRE_MSG((rc == -1) && (errno == EINVAL), "shm_rename should have failed with EINVAL; got: return=%d, " "errno=%d", rc, errno); } ATF_TC_WITHOUT_HEAD(reopen_object); ATF_TC_BODY(reopen_object, tc) { char *page; int fd, pagesize; ATF_REQUIRE(0 < (pagesize = getpagesize())); gen_test_path(); fd = scribble_object(test_path, '1'); close(fd); fd = shm_open(test_path, O_RDONLY, 0777); if (fd < 0) atf_tc_fail("shm_open(2) failed; errno=%d", errno); page = mmap(0, pagesize, PROT_READ, MAP_SHARED, fd, 0); if (page == MAP_FAILED) atf_tc_fail("mmap(2) failed; errno=%d", errno); if (page[0] != '1') atf_tc_fail("missing data ('%c' != '1')", page[0]); ATF_REQUIRE_MSG(munmap(page, pagesize) == 0, "munmap failed; errno=%d", errno); close(fd); ATF_REQUIRE_MSG(shm_unlink(test_path) != -1, "shm_unlink failed; errno=%d", errno); } ATF_TC_WITHOUT_HEAD(readonly_mmap_write); ATF_TC_BODY(readonly_mmap_write, tc) { char *page; int fd, pagesize; ATF_REQUIRE(0 < (pagesize = getpagesize())); gen_test_path(); fd = shm_open(test_path, O_RDONLY | O_CREAT, 0777); ATF_REQUIRE_MSG(fd >= 0, "shm_open failed; errno=%d", errno); /* PROT_WRITE should fail with EACCES. 
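 * The descriptor was opened O_RDONLY, so shm_mmap() refuses to set up a
 * shared writable mapping.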
*/ page = mmap(0, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (page != MAP_FAILED) atf_tc_fail("mmap(PROT_WRITE) succeeded unexpectedly"); if (errno != EACCES) atf_tc_fail("mmap(PROT_WRITE) didn't fail with EACCES; " "errno=%d", errno); close(fd); ATF_REQUIRE_MSG(shm_unlink(test_path) != -1, "shm_unlink failed; errno=%d", errno); } ATF_TC_WITHOUT_HEAD(open_after_link); ATF_TC_BODY(open_after_link, tc) { int fd; gen_test_path(); fd = shm_open(test_path, O_RDONLY | O_CREAT, 0777); ATF_REQUIRE_MSG(fd >= 0, "shm_open(1) failed; errno=%d", errno); close(fd); ATF_REQUIRE_MSG(shm_unlink(test_path) != -1, "shm_unlink failed: %d", errno); shm_open_should_fail(test_path, O_RDONLY, 0777, ENOENT); } ATF_TC_WITHOUT_HEAD(open_invalid_path); ATF_TC_BODY(open_invalid_path, tc) { shm_open_should_fail("blah", O_RDONLY, 0777, EINVAL); } ATF_TC_WITHOUT_HEAD(open_write_only); ATF_TC_BODY(open_write_only, tc) { gen_test_path(); shm_open_should_fail(test_path, O_WRONLY, 0777, EINVAL); } ATF_TC_WITHOUT_HEAD(open_extra_flags); ATF_TC_BODY(open_extra_flags, tc) { gen_test_path(); shm_open_should_fail(test_path, O_RDONLY | O_DIRECT, 0777, EINVAL); } ATF_TC_WITHOUT_HEAD(open_anon); ATF_TC_BODY(open_anon, tc) { int fd; fd = shm_open(SHM_ANON, O_RDWR, 0777); ATF_REQUIRE_MSG(fd >= 0, "shm_open failed; errno=%d", errno); close(fd); } ATF_TC_WITHOUT_HEAD(open_anon_readonly); ATF_TC_BODY(open_anon_readonly, tc) { shm_open_should_fail(SHM_ANON, O_RDONLY, 0777, EINVAL); } ATF_TC_WITHOUT_HEAD(open_bad_path_pointer); ATF_TC_BODY(open_bad_path_pointer, tc) { shm_open_should_fail((char *)1024, O_RDONLY, 0777, EFAULT); } ATF_TC_WITHOUT_HEAD(open_path_too_long); ATF_TC_BODY(open_path_too_long, tc) { char *page; page = malloc(MAXPATHLEN + 1); memset(page, 'a', MAXPATHLEN); page[MAXPATHLEN] = '\0'; shm_open_should_fail(page, O_RDONLY, 0777, ENAMETOOLONG); free(page); } ATF_TC_WITHOUT_HEAD(open_nonexisting_object); ATF_TC_BODY(open_nonexisting_object, tc) { shm_open_should_fail("/notreallythere", O_RDONLY, 0777, ENOENT); } ATF_TC_WITHOUT_HEAD(open_create_existing_object); ATF_TC_BODY(open_create_existing_object, tc) { int fd; gen_test_path(); fd = shm_open(test_path, O_RDONLY|O_CREAT, 0777); ATF_REQUIRE_MSG(fd >= 0, "shm_open failed; errno=%d", errno); close(fd); shm_open_should_fail(test_path, O_RDONLY|O_CREAT|O_EXCL, 0777, EEXIST); ATF_REQUIRE_MSG(shm_unlink(test_path) != -1, "shm_unlink failed; errno=%d", errno); } ATF_TC_WITHOUT_HEAD(trunc_resets_object); ATF_TC_BODY(trunc_resets_object, tc) { struct stat sb; int fd; gen_test_path(); /* Create object and set size to 1024. */ fd = shm_open(test_path, O_RDWR | O_CREAT, 0777); ATF_REQUIRE_MSG(fd >= 0, "shm_open(1) failed; errno=%d", errno); ATF_REQUIRE_MSG(ftruncate(fd, 1024) != -1, "ftruncate failed; errno=%d", errno); ATF_REQUIRE_MSG(fstat(fd, &sb) != -1, "fstat(1) failed; errno=%d", errno); ATF_REQUIRE_MSG(sb.st_size == 1024, "size %d != 1024", (int)sb.st_size); close(fd); /* Open with O_TRUNC which should reset size to 0. 
*/ fd = shm_open(test_path, O_RDWR | O_TRUNC, 0777); ATF_REQUIRE_MSG(fd >= 0, "shm_open(2) failed; errno=%d", errno); ATF_REQUIRE_MSG(fstat(fd, &sb) != -1, "fstat(2) failed; errno=%d", errno); ATF_REQUIRE_MSG(sb.st_size == 0, "size was not 0 after truncation: %d", (int)sb.st_size); close(fd); ATF_REQUIRE_MSG(shm_unlink(test_path) != -1, "shm_unlink failed; errno=%d", errno); } ATF_TC_WITHOUT_HEAD(unlink_bad_path_pointer); ATF_TC_BODY(unlink_bad_path_pointer, tc) { shm_unlink_should_fail((char *)1024, EFAULT); } ATF_TC_WITHOUT_HEAD(unlink_path_too_long); ATF_TC_BODY(unlink_path_too_long, tc) { char *page; page = malloc(MAXPATHLEN + 1); memset(page, 'a', MAXPATHLEN); page[MAXPATHLEN] = '\0'; shm_unlink_should_fail(page, ENAMETOOLONG); free(page); } ATF_TC_WITHOUT_HEAD(object_resize); ATF_TC_BODY(object_resize, tc) { pid_t pid; struct stat sb; char *page; int fd, pagesize, status; ATF_REQUIRE(0 < (pagesize = getpagesize())); /* Start off with a size of a single page. */ fd = shm_open(SHM_ANON, O_CREAT|O_RDWR, 0777); if (fd < 0) atf_tc_fail("shm_open failed; errno=%d", errno); if (ftruncate(fd, pagesize) < 0) atf_tc_fail("ftruncate(1) failed; errno=%d", errno); if (fstat(fd, &sb) < 0) atf_tc_fail("fstat(1) failed; errno=%d", errno); if (sb.st_size != pagesize) atf_tc_fail("first resize failed (%d != %d)", (int)sb.st_size, pagesize); /* Write a '1' to the first byte. */ page = mmap(0, pagesize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (page == MAP_FAILED) atf_tc_fail("mmap(1)"); page[0] = '1'; ATF_REQUIRE_MSG(munmap(page, pagesize) == 0, "munmap failed; errno=%d", errno); /* Grow the object to 2 pages. */ if (ftruncate(fd, pagesize * 2) < 0) atf_tc_fail("ftruncate(2) failed; errno=%d", errno); if (fstat(fd, &sb) < 0) atf_tc_fail("fstat(2) failed; errno=%d", errno); if (sb.st_size != pagesize * 2) atf_tc_fail("second resize failed (%d != %d)", (int)sb.st_size, pagesize * 2); /* Check for '1' at the first byte. */ page = mmap(0, pagesize * 2, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (page == MAP_FAILED) atf_tc_fail("mmap(2) failed; errno=%d", errno); if (page[0] != '1') atf_tc_fail("'%c' != '1'", page[0]); /* Write a '2' at the start of the second page. */ page[pagesize] = '2'; /* Shrink the object back to 1 page. */ if (ftruncate(fd, pagesize) < 0) atf_tc_fail("ftruncate(3) failed; errno=%d", errno); if (fstat(fd, &sb) < 0) atf_tc_fail("fstat(3) failed; errno=%d", errno); if (sb.st_size != pagesize) atf_tc_fail("third resize failed (%d != %d)", (int)sb.st_size, pagesize); /* * Fork a child process to make sure the second page is no * longer valid. */ pid = fork(); if (pid == -1) atf_tc_fail("fork failed; errno=%d", errno); if (pid == 0) { struct rlimit lim; char c; /* Don't generate a core dump. */ ATF_REQUIRE(getrlimit(RLIMIT_CORE, &lim) == 0); lim.rlim_cur = 0; ATF_REQUIRE(setrlimit(RLIMIT_CORE, &lim) == 0); /* * The previous ftruncate(2) shrunk the backing object * so that this address is no longer valid, so reading * from it should trigger a SIGBUS. */ c = page[pagesize]; fprintf(stderr, "child: page 1: '%c'\n", c); exit(0); } if (wait(&status) < 0) atf_tc_fail("wait failed; errno=%d", errno); if (!WIFSIGNALED(status) || WTERMSIG(status) != SIGBUS) atf_tc_fail("child terminated with status %x", status); /* Grow the object back to 2 pages. 
*/ if (ftruncate(fd, pagesize * 2) < 0) atf_tc_fail("ftruncate(2) failed; errno=%d", errno); if (fstat(fd, &sb) < 0) atf_tc_fail("fstat(2) failed; errno=%d", errno); if (sb.st_size != pagesize * 2) atf_tc_fail("fourth resize failed (%d != %d)", (int)sb.st_size, pagesize * 2); /* * Note that the mapping at 'page' for the second page is * still valid, and now that the shm object has been grown * back up to 2 pages, there is now memory backing this page * so the read will work. However, the data should be zero * rather than '2' as the old data was thrown away when the * object was shrunk and the new pages added when an object is * grown are zero-filled. */ if (page[pagesize] != 0) atf_tc_fail("invalid data at %d: %x != 0", pagesize, (int)page[pagesize]); close(fd); } /* Signal handler which does nothing. */ static void ignoreit(int sig __unused) { ; } ATF_TC_WITHOUT_HEAD(shm_functionality_across_fork); ATF_TC_BODY(shm_functionality_across_fork, tc) { char *cp, c; int error, desc, rv; long scval; sigset_t ss; struct sigaction sa; void *region; size_t i, psize; #ifndef _POSIX_SHARED_MEMORY_OBJECTS printf("_POSIX_SHARED_MEMORY_OBJECTS is undefined\n"); #else printf("_POSIX_SHARED_MEMORY_OBJECTS is defined as %ld\n", (long)_POSIX_SHARED_MEMORY_OBJECTS - 0); if (_POSIX_SHARED_MEMORY_OBJECTS - 0 == -1) printf("***Indicates this feature may be unsupported!\n"); #endif errno = 0; scval = sysconf(_SC_SHARED_MEMORY_OBJECTS); if (scval == -1 && errno != 0) { atf_tc_fail("sysconf(_SC_SHARED_MEMORY_OBJECTS) failed; " "errno=%d", errno); } else { printf("sysconf(_SC_SHARED_MEMORY_OBJECTS) returns %ld\n", scval); if (scval == -1) printf("***Indicates this feature is unsupported!\n"); } errno = 0; scval = sysconf(_SC_PAGESIZE); if (scval == -1 && errno != 0) { atf_tc_fail("sysconf(_SC_PAGESIZE) failed; errno=%d", errno); } else if (scval <= 0) { fprintf(stderr, "bogus return from sysconf(_SC_PAGESIZE): %ld", scval); psize = 4096; } else { printf("sysconf(_SC_PAGESIZE) returns %ld\n", scval); psize = scval; } gen_test_path(); desc = shm_open(test_path, O_EXCL | O_CREAT | O_RDWR, 0600); ATF_REQUIRE_MSG(desc >= 0, "shm_open failed; errno=%d", errno); ATF_REQUIRE_MSG(shm_unlink(test_path) == 0, "shm_unlink failed; errno=%d", errno); ATF_REQUIRE_MSG(ftruncate(desc, (off_t)psize) != -1, "ftruncate failed; errno=%d", errno); region = mmap(NULL, psize, PROT_READ | PROT_WRITE, MAP_SHARED, desc, 0); ATF_REQUIRE_MSG(region != MAP_FAILED, "mmap failed; errno=%d", errno); memset(region, '\377', psize); sa.sa_flags = 0; sa.sa_handler = ignoreit; sigemptyset(&sa.sa_mask); ATF_REQUIRE_MSG(sigaction(SIGUSR1, &sa, (struct sigaction *)0) == 0, "sigaction failed; errno=%d", errno); sigemptyset(&ss); sigaddset(&ss, SIGUSR1); ATF_REQUIRE_MSG(sigprocmask(SIG_BLOCK, &ss, (sigset_t *)0) == 0, "sigprocmask failed; errno=%d", errno); rv = fork(); ATF_REQUIRE_MSG(rv != -1, "fork failed; errno=%d", errno); if (rv == 0) { sigemptyset(&ss); sigsuspend(&ss); for (cp = region; cp < (char *)region + psize; cp++) { if (*cp != '\151') _exit(1); } if (lseek(desc, 0, SEEK_SET) == -1) _exit(1); for (i = 0; i < psize; i++) { error = read(desc, &c, 1); if (c != '\151') _exit(1); } _exit(0); } else { int status; memset(region, '\151', psize - 2); error = pwrite(desc, region, 2, psize - 2); if (error != 2) { if (error >= 0) atf_tc_fail("short write; %d bytes written", error); else atf_tc_fail("shmfd write"); } kill(rv, SIGUSR1); waitpid(rv, &status, 0); if (WIFEXITED(status) && WEXITSTATUS(status) == 0) { printf("Functionality test successful\n"); } else if
(WIFEXITED(status)) { atf_tc_fail("Child process exited with status %d", WEXITSTATUS(status)); } else { atf_tc_fail("Child process terminated with %s", strsignal(WTERMSIG(status))); } } ATF_REQUIRE_MSG(munmap(region, psize) == 0, "munmap failed; errno=%d", errno); shm_unlink(test_path); } ATF_TC_WITHOUT_HEAD(cloexec); ATF_TC_BODY(cloexec, tc) { int fd; gen_test_path(); /* shm_open(2) is required to set FD_CLOEXEC */ fd = shm_open(SHM_ANON, O_RDWR, 0777); ATF_REQUIRE_MSG(fd >= 0, "shm_open failed; errno=%d", errno); ATF_REQUIRE((fcntl(fd, F_GETFD) & FD_CLOEXEC) != 0); close(fd); /* Also make sure that named shm is correct */ fd = shm_open(test_path, O_CREAT | O_RDWR, 0600); ATF_REQUIRE_MSG(fd >= 0, "shm_open failed; errno=%d", errno); ATF_REQUIRE((fcntl(fd, F_GETFD) & FD_CLOEXEC) != 0); close(fd); } ATF_TC_WITHOUT_HEAD(mode); ATF_TC_BODY(mode, tc) { struct stat st; int fd; mode_t restore_mask; gen_test_path(); /* Remove inhibitions from umask */ restore_mask = umask(0); fd = shm_open(test_path, O_CREAT | O_RDWR, 0600); ATF_REQUIRE_MSG(fd >= 0, "shm_open failed; errno=%d", errno); ATF_REQUIRE(fstat(fd, &st) == 0); ATF_REQUIRE((st.st_mode & ACCESSPERMS) == 0600); close(fd); ATF_REQUIRE(shm_unlink(test_path) == 0); fd = shm_open(test_path, O_CREAT | O_RDWR, 0660); ATF_REQUIRE_MSG(fd >= 0, "shm_open failed; errno=%d", errno); ATF_REQUIRE(fstat(fd, &st) == 0); ATF_REQUIRE((st.st_mode & ACCESSPERMS) == 0660); close(fd); ATF_REQUIRE(shm_unlink(test_path) == 0); fd = shm_open(test_path, O_CREAT | O_RDWR, 0666); ATF_REQUIRE_MSG(fd >= 0, "shm_open failed; errno=%d", errno); ATF_REQUIRE(fstat(fd, &st) == 0); ATF_REQUIRE((st.st_mode & ACCESSPERMS) == 0666); close(fd); ATF_REQUIRE(shm_unlink(test_path) == 0); umask(restore_mask); } +ATF_TC_WITHOUT_HEAD(fallocate); +ATF_TC_BODY(fallocate, tc) +{ + struct stat st; + int error, fd, sz; + + /* + * Primitive test case for posix_fallocate with shmfd. Effectively + * expected to work like a smarter ftruncate that will grow the region + * as needed in a race-free way. + */ + fd = shm_open(SHM_ANON, O_RDWR, 0666); + ATF_REQUIRE_MSG(fd >= 0, "shm_open failed; errno=%d", errno); + /* Set the initial size. */ + sz = 32; + ATF_REQUIRE(ftruncate(fd, sz) == 0); + + /* Now grow it. */ + error = 0; + sz *= 2; + ATF_REQUIRE_MSG((error = posix_fallocate(fd, 0, sz)) == 0, + "posix_fallocate failed; error=%d", error); + ATF_REQUIRE(fstat(fd, &st) == 0); + ATF_REQUIRE(st.st_size == sz); + /* Attempt to shrink it; should succeed, but not change the size. */ + ATF_REQUIRE_MSG((error = posix_fallocate(fd, 0, sz / 2)) == 0, + "posix_fallocate failed; error=%d", error); + ATF_REQUIRE(fstat(fd, &st) == 0); + ATF_REQUIRE(st.st_size == sz); + /* Grow it using an offset of sz and len of sz.
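+ * (That is, allocate the byte range [sz, 2 * sz); st_size is expected
+ * to become sz * 2.)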
*/ + ATF_REQUIRE_MSG((error = posix_fallocate(fd, sz, sz)) == 0, + "posix_fallocate failed; error=%d", error); + ATF_REQUIRE(fstat(fd, &st) == 0); + ATF_REQUIRE(st.st_size == (sz * 2)); + + close(fd); +} + ATF_TP_ADD_TCS(tp) { ATF_TP_ADD_TC(tp, remap_object); ATF_TP_ADD_TC(tp, rename_from_anon); ATF_TP_ADD_TC(tp, rename_bad_path_pointer); ATF_TP_ADD_TC(tp, rename_from_nonexisting); ATF_TP_ADD_TC(tp, rename_to_anon); ATF_TP_ADD_TC(tp, rename_to_replace); ATF_TP_ADD_TC(tp, rename_to_noreplace); ATF_TP_ADD_TC(tp, rename_to_exchange); ATF_TP_ADD_TC(tp, rename_to_exchange_nonexisting); ATF_TP_ADD_TC(tp, rename_to_self); ATF_TP_ADD_TC(tp, rename_bad_flag); ATF_TP_ADD_TC(tp, reopen_object); ATF_TP_ADD_TC(tp, readonly_mmap_write); ATF_TP_ADD_TC(tp, open_after_link); ATF_TP_ADD_TC(tp, open_invalid_path); ATF_TP_ADD_TC(tp, open_write_only); ATF_TP_ADD_TC(tp, open_extra_flags); ATF_TP_ADD_TC(tp, open_anon); ATF_TP_ADD_TC(tp, open_anon_readonly); ATF_TP_ADD_TC(tp, open_bad_path_pointer); ATF_TP_ADD_TC(tp, open_path_too_long); ATF_TP_ADD_TC(tp, open_nonexisting_object); ATF_TP_ADD_TC(tp, open_create_existing_object); ATF_TP_ADD_TC(tp, shm_functionality_across_fork); ATF_TP_ADD_TC(tp, trunc_resets_object); ATF_TP_ADD_TC(tp, unlink_bad_path_pointer); ATF_TP_ADD_TC(tp, unlink_path_too_long); ATF_TP_ADD_TC(tp, object_resize); ATF_TP_ADD_TC(tp, cloexec); ATF_TP_ADD_TC(tp, mode); + ATF_TP_ADD_TC(tp, fallocate); return (atf_no_error()); }