Index: head/sys/alpha/osf1/osf1_misc.c =================================================================== --- head/sys/alpha/osf1/osf1_misc.c (revision 127139) +++ head/sys/alpha/osf1/osf1_misc.c (revision 127140) @@ -1,1825 +1,1822 @@ /* $NetBSD: osf1_misc.c,v 1.14 1998/05/20 16:34:29 chs Exp $ */ /* * Copyright (c) 1994, 1995 Carnegie-Mellon University. * All rights reserved. * * Author: Chris G. Demetriou * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. 
*/ /* * Additional Copyright (c) 1999 by Andrew Gallatin */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/selinfo.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void cvtstat2osf1(struct stat *, struct osf1_stat *); static int osf2bsd_pathconf(int *); static const char osf1_emul_path[] = "/compat/osf1"; /* * [ taken from the linux emulator ] * Search an alternate path before passing pathname arguments on * to system calls. Useful for keeping a separate 'emulation tree'. * * If cflag is set, we check if an attempt can be made to create * the named file, i.e. we check if the directory it should * be in exists. */ int osf1_emul_find(td, sgp, prefix, path, pbuf, cflag) struct thread *td; caddr_t *sgp; /* Pointer to stackgap memory */ const char *prefix; char *path; char **pbuf; int cflag; { int error; size_t len, sz; char *buf, *cp, *ptr; struct ucred *ucred; struct nameidata nd; struct nameidata ndroot; struct vattr vat; struct vattr vatroot; buf = (char *) malloc(MAXPATHLEN, M_TEMP, M_WAITOK); *pbuf = path; for (ptr = buf; (*ptr = *prefix) != '\0'; ptr++, prefix++) continue; sz = MAXPATHLEN - (ptr - buf); /* * If sgp is not given then the path is already in kernel space */ if (sgp == NULL) error = copystr(path, ptr, sz, &len); else error = copyinstr(path, ptr, sz, &len); if (error) { free(buf, M_TEMP); return error; } if (*ptr != '/') { free(buf, M_TEMP); return EINVAL; } /* * We know that there is a / somewhere in this pathname. 
* Search backwards for it, to find the file's parent dir * to see if it exists in the alternate tree. If it does, * and we want to create a file (cflag is set). We don't * need to worry about the root comparison in this case. */ if (cflag) { for (cp = &ptr[len] - 1; *cp != '/'; cp--) ; *cp = '\0'; NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, buf, td); if ((error = namei(&nd)) != 0) { free(buf, M_TEMP); return error; } *cp = '/'; } else { NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, buf, td); if ((error = namei(&nd)) != 0) { free(buf, M_TEMP); return error; } /* * We now compare the vnode of the osf1_root to the one * vnode asked. If they resolve to be the same, then we * ignore the match so that the real root gets used. * This avoids the problem of traversing "../.." to find the * root directory and never finding it, because "/" resolves * to the emulation root directory. This is expensive :-( */ NDINIT(&ndroot, LOOKUP, FOLLOW, UIO_SYSSPACE, osf1_emul_path, td); if ((error = namei(&ndroot)) != 0) { /* Cannot happen! 
*/ free(buf, M_TEMP); vrele(nd.ni_vp); return error; } ucred = td->td_ucred; if ((error = VOP_GETATTR(nd.ni_vp, &vat, ucred, td)) != 0) { goto bad; } if ((error = VOP_GETATTR(ndroot.ni_vp, &vatroot, ucred, td)) != 0) { goto bad; } if (vat.va_fsid == vatroot.va_fsid && vat.va_fileid == vatroot.va_fileid) { error = ENOENT; goto bad; } } if (sgp == NULL) *pbuf = buf; else { sz = &ptr[len] - buf; *pbuf = stackgap_alloc(sgp, sz + 1); error = copyout(buf, *pbuf, sz); free(buf, M_TEMP); } vrele(nd.ni_vp); if (!cflag) vrele(ndroot.ni_vp); return error; bad: vrele(ndroot.ni_vp); vrele(nd.ni_vp); free(buf, M_TEMP); return error; } int osf1_open(td, uap) struct thread *td; struct osf1_open_args *uap; { struct open_args /* { syscallarg(char *) path; syscallarg(int) flags; syscallarg(int) mode; } */ a; caddr_t sg; sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); a.path = uap->path; a.flags = uap->flags; /* XXX translate */ a.mode = uap->mode; return open(td, &a); } extern long totalphysmem; int osf1_getsysinfo(td, uap) struct thread *td; struct osf1_getsysinfo_args *uap; { int error, retval; int ncpus = 1; /* XXX until SMP */ int ophysmem; int unit; long percpu; long proctype; struct osf1_cpu_info cpuinfo; error = retval = 0; switch(uap->op) { case OSF_GET_MAX_UPROCS: error = copyout(&maxprocperuid, uap->buffer, sizeof(maxprocperuid)); retval = 1; break; case OSF_GET_PHYSMEM: ophysmem = totalphysmem * (PAGE_SIZE >> 10); error = copyout(&ophysmem, uap->buffer, sizeof(ophysmem)); retval = 1; break; case OSF_GET_MAX_CPU: case OSF_GET_CPUS_IN_BOX: error = copyout(&ncpus, uap->buffer, sizeof(ncpus)); retval = 1; break; case OSF_GET_IEEE_FP_CONTROL: error = copyout(&td->td_pcb->pcb_fp_control,uap->buffer, sizeof(td->td_pcb->pcb_fp_control)); retval = 1; break; case OSF_GET_CPU_INFO: if (uap->nbytes < sizeof(cpuinfo)) error = EINVAL; else { bzero(&cpuinfo, sizeof(cpuinfo)); unit = alpha_pal_whami(); cpuinfo.current_cpu = unit; cpuinfo.cpus_in_box = ncpus; cpuinfo.cpu_type = 
LOCATE_PCS(hwrpb, unit)->pcs_proc_type; cpuinfo.ncpus = ncpus; cpuinfo.cpus_present = ncpus; cpuinfo.cpus_running = ncpus; cpuinfo.cpu_binding = 1; cpuinfo.cpu_ex_binding = 0; cpuinfo.mhz = hwrpb->rpb_cc_freq / 1000000; error = copyout(&cpuinfo, uap->buffer, sizeof(cpuinfo)); retval = 1; } break; case OSF_GET_PROC_TYPE: if(uap->nbytes < sizeof(proctype)) error = EINVAL; else { unit = alpha_pal_whami(); proctype = LOCATE_PCS(hwrpb, unit)->pcs_proc_type; error = copyout (&proctype, uap->buffer, sizeof(percpu)); retval = 1; } break; case OSF_GET_HWRPB: { /* note -- osf/1 doesn't have rpb_tbhint[8] */ unsigned long rpb_size; rpb_size = (unsigned long)&hwrpb->rpb_tbhint - (unsigned long)hwrpb; if(uap->nbytes < rpb_size){ uprintf("nbytes = %ld, sizeof(struct rpb) = %ld\n", uap->nbytes, rpb_size); error = EINVAL; } else { error = copyout(hwrpb, uap->buffer, rpb_size); retval = 1; } } break; case OSF_GET_PLATFORM_NAME: error = copyout(platform.model, uap->buffer, strlen(platform.model)); retval = 1; break; default: printf("osf1_getsysinfo called with unknown op=%ld\n", uap->op); return EINVAL; } td->td_retval[0] = retval; return(error); } int osf1_setsysinfo(td, uap) struct thread *td; struct osf1_setsysinfo_args *uap; { int error; error = 0; switch(uap->op) { case OSF_SET_IEEE_FP_CONTROL: { u_int64_t temp, *fp_control; if ((error = copyin(uap->buffer, &temp, sizeof(temp)))) break; fp_control = &td->td_pcb->pcb_fp_control; *fp_control = temp & IEEE_TRAP_ENABLE_MASK; break; } default: uprintf("osf1_setsysinfo called with op=%ld\n", uap->op); /*error = EINVAL;*/ } return (error); } int osf1_getrlimit(td, uap) struct thread *td; struct osf1_getrlimit_args *uap; { struct rlimit bsd_rlim; struct proc *p; int which; if (uap->which >= OSF1_RLIMIT_NLIMITS) return (EINVAL); if (uap->which <= OSF1_RLIMIT_LASTCOMMON) which = uap->which; else if (uap->which == OSF1_RLIMIT_NOFILE) which = RLIMIT_NOFILE; else return (0); p = td->td_proc; PROC_LOCK(p); lim_rlimit(p, which, &bsd_rlim); 
PROC_UNLOCK(p); return (copyout(&bsd_rlim, uap->rlp, sizeof(bsd_rlim))); } int osf1_setrlimit(td, uap) struct thread *td; struct osf1_setrlimit_args *uap; { struct rlimit bsd_rlim; int error, which; if (uap->which >= OSF1_RLIMIT_NLIMITS) return (EINVAL); if (uap->which <= OSF1_RLIMIT_LASTCOMMON) which = uap->which; else if (uap->which == OSF1_RLIMIT_NOFILE) which = RLIMIT_NOFILE; else return (0); error = copyin(uap->rlp, &bsd_rlim, sizeof(bsd_rlim)); if (error) return (error); return (kern_setrlimit(td, which, &bsd_rlim)); } /* * As linux says, this is a total guess. */ int osf1_set_program_attributes(td, uap) struct thread *td; struct osf1_set_program_attributes_args *uap; { struct vmspace *vm = td->td_proc->p_vmspace; vm->vm_taddr = (caddr_t)uap->text_start; vm->vm_tsize = btoc(round_page(uap->text_len)); vm->vm_daddr = (caddr_t)uap->bss_start; vm->vm_dsize = btoc(round_page(uap->bss_len)); return(KERN_SUCCESS); } int osf1_mmap(td, uap) struct thread *td; struct osf1_mmap_args *uap; { struct mmap_args /* { syscallarg(caddr_t) addr; syscallarg(size_t) len; syscallarg(int) prot; syscallarg(int) flags; syscallarg(int) fd; syscallarg(long) pad; syscallarg(off_t) pos; } */ a; int retval; vm_map_t map; vm_offset_t addr, len, newaddr; GIANT_REQUIRED; a.addr = uap->addr; a.len = uap->len; a.prot = uap->prot; a.fd = uap->fd; a.pad = 0; a.pos = uap->pos; a.flags = 0; /* * OSF/1's mmap, unlike FreeBSD's, does its best to map memory at the * user's requested address, even if MAP_FIXED is not set. Here we * try to replicate this behaviour as much as we can because some * applications (like /sbin/loader) depend on having things put as * close to where they've requested as possible. */ if (uap->addr != NULL) addr = round_page((vm_offset_t)a.addr); else /* * Try to use the apparent OSF/1 default placement of 0x10000 for * NULL addrs, this helps to prevent non-64 bit clean binaries from * SEGV'ing. 
*/ addr = round_page((vm_offset_t)0x10000UL); len = (vm_offset_t)a.len; map = &td->td_proc->p_vmspace->vm_map; if (!vm_map_findspace(map, addr, len, &newaddr)) { a.addr = (caddr_t) newaddr; a.flags |= (MAP_FIXED); } #ifdef DEBUG else uprintf("osf1_mmap:vm_map_findspace failed for: %p 0x%lx\n", (caddr_t)addr, len); #endif if (uap->flags & OSF1_MAP_SHARED) a.flags |= MAP_SHARED; if (uap->flags & OSF1_MAP_PRIVATE) a.flags |= MAP_PRIVATE; switch (uap->flags & OSF1_MAP_TYPE) { case OSF1_MAP_ANONYMOUS: a.flags |= MAP_ANON; break; case OSF1_MAP_FILE: a.flags |= MAP_FILE; break; default: return (EINVAL); } if (uap->flags & OSF1_MAP_FIXED) a.flags |= MAP_FIXED; if (uap->flags & OSF1_MAP_HASSEMAPHORE) a.flags |= MAP_HASSEMAPHORE; if (uap->flags & OSF1_MAP_INHERIT) return (EINVAL); if (uap->flags & OSF1_MAP_UNALIGNED) return (EINVAL); /* * Emulate an osf/1 bug: Apparently, mmap'ed segments are always * readable even if the user doesn't or in PROT_READ. This causes * some buggy programs to segv. */ a.prot |= PROT_READ; retval = mmap(td, &a); #ifdef DEBUG uprintf( "\nosf1_mmap: addr=%p (%p), len = 0x%lx, prot=0x%x, fd=%d, pad=0, pos=0x%lx", uap->addr, a.addr,uap->len, uap->prot, uap->fd, uap->pos); printf(" flags = 0x%x\n",uap->flags); #endif return (retval); } int osf1_msync(td, uap) struct thread *td; struct osf1_msync_args *uap; { struct msync_args a; a.addr = uap->addr; a.len = uap->len; a.flags = 0; if(uap->flags & OSF1_MS_ASYNC) a.flags |= MS_ASYNC; if(uap->flags & OSF1_MS_SYNC) a.flags |= MS_SYNC; if(uap->flags & OSF1_MS_INVALIDATE) a.flags |= MS_INVALIDATE; return(msync(td, &a)); } struct osf1_stat { int32_t st_dev; u_int32_t st_ino; u_int32_t st_mode; u_int16_t st_nlink; u_int32_t st_uid; u_int32_t st_gid; int32_t st_rdev; u_int64_t st_size; int32_t st_atime_sec; int32_t st_spare1; int32_t st_mtime_sec; int32_t st_spare2; int32_t st_ctime_sec; int32_t st_spare3; u_int32_t st_blksize; int32_t st_blocks; u_int32_t st_flags; u_int32_t st_gen; }; /* * Get file status; this 
version follows links. */ /* ARGSUSED */ int osf1_stat(td, uap) struct thread *td; struct osf1_stat_args *uap; { int error; struct stat sb; struct osf1_stat osb; struct nameidata nd; caddr_t sg; sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->path, td); if ((error = namei(&nd))) return (error); error = vn_stat(nd.ni_vp, &sb, td->td_ucred, NOCRED, td); vput(nd.ni_vp); if (error) return (error); cvtstat2osf1(&sb, &osb); error = copyout((caddr_t)&osb, (caddr_t)uap->ub, sizeof (osb)); return (error); } /* * Get file status; this version does not follow links. */ /* ARGSUSED */ int osf1_lstat(td, uap) struct thread *td; register struct osf1_lstat_args *uap; { struct stat sb; struct osf1_stat osb; int error; struct nameidata nd; caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF, UIO_USERSPACE, uap->path, td); if ((error = namei(&nd))) return (error); error = vn_stat(nd.ni_vp, &sb, td->td_ucred, NOCRED, td); vput(nd.ni_vp); if (error) return (error); cvtstat2osf1(&sb, &osb); error = copyout((caddr_t)&osb, (caddr_t)uap->ub, sizeof (osb)); return (error); } /* * Return status information about a file descriptor. */ int osf1_fstat(td, uap) struct thread *td; register struct osf1_fstat_args *uap; { struct file *fp; struct stat ub; struct osf1_stat oub; int error; if ((error = fget(td, uap->fd, &fp)) != 0) return (error); error = fo_stat(fp, &ub, td->td_ucred, td); fdrop(fp, td); cvtstat2osf1(&ub, &oub); if (error == 0) error = copyout((caddr_t)&oub, (caddr_t)uap->sb, sizeof (oub)); return (error); } #define bsd2osf_dev(dev) (umajor(dev) << 20 | uminor(dev)) /* * Convert from a stat structure to an osf1 stat structure. */ static void cvtstat2osf1(st, ost) struct stat *st; struct osf1_stat *ost; { ost->st_dev = bsd2osf_dev(st->st_dev); ost->st_ino = st->st_ino; ost->st_mode = st->st_mode; ost->st_nlink = st->st_nlink; ost->st_uid = st->st_uid == -2 ? 
(u_int16_t) -2 : st->st_uid; ost->st_gid = st->st_gid == -2 ? (u_int16_t) -2 : st->st_gid; ost->st_rdev = bsd2osf_dev(st->st_rdev); ost->st_size = st->st_size; ost->st_atime_sec = st->st_atime; ost->st_spare1 = 0; ost->st_mtime_sec = st->st_mtime; ost->st_spare2 = 0; ost->st_ctime_sec = st->st_ctime; ost->st_spare3 = 0; ost->st_blksize = st->st_blksize; ost->st_blocks = st->st_blocks; ost->st_flags = st->st_flags; ost->st_gen = st->st_gen; } int osf1_mknod(td, uap) struct thread *td; struct osf1_mknod_args *uap; { printf("osf1_mknod no longer implemented\n"); return ENOSYS; } int osf1_access(td, uap) struct thread *td; struct osf1_access_args *uap; { caddr_t sg; sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); return access(td, (struct access_args *)uap); } struct osf1_flock { short l_type; short l_whence; off_t l_start; off_t l_len; pid_t l_pid; }; int osf1_fcntl(td, uap) struct thread *td; struct osf1_fcntl_args *uap; { int error; long tmp; caddr_t oarg, sg; struct fcntl_args a; struct osf1_flock osf_flock; struct flock bsd_flock; struct flock *nflock; error = 0; switch (uap->cmd) { case F_SETFL: a.fd = uap->fd; a.cmd = F_SETFL; /* need to translate flags here */ tmp = 0; if ((long)uap->arg & OSF1_FNONBLOCK) tmp |= FNONBLOCK; if ((long)uap->arg & OSF1_FAPPEND) tmp |= FAPPEND; if ((long)uap->arg & OSF1_FDEFER) tmp |= FDEFER; if ((long)uap->arg & OSF1_FASYNC) tmp |= FASYNC; if ((long)uap->arg & OSF1_FCREAT) tmp |= O_CREAT; if ((long)uap->arg & OSF1_FTRUNC) tmp |= O_TRUNC; if ((long)uap->arg & OSF1_FEXCL) tmp |= O_EXCL; if ((long)uap->arg & OSF1_FNDELAY) tmp |= FNDELAY; if ((long)uap->arg & OSF1_FSYNC) tmp |= FFSYNC; a.arg = tmp; error = fcntl(td, &a); break; case F_SETLK: case F_SETLKW: case F_GETLK: /* * The OSF/1 flock stucture has a different order than * the BSD one, but all else is the same. We must * reorder the one we've gotten so that flock() groks it. 
*/ if ((error = copyin(uap->arg, &osf_flock, sizeof(osf_flock)))) return error; bsd_flock.l_type = osf_flock.l_type; bsd_flock.l_whence = osf_flock.l_whence; bsd_flock.l_start = osf_flock.l_start; bsd_flock.l_len = osf_flock.l_len; bsd_flock.l_pid = osf_flock.l_pid; sg = stackgap_init(); nflock = stackgap_alloc(&sg, sizeof(struct flock)); if ((error = copyout(&bsd_flock, nflock, sizeof(bsd_flock))) != 0) return error; oarg = uap->arg; uap->arg = nflock; error = fcntl(td, (struct fcntl_args *) uap); /* if (error) { printf("fcntl called with cmd=%d, args=0x%lx\n returns %d\n",uap->cmd,(long)uap->arg,error); printf("bsd_flock.l_type = 0x%x\n", bsd_flock.l_type); printf("bsd_flock.l_whence = 0x%x\n", bsd_flock.l_whence); printf("bsd_flock.l_start = 0x%lx\n", bsd_flock.l_start); printf("bsd_flock.l_len = 0x%lx\n", bsd_flock.l_len); printf("bsd_flock.l_pid = 0x%x\n", bsd_flock.l_pid); } */ if ((uap->cmd == F_GETLK) && !error) { osf_flock.l_type = F_UNLCK; if ((error = copyout(&osf_flock, oarg, sizeof(osf_flock)))) return error; } break; default: error = fcntl(td, (struct fcntl_args *) uap); if ((uap->cmd == OSF1_F_GETFL) && !error ) { tmp = td->td_retval[0] & O_ACCMODE; if (td->td_retval[0] & FNONBLOCK) tmp |= OSF1_FNONBLOCK; if (td->td_retval[0] & FAPPEND) tmp |= OSF1_FAPPEND; if (td->td_retval[0] & FDEFER) tmp |= OSF1_FDEFER; if (td->td_retval[0] & FASYNC) tmp |= OSF1_FASYNC; if (td->td_retval[0] & O_CREAT) tmp |= OSF1_FCREAT; if (td->td_retval[0] & O_TRUNC) tmp |= OSF1_FTRUNC; if (td->td_retval[0] & O_EXCL) tmp |= OSF1_FEXCL; if (td->td_retval[0] & FNDELAY) tmp |= OSF1_FNDELAY; if (td->td_retval[0] & FFSYNC) tmp |= OSF1_FSYNC; td->td_retval[0] = tmp; } } return (error); } #if 0 int osf1_fcntl(td, uap) struct thread *td; struct osf1_fcntl_args *uap; { struct fcntl_args a; long tmp; int error; a.fd = uap->fd; switch (uap->cmd) { case OSF1_F_DUPFD: a.cmd = F_DUPFD; a.arg = (long)uap->arg; break; case OSF1_F_GETFD: a.cmd = F_GETFD; a.arg = (long)uap->arg; break; case 
OSF1_F_SETFD: a.cmd = F_SETFD; a.arg = (long)uap->arg; break; case OSF1_F_GETFL: a.cmd = F_GETFL; a.arg = (long)uap->arg; /* ignored */ break; case OSF1_F_SETFL: a.cmd = F_SETFL; tmp = 0; if ((long)uap->arg & OSF1_FAPPEND) tmp |= FAPPEND; if ((long)uap->arg & OSF1_FNONBLOCK) tmp |= FNONBLOCK; if ((long)uap->arg & OSF1_FASYNC) tmp |= FASYNC; if ((long)uap->arg & OSF1_FSYNC) tmp |= FFSYNC; a.arg = tmp; break; default: /* XXX other cases */ return (EINVAL); } error = fcntl(td, &a); if (error) return error; switch (uap->cmd) { case OSF1_F_GETFL: /* XXX */ break; } return error; } #endif int osf1_socket(td, uap) struct thread *td; struct osf1_socket_args *uap; { struct socket_args a; if (uap->type > AF_LINK) return (EINVAL); /* XXX After AF_LINK, divergence. */ a.domain = uap->domain; a.type = uap->type; a.protocol = uap->protocol; return socket(td, &a); } int osf1_sendto(td, uap) struct thread *td; register struct osf1_sendto_args *uap; { struct sendto_args a; if (uap->flags & ~0x7f) /* unsupported flags */ return (EINVAL); a.s = uap->s; a.buf = uap->buf; a.len = uap->len; a.flags = uap->flags; a.to = (caddr_t)uap->to; a.tolen = uap->tolen; return sendto(td, &a); } int osf1_reboot(td, uap) struct thread *td; struct osf1_reboot_args *uap; { struct reboot_args a; if (uap->opt & ~OSF1_RB_ALLFLAGS && uap->opt & (OSF1_RB_ALTBOOT|OSF1_RB_UNIPROC)) return (EINVAL); a.opt = 0; if (uap->opt & OSF1_RB_ASKNAME) a.opt |= RB_ASKNAME; if (uap->opt & OSF1_RB_SINGLE) a.opt |= RB_SINGLE; if (uap->opt & OSF1_RB_NOSYNC) a.opt |= RB_NOSYNC; if (uap->opt & OSF1_RB_HALT) a.opt |= RB_HALT; if (uap->opt & OSF1_RB_INITNAME) a.opt |= RB_INITNAME; if (uap->opt & OSF1_RB_DFLTROOT) a.opt |= RB_DFLTROOT; return reboot(td, &a); } int osf1_lseek(td, uap) struct thread *td; struct osf1_lseek_args *uap; { struct lseek_args a; a.fd = uap->fd; a.pad = 0; a.offset = uap->offset; a.whence = uap->whence; return lseek(td, &a); } /* * OSF/1 defines _POSIX_SAVED_IDS, which means that our normal * setuid() 
won't work. * * Instead, by P1003.1b-1993, setuid() is supposed to work like: * If the process has appropriate [super-user] privileges, the * setuid() function sets the real user ID, effective user * ID, and the saved set-user-ID to uid. * If the process does not have appropriate privileges, but uid * is equal to the real user ID or the saved set-user-ID, the * setuid() function sets the effective user ID to uid; the * real user ID and saved set-user-ID remain unchanged by * this function call. */ int osf1_setuid(td, uap) struct thread *td; struct osf1_setuid_args *uap; { struct proc *p; int error; uid_t uid; struct uidinfo *uip; struct ucred *newcred, *oldcred; p = td->td_proc; uid = uap->uid; newcred = crget(); uip = uifind(uid); PROC_LOCK(p); oldcred = p->p_ucred; if ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0 && uid != oldcred->cr_ruid && uid != oldcred->cr_svuid) { PROC_UNLOCK(p); uifree(uip); crfree(newcred); return (error); } crcopy(newcred, oldcred); if (error == 0) { if (uid != oldcred->cr_ruid) { change_ruid(newcred, uip); setsugid(p); } if (oldcred->cr_svuid != uid) { change_svuid(newcred, uid); setsugid(p); } } if (newcred->cr_uid != uid) { change_euid(newcred, uip); setsugid(p); } p->p_ucred = newcred; PROC_UNLOCK(p); uifree(uip); crfree(oldcred); return (0); } /* * OSF/1 defines _POSIX_SAVED_IDS, which means that our normal * setgid() won't work. * * If you change "uid" to "gid" in the discussion, above, about * setuid(), you'll get a correct description of setgid(). 
*/ int osf1_setgid(td, uap) struct thread *td; struct osf1_setgid_args *uap; { struct proc *p; int error; gid_t gid; struct ucred *newcred, *oldcred; p = td->td_proc; gid = uap->gid; newcred = crget(); PROC_LOCK(p); oldcred = p->p_ucred; if (((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0 ) && gid != oldcred->cr_rgid && gid != oldcred->cr_svgid) { PROC_UNLOCK(p); crfree(newcred); return (error); } crcopy(newcred, oldcred); if (error == 0) { if (gid != oldcred->cr_rgid) { change_rgid(newcred, gid); setsugid(p); } if (oldcred->cr_svgid != gid) { change_svgid(newcred, gid); setsugid(p); } } if (newcred->cr_groups[0] != gid) { change_egid(newcred, gid); setsugid(p); } p->p_ucred = newcred; PROC_UNLOCK(p); crfree(oldcred); return (0); } /* * The structures end up being the same... but we can't be sure that * the other word of our iov_len is zero! */ struct osf1_iovec { char *iov_base; int iov_len; }; #define STACKGAPLEN 400 int osf1_readv(td, uap) struct thread *td; struct osf1_readv_args *uap; { int error, osize, nsize, i; caddr_t sg; struct readv_args /* { syscallarg(int) fd; syscallarg(struct iovec *) iovp; syscallarg(u_int) iovcnt; } */ a; struct osf1_iovec *oio; struct iovec *nio; sg = stackgap_init(); if (uap->iovcnt > (STACKGAPLEN / sizeof (struct iovec))) return (EINVAL); osize = uap->iovcnt * sizeof (struct osf1_iovec); nsize = uap->iovcnt * sizeof (struct iovec); oio = malloc(osize, M_TEMP, M_WAITOK); nio = malloc(nsize, M_TEMP, M_WAITOK); error = 0; if ((error = copyin(uap->iovp, oio, osize))) goto punt; for (i = 0; i < uap->iovcnt; i++) { nio[i].iov_base = oio[i].iov_base; nio[i].iov_len = oio[i].iov_len; } a.fd = uap->fd; a.iovp = stackgap_alloc(&sg, nsize); a.iovcnt = uap->iovcnt; if ((error = copyout(nio, (caddr_t)a.iovp, nsize))) goto punt; error = readv(td, &a); punt: free(oio, M_TEMP); free(nio, M_TEMP); return (error); } int osf1_writev(td, uap) struct thread *td; struct osf1_writev_args *uap; { int error, i, nsize, osize; caddr_t sg; struct 
writev_args /* { syscallarg(int) fd; syscallarg(struct iovec *) iovp; syscallarg(u_int) iovcnt; } */ a; struct osf1_iovec *oio; struct iovec *nio; sg = stackgap_init(); if (uap->iovcnt > (STACKGAPLEN / sizeof (struct iovec))) return (EINVAL); osize = uap->iovcnt * sizeof (struct osf1_iovec); nsize = uap->iovcnt * sizeof (struct iovec); oio = malloc(osize, M_TEMP, M_WAITOK); nio = malloc(nsize, M_TEMP, M_WAITOK); error = 0; if ((error = copyin(uap->iovp, oio, osize))) goto punt; for (i = 0; i < uap->iovcnt; i++) { nio[i].iov_base = oio[i].iov_base; nio[i].iov_len = oio[i].iov_len; } a.fd = uap->fd; a.iovp = stackgap_alloc(&sg, nsize); a.iovcnt = uap->iovcnt; if ((error = copyout(nio, (caddr_t)a.iovp, nsize))) goto punt; error = writev(td, &a); punt: free(oio, M_TEMP); free(nio, M_TEMP); return (error); } /* * More of the stupid off_t padding! */ int osf1_truncate(td, uap) struct thread *td; struct osf1_truncate_args *uap; { caddr_t sg; struct truncate_args a; sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); a.path = uap->path; a.pad = 0; a.length = uap->length; return truncate(td, &a); } int osf1_ftruncate(td, uap) struct thread *td; struct osf1_ftruncate_args *uap; { struct ftruncate_args a; a.fd = uap->fd; a.pad = 0; a.length = uap->length; return ftruncate(td, &a); } static int osf2bsd_pathconf(name) int *name; { switch (*name) { case _OSF1_PC_LINK_MAX: case _OSF1_PC_MAX_CANON: case _OSF1_PC_MAX_INPUT: case _OSF1_PC_NAME_MAX: *name -= 10; break; case _OSF1_PC_PATH_MAX: case _OSF1_PC_PIPE_BUF: *name -= 9; case _OSF1_PC_NO_TRUNC: *name = _PC_NO_TRUNC; break; case _OSF1_PC_CHOWN_RESTRICTED: *name = _PC_CHOWN_RESTRICTED; break; case _OSF1_PC_VDISABLE: *name = _PC_VDISABLE; break; default: return (EINVAL); } return 0; } int osf1_pathconf(td, uap) struct thread *td; struct osf1_pathconf_args *uap; { if (osf2bsd_pathconf(&uap->name)) return (EINVAL); else return (pathconf(td, (void *)uap)); } int osf1_fpathconf(td, uap) struct thread *td; struct 
osf1_fpathconf_args *uap; { if (osf2bsd_pathconf(&uap->name)) return (EINVAL); else return (fpathconf(td, (void *)uap)); } int osf1_getrusage(td, uap) struct thread *td; struct osf1_getrusage_args *uap; { struct proc *p; struct rusage *rup; struct osf1_rusage oru; p = td->td_proc; switch (uap->who) { case RUSAGE_SELF: rup = &p->p_stats->p_ru; mtx_lock_spin(&sched_lock); calcru(p, &rup->ru_utime, &rup->ru_stime, NULL); mtx_unlock_spin(&sched_lock); break; case RUSAGE_CHILDREN: rup = &p->p_stats->p_cru; break; default: return (EINVAL); } TV_CP(rup->ru_utime, oru.ru_utime); TV_CP(rup->ru_stime, oru.ru_stime); bcopy(&(rup->ru_first), &(oru.ru_first), (&(oru.ru_last) - &(oru.ru_first))); return (copyout((caddr_t)&oru, (caddr_t)uap->rusage, sizeof (struct osf1_rusage))); } int osf1_wait4(td, uap) struct thread *td; struct osf1_wait4_args *uap; { - int error; - caddr_t sg; - struct osf1_rusage *orusage, oru; - struct rusage *rusage = NULL, ru; + int error, status; + struct osf1_rusage oru; + struct rusage ru; - orusage = uap->rusage; - if (orusage) { - sg = stackgap_init(); - rusage = stackgap_alloc(&sg, sizeof(struct rusage)); - uap->rusage = (struct osf1_rusage *)rusage; - } - if ((error = wait4(td, (struct wait_args *)uap))) - return error; - if (orusage && (error = copyin(rusage, &ru, sizeof(ru)) == 0)){ + error = kern_wait(td, uap->pid, &status, uap->options, &ru); + if (error) + return (error); + if (uap->status != NULL) + error = copyout(&status, uap->status, sizeof(status)); + if (uap->rusage != NULL && error == 0) { TV_CP(ru.ru_utime, oru.ru_utime); TV_CP(ru.ru_stime, oru.ru_stime); bcopy(&ru.ru_first, &oru.ru_first, (&(oru.ru_last) - &(oru.ru_first))); - copyout(&oru, orusage, sizeof (struct osf1_rusage)); + error = copyout(&oru, uap->rusage, sizeof (struct osf1_rusage)); } - return (0); + return (error); } int osf1_madvise(td, uap) struct thread *td; struct osf1_madvise_args *uap; { /* XXX */ return EINVAL; } int osf1_execve(td, uap) struct thread *td; struct 
osf1_execve_args *uap; { caddr_t sg; struct execve_args ap; sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); ap.fname = uap->path; ap.argv = uap->argp; ap.envv = uap->envp; return execve(td, &ap); } int osf1_usleep_thread(td, uap) struct thread *td; struct osf1_usleep_thread_args *uap; { int error, s, timo; struct osf1_timeval time; struct timeval difftv, endtv, sleeptv, tv; if ((error = copyin(uap->sleep, &time, sizeof time))) return (error); sleeptv.tv_sec = (u_long)time.tv_sec; sleeptv.tv_usec = (u_long)time.tv_usec; timo = tvtohz(&sleeptv); /* * Some callers use usleep(0) as a sort of thread-yield so make * sure that the timeout is non-zero. */ if (timo == 0) timo = 1; s = splclock(); microtime(&tv); splx(s); tsleep(td, PUSER|PCATCH, "OSF/1", timo); if (uap->slept != NULL) { s = splclock(); microtime(&endtv); timersub(&time, &endtv, &difftv); splx(s); if (tv.tv_sec < 0 || tv.tv_usec < 0) tv.tv_sec = tv.tv_usec = 0; TV_CP(difftv, time) error = copyout(&time, uap->slept, sizeof time); } return (error); } int osf1_gettimeofday(td, uap) struct thread *td; register struct osf1_gettimeofday_args *uap; { int error; struct timeval atv; struct timezone tz; struct osf1_timeval otv; error = 0; if (uap->tp) { microtime(&atv); otv.tv_sec = atv.tv_sec; otv.tv_usec = atv.tv_usec; if ((error = copyout((caddr_t)&otv, (caddr_t)uap->tp, sizeof (otv)))) return (error); } if (uap->tzp) { tz.tz_minuteswest = tz_minuteswest; tz.tz_dsttime = tz_dsttime; error = copyout((caddr_t)&tz, (caddr_t)uap->tzp, sizeof (tz)); } return (error); } int osf1_select(td, uap) struct thread *td; register struct osf1_select_args *uap; { if (uap->tv) { int error; caddr_t sg; struct osf1_timeval otv; struct timeval tv; sg = stackgap_init(); if ((error=copyin((caddr_t)uap->tv,(caddr_t)&otv,sizeof(otv)))) return(error); TV_CP(otv,tv); uap->tv = stackgap_alloc(&sg, sizeof(struct timeval)); if ((error=copyout((caddr_t)&tv, (caddr_t)uap->tv,sizeof(tv)))) return(error); } return(select(td, (struct 
select_args *)uap)); } int osf1_setitimer(td, uap) struct thread *td; struct osf1_setitimer_args *uap; { int error; caddr_t old_oitv, sg; struct itimerval itv; struct osf1_itimerval otv; error = 0; old_oitv = (caddr_t)uap->oitv; sg = stackgap_init(); if ((error = copyin((caddr_t)uap->itv,(caddr_t)&otv,sizeof(otv)))) { printf("%s(%d): error = %d\n", __FILE__, __LINE__, error); return error; } TV_CP(otv.it_interval,itv.it_interval); TV_CP(otv.it_value,itv.it_value); uap->itv = stackgap_alloc(&sg, sizeof(struct itimerval)); if ((error = copyout((caddr_t)&itv,(caddr_t)uap->itv,sizeof(itv)))) { printf("%s(%d): error = %d\n", __FILE__, __LINE__, error); return error; } uap->oitv = stackgap_alloc(&sg, sizeof(struct itimerval)); if ((error = setitimer(td, (struct setitimer_args *)uap))) { printf("%s(%d): error = %d\n", __FILE__, __LINE__, error); return error; } if ((error = copyin((caddr_t)uap->oitv,(caddr_t)&itv,sizeof(itv)))) { printf("%s(%d): error = %d\n", __FILE__, __LINE__, error); return error; } TV_CP(itv.it_interval,otv.it_interval); TV_CP(itv.it_value,otv.it_value); if (old_oitv && (error = copyout((caddr_t)&otv, old_oitv, sizeof(otv)))) { printf("%s(%d): error = %d\n", __FILE__, __LINE__, error); } return error; } int osf1_getitimer(td, uap) struct thread *td; struct osf1_getitimer_args *uap; { int error; caddr_t old_itv, sg; struct itimerval itv; struct osf1_itimerval otv; error = 0; old_itv = (caddr_t)uap->itv; sg = stackgap_init(); uap->itv = stackgap_alloc(&sg, sizeof(struct itimerval)); if ((error = getitimer(td, (struct getitimer_args *)uap))) { printf("%s(%d): error = %d\n", __FILE__, __LINE__, error); return error; } if ((error = copyin((caddr_t)uap->itv,(caddr_t)&itv,sizeof(itv)))) { printf("%s(%d): error = %d\n", __FILE__, __LINE__, error); return error; } TV_CP(itv.it_interval,otv.it_interval); TV_CP(itv.it_value,otv.it_value); if ((error = copyout((caddr_t)&otv, old_itv, sizeof(otv)))) { printf("%s(%d): error = %d\n", __FILE__, __LINE__, error); } 
return error; } int osf1_proplist_syscall(td, uap) struct thread *td; struct osf1_proplist_syscall_args *uap; { return(EOPNOTSUPP); } int osf1_ntpgettime(td, uap) struct thread *td; struct osf1_ntpgettime_args *uap; { return(ENOSYS); } int osf1_ntpadjtime(td, uap) struct thread *td; struct osf1_ntpadjtime_args *uap; { return(ENOSYS); } int osf1_setpgrp(td, uap) struct thread *td; struct osf1_setpgrp_args *uap; { return(setpgid(td, (struct setpgid_args *)uap)); } int osf1_uswitch(td, uap) struct thread *td; struct osf1_uswitch_args *uap; { struct proc *p; int rv; vm_map_entry_t entry; vm_offset_t zero; GIANT_REQUIRED; p = td->td_proc; zero = 0; if (uap->cmd == OSF1_USC_GET) { if (vm_map_lookup_entry(&(p->p_vmspace->vm_map), 0, &entry)) td->td_retval[0] = OSF1_USW_NULLP; else td->td_retval[0] = 0; return(KERN_SUCCESS); } else if (uap->cmd == OSF1_USC_SET) if (uap->mask & OSF1_USW_NULLP) { rv = vm_mmap(&(p->p_vmspace->vm_map), &zero, PAGE_SIZE, VM_PROT_READ, VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED | MAP_ANON, NULL, 0); if (!rv) return(KERN_SUCCESS); else { printf( "osf1_uswitch:vm_mmap of zero page failed with status %d\n", rv); return(rv); } } return(EINVAL); } int osf1_classcntl(td, uap) struct thread *td; struct osf1_classcntl_args *uap; { return(EACCES); /* class scheduling not enabled */ } struct osf1_tbl_loadavg { union { long l[3]; double d[3]; } tl_avenrun; int tl_lscale; long tl_mach_factor[3]; /* ???? 
*/ }; struct osf1_tbl_sysinfo { long si_user; long si_nice; long si_sys; long si_idle; long si_hz; long si_phz; long si_boottime; long wait; }; #define TBL_LOADAVG 3 #define TBL_SYSINFO 12 int osf1_table(td, uap) struct thread *td; struct osf1_table_args /*{ long id; long index; void *addr; long nel; u_long lel; }*/ *uap; { int retval; struct osf1_tbl_loadavg ld; struct osf1_tbl_sysinfo si; retval = 0; switch(uap->id) { case TBL_LOADAVG: /* xemacs wants this */ if ((uap->index != 0) || (uap->nel != 1)) retval = EINVAL; bcopy(&averunnable, &ld, sizeof(averunnable)); ld.tl_lscale = (u_int)averunnable.fscale; retval = copyout(&ld, uap->addr, sizeof(ld)); break; case TBL_SYSINFO: if ((uap->index != 0) || (uap->nel != 1)) retval = EINVAL; bzero(&si, sizeof(si)); #if 0 si.si_user = cp_time[CP_USER]; si.si_nice = cp_time[CP_NICE]; si.si_sys = cp_time[CP_SYS]; si.si_idle = cp_time[CP_IDLE]; si.wait = cp_time[CP_INTR]; #endif si.si_hz = hz; si.si_phz = profhz; si.si_boottime = boottime.tv_sec; retval = copyout(&si, uap->addr, sizeof(si)); break; default: printf("osf1_table: %ld, %ld, %p, %ld %ld\n", uap->id, uap->index, uap->addr, uap->nel, uap->lel); retval = EINVAL; } return retval; } /* * MPSAFE */ int osf1_sysinfo(td, uap) struct thread *td; struct osf1_sysinfo_args /*{ int cmd; char *buf; long count; }*/ *uap; { int name[2], retval; size_t bytes, len; char *string; string = NULL; switch(uap->cmd) { case 1: /* OS */ string = "OSF1"; break; case 2: /* hostname, from ogethostname */ len = uap->count; name[0] = CTL_KERN; name[1] = KERN_HOSTNAME; mtx_lock(&Giant); retval = userland_sysctl(td, name, 2, uap->buf, &len, 1, 0, 0, &bytes); mtx_unlock(&Giant); td->td_retval[0] = bytes; return(retval); break; case 3: /* release of osf1 */ string = "V4.0"; break; case 4: /* minor version of osf1 */ string = "878"; break; case 5: /* machine or arch */ case 6: string = "alpha"; break; case 7: /* serial number, real osf1 returns 0! 
*/ string = "0"; break; case 8: /* HW vendor */ string = "Digital"; break; case 9: /* dunno, this is what du does.. */ return(ENOSYS); break; default: return(EINVAL); } bytes = min(uap->count, strlen(string)+1); copyout(string, uap->buf, bytes); td->td_retval[0] = bytes; return(0); } Index: head/sys/compat/freebsd32/freebsd32_misc.c =================================================================== --- head/sys/compat/freebsd32/freebsd32_misc.c (revision 127139) +++ head/sys/compat/freebsd32/freebsd32_misc.c (revision 127140) @@ -1,1304 +1,1300 @@ /*- * Copyright (c) 2002 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/selinfo.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct timeval32) == 8); CTASSERT(sizeof(struct timespec32) == 8); CTASSERT(sizeof(struct statfs32) == 256); CTASSERT(sizeof(struct rusage32) == 72); int freebsd32_wait4(struct thread *td, struct freebsd32_wait4_args *uap) { - int error; - caddr_t sg; - struct rusage32 *rusage32, ru32; - struct rusage *rusage = NULL, ru; + int error, status; + struct rusage32 ru32; + struct rusage ru; - rusage32 = uap->rusage; - if (rusage32) { - sg = stackgap_init(); - rusage = stackgap_alloc(&sg, sizeof(struct rusage)); - uap->rusage = (struct rusage32 *)rusage; - } - error = wait4(td, (struct wait_args *)uap); + error = kern_wait(td, uap->pid, &status, uap->options, &ru); if (error) return (error); - if (rusage32 && (error = copyin(rusage, &ru, sizeof(ru)) == 0)) { + if (uap->status != NULL) + error = copyout(&status, uap->status, sizeof(status)); + if (uap->rusage != NULL && error == 0) { TV_CP(ru, ru32, ru_utime); TV_CP(ru, ru32, ru_stime); CP(ru, ru32, ru_maxrss); CP(ru, ru32, ru_ixrss); CP(ru, ru32, ru_idrss); CP(ru, ru32, ru_isrss); CP(ru, ru32, ru_minflt); CP(ru, ru32, ru_majflt); CP(ru, ru32, ru_nswap); CP(ru, ru32, ru_inblock); CP(ru, ru32, ru_oublock); CP(ru, ru32, ru_msgsnd); CP(ru, ru32, ru_msgrcv); CP(ru, ru32, ru_nsignals); CP(ru, ru32, ru_nvcsw); CP(ru, ru32, ru_nivcsw); - error = copyout(&ru32, rusage32, sizeof(ru32)); + error = copyout(&ru32, uap->rusage, sizeof(ru32)); } return 
(error); } static void copy_statfs(struct statfs *in, struct statfs32 *out) { CP(*in, *out, f_bsize); CP(*in, *out, f_iosize); CP(*in, *out, f_blocks); CP(*in, *out, f_bfree); CP(*in, *out, f_bavail); CP(*in, *out, f_files); CP(*in, *out, f_ffree); CP(*in, *out, f_fsid); CP(*in, *out, f_owner); CP(*in, *out, f_type); CP(*in, *out, f_flags); CP(*in, *out, f_flags); CP(*in, *out, f_syncwrites); CP(*in, *out, f_asyncwrites); bcopy(in->f_fstypename, out->f_fstypename, MFSNAMELEN); bcopy(in->f_mntonname, out->f_mntonname, MNAMELEN); CP(*in, *out, f_syncreads); CP(*in, *out, f_asyncreads); bcopy(in->f_mntfromname, out->f_mntfromname, MNAMELEN); } int freebsd32_getfsstat(struct thread *td, struct freebsd32_getfsstat_args *uap) { int error; caddr_t sg; struct statfs32 *sp32, stat32; struct statfs *sp = NULL, stat; int maxcount, count, i; sp32 = uap->buf; maxcount = uap->bufsize / sizeof(struct statfs32); if (sp32) { sg = stackgap_init(); sp = stackgap_alloc(&sg, sizeof(struct statfs) * maxcount); uap->buf = (struct statfs32 *)sp; } error = getfsstat(td, (struct getfsstat_args *) uap); if (sp32 && !error) { count = td->td_retval[0]; for (i = 0; i < count; i++) { error = copyin(&sp[i], &stat, sizeof(stat)); if (error) return (error); copy_statfs(&stat, &stat32); error = copyout(&stat32, &sp32[i], sizeof(stat32)); if (error) return (error); } } return (error); } struct sigaltstack32 { u_int32_t ss_sp; u_int32_t ss_size; int ss_flags; }; CTASSERT(sizeof(struct sigaltstack32) == 12); int freebsd32_sigaltstack(struct thread *td, struct freebsd32_sigaltstack_args *uap) { struct sigaltstack32 s32; struct sigaltstack ss, oss, *ssp; int error; if (uap->ss != NULL) { error = copyin(uap->ss, &s32, sizeof(s32)); if (error) return (error); PTRIN_CP(s32, ss, ss_sp); CP(s32, ss, ss_size); CP(s32, ss, ss_flags); ssp = &ss; } else ssp = NULL; error = kern_sigaltstack(td, ssp, &oss); if (error == 0 && uap->oss != NULL) { PTROUT_CP(oss, s32, ss_sp); CP(oss, s32, ss_size); CP(oss, s32, 
ss_flags); error = copyout(&s32, uap->oss, sizeof(s32)); } return (error); } int freebsd32_execve(struct thread *td, struct freebsd32_execve_args *uap) { int error; caddr_t sg; struct execve_args ap; u_int32_t *p32, arg; char **p, *p64; int count; sg = stackgap_init(); ap.fname = uap->fname; if (uap->argv) { count = 0; p32 = uap->argv; do { error = copyin(p32++, &arg, sizeof(arg)); if (error) return error; count++; } while (arg != 0); p = stackgap_alloc(&sg, count * sizeof(char *)); ap.argv = p; p32 = uap->argv; do { error = copyin(p32++, &arg, sizeof(arg)); if (error) return error; p64 = PTRIN(arg); error = copyout(&p64, p++, sizeof(p64)); if (error) return error; } while (arg != 0); } if (uap->envv) { count = 0; p32 = uap->envv; do { error = copyin(p32++, &arg, sizeof(arg)); if (error) return error; count++; } while (arg != 0); p = stackgap_alloc(&sg, count * sizeof(char *)); ap.envv = p; p32 = uap->envv; do { error = copyin(p32++, &arg, sizeof(arg)); if (error) return error; p64 = PTRIN(arg); error = copyout(&p64, p++, sizeof(p64)); if (error) return error; } while (arg != 0); } return execve(td, &ap); } #ifdef __ia64__ static int freebsd32_mmap_partial(struct thread *td, vm_offset_t start, vm_offset_t end, int prot, int fd, off_t pos) { vm_map_t map; vm_map_entry_t entry; int rv; map = &td->td_proc->p_vmspace->vm_map; if (fd != -1) prot |= VM_PROT_WRITE; if (vm_map_lookup_entry(map, start, &entry)) { if ((entry->protection & prot) != prot) { rv = vm_map_protect(map, trunc_page(start), round_page(end), entry->protection | prot, FALSE); if (rv != KERN_SUCCESS) return (EINVAL); } } else { vm_offset_t addr = trunc_page(start); rv = vm_map_find(map, 0, 0, &addr, PAGE_SIZE, FALSE, prot, VM_PROT_ALL, 0); if (rv != KERN_SUCCESS) return (EINVAL); } if (fd != -1) { struct pread_args r; r.fd = fd; r.buf = (void *) start; r.nbyte = end - start; r.offset = pos; return (pread(td, &r)); } else { while (start < end) { subyte((void *) start, 0); start++; } return (0); } } 
#endif int freebsd32_mmap(struct thread *td, struct freebsd32_mmap_args *uap) { struct mmap_args ap; vm_offset_t addr = (vm_offset_t) uap->addr; vm_size_t len = uap->len; int prot = uap->prot; int flags = uap->flags; int fd = uap->fd; off_t pos = (uap->poslo | ((off_t)uap->poshi << 32)); #ifdef __ia64__ vm_size_t pageoff; int error; /* * Attempt to handle page size hassles. */ pageoff = (pos & PAGE_MASK); if (flags & MAP_FIXED) { vm_offset_t start, end; start = addr; end = addr + len; if (start != trunc_page(start)) { error = freebsd32_mmap_partial(td, start, round_page(start), prot, fd, pos); if (fd != -1) pos += round_page(start) - start; start = round_page(start); } if (end != round_page(end)) { vm_offset_t t = trunc_page(end); error = freebsd32_mmap_partial(td, t, end, prot, fd, pos + t - start); end = trunc_page(end); } if (end > start && fd != -1 && (pos & PAGE_MASK)) { /* * We can't map this region at all. The specified * address doesn't have the same alignment as the file * position. Fake the mapping by simply reading the * entire region into memory. First we need to make * sure the region exists. */ vm_map_t map; struct pread_args r; int rv; prot |= VM_PROT_WRITE; map = &td->td_proc->p_vmspace->vm_map; rv = vm_map_remove(map, start, end); if (rv != KERN_SUCCESS) return (EINVAL); rv = vm_map_find(map, 0, 0, &start, end - start, FALSE, prot, VM_PROT_ALL, 0); if (rv != KERN_SUCCESS) return (EINVAL); r.fd = fd; r.buf = (void *) start; r.nbyte = end - start; r.offset = pos; error = pread(td, &r); if (error) return (error); td->td_retval[0] = addr; return (0); } if (end == start) { /* * After dealing with the ragged ends, there * might be none left. 
*/ td->td_retval[0] = addr; return (0); } addr = start; len = end - start; } #endif ap.addr = (void *) addr; ap.len = len; ap.prot = prot; ap.flags = flags; ap.fd = fd; ap.pos = pos; return (mmap(td, &ap)); } struct itimerval32 { struct timeval32 it_interval; struct timeval32 it_value; }; CTASSERT(sizeof(struct itimerval32) == 16); int freebsd32_setitimer(struct thread *td, struct freebsd32_setitimer_args *uap) { int error; caddr_t sg; struct itimerval32 *p32, *op32, s32; struct itimerval *p = NULL, *op = NULL, s; p32 = uap->itv; if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, sizeof(struct itimerval)); uap->itv = (struct itimerval32 *)p; error = copyin(p32, &s32, sizeof(s32)); if (error) return (error); TV_CP(s32, s, it_interval); TV_CP(s32, s, it_value); error = copyout(&s, p, sizeof(s)); if (error) return (error); } op32 = uap->oitv; if (op32) { sg = stackgap_init(); op = stackgap_alloc(&sg, sizeof(struct itimerval)); uap->oitv = (struct itimerval32 *)op; } error = setitimer(td, (struct setitimer_args *) uap); if (error) return (error); if (op32) { error = copyin(op, &s, sizeof(s)); if (error) return (error); TV_CP(s, s32, it_interval); TV_CP(s, s32, it_value); error = copyout(&s32, op32, sizeof(s32)); } return (error); } int freebsd32_getitimer(struct thread *td, struct freebsd32_getitimer_args *uap) { int error; caddr_t sg; struct itimerval32 *p32, s32; struct itimerval *p = NULL, s; p32 = uap->itv; if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, sizeof(struct itimerval)); uap->itv = (struct itimerval32 *)p; } error = getitimer(td, (struct getitimer_args *) uap); if (error) return (error); if (p32) { error = copyin(p, &s, sizeof(s)); if (error) return (error); TV_CP(s, s32, it_interval); TV_CP(s, s32, it_value); error = copyout(&s32, p32, sizeof(s32)); } return (error); } int freebsd32_select(struct thread *td, struct freebsd32_select_args *uap) { int error; caddr_t sg; struct timeval32 *p32, s32; struct timeval *p = NULL, s; p32 = uap->tv; 
if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, sizeof(struct timeval)); uap->tv = (struct timeval32 *)p; error = copyin(p32, &s32, sizeof(s32)); if (error) return (error); CP(s32, s, tv_sec); CP(s32, s, tv_usec); error = copyout(&s, p, sizeof(s)); if (error) return (error); } /* * XXX big-endian needs to convert the fd_sets too. */ return (select(td, (struct select_args *) uap)); } struct kevent32 { u_int32_t ident; /* identifier for this event */ short filter; /* filter for event */ u_short flags; u_int fflags; int32_t data; u_int32_t udata; /* opaque user data identifier */ }; CTASSERT(sizeof(struct kevent32) == 20); int freebsd32_kevent(struct thread *td, struct freebsd32_kevent_args *uap) { int error; caddr_t sg; struct timespec32 ts32; struct timespec ts; struct kevent32 ks32; struct kevent *ks; struct kevent_args a; int i; sg = stackgap_init(); a.fd = uap->fd; a.changelist = uap->changelist; a.nchanges = uap->nchanges; a.eventlist = uap->eventlist; a.nevents = uap->nevents; a.timeout = NULL; if (uap->timeout) { a.timeout = stackgap_alloc(&sg, sizeof(struct timespec)); error = copyin(uap->timeout, &ts32, sizeof(ts32)); if (error) return (error); CP(ts32, ts, tv_sec); CP(ts32, ts, tv_nsec); error = copyout(&ts, (void *)(uintptr_t)a.timeout, sizeof(ts)); if (error) return (error); } if (uap->changelist) { a.changelist = (struct kevent *)stackgap_alloc(&sg, uap->nchanges * sizeof(struct kevent)); for (i = 0; i < uap->nchanges; i++) { error = copyin(&uap->changelist[i], &ks32, sizeof(ks32)); if (error) return (error); ks = (struct kevent *)(uintptr_t)&a.changelist[i]; CP(ks32, *ks, ident); CP(ks32, *ks, filter); CP(ks32, *ks, flags); CP(ks32, *ks, fflags); CP(ks32, *ks, data); PTRIN_CP(ks32, *ks, udata); } } if (uap->eventlist) { a.eventlist = stackgap_alloc(&sg, uap->nevents * sizeof(struct kevent)); } error = kevent(td, &a); if (uap->eventlist && error > 0) { for (i = 0; i < error; i++) { ks = &a.eventlist[i]; CP(*ks, ks32, ident); CP(*ks, ks32, 
filter); CP(*ks, ks32, flags); CP(*ks, ks32, fflags); CP(*ks, ks32, data); PTROUT_CP(*ks, ks32, udata); error = copyout(&ks32, &uap->eventlist[i], sizeof(ks32)); if (error) return (error); } } return error; } int freebsd32_gettimeofday(struct thread *td, struct freebsd32_gettimeofday_args *uap) { struct timeval atv; struct timeval32 atv32; struct timezone rtz; int error = 0; if (uap->tp) { microtime(&atv); CP(atv, atv32, tv_sec); CP(atv, atv32, tv_usec); error = copyout(&atv32, uap->tp, sizeof (atv32)); } if (error == 0 && uap->tzp != NULL) { rtz.tz_minuteswest = tz_minuteswest; rtz.tz_dsttime = tz_dsttime; error = copyout(&rtz, uap->tzp, sizeof (rtz)); } return (error); } int freebsd32_getrusage(struct thread *td, struct freebsd32_getrusage_args *uap) { int error; caddr_t sg; struct rusage32 *p32, s32; struct rusage *p = NULL, s; p32 = uap->rusage; if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, sizeof(struct rusage)); uap->rusage = (struct rusage32 *)p; } error = getrusage(td, (struct getrusage_args *) uap); if (error) return (error); if (p32) { error = copyin(p, &s, sizeof(s)); if (error) return (error); TV_CP(s, s32, ru_utime); TV_CP(s, s32, ru_stime); CP(s, s32, ru_maxrss); CP(s, s32, ru_ixrss); CP(s, s32, ru_idrss); CP(s, s32, ru_isrss); CP(s, s32, ru_minflt); CP(s, s32, ru_majflt); CP(s, s32, ru_nswap); CP(s, s32, ru_inblock); CP(s, s32, ru_oublock); CP(s, s32, ru_msgsnd); CP(s, s32, ru_msgrcv); CP(s, s32, ru_nsignals); CP(s, s32, ru_nvcsw); CP(s, s32, ru_nivcsw); error = copyout(&s32, p32, sizeof(s32)); } return (error); } struct iovec32 { u_int32_t iov_base; int iov_len; }; #define STACKGAPLEN 400 CTASSERT(sizeof(struct iovec32) == 8); int freebsd32_readv(struct thread *td, struct freebsd32_readv_args *uap) { int error, osize, nsize, i; caddr_t sg; struct readv_args /* { syscallarg(int) fd; syscallarg(struct iovec *) iovp; syscallarg(u_int) iovcnt; } */ a; struct iovec32 *oio; struct iovec *nio; sg = stackgap_init(); if (uap->iovcnt > (STACKGAPLEN 
/ sizeof (struct iovec))) return (EINVAL); osize = uap->iovcnt * sizeof (struct iovec32); nsize = uap->iovcnt * sizeof (struct iovec); oio = malloc(osize, M_TEMP, M_WAITOK); nio = malloc(nsize, M_TEMP, M_WAITOK); error = 0; if ((error = copyin(uap->iovp, oio, osize))) goto punt; for (i = 0; i < uap->iovcnt; i++) { nio[i].iov_base = PTRIN(oio[i].iov_base); nio[i].iov_len = oio[i].iov_len; } a.fd = uap->fd; a.iovp = stackgap_alloc(&sg, nsize); a.iovcnt = uap->iovcnt; if ((error = copyout(nio, (caddr_t)a.iovp, nsize))) goto punt; error = readv(td, &a); punt: free(oio, M_TEMP); free(nio, M_TEMP); return (error); } int freebsd32_writev(struct thread *td, struct freebsd32_writev_args *uap) { int error, i, nsize, osize; caddr_t sg; struct writev_args /* { syscallarg(int) fd; syscallarg(struct iovec *) iovp; syscallarg(u_int) iovcnt; } */ a; struct iovec32 *oio; struct iovec *nio; sg = stackgap_init(); if (uap->iovcnt > (STACKGAPLEN / sizeof (struct iovec))) return (EINVAL); osize = uap->iovcnt * sizeof (struct iovec32); nsize = uap->iovcnt * sizeof (struct iovec); oio = malloc(osize, M_TEMP, M_WAITOK); nio = malloc(nsize, M_TEMP, M_WAITOK); error = 0; if ((error = copyin(uap->iovp, oio, osize))) goto punt; for (i = 0; i < uap->iovcnt; i++) { nio[i].iov_base = PTRIN(oio[i].iov_base); nio[i].iov_len = oio[i].iov_len; } a.fd = uap->fd; a.iovp = stackgap_alloc(&sg, nsize); a.iovcnt = uap->iovcnt; if ((error = copyout(nio, (caddr_t)a.iovp, nsize))) goto punt; error = writev(td, &a); punt: free(oio, M_TEMP); free(nio, M_TEMP); return (error); } int freebsd32_settimeofday(struct thread *td, struct freebsd32_settimeofday_args *uap) { int error; caddr_t sg; struct timeval32 *p32, s32; struct timeval *p = NULL, s; p32 = uap->tv; if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, sizeof(struct timeval)); uap->tv = (struct timeval32 *)p; error = copyin(p32, &s32, sizeof(s32)); if (error) return (error); CP(s32, s, tv_sec); CP(s32, s, tv_usec); error = copyout(&s, p, sizeof(s)); 
if (error) return (error); } return (settimeofday(td, (struct settimeofday_args *) uap)); } int freebsd32_utimes(struct thread *td, struct freebsd32_utimes_args *uap) { int error; caddr_t sg; struct timeval32 *p32, s32[2]; struct timeval *p = NULL, s[2]; p32 = uap->tptr; if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, 2*sizeof(struct timeval)); uap->tptr = (struct timeval32 *)p; error = copyin(p32, s32, sizeof(s32)); if (error) return (error); CP(s32[0], s[0], tv_sec); CP(s32[0], s[0], tv_usec); CP(s32[1], s[1], tv_sec); CP(s32[1], s[1], tv_usec); error = copyout(s, p, sizeof(s)); if (error) return (error); } return (utimes(td, (struct utimes_args *) uap)); } int freebsd32_adjtime(struct thread *td, struct freebsd32_adjtime_args *uap) { int error; caddr_t sg; struct timeval32 *p32, *op32, s32; struct timeval *p = NULL, *op = NULL, s; p32 = uap->delta; if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, sizeof(struct timeval)); uap->delta = (struct timeval32 *)p; error = copyin(p32, &s32, sizeof(s32)); if (error) return (error); CP(s32, s, tv_sec); CP(s32, s, tv_usec); error = copyout(&s, p, sizeof(s)); if (error) return (error); } op32 = uap->olddelta; if (op32) { sg = stackgap_init(); op = stackgap_alloc(&sg, sizeof(struct timeval)); uap->olddelta = (struct timeval32 *)op; } error = utimes(td, (struct utimes_args *) uap); if (error) return error; if (op32) { error = copyin(op, &s, sizeof(s)); if (error) return (error); CP(s, s32, tv_sec); CP(s, s32, tv_usec); error = copyout(&s32, op32, sizeof(s32)); } return (error); } int freebsd32_statfs(struct thread *td, struct freebsd32_statfs_args *uap) { int error; caddr_t sg; struct statfs32 *p32, s32; struct statfs *p = NULL, s; p32 = uap->buf; if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, sizeof(struct statfs)); uap->buf = (struct statfs32 *)p; } error = statfs(td, (struct statfs_args *) uap); if (error) return (error); if (p32) { error = copyin(p, &s, sizeof(s)); if (error) return (error); 
copy_statfs(&s, &s32); error = copyout(&s32, p32, sizeof(s32)); } return (error); } int freebsd32_fstatfs(struct thread *td, struct freebsd32_fstatfs_args *uap) { int error; caddr_t sg; struct statfs32 *p32, s32; struct statfs *p = NULL, s; p32 = uap->buf; if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, sizeof(struct statfs)); uap->buf = (struct statfs32 *)p; } error = fstatfs(td, (struct fstatfs_args *) uap); if (error) return (error); if (p32) { error = copyin(p, &s, sizeof(s)); if (error) return (error); copy_statfs(&s, &s32); error = copyout(&s32, p32, sizeof(s32)); } return (error); } int freebsd32_semsys(struct thread *td, struct freebsd32_semsys_args *uap) { /* * Vector through to semsys if it is loaded. */ return sysent[169].sy_call(td, uap); } int freebsd32_msgsys(struct thread *td, struct freebsd32_msgsys_args *uap) { /* * Vector through to msgsys if it is loaded. */ return sysent[170].sy_call(td, uap); } int freebsd32_shmsys(struct thread *td, struct freebsd32_shmsys_args *uap) { /* * Vector through to shmsys if it is loaded. 
*/ return sysent[171].sy_call(td, uap); } int freebsd32_pread(struct thread *td, struct freebsd32_pread_args *uap) { struct pread_args ap; ap.fd = uap->fd; ap.buf = uap->buf; ap.nbyte = uap->nbyte; ap.offset = (uap->offsetlo | ((off_t)uap->offsethi << 32)); return (pread(td, &ap)); } int freebsd32_pwrite(struct thread *td, struct freebsd32_pwrite_args *uap) { struct pwrite_args ap; ap.fd = uap->fd; ap.buf = uap->buf; ap.nbyte = uap->nbyte; ap.offset = (uap->offsetlo | ((off_t)uap->offsethi << 32)); return (pwrite(td, &ap)); } int freebsd32_lseek(struct thread *td, struct freebsd32_lseek_args *uap) { int error; struct lseek_args ap; off_t pos; ap.fd = uap->fd; ap.offset = (uap->offsetlo | ((off_t)uap->offsethi << 32)); ap.whence = uap->whence; error = lseek(td, &ap); /* Expand the quad return into two parts for eax and edx */ pos = *(off_t *)(td->td_retval); td->td_retval[0] = pos & 0xffffffff; /* %eax */ td->td_retval[1] = pos >> 32; /* %edx */ return error; } int freebsd32_truncate(struct thread *td, struct freebsd32_truncate_args *uap) { struct truncate_args ap; ap.path = uap->path; ap.length = (uap->lengthlo | ((off_t)uap->lengthhi << 32)); return (truncate(td, &ap)); } int freebsd32_ftruncate(struct thread *td, struct freebsd32_ftruncate_args *uap) { struct ftruncate_args ap; ap.fd = uap->fd; ap.length = (uap->lengthlo | ((off_t)uap->lengthhi << 32)); return (ftruncate(td, &ap)); } #ifdef COMPAT_FREEBSD4 int freebsd4_freebsd32_sendfile(struct thread *td, struct freebsd4_freebsd32_sendfile_args *uap) { struct freebsd4_sendfile_args ap; ap.fd = uap->fd; ap.s = uap->s; ap.offset = (uap->offsetlo | ((off_t)uap->offsethi << 32)); ap.nbytes = uap->nbytes; /* XXX check */ ap.hdtr = uap->hdtr; /* XXX check */ ap.sbytes = uap->sbytes; /* XXX FIXME!! 
*/ ap.flags = uap->flags; return (freebsd4_sendfile(td, &ap)); } #endif int freebsd32_sendfile(struct thread *td, struct freebsd32_sendfile_args *uap) { struct sendfile_args ap; ap.fd = uap->fd; ap.s = uap->s; ap.offset = (uap->offsetlo | ((off_t)uap->offsethi << 32)); ap.nbytes = uap->nbytes; /* XXX check */ ap.hdtr = uap->hdtr; /* XXX check */ ap.sbytes = uap->sbytes; /* XXX FIXME!! */ ap.flags = uap->flags; return (sendfile(td, &ap)); } struct stat32 { udev_t st_dev; ino_t st_ino; mode_t st_mode; nlink_t st_nlink; uid_t st_uid; gid_t st_gid; udev_t st_rdev; struct timespec32 st_atimespec; struct timespec32 st_mtimespec; struct timespec32 st_ctimespec; off_t st_size; int64_t st_blocks; u_int32_t st_blksize; u_int32_t st_flags; u_int32_t st_gen; struct timespec32 st_birthtimespec; unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec32)); unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec32)); }; CTASSERT(sizeof(struct stat32) == 96); static void copy_stat( struct stat *in, struct stat32 *out) { CP(*in, *out, st_dev); CP(*in, *out, st_ino); CP(*in, *out, st_mode); CP(*in, *out, st_nlink); CP(*in, *out, st_uid); CP(*in, *out, st_gid); CP(*in, *out, st_rdev); TS_CP(*in, *out, st_atimespec); TS_CP(*in, *out, st_mtimespec); TS_CP(*in, *out, st_ctimespec); CP(*in, *out, st_size); CP(*in, *out, st_blocks); CP(*in, *out, st_blksize); CP(*in, *out, st_flags); CP(*in, *out, st_gen); } int freebsd32_stat(struct thread *td, struct freebsd32_stat_args *uap) { struct stat sb; struct stat32 sb32; int error; struct nameidata nd; #ifdef LOOKUP_SHARED NDINIT(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF | NOOBJ, UIO_USERSPACE, uap->path, td); #else NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE, uap->path, td); #endif if ((error = namei(&nd)) != 0) return (error); error = vn_stat(nd.ni_vp, &sb, td->td_ucred, NOCRED, td); NDFREE(&nd, NDF_ONLY_PNBUF); vput(nd.ni_vp); if (error) return (error); copy_stat(&sb, &sb32); error = copyout(&sb32, uap->ub, sizeof 
(sb32)); return (error); } int freebsd32_fstat(struct thread *td, struct freebsd32_fstat_args *uap) { struct file *fp; struct stat ub; struct stat32 ub32; int error; if ((error = fget(td, uap->fd, &fp)) != 0) return (error); mtx_lock(&Giant); error = fo_stat(fp, &ub, td->td_ucred, td); mtx_unlock(&Giant); fdrop(fp, td); if (error) return (error); copy_stat(&ub, &ub32); error = copyout(&ub32, uap->ub, sizeof(ub32)); return (error); } int freebsd32_lstat(struct thread *td, struct freebsd32_lstat_args *uap) { int error; struct vnode *vp; struct stat sb; struct stat32 sb32; struct nameidata nd; NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE, uap->path, td); if ((error = namei(&nd)) != 0) return (error); vp = nd.ni_vp; error = vn_stat(vp, &sb, td->td_ucred, NOCRED, td); NDFREE(&nd, NDF_ONLY_PNBUF); vput(vp); if (error) return (error); copy_stat(&sb, &sb32); error = copyout(&sb32, uap->ub, sizeof (sb32)); return (error); } /* * MPSAFE */ int freebsd32_sysctl(struct thread *td, struct freebsd32_sysctl_args *uap) { int error, name[CTL_MAXNAME]; size_t j, oldlen; if (uap->namelen > CTL_MAXNAME || uap->namelen < 2) return (EINVAL); error = copyin(uap->name, &name, uap->namelen * sizeof(int)); if (error) return (error); mtx_lock(&Giant); if (uap->oldlenp) oldlen = fuword32(uap->oldlenp); else oldlen = 0; error = userland_sysctl(td, name, uap->namelen, uap->old, &oldlen, 1, uap->new, uap->newlen, &j); if (error && error != ENOMEM) goto done2; if (uap->oldlenp) { suword32(uap->oldlenp, j); } done2: mtx_unlock(&Giant); return (error); } struct sigaction32 { u_int32_t sa_u; int sa_flags; sigset_t sa_mask; }; CTASSERT(sizeof(struct sigaction32) == 24); int freebsd32_sigaction(struct thread *td, struct freebsd32_sigaction_args *uap) { struct sigaction32 s32; struct sigaction sa, osa, *sap; int error; if (uap->act) { error = copyin(uap->act, &s32, sizeof(s32)); if (error) return (error); sa.sa_handler = PTRIN(s32.sa_u); CP(s32, sa, sa_flags); CP(s32, sa, sa_mask); 
sap = &sa; } else sap = NULL; error = kern_sigaction(td, uap->sig, sap, &osa, 0); if (error != 0 && uap->oact != NULL) { s32.sa_u = PTROUT(osa.sa_handler); CP(osa, s32, sa_flags); CP(osa, s32, sa_mask); error = copyout(&s32, uap->oact, sizeof(s32)); } return (error); } #ifdef COMPAT_FREEBSD4 int freebsd4_freebsd32_sigaction(struct thread *td, struct freebsd4_freebsd32_sigaction_args *uap) { struct sigaction32 s32; struct sigaction sa, osa, *sap; int error; if (uap->act) { error = copyin(uap->act, &s32, sizeof(s32)); if (error) return (error); sa.sa_handler = PTRIN(s32.sa_u); CP(s32, sa, sa_flags); CP(s32, sa, sa_mask); sap = &sa; } else sap = NULL; error = kern_sigaction(td, uap->sig, sap, &osa, KSA_FREEBSD4); if (error != 0 && uap->oact != NULL) { s32.sa_u = PTROUT(osa.sa_handler); CP(osa, s32, sa_flags); CP(osa, s32, sa_mask); error = copyout(&s32, uap->oact, sizeof(s32)); } return (error); } #endif #if 0 int freebsd32_xxx(struct thread *td, struct freebsd32_xxx_args *uap) { int error; caddr_t sg; struct yyy32 *p32, s32; struct yyy *p = NULL, s; p32 = uap->zzz; if (p32) { sg = stackgap_init(); p = stackgap_alloc(&sg, sizeof(struct yyy)); uap->zzz = (struct yyy32 *)p; error = copyin(p32, &s32, sizeof(s32)); if (error) return (error); /* translate in */ error = copyout(&s, p, sizeof(s)); if (error) return (error); } error = xxx(td, (struct xxx_args *) uap); if (error) return (error); if (p32) { error = copyin(p, &s, sizeof(s)); if (error) return (error); /* translate out */ error = copyout(&s32, p32, sizeof(s32)); } return (error); } #endif Index: head/sys/compat/linux/linux_misc.c =================================================================== --- head/sys/compat/linux/linux_misc.c (revision 127139) +++ head/sys/compat/linux/linux_misc.c (revision 127140) @@ -1,1361 +1,1344 @@ /*- * Copyright (c) 1994-1995 Søren Schmidt * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_mac.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __i386__ #include #endif #ifdef __alpha__ #define BSD_TO_LINUX_SIGNAL(sig) (sig) #else #define BSD_TO_LINUX_SIGNAL(sig) \ (((sig) <= LINUX_SIGTBLSZ) ? 
bsd_to_linux_signal[_SIG_IDX(sig)] : sig) #endif #ifndef __alpha__ static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = { RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK, RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE, RLIMIT_MEMLOCK, -1 }; #endif /*!__alpha__*/ struct l_sysinfo { l_long uptime; /* Seconds since boot */ l_ulong loads[3]; /* 1, 5, and 15 minute load averages */ #define LINUX_SYSINFO_LOADS_SCALE 65536 l_ulong totalram; /* Total usable main memory size */ l_ulong freeram; /* Available memory size */ l_ulong sharedram; /* Amount of shared memory */ l_ulong bufferram; /* Memory used by buffers */ l_ulong totalswap; /* Total swap space size */ l_ulong freeswap; /* swap space still available */ l_ushort procs; /* Number of current processes */ l_ulong totalbig; l_ulong freebig; l_uint mem_unit; char _f[6]; /* Pads structure to 64 bytes */ }; #ifndef __alpha__ int linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args) { struct l_sysinfo sysinfo; vm_object_t object; int i, j; struct timespec ts; /* Uptime is copied out of print_uptime() in kern_shutdown.c */ getnanouptime(&ts); i = 0; if (ts.tv_sec >= 86400) { ts.tv_sec %= 86400; i = 1; } if (i || ts.tv_sec >= 3600) { ts.tv_sec %= 3600; i = 1; } if (i || ts.tv_sec >= 60) { ts.tv_sec %= 60; i = 1; } sysinfo.uptime=ts.tv_sec; /* Use the information from the mib to get our load averages */ for (i = 0; i < 3; i++) sysinfo.loads[i] = averunnable.ldavg[i] * LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale; sysinfo.totalram = physmem * PAGE_SIZE; sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE; sysinfo.sharedram = 0; mtx_lock(&vm_object_list_mtx); TAILQ_FOREACH(object, &vm_object_list, object_list) if (object->shadow_count > 1) sysinfo.sharedram += object->resident_page_count; mtx_unlock(&vm_object_list_mtx); sysinfo.sharedram *= PAGE_SIZE; sysinfo.bufferram = 0; swap_pager_status(&i, &j); sysinfo.totalswap= i * PAGE_SIZE; sysinfo.freeswap = (i - j) * PAGE_SIZE; 
sysinfo.procs = nprocs; /* The following are only present in newer Linux kernels. */ sysinfo.totalbig = 0; sysinfo.freebig = 0; sysinfo.mem_unit = 1; return copyout(&sysinfo, args->info, sizeof(sysinfo)); } #endif /*!__alpha__*/ #ifndef __alpha__ int linux_alarm(struct thread *td, struct linux_alarm_args *args) { struct itimerval it, old_it; struct timeval tv; struct proc *p; #ifdef DEBUG if (ldebug(alarm)) printf(ARGS(alarm, "%u"), args->secs); #endif if (args->secs > 100000000) return EINVAL; it.it_value.tv_sec = (long)args->secs; it.it_value.tv_usec = 0; it.it_interval.tv_sec = 0; it.it_interval.tv_usec = 0; p = td->td_proc; PROC_LOCK(p); old_it = p->p_realtimer; getmicrouptime(&tv); if (timevalisset(&old_it.it_value)) callout_stop(&p->p_itcallout); if (it.it_value.tv_sec != 0) { callout_reset(&p->p_itcallout, tvtohz(&it.it_value), realitexpire, p); timevaladd(&it.it_value, &tv); } p->p_realtimer = it; PROC_UNLOCK(p); if (timevalcmp(&old_it.it_value, &tv, >)) { timevalsub(&old_it.it_value, &tv); if (old_it.it_value.tv_usec != 0) old_it.it_value.tv_sec++; td->td_retval[0] = old_it.it_value.tv_sec; } return 0; } #endif /*!__alpha__*/ int linux_brk(struct thread *td, struct linux_brk_args *args) { struct vmspace *vm = td->td_proc->p_vmspace; vm_offset_t new, old; struct obreak_args /* { char * nsize; } */ tmp; #ifdef DEBUG if (ldebug(brk)) printf(ARGS(brk, "%p"), (void *)args->dsend); #endif old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize); new = (vm_offset_t)args->dsend; tmp.nsize = (char *) new; if (((caddr_t)new > vm->vm_daddr) && !obreak(td, &tmp)) td->td_retval[0] = (long)new; else td->td_retval[0] = (long)old; return 0; } int linux_uselib(struct thread *td, struct linux_uselib_args *args) { struct nameidata ni; struct vnode *vp; struct exec *a_out; struct vattr attr; vm_offset_t vmaddr; unsigned long file_offset; vm_offset_t buffer; unsigned long bss_size; char *library; int error; int locked; LCONVPATHEXIST(td, args->library, &library); #ifdef DEBUG if 
(ldebug(uselib)) printf(ARGS(uselib, "%s"), library); #endif a_out = NULL; locked = 0; vp = NULL; /* * XXX: This code should make use of vn_open(), rather than doing * all this stuff itself. */ NDINIT(&ni, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, library, td); error = namei(&ni); LFREEPATH(library); if (error) goto cleanup; vp = ni.ni_vp; /* * XXX - This looks like a bogus check. A LOCKLEAF namei should not * succeed without returning a vnode. */ if (vp == NULL) { error = ENOEXEC; /* ?? */ goto cleanup; } NDFREE(&ni, NDF_ONLY_PNBUF); /* * From here on down, we have a locked vnode that must be unlocked. */ locked++; /* Writable? */ if (vp->v_writecount) { error = ETXTBSY; goto cleanup; } /* Executable? */ error = VOP_GETATTR(vp, &attr, td->td_ucred, td); if (error) goto cleanup; if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) { error = ENOEXEC; goto cleanup; } /* Sensible size? */ if (attr.va_size == 0) { error = ENOEXEC; goto cleanup; } /* Can we access it? */ error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); if (error) goto cleanup; /* * XXX: This should use vn_open() so that it is properly authorized, * and to reduce code redundancy all over the place here. */ #ifdef MAC error = mac_check_vnode_open(td->td_ucred, vp, FREAD); if (error) goto cleanup; #endif error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1); if (error) goto cleanup; /* Pull in executable header into kernel_map */ error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE, VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0); /* * Lock no longer needed */ locked = 0; VOP_UNLOCK(vp, 0, td); if (error) goto cleanup; /* Is it a Linux binary ? */ if (((a_out->a_magic >> 16) & 0xff) != 0x64) { error = ENOEXEC; goto cleanup; } /* * While we are here, we should REALLY do some more checks */ /* Set file/virtual offset based on a.out variant. 
*/ switch ((int)(a_out->a_magic & 0xffff)) { case 0413: /* ZMAGIC */ file_offset = 1024; break; case 0314: /* QMAGIC */ file_offset = 0; break; default: error = ENOEXEC; goto cleanup; } bss_size = round_page(a_out->a_bss); /* Check various fields in header for validity/bounds. */ if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) { error = ENOEXEC; goto cleanup; } /* text + data can't exceed file size */ if (a_out->a_data + a_out->a_text > attr.va_size) { error = EFAULT; goto cleanup; } /* * text/data/bss must not exceed limits * XXX - this is not complete. it should check current usage PLUS * the resources needed by this library. */ PROC_LOCK(td->td_proc); if (a_out->a_text > maxtsiz || a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA)) { PROC_UNLOCK(td->td_proc); error = ENOMEM; goto cleanup; } PROC_UNLOCK(td->td_proc); mp_fixme("Unlocked vflags access."); /* prevent more writers */ vp->v_vflag |= VV_TEXT; /* * Check if file_offset page aligned. Currently we cannot handle * misaligned file offsets, and so we read in the entire image * (what a waste). 
*/ if (file_offset & PAGE_MASK) { #ifdef DEBUG printf("uselib: Non page aligned binary %lu\n", file_offset); #endif /* Map text+data read/write/execute */ /* a_entry is the load address and is page aligned */ vmaddr = trunc_page(a_out->a_entry); /* get anon user mapping, read+write+execute */ error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0); if (error) goto cleanup; /* map file into kernel_map */ error = vm_mmap(kernel_map, &buffer, round_page(a_out->a_text + a_out->a_data + file_offset), VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, trunc_page(file_offset)); if (error) goto cleanup; /* copy from kernel VM space to user space */ error = copyout((void *)(uintptr_t)(buffer + file_offset), (void *)vmaddr, a_out->a_text + a_out->a_data); /* release temporary kernel space */ vm_map_remove(kernel_map, buffer, buffer + round_page(a_out->a_text + a_out->a_data + file_offset)); if (error) goto cleanup; } else { #ifdef DEBUG printf("uselib: Page aligned binary %lu\n", file_offset); #endif /* * for QMAGIC, a_entry is 20 bytes beyond the load address * to skip the executable header */ vmaddr = trunc_page(a_out->a_entry); /* * Map it all into the process's space as a single * copy-on-write "data" segment. 
*/ error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr, a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, (caddr_t)vp, file_offset); if (error) goto cleanup; } #ifdef DEBUG printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long*)vmaddr)[0], ((long*)vmaddr)[1]); #endif if (bss_size != 0) { /* Calculate BSS start address */ vmaddr = trunc_page(a_out->a_entry) + a_out->a_text + a_out->a_data; /* allocate some 'anon' space */ error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0, &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0); if (error) goto cleanup; } cleanup: /* Unlock vnode if needed */ if (locked) VOP_UNLOCK(vp, 0, td); /* Release the kernel mapping. */ if (a_out) vm_map_remove(kernel_map, (vm_offset_t)a_out, (vm_offset_t)a_out + PAGE_SIZE); return error; } int linux_select(struct thread *td, struct linux_select_args *args) { struct timeval tv0, tv1, utv, *tvp; int error; #ifdef DEBUG if (ldebug(select)) printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds, (void *)args->readfds, (void *)args->writefds, (void *)args->exceptfds, (void *)args->timeout); #endif /* * Store current time for computation of the amount of * time left. */ if (args->timeout) { if ((error = copyin(args->timeout, &utv, sizeof(utv)))) goto select_out; #ifdef DEBUG if (ldebug(select)) printf(LMSG("incoming timeout (%ld/%ld)"), utv.tv_sec, utv.tv_usec); #endif if (itimerfix(&utv)) { /* * The timeval was invalid. Convert it to something * valid that will act as it does under Linux. */ utv.tv_sec += utv.tv_usec / 1000000; utv.tv_usec %= 1000000; if (utv.tv_usec < 0) { utv.tv_sec -= 1; utv.tv_usec += 1000000; } if (utv.tv_sec < 0) timevalclear(&utv); } microtime(&tv0); tvp = &utv; } else tvp = NULL; error = kern_select(td, args->nfds, args->readfds, args->writefds, args->exceptfds, tvp); #ifdef DEBUG if (ldebug(select)) printf(LMSG("real select returns %d"), error); #endif if (error) { /* * See fs/select.c in the Linux kernel. 
Without this, * Maelstrom doesn't work. */ if (error == ERESTART) error = EINTR; goto select_out; } if (args->timeout) { if (td->td_retval[0]) { /* * Compute how much time was left of the timeout, * by subtracting the current time and the time * before we started the call, and subtracting * that result from the user-supplied value. */ microtime(&tv1); timevalsub(&tv1, &tv0); timevalsub(&utv, &tv1); if (utv.tv_sec < 0) timevalclear(&utv); } else timevalclear(&utv); #ifdef DEBUG if (ldebug(select)) printf(LMSG("outgoing timeout (%ld/%ld)"), utv.tv_sec, utv.tv_usec); #endif if ((error = copyout(&utv, args->timeout, sizeof(utv)))) goto select_out; } select_out: #ifdef DEBUG if (ldebug(select)) printf(LMSG("select_out -> %d"), error); #endif return error; } int linux_mremap(struct thread *td, struct linux_mremap_args *args) { struct munmap_args /* { void *addr; size_t len; } */ bsd_args; int error = 0; #ifdef DEBUG if (ldebug(mremap)) printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"), (void *)args->addr, (unsigned long)args->old_len, (unsigned long)args->new_len, (unsigned long)args->flags); #endif args->new_len = round_page(args->new_len); args->old_len = round_page(args->old_len); if (args->new_len > args->old_len) { td->td_retval[0] = 0; return ENOMEM; } if (args->new_len < args->old_len) { bsd_args.addr = (caddr_t)(args->addr + args->new_len); bsd_args.len = args->old_len - args->new_len; error = munmap(td, &bsd_args); } td->td_retval[0] = error ? 
0 : (uintptr_t)args->addr; return error; } #define LINUX_MS_ASYNC 0x0001 #define LINUX_MS_INVALIDATE 0x0002 #define LINUX_MS_SYNC 0x0004 int linux_msync(struct thread *td, struct linux_msync_args *args) { struct msync_args bsd_args; bsd_args.addr = (caddr_t)args->addr; bsd_args.len = args->len; bsd_args.flags = args->fl & ~LINUX_MS_SYNC; return msync(td, &bsd_args); } #ifndef __alpha__ int linux_time(struct thread *td, struct linux_time_args *args) { struct timeval tv; l_time_t tm; int error; #ifdef DEBUG if (ldebug(time)) printf(ARGS(time, "*")); #endif microtime(&tv); tm = tv.tv_sec; if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm)))) return error; td->td_retval[0] = tm; return 0; } #endif /*!__alpha__*/ struct l_times_argv { l_long tms_utime; l_long tms_stime; l_long tms_cutime; l_long tms_cstime; }; #ifdef __alpha__ #define CLK_TCK 1024 /* Linux uses 1024 on alpha */ #else #define CLK_TCK 100 /* Linux uses 100 */ #endif #define CONVTCK(r) (r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK)) int linux_times(struct thread *td, struct linux_times_args *args) { struct timeval tv; struct l_times_argv tms; struct rusage ru; int error; #ifdef DEBUG if (ldebug(times)) printf(ARGS(times, "*")); #endif mtx_lock_spin(&sched_lock); calcru(td->td_proc, &ru.ru_utime, &ru.ru_stime, NULL); mtx_unlock_spin(&sched_lock); tms.tms_utime = CONVTCK(ru.ru_utime); tms.tms_stime = CONVTCK(ru.ru_stime); tms.tms_cutime = CONVTCK(td->td_proc->p_stats->p_cru.ru_utime); tms.tms_cstime = CONVTCK(td->td_proc->p_stats->p_cru.ru_stime); if ((error = copyout(&tms, args->buf, sizeof(tms)))) return error; microuptime(&tv); td->td_retval[0] = (int)CONVTCK(tv); return 0; } int linux_newuname(struct thread *td, struct linux_newuname_args *args) { struct l_new_utsname utsname; char osname[LINUX_MAX_UTSNAME]; char osrelease[LINUX_MAX_UTSNAME]; char *p; #ifdef DEBUG if (ldebug(newuname)) printf(ARGS(newuname, "*")); #endif linux_get_osname(td, osname); linux_get_osrelease(td, osrelease); 
bzero(&utsname, sizeof(utsname)); strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME); getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME); strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME); strlcpy(utsname.version, version, LINUX_MAX_UTSNAME); for (p = utsname.version; *p != '\0'; ++p) if (*p == '\n') { *p = '\0'; break; } #ifdef __i386__ { const char *class; switch (cpu_class) { case CPUCLASS_686: class = "i686"; break; case CPUCLASS_586: class = "i586"; break; case CPUCLASS_486: class = "i486"; break; default: class = "i386"; } strlcpy(utsname.machine, class, LINUX_MAX_UTSNAME); } #else strlcpy(utsname.machine, machine, LINUX_MAX_UTSNAME); #endif strlcpy(utsname.domainname, domainname, LINUX_MAX_UTSNAME); return (copyout(&utsname, args->buf, sizeof(utsname))); } #if defined(__i386__) struct l_utimbuf { l_time_t l_actime; l_time_t l_modtime; }; int linux_utime(struct thread *td, struct linux_utime_args *args) { struct timeval tv[2], *tvp; struct l_utimbuf lut; char *fname; int error; LCONVPATHEXIST(td, args->fname, &fname); #ifdef DEBUG if (ldebug(utime)) printf(ARGS(utime, "%s, *"), fname); #endif if (args->times) { if ((error = copyin(args->times, &lut, sizeof lut))) { LFREEPATH(fname); return error; } tv[0].tv_sec = lut.l_actime; tv[0].tv_usec = 0; tv[1].tv_sec = lut.l_modtime; tv[1].tv_usec = 0; tvp = tv; } else tvp = NULL; error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE); LFREEPATH(fname); return (error); } #endif /* __i386__ */ #define __WCLONE 0x80000000 #ifndef __alpha__ int linux_waitpid(struct thread *td, struct linux_waitpid_args *args) { - struct wait_args /* { - int pid; - int *status; - int options; - struct rusage *rusage; - } */ tmp; - int error, tmpstat; + int error, options, tmpstat; #ifdef DEBUG if (ldebug(waitpid)) printf(ARGS(waitpid, "%d, %p, %d"), args->pid, (void *)args->status, args->options); #endif - tmp.pid = args->pid; - tmp.status = args->status; - tmp.options = (args->options & (WNOHANG | 
WUNTRACED)); + options = (args->options & (WNOHANG | WUNTRACED)); /* WLINUXCLONE should be equal to __WCLONE, but we make sure */ if (args->options & __WCLONE) - tmp.options |= WLINUXCLONE; - tmp.rusage = NULL; + options |= WLINUXCLONE; - if ((error = wait4(td, &tmp)) != 0) + error = kern_wait(td, args->pid, &tmpstat, options, NULL); + if (error) return error; if (args->status) { - if ((error = copyin(args->status, &tmpstat, sizeof(int))) != 0) - return error; tmpstat &= 0xffff; if (WIFSIGNALED(tmpstat)) tmpstat = (tmpstat & 0xffffff80) | BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat)); else if (WIFSTOPPED(tmpstat)) tmpstat = (tmpstat & 0xffff00ff) | (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8); return copyout(&tmpstat, args->status, sizeof(int)); } return 0; } #endif /*!__alpha__*/ int linux_wait4(struct thread *td, struct linux_wait4_args *args) { - struct wait_args /* { - int pid; - int *status; - int options; - struct rusage *rusage; - } */ tmp; - int error, tmpstat; + int error, options, tmpstat; + struct rusage ru; struct proc *p; #ifdef DEBUG if (ldebug(wait4)) printf(ARGS(wait4, "%d, %p, %d, %p"), args->pid, (void *)args->status, args->options, (void *)args->rusage); #endif - tmp.pid = args->pid; - tmp.status = args->status; - tmp.options = (args->options & (WNOHANG | WUNTRACED)); + options = (args->options & (WNOHANG | WUNTRACED)); /* WLINUXCLONE should be equal to __WCLONE, but we make sure */ if (args->options & __WCLONE) - tmp.options |= WLINUXCLONE; - tmp.rusage = (struct rusage *)args->rusage; + options |= WLINUXCLONE; - if ((error = wait4(td, &tmp)) != 0) + error = kern_wait(td, args->pid, &tmpstat, options, &ru); + if (error) return error; p = td->td_proc; PROC_LOCK(p); SIGDELSET(p->p_siglist, SIGCHLD); PROC_UNLOCK(p); if (args->status) { - if ((error = copyin(args->status, &tmpstat, sizeof(int))) != 0) - return error; tmpstat &= 0xffff; if (WIFSIGNALED(tmpstat)) tmpstat = (tmpstat & 0xffffff80) | BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat)); else if 
(WIFSTOPPED(tmpstat)) tmpstat = (tmpstat & 0xffff00ff) | (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8); - return copyout(&tmpstat, args->status, sizeof(int)); + error = copyout(&tmpstat, args->status, sizeof(int)); } + if (args->rusage != NULL && error == 0) + error = copyout(&ru, args->rusage, sizeof(ru)); - return 0; + return (error); } int linux_mknod(struct thread *td, struct linux_mknod_args *args) { char *path; int error; LCONVPATHCREAT(td, args->path, &path); #ifdef DEBUG if (ldebug(mknod)) printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev); #endif if (args->mode & S_IFIFO) error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode); else error = kern_mknod(td, path, UIO_SYSSPACE, args->mode, args->dev); LFREEPATH(path); return (error); } /* * UGH! This is just about the dumbest idea I've ever heard!! */ int linux_personality(struct thread *td, struct linux_personality_args *args) { #ifdef DEBUG if (ldebug(personality)) printf(ARGS(personality, "%lu"), (unsigned long)args->per); #endif #ifndef __alpha__ if (args->per != 0) return EINVAL; #endif /* Yes Jim, it's still a Linux... */ td->td_retval[0] = 0; return 0; } /* * Wrappers for get/setitimer for debugging.. 
*/ int linux_setitimer(struct thread *td, struct linux_setitimer_args *args) { struct setitimer_args bsa; struct itimerval foo; int error; #ifdef DEBUG if (ldebug(setitimer)) printf(ARGS(setitimer, "%p, %p"), (void *)args->itv, (void *)args->oitv); #endif bsa.which = args->which; bsa.itv = (struct itimerval *)args->itv; bsa.oitv = (struct itimerval *)args->oitv; if (args->itv) { if ((error = copyin(args->itv, &foo, sizeof(foo)))) return error; #ifdef DEBUG if (ldebug(setitimer)) { printf("setitimer: value: sec: %ld, usec: %ld\n", foo.it_value.tv_sec, foo.it_value.tv_usec); printf("setitimer: interval: sec: %ld, usec: %ld\n", foo.it_interval.tv_sec, foo.it_interval.tv_usec); } #endif } return setitimer(td, &bsa); } int linux_getitimer(struct thread *td, struct linux_getitimer_args *args) { struct getitimer_args bsa; #ifdef DEBUG if (ldebug(getitimer)) printf(ARGS(getitimer, "%p"), (void *)args->itv); #endif bsa.which = args->which; bsa.itv = (struct itimerval *)args->itv; return getitimer(td, &bsa); } #ifndef __alpha__ int linux_nice(struct thread *td, struct linux_nice_args *args) { struct setpriority_args bsd_args; bsd_args.which = PRIO_PROCESS; bsd_args.who = 0; /* current process */ bsd_args.prio = args->inc; return setpriority(td, &bsd_args); } #endif /*!__alpha__*/ int linux_setgroups(struct thread *td, struct linux_setgroups_args *args) { struct ucred *newcred, *oldcred; l_gid_t linux_gidset[NGROUPS]; gid_t *bsd_gidset; int ngrp, error; struct proc *p; ngrp = args->gidsetsize; if (ngrp < 0 || ngrp >= NGROUPS) return (EINVAL); error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t)); if (error) return (error); newcred = crget(); p = td->td_proc; PROC_LOCK(p); oldcred = p->p_ucred; /* * cr_groups[0] holds egid. Setting the whole set from * the supplied set will cause egid to be changed too. * Keep cr_groups[0] unchanged to prevent that. 
*/ if ((error = suser_cred(oldcred, PRISON_ROOT)) != 0) { PROC_UNLOCK(p); crfree(newcred); return (error); } crcopy(newcred, oldcred); if (ngrp > 0) { newcred->cr_ngroups = ngrp + 1; bsd_gidset = newcred->cr_groups; ngrp--; while (ngrp >= 0) { bsd_gidset[ngrp + 1] = linux_gidset[ngrp]; ngrp--; } } else newcred->cr_ngroups = 1; setsugid(p); p->p_ucred = newcred; PROC_UNLOCK(p); crfree(oldcred); return (0); } int linux_getgroups(struct thread *td, struct linux_getgroups_args *args) { struct ucred *cred; l_gid_t linux_gidset[NGROUPS]; gid_t *bsd_gidset; int bsd_gidsetsz, ngrp, error; cred = td->td_ucred; bsd_gidset = cred->cr_groups; bsd_gidsetsz = cred->cr_ngroups - 1; /* * cr_groups[0] holds egid. Returning the whole set * here will cause a duplicate. Exclude cr_groups[0] * to prevent that. */ if ((ngrp = args->gidsetsize) == 0) { td->td_retval[0] = bsd_gidsetsz; return (0); } if (ngrp < bsd_gidsetsz) return (EINVAL); ngrp = 0; while (ngrp < bsd_gidsetsz) { linux_gidset[ngrp] = bsd_gidset[ngrp + 1]; ngrp++; } if ((error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t)))) return (error); td->td_retval[0] = ngrp; return (0); } #ifndef __alpha__ int linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args) { struct rlimit bsd_rlim; struct l_rlimit rlim; u_int which; int error; #ifdef DEBUG if (ldebug(setrlimit)) printf(ARGS(setrlimit, "%d, %p"), args->resource, (void *)args->rlim); #endif if (args->resource >= LINUX_RLIM_NLIMITS) return (EINVAL); which = linux_to_bsd_resource[args->resource]; if (which == -1) return (EINVAL); error = copyin(args->rlim, &rlim, sizeof(rlim)); if (error) return (error); bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur; bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max; return (kern_setrlimit(td, which, &bsd_rlim)); } int linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args) { struct l_rlimit rlim; struct proc *p = td->td_proc; struct rlimit bsd_rlim; u_int which; #ifdef DEBUG if (ldebug(old_getrlimit)) 
printf(ARGS(old_getrlimit, "%d, %p"), args->resource, (void *)args->rlim); #endif if (args->resource >= LINUX_RLIM_NLIMITS) return (EINVAL); which = linux_to_bsd_resource[args->resource]; if (which == -1) return (EINVAL); PROC_LOCK(p); lim_rlimit(p, which, &bsd_rlim); PROC_UNLOCK(p); rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur; if (rlim.rlim_cur == ULONG_MAX) rlim.rlim_cur = LONG_MAX; rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max; if (rlim.rlim_max == ULONG_MAX) rlim.rlim_max = LONG_MAX; return (copyout(&rlim, args->rlim, sizeof(rlim))); } int linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args) { struct l_rlimit rlim; struct proc *p = td->td_proc; struct rlimit bsd_rlim; u_int which; #ifdef DEBUG if (ldebug(getrlimit)) printf(ARGS(getrlimit, "%d, %p"), args->resource, (void *)args->rlim); #endif if (args->resource >= LINUX_RLIM_NLIMITS) return (EINVAL); which = linux_to_bsd_resource[args->resource]; if (which == -1) return (EINVAL); PROC_LOCK(p); lim_rlimit(p, which, &bsd_rlim); PROC_UNLOCK(p); rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur; rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max; return (copyout(&rlim, args->rlim, sizeof(rlim))); } #endif /*!__alpha__*/ int linux_sched_setscheduler(struct thread *td, struct linux_sched_setscheduler_args *args) { struct sched_setscheduler_args bsd; #ifdef DEBUG if (ldebug(sched_setscheduler)) printf(ARGS(sched_setscheduler, "%d, %d, %p"), args->pid, args->policy, (const void *)args->param); #endif switch (args->policy) { case LINUX_SCHED_OTHER: bsd.policy = SCHED_OTHER; break; case LINUX_SCHED_FIFO: bsd.policy = SCHED_FIFO; break; case LINUX_SCHED_RR: bsd.policy = SCHED_RR; break; default: return EINVAL; } bsd.pid = args->pid; bsd.param = (struct sched_param *)args->param; return sched_setscheduler(td, &bsd); } int linux_sched_getscheduler(struct thread *td, struct linux_sched_getscheduler_args *args) { struct sched_getscheduler_args bsd; int error; #ifdef DEBUG if (ldebug(sched_getscheduler)) 
printf(ARGS(sched_getscheduler, "%d"), args->pid); #endif bsd.pid = args->pid; error = sched_getscheduler(td, &bsd); switch (td->td_retval[0]) { case SCHED_OTHER: td->td_retval[0] = LINUX_SCHED_OTHER; break; case SCHED_FIFO: td->td_retval[0] = LINUX_SCHED_FIFO; break; case SCHED_RR: td->td_retval[0] = LINUX_SCHED_RR; break; } return error; } int linux_sched_get_priority_max(struct thread *td, struct linux_sched_get_priority_max_args *args) { struct sched_get_priority_max_args bsd; #ifdef DEBUG if (ldebug(sched_get_priority_max)) printf(ARGS(sched_get_priority_max, "%d"), args->policy); #endif switch (args->policy) { case LINUX_SCHED_OTHER: bsd.policy = SCHED_OTHER; break; case LINUX_SCHED_FIFO: bsd.policy = SCHED_FIFO; break; case LINUX_SCHED_RR: bsd.policy = SCHED_RR; break; default: return EINVAL; } return sched_get_priority_max(td, &bsd); } int linux_sched_get_priority_min(struct thread *td, struct linux_sched_get_priority_min_args *args) { struct sched_get_priority_min_args bsd; #ifdef DEBUG if (ldebug(sched_get_priority_min)) printf(ARGS(sched_get_priority_min, "%d"), args->policy); #endif switch (args->policy) { case LINUX_SCHED_OTHER: bsd.policy = SCHED_OTHER; break; case LINUX_SCHED_FIFO: bsd.policy = SCHED_FIFO; break; case LINUX_SCHED_RR: bsd.policy = SCHED_RR; break; default: return EINVAL; } return sched_get_priority_min(td, &bsd); } #define REBOOT_CAD_ON 0x89abcdef #define REBOOT_CAD_OFF 0 #define REBOOT_HALT 0xcdef0123 int linux_reboot(struct thread *td, struct linux_reboot_args *args) { struct reboot_args bsd_args; #ifdef DEBUG if (ldebug(reboot)) printf(ARGS(reboot, "0x%x"), args->cmd); #endif if (args->cmd == REBOOT_CAD_ON || args->cmd == REBOOT_CAD_OFF) return (0); bsd_args.opt = (args->cmd == REBOOT_HALT) ? RB_HALT : 0; return (reboot(td, &bsd_args)); } #ifndef __alpha__ /* * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify * td->td_retval[1] when COMPAT_43 or COMPAT_SUNOS is defined. 
This * clobbers registers that are assumed to be preserved. The following * lightweight syscalls fix this. See also linux_getgid16() and * linux_getuid16() in linux_uid16.c. * * linux_getpid() - MP SAFE * linux_getgid() - MP SAFE * linux_getuid() - MP SAFE */ int linux_getpid(struct thread *td, struct linux_getpid_args *args) { td->td_retval[0] = td->td_proc->p_pid; return (0); } int linux_getgid(struct thread *td, struct linux_getgid_args *args) { td->td_retval[0] = td->td_ucred->cr_rgid; return (0); } int linux_getuid(struct thread *td, struct linux_getuid_args *args) { td->td_retval[0] = td->td_ucred->cr_ruid; return (0); } #endif /*!__alpha__*/ int linux_getsid(struct thread *td, struct linux_getsid_args *args) { struct getsid_args bsd; bsd.pid = args->pid; return getsid(td, &bsd); } Index: head/sys/compat/svr4/svr4_misc.c =================================================================== --- head/sys/compat/svr4/svr4_misc.c (revision 127139) +++ head/sys/compat/svr4/svr4_misc.c (revision 127140) @@ -1,1749 +1,1731 @@ /* * Copyright (c) 1998 Mark Newton * Copyright (c) 1994 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * SVR4 compatibility module. * * SVR4 system calls that are implemented differently in BSD are * handled here. */ #include __FBSDID("$FreeBSD$"); #include "opt_mac.h" #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #include #endif #if defined(NetBSD) # if defined(UVM) # include # endif #endif #define BSD_DIRENT(cp) ((struct dirent *)(cp)) static int svr4_mknod(struct thread *, register_t *, char *, svr4_mode_t, svr4_dev_t); static __inline clock_t timeval_to_clock_t(struct timeval *); static int svr4_setinfo (struct proc *, int, svr4_siginfo_t *); struct svr4_hrtcntl_args; static int svr4_hrtcntl (struct thread *, struct svr4_hrtcntl_args *, register_t *); static void 
bsd_statfs_to_svr4_statvfs(const struct statfs *, struct svr4_statvfs *); static void bsd_statfs_to_svr4_statvfs64(const struct statfs *, struct svr4_statvfs64 *); static struct proc *svr4_pfind(pid_t pid); /* BOGUS noop */ #if defined(BOGUS) int svr4_sys_setitimer(td, uap) register struct thread *td; struct svr4_sys_setitimer_args *uap; { td->td_retval[0] = 0; return 0; } #endif int svr4_sys_wait(td, uap) struct thread *td; struct svr4_sys_wait_args *uap; { - struct wait_args w4; - int error, *retval = td->td_retval, st, sig; - size_t sz = sizeof(*w4.status); + int error, st, sig; - w4.rusage = NULL; - w4.options = 0; - - if (uap->status == NULL) { - caddr_t sg = stackgap_init(); - - w4.status = stackgap_alloc(&sg, sz); - } - else - w4.status = uap->status; - - w4.pid = WAIT_ANY; - - if ((error = wait4(td, &w4)) != 0) - return error; + error = kern_wait(td, WAIT_ANY, &st, 0, NULL); + if (error) + return (error); - if ((error = copyin(w4.status, &st, sizeof(st))) != 0) - return error; - if (WIFSIGNALED(st)) { sig = WTERMSIG(st); if (sig >= 0 && sig < NSIG) st = (st & ~0177) | SVR4_BSD2SVR4_SIG(sig); } else if (WIFSTOPPED(st)) { sig = WSTOPSIG(st); if (sig >= 0 && sig < NSIG) st = (st & ~0xff00) | (SVR4_BSD2SVR4_SIG(sig) << 8); } /* * It looks like wait(2) on svr4/solaris/2.4 returns * the status in retval[1], and the pid on retval[0]. 
*/ - retval[1] = st; + td->td_retval[1] = st; if (uap->status) - if ((error = copyout(&st, uap->status, sizeof(st))) != 0) - return error; + error = copyout(&st, uap->status, sizeof(st)); - return 0; + return (error); } int svr4_sys_execv(td, uap) struct thread *td; struct svr4_sys_execv_args *uap; { struct execve_args ap; caddr_t sg; sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); ap.fname = uap->path; ap.argv = uap->argp; ap.envv = NULL; return execve(td, &ap); } int svr4_sys_execve(td, uap) struct thread *td; struct svr4_sys_execve_args *uap; { struct execve_args ap; caddr_t sg; sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); ap.fname = uap->path; ap.argv = uap->argp; ap.envv = uap->envp; return execve(td, &ap); } int svr4_sys_time(td, v) struct thread *td; struct svr4_sys_time_args *v; { struct svr4_sys_time_args *uap = v; int error = 0; struct timeval tv; microtime(&tv); if (uap->t) error = copyout(&tv.tv_sec, uap->t, sizeof(*(uap->t))); td->td_retval[0] = (int) tv.tv_sec; return error; } /* * Read SVR4-style directory entries. We suck them into kernel space so * that they can be massaged before being copied out to user code. * * This code is ported from the Linux emulator: Changes to the VFS interface * between FreeBSD and NetBSD have made it simpler to port it from there than * to adapt the NetBSD version. 
*/ int svr4_sys_getdents64(td, uap) struct thread *td; struct svr4_sys_getdents64_args *uap; { register struct dirent *bdp; struct vnode *vp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* SVR4-format */ int resid, svr4reclen=0; /* SVR4-format */ struct file *fp; struct uio auio; struct iovec aiov; off_t off; struct svr4_dirent64 svr4_dirent; int buflen, error, eofflag, nbytes, justone; u_long *cookies = NULL, *cookiep; int ncookies; DPRINTF(("svr4_sys_getdents64(%d, *, %d)\n", uap->fd, uap->nbytes)); if ((error = getvnode(td->td_proc->p_fd, uap->fd, &fp)) != 0) { return (error); } if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return (EINVAL); } nbytes = uap->nbytes; if (nbytes == 1) { nbytes = sizeof (struct svr4_dirent64); justone = 1; } else justone = 0; off = fp->f_offset; #define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ buflen = max(DIRBLKSIZ, nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_check_vnode_readdir(td->td_ucred, vp); if (error) goto out; #endif error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies); if (error) { goto out; } inp = buf; outp = (caddr_t) uap->dp; resid = nbytes; if ((len = buflen - auio.uio_resid) <= 0) { goto eof; } cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). 
*/ while (len > 0 && ncookies > 0 && *cookiep <= off) { bdp = (struct dirent *) inp; len -= bdp->d_reclen; inp += bdp->d_reclen; cookiep++; ncookies--; } } while (len > 0) { if (cookiep && ncookies == 0) break; bdp = (struct dirent *) inp; reclen = bdp->d_reclen; if (reclen & 3) { DPRINTF(("svr4_readdir: reclen=%d\n", reclen)); error = EFAULT; goto out; } if (bdp->d_fileno == 0) { inp += reclen; if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; len -= reclen; continue; } svr4reclen = SVR4_RECLEN(&svr4_dirent, bdp->d_namlen); if (reclen > len || resid < svr4reclen) { outp++; break; } svr4_dirent.d_ino = (long) bdp->d_fileno; if (justone) { /* * old svr4-style readdir usage. */ svr4_dirent.d_off = (svr4_off_t) svr4reclen; svr4_dirent.d_reclen = (u_short) bdp->d_namlen; } else { svr4_dirent.d_off = (svr4_off_t)(off + reclen); svr4_dirent.d_reclen = (u_short) svr4reclen; } strcpy(svr4_dirent.d_name, bdp->d_name); if ((error = copyout((caddr_t)&svr4_dirent, outp, svr4reclen))) goto out; inp += reclen; if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; outp += svr4reclen; resid -= svr4reclen; len -= reclen; if (justone) break; } if (outp == (caddr_t) uap->dp) goto again; fp->f_offset = off; if (justone) nbytes = resid + svr4reclen; eof: td->td_retval[0] = nbytes - resid; out: VOP_UNLOCK(vp, 0, td); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return error; } int svr4_sys_getdents(td, uap) struct thread *td; struct svr4_sys_getdents_args *uap; { struct dirent *bdp; struct vnode *vp; caddr_t inp, buf; /* BSD-format */ int len, reclen; /* BSD-format */ caddr_t outp; /* SVR4-format */ int resid, svr4_reclen; /* SVR4-format */ struct file *fp; struct uio auio; struct iovec aiov; struct svr4_dirent idb; off_t off; /* true file offset */ int buflen, error, eofflag; u_long *cookiebuf = NULL, *cookie; int ncookies = 0, *retval = td->td_retval; if (uap->nbytes < 0) return (EINVAL); if ((error = getvnode(td->td_proc->p_fd, 
uap->fd, &fp)) != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return (EINVAL); } buflen = min(MAXBSIZE, uap->nbytes); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); off = fp->f_offset; again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; #ifdef MAC error = mac_check_vnode_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookiebuf); if (error) { goto out; } inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) == 0) goto eof; for (cookie = cookiebuf; len > 0; len -= reclen) { bdp = (struct dirent *)inp; reclen = bdp->d_reclen; if (reclen & 3) panic("svr4_sys_getdents64: bad reclen"); off = *cookie++; /* each entry points to the next */ if ((off >> 32) != 0) { uprintf("svr4_sys_getdents64: dir offset too large for emulated program"); error = EINVAL; goto out; } if (bdp->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ continue; } svr4_reclen = SVR4_RECLEN(&idb, bdp->d_namlen); if (reclen > len || resid < svr4_reclen) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make a SVR4-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). 
*/ idb.d_ino = (svr4_ino_t)bdp->d_fileno; idb.d_off = (svr4_off_t)off; idb.d_reclen = (u_short)svr4_reclen; strcpy(idb.d_name, bdp->d_name); if ((error = copyout((caddr_t)&idb, outp, svr4_reclen))) goto out; /* advance past this real entry */ inp += reclen; /* advance output past SVR4-shaped entry */ outp += svr4_reclen; resid -= svr4_reclen; } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: *retval = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0, td); fdrop(fp, td); if (cookiebuf) free(cookiebuf, M_TEMP); free(buf, M_TEMP); return error; } int svr4_sys_mmap(td, uap) struct thread *td; struct svr4_sys_mmap_args *uap; { struct mmap_args mm; int *retval; retval = td->td_retval; #define _MAP_NEW 0x80000000 /* * Verify the arguments. */ if (uap->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) return EINVAL; /* XXX still needed? */ if (uap->len == 0) return EINVAL; mm.prot = uap->prot; mm.len = uap->len; mm.flags = uap->flags & ~_MAP_NEW; mm.fd = uap->fd; mm.addr = uap->addr; mm.pos = uap->pos; return mmap(td, &mm); } int svr4_sys_mmap64(td, uap) struct thread *td; struct svr4_sys_mmap64_args *uap; { struct mmap_args mm; void *rp; #define _MAP_NEW 0x80000000 /* * Verify the arguments. */ if (uap->prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) return EINVAL; /* XXX still needed? 
*/
	if (uap->len == 0)
		return EINVAL;

	mm.prot = uap->prot;
	mm.len = uap->len;
	mm.flags = uap->flags & ~_MAP_NEW;
	mm.fd = uap->fd;
	mm.addr = uap->addr;
	mm.pos = uap->pos;

	/*
	 * For non-MAP_FIXED requests with a non-zero hint below the top
	 * of the largest possible data segment, push the hint above it
	 * so the mapping cannot land inside the brk()-managed region.
	 */
	rp = (void *) round_page((vm_offset_t)(td->td_proc->p_vmspace->vm_daddr + maxdsiz));
	if ((mm.flags & MAP_FIXED) == 0 &&
	    mm.addr != 0 && (void *)mm.addr < rp)
		mm.addr = rp;

	return mmap(td, &mm);
}

/*
 * fchroot(2) emulation: make the directory referenced by the open
 * descriptor uap->fd the root directory of the calling process.
 * Requires superuser privilege and a searchable directory.
 */
int
svr4_sys_fchroot(td, uap)
	struct thread *td;
	struct svr4_sys_fchroot_args *uap;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct vnode *vp, *vpold;
	struct file *fp;
	int error;

	if ((error = suser(td)) != 0)
		return error;
	if ((error = getvnode(fdp, uap->fd, &fp)) != 0)
		return error;
	vp = fp->f_vnode;
	/* The descriptor must reference a directory we may search. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if (vp->v_type != VDIR)
		error = ENOTDIR;
	else
		error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	VOP_UNLOCK(vp, 0, td);
	if (error) {
		fdrop(fp, td);
		return error;
	}
	VREF(vp);
	/* Install the new root; drop the reference on the old one. */
	FILEDESC_LOCK(fdp);
	vpold = fdp->fd_rdir;
	fdp->fd_rdir = vp;
	FILEDESC_UNLOCK(fdp);
	if (vpold != NULL)
		vrele(vpold);
	fdrop(fp, td);
	return 0;
}

/*
 * Common backend for mknod(2)/xmknod(2): create a FIFO or a device
 * node at the (alternate-root checked) path.
 */
static int
svr4_mknod(td, retval, path, mode, dev)
	struct thread *td;
	register_t *retval;
	char *path;
	svr4_mode_t mode;
	svr4_dev_t dev;
{
	caddr_t sg = stackgap_init();

	CHECKALTEXIST(td, &sg, path);

	if (S_ISFIFO(mode)) {
		struct mkfifo_args ap;
		ap.path = path;
		ap.mode = mode;
		return mkfifo(td, &ap);
	} else {
		struct mknod_args ap;
		ap.path = path;
		ap.mode = mode;
		ap.dev = dev;
		return mknod(td, &ap);
	}
}

int
svr4_sys_mknod(td, uap)
	register struct thread *td;
	struct svr4_sys_mknod_args *uap;
{
	int *retval = td->td_retval;

	/* Old-style SVR4 device number: convert via svr4_to_bsd_odev_t(). */
	return svr4_mknod(td, retval,
	    uap->path, uap->mode,
	    (svr4_dev_t)svr4_to_bsd_odev_t(uap->dev));
}

int
svr4_sys_xmknod(td, uap)
	struct thread *td;
	struct svr4_sys_xmknod_args *uap;
{
	int *retval = td->td_retval;

	/* Expanded SVR4 device number: convert via svr4_to_bsd_dev_t(). */
	return svr4_mknod(td, retval,
	    uap->path, uap->mode,
	    (svr4_dev_t)svr4_to_bsd_dev_t(uap->dev));
}

int
svr4_sys_vhangup(td, uap)
	struct thread *td;
	struct svr4_sys_vhangup_args *uap;
{

	/* Terminal revocation is not emulated; report success. */
	return 0;
}

int
svr4_sys_sysconfig(td, uap)
	struct
thread *td; struct svr4_sys_sysconfig_args *uap; { int *retval; retval = &(td->td_retval[0]); switch (uap->name) { case SVR4_CONFIG_UNUSED: *retval = 0; break; case SVR4_CONFIG_NGROUPS: *retval = NGROUPS_MAX; break; case SVR4_CONFIG_CHILD_MAX: *retval = maxproc; break; case SVR4_CONFIG_OPEN_FILES: *retval = maxfiles; break; case SVR4_CONFIG_POSIX_VER: *retval = 198808; break; case SVR4_CONFIG_PAGESIZE: *retval = PAGE_SIZE; break; case SVR4_CONFIG_CLK_TCK: *retval = 60; /* should this be `hz', ie. 100? */ break; case SVR4_CONFIG_XOPEN_VER: *retval = 2; /* XXX: What should that be? */ break; case SVR4_CONFIG_PROF_TCK: *retval = 60; /* XXX: What should that be? */ break; case SVR4_CONFIG_NPROC_CONF: *retval = 1; /* Only one processor for now */ break; case SVR4_CONFIG_NPROC_ONLN: *retval = 1; /* And it better be online */ break; case SVR4_CONFIG_AIO_LISTIO_MAX: case SVR4_CONFIG_AIO_MAX: case SVR4_CONFIG_AIO_PRIO_DELTA_MAX: *retval = 0; /* No aio support */ break; case SVR4_CONFIG_DELAYTIMER_MAX: *retval = 0; /* No delaytimer support */ break; case SVR4_CONFIG_MQ_OPEN_MAX: *retval = msginfo.msgmni; break; case SVR4_CONFIG_MQ_PRIO_MAX: *retval = 0; /* XXX: Don't know */ break; case SVR4_CONFIG_RTSIG_MAX: *retval = 0; break; case SVR4_CONFIG_SEM_NSEMS_MAX: *retval = seminfo.semmni; break; case SVR4_CONFIG_SEM_VALUE_MAX: *retval = seminfo.semvmx; break; case SVR4_CONFIG_SIGQUEUE_MAX: *retval = 0; /* XXX: Don't know */ break; case SVR4_CONFIG_SIGRT_MIN: case SVR4_CONFIG_SIGRT_MAX: *retval = 0; /* No real time signals */ break; case SVR4_CONFIG_TIMER_MAX: *retval = 3; /* XXX: real, virtual, profiling */ break; #if defined(NOTYET) case SVR4_CONFIG_PHYS_PAGES: #if defined(UVM) *retval = uvmexp.free; /* XXX: free instead of total */ #else *retval = cnt.v_free_count; /* XXX: free instead of total */ #endif break; case SVR4_CONFIG_AVPHYS_PAGES: #if defined(UVM) *retval = uvmexp.active; /* XXX: active instead of avg */ #else *retval = cnt.v_active_count; /* XXX: active instead of 
avg */
#endif
		break;
#endif /* NOTYET */

	default:
		return EINVAL;
	}
	return 0;
}

/*
 * brk()-style data segment resize: grow or shrink the process's data
 * segment so that it ends at the page containing uap->nsize.
 */
/* ARGSUSED */
int
svr4_sys_break(td, uap)
	struct thread *td;
	struct svr4_sys_break_args *uap;
{
	struct proc *p = td->td_proc;
	struct vmspace *vm = p->p_vmspace;
	vm_offset_t new, old, base, ns;
	int rv;

	base = round_page((vm_offset_t) vm->vm_daddr);
	ns = (vm_offset_t)uap->nsize;
	new = round_page(ns);
	if (new > base) {
		/* Enforce the RLIMIT_DATA resource limit. */
		PROC_LOCK(p);
		if ((new - base) > (unsigned)lim_cur(p, RLIMIT_DATA)) {
			PROC_UNLOCK(p);
			return ENOMEM;
		}
		PROC_UNLOCK(p);

		if (new >= VM_MAXUSER_ADDRESS)
			return (ENOMEM);
	} else if (new < base) {
		/*
		 * This is simply an invalid value. If someone wants to
		 * do fancy address space manipulations, mmap and munmap
		 * can do most of what the user would want.
		 */
		return EINVAL;
	}

	old = base + ctob(vm->vm_dsize);

	if (new > old) {
		/* Growing: check RLIMIT_VMEM, then map in the new pages. */
		vm_size_t diff;
		diff = new - old;
		PROC_LOCK(p);
		if (vm->vm_map.size + diff > lim_cur(p, RLIMIT_VMEM)) {
			PROC_UNLOCK(p);
			return(ENOMEM);
		}
		PROC_UNLOCK(p);
		rv = vm_map_find(&vm->vm_map, NULL, 0, &old, diff, FALSE,
			VM_PROT_ALL, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
			return (ENOMEM);
		}
		vm->vm_dsize += btoc(diff);
	} else if (new < old) {
		/* Shrinking: unmap everything past the new break. */
		rv = vm_map_remove(&vm->vm_map, new, old);
		if (rv != KERN_SUCCESS) {
			return (ENOMEM);
		}
		vm->vm_dsize -= btoc(old - new);
	}
	return (0);
}

/* Convert a timeval to clock ticks (hz units), for times(2). */
static __inline clock_t
timeval_to_clock_t(tv)
	struct timeval *tv;
{
	return tv->tv_sec * hz + tv->tv_usec / (1000000 / hz);
}

/*
 * times(2): report user/system CPU time for the process and its
 * waited-for children, in clock ticks.
 */
int
svr4_sys_times(td, uap)
	struct thread *td;
	struct svr4_sys_times_args *uap;
{
	int error, *retval = td->td_retval;
	struct tms tms;
	struct timeval t;
	struct rusage *ru;
	struct rusage r;
	struct getrusage_args ga;

	caddr_t sg = stackgap_init();
	ru = stackgap_alloc(&sg, sizeof(struct rusage));

	/* First pass: our own resource usage. */
	ga.who = RUSAGE_SELF;
	ga.rusage = ru;

	error = getrusage(td, &ga);
	if (error)
		return error;

	if ((error = copyin(ru, &r, sizeof r)) != 0)
		return error;

	tms.tms_utime = timeval_to_clock_t(&r.ru_utime);
	tms.tms_stime = timeval_to_clock_t(&r.ru_stime);

	/* Second pass: accumulated usage of waited-for children. */
	ga.who = RUSAGE_CHILDREN;
	error =
getrusage(td, &ga);
	if (error)
		return error;

	if ((error = copyin(ru, &r, sizeof r)) != 0)
		return error;

	tms.tms_cutime = timeval_to_clock_t(&r.ru_utime);
	tms.tms_cstime = timeval_to_clock_t(&r.ru_stime);

	/* times(2) returns the elapsed real time in ticks. */
	microtime(&t);
	*retval = timeval_to_clock_t(&t);

	return copyout(&tms, uap->tp, sizeof(tms));
}

/*
 * ulimit(2): get/set per-process limits in SVR4 units.  File size
 * limits are expressed in 512-byte blocks; an unlimited value (-1)
 * is reported as 0x7fffffff.
 */
int
svr4_sys_ulimit(td, uap)
	struct thread *td;
	struct svr4_sys_ulimit_args *uap;
{
	int *retval = td->td_retval;
	int error;

	switch (uap->cmd) {
	case SVR4_GFILLIM:
		/* Get the file size limit, in 512-byte blocks. */
		PROC_LOCK(td->td_proc);
		*retval = lim_cur(td->td_proc, RLIMIT_FSIZE) / 512;
		PROC_UNLOCK(td->td_proc);
		if (*retval == -1)
			*retval = 0x7fffffff;
		return 0;

	case SVR4_SFILLIM:
		/* Set the file size limit (given in 512-byte blocks). */
		{
			struct rlimit krl;

			krl.rlim_cur = uap->newlimit * 512;
			PROC_LOCK(td->td_proc);
			krl.rlim_max = lim_max(td->td_proc, RLIMIT_FSIZE);
			PROC_UNLOCK(td->td_proc);

			error = kern_setrlimit(td, RLIMIT_FSIZE, &krl);
			if (error)
				return error;

			PROC_LOCK(td->td_proc);
			*retval = lim_cur(td->td_proc, RLIMIT_FSIZE);
			PROC_UNLOCK(td->td_proc);
			if (*retval == -1)
				*retval = 0x7fffffff;
			return 0;
		}

	case SVR4_GMEMLIM:
		/* Get the maximum data address: data start + RLIMIT_DATA. */
		{
			struct vmspace *vm = td->td_proc->p_vmspace;
			register_t r;

			PROC_LOCK(td->td_proc);
			r = lim_cur(td->td_proc, RLIMIT_DATA);
			PROC_UNLOCK(td->td_proc);

			if (r == -1)
				r = 0x7fffffff;
			mtx_lock(&Giant);	/* XXX */
			r += (long) vm->vm_daddr;
			mtx_unlock(&Giant);
			/* Clamp on overflow past the representable range. */
			if (r < 0)
				r = 0x7fffffff;
			*retval = r;
			return 0;
		}

	case SVR4_GDESLIM:
		/* Get the descriptor table size limit. */
		PROC_LOCK(td->td_proc);
		*retval = lim_cur(td->td_proc, RLIMIT_NOFILE);
		PROC_UNLOCK(td->td_proc);
		if (*retval == -1)
			*retval = 0x7fffffff;
		return 0;

	default:
		return EINVAL;
	}
}

/*
 * Like pfind(), but also finds zombies; used by the getsid()/getpgid()
 * emulation below.
 */
static struct proc *
svr4_pfind(pid)
	pid_t pid;
{
	struct proc *p;

	/* look in the live processes */
	if ((p = pfind(pid)) == NULL)
		/* look in the zombies */
		p = zpfind(pid);

	return p;
}

/*
 * pgrpsys(): multiplexed process-group/session system call; uap->cmd
 * selects getpgrp/setpgrp/getsid/setsid/getpgid/setpgid behaviour.
 */
int
svr4_sys_pgrpsys(td, uap)
	struct thread *td;
	struct svr4_sys_pgrpsys_args *uap;
{
	int *retval = td->td_retval;
	struct proc *p = td->td_proc;

	switch (uap->cmd) {
	case 1:			/* setpgrp() */
		/*
		 * SVR4 setpgrp() (which takes no arguments) has the
		 * semantics that the session ID is
also created anew, so * in almost every sense, setpgrp() is identical to * setsid() for SVR4. (Under BSD, the difference is that * a setpgid(0,0) will not create a new session.) */ setsid(td, NULL); /*FALLTHROUGH*/ case 0: /* getpgrp() */ PROC_LOCK(p); *retval = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 2: /* getsid(pid) */ if (uap->pid == 0) PROC_LOCK(p); else if ((p = svr4_pfind(uap->pid)) == NULL) return ESRCH; /* * This has already been initialized to the pid of * the session leader. */ *retval = (register_t) p->p_session->s_sid; PROC_UNLOCK(p); return 0; case 3: /* setsid() */ return setsid(td, NULL); case 4: /* getpgid(pid) */ if (uap->pid == 0) PROC_LOCK(p); else if ((p = svr4_pfind(uap->pid)) == NULL) return ESRCH; *retval = (int) p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 5: /* setpgid(pid, pgid); */ { struct setpgid_args sa; sa.pid = uap->pid; sa.pgid = uap->pgid; return setpgid(td, &sa); } default: return EINVAL; } } #define syscallarg(x) union { x datum; register_t pad; } struct svr4_hrtcntl_args { int cmd; int fun; int clk; svr4_hrt_interval_t * iv; svr4_hrt_time_t * ti; }; static int svr4_hrtcntl(td, uap, retval) struct thread *td; struct svr4_hrtcntl_args *uap; register_t *retval; { switch (uap->fun) { case SVR4_HRT_CNTL_RES: DPRINTF(("htrcntl(RES)\n")); *retval = SVR4_HRT_USEC; return 0; case SVR4_HRT_CNTL_TOFD: DPRINTF(("htrcntl(TOFD)\n")); { struct timeval tv; svr4_hrt_time_t t; if (uap->clk != SVR4_HRT_CLK_STD) { DPRINTF(("clk == %d\n", uap->clk)); return EINVAL; } if (uap->ti == NULL) { DPRINTF(("ti NULL\n")); return EINVAL; } microtime(&tv); t.h_sec = tv.tv_sec; t.h_rem = tv.tv_usec; t.h_res = SVR4_HRT_USEC; return copyout(&t, uap->ti, sizeof(t)); } case SVR4_HRT_CNTL_START: DPRINTF(("htrcntl(START)\n")); return ENOSYS; case SVR4_HRT_CNTL_GET: DPRINTF(("htrcntl(GET)\n")); return ENOSYS; default: DPRINTF(("Bad htrcntl command %d\n", uap->fun)); return ENOSYS; } } int svr4_sys_hrtsys(td, uap) struct thread *td; struct 
svr4_sys_hrtsys_args *uap; { int *retval = td->td_retval; switch (uap->cmd) { case SVR4_HRT_CNTL: return svr4_hrtcntl(td, (struct svr4_hrtcntl_args *) uap, retval); case SVR4_HRT_ALRM: DPRINTF(("hrtalarm\n")); return ENOSYS; case SVR4_HRT_SLP: DPRINTF(("hrtsleep\n")); return ENOSYS; case SVR4_HRT_CAN: DPRINTF(("hrtcancel\n")); return ENOSYS; default: DPRINTF(("Bad hrtsys command %d\n", uap->cmd)); return EINVAL; } } static int svr4_setinfo(p, st, s) struct proc *p; int st; svr4_siginfo_t *s; { svr4_siginfo_t i; int sig; memset(&i, 0, sizeof(i)); i.si_signo = SVR4_SIGCHLD; i.si_errno = 0; /* XXX? */ if (p) { i.si_pid = p->p_pid; mtx_lock_spin(&sched_lock); if (p->p_state == PRS_ZOMBIE) { i.si_stime = p->p_ru->ru_stime.tv_sec; i.si_utime = p->p_ru->ru_utime.tv_sec; } else { i.si_stime = p->p_stats->p_ru.ru_stime.tv_sec; i.si_utime = p->p_stats->p_ru.ru_utime.tv_sec; } mtx_unlock_spin(&sched_lock); } if (WIFEXITED(st)) { i.si_status = WEXITSTATUS(st); i.si_code = SVR4_CLD_EXITED; } else if (WIFSTOPPED(st)) { sig = WSTOPSIG(st); if (sig >= 0 && sig < NSIG) i.si_status = SVR4_BSD2SVR4_SIG(sig); if (i.si_status == SVR4_SIGCONT) i.si_code = SVR4_CLD_CONTINUED; else i.si_code = SVR4_CLD_STOPPED; } else { sig = WTERMSIG(st); if (sig >= 0 && sig < NSIG) i.si_status = SVR4_BSD2SVR4_SIG(sig); if (WCOREDUMP(st)) i.si_code = SVR4_CLD_DUMPED; else i.si_code = SVR4_CLD_KILLED; } DPRINTF(("siginfo [pid %ld signo %d code %d errno %d status %d]\n", i.si_pid, i.si_signo, i.si_code, i.si_errno, i.si_status)); return copyout(&i, s, sizeof(i)); } int svr4_sys_waitsys(td, uap) struct thread *td; struct svr4_sys_waitsys_args *uap; { int nfound; int error, *retval = td->td_retval; struct proc *q, *t; switch (uap->grp) { case SVR4_P_PID: break; case SVR4_P_PGID: PROC_LOCK(td->td_proc); uap->id = -td->td_proc->p_pgid; PROC_UNLOCK(td->td_proc); break; case SVR4_P_ALL: uap->id = WAIT_ANY; break; default: return EINVAL; } DPRINTF(("waitsys(%d, %d, %p, %x)\n", uap->grp, uap->id, uap->info, 
uap->options)); loop: nfound = 0; sx_slock(&proctree_lock); LIST_FOREACH(q, &td->td_proc->p_children, p_sibling) { PROC_LOCK(q); if (uap->id != WAIT_ANY && q->p_pid != uap->id && q->p_pgid != -uap->id) { PROC_UNLOCK(q); DPRINTF(("pid %d pgid %d != %d\n", q->p_pid, q->p_pgid, uap->id)); continue; } nfound++; if ((q->p_state == PRS_ZOMBIE) && ((uap->options & (SVR4_WEXITED|SVR4_WTRAPPED)))) { PROC_UNLOCK(q); sx_sunlock(&proctree_lock); *retval = 0; DPRINTF(("found %d\n", q->p_pid)); error = svr4_setinfo(q, q->p_xstat, uap->info); if (error != 0) return error; if ((uap->options & SVR4_WNOWAIT)) { DPRINTF(("Don't wait\n")); return 0; } /* * If we got the child via ptrace(2) or procfs, and * the parent is different (meaning the process was * attached, rather than run as a child), then we need * to give it back to the old parent, and send the * parent a SIGCHLD. The rest of the cleanup will be * done when the old parent waits on the child. */ sx_xlock(&proctree_lock); PROC_LOCK(q); if (q->p_flag & P_TRACED) { if (q->p_oppid != q->p_pptr->p_pid) { PROC_UNLOCK(q); t = pfind(q->p_oppid); if (t == NULL) { t = initproc; PROC_LOCK(initproc); } PROC_LOCK(q); proc_reparent(q, t); q->p_oppid = 0; q->p_flag &= ~(P_TRACED | P_WAITED); PROC_UNLOCK(q); psignal(t, SIGCHLD); wakeup(t); PROC_UNLOCK(t); sx_xunlock(&proctree_lock); return 0; } } PROC_UNLOCK(q); sx_xunlock(&proctree_lock); q->p_xstat = 0; ruadd(&td->td_proc->p_stats->p_cru, q->p_ru); FREE(q->p_ru, M_ZOMBIE); q->p_ru = 0; /* * Decrement the count of procs running with this uid. */ (void)chgproccnt(q->p_ucred->cr_ruidinfo, -1, 0); /* * Release reference to text vnode. */ if (q->p_textvp) vrele(q->p_textvp); /* * Free up credentials. */ crfree(q->p_ucred); q->p_ucred = NULL; /* * Remove unused arguments */ pargs_drop(q->p_args); PROC_UNLOCK(q); /* * Finally finished with old proc entry. * Unlink it from its process group and free it. 
*/ sx_xlock(&proctree_lock); leavepgrp(q); sx_xlock(&allproc_lock); LIST_REMOVE(q, p_list); /* off zombproc */ sx_xunlock(&allproc_lock); LIST_REMOVE(q, p_sibling); sx_xunlock(&proctree_lock); PROC_LOCK(q); sigacts_free(q->p_sigacts); q->p_sigacts = NULL; PROC_UNLOCK(q); /* * Give machine-dependent layer a chance * to free anything that cpu_exit couldn't * release while still running in process context. */ vm_waitproc(q); #if defined(__NetBSD__) pool_put(&proc_pool, q); #endif #ifdef __FreeBSD__ mtx_destroy(&q->p_mtx); #ifdef MAC mac_destroy_proc(q); #endif uma_zfree(proc_zone, q); #endif nprocs--; return 0; } /* XXXKSE this needs clarification */ if (P_SHOULDSTOP(q) && ((q->p_flag & P_WAITED) == 0) && (q->p_flag & P_TRACED || (uap->options & (SVR4_WSTOPPED|SVR4_WCONTINUED)))) { DPRINTF(("jobcontrol %d\n", q->p_pid)); if (((uap->options & SVR4_WNOWAIT)) == 0) q->p_flag |= P_WAITED; PROC_UNLOCK(q); *retval = 0; return svr4_setinfo(q, W_STOPCODE(q->p_xstat), uap->info); } PROC_UNLOCK(q); } if (nfound == 0) return ECHILD; if (uap->options & SVR4_WNOHANG) { *retval = 0; if ((error = svr4_setinfo(NULL, 0, uap->info)) != 0) return error; return 0; } if ((error = tsleep(td->td_proc, PWAIT | PCATCH, "svr4_wait", 0)) != 0) return error; goto loop; } static void bsd_statfs_to_svr4_statvfs(bfs, sfs) const struct statfs *bfs; struct svr4_statvfs *sfs; { sfs->f_bsize = bfs->f_iosize; /* XXX */ sfs->f_frsize = bfs->f_bsize; sfs->f_blocks = bfs->f_blocks; sfs->f_bfree = bfs->f_bfree; sfs->f_bavail = bfs->f_bavail; sfs->f_files = bfs->f_files; sfs->f_ffree = bfs->f_ffree; sfs->f_favail = bfs->f_ffree; sfs->f_fsid = bfs->f_fsid.val[0]; memcpy(sfs->f_basetype, bfs->f_fstypename, sizeof(sfs->f_basetype)); sfs->f_flag = 0; if (bfs->f_flags & MNT_RDONLY) sfs->f_flag |= SVR4_ST_RDONLY; if (bfs->f_flags & MNT_NOSUID) sfs->f_flag |= SVR4_ST_NOSUID; sfs->f_namemax = MAXNAMLEN; memcpy(sfs->f_fstr, bfs->f_fstypename, sizeof(sfs->f_fstr)); /* XXX */ memset(sfs->f_filler, 0, 
sizeof(sfs->f_filler));
}

/*
 * Convert a BSD statfs into an SVR4 statvfs64 (the wide-field variant
 * of the conversion above).
 */
static void
bsd_statfs_to_svr4_statvfs64(bfs, sfs)
	const struct statfs *bfs;
	struct svr4_statvfs64 *sfs;
{
	sfs->f_bsize = bfs->f_iosize; /* XXX */
	sfs->f_frsize = bfs->f_bsize;
	sfs->f_blocks = bfs->f_blocks;
	sfs->f_bfree = bfs->f_bfree;
	sfs->f_bavail = bfs->f_bavail;
	sfs->f_files = bfs->f_files;
	sfs->f_ffree = bfs->f_ffree;
	sfs->f_favail = bfs->f_ffree;
	sfs->f_fsid = bfs->f_fsid.val[0];
	memcpy(sfs->f_basetype, bfs->f_fstypename, sizeof(sfs->f_basetype));
	sfs->f_flag = 0;
	if (bfs->f_flags & MNT_RDONLY)
		sfs->f_flag |= SVR4_ST_RDONLY;
	if (bfs->f_flags & MNT_NOSUID)
		sfs->f_flag |= SVR4_ST_NOSUID;
	sfs->f_namemax = MAXNAMLEN;
	memcpy(sfs->f_fstr, bfs->f_fstypename, sizeof(sfs->f_fstr)); /* XXX */
	memset(sfs->f_filler, 0, sizeof(sfs->f_filler));
}

/*
 * statvfs(2): statfs() the path into a stackgap buffer, then convert
 * the result to SVR4 layout and copy it out to uap->fs.
 */
int
svr4_sys_statvfs(td, uap)
	struct thread *td;
	struct svr4_sys_statvfs_args *uap;
{
	struct statfs_args fs_args;
	caddr_t sg = stackgap_init();
	struct statfs *fs = stackgap_alloc(&sg, sizeof(struct statfs));
	struct statfs bfs;
	struct svr4_statvfs sfs;
	int error;

	CHECKALTEXIST(td, &sg, uap->path);
	fs_args.path = uap->path;
	fs_args.buf = fs;

	if ((error = statfs(td, &fs_args)) != 0)
		return error;

	if ((error = copyin(fs, &bfs, sizeof(bfs))) != 0)
		return error;

	bsd_statfs_to_svr4_statvfs(&bfs, &sfs);

	return copyout(&sfs, uap->fs, sizeof(sfs));
}

/* fstatvfs(2): like statvfs() but operates on an open descriptor. */
int
svr4_sys_fstatvfs(td, uap)
	struct thread *td;
	struct svr4_sys_fstatvfs_args *uap;
{
	struct fstatfs_args fs_args;
	caddr_t sg = stackgap_init();
	struct statfs *fs = stackgap_alloc(&sg, sizeof(struct statfs));
	struct statfs bfs;
	struct svr4_statvfs sfs;
	int error;

	fs_args.fd = uap->fd;
	fs_args.buf = fs;

	if ((error = fstatfs(td, &fs_args)) != 0)
		return error;

	if ((error = copyin(fs, &bfs, sizeof(bfs))) != 0)
		return error;

	bsd_statfs_to_svr4_statvfs(&bfs, &sfs);

	return copyout(&sfs, uap->fs, sizeof(sfs));
}

/* statvfs64(2): wide-field variant of statvfs(). */
int
svr4_sys_statvfs64(td, uap)
	struct thread *td;
	struct svr4_sys_statvfs64_args *uap;
{
	struct statfs_args fs_args;
	caddr_t sg = stackgap_init();
	struct statfs
*fs = stackgap_alloc(&sg, sizeof(struct statfs)); struct statfs bfs; struct svr4_statvfs64 sfs; int error; CHECKALTEXIST(td, &sg, uap->path); fs_args.path = uap->path; fs_args.buf = fs; if ((error = statfs(td, &fs_args)) != 0) return error; if ((error = copyin(fs, &bfs, sizeof(bfs))) != 0) return error; bsd_statfs_to_svr4_statvfs64(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_fstatvfs64(td, uap) struct thread *td; struct svr4_sys_fstatvfs64_args *uap; { struct fstatfs_args fs_args; caddr_t sg = stackgap_init(); struct statfs *fs = stackgap_alloc(&sg, sizeof(struct statfs)); struct statfs bfs; struct svr4_statvfs64 sfs; int error; fs_args.fd = uap->fd; fs_args.buf = fs; if ((error = fstatfs(td, &fs_args)) != 0) return error; if ((error = copyin(fs, &bfs, sizeof(bfs))) != 0) return error; bsd_statfs_to_svr4_statvfs64(&bfs, &sfs); return copyout(&sfs, uap->fs, sizeof(sfs)); } int svr4_sys_alarm(td, uap) struct thread *td; struct svr4_sys_alarm_args *uap; { int error; struct itimerval *itp, *oitp; struct setitimer_args sa; caddr_t sg = stackgap_init(); itp = stackgap_alloc(&sg, sizeof(*itp)); oitp = stackgap_alloc(&sg, sizeof(*oitp)); timevalclear(&itp->it_interval); itp->it_value.tv_sec = uap->sec; itp->it_value.tv_usec = 0; sa.which = ITIMER_REAL; sa.itv = itp; sa.oitv = oitp; error = setitimer(td, &sa); if (error) return error; if (oitp->it_value.tv_usec) oitp->it_value.tv_sec++; td->td_retval[0] = oitp->it_value.tv_sec; return 0; } int svr4_sys_gettimeofday(td, uap) struct thread *td; struct svr4_sys_gettimeofday_args *uap; { if (uap->tp) { struct timeval atv; microtime(&atv); return copyout(&atv, uap->tp, sizeof (atv)); } return 0; } int svr4_sys_facl(td, uap) struct thread *td; struct svr4_sys_facl_args *uap; { int *retval; retval = td->td_retval; *retval = 0; switch (uap->cmd) { case SVR4_SYS_SETACL: /* We don't support acls on any filesystem */ return ENOSYS; case SVR4_SYS_GETACL: return copyout(retval, &uap->num, sizeof(uap->num)); 
case SVR4_SYS_GETACLCNT: return 0; default: return EINVAL; } } int svr4_sys_acl(td, uap) struct thread *td; struct svr4_sys_acl_args *uap; { /* XXX: for now the same */ return svr4_sys_facl(td, (struct svr4_sys_facl_args *)uap); } int svr4_sys_auditsys(td, uap) struct thread *td; struct svr4_sys_auditsys_args *uap; { /* * XXX: Big brother is *not* watching. */ return 0; } int svr4_sys_memcntl(td, uap) struct thread *td; struct svr4_sys_memcntl_args *uap; { switch (uap->cmd) { case SVR4_MC_SYNC: { struct msync_args msa; msa.addr = uap->addr; msa.len = uap->len; msa.flags = (int)uap->arg; return msync(td, &msa); } case SVR4_MC_ADVISE: { struct madvise_args maa; maa.addr = uap->addr; maa.len = uap->len; maa.behav = (int)uap->arg; return madvise(td, &maa); } case SVR4_MC_LOCK: case SVR4_MC_UNLOCK: case SVR4_MC_LOCKAS: case SVR4_MC_UNLOCKAS: return EOPNOTSUPP; default: return ENOSYS; } } int svr4_sys_nice(td, uap) struct thread *td; struct svr4_sys_nice_args *uap; { struct setpriority_args ap; int error; ap.which = PRIO_PROCESS; ap.who = 0; ap.prio = uap->prio; if ((error = setpriority(td, &ap)) != 0) return error; /* the cast is stupid, but the structures are the same */ if ((error = getpriority(td, (struct getpriority_args *)&ap)) != 0) return error; return 0; } int svr4_sys_resolvepath(td, uap) struct thread *td; struct svr4_sys_resolvepath_args *uap; { struct nameidata nd; int error, *retval = td->td_retval; unsigned int ncopy; NDINIT(&nd, LOOKUP, NOFOLLOW | SAVENAME, UIO_USERSPACE, uap->path, td); if ((error = namei(&nd)) != 0) return error; ncopy = min(uap->bufsiz, strlen(nd.ni_cnd.cn_pnbuf) + 1); if ((error = copyout(nd.ni_cnd.cn_pnbuf, uap->buf, ncopy)) != 0) goto bad; *retval = ncopy; bad: NDFREE(&nd, NDF_ONLY_PNBUF); vput(nd.ni_vp); return error; } Index: head/sys/i386/ibcs2/ibcs2_misc.c =================================================================== --- head/sys/i386/ibcs2/ibcs2_misc.c (revision 127139) +++ head/sys/i386/ibcs2/ibcs2_misc.c (revision 
127140) @@ -1,1213 +1,1205 @@ /* * Copyright (c) 1995 Steven Wallace * Copyright (c) 1994, 1995 Scott Bartram * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This software was developed by the Computer Systems Engineering group * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and * contributed to Berkeley. * * All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Lawrence Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Header: sun_misc.c,v 1.16 93/04/07 02:46:27 torek Exp * * @(#)sun_misc.c 8.1 (Berkeley) 6/18/93 */ #include __FBSDID("$FreeBSD$"); /* * IBCS2 compatibility module. * * IBCS2 system calls that are implemented differently in BSD are * handled here. */ #include "opt_mac.h" #include #include #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int ibcs2_ulimit(td, uap) struct thread *td; struct ibcs2_ulimit_args *uap; { struct rlimit rl; struct proc *p; int error; #define IBCS2_GETFSIZE 1 #define IBCS2_SETFSIZE 2 #define IBCS2_GETPSIZE 3 #define IBCS2_GETDTABLESIZE 4 p = td->td_proc; switch (uap->cmd) { case IBCS2_GETFSIZE: PROC_LOCK(p); td->td_retval[0] = lim_cur(p, RLIMIT_FSIZE); PROC_UNLOCK(p); if (td->td_retval[0] == -1) td->td_retval[0] = 0x7fffffff; return 0; case IBCS2_SETFSIZE: PROC_LOCK(p); rl.rlim_max = lim_max(p, RLIMIT_FSIZE); PROC_UNLOCK(p); rl.rlim_cur = uap->newlimit; error = kern_setrlimit(td, RLIMIT_FSIZE, &rl); if (!error) { PROC_LOCK(p); td->td_retval[0] = lim_cur(p, RLIMIT_FSIZE); PROC_UNLOCK(p); } else { DPRINTF(("failed ")); } return error; case IBCS2_GETPSIZE: PROC_LOCK(p); td->td_retval[0] = lim_cur(p, RLIMIT_RSS); /* XXX */ PROC_UNLOCK(p); return 0; case IBCS2_GETDTABLESIZE: uap->cmd = IBCS2_SC_OPEN_MAX; return 
ibcs2_sysconf(td, (struct ibcs2_sysconf_args *)uap); default: return ENOSYS; } } #define IBCS2_WSTOPPED 0177 #define IBCS2_STOPCODE(sig) ((sig) << 8 | IBCS2_WSTOPPED) int ibcs2_wait(td, uap) struct thread *td; struct ibcs2_wait_args *uap; { - int error, status; - struct wait_args w4; + int error, options, status; + int *statusp; + pid_t pid; struct trapframe *tf = td->td_frame; - w4.rusage = NULL; - if ((tf->tf_eflags & (PSL_Z|PSL_PF|PSL_N|PSL_V)) + if ((tf->tf_eflags & (PSL_Z|PSL_PF|PSL_N|PSL_V)) == (PSL_Z|PSL_PF|PSL_N|PSL_V)) { /* waitpid */ - w4.pid = uap->a1; - w4.status = (int *)uap->a2; - w4.options = uap->a3; + pid = uap->a1; + statusp = (int *)uap->a2; + options = uap->a3; } else { /* wait */ - w4.pid = WAIT_ANY; - w4.status = (int *)uap->a1; - w4.options = 0; + pid = WAIT_ANY; + statusp = (int *)uap->a1; + options = 0; } - if ((error = wait4(td, &w4)) != 0) + error = kern_wait(td, pid, &status, options, NULL); + if (error) return error; - if (w4.status) { /* this is real iBCS brain-damage */ - error = copyin((caddr_t)w4.status, (caddr_t)&status, - sizeof(w4.status)); - if(error) - return error; - + if (statusp) { /* - * Convert status/signal result. We must validate the - * signal number stored in the exit status in case - * the user changed it between wait4()'s copyout() - * and our copyin(). + * Convert status/signal result. 
*/ if (WIFSTOPPED(status)) { if (WSTOPSIG(status) <= 0 || WSTOPSIG(status) > IBCS2_SIGTBLSZ) return (EINVAL); status = IBCS2_STOPCODE(bsd_to_ibcs2_sig[_SIG_IDX(WSTOPSIG(status))]); } else if (WIFSIGNALED(status)) { if (WTERMSIG(status) <= 0 || WTERMSIG(status) > IBCS2_SIGTBLSZ) return (EINVAL); status = bsd_to_ibcs2_sig[_SIG_IDX(WTERMSIG(status))]; } /* else exit status -- identical */ /* record result/status */ td->td_retval[1] = status; - return copyout((caddr_t)&status, (caddr_t)w4.status, - sizeof(w4.status)); + return copyout(&status, statusp, sizeof(status)); } return 0; } int ibcs2_execv(td, uap) struct thread *td; struct ibcs2_execv_args *uap; { struct execve_args ea; caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); ea.fname = uap->path; ea.argv = uap->argp; ea.envv = NULL; return execve(td, &ea); } int ibcs2_execve(td, uap) struct thread *td; struct ibcs2_execve_args *uap; { caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); return execve(td, (struct execve_args *)uap); } int ibcs2_umount(td, uap) struct thread *td; struct ibcs2_umount_args *uap; { struct unmount_args um; um.path = uap->name; um.flags = 0; return unmount(td, &um); } int ibcs2_mount(td, uap) struct thread *td; struct ibcs2_mount_args *uap; { #ifdef notyet int oflags = uap->flags, nflags, error; char fsname[MFSNAMELEN]; if (oflags & (IBCS2_MS_NOSUB | IBCS2_MS_SYS5)) return (EINVAL); if ((oflags & IBCS2_MS_NEWTYPE) == 0) return (EINVAL); nflags = 0; if (oflags & IBCS2_MS_RDONLY) nflags |= MNT_RDONLY; if (oflags & IBCS2_MS_NOSUID) nflags |= MNT_NOSUID; if (oflags & IBCS2_MS_REMOUNT) nflags |= MNT_UPDATE; uap->flags = nflags; if (error = copyinstr((caddr_t)uap->type, fsname, sizeof fsname, (u_int *)0)) return (error); if (strcmp(fsname, "4.2") == 0) { uap->type = (caddr_t)STACK_ALLOC(); if (error = copyout("ufs", uap->type, sizeof("ufs"))) return (error); } else if (strcmp(fsname, "nfs") == 0) { struct ibcs2_nfs_args sna; struct sockaddr_in sain; struct nfs_args 
na; struct sockaddr sa; if (error = copyin(uap->data, &sna, sizeof sna)) return (error); if (error = copyin(sna.addr, &sain, sizeof sain)) return (error); bcopy(&sain, &sa, sizeof sa); sa.sa_len = sizeof(sain); uap->data = (caddr_t)STACK_ALLOC(); na.addr = (struct sockaddr *)((int)uap->data + sizeof na); na.sotype = SOCK_DGRAM; na.proto = IPPROTO_UDP; na.fh = (nfsv2fh_t *)sna.fh; na.flags = sna.flags; na.wsize = sna.wsize; na.rsize = sna.rsize; na.timeo = sna.timeo; na.retrans = sna.retrans; na.hostname = sna.hostname; if (error = copyout(&sa, na.addr, sizeof sa)) return (error); if (error = copyout(&na, uap->data, sizeof na)) return (error); } return (mount(td, uap)); #else return EINVAL; #endif } /* * Read iBCS2-style directory entries. We suck them into kernel space so * that they can be massaged before being copied out to user code. Like * SunOS, we squish out `empty' entries. * * This is quite ugly, but what do you expect from compatibility code? */ int ibcs2_getdents(td, uap) struct thread *td; register struct ibcs2_getdents_args *uap; { register struct vnode *vp; register caddr_t inp, buf; /* BSD-format */ register int len, reclen; /* BSD-format */ register caddr_t outp; /* iBCS2-format */ register int resid; /* iBCS2-format */ struct file *fp; struct uio auio; struct iovec aiov; struct ibcs2_dirent idb; off_t off; /* true file offset */ int buflen, error, eofflag; u_long *cookies = NULL, *cookiep; int ncookies; #define BSD_DIRENT(cp) ((struct dirent *)(cp)) #define IBCS2_RECLEN(reclen) (reclen + sizeof(u_short)) if ((error = getvnode(td->td_proc->p_fd, uap->fd, &fp)) != 0) return (error); if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { /* XXX vnode readdir op should do this */ fdrop(fp, td); return (EINVAL); } off = fp->f_offset; #define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ buflen = max(DIRBLKSIZ, uap->nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, 
M_WAITOK); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_check_vnode_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ if ((error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies)) != 0) goto out; inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) <= 0) goto eof; cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). */ while (len > 0 && ncookies > 0 && *cookiep <= off) { len -= BSD_DIRENT(inp)->d_reclen; inp += BSD_DIRENT(inp)->d_reclen; cookiep++; ncookies--; } } for (; len > 0; len -= reclen) { if (cookiep && ncookies == 0) break; reclen = BSD_DIRENT(inp)->d_reclen; if (reclen & 3) { printf("ibcs2_getdents: reclen=%d\n", reclen); error = EFAULT; goto out; } if (BSD_DIRENT(inp)->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; continue; } if (reclen > len || resid < IBCS2_RECLEN(reclen)) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make an iBCS2-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). 
*/ idb.d_ino = (ibcs2_ino_t)BSD_DIRENT(inp)->d_fileno; idb.d_off = (ibcs2_off_t)off; idb.d_reclen = (u_short)IBCS2_RECLEN(reclen); if ((error = copyout((caddr_t)&idb, outp, 10)) != 0 || (error = copyout(BSD_DIRENT(inp)->d_name, outp + 10, BSD_DIRENT(inp)->d_namlen + 1)) != 0) goto out; /* advance past this real entry */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; inp += reclen; /* advance output past iBCS2-shaped entry */ outp += IBCS2_RECLEN(reclen); resid -= IBCS2_RECLEN(reclen); } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: td->td_retval[0] = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0, td); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return (error); } int ibcs2_read(td, uap) struct thread *td; struct ibcs2_read_args *uap; { register struct vnode *vp; register caddr_t inp, buf; /* BSD-format */ register int len, reclen; /* BSD-format */ register caddr_t outp; /* iBCS2-format */ register int resid; /* iBCS2-format */ struct file *fp; struct uio auio; struct iovec aiov; struct ibcs2_direct { ibcs2_ino_t ino; char name[14]; } idb; off_t off; /* true file offset */ int buflen, error, eofflag, size; u_long *cookies = NULL, *cookiep; int ncookies; if ((error = getvnode(td->td_proc->p_fd, uap->fd, &fp)) != 0) { if (error == EINVAL) return read(td, (struct read_args *)uap); else return error; } if ((fp->f_flag & FREAD) == 0) { fdrop(fp, td); return (EBADF); } vp = fp->f_vnode; if (vp->v_type != VDIR) { fdrop(fp, td); return read(td, (struct read_args *)uap); } off = fp->f_offset; if (vp->v_type != VDIR) return read(td, (struct read_args *)uap); DPRINTF(("ibcs2_read: read directory\n")); buflen = max(DIRBLKSIZ, uap->nbytes); buflen = min(buflen, MAXBSIZE); buf = malloc(buflen, M_TEMP, M_WAITOK); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); again: aiov.iov_base = buf; aiov.iov_len = buflen; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; 
auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = buflen; auio.uio_offset = off; if (cookies) { free(cookies, M_TEMP); cookies = NULL; } #ifdef MAC error = mac_check_vnode_readdir(td->td_ucred, vp); if (error) goto out; #endif /* * First we read into the malloc'ed buffer, then * we massage it into user space, one record at a time. */ if ((error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &ncookies, &cookies)) != 0) { DPRINTF(("VOP_READDIR failed: %d\n", error)); goto out; } inp = buf; outp = uap->buf; resid = uap->nbytes; if ((len = buflen - auio.uio_resid) <= 0) goto eof; cookiep = cookies; if (cookies) { /* * When using cookies, the vfs has the option of reading from * a different offset than that supplied (UFS truncates the * offset to a block boundary to make sure that it never reads * partway through a directory entry, even if the directory * has been compacted). */ while (len > 0 && ncookies > 0 && *cookiep <= off) { len -= BSD_DIRENT(inp)->d_reclen; inp += BSD_DIRENT(inp)->d_reclen; cookiep++; ncookies--; } } for (; len > 0 && resid > 0; len -= reclen) { if (cookiep && ncookies == 0) break; reclen = BSD_DIRENT(inp)->d_reclen; if (reclen & 3) { printf("ibcs2_read: reclen=%d\n", reclen); error = EFAULT; goto out; } if (BSD_DIRENT(inp)->d_fileno == 0) { inp += reclen; /* it is a hole; squish it out */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; continue; } if (reclen > len || resid < sizeof(struct ibcs2_direct)) { /* entry too big for buffer, so just stop */ outp++; break; } /* * Massage in place to make an iBCS2-shaped dirent (otherwise * we have to worry about touching user memory outside of * the copyout() call). * * TODO: if length(filename) > 14, then break filename into * multiple entries and set inode = 0xffff except last */ idb.ino = (BSD_DIRENT(inp)->d_fileno > 0xfffe) ? 
0xfffe : BSD_DIRENT(inp)->d_fileno; (void)copystr(BSD_DIRENT(inp)->d_name, idb.name, 14, &size); bzero(idb.name + size, 14 - size); if ((error = copyout(&idb, outp, sizeof(struct ibcs2_direct))) != 0) goto out; /* advance past this real entry */ if (cookiep) { off = *cookiep++; ncookies--; } else off += reclen; inp += reclen; /* advance output past iBCS2-shaped entry */ outp += sizeof(struct ibcs2_direct); resid -= sizeof(struct ibcs2_direct); } /* if we squished out the whole block, try again */ if (outp == uap->buf) goto again; fp->f_offset = off; /* update the vnode offset */ eof: td->td_retval[0] = uap->nbytes - resid; out: VOP_UNLOCK(vp, 0, td); fdrop(fp, td); if (cookies) free(cookies, M_TEMP); free(buf, M_TEMP); return (error); } int ibcs2_mknod(td, uap) struct thread *td; struct ibcs2_mknod_args *uap; { caddr_t sg = stackgap_init(); CHECKALTCREAT(td, &sg, uap->path); if (S_ISFIFO(uap->mode)) { struct mkfifo_args ap; ap.path = uap->path; ap.mode = uap->mode; return mkfifo(td, &ap); } else { struct mknod_args ap; ap.path = uap->path; ap.mode = uap->mode; ap.dev = uap->dev; return mknod(td, &ap); } } int ibcs2_getgroups(td, uap) struct thread *td; struct ibcs2_getgroups_args *uap; { int error, i; ibcs2_gid_t *iset = NULL; struct getgroups_args sa; gid_t *gp; caddr_t sg = stackgap_init(); if (uap->gidsetsize < 0) return (EINVAL); if (uap->gidsetsize > NGROUPS_MAX) uap->gidsetsize = NGROUPS_MAX; sa.gidsetsize = uap->gidsetsize; if (uap->gidsetsize) { sa.gidset = stackgap_alloc(&sg, NGROUPS_MAX * sizeof(gid_t *)); iset = stackgap_alloc(&sg, uap->gidsetsize * sizeof(ibcs2_gid_t)); } if ((error = getgroups(td, &sa)) != 0) return error; if (uap->gidsetsize == 0) return 0; for (i = 0, gp = sa.gidset; i < td->td_retval[0]; i++) iset[i] = (ibcs2_gid_t)*gp++; if (td->td_retval[0] && (error = copyout((caddr_t)iset, (caddr_t)uap->gidset, sizeof(ibcs2_gid_t) * td->td_retval[0]))) return error; return 0; } int ibcs2_setgroups(td, uap) struct thread *td; struct 
ibcs2_setgroups_args *uap; { int error, i; ibcs2_gid_t *iset; struct setgroups_args sa; gid_t *gp; caddr_t sg = stackgap_init(); if (uap->gidsetsize < 0 || uap->gidsetsize > NGROUPS_MAX) return (EINVAL); sa.gidsetsize = uap->gidsetsize; sa.gidset = stackgap_alloc(&sg, sa.gidsetsize * sizeof(gid_t *)); iset = stackgap_alloc(&sg, sa.gidsetsize * sizeof(ibcs2_gid_t *)); if (sa.gidsetsize) { if ((error = copyin((caddr_t)uap->gidset, (caddr_t)iset, sizeof(ibcs2_gid_t *) * uap->gidsetsize)) != 0) return error; } for (i = 0, gp = sa.gidset; i < sa.gidsetsize; i++) *gp++ = (gid_t)iset[i]; return setgroups(td, &sa); } int ibcs2_setuid(td, uap) struct thread *td; struct ibcs2_setuid_args *uap; { struct setuid_args sa; sa.uid = (uid_t)uap->uid; return setuid(td, &sa); } int ibcs2_setgid(td, uap) struct thread *td; struct ibcs2_setgid_args *uap; { struct setgid_args sa; sa.gid = (gid_t)uap->gid; return setgid(td, &sa); } int ibcs2_time(td, uap) struct thread *td; struct ibcs2_time_args *uap; { struct timeval tv; microtime(&tv); td->td_retval[0] = tv.tv_sec; if (uap->tp) return copyout((caddr_t)&tv.tv_sec, (caddr_t)uap->tp, sizeof(ibcs2_time_t)); else return 0; } int ibcs2_pathconf(td, uap) struct thread *td; struct ibcs2_pathconf_args *uap; { uap->name++; /* iBCS2 _PC_* defines are offset by one */ return pathconf(td, (struct pathconf_args *)uap); } int ibcs2_fpathconf(td, uap) struct thread *td; struct ibcs2_fpathconf_args *uap; { uap->name++; /* iBCS2 _PC_* defines are offset by one */ return fpathconf(td, (struct fpathconf_args *)uap); } int ibcs2_sysconf(td, uap) struct thread *td; struct ibcs2_sysconf_args *uap; { int mib[2], value, len, error; struct proc *p; p = td->td_proc; switch(uap->name) { case IBCS2_SC_ARG_MAX: mib[1] = KERN_ARGMAX; break; case IBCS2_SC_CHILD_MAX: PROC_LOCK(p); td->td_retval[0] = lim_cur(td->td_proc, RLIMIT_NPROC); PROC_UNLOCK(p); return 0; case IBCS2_SC_CLK_TCK: td->td_retval[0] = hz; return 0; case IBCS2_SC_NGROUPS_MAX: mib[1] = KERN_NGROUPS; 
break; case IBCS2_SC_OPEN_MAX: PROC_LOCK(p); td->td_retval[0] = lim_cur(td->td_proc, RLIMIT_NOFILE); PROC_UNLOCK(p); return 0; case IBCS2_SC_JOB_CONTROL: mib[1] = KERN_JOB_CONTROL; break; case IBCS2_SC_SAVED_IDS: mib[1] = KERN_SAVED_IDS; break; case IBCS2_SC_VERSION: mib[1] = KERN_POSIX1; break; case IBCS2_SC_PASS_MAX: td->td_retval[0] = 128; /* XXX - should we create PASS_MAX ? */ return 0; case IBCS2_SC_XOPEN_VERSION: td->td_retval[0] = 2; /* XXX: What should that be? */ return 0; default: return EINVAL; } mib[0] = CTL_KERN; len = sizeof(value); error = kernel_sysctl(td, mib, 2, &value, &len, NULL, 0, NULL); if (error) return error; td->td_retval[0] = value; return 0; } int ibcs2_alarm(td, uap) struct thread *td; struct ibcs2_alarm_args *uap; { int error; struct itimerval *itp, *oitp; struct setitimer_args sa; caddr_t sg = stackgap_init(); itp = stackgap_alloc(&sg, sizeof(*itp)); oitp = stackgap_alloc(&sg, sizeof(*oitp)); timevalclear(&itp->it_interval); itp->it_value.tv_sec = uap->sec; itp->it_value.tv_usec = 0; sa.which = ITIMER_REAL; sa.itv = itp; sa.oitv = oitp; error = setitimer(td, &sa); if (error) return error; if (oitp->it_value.tv_usec) oitp->it_value.tv_sec++; td->td_retval[0] = oitp->it_value.tv_sec; return 0; } int ibcs2_times(td, uap) struct thread *td; struct ibcs2_times_args *uap; { int error; struct getrusage_args ga; struct tms tms; struct timeval t; caddr_t sg = stackgap_init(); struct rusage *ru = stackgap_alloc(&sg, sizeof(*ru)); #define CONVTCK(r) (r.tv_sec * hz + r.tv_usec / (1000000 / hz)) ga.who = RUSAGE_SELF; ga.rusage = ru; error = getrusage(td, &ga); if (error) return error; tms.tms_utime = CONVTCK(ru->ru_utime); tms.tms_stime = CONVTCK(ru->ru_stime); ga.who = RUSAGE_CHILDREN; error = getrusage(td, &ga); if (error) return error; tms.tms_cutime = CONVTCK(ru->ru_utime); tms.tms_cstime = CONVTCK(ru->ru_stime); microtime(&t); td->td_retval[0] = CONVTCK(t); return copyout((caddr_t)&tms, (caddr_t)uap->tp, sizeof(struct tms)); } int 
ibcs2_stime(td, uap) struct thread *td; struct ibcs2_stime_args *uap; { int error; struct settimeofday_args sa; caddr_t sg = stackgap_init(); sa.tv = stackgap_alloc(&sg, sizeof(*sa.tv)); sa.tzp = NULL; if ((error = copyin((caddr_t)uap->timep, &(sa.tv->tv_sec), sizeof(long))) != 0) return error; sa.tv->tv_usec = 0; if ((error = settimeofday(td, &sa)) != 0) return EPERM; return 0; } int ibcs2_utime(td, uap) struct thread *td; struct ibcs2_utime_args *uap; { int error; struct utimes_args sa; struct timeval *tp; caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); sa.path = uap->path; if (uap->buf) { struct ibcs2_utimbuf ubuf; if ((error = copyin((caddr_t)uap->buf, (caddr_t)&ubuf, sizeof(ubuf))) != 0) return error; sa.tptr = stackgap_alloc(&sg, 2 * sizeof(struct timeval *)); tp = (struct timeval *)sa.tptr; tp->tv_sec = ubuf.actime; tp->tv_usec = 0; tp++; tp->tv_sec = ubuf.modtime; tp->tv_usec = 0; } else sa.tptr = NULL; return utimes(td, &sa); } int ibcs2_nice(td, uap) struct thread *td; struct ibcs2_nice_args *uap; { int error; struct setpriority_args sa; sa.which = PRIO_PROCESS; sa.who = 0; sa.prio = td->td_ksegrp->kg_nice + uap->incr; if ((error = setpriority(td, &sa)) != 0) return EPERM; td->td_retval[0] = td->td_ksegrp->kg_nice; return 0; } /* * iBCS2 getpgrp, setpgrp, setsid, and setpgid */ int ibcs2_pgrpsys(td, uap) struct thread *td; struct ibcs2_pgrpsys_args *uap; { struct proc *p = td->td_proc; switch (uap->type) { case 0: /* getpgrp */ PROC_LOCK(p); td->td_retval[0] = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; case 1: /* setpgrp */ { struct setpgid_args sa; sa.pid = 0; sa.pgid = 0; setpgid(td, &sa); PROC_LOCK(p); td->td_retval[0] = p->p_pgrp->pg_id; PROC_UNLOCK(p); return 0; } case 2: /* setpgid */ { struct setpgid_args sa; sa.pid = uap->pid; sa.pgid = uap->pgid; return setpgid(td, &sa); } case 3: /* setsid */ return setsid(td, NULL); default: return EINVAL; } } /* * XXX - need to check for nested calls */ int ibcs2_plock(td, uap) struct thread 
*td; struct ibcs2_plock_args *uap; { int error; #define IBCS2_UNLOCK 0 #define IBCS2_PROCLOCK 1 #define IBCS2_TEXTLOCK 2 #define IBCS2_DATALOCK 4 if ((error = suser(td)) != 0) return EPERM; switch(uap->cmd) { case IBCS2_UNLOCK: case IBCS2_PROCLOCK: case IBCS2_TEXTLOCK: case IBCS2_DATALOCK: return 0; /* XXX - TODO */ } return EINVAL; } int ibcs2_uadmin(td, uap) struct thread *td; struct ibcs2_uadmin_args *uap; { #define SCO_A_REBOOT 1 #define SCO_A_SHUTDOWN 2 #define SCO_A_REMOUNT 4 #define SCO_A_CLOCK 8 #define SCO_A_SETCONFIG 128 #define SCO_A_GETDEV 130 #define SCO_AD_HALT 0 #define SCO_AD_BOOT 1 #define SCO_AD_IBOOT 2 #define SCO_AD_PWRDOWN 3 #define SCO_AD_PWRNAP 4 #define SCO_AD_PANICBOOT 1 #define SCO_AD_GETBMAJ 0 #define SCO_AD_GETCMAJ 1 if (suser(td)) return EPERM; switch(uap->cmd) { case SCO_A_REBOOT: case SCO_A_SHUTDOWN: switch(uap->func) { struct reboot_args r; case SCO_AD_HALT: case SCO_AD_PWRDOWN: case SCO_AD_PWRNAP: r.opt = RB_HALT; reboot(td, &r); case SCO_AD_BOOT: case SCO_AD_IBOOT: r.opt = RB_AUTOBOOT; reboot(td, &r); } return EINVAL; case SCO_A_REMOUNT: case SCO_A_CLOCK: case SCO_A_SETCONFIG: return 0; case SCO_A_GETDEV: return EINVAL; /* XXX - TODO */ } return EINVAL; } int ibcs2_sysfs(td, uap) struct thread *td; struct ibcs2_sysfs_args *uap; { #define IBCS2_GETFSIND 1 #define IBCS2_GETFSTYP 2 #define IBCS2_GETNFSTYP 3 switch(uap->cmd) { case IBCS2_GETFSIND: case IBCS2_GETFSTYP: case IBCS2_GETNFSTYP: break; } return EINVAL; /* XXX - TODO */ } int ibcs2_unlink(td, uap) struct thread *td; struct ibcs2_unlink_args *uap; { caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); return unlink(td, (struct unlink_args *)uap); } int ibcs2_chdir(td, uap) struct thread *td; struct ibcs2_chdir_args *uap; { caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); return chdir(td, (struct chdir_args *)uap); } int ibcs2_chmod(td, uap) struct thread *td; struct ibcs2_chmod_args *uap; { caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, 
uap->path); return chmod(td, (struct chmod_args *)uap); } int ibcs2_chown(td, uap) struct thread *td; struct ibcs2_chown_args *uap; { caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); return chown(td, (struct chown_args *)uap); } int ibcs2_rmdir(td, uap) struct thread *td; struct ibcs2_rmdir_args *uap; { caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); return rmdir(td, (struct rmdir_args *)uap); } int ibcs2_mkdir(td, uap) struct thread *td; struct ibcs2_mkdir_args *uap; { caddr_t sg = stackgap_init(); CHECKALTCREAT(td, &sg, uap->path); return mkdir(td, (struct mkdir_args *)uap); } int ibcs2_symlink(td, uap) struct thread *td; struct ibcs2_symlink_args *uap; { caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); CHECKALTCREAT(td, &sg, uap->link); return symlink(td, (struct symlink_args *)uap); } int ibcs2_rename(td, uap) struct thread *td; struct ibcs2_rename_args *uap; { caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->from); CHECKALTCREAT(td, &sg, uap->to); return rename(td, (struct rename_args *)uap); } int ibcs2_readlink(td, uap) struct thread *td; struct ibcs2_readlink_args *uap; { caddr_t sg = stackgap_init(); CHECKALTEXIST(td, &sg, uap->path); return readlink(td, (struct readlink_args *) uap); } Index: head/sys/kern/kern_exit.c =================================================================== --- head/sys/kern/kern_exit.c (revision 127139) +++ head/sys/kern/kern_exit.c (revision 127140) @@ -1,813 +1,774 @@ /* * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_ktrace.h" #include "opt_mac.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for acct_process() function prototype */ #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #include #include #include #include #include /* Required to be non-static for SysVR4 emulator */ MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status"); -static int wait1(struct thread *, struct wait_args *, int); - /* * exit -- * Death of process. * * MPSAFE */ void sys_exit(struct thread *td, struct sys_exit_args *uap) { exit1(td, W_EXITCODE(uap->rval, 0)); /* NOTREACHED */ } /* * Exit: deallocate address space and other resources, change proc state * to zombie, and unlink proc from allproc and parent's lists. Save exit * status and rusage for wait(). Check for child processes and orphan them. */ void exit1(struct thread *td, int rv) { struct proc *p, *nq, *q; struct tty *tp; struct vnode *ttyvp; struct vmspace *vm; struct vnode *vtmp; #ifdef KTRACE struct vnode *tracevp; struct ucred *tracecred; #endif struct plimit *plim; /* * Drop Giant if caller has it. Eventually we should warn about * being called with Giant held. */ while (mtx_owned(&Giant)) mtx_unlock(&Giant); p = td->td_proc; if (p == initproc) { printf("init died (signal %d, exit %d)\n", WTERMSIG(rv), WEXITSTATUS(rv)); panic("Going nowhere without my init!"); } /* * MUST abort all other threads before proceeding past here. */ PROC_LOCK(p); if (p->p_flag & P_SA || p->p_numthreads > 1) { /* * First check if some other thread got here before us.. * if so, act apropriatly, (exit or suspend); */ thread_suspend_check(0); /* * Kill off the other threads. This requires * Some co-operation from other parts of the kernel * so it may not be instant. 
* With this state set: * Any thread entering the kernel from userspace will * thread_exit() in trap(). Any thread attempting to * sleep will return immediatly * with EINTR or EWOULDBLOCK, which will hopefully force them * to back out to userland, freeing resources as they go, and * anything attempting to return to userland will thread_exit() * from userret(). thread_exit() will unsuspend us * when the last other thread exits. */ if (thread_single(SINGLE_EXIT)) panic ("Exit: Single threading fouled up"); /* * All other activity in this process is now stopped. * Remove excess KSEs and KSEGRPS. XXXKSE (when we have them) * ... * Turn off threading support. */ p->p_flag &= ~P_SA; thread_single_end(); /* Don't need this any more. */ } /* * With this state set: * Any thread entering the kernel from userspace will thread_exit() * in trap(). Any thread attempting to sleep will return immediatly * with EINTR or EWOULDBLOCK, which will hopefully force them * to back out to userland, freeing resources as they go, and * anything attempting to return to userland will thread_exit() * from userret(). thread_exit() will do a wakeup on p->p_numthreads * if it transitions to 1. */ p->p_flag |= P_WEXIT; PROC_UNLOCK(p); /* Are we a task leader? */ if (p == p->p_leader) { mtx_lock(&ppeers_lock); q = p->p_peers; while (q != NULL) { PROC_LOCK(q); psignal(q, SIGKILL); PROC_UNLOCK(q); q = q->p_peers; } while (p->p_peers != NULL) msleep(p, &ppeers_lock, PWAIT, "exit1", 0); mtx_unlock(&ppeers_lock); } #ifdef PGINPROF mtx_lock(&Giant); vmsizmon(); mtx_unlock(&Giant); #endif PROC_LOCK(p); _STOPEVENT(p, S_EXIT, rv); wakeup(&p->p_stype); /* Wakeup anyone in procfs' PIOCWAIT */ PROC_UNLOCK(p); /* * Check if any loadable modules need anything done at process exit. * e.g. SYSV IPC stuff * XXX what if one of these generates an error? 
*/ EVENTHANDLER_INVOKE(process_exit, p); MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage), M_ZOMBIE, M_WAITOK); /* * If parent is waiting for us to exit or exec, * P_PPWAIT is set; we will wakeup the parent below. */ PROC_LOCK(p); stopprofclock(p); p->p_flag &= ~(P_TRACED | P_PPWAIT); SIGEMPTYSET(p->p_siglist); SIGEMPTYSET(td->td_siglist); /* * Stop the real interval timer. If the handler is currently * executing, prevent it from rearming itself and let it finish. */ if (timevalisset(&p->p_realtimer.it_value) && callout_stop(&p->p_itcallout) == 0) { timevalclear(&p->p_realtimer.it_interval); msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0); KASSERT(!timevalisset(&p->p_realtimer.it_value), ("realtime timer is still armed")); } PROC_UNLOCK(p); /* * Reset any sigio structures pointing to us as a result of * F_SETOWN with our pid. */ mtx_lock(&Giant); /* XXX: not sure if needed */ funsetownlst(&p->p_sigiolst); /* * Close open files and release open-file table. * This may block! */ fdfree(td); mtx_unlock(&Giant); /* * Remove ourself from our leader's peer list and wake our leader. */ mtx_lock(&ppeers_lock); if (p->p_leader->p_peers) { q = p->p_leader; while (q->p_peers != p) q = q->p_peers; q->p_peers = p->p_peers; wakeup(p->p_leader); } mtx_unlock(&ppeers_lock); mtx_lock(&Giant); /* The next two chunks should probably be moved to vmspace_exit. */ vm = p->p_vmspace; /* * Release user portion of address space. * This releases references to vnodes, * which could cause I/O if the file has been unlinked. * Need to do this early enough that we can still sleep. * Can't free the entire vmspace as the kernel stack * may be mapped within that space also. * * Processes sharing the same vmspace may exit in one order, and * get cleaned up by vmspace_exit() in a different order. 
The * last exiting process to reach this point releases as much of * the environment as it can, and the last process cleaned up * by vmspace_exit() (which decrements exitingcnt) cleans up the * remainder. */ ++vm->vm_exitingcnt; if (--vm->vm_refcnt == 0) { shmexit(vm); vm_page_lock_queues(); pmap_remove_pages(vmspace_pmap(vm), vm_map_min(&vm->vm_map), vm_map_max(&vm->vm_map)); vm_page_unlock_queues(); (void) vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map), vm_map_max(&vm->vm_map)); } sx_xlock(&proctree_lock); if (SESS_LEADER(p)) { struct session *sp; sp = p->p_session; if (sp->s_ttyvp) { /* * Controlling process. * Signal foreground pgrp, * drain controlling terminal * and revoke access to controlling terminal. */ if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) { tp = sp->s_ttyp; if (sp->s_ttyp->t_pgrp) { PGRP_LOCK(sp->s_ttyp->t_pgrp); pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1); PGRP_UNLOCK(sp->s_ttyp->t_pgrp); } /* XXX tp should be locked. */ sx_xunlock(&proctree_lock); (void) ttywait(tp); sx_xlock(&proctree_lock); /* * The tty could have been revoked * if we blocked. */ if (sp->s_ttyvp) { ttyvp = sp->s_ttyvp; SESS_LOCK(p->p_session); sp->s_ttyvp = NULL; SESS_UNLOCK(p->p_session); sx_xunlock(&proctree_lock); VOP_REVOKE(ttyvp, REVOKEALL); vrele(ttyvp); sx_xlock(&proctree_lock); } } if (sp->s_ttyvp) { ttyvp = sp->s_ttyvp; SESS_LOCK(p->p_session); sp->s_ttyvp = NULL; SESS_UNLOCK(p->p_session); vrele(ttyvp); } /* * s_ttyp is not zero'd; we use this to indicate * that the session once had a controlling terminal. 
* (for logging and informational purposes) */ } SESS_LOCK(p->p_session); sp->s_leader = NULL; SESS_UNLOCK(p->p_session); } fixjobc(p, p->p_pgrp, 0); sx_xunlock(&proctree_lock); (void)acct_process(td); mtx_unlock(&Giant); #ifdef KTRACE /* * release trace file */ PROC_LOCK(p); mtx_lock(&ktrace_mtx); p->p_traceflag = 0; /* don't trace the vrele() */ tracevp = p->p_tracevp; p->p_tracevp = NULL; tracecred = p->p_tracecred; p->p_tracecred = NULL; mtx_unlock(&ktrace_mtx); PROC_UNLOCK(p); if (tracevp != NULL) vrele(tracevp); if (tracecred != NULL) crfree(tracecred); #endif /* * Release reference to text vnode */ if ((vtmp = p->p_textvp) != NULL) { p->p_textvp = NULL; mtx_lock(&Giant); vrele(vtmp); mtx_unlock(&Giant); } /* * Release our limits structure. */ PROC_LOCK(p); plim = p->p_limit; p->p_limit = NULL; PROC_UNLOCK(p); lim_free(plim); /* * Release this thread's reference to the ucred. The actual proc * reference will stay around until the proc is harvested by * wait(). At this point the ucred is immutable (no other threads * from this proc are around that can change it) so we leave the * per-thread ucred pointer intact in case it is needed although * in theory nothing should be using it at this point. */ crfree(td->td_ucred); /* * Remove proc from allproc queue and pidhash chain. * Place onto zombproc. Unlink from parent's child list. */ sx_xlock(&allproc_lock); LIST_REMOVE(p, p_list); LIST_INSERT_HEAD(&zombproc, p, p_list); LIST_REMOVE(p, p_hash); sx_xunlock(&allproc_lock); sx_xlock(&proctree_lock); q = LIST_FIRST(&p->p_children); if (q != NULL) /* only need this if any child is S_ZOMB */ wakeup(initproc); for (; q != NULL; q = nq) { nq = LIST_NEXT(q, p_sibling); PROC_LOCK(q); proc_reparent(q, initproc); q->p_sigparent = SIGCHLD; /* * Traced processes are killed * since their existence means someone is screwing up. 
*/ if (q->p_flag & P_TRACED) { q->p_flag &= ~P_TRACED; psignal(q, SIGKILL); } PROC_UNLOCK(q); } /* * Save exit status and final rusage info, adding in child rusage * info and self times. */ mtx_lock(&Giant); PROC_LOCK(p); p->p_xstat = rv; *p->p_ru = p->p_stats->p_ru; mtx_lock_spin(&sched_lock); calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL); mtx_unlock_spin(&sched_lock); ruadd(p->p_ru, &p->p_stats->p_cru); /* * Notify interested parties of our demise. */ KNOTE(&p->p_klist, NOTE_EXIT); mtx_unlock(&Giant); /* * Just delete all entries in the p_klist. At this point we won't * report any more events, and there are nasty race conditions that * can beat us if we don't. */ while (SLIST_FIRST(&p->p_klist)) SLIST_REMOVE_HEAD(&p->p_klist, kn_selnext); /* * Notify parent that we're gone. If parent has the PS_NOCLDWAIT * flag set, or if the handler is set to SIG_IGN, notify process * 1 instead (and hope it will handle this situation). */ PROC_LOCK(p->p_pptr); mtx_lock(&p->p_pptr->p_sigacts->ps_mtx); if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) { struct proc *pp; mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx); pp = p->p_pptr; PROC_UNLOCK(pp); proc_reparent(p, initproc); p->p_sigparent = SIGCHLD; PROC_LOCK(p->p_pptr); /* * If this was the last child of our parent, notify * parent, so in case he was wait(2)ing, he will * continue. */ if (LIST_EMPTY(&pp->p_children)) wakeup(pp); } else mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx); if (p->p_pptr == initproc) psignal(p->p_pptr, SIGCHLD); else if (p->p_sigparent != 0) psignal(p->p_pptr, p->p_sigparent); PROC_UNLOCK(p->p_pptr); /* * If this is a kthread, then wakeup anyone waiting for it to exit. */ if (p->p_flag & P_KTHREAD) wakeup(p); PROC_UNLOCK(p); /* * Finally, call machine-dependent code to release the remaining * resources including address space. * The address space is released by "vmspace_exitfree(p)" in * vm_waitproc(). 
*/ cpu_exit(td); PROC_LOCK(p); PROC_LOCK(p->p_pptr); sx_xunlock(&proctree_lock); while (mtx_owned(&Giant)) mtx_unlock(&Giant); /* * We have to wait until after acquiring all locks before * changing p_state. We need to avoid any possible context * switches while marked as a zombie including blocking on * a mutex. */ mtx_lock_spin(&sched_lock); p->p_state = PRS_ZOMBIE; critical_enter(); mtx_unlock_spin(&sched_lock); wakeup(p->p_pptr); PROC_UNLOCK(p->p_pptr); mtx_lock_spin(&sched_lock); critical_exit(); cnt.v_swtch++; binuptime(PCPU_PTR(switchtime)); PCPU_SET(switchticks, ticks); cpu_sched_exit(td); /* XXXKSE check if this should be in thread_exit */ /* * Allow the scheduler to adjust the priority of the * parent when a kseg is exiting. */ if (p->p_pid != 1) sched_exit(p->p_pptr, p); /* * Make sure the scheduler takes this thread out of its tables etc. * This will also release this thread's reference to the ucred. * Other thread parts to release include pcb bits and such. */ thread_exit(); } #ifdef COMPAT_43 /* - * MPSAFE. The dirty work is handled by wait1(). + * MPSAFE. The dirty work is handled by kern_wait(). */ int owait(struct thread *td, struct owait_args *uap __unused) { - struct wait_args w; + int error, status; - w.options = 0; - w.rusage = NULL; - w.pid = WAIT_ANY; - w.status = NULL; - return (wait1(td, &w, 1)); + error = kern_wait(td, WAIT_ANY, &status, 0, NULL); + if (error == 0) + td->td_retval[1] = status; + return (error); } #endif /* COMPAT_43 */ /* - * MPSAFE. The dirty work is handled by wait1(). + * MPSAFE. The dirty work is handled by kern_wait(). 
*/ int wait4(struct thread *td, struct wait_args *uap) { + struct rusage ru; + int error, status; - return (wait1(td, uap, 0)); + error = kern_wait(td, uap->pid, &status, uap->options, &ru); + if (uap->status != NULL && error == 0) + error = copyout(&status, uap->status, sizeof(status)); + if (uap->rusage != NULL && error == 0) + error = copyout(&ru, uap->rusage, sizeof(struct rusage)); + return (error); } -/* - * MPSAFE - */ -static int -wait1(struct thread *td, struct wait_args *uap, int compat) +int +kern_wait(struct thread *td, pid_t pid, int *status, int options, struct rusage *rusage) { - struct rusage ru; int nfound; struct proc *p, *q, *t; - int status, error; + int error; q = td->td_proc; - if (uap->pid == 0) { + if (pid == 0) { PROC_LOCK(q); - uap->pid = -q->p_pgid; + pid = -q->p_pgid; PROC_UNLOCK(q); } - if (uap->options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE)) + if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE)) return (EINVAL); loop: nfound = 0; sx_xlock(&proctree_lock); LIST_FOREACH(p, &q->p_children, p_sibling) { PROC_LOCK(p); - if (uap->pid != WAIT_ANY && - p->p_pid != uap->pid && p->p_pgid != -uap->pid) { + if (pid != WAIT_ANY && + p->p_pid != pid && p->p_pgid != -pid) { PROC_UNLOCK(p); continue; } /* * This special case handles a kthread spawned by linux_clone * (see linux_misc.c). The linux_wait4 and linux_waitpid * functions need to be able to distinguish between waiting * on a process and waiting on a thread. It is a thread if * p_sigparent is not SIGCHLD, and the WLINUXCLONE option * signifies we want to wait for threads and not processes. 
*/ if ((p->p_sigparent != SIGCHLD) ^ - ((uap->options & WLINUXCLONE) != 0)) { + ((options & WLINUXCLONE) != 0)) { PROC_UNLOCK(p); continue; } nfound++; if (p->p_state == PRS_ZOMBIE) { td->td_retval[0] = p->p_pid; -#ifdef COMPAT_43 - if (compat) - td->td_retval[1] = p->p_xstat; - else -#endif - if (uap->status) { - status = p->p_xstat; /* convert to int */ - PROC_UNLOCK(p); - if ((error = copyout(&status, - uap->status, sizeof(status)))) { - sx_xunlock(&proctree_lock); - mtx_unlock(&Giant); - return (error); - } - PROC_LOCK(p); - } - if (uap->rusage) { - bcopy(p->p_ru, &ru, sizeof(ru)); - PROC_UNLOCK(p); - if ((error = copyout(&ru, - uap->rusage, sizeof (struct rusage)))) { - sx_xunlock(&proctree_lock); - mtx_unlock(&Giant); - return (error); - } - } else - PROC_UNLOCK(p); + if (status) + *status = p->p_xstat; /* convert to int */ + if (rusage) + bcopy(p->p_ru, rusage, sizeof(struct rusage)); + /* * If we got the child via a ptrace 'attach', * we need to give it back to the old parent. */ + PROC_UNLOCK(p); if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) { PROC_LOCK(p); p->p_oppid = 0; proc_reparent(p, t); PROC_UNLOCK(p); psignal(t, SIGCHLD); wakeup(t); PROC_UNLOCK(t); sx_xunlock(&proctree_lock); return (0); } /* * Remove other references to this process to ensure * we have an exclusive reference. */ sx_xlock(&allproc_lock); LIST_REMOVE(p, p_list); /* off zombproc */ sx_xunlock(&allproc_lock); LIST_REMOVE(p, p_sibling); leavepgrp(p); sx_xunlock(&proctree_lock); /* * As a side effect of this lock, we know that * all other writes to this proc are visible now, so * no more locking is needed for p. */ mtx_lock(&Giant); PROC_LOCK(p); p->p_xstat = 0; /* XXX: why? */ PROC_UNLOCK(p); PROC_LOCK(q); ruadd(&q->p_stats->p_cru, p->p_ru); PROC_UNLOCK(q); FREE(p->p_ru, M_ZOMBIE); p->p_ru = NULL; mtx_unlock(&Giant); /* * Decrement the count of procs running with this uid. 
*/ (void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0); /* * Free credentials, arguments, and sigacts */ crfree(p->p_ucred); p->p_ucred = NULL; pargs_drop(p->p_args); p->p_args = NULL; sigacts_free(p->p_sigacts); p->p_sigacts = NULL; /* * do any thread-system specific cleanups */ thread_wait(p); /* * Give vm and machine-dependent layer a chance * to free anything that cpu_exit couldn't * release while still running in process context. */ mtx_lock(&Giant); vm_waitproc(p); mtx_unlock(&Giant); #ifdef MAC mac_destroy_proc(p); #endif KASSERT(FIRST_THREAD_IN_PROC(p), - ("wait1: no residual thread!")); + ("kern_wait: no residual thread!")); uma_zfree(proc_zone, p); sx_xlock(&allproc_lock); nprocs--; sx_xunlock(&allproc_lock); return (0); } mtx_lock_spin(&sched_lock); if (P_SHOULDSTOP(p) && (p->p_suspcount == p->p_numthreads) && ((p->p_flag & P_WAITED) == 0) && - (p->p_flag & P_TRACED || uap->options & WUNTRACED)) { + (p->p_flag & P_TRACED || options & WUNTRACED)) { mtx_unlock_spin(&sched_lock); p->p_flag |= P_WAITED; sx_xunlock(&proctree_lock); td->td_retval[0] = p->p_pid; -#ifdef COMPAT_43 - if (compat) { - td->td_retval[1] = W_STOPCODE(p->p_xstat); - PROC_UNLOCK(p); - error = 0; - } else -#endif - if (uap->status) { - status = W_STOPCODE(p->p_xstat); - PROC_UNLOCK(p); - error = copyout(&status, - uap->status, sizeof(status)); - } else { - PROC_UNLOCK(p); - error = 0; - } - return (error); + if (status) + *status = W_STOPCODE(p->p_xstat); + PROC_UNLOCK(p); + return (0); } mtx_unlock_spin(&sched_lock); - if (uap->options & WCONTINUED && (p->p_flag & P_CONTINUED)) { + if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) { sx_xunlock(&proctree_lock); td->td_retval[0] = p->p_pid; p->p_flag &= ~P_CONTINUED; PROC_UNLOCK(p); - if (uap->status) { - status = SIGCONT; - error = copyout(&status, - uap->status, sizeof(status)); - } else - error = 0; - - return (error); + if (status) + *status = SIGCONT; + return (0); } PROC_UNLOCK(p); } if (nfound == 0) { sx_xunlock(&proctree_lock); 
return (ECHILD); } - if (uap->options & WNOHANG) { + if (options & WNOHANG) { sx_xunlock(&proctree_lock); td->td_retval[0] = 0; return (0); } PROC_LOCK(q); sx_xunlock(&proctree_lock); error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "wait", 0); PROC_UNLOCK(q); if (error) return (error); goto loop; } /* * Make process 'parent' the new parent of process 'child'. * Must be called with an exclusive hold of proctree lock. */ void proc_reparent(struct proc *child, struct proc *parent) { sx_assert(&proctree_lock, SX_XLOCKED); PROC_LOCK_ASSERT(child, MA_OWNED); if (child->p_pptr == parent) return; LIST_REMOVE(child, p_sibling); LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); child->p_pptr = parent; } Index: head/sys/sys/wait.h =================================================================== --- head/sys/sys/wait.h (revision 127139) +++ head/sys/sys/wait.h (revision 127140) @@ -1,114 +1,118 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)wait.h 8.2 (Berkeley) 7/10/94 * $FreeBSD$ */ #ifndef _SYS_WAIT_H_ #define _SYS_WAIT_H_ #include /* * This file holds definitions relevant to the wait4 system call and the * alternate interfaces that use it (wait, wait3, waitpid). */ /* * Macros to test the exit status returned by wait and extract the relevant * values. */ #if __BSD_VISIBLE #define _W_INT(w) (*(int *)&(w)) /* Convert union wait to int. */ #define WCOREFLAG 0200 #else #define _W_INT(i) (i) #endif #define _WSTATUS(x) (_W_INT(x) & 0177) #define _WSTOPPED 0177 /* _WSTATUS if process is stopped */ #define WIFSTOPPED(x) (_WSTATUS(x) == _WSTOPPED) #define WSTOPSIG(x) (_W_INT(x) >> 8) #define WIFSIGNALED(x) (_WSTATUS(x) != _WSTOPPED && _WSTATUS(x) != 0) #define WTERMSIG(x) (_WSTATUS(x)) #define WIFEXITED(x) (_WSTATUS(x) == 0) #define WEXITSTATUS(x) (_W_INT(x) >> 8) #define WIFCONTINUED(x) (x == 0x13) /* 0x13 == SIGCONT */ #if __BSD_VISIBLE #define WCOREDUMP(x) (_W_INT(x) & WCOREFLAG) #define W_EXITCODE(ret, sig) ((ret) << 8 | (sig)) #define W_STOPCODE(sig) ((sig) << 8 | _WSTOPPED) #endif /* * Option bits for the third argument of wait4. 
WNOHANG causes the * wait to not hang if there are no stopped or terminated processes, rather * returning an error indication in this case (pid==0). WUNTRACED * indicates that the caller should receive status about untraced children * which stop due to signals. If children are stopped and a wait without * this option is done, it is as though they were still running... nothing * about them is returned. */ #define WNOHANG 1 /* Don't hang in wait. */ #define WUNTRACED 2 /* Tell about stopped, untraced children. */ #define WCONTINUED 4 /* Report a job control continued process. */ #if __BSD_VISIBLE #define WLINUXCLONE 0x80000000 /* Wait for kthread spawned from linux_clone. */ #endif /* * Tokens for special values of the "pid" parameter to wait4. */ #if __BSD_VISIBLE #define WAIT_ANY (-1) /* any process */ #define WAIT_MYPGRP 0 /* any process in my process group */ #endif /* __BSD_VISIBLE */ -#ifndef _KERNEL +#ifdef _KERNEL +int kern_wait(struct thread *td, pid_t pid, int *status, int options, + struct rusage *rusage); + +#else #include __BEGIN_DECLS struct rusage; /* forward declaration */ pid_t wait(int *); pid_t waitpid(pid_t, int *, int); #if __BSD_VISIBLE pid_t wait3(int *, int, struct rusage *); pid_t wait4(pid_t, int *, int, struct rusage *); #endif __END_DECLS #endif #endif /* !_SYS_WAIT_H_ */