Index: head/sys/kern/kern_exec.c =================================================================== --- head/sys/kern/kern_exec.c (revision 151992) +++ head/sys/kern/kern_exec.c (revision 151993) @@ -1,1296 +1,1296 @@ /*- * Copyright (c) 1993, David Greenman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" #include "opt_ktrace.h" #include "opt_mac.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments"); static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS); static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS); static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS); static int do_execve(struct thread *td, struct image_args *args, struct mac *mac_p); /* XXX This should be vm_size_t. */ SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD, NULL, 0, sysctl_kern_ps_strings, "LU", ""); /* XXX This should be vm_size_t. 
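 *
 * (Aside, not part of this file: a minimal userland sketch of how the
 * read-only OIDs declared here are typically consumed through sysctl(2),
 * assuming <sys/types.h>, <sys/sysctl.h> and <stdio.h>; the variable names
 * are illustrative only.
 *
 *        int mib[2] = { CTL_KERN, KERN_USRSTACK };
 *        u_long usrstack;
 *        size_t len = sizeof(usrstack);
 *
 *        if (sysctl(mib, 2, &usrstack, &len, NULL, 0) == 0)
 *                printf("stack top: %#lx\n", usrstack);  /* format "LU" above */
 * )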
*/ SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD, NULL, 0, sysctl_kern_usrstack, "LU", ""); SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD, NULL, 0, sysctl_kern_stackprot, "I", ""); u_long ps_arg_cache_limit = PAGE_SIZE / 16; SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW, &ps_arg_cache_limit, 0, ""); static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS) { struct proc *p; int error; p = curproc; #ifdef SCTL_MASK32 if (req->flags & SCTL_MASK32) { unsigned int val; val = (unsigned int)p->p_sysent->sv_psstrings; error = SYSCTL_OUT(req, &val, sizeof(val)); } else #endif error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings, sizeof(p->p_sysent->sv_psstrings)); return error; } static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS) { struct proc *p; int error; p = curproc; #ifdef SCTL_MASK32 if (req->flags & SCTL_MASK32) { unsigned int val; val = (unsigned int)p->p_sysent->sv_usrstack; error = SYSCTL_OUT(req, &val, sizeof(val)); } else #endif error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack, sizeof(p->p_sysent->sv_usrstack)); return error; } static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS) { struct proc *p; p = curproc; return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot, sizeof(p->p_sysent->sv_stackprot))); } /* * Each of the items is a pointer to a `const struct execsw', hence the * double pointer here. */ static const struct execsw **execsw; #ifndef _SYS_SYSPROTO_H_ struct execve_args { char *fname; char **argv; char **envv; }; #endif /* * MPSAFE */ int execve(td, uap) struct thread *td; struct execve_args /* { char *fname; char **argv; char **envv; } */ *uap; { int error; struct image_args args; error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE, uap->argv, uap->envv); if (error == 0) error = kern_execve(td, &args, NULL); exec_free_args(&args); return (error); } #ifndef _SYS_SYSPROTO_H_ struct __mac_execve_args { char *fname; char **argv; char **envv; struct mac *mac_p; }; #endif /* * MPSAFE */ int __mac_execve(td, uap) struct thread *td; struct __mac_execve_args /* { char *fname; char **argv; char **envv; struct mac *mac_p; } */ *uap; { #ifdef MAC int error; struct image_args args; error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE, uap->argv, uap->envv); if (error == 0) error = kern_execve(td, &args, uap->mac_p); exec_free_args(&args); return (error); #else return (ENOSYS); #endif } /* * XXX: kern_execve has the astonishing property of not always * returning to the caller. If sufficiently bad things happen during * the call to do_execve(), it can end up calling exit1(); as a result, * callers must avoid doing anything which they might need to undo * (e.g., allocating memory). */ int kern_execve(td, args, mac_p) struct thread *td; struct image_args *args; struct mac *mac_p; { struct proc *p = td->td_proc; int error; if (p->p_flag & P_HADTHREADS) { PROC_LOCK(p); if (thread_single(SINGLE_BOUNDARY)) { PROC_UNLOCK(p); return (ERESTART); /* Try again later. */ } PROC_UNLOCK(p); } error = do_execve(td, args, mac_p); if (p->p_flag & P_HADTHREADS) { PROC_LOCK(p); /* * If success, we upgrade to SINGLE_EXIT state to * force other threads to suicide. */ if (error == 0) thread_single(SINGLE_EXIT); else thread_single_end(); PROC_UNLOCK(p); } return (error); } /* * In-kernel implementation of execve(). All arguments are assumed to be * userspace pointers from the passed thread. 
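 *
 * (Aside, a hedged sketch of one entry in the execsw[] table that the
 * activator loop below walks; "example_imgact", "example_execsw" and the
 * magic bytes are hypothetical.  An activator returns -1 to decline the
 * image, 0 on success, or an errno to abort the exec, and is registered
 * through exec_register(), usually via the EXEC_SET() macro:
 *
 *        static int
 *        example_imgact(struct image_params *imgp)
 *        {
 *                if (bcmp(imgp->image_header, "\177EXM", 4) != 0)
 *                        return (-1);    /* not ours; try the next activator */
 *                /* ...set up the vmspace, entry point, auxargs, etc... */
 *                return (0);             /* claimed; other values abort exec */
 *        }
 *
 *        static struct execsw example_execsw = { example_imgact, "example" };
 *        EXEC_SET(example, example_execsw);
 * )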
* * MPSAFE */ static int do_execve(td, args, mac_p) struct thread *td; struct image_args *args; struct mac *mac_p; { struct proc *p = td->td_proc; struct nameidata nd, *ndp; struct ucred *newcred = NULL, *oldcred; struct uidinfo *euip; register_t *stack_base; int error, len, i; struct image_params image_params, *imgp; struct vattr attr; int (*img_first)(struct image_params *); struct pargs *oldargs = NULL, *newargs = NULL; struct sigacts *oldsigacts, *newsigacts; #ifdef KTRACE struct vnode *tracevp = NULL; struct ucred *tracecred = NULL; #endif struct vnode *textvp = NULL; int credential_changing; int vfslocked; int textset; #ifdef MAC struct label *interplabel = NULL; int will_transition; #endif #ifdef HWPMC_HOOKS struct pmckern_procexec pe; #endif vfslocked = 0; imgp = &image_params; /* * Lock the process and set the P_INEXEC flag to indicate that * it should be left alone until we're done here. This is * necessary to avoid race conditions - e.g. in ptrace() - * that might allow a local user to illicitly obtain elevated * privileges. */ PROC_LOCK(p); KASSERT((p->p_flag & P_INEXEC) == 0, ("%s(): process already has P_INEXEC flag", __func__)); p->p_flag |= P_INEXEC; PROC_UNLOCK(p); /* * Initialize part of the common data */ imgp->proc = p; imgp->execlabel = NULL; imgp->attr = &attr; imgp->entry_addr = 0; imgp->vmspace_destroyed = 0; imgp->interpreted = 0; imgp->interpreter_name = args->buf + PATH_MAX + ARG_MAX; imgp->auxargs = NULL; imgp->vp = NULL; imgp->object = NULL; imgp->firstpage = NULL; imgp->ps_strings = 0; imgp->auxarg_size = 0; imgp->args = args; #ifdef MAC error = mac_execve_enter(imgp, mac_p); if (error) goto exec_fail; #endif imgp->image_header = NULL; /* * Translate the file name. namei() returns a vnode pointer * in ni_vp amoung other things. */ ndp = &nd; NDINIT(ndp, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME | MPSAFE, UIO_SYSSPACE, args->fname, td); interpret: error = namei(ndp); if (error) goto exec_fail; vfslocked = NDHASGIANT(ndp); imgp->vp = ndp->ni_vp; /* * Check file permissions (also 'opens' file) */ error = exec_check_permissions(imgp); if (error) goto exec_fail_dealloc; imgp->object = imgp->vp->v_object; if (imgp->object != NULL) vm_object_reference(imgp->object); /* * Set VV_TEXT now so no one can write to the executable while we're * activating it. * * Remember if this was set before and unset it in case this is not * actually an executable image. */ textset = imgp->vp->v_vflag & VV_TEXT; imgp->vp->v_vflag |= VV_TEXT; error = exec_map_first_page(imgp); if (error) goto exec_fail_dealloc; /* * If the current process has a special image activator it * wants to try first, call it. For example, emulating shell * scripts differently. */ error = -1; if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL) error = img_first(imgp); /* * Loop through the list of image activators, calling each one. * An activator returns -1 if there is no match, 0 on success, * and an error otherwise. */ for (i = 0; error == -1 && execsw[i]; ++i) { if (execsw[i]->ex_imgact == NULL || execsw[i]->ex_imgact == img_first) { continue; } error = (*execsw[i]->ex_imgact)(imgp); } if (error) { if (error == -1) { if (textset == 0) imgp->vp->v_vflag &= ~VV_TEXT; error = ENOEXEC; } goto exec_fail_dealloc; } /* * Special interpreter operation, cleanup and loop up to try to * activate the interpreter. */ if (imgp->interpreted) { exec_unmap_first_page(imgp); /* * VV_TEXT needs to be unset for scripts. 
There is a short * period before we determine that something is a script where * VV_TEXT will be set. The vnode lock is held over this * entire period so nothing should illegitimately be blocked. */ imgp->vp->v_vflag &= ~VV_TEXT; /* free name buffer and old vnode */ NDFREE(ndp, NDF_ONLY_PNBUF); #ifdef MAC interplabel = mac_vnode_label_alloc(); mac_copy_vnode_label(ndp->ni_vp->v_label, interplabel); #endif vput(ndp->ni_vp); vm_object_deallocate(imgp->object); imgp->object = NULL; VFS_UNLOCK_GIANT(vfslocked); vfslocked = 0; /* set new name to that of the interpreter */ NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME | MPSAFE, UIO_SYSSPACE, imgp->interpreter_name, td); goto interpret; } /* * Copy out strings (args and env) and initialize stack base */ if (p->p_sysent->sv_copyout_strings) stack_base = (*p->p_sysent->sv_copyout_strings)(imgp); else stack_base = exec_copyout_strings(imgp); /* * If custom stack fixup routine present for this process * let it do the stack setup. * Else stuff argument count as first item on stack */ if (p->p_sysent->sv_fixup != NULL) (*p->p_sysent->sv_fixup)(&stack_base, imgp); else suword(--stack_base, imgp->args->argc); /* * For security and other reasons, the file descriptor table cannot * be shared after an exec. */ fdunshare(p, td); /* Clear POSIX timers */ itimers_event_hook(p, ITIMER_EV_EXEC); /* * Malloc things before we need locks. */ newcred = crget(); euip = uifind(attr.va_uid); i = imgp->args->begin_envv - imgp->args->begin_argv; /* Cache arguments if they fit inside our allowance */ if (ps_arg_cache_limit >= i + sizeof(struct pargs)) { newargs = pargs_alloc(i); bcopy(imgp->args->begin_argv, newargs->ar_args, i); } /* close files on exec */ fdcloseexec(td); /* Get a reference to the vnode prior to locking the proc */ VREF(ndp->ni_vp); /* * For security and other reasons, signal handlers cannot * be shared after an exec. The new process gets a copy of the old * handlers. In execsigs(), the new process will have its signals * reset. */ PROC_LOCK(p); if (sigacts_shared(p->p_sigacts)) { oldsigacts = p->p_sigacts; PROC_UNLOCK(p); newsigacts = sigacts_alloc(); sigacts_copy(newsigacts, oldsigacts); PROC_LOCK(p); p->p_sigacts = newsigacts; } else oldsigacts = NULL; /* Stop profiling */ stopprofclock(p); /* reset caught signals */ execsigs(p); /* name this process - nameiexec(p, ndp) */ len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN); bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len); p->p_comm[len] = 0; /* * mark as execed, wakeup the process that vforked (if any) and tell * it that it now has its own resources back */ p->p_flag |= P_EXEC; if (p->p_pptr && (p->p_flag & P_PPWAIT)) { p->p_flag &= ~P_PPWAIT; wakeup(p->p_pptr); } /* * Implement image setuid/setgid. * * Don't honor setuid/setgid if the filesystem prohibits it or if * the process is being traced. * * XXXMAC: For the time being, use NOSUID to also prohibit * transitions on the file system. */ oldcred = p->p_ucred; credential_changing = 0; credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid != attr.va_uid; credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid != attr.va_gid; #ifdef MAC will_transition = mac_execve_will_transition(oldcred, imgp->vp, interplabel, imgp); credential_changing |= will_transition; #endif if (credential_changing && (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 && (p->p_flag & P_TRACED) == 0) { /* * Turn off syscall tracing for set-id programs, except for * root. 
Record any set-id flags first to make sure that * we do not regain any tracing during a possible block. */ setsugid(p); #ifdef KTRACE if (p->p_tracevp != NULL && suser_cred(oldcred, SUSER_ALLOWJAIL)) { mtx_lock(&ktrace_mtx); p->p_traceflag = 0; tracevp = p->p_tracevp; p->p_tracevp = NULL; tracecred = p->p_tracecred; p->p_tracecred = NULL; mtx_unlock(&ktrace_mtx); } #endif /* * Close any file descriptors 0..2 that reference procfs, * then make sure file descriptors 0..2 are in use. * * setugidsafety() may call closef() and then pfind() * which may grab the process lock. * fdcheckstd() may call falloc() which may block to * allocate memory, so temporarily drop the process lock. */ PROC_UNLOCK(p); setugidsafety(td); error = fdcheckstd(td); if (error != 0) goto done1; PROC_LOCK(p); /* * Set the new credentials. */ crcopy(newcred, oldcred); if (attr.va_mode & VSUID) change_euid(newcred, euip); if (attr.va_mode & VSGID) change_egid(newcred, attr.va_gid); #ifdef MAC if (will_transition) { mac_execve_transition(oldcred, newcred, imgp->vp, interplabel, imgp); } #endif /* * Implement correct POSIX saved-id behavior. * * XXXMAC: Note that the current logic will save the * uid and gid if a MAC domain transition occurs, even * though maybe it shouldn't. */ change_svuid(newcred, newcred->cr_uid); change_svgid(newcred, newcred->cr_gid); p->p_ucred = newcred; newcred = NULL; } else { if (oldcred->cr_uid == oldcred->cr_ruid && oldcred->cr_gid == oldcred->cr_rgid) p->p_flag &= ~P_SUGID; /* * Implement correct POSIX saved-id behavior. * * XXX: It's not clear that the existing behavior is * POSIX-compliant. A number of sources indicate that the * saved uid/gid should only be updated if the new ruid is * not equal to the old ruid, or the new euid is not equal * to the old euid and the new euid is not equal to the old * ruid. The FreeBSD code always updates the saved uid/gid. * Also, this code uses the new (replaced) euid and egid as * the source, which may or may not be the right ones to use. */ if (oldcred->cr_svuid != oldcred->cr_uid || oldcred->cr_svgid != oldcred->cr_gid) { crcopy(newcred, oldcred); change_svuid(newcred, newcred->cr_uid); change_svgid(newcred, newcred->cr_gid); p->p_ucred = newcred; newcred = NULL; } } /* * Store the vp for use in procfs. This vnode was referenced prior * to locking the proc lock. */ textvp = p->p_textvp; p->p_textvp = ndp->ni_vp; /* * Notify others that we exec'd, and clear the P_INEXEC flag * as we're now a bona fide freshly-execed process. */ KNOTE_LOCKED(&p->p_klist, NOTE_EXEC); p->p_flag &= ~P_INEXEC; /* * If tracing the process, trap to debugger so breakpoints * can be set before the program executes. * Use tdsignal to deliver signal to current thread, use * psignal may cause the signal to be delivered to wrong thread * because that thread will exit, remember we are going to enter * single thread mode. */ if (p->p_flag & P_TRACED) - tdsignal(td, SIGTRAP, NULL, SIGTARGET_TD); + tdsignal(p, td, SIGTRAP, NULL); /* clear "fork but no exec" flag, as we _are_ execing */ p->p_acflag &= ~AFORK; /* * Free any previous argument cache and replace it with * the new argument cache, if any. */ oldargs = p->p_args; p->p_args = newargs; newargs = NULL; #ifdef HWPMC_HOOKS /* * Check if system-wide sampling is in effect or if the * current process is using PMCs. If so, do exec() time * processing. This processing needs to happen AFTER the * P_INEXEC flag is cleared. * * The proc lock needs to be released before taking the PMC * SX. 
*/ if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) { PROC_UNLOCK(p); pe.pm_credentialschanged = credential_changing; pe.pm_entryaddr = imgp->entry_addr; PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe); } else PROC_UNLOCK(p); #else /* !HWPMC_HOOKS */ PROC_UNLOCK(p); #endif /* Set values passed into the program in registers. */ if (p->p_sysent->sv_setregs) (*p->p_sysent->sv_setregs)(td, imgp->entry_addr, (u_long)(uintptr_t)stack_base, imgp->ps_strings); else exec_setregs(td, imgp->entry_addr, (u_long)(uintptr_t)stack_base, imgp->ps_strings); vfs_mark_atime(imgp->vp, td); done1: /* * Free any resources malloc'd earlier that we didn't use. */ uifree(euip); if (newcred == NULL) crfree(oldcred); else crfree(newcred); /* * Handle deferred decrement of ref counts. */ if (textvp != NULL) vrele(textvp); if (ndp->ni_vp && error != 0) vrele(ndp->ni_vp); #ifdef KTRACE if (tracevp != NULL) vrele(tracevp); if (tracecred != NULL) crfree(tracecred); #endif if (oldargs != NULL) pargs_drop(oldargs); if (newargs != NULL) pargs_drop(newargs); if (oldsigacts != NULL) sigacts_free(oldsigacts); exec_fail_dealloc: /* * free various allocated resources */ if (imgp->firstpage != NULL) exec_unmap_first_page(imgp); if (imgp->vp != NULL) { NDFREE(ndp, NDF_ONLY_PNBUF); vput(imgp->vp); } if (imgp->object != NULL) vm_object_deallocate(imgp->object); if (error == 0) { /* * Stop the process here if its stop event mask has * the S_EXEC bit set. */ STOPEVENT(p, S_EXEC, 0); goto done2; } exec_fail: /* we're done here, clear P_INEXEC */ PROC_LOCK(p); p->p_flag &= ~P_INEXEC; PROC_UNLOCK(p); if (imgp->vmspace_destroyed) { /* sorry, no more process anymore. exit gracefully */ #ifdef MAC mac_execve_exit(imgp); if (interplabel != NULL) mac_vnode_label_free(interplabel); #endif VFS_UNLOCK_GIANT(vfslocked); exec_free_args(args); exit1(td, W_EXITCODE(0, SIGABRT)); /* NOT REACHED */ error = 0; } done2: #ifdef MAC mac_execve_exit(imgp); if (interplabel != NULL) mac_vnode_label_free(interplabel); #endif VFS_UNLOCK_GIANT(vfslocked); return (error); } int exec_map_first_page(imgp) struct image_params *imgp; { int rv, i; int initial_pagein; vm_page_t ma[VM_INITIAL_PAGEIN]; vm_object_t object; if (imgp->firstpage != NULL) exec_unmap_first_page(imgp); object = imgp->vp->v_object; if (object == NULL) return (EACCES); VM_OBJECT_LOCK(object); ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) { initial_pagein = VM_INITIAL_PAGEIN; if (initial_pagein > object->size) initial_pagein = object->size; for (i = 1; i < initial_pagein; i++) { if ((ma[i] = vm_page_lookup(object, i)) != NULL) { if (ma[i]->valid) break; vm_page_lock_queues(); if ((ma[i]->flags & PG_BUSY) || ma[i]->busy) { vm_page_unlock_queues(); break; } vm_page_busy(ma[i]); vm_page_unlock_queues(); } else { ma[i] = vm_page_alloc(object, i, VM_ALLOC_NORMAL); if (ma[i] == NULL) break; } } initial_pagein = i; rv = vm_pager_get_pages(object, ma, initial_pagein, 0); ma[0] = vm_page_lookup(object, 0); if ((rv != VM_PAGER_OK) || (ma[0] == NULL) || (ma[0]->valid == 0)) { if (ma[0]) { vm_page_lock_queues(); pmap_remove_all(ma[0]); vm_page_free(ma[0]); vm_page_unlock_queues(); } VM_OBJECT_UNLOCK(object); return (EIO); } } vm_page_lock_queues(); vm_page_hold(ma[0]); vm_page_wakeup(ma[0]); vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); imgp->firstpage = sf_buf_alloc(ma[0], 0); imgp->image_header = (char *)sf_buf_kva(imgp->firstpage); return (0); } void exec_unmap_first_page(imgp) struct image_params 
*imgp; { vm_page_t m; if (imgp->firstpage != NULL) { m = sf_buf_page(imgp->firstpage); sf_buf_free(imgp->firstpage); imgp->firstpage = NULL; vm_page_lock_queues(); vm_page_unhold(m); vm_page_unlock_queues(); } } /* * Destroy old address space, and allocate a new stack * The new stack is only SGROWSIZ large because it is grown * automatically in trap.c. */ int exec_new_vmspace(imgp, sv) struct image_params *imgp; struct sysentvec *sv; { int error; struct proc *p = imgp->proc; struct vmspace *vmspace = p->p_vmspace; vm_offset_t stack_addr; vm_map_t map; imgp->vmspace_destroyed = 1; /* Called with Giant held, do not depend on it! */ EVENTHANDLER_INVOKE(process_exec, p); /* * Here is as good a place as any to do any resource limit cleanups. * This is needed if a 64 bit binary exec's a 32 bit binary - the * data size limit may need to be changed to a value that makes * sense for the 32 bit binary. */ if (sv->sv_fixlimits != NULL) sv->sv_fixlimits(p); /* * Blow away entire process VM, if address space not shared, * otherwise, create a new VM space so that other threads are * not disrupted */ map = &vmspace->vm_map; if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser && vm_map_max(map) == sv->sv_maxuser) { shmexit(vmspace); pmap_remove_pages(vmspace_pmap(vmspace), vm_map_min(map), vm_map_max(map)); vm_map_remove(map, vm_map_min(map), vm_map_max(map)); } else { vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser); vmspace = p->p_vmspace; map = &vmspace->vm_map; } /* Allocate a new stack */ stack_addr = sv->sv_usrstack - maxssiz; error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz, sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN); if (error) return (error); #ifdef __ia64__ /* Allocate a new register stack */ stack_addr = IA64_BACKINGSTORE; error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz, sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP); if (error) return (error); #endif /* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the * VM_STACK case, but they are still used to monitor the size of the * process stack so we can check the stack rlimit. */ vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT; vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - maxssiz; return (0); } /* * Copy out argument and environment strings from the old process * address space into the temporary string buffer. */ int exec_copyin_args(struct image_args *args, char *fname, enum uio_seg segflg, char **argv, char **envv) { char *argp, *envp; int error; size_t length; error = 0; bzero(args, sizeof(*args)); if (argv == NULL) return (EFAULT); /* * Allocate temporary demand zeroed space for argument and * environment strings: * * o ARG_MAX for argument and environment; * o MAXSHELLCMDLEN for the name of interpreters. */ args->buf = (char *) kmem_alloc_wait(exec_map, PATH_MAX + ARG_MAX + MAXSHELLCMDLEN); if (args->buf == NULL) return (ENOMEM); args->begin_argv = args->buf; args->endp = args->begin_argv; args->stringspace = ARG_MAX; args->fname = args->buf + ARG_MAX; /* * Copy the file name. */ error = (segflg == UIO_SYSSPACE) ? 
copystr(fname, args->fname, PATH_MAX, &length) : copyinstr(fname, args->fname, PATH_MAX, &length); if (error != 0) return (error); /* * extract arguments first */ while ((argp = (caddr_t) (intptr_t) fuword(argv++))) { if (argp == (caddr_t) -1) return (EFAULT); if ((error = copyinstr(argp, args->endp, args->stringspace, &length))) { if (error == ENAMETOOLONG) return (E2BIG); return (error); } args->stringspace -= length; args->endp += length; args->argc++; } args->begin_envv = args->endp; /* * extract environment strings */ if (envv) { while ((envp = (caddr_t)(intptr_t)fuword(envv++))) { if (envp == (caddr_t)-1) return (EFAULT); if ((error = copyinstr(envp, args->endp, args->stringspace, &length))) { if (error == ENAMETOOLONG) return (E2BIG); return (error); } args->stringspace -= length; args->endp += length; args->envc++; } } return (0); } void exec_free_args(struct image_args *args) { if (args->buf) { kmem_free_wakeup(exec_map, (vm_offset_t)args->buf, PATH_MAX + ARG_MAX + MAXSHELLCMDLEN); args->buf = NULL; } } /* * Copy strings out to the new process address space, constructing * new arg and env vector tables. Return a pointer to the base * so that it can be used as the initial stack pointer. */ register_t * exec_copyout_strings(imgp) struct image_params *imgp; { int argc, envc; char **vectp; char *stringp, *destp; register_t *stack_base; struct ps_strings *arginfo; struct proc *p; int szsigcode; /* * Calculate string base and vector table pointers. * Also deal with signal trampoline code for this exec type. */ p = imgp->proc; szsigcode = 0; arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings; if (p->p_sysent->sv_szsigcode != NULL) szsigcode = *(p->p_sysent->sv_szsigcode); destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE - roundup((ARG_MAX - imgp->args->stringspace), sizeof(char *)); /* * install sigcode */ if (szsigcode) copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo - szsigcode), szsigcode); /* * If we have a valid auxargs ptr, prepare some room * on the stack. */ if (imgp->auxargs) { /* * 'AT_COUNT*2' is size for the ELF Auxargs data. This is for * lower compatibility. */ imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size : (AT_COUNT * 2); /* * The '+ 2' is for the null pointers at the end of each of * the arg and env vector sets,and imgp->auxarg_size is room * for argument of Runtime loader. */ vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2 + imgp->auxarg_size) * sizeof(char *)); } else { /* * The '+ 2' is for the null pointers at the end of each of * the arg and env vector sets */ vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2) * sizeof(char *)); } /* * vectp also becomes our initial stack base */ stack_base = (register_t *)vectp; stringp = imgp->args->begin_argv; argc = imgp->args->argc; envc = imgp->args->envc; /* * Copy out strings - arguments and environment. */ copyout(stringp, destp, ARG_MAX - imgp->args->stringspace); /* * Fill in "ps_strings" struct for ps, w, etc. */ suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp); suword(&arginfo->ps_nargvstr, argc); /* * Fill in argument portion of vector table. */ for (; argc > 0; --argc) { suword(vectp++, (long)(intptr_t)destp); while (*stringp++ != 0) destp++; destp++; } /* a null vector table pointer separates the argp's from the envp's */ suword(vectp++, 0); suword(&arginfo->ps_envstr, (long)(intptr_t)vectp); suword(&arginfo->ps_nenvstr, envc); /* * Fill in environment portion of vector table. 
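 *
 * (Recap, derived from the code above: the layout exec_copyout_strings()
 * leaves just below sv_psstrings, from high to low addresses --
 *
 *        arginfo     ->  struct ps_strings (argv/envv pointers and counts)
 *                        signal trampoline (szsigcode bytes, ends at arginfo)
 *                        SPARE_USRSPACE gap
 *        destp       ->  argument + environment strings (destp is their low end)
 *                        room for runtime-loader auxargs, when imgp->auxargs is set
 *        vectp       ->  argv[0..argc-1], NULL, envv[0..envc-1], NULL
 *        stack_base  ->  returned to do_execve(), which pushes argc just below
 *                        unless sv_fixup takes over.)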
*/ for (; envc > 0; --envc) { suword(vectp++, (long)(intptr_t)destp); while (*stringp++ != 0) destp++; destp++; } /* end of vector table is a null pointer */ suword(vectp, 0); return (stack_base); } /* * Check permissions of file to execute. * Called with imgp->vp locked. * Return 0 for success or error code on failure. */ int exec_check_permissions(imgp) struct image_params *imgp; { struct vnode *vp = imgp->vp; struct vattr *attr = imgp->attr; struct thread *td; int error; td = curthread; /* XXXKSE */ /* Get file attributes */ error = VOP_GETATTR(vp, attr, td->td_ucred, td); if (error) return (error); #ifdef MAC error = mac_check_vnode_exec(td->td_ucred, imgp->vp, imgp); if (error) return (error); #endif /* * 1) Check if file execution is disabled for the filesystem that this * file resides on. * 2) Insure that at least one execute bit is on - otherwise root * will always succeed, and we don't want to happen unless the * file really is executable. * 3) Insure that the file is a regular file. */ if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || ((attr->va_mode & 0111) == 0) || (attr->va_type != VREG)) return (EACCES); /* * Zero length files can't be exec'd */ if (attr->va_size == 0) return (ENOEXEC); /* * Check for execute permission to file based on current credentials. */ error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); if (error) return (error); /* * Check number of open-for-writes on the file and deny execution * if there are any. */ if (vp->v_writecount) return (ETXTBSY); /* * Call filesystem specific open routine (which does nothing in the * general case). */ error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1); return (error); } /* * Exec handler registration */ int exec_register(execsw_arg) const struct execsw *execsw_arg; { const struct execsw **es, **xs, **newexecsw; int count = 2; /* New slot and trailing NULL */ if (execsw) for (es = execsw; *es; es++) count++; newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); if (newexecsw == NULL) return (ENOMEM); xs = newexecsw; if (execsw) for (es = execsw; *es; es++) *xs++ = *es; *xs++ = execsw_arg; *xs = NULL; if (execsw) free(execsw, M_TEMP); execsw = newexecsw; return (0); } int exec_unregister(execsw_arg) const struct execsw *execsw_arg; { const struct execsw **es, **xs, **newexecsw; int count = 1; if (execsw == NULL) panic("unregister with no handlers left?\n"); for (es = execsw; *es; es++) { if (*es == execsw_arg) break; } if (*es == NULL) return (ENOENT); for (es = execsw; *es; es++) if (*es != execsw_arg) count++; newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); if (newexecsw == NULL) return (ENOMEM); xs = newexecsw; for (es = execsw; *es; es++) if (*es != execsw_arg) *xs++ = *es; *xs = NULL; if (execsw) free(execsw, M_TEMP); execsw = newexecsw; return (0); } Index: head/sys/kern/kern_kse.c =================================================================== --- head/sys/kern/kern_kse.c (revision 151992) +++ head/sys/kern/kern_kse.c (revision 151993) @@ -1,1468 +1,1468 @@ /*- * Copyright (C) 2001 Julian Elischer . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. 
Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * KSEGRP related storage. */ static uma_zone_t upcall_zone; /* DEBUG ONLY */ extern int virtual_cpu; extern int thread_debug; extern int max_threads_per_proc; extern int max_groups_per_proc; extern int max_threads_hits; extern struct mtx kse_zombie_lock; TAILQ_HEAD(, kse_upcall) zombie_upcalls = TAILQ_HEAD_INITIALIZER(zombie_upcalls); static int thread_update_usr_ticks(struct thread *td); static void thread_alloc_spare(struct thread *td); struct kse_upcall * upcall_alloc(void) { struct kse_upcall *ku; ku = uma_zalloc(upcall_zone, M_WAITOK | M_ZERO); return (ku); } void upcall_free(struct kse_upcall *ku) { uma_zfree(upcall_zone, ku); } void upcall_link(struct kse_upcall *ku, struct ksegrp *kg) { mtx_assert(&sched_lock, MA_OWNED); TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link); ku->ku_ksegrp = kg; kg->kg_numupcalls++; } void upcall_unlink(struct kse_upcall *ku) { struct ksegrp *kg = ku->ku_ksegrp; mtx_assert(&sched_lock, MA_OWNED); KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__)); TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link); kg->kg_numupcalls--; upcall_stash(ku); } void upcall_remove(struct thread *td) { mtx_assert(&sched_lock, MA_OWNED); if (td->td_upcall != NULL) { td->td_upcall->ku_owner = NULL; upcall_unlink(td->td_upcall); td->td_upcall = NULL; } } #ifndef _SYS_SYSPROTO_H_ struct kse_switchin_args { struct kse_thr_mailbox *tmbx; int flags; }; #endif int kse_switchin(struct thread *td, struct kse_switchin_args *uap) { struct kse_thr_mailbox tmbx; struct kse_upcall *ku; int error; if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) return (EINVAL); error = (uap->tmbx == NULL) ? EINVAL : 0; if (!error) error = copyin(uap->tmbx, &tmbx, sizeof(tmbx)); if (!error && (uap->flags & KSE_SWITCHIN_SETTMBX)) error = (suword(&ku->ku_mailbox->km_curthread, (long)uap->tmbx) != 0 ? EINVAL : 0); if (!error) error = set_mcontext(td, &tmbx.tm_context.uc_mcontext); if (!error) { suword32(&uap->tmbx->tm_lwp, td->td_tid); if (uap->flags & KSE_SWITCHIN_SETTMBX) { td->td_mailbox = uap->tmbx; td->td_pflags |= TDP_CAN_UNBIND; } if (td->td_proc->p_flag & P_TRACED) { if (tmbx.tm_dflags & TMDF_SSTEP) ptrace_single_step(td); else ptrace_clear_single_step(td); if (tmbx.tm_dflags & TMDF_SUSPEND) { mtx_lock_spin(&sched_lock); /* fuword can block, check again */ if (td->td_upcall) ku->ku_flags |= KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } } } return ((error == 0) ? 
EJUSTRETURN : error); } /* struct kse_thr_interrupt_args { struct kse_thr_mailbox * tmbx; int cmd; long data; }; */ int kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) { struct kse_execve_args args; struct image_args iargs; struct proc *p; struct thread *td2; struct kse_upcall *ku; struct kse_thr_mailbox *tmbx; uint32_t flags; int error; p = td->td_proc; if (!(p->p_flag & P_SA)) return (EINVAL); switch (uap->cmd) { case KSE_INTR_SENDSIG: if (uap->data < 0 || uap->data > _SIG_MAXSIG) return (EINVAL); case KSE_INTR_INTERRUPT: case KSE_INTR_RESTART: PROC_LOCK(p); mtx_lock_spin(&sched_lock); FOREACH_THREAD_IN_PROC(p, td2) { if (td2->td_mailbox == uap->tmbx) break; } if (td2 == NULL) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (ESRCH); } if (uap->cmd == KSE_INTR_SENDSIG) { if (uap->data > 0) { td2->td_flags &= ~TDF_INTERRUPT; mtx_unlock_spin(&sched_lock); - tdsignal(td2, (int)uap->data, NULL, SIGTARGET_TD); + tdsignal(p, td2, (int)uap->data, NULL); } else { mtx_unlock_spin(&sched_lock); } } else { td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING; if (TD_CAN_UNBIND(td2)) td2->td_upcall->ku_flags |= KUF_DOUPCALL; if (uap->cmd == KSE_INTR_INTERRUPT) td2->td_intrval = EINTR; else td2->td_intrval = ERESTART; if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) sleepq_abort(td2); mtx_unlock_spin(&sched_lock); } PROC_UNLOCK(p); break; case KSE_INTR_SIGEXIT: if (uap->data < 1 || uap->data > _SIG_MAXSIG) return (EINVAL); PROC_LOCK(p); sigexit(td, (int)uap->data); break; case KSE_INTR_DBSUSPEND: /* this sub-function is only for bound thread */ if (td->td_pflags & TDP_SA) return (EINVAL); ku = td->td_upcall; tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); if (tmbx == NULL || tmbx == (void *)-1) return (EINVAL); flags = 0; while ((p->p_flag & P_TRACED) && !(p->p_flag & P_SINGLE_EXIT)) { flags = fuword32(&tmbx->tm_dflags); if (!(flags & TMDF_SUSPEND)) break; PROC_LOCK(p); mtx_lock_spin(&sched_lock); thread_stopped(p); thread_suspend_one(td); PROC_UNLOCK(p); mi_switch(SW_VOL, NULL); mtx_unlock_spin(&sched_lock); } return (0); case KSE_INTR_EXECVE: error = copyin((void *)uap->data, &args, sizeof(args)); if (error) return (error); error = exec_copyin_args(&iargs, args.path, UIO_USERSPACE, args.argv, args.envp); if (error == 0) error = kern_execve(td, &iargs, NULL); exec_free_args(&iargs); if (error == 0) { PROC_LOCK(p); SIGSETOR(td->td_siglist, args.sigpend); PROC_UNLOCK(p); kern_sigprocmask(td, SIG_SETMASK, &args.sigmask, NULL, 0); } return (error); default: return (EINVAL); } return (0); } /* struct kse_exit_args { register_t dummy; }; */ int kse_exit(struct thread *td, struct kse_exit_args *uap) { struct proc *p; struct ksegrp *kg; struct kse_upcall *ku, *ku2; int error, count; p = td->td_proc; /* * Ensure that this is only called from the UTS */ if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) return (EINVAL); kg = td->td_ksegrp; count = 0; /* * Calculate the existing non-exiting upcalls in this ksegroup. * If we are the last upcall but there are still other threads, * then do not exit. We need the other threads to be able to * complete whatever they are doing. * XXX This relies on the userland knowing what to do if we return. * It may be a better choice to convert ourselves into a kse_release * ( or similar) and wait in the kernel to be needed. 
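 *
 * (Looking back at kse_thr_interrupt() above -- an illustrative, hypothetical
 * userland call; the argument order follows the args struct quoted there and
 * "thr_mbx" is an assumed thread mailbox, not something defined here:
 *
 *        kse_thr_interrupt(&thr_mbx, KSE_INTR_SENDSIG, SIGUSR1);
 *        kse_thr_interrupt(&thr_mbx, KSE_INTR_INTERRUPT, 0);
 * )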
*/ PROC_LOCK(p); mtx_lock_spin(&sched_lock); FOREACH_UPCALL_IN_GROUP(kg, ku2) { if (ku2->ku_flags & KUF_EXITING) count++; } if ((kg->kg_numupcalls - count) == 1 && (kg->kg_numthreads > 1)) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (EDEADLK); } ku->ku_flags |= KUF_EXITING; mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); /* * Mark the UTS mailbox as having been finished with. * If that fails then just go for a segfault. * XXX need to check it that can be deliverred without a mailbox. */ error = suword32(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE); if (!(td->td_pflags & TDP_SA)) if (suword32(&td->td_mailbox->tm_lwp, 0)) error = EFAULT; PROC_LOCK(p); if (error) psignal(p, SIGSEGV); sigqueue_flush(&td->td_sigqueue); mtx_lock_spin(&sched_lock); upcall_remove(td); if (p->p_numthreads != 1) { /* * If we are not the last thread, but we are the last * thread in this ksegrp, then by definition this is not * the last group and we need to clean it up as well. * thread_exit will clean up the kseg as needed. */ thread_stopped(p); thread_exit(); /* NOTREACHED */ } /* * This is the last thread. Just return to the user. * We know that there is only one ksegrp too, as any others * would have been discarded in previous calls to thread_exit(). * Effectively we have left threading mode.. * The only real thing left to do is ensure that the * scheduler sets out concurrency back to 1 as that may be a * resource leak otherwise. * This is an A[PB]I issue.. what SHOULD we do? * One possibility is to return to the user. It may not cope well. * The other possibility would be to let the process exit. */ thread_unthread(td); mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); #if 1 return (0); #else exit1(td, 0); #endif } /* * Either becomes an upcall or waits for an awakening event and * then becomes an upcall. Only error cases return. */ /* struct kse_release_args { struct timespec *timeout; }; */ int kse_release(struct thread *td, struct kse_release_args *uap) { struct proc *p; struct ksegrp *kg; struct kse_upcall *ku; struct timespec timeout; struct timeval tv; sigset_t sigset; int error; p = td->td_proc; kg = td->td_ksegrp; if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) return (EINVAL); if (uap->timeout != NULL) { if ((error = copyin(uap->timeout, &timeout, sizeof(timeout)))) return (error); TIMESPEC_TO_TIMEVAL(&tv, &timeout); } if (td->td_pflags & TDP_SA) td->td_pflags |= TDP_UPCALLING; else { ku->ku_mflags = fuword32(&ku->ku_mailbox->km_flags); if (ku->ku_mflags == -1) { PROC_LOCK(p); sigexit(td, SIGSEGV); } } PROC_LOCK(p); if (ku->ku_mflags & KMF_WAITSIGEVENT) { /* UTS wants to wait for signal event */ if (!(p->p_flag & P_SIGEVENT) && !(ku->ku_flags & KUF_DOUPCALL)) { td->td_kflags |= TDK_KSERELSIG; error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH, "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0)); td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP); } p->p_flag &= ~P_SIGEVENT; sigset = p->p_siglist; PROC_UNLOCK(p); error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught, sizeof(sigset)); } else { if ((ku->ku_flags & KUF_DOUPCALL) == 0 && ((ku->ku_mflags & KMF_NOCOMPLETED) || (kg->kg_completed == NULL))) { kg->kg_upsleeps++; td->td_kflags |= TDK_KSEREL; error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, "kserel", (uap->timeout ? 
tvtohz(&tv) : 0)); td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP); kg->kg_upsleeps--; } PROC_UNLOCK(p); } if (ku->ku_flags & KUF_DOUPCALL) { mtx_lock_spin(&sched_lock); ku->ku_flags &= ~KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } return (0); } /* struct kse_wakeup_args { struct kse_mailbox *mbx; }; */ int kse_wakeup(struct thread *td, struct kse_wakeup_args *uap) { struct proc *p; struct ksegrp *kg; struct kse_upcall *ku; struct thread *td2; p = td->td_proc; td2 = NULL; ku = NULL; /* KSE-enabled processes only, please. */ if (!(p->p_flag & P_SA)) return (EINVAL); PROC_LOCK(p); mtx_lock_spin(&sched_lock); if (uap->mbx) { FOREACH_KSEGRP_IN_PROC(p, kg) { FOREACH_UPCALL_IN_GROUP(kg, ku) { if (ku->ku_mailbox == uap->mbx) break; } if (ku) break; } } else { kg = td->td_ksegrp; if (kg->kg_upsleeps) { mtx_unlock_spin(&sched_lock); wakeup(&kg->kg_completed); PROC_UNLOCK(p); return (0); } ku = TAILQ_FIRST(&kg->kg_upcalls); } if (ku == NULL) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (ESRCH); } if ((td2 = ku->ku_owner) == NULL) { mtx_unlock_spin(&sched_lock); panic("%s: no owner", __func__); } else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) { mtx_unlock_spin(&sched_lock); if (!(td2->td_kflags & TDK_WAKEUP)) { td2->td_kflags |= TDK_WAKEUP; if (td2->td_kflags & TDK_KSEREL) sleepq_remove(td2, &kg->kg_completed); else sleepq_remove(td2, &p->p_siglist); } } else { ku->ku_flags |= KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } PROC_UNLOCK(p); return (0); } /* * No new KSEG: first call: use current KSE, don't schedule an upcall * All other situations, do allocate max new KSEs and schedule an upcall. * * XXX should be changed so that 'first' behaviour lasts for as long * as you have not made a kse in this ksegrp. i.e. as long as we do not have * a mailbox.. */ /* struct kse_create_args { struct kse_mailbox *mbx; int newgroup; }; */ int kse_create(struct thread *td, struct kse_create_args *uap) { struct ksegrp *newkg; struct ksegrp *kg; struct proc *p; struct kse_mailbox mbx; struct kse_upcall *newku; int err, ncpus, sa = 0, first = 0; struct thread *newtd; p = td->td_proc; kg = td->td_ksegrp; if ((err = copyin(uap->mbx, &mbx, sizeof(mbx)))) return (err); ncpus = mp_ncpus; if (virtual_cpu != 0) ncpus = virtual_cpu; /* * If the new UTS mailbox says that this * will be a BOUND lwp, then it had better * have its thread mailbox already there. * In addition, this ksegrp will be limited to * a concurrency of 1. There is more on this later. */ if (mbx.km_flags & KMF_BOUND) { if (mbx.km_curthread == NULL) return (EINVAL); ncpus = 1; } else { sa = TDP_SA; } PROC_LOCK(p); /* * Processes using the other threading model can't * suddenly start calling this one */ if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) { PROC_UNLOCK(p); return (EINVAL); } /* * Limit it to NCPU upcall contexts per ksegrp in any case. * There is a small race here as we don't hold proclock * until we inc the ksegrp count, but it's not really a big problem * if we get one too many, but we save a proc lock. */ if ((!uap->newgroup) && (kg->kg_numupcalls >= ncpus)) { PROC_UNLOCK(p); return (EPROCLIM); } if (!(p->p_flag & P_SA)) { first = 1; p->p_flag |= P_SA|P_HADTHREADS; } PROC_UNLOCK(p); /* * Now pay attention! * If we are going to be bound, then we need to be either * a new group, or the first call ever. In either * case we will be creating (or be) the only thread in a group. * and the concurrency will be set to 1. * This is not quite right, as we may still make ourself * bound after making other ksegrps but it will do for now. 
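 *
 * (A hedged sketch of how a threading library might drive kse_create() for a
 * multiplexed (SA) ksegrp; "uts_entry", "uts_stack" and UTS_STACK_SIZE are
 * illustrative assumptions, while the mailbox fields are the ones consumed
 * below:
 *
 *        struct kse_mailbox mbx;
 *
 *        bzero(&mbx, sizeof(mbx));
 *        mbx.km_flags = 0;               /* KMF_BOUND would need km_curthread */
 *        mbx.km_func = uts_entry;        /* upcall entry point */
 *        mbx.km_stack.ss_sp = uts_stack;
 *        mbx.km_stack.ss_size = UTS_STACK_SIZE;
 *        kse_create(&mbx, 0);            /* newgroup == 0: caller's ksegrp */
 * )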
* The library will only try do this much. */ if (!sa && !(uap->newgroup || first)) return (EINVAL); if (uap->newgroup) { newkg = ksegrp_alloc(); bzero(&newkg->kg_startzero, __rangeof(struct ksegrp, kg_startzero, kg_endzero)); bcopy(&kg->kg_startcopy, &newkg->kg_startcopy, __rangeof(struct ksegrp, kg_startcopy, kg_endcopy)); sched_init_concurrency(newkg); PROC_LOCK(p); if (p->p_numksegrps >= max_groups_per_proc) { PROC_UNLOCK(p); ksegrp_free(newkg); return (EPROCLIM); } ksegrp_link(newkg, p); mtx_lock_spin(&sched_lock); sched_fork_ksegrp(td, newkg); mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } else { /* * We want to make a thread in our own ksegrp. * If we are just the first call, either kind * is ok, but if not then either we must be * already an upcallable thread to make another, * or a bound thread to make one of those. * Once again, not quite right but good enough for now.. XXXKSE */ if (!first && ((td->td_pflags & TDP_SA) != sa)) return (EINVAL); newkg = kg; } /* * This test is a bit "indirect". * It might simplify things if we made a direct way of testing * if a ksegrp has been worked on before. * In the case of a bound request and the concurrency being set to * one, the concurrency will already be 1 so it's just inefficient * but not dangerous to call this again. XXX */ if (newkg->kg_numupcalls == 0) { /* * Initialize KSE group with the appropriate * concurrency. * * For a multiplexed group, create as as much concurrency * as the number of physical cpus. * This increases concurrency in the kernel even if the * userland is not MP safe and can only run on a single CPU. * In an ideal world, every physical cpu should execute a * thread. If there is enough concurrency, threads in the * kernel can be executed parallel on different cpus at * full speed without being restricted by the number of * upcalls the userland provides. * Adding more upcall structures only increases concurrency * in userland. * * For a bound thread group, because there is only one thread * in the group, we only set the concurrency for the group * to 1. A thread in this kind of group will never schedule * an upcall when blocked. This simulates pthread system * scope thread behaviour. */ sched_set_concurrency(newkg, ncpus); } /* * Even bound LWPs get a mailbox and an upcall to hold it. */ newku = upcall_alloc(); newku->ku_mailbox = uap->mbx; newku->ku_func = mbx.km_func; bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t)); /* * For the first call this may not have been set. * Of course nor may it actually be needed. */ if (td->td_standin == NULL) thread_alloc_spare(td); PROC_LOCK(p); mtx_lock_spin(&sched_lock); if (newkg->kg_numupcalls >= ncpus) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); upcall_free(newku); return (EPROCLIM); } /* * If we are the first time, and a normal thread, * then transfer all the signals back to the 'process'. * SA threading will make a special thread to handle them. */ if (first && sa) { sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue, &td->td_sigqueue.sq_signals); SIGFILLSET(td->td_sigmask); SIG_CANTMASK(td->td_sigmask); } /* * Make the new upcall available to the ksegrp. * It may or may not use it, but it's available. */ upcall_link(newku, newkg); PROC_UNLOCK(p); if (mbx.km_quantum) newkg->kg_upquantum = max(1, mbx.km_quantum / tick); /* * Each upcall structure has an owner thread, find which * one owns it. */ if (uap->newgroup) { /* * Because the new ksegrp hasn't a thread, * create an initial upcall thread to own it. 
*/ newtd = thread_schedule_upcall(td, newku); } else { /* * If the current thread hasn't an upcall structure, * just assign the upcall to it. * It'll just return. */ if (td->td_upcall == NULL) { newku->ku_owner = td; td->td_upcall = newku; newtd = td; } else { /* * Create a new upcall thread to own it. */ newtd = thread_schedule_upcall(td, newku); } } mtx_unlock_spin(&sched_lock); /* * Let the UTS instance know its LWPID. * It doesn't really care. But the debugger will. */ suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid); /* * In the same manner, if the UTS has a current user thread, * then it is also running on this LWP so set it as well. * The library could do that of course.. but why not.. */ if (mbx.km_curthread) suword32(&mbx.km_curthread->tm_lwp, newtd->td_tid); if (sa) { newtd->td_pflags |= TDP_SA; } else { newtd->td_pflags &= ~TDP_SA; /* * Since a library will use the mailbox pointer to * identify even a bound thread, and the mailbox pointer * will never be allowed to change after this syscall * for a bound thread, set it here so the library can * find the thread after the syscall returns. */ newtd->td_mailbox = mbx.km_curthread; if (newtd != td) { /* * If we did create a new thread then * make sure it goes to the right place * when it starts up, and make sure that it runs * at full speed when it gets there. * thread_schedule_upcall() copies all cpu state * to the new thread, so we should clear single step * flag here. */ cpu_set_upcall_kse(newtd, newku->ku_func, newku->ku_mailbox, &newku->ku_stack); if (p->p_flag & P_TRACED) ptrace_clear_single_step(newtd); } } /* * If we are starting a new thread, kick it off. */ if (newtd != td) { mtx_lock_spin(&sched_lock); setrunqueue(newtd, SRQ_BORING); mtx_unlock_spin(&sched_lock); } return (0); } /* * Initialize global thread allocation resources. */ void kseinit(void) { upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall), NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); } /* * Stash an embarasingly extra upcall into the zombie upcall queue. */ void upcall_stash(struct kse_upcall *ku) { mtx_lock_spin(&kse_zombie_lock); TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link); mtx_unlock_spin(&kse_zombie_lock); } /* * Reap zombie kse resource. */ void kse_GC(void) { struct kse_upcall *ku_first, *ku_next; /* * Don't even bother to lock if none at this instant, * we really don't care about the next instant.. */ if (!TAILQ_EMPTY(&zombie_upcalls)) { mtx_lock_spin(&kse_zombie_lock); ku_first = TAILQ_FIRST(&zombie_upcalls); if (ku_first) TAILQ_INIT(&zombie_upcalls); mtx_unlock_spin(&kse_zombie_lock); while (ku_first) { ku_next = TAILQ_NEXT(ku_first, ku_link); upcall_free(ku_first); ku_first = ku_next; } } } /* * Store the thread context in the UTS's mailbox. * then add the mailbox at the head of a list we are building in user space. * The list is anchored in the ksegrp structure. */ int thread_export_context(struct thread *td, int willexit) { struct proc *p; struct ksegrp *kg; uintptr_t mbx; void *addr; int error = 0, sig; mcontext_t mc; p = td->td_proc; kg = td->td_ksegrp; /* * Post sync signal, or process SIGKILL and SIGSTOP. * For sync signal, it is only possible when the signal is not * caught by userland or process is being debugged. 
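 *
 * (For orientation, a hedged sketch of the consumer side: after an upcall the
 * UTS walks the completed-context list that thread_link_mboxes() hangs off
 * its kse_mailbox; the field names are the ones used in this file, the loop
 * itself is illustrative:
 *
 *        struct kse_thr_mailbox *tm, *next;
 *
 *        for (tm = mbx->km_completed; tm != NULL; tm = next) {
 *                next = tm->tm_next;
 *                /* tm->tm_context holds the exported user context;
 *                   make that user thread runnable again in the UTS. */
 *        }
 * )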
*/ PROC_LOCK(p); if (td->td_flags & TDF_NEEDSIGCHK) { mtx_lock_spin(&sched_lock); td->td_flags &= ~TDF_NEEDSIGCHK; mtx_unlock_spin(&sched_lock); mtx_lock(&p->p_sigacts->ps_mtx); while ((sig = cursig(td)) != 0) postsig(sig); mtx_unlock(&p->p_sigacts->ps_mtx); } if (willexit) SIGFILLSET(td->td_sigmask); PROC_UNLOCK(p); /* Export the user/machine context. */ get_mcontext(td, &mc, 0); addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext); error = copyout(&mc, addr, sizeof(mcontext_t)); if (error) goto bad; addr = (caddr_t)(&td->td_mailbox->tm_lwp); if (suword32(addr, 0)) { error = EFAULT; goto bad; } /* Get address in latest mbox of list pointer */ addr = (void *)(&td->td_mailbox->tm_next); /* * Put the saved address of the previous first * entry into this one */ for (;;) { mbx = (uintptr_t)kg->kg_completed; if (suword(addr, mbx)) { error = EFAULT; goto bad; } PROC_LOCK(p); if (mbx == (uintptr_t)kg->kg_completed) { kg->kg_completed = td->td_mailbox; /* * The thread context may be taken away by * other upcall threads when we unlock * process lock. it's no longer valid to * use it again in any other places. */ td->td_mailbox = NULL; PROC_UNLOCK(p); break; } PROC_UNLOCK(p); } td->td_usticks = 0; return (0); bad: PROC_LOCK(p); sigexit(td, SIGILL); return (error); } /* * Take the list of completed mailboxes for this KSEGRP and put them on this * upcall's mailbox as it's the next one going up. */ static int thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku) { struct proc *p = kg->kg_proc; void *addr; uintptr_t mbx; addr = (void *)(&ku->ku_mailbox->km_completed); for (;;) { mbx = (uintptr_t)kg->kg_completed; if (suword(addr, mbx)) { PROC_LOCK(p); psignal(p, SIGSEGV); PROC_UNLOCK(p); return (EFAULT); } PROC_LOCK(p); if (mbx == (uintptr_t)kg->kg_completed) { kg->kg_completed = NULL; PROC_UNLOCK(p); break; } PROC_UNLOCK(p); } return (0); } /* * This function should be called at statclock interrupt time */ int thread_statclock(int user) { struct thread *td = curthread; if (!(td->td_pflags & TDP_SA)) return (0); if (user) { /* Current always do via ast() */ mtx_lock_spin(&sched_lock); td->td_flags |= TDF_ASTPENDING; mtx_unlock_spin(&sched_lock); td->td_uuticks++; } else if (td->td_mailbox != NULL) td->td_usticks++; return (0); } /* * Export state clock ticks for userland */ static int thread_update_usr_ticks(struct thread *td) { struct proc *p = td->td_proc; caddr_t addr; u_int uticks; if (td->td_mailbox == NULL) return (-1); if ((uticks = td->td_uuticks) != 0) { td->td_uuticks = 0; addr = (caddr_t)&td->td_mailbox->tm_uticks; if (suword32(addr, uticks+fuword32(addr))) goto error; } if ((uticks = td->td_usticks) != 0) { td->td_usticks = 0; addr = (caddr_t)&td->td_mailbox->tm_sticks; if (suword32(addr, uticks+fuword32(addr))) goto error; } return (0); error: PROC_LOCK(p); psignal(p, SIGSEGV); PROC_UNLOCK(p); return (-2); } /* * This function is intended to be used to initialize a spare thread * for upcall. Initialize thread's large data area outside sched_lock * for thread_schedule_upcall(). The crhold is also here to get it out * from the schedlock as it has a mutex op itself. * XXX BUG.. we need to get the cr ref after the thread has * checked and chenged its own, not 6 months before... 
*/ void thread_alloc_spare(struct thread *td) { struct thread *spare; if (td->td_standin) return; spare = thread_alloc(); td->td_standin = spare; bzero(&spare->td_startzero, __rangeof(struct thread, td_startzero, td_endzero)); spare->td_proc = td->td_proc; spare->td_ucred = crhold(td->td_ucred); } /* * Create a thread and schedule it for upcall on the KSE given. * Use our thread's standin so that we don't have to allocate one. */ struct thread * thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) { struct thread *td2; mtx_assert(&sched_lock, MA_OWNED); /* * Schedule an upcall thread on specified kse_upcall, * the kse_upcall must be free. * td must have a spare thread. */ KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__)); if ((td2 = td->td_standin) != NULL) { td->td_standin = NULL; } else { panic("no reserve thread when scheduling an upcall"); return (NULL); } CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)", td2, td->td_proc->p_pid, td->td_proc->p_comm); /* * Bzero already done in thread_alloc_spare() because we can't * do the crhold here because we are in schedlock already. */ bcopy(&td->td_startcopy, &td2->td_startcopy, __rangeof(struct thread, td_startcopy, td_endcopy)); thread_link(td2, ku->ku_ksegrp); /* inherit parts of blocked thread's context as a good template */ cpu_set_upcall(td2, td); /* Let the new thread become owner of the upcall */ ku->ku_owner = td2; td2->td_upcall = ku; td2->td_flags = 0; td2->td_pflags = TDP_SA|TDP_UPCALLING; td2->td_state = TDS_CAN_RUN; td2->td_inhibitors = 0; SIGFILLSET(td2->td_sigmask); SIG_CANTMASK(td2->td_sigmask); sched_fork_thread(td, td2); return (td2); /* bogus.. should be a void function */ } /* * It is only used when thread generated a trap and process is being * debugged. */ void thread_signal_add(struct thread *td, ksiginfo_t *ksi) { struct proc *p; struct sigacts *ps; int error; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); ps = p->p_sigacts; mtx_assert(&ps->ps_mtx, MA_OWNED); mtx_unlock(&ps->ps_mtx); SIGADDSET(td->td_sigmask, ksi->ksi_signo); PROC_UNLOCK(p); error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig, sizeof(siginfo_t)); if (error) { PROC_LOCK(p); sigexit(td, SIGSEGV); } PROC_LOCK(p); mtx_lock(&ps->ps_mtx); } #include "opt_sched.h" struct thread * thread_switchout(struct thread *td, int flags, struct thread *nextthread) { struct kse_upcall *ku; struct thread *td2; mtx_assert(&sched_lock, MA_OWNED); /* * If the outgoing thread is in threaded group and has never * scheduled an upcall, decide whether this is a short * or long term event and thus whether or not to schedule * an upcall. * If it is a short term event, just suspend it in * a way that takes its KSE with it. * Select the events for which we want to schedule upcalls. * For now it's just sleep or if thread is suspended but * process wide suspending flag is not set (debugger * suspends thread). * XXXKSE eventually almost any inhibition could do. */ if (TD_CAN_UNBIND(td) && (td->td_standin) && (TD_ON_SLEEPQ(td) || (TD_IS_SUSPENDED(td) && !P_SHOULDSTOP(td->td_proc)))) { /* * Release ownership of upcall, and schedule an upcall * thread, this new upcall thread becomes the owner of * the upcall structure. It will be ahead of us in the * run queue, so as we are stopping, it should either * start up immediatly, or at least before us if * we release our slot. 
*/ ku = td->td_upcall; ku->ku_owner = NULL; td->td_upcall = NULL; td->td_pflags &= ~TDP_CAN_UNBIND; td2 = thread_schedule_upcall(td, ku); if (flags & SW_INVOL || nextthread) { setrunqueue(td2, SRQ_YIELDING); } else { /* Keep up with reality.. we have one extra thread * in the picture.. and it's 'running'. */ return td2; } } return (nextthread); } /* * Setup done on the thread when it enters the kernel. */ void thread_user_enter(struct thread *td) { struct proc *p = td->td_proc; struct ksegrp *kg; struct kse_upcall *ku; struct kse_thr_mailbox *tmbx; uint32_t flags; /* * First check that we shouldn't just abort. we * can suspend it here or just exit. */ if (__predict_false(P_SHOULDSTOP(p))) { PROC_LOCK(p); thread_suspend_check(0); PROC_UNLOCK(p); } if (!(td->td_pflags & TDP_SA)) return; /* * If we are doing a syscall in a KSE environment, * note where our mailbox is. */ kg = td->td_ksegrp; ku = td->td_upcall; KASSERT(ku != NULL, ("no upcall owned")); KASSERT(ku->ku_owner == td, ("wrong owner")); KASSERT(!TD_CAN_UNBIND(td), ("can unbind")); if (td->td_standin == NULL) thread_alloc_spare(td); ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags); tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); if ((tmbx == NULL) || (tmbx == (void *)-1L) || (ku->ku_mflags & KMF_NOUPCALL)) { td->td_mailbox = NULL; } else { flags = fuword32(&tmbx->tm_flags); /* * On some architectures, TP register points to thread * mailbox but not points to kse mailbox, and userland * can not atomically clear km_curthread, but can * use TP register, and set TMF_NOUPCALL in thread * flag to indicate a critical region. */ if (flags & TMF_NOUPCALL) { td->td_mailbox = NULL; } else { td->td_mailbox = tmbx; td->td_pflags |= TDP_CAN_UNBIND; if (__predict_false(p->p_flag & P_TRACED)) { flags = fuword32(&tmbx->tm_dflags); if (flags & TMDF_SUSPEND) { mtx_lock_spin(&sched_lock); /* fuword can block, check again */ if (td->td_upcall) ku->ku_flags |= KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } } } } } /* * The extra work we go through if we are a threaded process when we * return to userland. * * If we are a KSE process and returning to user mode, check for * extra work to do before we return (e.g. for more syscalls * to complete first). If we were in a critical section, we should * just return to let it finish. Same if we were in the UTS (in * which case the mailbox's context's busy indicator will be set). * The only traps we suport will have set the mailbox. * We will clear it here. */ int thread_userret(struct thread *td, struct trapframe *frame) { struct kse_upcall *ku; struct ksegrp *kg, *kg2; struct proc *p; struct timespec ts; int error = 0, upcalls, uts_crit; /* Nothing to do with bound thread */ if (!(td->td_pflags & TDP_SA)) return (0); /* * Update stat clock count for userland */ if (td->td_mailbox != NULL) { thread_update_usr_ticks(td); uts_crit = 0; } else { uts_crit = 1; } p = td->td_proc; kg = td->td_ksegrp; ku = td->td_upcall; /* * Optimisation: * This thread has not started any upcall. * If there is no work to report other than ourself, * then it can return direct to userland. 
*/ if (TD_CAN_UNBIND(td)) { td->td_pflags &= ~TDP_CAN_UNBIND; if ((td->td_flags & TDF_NEEDSIGCHK) == 0 && (kg->kg_completed == NULL) && (ku->ku_flags & KUF_DOUPCALL) == 0 && (kg->kg_upquantum && ticks < kg->kg_nextupcall)) { nanotime(&ts); error = copyout(&ts, (caddr_t)&ku->ku_mailbox->km_timeofday, sizeof(ts)); td->td_mailbox = 0; ku->ku_mflags = 0; if (error) goto out; return (0); } thread_export_context(td, 0); /* * There is something to report, and we own an upcall * structure, we can go to userland. * Turn ourself into an upcall thread. */ td->td_pflags |= TDP_UPCALLING; } else if (td->td_mailbox && (ku == NULL)) { thread_export_context(td, 1); PROC_LOCK(p); if (kg->kg_upsleeps) wakeup(&kg->kg_completed); WITNESS_WARN(WARN_PANIC, &p->p_mtx.mtx_object, "thread exiting in userret"); sigqueue_flush(&td->td_sigqueue); mtx_lock_spin(&sched_lock); thread_stopped(p); thread_exit(); /* NOTREACHED */ } KASSERT(ku != NULL, ("upcall is NULL")); KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind")); if (p->p_numthreads > max_threads_per_proc) { max_threads_hits++; PROC_LOCK(p); mtx_lock_spin(&sched_lock); p->p_maxthrwaits++; while (p->p_numthreads > max_threads_per_proc) { upcalls = 0; FOREACH_KSEGRP_IN_PROC(p, kg2) { if (kg2->kg_numupcalls == 0) upcalls++; else upcalls += kg2->kg_numupcalls; } if (upcalls >= max_threads_per_proc) break; mtx_unlock_spin(&sched_lock); if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH, "maxthreads", hz/10) != EWOULDBLOCK) { mtx_lock_spin(&sched_lock); break; } else { mtx_lock_spin(&sched_lock); } } p->p_maxthrwaits--; mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } if (td->td_pflags & TDP_UPCALLING) { uts_crit = 0; kg->kg_nextupcall = ticks + kg->kg_upquantum; /* * There is no more work to do and we are going to ride * this thread up to userland as an upcall. * Do the last parts of the setup needed for the upcall. */ CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)", td, td->td_proc->p_pid, td->td_proc->p_comm); td->td_pflags &= ~TDP_UPCALLING; if (ku->ku_flags & KUF_DOUPCALL) { mtx_lock_spin(&sched_lock); ku->ku_flags &= ~KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } /* * Set user context to the UTS */ if (!(ku->ku_mflags & KMF_NOUPCALL)) { cpu_set_upcall_kse(td, ku->ku_func, ku->ku_mailbox, &ku->ku_stack); if (p->p_flag & P_TRACED) ptrace_clear_single_step(td); error = suword32(&ku->ku_mailbox->km_lwp, td->td_tid); if (error) goto out; error = suword(&ku->ku_mailbox->km_curthread, 0); if (error) goto out; } /* * Unhook the list of completed threads. * anything that completes after this gets to * come in next time. * Put the list of completed thread mailboxes on * this KSE's mailbox. */ if (!(ku->ku_mflags & KMF_NOCOMPLETED) && (error = thread_link_mboxes(kg, ku)) != 0) goto out; } if (!uts_crit) { nanotime(&ts); error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts)); } out: if (error) { /* * Things are going to be so screwed we should just kill * the process. * how do we do that? */ PROC_LOCK(p); psignal(p, SIGSEGV); PROC_UNLOCK(p); } else { /* * Optimisation: * Ensure that we have a spare thread available, * for when we re-enter the kernel. */ if (td->td_standin == NULL) thread_alloc_spare(td); } ku->ku_mflags = 0; td->td_mailbox = NULL; td->td_usticks = 0; return (error); /* go sync */ } /* * called after ptrace resumed a process, force all * virtual CPUs to schedule upcall for SA process, * because debugger may have changed something in userland, * we should notice UTS as soon as possible. 
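(Illustration, not part of the commit.) The KSE routines above share one convention worth noting: every store into the userland mailbox goes through suword()/suword32()/copyout(), the return value is always checked, and a fault becomes a signal (SIGSEGV, or sigexit() when the mailbox is unrecoverable) rather than being ignored. A minimal sketch of that pattern, with a hypothetical helper name and the usual kernel headers assumed:

static int
mbox_store_u32(struct thread *td, uint32_t *uaddr, uint32_t val)
{
	struct proc *p = td->td_proc;

	/* suword32() returns non-zero if the userland address faults. */
	if (suword32(uaddr, val) != 0) {
		PROC_LOCK(p);
		psignal(p, SIGSEGV);	/* same reaction as thread_update_usr_ticks() */
		PROC_UNLOCK(p);
		return (EFAULT);
	}
	return (0);
}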
*/ void thread_continued(struct proc *p) { struct ksegrp *kg; struct kse_upcall *ku; struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT(P_SHOULDSTOP(p), ("process not stopped")); if (!(p->p_flag & P_SA)) return; if (p->p_flag & P_TRACED) { FOREACH_KSEGRP_IN_PROC(p, kg) { td = TAILQ_FIRST(&kg->kg_threads); if (td == NULL) continue; /* not a SA group, nothing to do */ if (!(td->td_pflags & TDP_SA)) continue; FOREACH_UPCALL_IN_GROUP(kg, ku) { mtx_lock_spin(&sched_lock); ku->ku_flags |= KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); wakeup(&kg->kg_completed); } } } } Index: head/sys/kern/kern_sig.c =================================================================== --- head/sys/kern/kern_sig.c (revision 151992) +++ head/sys/kern/kern_sig.c (revision 151993) @@ -1,3173 +1,3178 @@ /*- * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_ktrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined (__alpha__) && !defined(COMPAT_43) #error "You *really* need COMPAT_43 on the alpha for longjmp(3)" #endif #define ONSIG 32 /* NSIG for osig* syscalls. XXX. 
*/ static int coredump(struct thread *); static char *expand_name(const char *, uid_t, pid_t); static int killpg1(struct thread *td, int sig, int pgid, int all); static int issignal(struct thread *p); static int sigprop(int sig); static void tdsigwakeup(struct thread *td, int sig, sig_t action); static int filt_sigattach(struct knote *kn); static void filt_sigdetach(struct knote *kn); static int filt_signal(struct knote *kn, long hint); static struct thread *sigtd(struct proc *p, int sig, int prop); static int kern_sigtimedwait(struct thread *, sigset_t, ksiginfo_t *, struct timespec *); -static int do_tdsignal(struct thread *, int, ksiginfo_t *, sigtarget_t); +static int do_tdsignal(struct proc *, struct thread *, int, ksiginfo_t *); static void sigqueue_start(void); -static int psignal_common(struct proc *p, int sig, ksiginfo_t *ksi); static uma_zone_t ksiginfo_zone = NULL; struct filterops sig_filtops = { 0, filt_sigattach, filt_sigdetach, filt_signal }; static int kern_logsigexit = 1; SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW, &kern_logsigexit, 0, "Log processes quitting on abnormal signals to syslog(3)"); SYSCTL_NODE(_kern, OID_AUTO, sigqueue, CTLFLAG_RW, 0, "POSIX real time signal"); static int max_pending_per_proc = 128; SYSCTL_INT(_kern_sigqueue, OID_AUTO, max_pending_per_proc, CTLFLAG_RW, &max_pending_per_proc, 0, "Max pending signals per proc"); static int queue_rt_signal_only = 1; SYSCTL_INT(_kern_sigqueue, OID_AUTO, queue_rt_signal_only, CTLFLAG_RW, &queue_rt_signal_only, 0, "Only rt signal is queued"); static int preallocate_siginfo = 1024; TUNABLE_INT("kern.sigqueue.preallocate", &preallocate_siginfo); SYSCTL_INT(_kern_sigqueue, OID_AUTO, preallocate, CTLFLAG_RD, &preallocate_siginfo, 0, "Preallocated signal memory size"); static int signal_overflow = 0; SYSCTL_INT(_kern_sigqueue, OID_AUTO, signal_overflow, CTLFLAG_RD, &signal_overflow, 0, "Number of signals overflew"); static int signal_alloc_fail = 0; SYSCTL_INT(_kern_sigqueue, OID_AUTO, signal_alloc_fail, CTLFLAG_RD, &signal_alloc_fail, 0, "signals failed to be allocated"); SYSINIT(signal, SI_SUB_P1003_1B, SI_ORDER_FIRST+3, sigqueue_start, NULL); /* * Policy -- Can ucred cr1 send SIGIO to process cr2? * Should use cr_cansignal() once cr_cansignal() allows SIGIO and SIGURG * in the right situations. */ #define CANSIGIO(cr1, cr2) \ ((cr1)->cr_uid == 0 || \ (cr1)->cr_ruid == (cr2)->cr_ruid || \ (cr1)->cr_uid == (cr2)->cr_ruid || \ (cr1)->cr_ruid == (cr2)->cr_uid || \ (cr1)->cr_uid == (cr2)->cr_uid) int sugid_coredump; SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW, &sugid_coredump, 0, "Enable coredumping set user/group ID processes"); static int do_coredump = 1; SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW, &do_coredump, 0, "Enable/Disable coredumps"); static int set_core_nodump_flag = 0; SYSCTL_INT(_kern, OID_AUTO, nodump_coredump, CTLFLAG_RW, &set_core_nodump_flag, 0, "Enable setting the NODUMP flag on coredump files"); /* * Signal properties and actions. 
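The _kern_sigqueue knobs declared above should surface to userland as "kern.sigqueue.*" nodes, assuming the usual SYSCTL name mapping. As an illustration (not part of the commit), a userland reader for the per-process pending limit:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int maxpend;
	size_t len = sizeof(maxpend);

	/* Name assumed from SYSCTL_INT(_kern_sigqueue, ..., max_pending_per_proc, ...). */
	if (sysctlbyname("kern.sigqueue.max_pending_per_proc",
	    &maxpend, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("max pending signals per process: %d\n", maxpend);
	return (0);
}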
* The array below categorizes the signals and their default actions * according to the following properties: */ #define SA_KILL 0x01 /* terminates process by default */ #define SA_CORE 0x02 /* ditto and coredumps */ #define SA_STOP 0x04 /* suspend process */ #define SA_TTYSTOP 0x08 /* ditto, from tty */ #define SA_IGNORE 0x10 /* ignore by default */ #define SA_CONT 0x20 /* continue if suspended */ #define SA_CANTMASK 0x40 /* non-maskable, catchable */ #define SA_PROC 0x80 /* deliverable to any thread */ static int sigproptbl[NSIG] = { SA_KILL|SA_PROC, /* SIGHUP */ SA_KILL|SA_PROC, /* SIGINT */ SA_KILL|SA_CORE|SA_PROC, /* SIGQUIT */ SA_KILL|SA_CORE, /* SIGILL */ SA_KILL|SA_CORE, /* SIGTRAP */ SA_KILL|SA_CORE, /* SIGABRT */ SA_KILL|SA_CORE|SA_PROC, /* SIGEMT */ SA_KILL|SA_CORE, /* SIGFPE */ SA_KILL|SA_PROC, /* SIGKILL */ SA_KILL|SA_CORE, /* SIGBUS */ SA_KILL|SA_CORE, /* SIGSEGV */ SA_KILL|SA_CORE, /* SIGSYS */ SA_KILL|SA_PROC, /* SIGPIPE */ SA_KILL|SA_PROC, /* SIGALRM */ SA_KILL|SA_PROC, /* SIGTERM */ SA_IGNORE|SA_PROC, /* SIGURG */ SA_STOP|SA_PROC, /* SIGSTOP */ SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTSTP */ SA_IGNORE|SA_CONT|SA_PROC, /* SIGCONT */ SA_IGNORE|SA_PROC, /* SIGCHLD */ SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTIN */ SA_STOP|SA_TTYSTOP|SA_PROC, /* SIGTTOU */ SA_IGNORE|SA_PROC, /* SIGIO */ SA_KILL, /* SIGXCPU */ SA_KILL, /* SIGXFSZ */ SA_KILL|SA_PROC, /* SIGVTALRM */ SA_KILL|SA_PROC, /* SIGPROF */ SA_IGNORE|SA_PROC, /* SIGWINCH */ SA_IGNORE|SA_PROC, /* SIGINFO */ SA_KILL|SA_PROC, /* SIGUSR1 */ SA_KILL|SA_PROC, /* SIGUSR2 */ }; static void sigqueue_start(void) { ksiginfo_zone = uma_zcreate("ksiginfo", sizeof(ksiginfo_t), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); uma_prealloc(ksiginfo_zone, preallocate_siginfo); } ksiginfo_t * ksiginfo_alloc(void) { if (ksiginfo_zone != NULL) return ((ksiginfo_t *)uma_zalloc(ksiginfo_zone, M_NOWAIT | M_ZERO)); return (NULL); } void ksiginfo_free(ksiginfo_t *ksi) { uma_zfree(ksiginfo_zone, ksi); } static __inline int ksiginfo_tryfree(ksiginfo_t *ksi) { if (!(ksi->ksi_flags & KSI_EXT)) { uma_zfree(ksiginfo_zone, ksi); return (1); } return (0); } void sigqueue_init(sigqueue_t *list, struct proc *p) { SIGEMPTYSET(list->sq_signals); TAILQ_INIT(&list->sq_list); list->sq_proc = p; list->sq_flags = SQ_INIT; } /* * Get a signal's ksiginfo. 
* Return: * 0 - signal not found * others - signal number */ int sigqueue_get(sigqueue_t *sq, int signo, ksiginfo_t *si) { struct proc *p = sq->sq_proc; struct ksiginfo *ksi, *next; int count = 0; KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited")); if (!SIGISMEMBER(sq->sq_signals, signo)) return (0); for (ksi = TAILQ_FIRST(&sq->sq_list); ksi != NULL; ksi = next) { next = TAILQ_NEXT(ksi, ksi_link); if (ksi->ksi_signo == signo) { if (count == 0) { TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link); ksi->ksi_sigq = NULL; ksiginfo_copy(ksi, si); if (ksiginfo_tryfree(ksi) && p != NULL) p->p_pendingcnt--; } count++; } } if (count <= 1) SIGDELSET(sq->sq_signals, signo); si->ksi_signo = signo; return (signo); } void sigqueue_take(ksiginfo_t *ksi) { struct ksiginfo *kp; struct proc *p; sigqueue_t *sq; if ((sq = ksi->ksi_sigq) == NULL) return; p = sq->sq_proc; TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link); ksi->ksi_sigq = NULL; if (!(ksi->ksi_flags & KSI_EXT) && p != NULL) p->p_pendingcnt--; for (kp = TAILQ_FIRST(&sq->sq_list); kp != NULL; kp = TAILQ_NEXT(kp, ksi_link)) { if (kp->ksi_signo == ksi->ksi_signo) break; } if (kp == NULL) SIGDELSET(sq->sq_signals, ksi->ksi_signo); } int sigqueue_add(sigqueue_t *sq, int signo, ksiginfo_t *si) { struct proc *p = sq->sq_proc; struct ksiginfo *ksi; int ret = 0; KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited")); if (signo == SIGKILL || signo == SIGSTOP || si == NULL) goto out_set_bit; /* directly insert the ksi, don't copy it */ if (si->ksi_flags & KSI_INS) { TAILQ_INSERT_TAIL(&sq->sq_list, si, ksi_link); si->ksi_sigq = sq; goto out_set_bit; } if (__predict_false(ksiginfo_zone == NULL)) goto out_set_bit; if (p != NULL && p->p_pendingcnt > max_pending_per_proc) { signal_overflow++; ret = EAGAIN; } else if ((ksi = ksiginfo_alloc()) == NULL) { signal_alloc_fail++; ret = EAGAIN; } else { if (p != NULL) p->p_pendingcnt++; ksiginfo_copy(si, ksi); ksi->ksi_signo = signo; TAILQ_INSERT_TAIL(&sq->sq_list, ksi, ksi_link); ksi->ksi_sigq = sq; } if ((si->ksi_flags & KSI_TRAP) != 0) { ret = 0; goto out_set_bit; } if (ret != 0) return (ret); out_set_bit: SIGADDSET(sq->sq_signals, signo); return (ret); } void sigqueue_flush(sigqueue_t *sq) { struct proc *p = sq->sq_proc; ksiginfo_t *ksi; KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited")); if (p != NULL) PROC_LOCK_ASSERT(p, MA_OWNED); while ((ksi = TAILQ_FIRST(&sq->sq_list)) != NULL) { TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link); ksi->ksi_sigq = NULL; if (ksiginfo_tryfree(ksi) && p != NULL) p->p_pendingcnt--; } SIGEMPTYSET(sq->sq_signals); } void sigqueue_collect_set(sigqueue_t *sq, sigset_t *set) { ksiginfo_t *ksi; KASSERT(sq->sq_flags & SQ_INIT, ("sigqueue not inited")); TAILQ_FOREACH(ksi, &sq->sq_list, ksi_link) SIGADDSET(*set, ksi->ksi_signo); } void sigqueue_move_set(sigqueue_t *src, sigqueue_t *dst, sigset_t *setp) { sigset_t tmp, set; struct proc *p1, *p2; ksiginfo_t *ksi, *next; KASSERT(src->sq_flags & SQ_INIT, ("src sigqueue not inited")); KASSERT(dst->sq_flags & SQ_INIT, ("dst sigqueue not inited")); /* * make a copy, this allows setp to point to src or dst * sq_signals without trouble. 
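sigqueue_init(), sigqueue_add(), sigqueue_get() and sigqueue_flush() above make up the per-queue API that the rest of this file builds on. A condensed sketch of the lifecycle, illustrative only and not part of the commit (kernel context and the appropriate proc locking are assumed, not shown):

static void
sigqueue_lifecycle_sketch(struct proc *p, int signo, ksiginfo_t *src)
{
	sigqueue_t sq;
	ksiginfo_t ksi;

	sigqueue_init(&sq, p);			/* empty queue owned by p */
	(void)sigqueue_add(&sq, signo, src);	/* queue signo, copying *src */
	if (sigqueue_get(&sq, signo, &ksi) == signo)
		;				/* ksi now holds the dequeued siginfo */
	sigqueue_flush(&sq);			/* discard anything still pending */
}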
*/ set = *setp; p1 = src->sq_proc; p2 = dst->sq_proc; /* Move siginfo to target list */ for (ksi = TAILQ_FIRST(&src->sq_list); ksi != NULL; ksi = next) { next = TAILQ_NEXT(ksi, ksi_link); if (SIGISMEMBER(set, ksi->ksi_signo)) { TAILQ_REMOVE(&src->sq_list, ksi, ksi_link); if (p1 != NULL) p1->p_pendingcnt--; TAILQ_INSERT_TAIL(&dst->sq_list, ksi, ksi_link); ksi->ksi_sigq = dst; if (p2 != NULL) p2->p_pendingcnt++; } } /* Move pending bits to target list */ tmp = src->sq_signals; SIGSETAND(tmp, set); SIGSETOR(dst->sq_signals, tmp); SIGSETNAND(src->sq_signals, tmp); /* Finally, rescan src queue and set pending bits for it */ sigqueue_collect_set(src, &src->sq_signals); } void sigqueue_move(sigqueue_t *src, sigqueue_t *dst, int signo) { sigset_t set; SIGEMPTYSET(set); SIGADDSET(set, signo); sigqueue_move_set(src, dst, &set); } void sigqueue_delete_set(sigqueue_t *sq, sigset_t *set) { struct proc *p = sq->sq_proc; ksiginfo_t *ksi, *next; KASSERT(sq->sq_flags & SQ_INIT, ("src sigqueue not inited")); /* Remove siginfo queue */ for (ksi = TAILQ_FIRST(&sq->sq_list); ksi != NULL; ksi = next) { next = TAILQ_NEXT(ksi, ksi_link); if (SIGISMEMBER(*set, ksi->ksi_signo)) { TAILQ_REMOVE(&sq->sq_list, ksi, ksi_link); ksi->ksi_sigq = NULL; if (ksiginfo_tryfree(ksi) && p != NULL) p->p_pendingcnt--; } } SIGSETNAND(sq->sq_signals, *set); /* Finally, rescan queue and set pending bits for it */ sigqueue_collect_set(sq, &sq->sq_signals); } void sigqueue_delete(sigqueue_t *sq, int signo) { sigset_t set; SIGEMPTYSET(set); SIGADDSET(set, signo); sigqueue_delete_set(sq, &set); } /* Remove a set of signals for a process */ void sigqueue_delete_set_proc(struct proc *p, sigset_t *set) { sigqueue_t worklist; struct thread *td0; PROC_LOCK_ASSERT(p, MA_OWNED); sigqueue_init(&worklist, NULL); sigqueue_move_set(&p->p_sigqueue, &worklist, set); mtx_lock_spin(&sched_lock); FOREACH_THREAD_IN_PROC(p, td0) sigqueue_move_set(&td0->td_sigqueue, &worklist, set); mtx_unlock_spin(&sched_lock); sigqueue_flush(&worklist); } void sigqueue_delete_proc(struct proc *p, int signo) { sigset_t set; SIGEMPTYSET(set); SIGADDSET(set, signo); sigqueue_delete_set_proc(p, &set); } void sigqueue_delete_stopmask_proc(struct proc *p) { sigset_t set; SIGEMPTYSET(set); SIGADDSET(set, SIGSTOP); SIGADDSET(set, SIGTSTP); SIGADDSET(set, SIGTTIN); SIGADDSET(set, SIGTTOU); sigqueue_delete_set_proc(p, &set); } /* * Determine signal that should be delivered to process p, the current * process, 0 if none. If there is a pending stop signal with default * action, the process stops in issignal(). * * MP SAFE. */ int cursig(struct thread *td) { PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); mtx_assert(&td->td_proc->p_sigacts->ps_mtx, MA_OWNED); mtx_assert(&sched_lock, MA_NOTOWNED); return (SIGPENDING(td) ? issignal(td) : 0); } /* * Arrange for ast() to handle unmasked pending signals on return to user * mode. This must be called whenever a signal is added to td_sigqueue or * unmasked in td_sigmask. */ void signotify(struct thread *td) { struct proc *p; sigset_t set, saved; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); /* * If our mask changed we may have to move signal that were * previously masked by all threads to our sigqueue. */ set = p->p_sigqueue.sq_signals; if (p->p_flag & P_SA) saved = p->p_sigqueue.sq_signals; SIGSETNAND(set, td->td_sigmask); if (! 
SIGISEMPTY(set)) sigqueue_move_set(&p->p_sigqueue, &td->td_sigqueue, &set); if (SIGPENDING(td)) { mtx_lock_spin(&sched_lock); td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING; mtx_unlock_spin(&sched_lock); } if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) { if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) { /* pending set changed */ p->p_flag |= P_SIGEVENT; wakeup(&p->p_siglist); } } } int sigonstack(size_t sp) { struct thread *td = curthread; return ((td->td_pflags & TDP_ALTSTACK) ? #if defined(COMPAT_43) ((td->td_sigstk.ss_size == 0) ? (td->td_sigstk.ss_flags & SS_ONSTACK) : ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size)) #else ((sp - (size_t)td->td_sigstk.ss_sp) < td->td_sigstk.ss_size) #endif : 0); } static __inline int sigprop(int sig) { if (sig > 0 && sig < NSIG) return (sigproptbl[_SIG_IDX(sig)]); return (0); } int sig_ffs(sigset_t *set) { int i; for (i = 0; i < _SIG_WORDS; i++) if (set->__bits[i]) return (ffs(set->__bits[i]) + (i * 32)); return (0); } /* * kern_sigaction * sigaction * freebsd4_sigaction * osigaction * * MPSAFE */ int kern_sigaction(td, sig, act, oact, flags) struct thread *td; register int sig; struct sigaction *act, *oact; int flags; { struct sigacts *ps; struct proc *p = td->td_proc; if (!_SIG_VALID(sig)) return (EINVAL); PROC_LOCK(p); ps = p->p_sigacts; mtx_lock(&ps->ps_mtx); if (oact) { oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)]; oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)]; oact->sa_flags = 0; if (SIGISMEMBER(ps->ps_sigonstack, sig)) oact->sa_flags |= SA_ONSTACK; if (!SIGISMEMBER(ps->ps_sigintr, sig)) oact->sa_flags |= SA_RESTART; if (SIGISMEMBER(ps->ps_sigreset, sig)) oact->sa_flags |= SA_RESETHAND; if (SIGISMEMBER(ps->ps_signodefer, sig)) oact->sa_flags |= SA_NODEFER; if (SIGISMEMBER(ps->ps_siginfo, sig)) oact->sa_flags |= SA_SIGINFO; if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDSTOP) oact->sa_flags |= SA_NOCLDSTOP; if (sig == SIGCHLD && ps->ps_flag & PS_NOCLDWAIT) oact->sa_flags |= SA_NOCLDWAIT; } if (act) { if ((sig == SIGKILL || sig == SIGSTOP) && act->sa_handler != SIG_DFL) { mtx_unlock(&ps->ps_mtx); PROC_UNLOCK(p); return (EINVAL); } /* * Change setting atomically. */ ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask; SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]); if (act->sa_flags & SA_SIGINFO) { ps->ps_sigact[_SIG_IDX(sig)] = (__sighandler_t *)act->sa_sigaction; SIGADDSET(ps->ps_siginfo, sig); } else { ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler; SIGDELSET(ps->ps_siginfo, sig); } if (!(act->sa_flags & SA_RESTART)) SIGADDSET(ps->ps_sigintr, sig); else SIGDELSET(ps->ps_sigintr, sig); if (act->sa_flags & SA_ONSTACK) SIGADDSET(ps->ps_sigonstack, sig); else SIGDELSET(ps->ps_sigonstack, sig); if (act->sa_flags & SA_RESETHAND) SIGADDSET(ps->ps_sigreset, sig); else SIGDELSET(ps->ps_sigreset, sig); if (act->sa_flags & SA_NODEFER) SIGADDSET(ps->ps_signodefer, sig); else SIGDELSET(ps->ps_signodefer, sig); if (sig == SIGCHLD) { if (act->sa_flags & SA_NOCLDSTOP) ps->ps_flag |= PS_NOCLDSTOP; else ps->ps_flag &= ~PS_NOCLDSTOP; if (act->sa_flags & SA_NOCLDWAIT) { /* * Paranoia: since SA_NOCLDWAIT is implemented * by reparenting the dying child to PID 1 (and * trust it to reap the zombie), PID 1 itself * is forbidden to set SA_NOCLDWAIT. 
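kern_sigaction() here is the kernel half of sigaction(2): the SA_SIGINFO, SA_RESTART and SA_RESETHAND bits it records in ps_siginfo, ps_sigintr and ps_sigreset are exactly the flags a userland caller passes in. A minimal userland counterpart (not part of the commit) that installs a three-argument handler:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void
on_usr1(int sig, siginfo_t *si, void *ctx)
{
	/* printf() is not async-signal-safe; used here for illustration only. */
	(void)ctx;
	printf("signal %d from pid %ld\n", sig, (long)si->si_pid);
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_usr1;		/* enabled by SA_SIGINFO */
	sa.sa_flags = SA_SIGINFO | SA_RESTART;	/* sets ps_siginfo, clears ps_sigintr */
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1) {
		perror("sigaction");
		return (1);
	}
	pause();				/* wait for SIGUSR1 */
	return (0);
}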
*/ if (p->p_pid == 1) ps->ps_flag &= ~PS_NOCLDWAIT; else ps->ps_flag |= PS_NOCLDWAIT; } else ps->ps_flag &= ~PS_NOCLDWAIT; if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN) ps->ps_flag |= PS_CLDSIGIGN; else ps->ps_flag &= ~PS_CLDSIGIGN; } /* * Set bit in ps_sigignore for signals that are set to SIG_IGN, * and for signals set to SIG_DFL where the default is to * ignore. However, don't put SIGCONT in ps_sigignore, as we * have to restart the process. */ if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || (sigprop(sig) & SA_IGNORE && ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) { if ((p->p_flag & P_SA) && SIGISMEMBER(p->p_sigqueue.sq_signals, sig)) { p->p_flag |= P_SIGEVENT; wakeup(&p->p_siglist); } /* never to be seen again */ sigqueue_delete_proc(p, sig); if (sig != SIGCONT) /* easier in psignal */ SIGADDSET(ps->ps_sigignore, sig); SIGDELSET(ps->ps_sigcatch, sig); } else { SIGDELSET(ps->ps_sigignore, sig); if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL) SIGDELSET(ps->ps_sigcatch, sig); else SIGADDSET(ps->ps_sigcatch, sig); } #ifdef COMPAT_FREEBSD4 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL || (flags & KSA_FREEBSD4) == 0) SIGDELSET(ps->ps_freebsd4, sig); else SIGADDSET(ps->ps_freebsd4, sig); #endif #ifdef COMPAT_43 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN || ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL || (flags & KSA_OSIGSET) == 0) SIGDELSET(ps->ps_osigset, sig); else SIGADDSET(ps->ps_osigset, sig); #endif } mtx_unlock(&ps->ps_mtx); PROC_UNLOCK(p); return (0); } #ifndef _SYS_SYSPROTO_H_ struct sigaction_args { int sig; struct sigaction *act; struct sigaction *oact; }; #endif /* * MPSAFE */ int sigaction(td, uap) struct thread *td; register struct sigaction_args *uap; { struct sigaction act, oact; register struct sigaction *actp, *oactp; int error; actp = (uap->act != NULL) ? &act : NULL; oactp = (uap->oact != NULL) ? &oact : NULL; if (actp) { error = copyin(uap->act, actp, sizeof(act)); if (error) return (error); } error = kern_sigaction(td, uap->sig, actp, oactp, 0); if (oactp && !error) error = copyout(oactp, uap->oact, sizeof(oact)); return (error); } #ifdef COMPAT_FREEBSD4 #ifndef _SYS_SYSPROTO_H_ struct freebsd4_sigaction_args { int sig; struct sigaction *act; struct sigaction *oact; }; #endif /* * MPSAFE */ int freebsd4_sigaction(td, uap) struct thread *td; register struct freebsd4_sigaction_args *uap; { struct sigaction act, oact; register struct sigaction *actp, *oactp; int error; actp = (uap->act != NULL) ? &act : NULL; oactp = (uap->oact != NULL) ? &oact : NULL; if (actp) { error = copyin(uap->act, actp, sizeof(act)); if (error) return (error); } error = kern_sigaction(td, uap->sig, actp, oactp, KSA_FREEBSD4); if (oactp && !error) error = copyout(oactp, uap->oact, sizeof(oact)); return (error); } #endif /* COMAPT_FREEBSD4 */ #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ #ifndef _SYS_SYSPROTO_H_ struct osigaction_args { int signum; struct osigaction *nsa; struct osigaction *osa; }; #endif /* * MPSAFE */ int osigaction(td, uap) struct thread *td; register struct osigaction_args *uap; { struct osigaction sa; struct sigaction nsa, osa; register struct sigaction *nsap, *osap; int error; if (uap->signum <= 0 || uap->signum >= ONSIG) return (EINVAL); nsap = (uap->nsa != NULL) ? &nsa : NULL; osap = (uap->osa != NULL) ? 
&osa : NULL; if (nsap) { error = copyin(uap->nsa, &sa, sizeof(sa)); if (error) return (error); nsap->sa_handler = sa.sa_handler; nsap->sa_flags = sa.sa_flags; OSIG2SIG(sa.sa_mask, nsap->sa_mask); } error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET); if (osap && !error) { sa.sa_handler = osap->sa_handler; sa.sa_flags = osap->sa_flags; SIG2OSIG(osap->sa_mask, sa.sa_mask); error = copyout(&sa, uap->osa, sizeof(sa)); } return (error); } #if !defined(__i386__) && !defined(__alpha__) /* Avoid replicating the same stub everywhere */ int osigreturn(td, uap) struct thread *td; struct osigreturn_args *uap; { return (nosys(td, (struct nosys_args *)uap)); } #endif #endif /* COMPAT_43 */ /* * Initialize signal state for process 0; * set to ignore signals that are ignored by default. */ void siginit(p) struct proc *p; { register int i; struct sigacts *ps; PROC_LOCK(p); ps = p->p_sigacts; mtx_lock(&ps->ps_mtx); for (i = 1; i <= NSIG; i++) if (sigprop(i) & SA_IGNORE && i != SIGCONT) SIGADDSET(ps->ps_sigignore, i); mtx_unlock(&ps->ps_mtx); PROC_UNLOCK(p); } /* * Reset signals for an exec of the specified process. */ void execsigs(struct proc *p) { struct sigacts *ps; int sig; struct thread *td; /* * Reset caught signals. Held signals remain held * through td_sigmask (unless they were caught, * and are now ignored by default). */ PROC_LOCK_ASSERT(p, MA_OWNED); td = FIRST_THREAD_IN_PROC(p); ps = p->p_sigacts; mtx_lock(&ps->ps_mtx); while (SIGNOTEMPTY(ps->ps_sigcatch)) { sig = sig_ffs(&ps->ps_sigcatch); SIGDELSET(ps->ps_sigcatch, sig); if (sigprop(sig) & SA_IGNORE) { if (sig != SIGCONT) SIGADDSET(ps->ps_sigignore, sig); sigqueue_delete_proc(p, sig); } ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; } /* * Reset stack state to the user stack. * Clear set of signals caught on the signal stack. */ td->td_sigstk.ss_flags = SS_DISABLE; td->td_sigstk.ss_size = 0; td->td_sigstk.ss_sp = 0; td->td_pflags &= ~TDP_ALTSTACK; /* * Reset no zombies if child dies flag as Solaris does. */ ps->ps_flag &= ~(PS_NOCLDWAIT | PS_CLDSIGIGN); if (ps->ps_sigact[_SIG_IDX(SIGCHLD)] == SIG_IGN) ps->ps_sigact[_SIG_IDX(SIGCHLD)] = SIG_DFL; mtx_unlock(&ps->ps_mtx); } /* * kern_sigprocmask() * * Manipulate signal mask. */ int kern_sigprocmask(td, how, set, oset, old) struct thread *td; int how; sigset_t *set, *oset; int old; { int error; PROC_LOCK(td->td_proc); if (oset != NULL) *oset = td->td_sigmask; error = 0; if (set != NULL) { switch (how) { case SIG_BLOCK: SIG_CANTMASK(*set); SIGSETOR(td->td_sigmask, *set); break; case SIG_UNBLOCK: SIGSETNAND(td->td_sigmask, *set); signotify(td); break; case SIG_SETMASK: SIG_CANTMASK(*set); if (old) SIGSETLO(td->td_sigmask, *set); else td->td_sigmask = *set; signotify(td); break; default: error = EINVAL; break; } } PROC_UNLOCK(td->td_proc); return (error); } /* * sigprocmask() - MP SAFE */ #ifndef _SYS_SYSPROTO_H_ struct sigprocmask_args { int how; const sigset_t *set; sigset_t *oset; }; #endif int sigprocmask(td, uap) register struct thread *td; struct sigprocmask_args *uap; { sigset_t set, oset; sigset_t *setp, *osetp; int error; setp = (uap->set != NULL) ? &set : NULL; osetp = (uap->oset != NULL) ? 
&oset : NULL; if (setp) { error = copyin(uap->set, setp, sizeof(set)); if (error) return (error); } error = kern_sigprocmask(td, uap->how, setp, osetp, 0); if (osetp && !error) { error = copyout(osetp, uap->oset, sizeof(oset)); } return (error); } #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ /* * osigprocmask() - MP SAFE */ #ifndef _SYS_SYSPROTO_H_ struct osigprocmask_args { int how; osigset_t mask; }; #endif int osigprocmask(td, uap) register struct thread *td; struct osigprocmask_args *uap; { sigset_t set, oset; int error; OSIG2SIG(uap->mask, set); error = kern_sigprocmask(td, uap->how, &set, &oset, 1); SIG2OSIG(oset, td->td_retval[0]); return (error); } #endif /* COMPAT_43 */ /* * MPSAFE */ int sigwait(struct thread *td, struct sigwait_args *uap) { ksiginfo_t ksi; sigset_t set; int error; error = copyin(uap->set, &set, sizeof(set)); if (error) { td->td_retval[0] = error; return (0); } error = kern_sigtimedwait(td, set, &ksi, NULL); if (error) { if (error == ERESTART) return (error); td->td_retval[0] = error; return (0); } error = copyout(&ksi.ksi_signo, uap->sig, sizeof(ksi.ksi_signo)); td->td_retval[0] = error; return (0); } /* * MPSAFE */ int sigtimedwait(struct thread *td, struct sigtimedwait_args *uap) { struct timespec ts; struct timespec *timeout; sigset_t set; ksiginfo_t ksi; int error; if (uap->timeout) { error = copyin(uap->timeout, &ts, sizeof(ts)); if (error) return (error); timeout = &ts; } else timeout = NULL; error = copyin(uap->set, &set, sizeof(set)); if (error) return (error); error = kern_sigtimedwait(td, set, &ksi, timeout); if (error) return (error); if (uap->info) error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t)); if (error == 0) td->td_retval[0] = ksi.ksi_signo; return (error); } /* * MPSAFE */ int sigwaitinfo(struct thread *td, struct sigwaitinfo_args *uap) { ksiginfo_t ksi; sigset_t set; int error; error = copyin(uap->set, &set, sizeof(set)); if (error) return (error); error = kern_sigtimedwait(td, set, &ksi, NULL); if (error) return (error); if (uap->info) error = copyout(&ksi.ksi_info, uap->info, sizeof(siginfo_t)); if (error == 0) td->td_retval[0] = ksi.ksi_signo; return (error); } static int kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi, struct timespec *timeout) { struct sigacts *ps; sigset_t savedmask; struct proc *p; int error, sig, hz, i, timevalid = 0; struct timespec rts, ets, ts; struct timeval tv; p = td->td_proc; error = 0; sig = 0; SIG_CANTMASK(waitset); PROC_LOCK(p); ps = p->p_sigacts; savedmask = td->td_sigmask; if (timeout) { if (timeout->tv_nsec >= 0 && timeout->tv_nsec < 1000000000) { timevalid = 1; getnanouptime(&rts); ets = rts; timespecadd(&ets, timeout); } } again: for (i = 1; i <= _SIG_MAXSIG; ++i) { if (!SIGISMEMBER(waitset, i)) continue; if (SIGISMEMBER(td->td_sigqueue.sq_signals, i)) { SIGFILLSET(td->td_sigmask); SIG_CANTMASK(td->td_sigmask); SIGDELSET(td->td_sigmask, i); mtx_lock(&ps->ps_mtx); sig = cursig(td); i = 0; mtx_unlock(&ps->ps_mtx); } else if (SIGISMEMBER(p->p_sigqueue.sq_signals, i)) { if (p->p_flag & P_SA) { p->p_flag |= P_SIGEVENT; wakeup(&p->p_siglist); } sigqueue_move(&p->p_sigqueue, &td->td_sigqueue, i); SIGFILLSET(td->td_sigmask); SIG_CANTMASK(td->td_sigmask); SIGDELSET(td->td_sigmask, i); mtx_lock(&ps->ps_mtx); sig = cursig(td); i = 0; mtx_unlock(&ps->ps_mtx); } if (sig) goto out; } if (error) goto out; /* * POSIX says this must be checked after looking for pending * signals. 
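kern_sigtimedwait() above serves sigwait(2), sigwaitinfo(2) and sigtimedwait(2). As its scan loop shows, it only picks up signals that are already pending on the thread or the process, so a userland caller normally blocks the set first and lets the signals queue. A small sketch, not part of the commit:

#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 5, 0 };		/* give up after five seconds */
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep SIGUSR1 queued, not delivered */

	sig = sigtimedwait(&set, &info, &ts);
	if (sig == -1)
		perror("sigtimedwait");		/* EAGAIN on timeout, EINTR, ... */
	else
		printf("received %d, si_code %d\n", sig, info.si_code);
	return (0);
}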
*/ if (timeout) { if (!timevalid) { error = EINVAL; goto out; } getnanouptime(&rts); if (timespeccmp(&rts, &ets, >=)) { error = EAGAIN; goto out; } ts = ets; timespecsub(&ts, &rts); TIMESPEC_TO_TIMEVAL(&tv, &ts); hz = tvtohz(&tv); } else hz = 0; td->td_sigmask = savedmask; SIGSETNAND(td->td_sigmask, waitset); signotify(td); error = msleep(&ps, &p->p_mtx, PPAUSE|PCATCH, "sigwait", hz); if (timeout) { if (error == ERESTART) { /* timeout can not be restarted. */ error = EINTR; } else if (error == EAGAIN) { /* will calculate timeout by ourself. */ error = 0; } } goto again; out: if (sig) { sig_t action; ksiginfo_init(ksi); sigqueue_get(&td->td_sigqueue, sig, ksi); ksi->ksi_signo = sig; if (ksi->ksi_code == SI_TIMER) itimer_accept(p, ksi->ksi_timerid, ksi); error = 0; mtx_lock(&ps->ps_mtx); action = ps->ps_sigact[_SIG_IDX(sig)]; mtx_unlock(&ps->ps_mtx); #ifdef KTRACE if (KTRPOINT(td, KTR_PSIG)) ktrpsig(sig, action, &td->td_sigmask, 0); #endif _STOPEVENT(p, S_SIG, sig); } td->td_sigmask = savedmask; signotify(td); PROC_UNLOCK(p); return (error); } #ifndef _SYS_SYSPROTO_H_ struct sigpending_args { sigset_t *set; }; #endif /* * MPSAFE */ int sigpending(td, uap) struct thread *td; struct sigpending_args *uap; { struct proc *p = td->td_proc; sigset_t pending; PROC_LOCK(p); pending = p->p_sigqueue.sq_signals; SIGSETOR(pending, td->td_sigqueue.sq_signals); PROC_UNLOCK(p); return (copyout(&pending, uap->set, sizeof(sigset_t))); } #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ #ifndef _SYS_SYSPROTO_H_ struct osigpending_args { int dummy; }; #endif /* * MPSAFE */ int osigpending(td, uap) struct thread *td; struct osigpending_args *uap; { struct proc *p = td->td_proc; sigset_t pending; PROC_LOCK(p); pending = p->p_sigqueue.sq_signals; SIGSETOR(pending, td->td_sigqueue.sq_signals); PROC_UNLOCK(p); SIG2OSIG(pending, td->td_retval[0]); return (0); } #endif /* COMPAT_43 */ #if defined(COMPAT_43) /* * Generalized interface signal handler, 4.3-compatible. */ #ifndef _SYS_SYSPROTO_H_ struct osigvec_args { int signum; struct sigvec *nsv; struct sigvec *osv; }; #endif /* * MPSAFE */ /* ARGSUSED */ int osigvec(td, uap) struct thread *td; register struct osigvec_args *uap; { struct sigvec vec; struct sigaction nsa, osa; register struct sigaction *nsap, *osap; int error; if (uap->signum <= 0 || uap->signum >= ONSIG) return (EINVAL); nsap = (uap->nsv != NULL) ? &nsa : NULL; osap = (uap->osv != NULL) ? 
&osa : NULL; if (nsap) { error = copyin(uap->nsv, &vec, sizeof(vec)); if (error) return (error); nsap->sa_handler = vec.sv_handler; OSIG2SIG(vec.sv_mask, nsap->sa_mask); nsap->sa_flags = vec.sv_flags; nsap->sa_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */ } error = kern_sigaction(td, uap->signum, nsap, osap, KSA_OSIGSET); if (osap && !error) { vec.sv_handler = osap->sa_handler; SIG2OSIG(osap->sa_mask, vec.sv_mask); vec.sv_flags = osap->sa_flags; vec.sv_flags &= ~SA_NOCLDWAIT; vec.sv_flags ^= SA_RESTART; error = copyout(&vec, uap->osv, sizeof(vec)); } return (error); } #ifndef _SYS_SYSPROTO_H_ struct osigblock_args { int mask; }; #endif /* * MPSAFE */ int osigblock(td, uap) register struct thread *td; struct osigblock_args *uap; { struct proc *p = td->td_proc; sigset_t set; OSIG2SIG(uap->mask, set); SIG_CANTMASK(set); PROC_LOCK(p); SIG2OSIG(td->td_sigmask, td->td_retval[0]); SIGSETOR(td->td_sigmask, set); PROC_UNLOCK(p); return (0); } #ifndef _SYS_SYSPROTO_H_ struct osigsetmask_args { int mask; }; #endif /* * MPSAFE */ int osigsetmask(td, uap) struct thread *td; struct osigsetmask_args *uap; { struct proc *p = td->td_proc; sigset_t set; OSIG2SIG(uap->mask, set); SIG_CANTMASK(set); PROC_LOCK(p); SIG2OSIG(td->td_sigmask, td->td_retval[0]); SIGSETLO(td->td_sigmask, set); signotify(td); PROC_UNLOCK(p); return (0); } #endif /* COMPAT_43 */ /* * Suspend calling thread until signal, providing mask to be set * in the meantime. */ #ifndef _SYS_SYSPROTO_H_ struct sigsuspend_args { const sigset_t *sigmask; }; #endif /* * MPSAFE */ /* ARGSUSED */ int sigsuspend(td, uap) struct thread *td; struct sigsuspend_args *uap; { sigset_t mask; int error; error = copyin(uap->sigmask, &mask, sizeof(mask)); if (error) return (error); return (kern_sigsuspend(td, mask)); } int kern_sigsuspend(struct thread *td, sigset_t mask) { struct proc *p = td->td_proc; /* * When returning from sigsuspend, we want * the old mask to be restored after the * signal handler has finished. Thus, we * save it here and mark the sigacts structure * to indicate this. */ PROC_LOCK(p); td->td_oldsigmask = td->td_sigmask; td->td_pflags |= TDP_OLDMASK; SIG_CANTMASK(mask); td->td_sigmask = mask; signotify(td); while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "pause", 0) == 0) /* void */; PROC_UNLOCK(p); /* always return EINTR rather than ERESTART... */ return (EINTR); } #ifdef COMPAT_43 /* XXX - COMPAT_FBSD3 */ /* * Compatibility sigsuspend call for old binaries. Note nonstandard calling * convention: libc stub passes mask, not pointer, to save a copyin. */ #ifndef _SYS_SYSPROTO_H_ struct osigsuspend_args { osigset_t mask; }; #endif /* * MPSAFE */ /* ARGSUSED */ int osigsuspend(td, uap) struct thread *td; struct osigsuspend_args *uap; { struct proc *p = td->td_proc; sigset_t mask; PROC_LOCK(p); td->td_oldsigmask = td->td_sigmask; td->td_pflags |= TDP_OLDMASK; OSIG2SIG(uap->mask, mask); SIG_CANTMASK(mask); SIGSETLO(td->td_sigmask, mask); signotify(td); while (msleep(&p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH, "opause", 0) == 0) /* void */; PROC_UNLOCK(p); /* always return EINTR rather than ERESTART... 
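kern_sigsuspend() above saves the current mask in td_oldsigmask, installs the caller-supplied mask, sleeps, and always reports EINTR. The matching userland idiom (not part of the commit) blocks the signal, tests a flag, then opens the mask atomically while waiting:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_chld;

static void
on_chld(int sig)
{
	(void)sig;
	got_chld = 1;
}

int
main(void)
{
	sigset_t block, waitmask;

	signal(SIGCHLD, on_chld);
	sigemptyset(&block);
	sigaddset(&block, SIGCHLD);
	sigprocmask(SIG_BLOCK, &block, &waitmask);	/* waitmask = previous mask */

	/* (a real program would fork a child here) */
	while (!got_chld)
		sigsuspend(&waitmask);	/* returns -1/EINTR once a handler has run */

	printf("SIGCHLD arrived\n");
	return (0);
}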
*/ return (EINTR); } #endif /* COMPAT_43 */ #if defined(COMPAT_43) #ifndef _SYS_SYSPROTO_H_ struct osigstack_args { struct sigstack *nss; struct sigstack *oss; }; #endif /* * MPSAFE */ /* ARGSUSED */ int osigstack(td, uap) struct thread *td; register struct osigstack_args *uap; { struct sigstack nss, oss; int error = 0; if (uap->nss != NULL) { error = copyin(uap->nss, &nss, sizeof(nss)); if (error) return (error); } oss.ss_sp = td->td_sigstk.ss_sp; oss.ss_onstack = sigonstack(cpu_getstack(td)); if (uap->nss != NULL) { td->td_sigstk.ss_sp = nss.ss_sp; td->td_sigstk.ss_size = 0; td->td_sigstk.ss_flags |= nss.ss_onstack & SS_ONSTACK; td->td_pflags |= TDP_ALTSTACK; } if (uap->oss != NULL) error = copyout(&oss, uap->oss, sizeof(oss)); return (error); } #endif /* COMPAT_43 */ #ifndef _SYS_SYSPROTO_H_ struct sigaltstack_args { stack_t *ss; stack_t *oss; }; #endif /* * MPSAFE */ /* ARGSUSED */ int sigaltstack(td, uap) struct thread *td; register struct sigaltstack_args *uap; { stack_t ss, oss; int error; if (uap->ss != NULL) { error = copyin(uap->ss, &ss, sizeof(ss)); if (error) return (error); } error = kern_sigaltstack(td, (uap->ss != NULL) ? &ss : NULL, (uap->oss != NULL) ? &oss : NULL); if (error) return (error); if (uap->oss != NULL) error = copyout(&oss, uap->oss, sizeof(stack_t)); return (error); } int kern_sigaltstack(struct thread *td, stack_t *ss, stack_t *oss) { struct proc *p = td->td_proc; int oonstack; oonstack = sigonstack(cpu_getstack(td)); if (oss != NULL) { *oss = td->td_sigstk; oss->ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; } if (ss != NULL) { if (oonstack) return (EPERM); if ((ss->ss_flags & ~SS_DISABLE) != 0) return (EINVAL); if (!(ss->ss_flags & SS_DISABLE)) { if (ss->ss_size < p->p_sysent->sv_minsigstksz) return (ENOMEM); td->td_sigstk = *ss; td->td_pflags |= TDP_ALTSTACK; } else { td->td_pflags &= ~TDP_ALTSTACK; } } return (0); } /* * Common code for kill process group/broadcast kill. * cp is calling process. */ static int killpg1(td, sig, pgid, all) register struct thread *td; int sig, pgid, all; { register struct proc *p; struct pgrp *pgrp; int nfound = 0; if (all) { /* * broadcast */ sx_slock(&allproc_lock); LIST_FOREACH(p, &allproc, p_list) { PROC_LOCK(p); if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || p == td->td_proc) { PROC_UNLOCK(p); continue; } if (p_cansignal(td, p, sig) == 0) { nfound++; if (sig) psignal(p, sig); } PROC_UNLOCK(p); } sx_sunlock(&allproc_lock); } else { sx_slock(&proctree_lock); if (pgid == 0) { /* * zero pgid means send to my process group. */ pgrp = td->td_proc->p_pgrp; PGRP_LOCK(pgrp); } else { pgrp = pgfind(pgid); if (pgrp == NULL) { sx_sunlock(&proctree_lock); return (ESRCH); } } sx_sunlock(&proctree_lock); LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { PROC_LOCK(p); if (p->p_pid <= 1 || p->p_flag & P_SYSTEM) { PROC_UNLOCK(p); continue; } if (p_cansignal(td, p, sig) == 0) { nfound++; if (sig) psignal(p, sig); } PROC_UNLOCK(p); } PGRP_UNLOCK(pgrp); } return (nfound ? 
0 : ESRCH); } #ifndef _SYS_SYSPROTO_H_ struct kill_args { int pid; int signum; }; #endif /* * MPSAFE */ /* ARGSUSED */ int kill(td, uap) register struct thread *td; register struct kill_args *uap; { register struct proc *p; int error; if ((u_int)uap->signum > _SIG_MAXSIG) return (EINVAL); if (uap->pid > 0) { /* kill single process */ if ((p = pfind(uap->pid)) == NULL) { if ((p = zpfind(uap->pid)) == NULL) return (ESRCH); } error = p_cansignal(td, p, uap->signum); if (error == 0 && uap->signum) psignal(p, uap->signum); PROC_UNLOCK(p); return (error); } switch (uap->pid) { case -1: /* broadcast signal */ return (killpg1(td, uap->signum, 0, 1)); case 0: /* signal own process group */ return (killpg1(td, uap->signum, 0, 0)); default: /* negative explicit process group */ return (killpg1(td, uap->signum, -uap->pid, 0)); } /* NOTREACHED */ } #if defined(COMPAT_43) #ifndef _SYS_SYSPROTO_H_ struct okillpg_args { int pgid; int signum; }; #endif /* * MPSAFE */ /* ARGSUSED */ int okillpg(td, uap) struct thread *td; register struct okillpg_args *uap; { if ((u_int)uap->signum > _SIG_MAXSIG) return (EINVAL); return (killpg1(td, uap->signum, uap->pgid, 0)); } #endif /* COMPAT_43 */ #ifndef _SYS_SYSPROTO_H_ struct sigqueue_args { pid_t pid; int signum; /* union sigval */ void *value; }; #endif int sigqueue(struct thread *td, struct sigqueue_args *uap) { ksiginfo_t ksi; struct proc *p; int error; if ((u_int)uap->signum > _SIG_MAXSIG) return (EINVAL); /* * Specification says sigqueue can only send signal to * single process. */ if (uap->pid <= 0) return (EINVAL); if ((p = pfind(uap->pid)) == NULL) { if ((p = zpfind(uap->pid)) == NULL) return (ESRCH); } error = p_cansignal(td, p, uap->signum); if (error == 0 && uap->signum != 0) { ksiginfo_init(&ksi); ksi.ksi_signo = uap->signum; ksi.ksi_code = SI_QUEUE; ksi.ksi_pid = td->td_proc->p_pid; ksi.ksi_uid = td->td_ucred->cr_ruid; ksi.ksi_value.sigval_ptr = uap->value; - error = psignal_info(p, &ksi); + error = tdsignal(p, NULL, ksi.ksi_signo, &ksi); } PROC_UNLOCK(p); return (error); } /* * Send a signal to a process group. */ void gsignal(pgid, sig) int pgid, sig; { struct pgrp *pgrp; if (pgid != 0) { sx_slock(&proctree_lock); pgrp = pgfind(pgid); sx_sunlock(&proctree_lock); if (pgrp != NULL) { pgsignal(pgrp, sig, 0); PGRP_UNLOCK(pgrp); } } } /* * Send a signal to a process group. If checktty is 1, * limit to members which have a controlling terminal. */ void pgsignal(pgrp, sig, checkctty) struct pgrp *pgrp; int sig, checkctty; { register struct proc *p; if (pgrp) { PGRP_LOCK_ASSERT(pgrp, MA_OWNED); LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { PROC_LOCK(p); if (checkctty == 0 || p->p_flag & P_CONTROLT) psignal(p, sig); PROC_UNLOCK(p); } } } /* * Send a signal caused by a trap to the current thread. * If it will be caught immediately, deliver it with correct code. * Otherwise, post it normally. * * MPSAFE */ void trapsignal(struct thread *td, ksiginfo_t *ksi) { struct sigacts *ps; struct proc *p; int error; int sig; int code; p = td->td_proc; sig = ksi->ksi_signo; code = ksi->ksi_code; KASSERT(_SIG_VALID(sig), ("invalid signal")); if (td->td_pflags & TDP_SA) { if (td->td_mailbox == NULL) thread_user_enter(td); PROC_LOCK(p); SIGDELSET(td->td_sigmask, sig); mtx_lock_spin(&sched_lock); /* * Force scheduling an upcall, so UTS has chance to * process the signal before thread runs again in * userland. 
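The sigqueue() syscall above validates the target, builds a ksiginfo with ksi_code = SI_QUEUE and the caller's value, and hands it to tdsignal(). From userland it is reached through the POSIX sigqueue() interface; a minimal sender, assuming the libc wrapper has the standard prototype (not part of the commit):

#include <sys/types.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char **argv)
{
	union sigval sv;
	pid_t pid;

	if (argc != 2) {
		fprintf(stderr, "usage: %s pid\n", argv[0]);
		return (1);
	}
	pid = (pid_t)atol(argv[1]);
	sv.sival_int = 42;		/* shows up as si_value in an SA_SIGINFO handler */
	if (sigqueue(pid, SIGUSR1, sv) == -1) {
		perror("sigqueue");
		return (1);
	}
	return (0);
}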
*/ if (td->td_upcall) td->td_upcall->ku_flags |= KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } else { PROC_LOCK(p); } ps = p->p_sigacts; mtx_lock(&ps->ps_mtx); if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) && !SIGISMEMBER(td->td_sigmask, sig)) { p->p_stats->p_ru.ru_nsignals++; #ifdef KTRACE if (KTRPOINT(curthread, KTR_PSIG)) ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)], &td->td_sigmask, code); #endif if (!(td->td_pflags & TDP_SA)) (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], ksi, &td->td_sigmask); else if (td->td_mailbox == NULL) { mtx_unlock(&ps->ps_mtx); /* UTS caused a sync signal */ p->p_code = code; /* XXX for core dump/debugger */ p->p_sig = sig; /* XXX to verify code */ sigexit(td, sig); } else { mtx_unlock(&ps->ps_mtx); SIGADDSET(td->td_sigmask, sig); PROC_UNLOCK(p); error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig, sizeof(siginfo_t)); PROC_LOCK(p); /* UTS memory corrupted */ if (error) sigexit(td, SIGSEGV); mtx_lock(&ps->ps_mtx); } SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); if (!SIGISMEMBER(ps->ps_signodefer, sig)) SIGADDSET(td->td_sigmask, sig); if (SIGISMEMBER(ps->ps_sigreset, sig)) { /* * See kern_sigaction() for origin of this code. */ SIGDELSET(ps->ps_sigcatch, sig); if (sig != SIGCONT && sigprop(sig) & SA_IGNORE) SIGADDSET(ps->ps_sigignore, sig); ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; } mtx_unlock(&ps->ps_mtx); } else { mtx_unlock(&ps->ps_mtx); p->p_code = code; /* XXX for core dump/debugger */ p->p_sig = sig; /* XXX to verify code */ - tdsignal(td, sig, ksi, SIGTARGET_TD); + tdsignal(p, td, sig, ksi); } PROC_UNLOCK(p); } static struct thread * sigtd(struct proc *p, int sig, int prop) { struct thread *td, *signal_td; PROC_LOCK_ASSERT(p, MA_OWNED); /* * Check if current thread can handle the signal without * switching conetxt to another thread. */ if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig)) return (curthread); signal_td = NULL; mtx_lock_spin(&sched_lock); FOREACH_THREAD_IN_PROC(p, td) { if (!SIGISMEMBER(td->td_sigmask, sig)) { signal_td = td; break; } } if (signal_td == NULL) signal_td = FIRST_THREAD_IN_PROC(p); mtx_unlock_spin(&sched_lock); return (signal_td); } /* * Send the signal to the process. If the signal has an action, the action * is usually performed by the target process rather than the caller; we add * the signal to the set of pending signals for the process. * * Exceptions: * o When a stop signal is sent to a sleeping process that takes the * default action, the process is stopped without awakening it. * o SIGCONT restarts stopped processes (or puts them back to sleep) * regardless of the signal action (eg, blocked or ignored). * * Other ignored signals are discarded immediately. * * MPSAFE */ void psignal(struct proc *p, int sig) { - (void) psignal_common(p, sig, NULL); + (void) tdsignal(p, NULL, sig, NULL); } int -psignal_info(struct proc *p, ksiginfo_t *ksi) +psignal_event(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi) { - return (psignal_common(p, ksi->ksi_signo, ksi)); -} + struct thread *td = NULL; -static int -psignal_common(struct proc *p, int sig, ksiginfo_t *ksi) -{ - struct thread *td; - int prop; + PROC_LOCK_ASSERT(p, MA_OWNED); - if (!_SIG_VALID(sig)) - panic("psignal(): invalid signal"); + KASSERT(!KSI_ONQ(ksi), ("psignal_event: ksi on queue")); - PROC_LOCK_ASSERT(p, MA_OWNED); /* - * IEEE Std 1003.1-2001: return success when killing a zombie. + * ksi_code and other fields should be set before + * calling this function. 
*/ - if (p->p_state == PRS_ZOMBIE) - return (0); - prop = sigprop(sig); - - /* - * Find a thread to deliver the signal to. - */ - td = sigtd(p, sig, prop); - - return (tdsignal(td, sig, ksi, SIGTARGET_P)); + ksi->ksi_signo = sigev->sigev_signo; + ksi->ksi_value = sigev->sigev_value; + if (sigev->sigev_notify == SIGEV_THREAD_ID) { + td = thread_find(p, sigev->sigev_notify_thread_id); + if (td == NULL) + return (ESRCH); + } + return (tdsignal(p, td, ksi->ksi_signo, ksi)); } /* * MPSAFE */ int -tdsignal(struct thread *td, int sig, ksiginfo_t *ksi, sigtarget_t target) +tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) { sigset_t saved; - struct proc *p = td->td_proc; int ret; if (p->p_flag & P_SA) saved = p->p_sigqueue.sq_signals; - ret = do_tdsignal(td, sig, ksi, target); + ret = do_tdsignal(p, td, sig, ksi); if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) { if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) { /* pending set changed */ p->p_flag |= P_SIGEVENT; wakeup(&p->p_siglist); } } return (ret); } static int -do_tdsignal(struct thread *td, int sig, ksiginfo_t *ksi, sigtarget_t target) +do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi) { - struct proc *p; sig_t action; sigqueue_t *sigqueue; struct thread *td0; int prop; struct sigacts *ps; int ret = 0; + PROC_LOCK_ASSERT(p, MA_OWNED); + if (!_SIG_VALID(sig)) panic("do_tdsignal(): invalid signal"); - p = td->td_proc; - ps = p->p_sigacts; + KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("do_tdsignal: ksi on queue")); - PROC_LOCK_ASSERT(p, MA_OWNED); - KNOTE_LOCKED(&p->p_klist, NOTE_SIGNAL | sig); + /* + * IEEE Std 1003.1-2001: return success when killing a zombie. + */ + if (p->p_state == PRS_ZOMBIE) { + if (ksi && (ksi->ksi_flags & KSI_INS)) + ksiginfo_tryfree(ksi); + return (ret); + } + ps = p->p_sigacts; + KNOTE_LOCKED(&p->p_klist, NOTE_SIGNAL | sig); prop = sigprop(sig); /* * If the signal is blocked and not destined for this thread, then * assign it to the process so that we can find it later in the first * thread that unblocks it. Otherwise, assign it to this thread now. */ - if (target == SIGTARGET_TD) { - sigqueue = &td->td_sigqueue; - } else { - if (!SIGISMEMBER(td->td_sigmask, sig)) - sigqueue = &td->td_sigqueue; - else + if (td == NULL) { + td = sigtd(p, sig, prop); + if (SIGISMEMBER(td->td_sigmask, sig)) sigqueue = &p->p_sigqueue; + else + sigqueue = &td->td_sigqueue; + } else { + KASSERT(td->td_proc == p, ("invalid thread")); + sigqueue = &td->td_sigqueue; } /* * If the signal is being ignored, * or process is exiting or thread is exiting, * then we forget about it immediately. * (Note: we don't set SIGCONT in ps_sigignore, * and if it is set to SIG_IGN, * action will be SIG_DFL here.) */ mtx_lock(&ps->ps_mtx); if (SIGISMEMBER(ps->ps_sigignore, sig) || (p->p_flag & P_WEXIT)) { mtx_unlock(&ps->ps_mtx); + if (ksi && (ksi->ksi_flags & KSI_INS)) + ksiginfo_tryfree(ksi); return (ret); } if (SIGISMEMBER(td->td_sigmask, sig)) action = SIG_HOLD; else if (SIGISMEMBER(ps->ps_sigcatch, sig)) action = SIG_CATCH; else action = SIG_DFL; mtx_unlock(&ps->ps_mtx); if (prop & SA_CONT) sigqueue_delete_stopmask_proc(p); else if (prop & SA_STOP) { /* * If sending a tty stop signal to a member of an orphaned * process group, discard the signal here if the action * is default; don't stop the process below if sleeping, * and don't clear any pending SIGCONT. 
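The hunks above and below replace the old tdsignal(td, sig, ksi, target)/psignal_common() pair with a single tdsignal(p, td, sig, ksi): a NULL td means process-directed delivery, letting sigtd() pick a thread, while a non-NULL td must belong to p and receives the signal directly. A sketch of the two call shapes as this revision uses them (kernel context, proc lock held; illustrative only, not part of the commit):

static void
post_signal_sketch(struct proc *p, struct thread *td, ksiginfo_t *ksi)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);

	/* Process-directed, no siginfo: what psignal(p, SIGTERM) now expands to. */
	(void)tdsignal(p, NULL, SIGTERM, NULL);

	/* Thread-directed with siginfo: the trapsignal()/sigqueue() shape. */
	(void)tdsignal(p, td, ksi->ksi_signo, ksi);
}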
*/ if ((prop & SA_TTYSTOP) && (p->p_pgrp->pg_jobc == 0) && - (action == SIG_DFL)) - return (ret); + (action == SIG_DFL)) { + if (ksi && (ksi->ksi_flags & KSI_INS)) + ksiginfo_tryfree(ksi); + return (ret); + } sigqueue_delete_proc(p, SIGCONT); p->p_flag &= ~P_CONTINUED; } ret = sigqueue_add(sigqueue, sig, ksi); if (ret != 0) return (ret); - signotify(td); /* uses schedlock */ + signotify(td); /* * Defer further processing for signals which are held, * except that stopped processes must be continued by SIGCONT. */ if (action == SIG_HOLD && !((prop & SA_CONT) && (p->p_flag & P_STOPPED_SIG))) return (ret); /* * SIGKILL: Remove procfs STOPEVENTs. */ if (sig == SIGKILL) { /* from procfs_ioctl.c: PIOCBIC */ p->p_stops = 0; /* from procfs_ioctl.c: PIOCCONT */ p->p_step = 0; wakeup(&p->p_step); } /* * Some signals have a process-wide effect and a per-thread * component. Most processing occurs when the process next * tries to cross the user boundary, however there are some * times when processing needs to be done immediatly, such as * waking up threads so that they can cross the user boundary. * We try do the per-process part here. */ if (P_SHOULDSTOP(p)) { /* * The process is in stopped mode. All the threads should be * either winding down or already on the suspended queue. */ if (p->p_flag & P_TRACED) { /* * The traced process is already stopped, * so no further action is necessary. * No signal can restart us. */ goto out; } if (sig == SIGKILL) { /* * SIGKILL sets process running. * It will die elsewhere. * All threads must be restarted. */ p->p_flag &= ~P_STOPPED_SIG; goto runfast; } if (prop & SA_CONT) { /* * If SIGCONT is default (or ignored), we continue the * process but don't leave the signal in sigqueue as * it has no further action. If SIGCONT is held, we * continue the process and leave the signal in * sigqueue. If the process catches SIGCONT, let it * handle the signal itself. If it isn't waiting on * an event, it goes back to run state. * Otherwise, process goes back to sleep state. */ p->p_flag &= ~P_STOPPED_SIG; p->p_flag |= P_CONTINUED; if (action == SIG_DFL) { sigqueue_delete(sigqueue, sig); } else if (action == SIG_CATCH) { /* * The process wants to catch it so it needs * to run at least one thread, but which one? * It would seem that the answer would be to * run an upcall in the next KSE to run, and * deliver the signal that way. In a NON KSE * process, we need to make sure that the * single thread is runnable asap. * XXXKSE for now however, make them all run. */ goto runfast; } /* * The signal is not ignored or caught. */ mtx_lock_spin(&sched_lock); thread_unsuspend(p); mtx_unlock_spin(&sched_lock); goto out; } if (prop & SA_STOP) { /* * Already stopped, don't need to stop again * (If we did the shell could get confused). * Just make sure the signal STOP bit set. */ p->p_flag |= P_STOPPED_SIG; sigqueue_delete(sigqueue, sig); goto out; } /* * All other kinds of signals: * If a thread is sleeping interruptibly, simulate a * wakeup so that when it is continued it will be made * runnable and can look at the signal. However, don't make * the PROCESS runnable, leave it stopped. * It may run a bit until it hits a thread_suspend_check(). */ mtx_lock_spin(&sched_lock); if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) sleepq_abort(td); mtx_unlock_spin(&sched_lock); goto out; /* * Mutexes are short lived. Threads waiting on them will * hit thread_suspend_check() soon. 
*/ } else if (p->p_state == PRS_NORMAL) { if (p->p_flag & P_TRACED || action == SIG_CATCH) { mtx_lock_spin(&sched_lock); tdsigwakeup(td, sig, action); mtx_unlock_spin(&sched_lock); goto out; } MPASS(action == SIG_DFL); if (prop & SA_STOP) { if (p->p_flag & P_PPWAIT) goto out; p->p_flag |= P_STOPPED_SIG; p->p_xstat = sig; p->p_xthread = td; mtx_lock_spin(&sched_lock); FOREACH_THREAD_IN_PROC(p, td0) { if (TD_IS_SLEEPING(td0) && (td0->td_flags & TDF_SINTR) && !TD_IS_SUSPENDED(td0)) { thread_suspend_one(td0); } else if (td != td0) { td0->td_flags |= TDF_ASTPENDING; } } thread_stopped(p); if (p->p_numthreads == p->p_suspcount) { mtx_unlock_spin(&sched_lock); sigqueue_delete_proc(p, p->p_xstat); } else mtx_unlock_spin(&sched_lock); goto out; } else goto runfast; /* NOTREACHED */ } else { /* Not in "NORMAL" state. discard the signal. */ sigqueue_delete(sigqueue, sig); goto out; } /* * The process is not stopped so we need to apply the signal to all the * running threads. */ runfast: mtx_lock_spin(&sched_lock); tdsigwakeup(td, sig, action); thread_unsuspend(p); mtx_unlock_spin(&sched_lock); out: /* If we jump here, sched_lock should not be owned. */ mtx_assert(&sched_lock, MA_NOTOWNED); return (ret); } /* * The force of a signal has been directed against a single * thread. We need to see what we can do about knocking it * out of any sleep it may be in etc. */ static void tdsigwakeup(struct thread *td, int sig, sig_t action) { struct proc *p = td->td_proc; register int prop; PROC_LOCK_ASSERT(p, MA_OWNED); mtx_assert(&sched_lock, MA_OWNED); prop = sigprop(sig); /* * Bring the priority of a thread up if we want it to get * killed in this lifetime. */ if (action == SIG_DFL && (prop & SA_KILL)) { if (p->p_nice > 0) sched_nice(td->td_proc, 0); if (td->td_priority > PUSER) sched_prio(td, PUSER); } if (TD_ON_SLEEPQ(td)) { /* * If thread is sleeping uninterruptibly * we can't interrupt the sleep... the signal will * be noticed when the process returns through * trap() or syscall(). */ if ((td->td_flags & TDF_SINTR) == 0) return; /* * If SIGCONT is default (or ignored) and process is * asleep, we are finished; the process should not * be awakened. */ if ((prop & SA_CONT) && action == SIG_DFL) { mtx_unlock_spin(&sched_lock); sigqueue_delete(&p->p_sigqueue, sig); /* * It may be on either list in this state. * Remove from both for now. */ sigqueue_delete(&td->td_sigqueue, sig); mtx_lock_spin(&sched_lock); return; } /* * Give low priority threads a better chance to run. */ if (td->td_priority > PUSER) sched_prio(td, PUSER); sleepq_abort(td); } else { /* * Other states do nothing with the signal immediately, * other than kicking ourselves if we are running. * It will either never be noticed, or noticed very soon. */ #ifdef SMP if (TD_IS_RUNNING(td) && td != curthread) forward_signal(td); #endif } } int ptracestop(struct thread *td, int sig) { struct proc *p = td->td_proc; struct thread *td0; PROC_LOCK_ASSERT(p, MA_OWNED); WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.mtx_object, "Stopping for traced signal"); mtx_lock_spin(&sched_lock); td->td_flags |= TDF_XSIG; mtx_unlock_spin(&sched_lock); td->td_xsig = sig; while ((p->p_flag & P_TRACED) && (td->td_flags & TDF_XSIG)) { if (p->p_flag & P_SINGLE_EXIT) { mtx_lock_spin(&sched_lock); td->td_flags &= ~TDF_XSIG; mtx_unlock_spin(&sched_lock); return (sig); } /* * Just make wait() to work, the last stopped thread * will win. 
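The PRS_NORMAL/SA_STOP path above is what userland observes through the wait(2) family: the stopping signal is recorded in p_xstat and a later SIGCONT sets P_CONTINUED. A small illustrative sketch, assuming waitpid() on the target system reports WUNTRACED and WCONTINUED status changes:

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid;
	int status;

	if ((pid = fork()) == 0)
		for (;;)
			pause();		/* child just waits for signals */
	sleep(1);
	kill(pid, SIGSTOP);			/* default SA_STOP action */
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));
	kill(pid, SIGCONT);			/* kernel sets P_CONTINUED */
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("child continued\n");
	kill(pid, SIGKILL);
	waitpid(pid, NULL, 0);
	return (0);
}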
*/ p->p_xstat = sig; p->p_xthread = td; p->p_flag |= (P_STOPPED_SIG|P_STOPPED_TRACE); mtx_lock_spin(&sched_lock); FOREACH_THREAD_IN_PROC(p, td0) { if (TD_IS_SLEEPING(td0) && (td0->td_flags & TDF_SINTR) && !TD_IS_SUSPENDED(td0)) { thread_suspend_one(td0); } else if (td != td0) { td0->td_flags |= TDF_ASTPENDING; } } stopme: thread_stopped(p); thread_suspend_one(td); PROC_UNLOCK(p); DROP_GIANT(); mi_switch(SW_VOL, NULL); mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); PROC_LOCK(p); if (!(p->p_flag & P_TRACED)) break; if (td->td_flags & TDF_DBSUSPEND) { if (p->p_flag & P_SINGLE_EXIT) break; mtx_lock_spin(&sched_lock); goto stopme; } } return (td->td_xsig); } /* * If the current process has received a signal (should be caught or cause * termination, should interrupt current syscall), return the signal number. * Stop signals with default action are processed immediately, then cleared; * they aren't returned. This is checked after each entry to the system for * a syscall or trap (though this can usually be done without calling issignal * by checking the pending signal masks in cursig.) The normal call * sequence is * * while (sig = cursig(curthread)) * postsig(sig); */ static int issignal(td) struct thread *td; { struct proc *p; struct sigacts *ps; sigset_t sigpending; int sig, prop, newsig; struct thread *td0; p = td->td_proc; ps = p->p_sigacts; mtx_assert(&ps->ps_mtx, MA_OWNED); PROC_LOCK_ASSERT(p, MA_OWNED); for (;;) { int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG); sigpending = td->td_sigqueue.sq_signals; SIGSETNAND(sigpending, td->td_sigmask); if (p->p_flag & P_PPWAIT) SIG_STOPSIGMASK(sigpending); if (SIGISEMPTY(sigpending)) /* no signal to send */ return (0); sig = sig_ffs(&sigpending); if (p->p_stops & S_SIG) { mtx_unlock(&ps->ps_mtx); stopevent(p, S_SIG, sig); mtx_lock(&ps->ps_mtx); } /* * We should see pending but ignored signals * only if P_TRACED was on when they were posted. */ if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) { sigqueue_delete(&td->td_sigqueue, sig); if (td->td_pflags & TDP_SA) SIGADDSET(td->td_sigmask, sig); continue; } if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) { /* * If traced, always stop. */ mtx_unlock(&ps->ps_mtx); newsig = ptracestop(td, sig); mtx_lock(&ps->ps_mtx); if (td->td_pflags & TDP_SA) SIGADDSET(td->td_sigmask, sig); if (sig != newsig) { /* * clear old signal. * XXX shrug off debugger, it causes siginfo to * be thrown away. */ sigqueue_delete(&td->td_sigqueue, sig); /* * If parent wants us to take the signal, * then it will leave it in p->p_xstat; * otherwise we just look for signals again. */ if (newsig == 0) continue; sig = newsig; /* * Put the new signal into td_sigqueue. If the * signal is being masked, look for other signals. */ SIGADDSET(td->td_sigqueue.sq_signals, sig); if (td->td_pflags & TDP_SA) SIGDELSET(td->td_sigmask, sig); if (SIGISMEMBER(td->td_sigmask, sig)) continue; signotify(td); } /* * If the traced bit got turned off, go back up * to the top to rescan signals. This ensures * that p_sig* and p_sigact are consistent. */ if ((p->p_flag & P_TRACED) == 0) continue; } prop = sigprop(sig); /* * Decide whether the signal should be returned. * Return the signal's number, or fall through * to clear it from the pending mask. */ switch ((intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) { case (intptr_t)SIG_DFL: /* * Don't take default actions on system processes. */ if (p->p_pid <= 1) { #ifdef DIAGNOSTIC /* * Are you sure you want to ignore SIGSEGV * in init? 
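ptracestop() parks the traced thread and later resumes with whatever signal the debugger left in p_xstat, which issignal() then substitutes for the original one. A hedged userland sketch of that substitution using ptrace(2) (PT_ATTACH/PT_CONTINUE, most error handling omitted):

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid;
	int status;

	if ((pid = fork()) == 0)
		for (;;)
			pause();
	sleep(1);
	if (ptrace(PT_ATTACH, pid, (caddr_t)0, 0) == -1) {
		perror("PT_ATTACH");
		return (1);
	}
	waitpid(pid, &status, 0);		/* child stopped under trace */
	/* Resume it, but substitute SIGTERM for the stopping signal. */
	ptrace(PT_CONTINUE, pid, (caddr_t)1, SIGTERM);
	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("child terminated by signal %d\n", WTERMSIG(status));
	return (0);
}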
XXX */ printf("Process (pid %lu) got signal %d\n", (u_long)p->p_pid, sig); #endif break; /* == ignore */ } /* * If there is a pending stop signal to process * with default action, stop here, * then clear the signal. However, * if process is member of an orphaned * process group, ignore tty stop signals. */ if (prop & SA_STOP) { if (p->p_flag & P_TRACED || (p->p_pgrp->pg_jobc == 0 && prop & SA_TTYSTOP)) break; /* == ignore */ mtx_unlock(&ps->ps_mtx); WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.mtx_object, "Catching SIGSTOP"); p->p_flag |= P_STOPPED_SIG; p->p_xstat = sig; p->p_xthread = td; mtx_lock_spin(&sched_lock); FOREACH_THREAD_IN_PROC(p, td0) { if (TD_IS_SLEEPING(td0) && (td0->td_flags & TDF_SINTR) && !TD_IS_SUSPENDED(td0)) { thread_suspend_one(td0); } else if (td != td0) { td0->td_flags |= TDF_ASTPENDING; } } thread_stopped(p); thread_suspend_one(td); PROC_UNLOCK(p); DROP_GIANT(); mi_switch(SW_INVOL, NULL); mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); PROC_LOCK(p); mtx_lock(&ps->ps_mtx); break; } else if (prop & SA_IGNORE) { /* * Except for SIGCONT, shouldn't get here. * Default action is to ignore; drop it. */ break; /* == ignore */ } else return (sig); /*NOTREACHED*/ case (intptr_t)SIG_IGN: /* * Masking above should prevent us ever trying * to take action on an ignored signal other * than SIGCONT, unless process is traced. */ if ((prop & SA_CONT) == 0 && (p->p_flag & P_TRACED) == 0) printf("issignal\n"); break; /* == ignore */ default: /* * This signal has an action, let * postsig() process it. */ return (sig); } sigqueue_delete(&td->td_sigqueue, sig); /* take the signal! */ } /* NOTREACHED */ } /* * MPSAFE */ void thread_stopped(struct proc *p) { struct proc *p1 = curthread->td_proc; struct sigacts *ps; int n; PROC_LOCK_ASSERT(p, MA_OWNED); mtx_assert(&sched_lock, MA_OWNED); n = p->p_suspcount; if (p == p1) n++; if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) { mtx_unlock_spin(&sched_lock); p->p_flag &= ~P_WAITED; PROC_LOCK(p->p_pptr); /* * Wake up parent sleeping in kern_wait(), also send * SIGCHLD to parent, but SIGCHLD does not guarantee * that parent will awake, because parent may masked * the signal. */ p->p_pptr->p_flag |= P_STATCHILD; wakeup(p->p_pptr); ps = p->p_pptr->p_sigacts; mtx_lock(&ps->ps_mtx); if ((ps->ps_flag & PS_NOCLDSTOP) == 0) { mtx_unlock(&ps->ps_mtx); psignal(p->p_pptr, SIGCHLD); } else mtx_unlock(&ps->ps_mtx); PROC_UNLOCK(p->p_pptr); mtx_lock_spin(&sched_lock); } } /* * Take the action for the specified signal * from the current set of pending signals. */ void postsig(sig) register int sig; { struct thread *td = curthread; register struct proc *p = td->td_proc; struct sigacts *ps; sig_t action; ksiginfo_t ksi; sigset_t returnmask; int code; KASSERT(sig != 0, ("postsig")); PROC_LOCK_ASSERT(p, MA_OWNED); ps = p->p_sigacts; mtx_assert(&ps->ps_mtx, MA_OWNED); ksiginfo_init(&ksi); sigqueue_get(&td->td_sigqueue, sig, &ksi); ksi.ksi_signo = sig; if (ksi.ksi_code == SI_TIMER) itimer_accept(p, ksi.ksi_timerid, &ksi); action = ps->ps_sigact[_SIG_IDX(sig)]; #ifdef KTRACE if (KTRPOINT(td, KTR_PSIG)) ktrpsig(sig, action, td->td_pflags & TDP_OLDMASK ? &td->td_oldsigmask : &td->td_sigmask, 0); #endif if (p->p_stops & S_SIG) { mtx_unlock(&ps->ps_mtx); stopevent(p, S_SIG, sig); mtx_lock(&ps->ps_mtx); } if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) { /* * Default action, where the default is to kill * the process. (Other cases were ignored above.) 
*/ mtx_unlock(&ps->ps_mtx); sigexit(td, sig); /* NOTREACHED */ } else { if (td->td_pflags & TDP_SA) { if (sig == SIGKILL) { mtx_unlock(&ps->ps_mtx); sigexit(td, sig); } } /* * If we get here, the signal must be caught. */ KASSERT(action != SIG_IGN && !SIGISMEMBER(td->td_sigmask, sig), ("postsig action")); /* * Set the new mask value and also defer further * occurrences of this signal. * * Special case: user has done a sigsuspend. Here the * current mask is not of interest, but rather the * mask from before the sigsuspend is what we want * restored after the signal processing is completed. */ if (td->td_pflags & TDP_OLDMASK) { returnmask = td->td_oldsigmask; td->td_pflags &= ~TDP_OLDMASK; } else returnmask = td->td_sigmask; SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]); if (!SIGISMEMBER(ps->ps_signodefer, sig)) SIGADDSET(td->td_sigmask, sig); if (SIGISMEMBER(ps->ps_sigreset, sig)) { /* * See kern_sigaction() for origin of this code. */ SIGDELSET(ps->ps_sigcatch, sig); if (sig != SIGCONT && sigprop(sig) & SA_IGNORE) SIGADDSET(ps->ps_sigignore, sig); ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL; } p->p_stats->p_ru.ru_nsignals++; if (p->p_sig != sig) { code = 0; } else { code = p->p_code; p->p_code = 0; p->p_sig = 0; } if (td->td_pflags & TDP_SA) thread_signal_add(curthread, &ksi); else (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask); } } /* * Kill the current process for stated reason. */ void killproc(p, why) struct proc *p; char *why; { PROC_LOCK_ASSERT(p, MA_OWNED); CTR3(KTR_PROC, "killproc: proc %p (pid %d, %s)", p, p->p_pid, p->p_comm); log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid, p->p_comm, p->p_ucred ? p->p_ucred->cr_uid : -1, why); psignal(p, SIGKILL); } /* * Force the current process to exit with the specified signal, dumping core * if appropriate. We bypass the normal tests for masked and caught signals, * allowing unrecoverable failures to terminate the process without changing * signal state. Mark the accounting record with the signal termination. * If dumping core, save the signal number for the debugger. Calls exit and * does not return. * * MPSAFE */ void sigexit(td, sig) struct thread *td; int sig; { struct proc *p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); p->p_acflag |= AXSIG; /* * We must be single-threading to generate a core dump. This * ensures that the registers in the core file are up-to-date. * Also, the ELF dump handler assumes that the thread list doesn't * change out from under it. * * XXX If another thread attempts to single-thread before us * (e.g. via fork()), we won't get a dump at all. */ if ((sigprop(sig) & SA_CORE) && (thread_single(SINGLE_NO_EXIT) == 0)) { p->p_sig = sig; /* * Log signals which would cause core dumps * (Log as LOG_INFO to appease those who don't want * these messages.) * XXX : Todo, as well as euid, write out ruid too * Note that coredump() drops proc lock. */ if (coredump(td) == 0) sig |= WCOREFLAG; if (kern_logsigexit) log(LOG_INFO, "pid %d (%s), uid %d: exited on signal %d%s\n", p->p_pid, p->p_comm, td->td_ucred ? td->td_ucred->cr_uid : -1, sig &~ WCOREFLAG, sig & WCOREFLAG ? " (core dumped)" : ""); } else PROC_UNLOCK(p); exit1(td, W_EXITCODE(0, sig)); /* NOTREACHED */ } static char corefilename[MAXPATHLEN] = {"%N.core"}; SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename, sizeof(corefilename), "process corefile name format string"); /* * expand_name(name, uid, pid) * Expand the name described in corefilename, using name, uid, and pid. 
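The ps_sigreset branch in postsig() above implements sigaction(2)'s SA_RESETHAND semantics: after one delivery the catch bit is cleared and the disposition reverts to SIG_DFL. A small userland check of that behaviour, for illustration only:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t fired;

static void
handler(int sig)
{
	fired = 1;
}

int
main(void)
{
	struct sigaction sa, osa;

	sa.sa_handler = handler;
	sa.sa_flags = SA_RESETHAND;		/* kernel side: ps_sigreset */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);				/* first delivery runs handler() */
	sigaction(SIGUSR1, NULL, &osa);
	printf("handler %s; disposition is now %s\n",
	    fired ? "ran" : "did not run",
	    osa.sa_handler == SIG_DFL ? "SIG_DFL" : "still caught");
	return (0);
}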
* corefilename is a printf-like string, with three format specifiers: * %N name of process ("name") * %P process id (pid) * %U user id (uid) * For example, "%N.core" is the default; they can be disabled completely * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P". * This is controlled by the sysctl variable kern.corefile (see above). */ static char * expand_name(name, uid, pid) const char *name; uid_t uid; pid_t pid; { const char *format, *appendstr; char *temp; char buf[11]; /* Buffer for pid/uid -- max 4B */ size_t i, l, n; format = corefilename; temp = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT | M_ZERO); if (temp == NULL) return (NULL); for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) { switch (format[i]) { case '%': /* Format character */ i++; switch (format[i]) { case '%': appendstr = "%"; break; case 'N': /* process name */ appendstr = name; break; case 'P': /* process id */ sprintf(buf, "%u", pid); appendstr = buf; break; case 'U': /* user id */ sprintf(buf, "%u", uid); appendstr = buf; break; default: appendstr = ""; log(LOG_ERR, "Unknown format character %c in `%s'\n", format[i], format); } l = strlen(appendstr); if ((n + l) >= MAXPATHLEN) goto toolong; memcpy(temp + n, appendstr, l); n += l; break; default: temp[n++] = format[i]; } } if (format[i] != '\0') goto toolong; return (temp); toolong: log(LOG_ERR, "pid %ld (%s), uid (%lu): corename is too long\n", (long)pid, name, (u_long)uid); free(temp, M_TEMP); return (NULL); } /* * Dump a process' core. The main routine does some * policy checking, and creates the name of the coredump; * then it passes on a vnode and a size limit to the process-specific * coredump routine if there is one; if there _is not_ one, it returns * ENOSYS; otherwise it returns the error from the process-specific routine. */ static int coredump(struct thread *td) { struct proc *p = td->td_proc; register struct vnode *vp; register struct ucred *cred = td->td_ucred; struct flock lf; struct nameidata nd; struct vattr vattr; int error, error1, flags, locked; struct mount *mp; char *name; /* name of corefile */ off_t limit; PROC_LOCK_ASSERT(p, MA_OWNED); MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td); _STOPEVENT(p, S_CORE, 0); if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0) { PROC_UNLOCK(p); return (EFAULT); } /* * Note that the bulk of limit checking is done after * the corefile is created. The exception is if the limit * for corefiles is 0, in which case we don't bother * creating the corefile at all. This layout means that * a corefile is truncated instead of not being created, * if it is larger than the limit. */ limit = (off_t)lim_cur(p, RLIMIT_CORE); PROC_UNLOCK(p); if (limit == 0) return (EFBIG); mtx_lock(&Giant); restart: name = expand_name(p->p_comm, td->td_ucred->cr_uid, p->p_pid); if (name == NULL) { mtx_unlock(&Giant); return (EINVAL); } NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name, td); /* XXXKSE */ flags = O_CREAT | FWRITE | O_NOFOLLOW; error = vn_open(&nd, &flags, S_IRUSR | S_IWUSR, -1); free(name, M_TEMP); if (error) { mtx_unlock(&Giant); return (error); } NDFREE(&nd, NDF_ONLY_PNBUF); vp = nd.ni_vp; /* Don't dump to non-regular files or files with links. 
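For illustration, a userland approximation of the %N/%P/%U expansion described above; expand_corename() is a hypothetical helper, not the kernel routine, and it silently drops unknown specifiers instead of logging them:

#include <sys/types.h>
#include <stdio.h>
#include <unistd.h>

static void
expand_corename(const char *fmt, const char *name, uid_t uid, pid_t pid,
    char *out, size_t outlen)
{
	size_t i, n;
	int r;

	for (i = 0, n = 0; fmt[i] != '\0' && n + 1 < outlen; i++) {
		if (fmt[i] != '%') {
			out[n++] = fmt[i];
			continue;
		}
		if (fmt[++i] == '\0')
			break;
		switch (fmt[i]) {
		case '%':
			r = snprintf(out + n, outlen - n, "%%");
			break;
		case 'N':
			r = snprintf(out + n, outlen - n, "%s", name);
			break;
		case 'P':
			r = snprintf(out + n, outlen - n, "%u", (unsigned)pid);
			break;
		case 'U':
			r = snprintf(out + n, outlen - n, "%u", (unsigned)uid);
			break;
		default:
			r = 0;			/* unknown specifier: dropped */
		}
		if (r < 0 || (size_t)r >= outlen - n)
			break;			/* would overflow: truncate */
		n += r;
	}
	out[n] = '\0';
}

int
main(void)
{
	char buf[128];

	expand_corename("/cores/%U/%N-%P", "myprog", getuid(), getpid(),
	    buf, sizeof(buf));
	printf("%s\n", buf);
	return (0);
}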
*/ if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred, td) || vattr.va_nlink != 1) { VOP_UNLOCK(vp, 0, td); error = EFAULT; goto out; } VOP_UNLOCK(vp, 0, td); lf.l_whence = SEEK_SET; lf.l_start = 0; lf.l_len = 0; lf.l_type = F_WRLCK; locked = (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, F_FLOCK) == 0); if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { lf.l_type = F_UNLCK; if (locked) VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK); if ((error = vn_close(vp, FWRITE, cred, td)) != 0) return (error); if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0) return (error); goto restart; } VATTR_NULL(&vattr); vattr.va_size = 0; if (set_core_nodump_flag) vattr.va_flags = UF_NODUMP; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); VOP_LEASE(vp, td, cred, LEASE_WRITE); VOP_SETATTR(vp, &vattr, cred, td); VOP_UNLOCK(vp, 0, td); PROC_LOCK(p); p->p_acflag |= ACORE; PROC_UNLOCK(p); error = p->p_sysent->sv_coredump ? p->p_sysent->sv_coredump(td, vp, limit) : ENOSYS; if (locked) { lf.l_type = F_UNLCK; VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_FLOCK); } vn_finished_write(mp); out: error1 = vn_close(vp, FWRITE, cred, td); mtx_unlock(&Giant); if (error == 0) error = error1; return (error); } /* * Nonexistent system call-- signal process (may want to handle it). * Flag error in case process won't see signal immediately (blocked or ignored). */ #ifndef _SYS_SYSPROTO_H_ struct nosys_args { int dummy; }; #endif /* * MPSAFE */ /* ARGSUSED */ int nosys(td, args) struct thread *td; struct nosys_args *args; { struct proc *p = td->td_proc; PROC_LOCK(p); psignal(p, SIGSYS); PROC_UNLOCK(p); return (ENOSYS); } /* * Send a SIGIO or SIGURG signal to a process or process group using * stored credentials rather than those of the current process. */ void pgsigio(sigiop, sig, checkctty) struct sigio **sigiop; int sig, checkctty; { struct sigio *sigio; SIGIO_LOCK(); sigio = *sigiop; if (sigio == NULL) { SIGIO_UNLOCK(); return; } if (sigio->sio_pgid > 0) { PROC_LOCK(sigio->sio_proc); if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred)) psignal(sigio->sio_proc, sig); PROC_UNLOCK(sigio->sio_proc); } else if (sigio->sio_pgid < 0) { struct proc *p; PGRP_LOCK(sigio->sio_pgrp); LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) { PROC_LOCK(p); if (CANSIGIO(sigio->sio_ucred, p->p_ucred) && (checkctty == 0 || (p->p_flag & P_CONTROLT))) psignal(p, sig); PROC_UNLOCK(p); } PGRP_UNLOCK(sigio->sio_pgrp); } SIGIO_UNLOCK(); } static int filt_sigattach(struct knote *kn) { struct proc *p = curproc; kn->kn_ptr.p_proc = p; kn->kn_flags |= EV_CLEAR; /* automatically set */ knlist_add(&p->p_klist, kn, 0); return (0); } static void filt_sigdetach(struct knote *kn) { struct proc *p = kn->kn_ptr.p_proc; knlist_remove(&p->p_klist, kn, 0); } /* * signal knotes are shared with proc knotes, so we apply a mask to * the hint in order to differentiate them from process hints. This * could be avoided by using a signal-specific knote list, but probably * isn't worth the trouble. 
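Because do_tdsignal() issues KNOTE_LOCKED(..., NOTE_SIGNAL | sig) before the ignore check, the EVFILT_SIGNAL filter defined just after this note counts postings even for ignored signals, with EV_CLEAR resetting the count once read. A minimal kqueue(2) sketch of that behaviour (illustrative, not part of the diff):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	signal(SIGUSR1, SIG_IGN);	/* ignored, yet still counted below */
	kq = kqueue();
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	raise(SIGUSR1);
	raise(SIGUSR1);

	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
		printf("SIGUSR1 was posted %d time(s)\n", (int)kev.data);
	close(kq);
	return (0);
}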
*/ static int filt_signal(struct knote *kn, long hint) { if (hint & NOTE_SIGNAL) { hint &= ~NOTE_SIGNAL; if (kn->kn_id == hint) kn->kn_data++; } return (kn->kn_data != 0); } struct sigacts * sigacts_alloc(void) { struct sigacts *ps; ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO); ps->ps_refcnt = 1; mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF); return (ps); } void sigacts_free(struct sigacts *ps) { mtx_lock(&ps->ps_mtx); ps->ps_refcnt--; if (ps->ps_refcnt == 0) { mtx_destroy(&ps->ps_mtx); free(ps, M_SUBPROC); } else mtx_unlock(&ps->ps_mtx); } struct sigacts * sigacts_hold(struct sigacts *ps) { mtx_lock(&ps->ps_mtx); ps->ps_refcnt++; mtx_unlock(&ps->ps_mtx); return (ps); } void sigacts_copy(struct sigacts *dest, struct sigacts *src) { KASSERT(dest->ps_refcnt == 1, ("sigacts_copy to shared dest")); mtx_lock(&src->ps_mtx); bcopy(src, dest, offsetof(struct sigacts, ps_refcnt)); mtx_unlock(&src->ps_mtx); } int sigacts_shared(struct sigacts *ps) { int shared; mtx_lock(&ps->ps_mtx); shared = ps->ps_refcnt > 1; mtx_unlock(&ps->ps_mtx); return (shared); } Index: head/sys/kern/kern_thr.c =================================================================== --- head/sys/kern/kern_thr.c (revision 151992) +++ head/sys/kern/kern_thr.c (revision 151993) @@ -1,394 +1,394 @@ /*- * Copyright (c) 2003, Jeffrey Roberson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include extern int max_threads_per_proc; extern int max_groups_per_proc; SYSCTL_DECL(_kern_threads); static int thr_scope = 0; SYSCTL_INT(_kern_threads, OID_AUTO, thr_scope, CTLFLAG_RW, &thr_scope, 0, "sys or proc scope scheduling"); static int thr_concurrency = 0; SYSCTL_INT(_kern_threads, OID_AUTO, thr_concurrency, CTLFLAG_RW, &thr_concurrency, 0, "a concurrency value if not default"); static int create_thread(struct thread *td, mcontext_t *ctx, void (*start_func)(void *), void *arg, char *stack_base, size_t stack_size, char *tls_base, long *child_tid, long *parent_tid, int flags); /* * System call interface. 
*/ int thr_create(struct thread *td, struct thr_create_args *uap) /* ucontext_t *ctx, long *id, int flags */ { ucontext_t ctx; int error; if ((error = copyin(uap->ctx, &ctx, sizeof(ctx)))) return (error); error = create_thread(td, &ctx.uc_mcontext, NULL, NULL, NULL, 0, NULL, uap->id, NULL, uap->flags); return (error); } int thr_new(struct thread *td, struct thr_new_args *uap) /* struct thr_param * */ { struct thr_param param; int error; if (uap->param_size < sizeof(param)) return (EINVAL); if ((error = copyin(uap->param, ¶m, sizeof(param)))) return (error); error = create_thread(td, NULL, param.start_func, param.arg, param.stack_base, param.stack_size, param.tls_base, param.child_tid, param.parent_tid, param.flags); return (error); } static int create_thread(struct thread *td, mcontext_t *ctx, void (*start_func)(void *), void *arg, char *stack_base, size_t stack_size, char *tls_base, long *child_tid, long *parent_tid, int flags) { stack_t stack; struct thread *newtd; struct ksegrp *kg, *newkg; struct proc *p; long id; int error, scope_sys, linkkg; error = 0; p = td->td_proc; kg = td->td_ksegrp; /* Have race condition but it is cheap. */ if ((p->p_numksegrps >= max_groups_per_proc) || (p->p_numthreads >= max_threads_per_proc)) { return (EPROCLIM); } /* Check PTHREAD_SCOPE_SYSTEM */ scope_sys = (flags & THR_SYSTEM_SCOPE) != 0; /* sysctl overrides user's flag */ if (thr_scope == 1) scope_sys = 0; else if (thr_scope == 2) scope_sys = 1; /* Initialize our td and new ksegrp.. */ newtd = thread_alloc(); /* * Try the copyout as soon as we allocate the td so we don't * have to tear things down in a failure case below. * Here we copy out tid to two places, one for child and one * for parent, because pthread can create a detached thread, * if parent wants to safely access child tid, it has to provide * its storage, because child thread may exit quickly and * memory is freed before parent thread can access it. */ id = newtd->td_tid; if ((child_tid != NULL && (error = copyout(&id, child_tid, sizeof(long)))) || (parent_tid != NULL && (error = copyout(&id, parent_tid, sizeof(long))))) { thread_free(newtd); return (error); } bzero(&newtd->td_startzero, __rangeof(struct thread, td_startzero, td_endzero)); bcopy(&td->td_startcopy, &newtd->td_startcopy, __rangeof(struct thread, td_startcopy, td_endcopy)); newtd->td_proc = td->td_proc; newtd->td_ucred = crhold(td->td_ucred); cpu_set_upcall(newtd, td); if (ctx != NULL) { /* old way to set user context */ error = set_mcontext(newtd, ctx); if (error != 0) { thread_free(newtd); crfree(td->td_ucred); return (error); } } else { /* Set up our machine context. */ stack.ss_sp = stack_base; stack.ss_size = stack_size; /* Set upcall address to user thread entry function. */ cpu_set_upcall_kse(newtd, start_func, arg, &stack); /* Setup user TLS address and TLS pointer register. */ error = cpu_set_user_tls(newtd, tls_base); if (error != 0) { thread_free(newtd); crfree(td->td_ucred); return (error); } } if ((td->td_proc->p_flag & P_HADTHREADS) == 0) { /* Treat initial thread as it has PTHREAD_SCOPE_PROCESS. */ p->p_procscopegrp = kg; mtx_lock_spin(&sched_lock); sched_set_concurrency(kg, thr_concurrency ? 
thr_concurrency : (2*mp_ncpus)); mtx_unlock_spin(&sched_lock); } linkkg = 0; if (scope_sys) { linkkg = 1; newkg = ksegrp_alloc(); bzero(&newkg->kg_startzero, __rangeof(struct ksegrp, kg_startzero, kg_endzero)); bcopy(&kg->kg_startcopy, &newkg->kg_startcopy, __rangeof(struct ksegrp, kg_startcopy, kg_endcopy)); sched_init_concurrency(newkg); PROC_LOCK(td->td_proc); } else { /* * Try to create a KSE group which will be shared * by all PTHREAD_SCOPE_PROCESS threads. */ retry: PROC_LOCK(td->td_proc); if ((newkg = p->p_procscopegrp) == NULL) { PROC_UNLOCK(p); newkg = ksegrp_alloc(); bzero(&newkg->kg_startzero, __rangeof(struct ksegrp, kg_startzero, kg_endzero)); bcopy(&kg->kg_startcopy, &newkg->kg_startcopy, __rangeof(struct ksegrp, kg_startcopy, kg_endcopy)); PROC_LOCK(p); if (p->p_procscopegrp == NULL) { p->p_procscopegrp = newkg; sched_init_concurrency(newkg); sched_set_concurrency(newkg, thr_concurrency ? thr_concurrency : (2*mp_ncpus)); linkkg = 1; } else { PROC_UNLOCK(p); ksegrp_free(newkg); goto retry; } } } td->td_proc->p_flag |= P_HADTHREADS; newtd->td_sigmask = td->td_sigmask; mtx_lock_spin(&sched_lock); if (linkkg) ksegrp_link(newkg, p); thread_link(newtd, newkg); PROC_UNLOCK(p); /* let the scheduler know about these things. */ if (linkkg) sched_fork_ksegrp(td, newkg); sched_fork_thread(td, newtd); TD_SET_CAN_RUN(newtd); /* if ((flags & THR_SUSPENDED) == 0) */ setrunqueue(newtd, SRQ_BORING); mtx_unlock_spin(&sched_lock); return (error); } int thr_self(struct thread *td, struct thr_self_args *uap) /* long *id */ { long id; int error; id = td->td_tid; if ((error = copyout(&id, uap->id, sizeof(long)))) return (error); return (0); } int thr_exit(struct thread *td, struct thr_exit_args *uap) /* long *state */ { struct proc *p; p = td->td_proc; /* Signal userland that it can free the stack. */ if ((void *)uap->state != NULL) { suword((void *)uap->state, 1); kern_umtx_wake(td, uap->state, INT_MAX); } PROC_LOCK(p); sigqueue_flush(&td->td_sigqueue); mtx_lock_spin(&sched_lock); /* * Shutting down last thread in the proc. This will actually * call exit() in the trampoline when it returns. 
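A userland view of the scope split handled in create_thread() above, assuming the pthread library in use (libthr) forwards PTHREAD_SCOPE_SYSTEM as THR_SYSTEM_SCOPE through thr_new(); build with -lpthread:

#include <pthread.h>
#include <stdio.h>

static void *
thread_main(void *arg)
{
	printf("system-scope thread running\n");
	return (NULL);
}

int
main(void)
{
	pthread_attr_t attr;
	pthread_t tid;

	pthread_attr_init(&attr);
	/* Expected to surface as THR_SYSTEM_SCOPE in thr_new()'s flags. */
	pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
	pthread_create(&tid, &attr, thread_main, NULL);
	pthread_join(tid, NULL);
	pthread_attr_destroy(&attr);
	return (0);
}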
*/ if (p->p_numthreads != 1) { thread_exit(); /* NOTREACHED */ } mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (0); } int thr_kill(struct thread *td, struct thr_kill_args *uap) /* long id, int sig */ { struct thread *ttd; struct proc *p; int error; p = td->td_proc; error = 0; PROC_LOCK(p); ttd = thread_find(p, uap->id); if (ttd == NULL) { error = ESRCH; goto out; } if (uap->sig == 0) goto out; if (!_SIG_VALID(uap->sig)) { error = EINVAL; goto out; } - tdsignal(ttd, uap->sig, NULL, SIGTARGET_TD); + tdsignal(p, ttd, uap->sig, NULL); out: PROC_UNLOCK(p); return (error); } int thr_suspend(struct thread *td, struct thr_suspend_args *uap) /* const struct timespec *timeout */ { struct timespec ts; struct timeval tv; int error; int hz; hz = 0; error = 0; if (uap->timeout != NULL) { error = copyin((const void *)uap->timeout, (void *)&ts, sizeof(struct timespec)); if (error != 0) return (error); if (ts.tv_nsec < 0 || ts.tv_nsec > 1000000000) return (EINVAL); if (ts.tv_sec == 0 && ts.tv_nsec == 0) return (ETIMEDOUT); TIMESPEC_TO_TIMEVAL(&tv, &ts); hz = tvtohz(&tv); } PROC_LOCK(td->td_proc); if ((td->td_flags & TDF_THRWAKEUP) == 0) error = msleep((void *)td, &td->td_proc->p_mtx, td->td_priority | PCATCH, "lthr", hz); if (td->td_flags & TDF_THRWAKEUP) { mtx_lock_spin(&sched_lock); td->td_flags &= ~TDF_THRWAKEUP; mtx_unlock_spin(&sched_lock); PROC_UNLOCK(td->td_proc); return (0); } PROC_UNLOCK(td->td_proc); if (error == EWOULDBLOCK) error = ETIMEDOUT; else if (error == ERESTART) { if (hz != 0) error = EINTR; } return (error); } int thr_wake(struct thread *td, struct thr_wake_args *uap) /* long id */ { struct proc *p; struct thread *ttd; p = td->td_proc; PROC_LOCK(p); ttd = thread_find(p, uap->id); if (ttd == NULL) { PROC_UNLOCK(p); return (ESRCH); } mtx_lock_spin(&sched_lock); ttd->td_flags |= TDF_THRWAKEUP; mtx_unlock_spin(&sched_lock); wakeup((void *)ttd); PROC_UNLOCK(p); return (0); } Index: head/sys/kern/kern_time.c =================================================================== --- head/sys/kern/kern_time.c (revision 151992) +++ head/sys/kern/kern_time.c (revision 151993) @@ -1,1525 +1,1508 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_time.c 8.1 (Berkeley) 6/10/93 */ #include __FBSDID("$FreeBSD$"); #include "opt_mac.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MAX_CLOCKS (CLOCK_MONOTONIC+1) int tz_minuteswest; int tz_dsttime; static struct kclock posix_clocks[MAX_CLOCKS]; static uma_zone_t itimer_zone = NULL; /* * Time of day and interval timer support. * * These routines provide the kernel entry points to get and set * the time-of-day and per-process interval timers. Subroutines * here provide support for adding and subtracting timeval structures * and decrementing interval timers, optionally reloading the interval * timers when they expire. */ static int settime(struct thread *, struct timeval *); static void timevalfix(struct timeval *); static void no_lease_updatetime(int); static void itimer_start(void); static int itimer_init(void *, int, int); static void itimer_fini(void *, int); static void itimer_enter(struct itimer *); static void itimer_leave(struct itimer *); static struct itimer *itimer_find(struct proc *, timer_t, int); static void itimers_alloc(struct proc *); static int realtimer_create(struct itimer *); static int realtimer_gettime(struct itimer *, struct itimerspec *); static int realtimer_settime(struct itimer *, int, struct itimerspec *, struct itimerspec *); static int realtimer_delete(struct itimer *); static void realtimer_clocktime(clockid_t, struct timespec *); static void realtimer_expire(void *); static void realtimer_event_hook(struct proc *, clockid_t, int event); static int kern_timer_create(struct thread *, clockid_t, struct sigevent *, timer_t *, timer_t); static int kern_timer_delete(struct thread *, timer_t); int register_posix_clock(int, struct kclock *); void itimer_fire(struct itimer *it); int itimespecfix(struct timespec *ts); #define CLOCK_CALL(clock, call, arglist) \ ((*posix_clocks[clock].call) arglist) SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL); static void no_lease_updatetime(deltat) int deltat; { } void (*lease_updatetime)(int) = no_lease_updatetime; static int settime(struct thread *td, struct timeval *tv) { struct timeval delta, tv1, tv2; static struct timeval maxtime, laststep; struct timespec ts; int s; s = splclock(); microtime(&tv1); delta = *tv; timevalsub(&delta, &tv1); /* * If the system is secure, we do not allow the time to be * set to a value earlier than 1 second less than the highest * time we have yet seen. The worst a miscreant can do in * this circumstance is "freeze" time. He couldn't go * back to the past. * * We similarly do not allow the clock to be stepped more * than one second, nor more than once per second. This allows * a miscreant to make the clock march double-time, but no worse. */ if (securelevel_gt(td->td_ucred, 1) != 0) { if (delta.tv_sec < 0 || delta.tv_usec < 0) { /* * Update maxtime to latest time we've seen. 
*/ if (tv1.tv_sec > maxtime.tv_sec) maxtime = tv1; tv2 = *tv; timevalsub(&tv2, &maxtime); if (tv2.tv_sec < -1) { tv->tv_sec = maxtime.tv_sec - 1; printf("Time adjustment clamped to -1 second\n"); } } else { if (tv1.tv_sec == laststep.tv_sec) { splx(s); return (EPERM); } if (delta.tv_sec > 1) { tv->tv_sec = tv1.tv_sec + 1; printf("Time adjustment clamped to +1 second\n"); } laststep = *tv; } } ts.tv_sec = tv->tv_sec; ts.tv_nsec = tv->tv_usec * 1000; mtx_lock(&Giant); tc_setclock(&ts); (void) splsoftclock(); lease_updatetime(delta.tv_sec); splx(s); resettodr(); mtx_unlock(&Giant); return (0); } #ifndef _SYS_SYSPROTO_H_ struct clock_gettime_args { clockid_t clock_id; struct timespec *tp; }; #endif /* * MPSAFE */ /* ARGSUSED */ int clock_gettime(struct thread *td, struct clock_gettime_args *uap) { struct timespec ats; int error; error = kern_clock_gettime(td, uap->clock_id, &ats); if (error == 0) error = copyout(&ats, uap->tp, sizeof(ats)); return (error); } int kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats) { struct timeval sys, user; struct proc *p; p = td->td_proc; switch (clock_id) { case CLOCK_REALTIME: nanotime(ats); break; case CLOCK_VIRTUAL: PROC_LOCK(p); calcru(p, &user, &sys); PROC_UNLOCK(p); TIMEVAL_TO_TIMESPEC(&user, ats); break; case CLOCK_PROF: PROC_LOCK(p); calcru(p, &user, &sys); PROC_UNLOCK(p); timevaladd(&user, &sys); TIMEVAL_TO_TIMESPEC(&user, ats); break; case CLOCK_MONOTONIC: nanouptime(ats); break; default: return (EINVAL); } return (0); } #ifndef _SYS_SYSPROTO_H_ struct clock_settime_args { clockid_t clock_id; const struct timespec *tp; }; #endif /* * MPSAFE */ /* ARGSUSED */ int clock_settime(struct thread *td, struct clock_settime_args *uap) { struct timespec ats; int error; if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0) return (error); return (kern_clock_settime(td, uap->clock_id, &ats)); } int kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats) { struct timeval atv; int error; #ifdef MAC error = mac_check_system_settime(td->td_ucred); if (error) return (error); #endif if ((error = suser(td)) != 0) return (error); if (clock_id != CLOCK_REALTIME) return (EINVAL); if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000) return (EINVAL); /* XXX Don't convert nsec->usec and back */ TIMESPEC_TO_TIMEVAL(&atv, ats); error = settime(td, &atv); return (error); } #ifndef _SYS_SYSPROTO_H_ struct clock_getres_args { clockid_t clock_id; struct timespec *tp; }; #endif int clock_getres(struct thread *td, struct clock_getres_args *uap) { struct timespec ts; int error; if (uap->tp == NULL) return (0); error = kern_clock_getres(td, uap->clock_id, &ts); if (error == 0) error = copyout(&ts, uap->tp, sizeof(ts)); return (error); } int kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts) { ts->tv_sec = 0; switch (clock_id) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: /* * Round up the result of the division cheaply by adding 1. * Rounding up is especially important if rounding down * would give 0. Perfect rounding is unimportant. */ ts->tv_nsec = 1000000000 / tc_getfrequency() + 1; break; case CLOCK_VIRTUAL: case CLOCK_PROF: /* Accurately round up here because we can do so cheaply. 
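For reference, a short userland sketch of the clock ids handled by kern_clock_gettime()/kern_clock_getres() above: CLOCK_MONOTONIC comes from nanouptime() with roughly 1 s / tc_getfrequency() resolution, while CLOCK_PROF reports user plus system CPU time at tick resolution. Illustrative only:

#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec ts, res;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	clock_getres(CLOCK_MONOTONIC, &res);
	printf("monotonic %ld.%09ld s, resolution ~%ld ns\n",
	    (long)ts.tv_sec, (long)ts.tv_nsec, (long)res.tv_nsec);

	clock_gettime(CLOCK_PROF, &ts);		/* user + system CPU time */
	clock_getres(CLOCK_PROF, &res);		/* (1000000000 + hz - 1) / hz */
	printf("prof %ld.%09ld s, resolution %ld ns\n",
	    (long)ts.tv_sec, (long)ts.tv_nsec, (long)res.tv_nsec);
	return (0);
}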
*/ ts->tv_nsec = (1000000000 + hz - 1) / hz; break; default: return (EINVAL); } return (0); } static int nanowait; int kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt) { struct timespec ts, ts2, ts3; struct timeval tv; int error; if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000) return (EINVAL); if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0)) return (0); getnanouptime(&ts); timespecadd(&ts, rqt); TIMESPEC_TO_TIMEVAL(&tv, rqt); for (;;) { error = tsleep(&nanowait, PWAIT | PCATCH, "nanslp", tvtohz(&tv)); getnanouptime(&ts2); if (error != EWOULDBLOCK) { if (error == ERESTART) error = EINTR; if (rmt != NULL) { timespecsub(&ts, &ts2); if (ts.tv_sec < 0) timespecclear(&ts); *rmt = ts; } return (error); } if (timespeccmp(&ts2, &ts, >=)) return (0); ts3 = ts; timespecsub(&ts3, &ts2); TIMESPEC_TO_TIMEVAL(&tv, &ts3); } } #ifndef _SYS_SYSPROTO_H_ struct nanosleep_args { struct timespec *rqtp; struct timespec *rmtp; }; #endif /* * MPSAFE */ /* ARGSUSED */ int nanosleep(struct thread *td, struct nanosleep_args *uap) { struct timespec rmt, rqt; int error; error = copyin(uap->rqtp, &rqt, sizeof(rqt)); if (error) return (error); if (uap->rmtp && !useracc((caddr_t)uap->rmtp, sizeof(rmt), VM_PROT_WRITE)) return (EFAULT); error = kern_nanosleep(td, &rqt, &rmt); if (error && uap->rmtp) { int error2; error2 = copyout(&rmt, uap->rmtp, sizeof(rmt)); if (error2) error = error2; } return (error); } #ifndef _SYS_SYSPROTO_H_ struct gettimeofday_args { struct timeval *tp; struct timezone *tzp; }; #endif /* * MPSAFE */ /* ARGSUSED */ int gettimeofday(struct thread *td, struct gettimeofday_args *uap) { struct timeval atv; struct timezone rtz; int error = 0; if (uap->tp) { microtime(&atv); error = copyout(&atv, uap->tp, sizeof (atv)); } if (error == 0 && uap->tzp != NULL) { rtz.tz_minuteswest = tz_minuteswest; rtz.tz_dsttime = tz_dsttime; error = copyout(&rtz, uap->tzp, sizeof (rtz)); } return (error); } #ifndef _SYS_SYSPROTO_H_ struct settimeofday_args { struct timeval *tv; struct timezone *tzp; }; #endif /* * MPSAFE */ /* ARGSUSED */ int settimeofday(struct thread *td, struct settimeofday_args *uap) { struct timeval atv, *tvp; struct timezone atz, *tzp; int error; if (uap->tv) { error = copyin(uap->tv, &atv, sizeof(atv)); if (error) return (error); tvp = &atv; } else tvp = NULL; if (uap->tzp) { error = copyin(uap->tzp, &atz, sizeof(atz)); if (error) return (error); tzp = &atz; } else tzp = NULL; return (kern_settimeofday(td, tvp, tzp)); } int kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp) { int error; #ifdef MAC error = mac_check_system_settime(td->td_ucred); if (error) return (error); #endif error = suser(td); if (error) return (error); /* Verify all parameters before changing time. */ if (tv) { if (tv->tv_usec < 0 || tv->tv_usec >= 1000000) return (EINVAL); error = settime(td, tv); } if (tzp && error == 0) { tz_minuteswest = tzp->tz_minuteswest; tz_dsttime = tzp->tz_dsttime; } return (error); } /* * Get value of an interval timer. The process virtual and * profiling virtual time timers are kept in the p_stats area, since * they can be swapped out. These are kept internally in the * way they are specified externally: in time until they expire. * * The real time interval timer is kept in the process table slot * for the process, and its value (it_value) is kept as an * absolute time rather than as a delta, so that it is easy to keep * periodic real-time signals from drifting. 
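kern_nanosleep() above returns EINTR when a signal interrupts the sleep and, if an rmtp pointer was supplied, copies out the time still remaining. A short userland sketch of that contract:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static void
on_alarm(int sig)
{
	/* Only needed to interrupt the sleep. */
}

int
main(void)
{
	struct sigaction sa;
	struct timespec req = { 5, 0 }, rem;

	sa.sa_handler = on_alarm;
	sa.sa_flags = 0;			/* no SA_RESTART */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);

	if (nanosleep(&req, &rem) == -1 && errno == EINTR)
		printf("interrupted, %ld.%09ld s remaining\n",
		    (long)rem.tv_sec, (long)rem.tv_nsec);
	return (0);
}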
* * Virtual time timers are processed in the hardclock() routine of * kern_clock.c. The real time timer is processed by a timeout * routine, called from the softclock() routine. Since a callout * may be delayed in real time due to interrupt processing in the system, * it is possible for the real time timeout routine (realitexpire, given below), * to be delayed in real time past when it is supposed to occur. It * does not suffice, therefore, to reload the real timer .it_value from the * real time timers .it_interval. Rather, we compute the next time in * absolute time the timer should go off. */ #ifndef _SYS_SYSPROTO_H_ struct getitimer_args { u_int which; struct itimerval *itv; }; #endif /* * MPSAFE */ int getitimer(struct thread *td, struct getitimer_args *uap) { struct itimerval aitv; int error; error = kern_getitimer(td, uap->which, &aitv); if (error != 0) return (error); return (copyout(&aitv, uap->itv, sizeof (struct itimerval))); } int kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv) { struct proc *p = td->td_proc; struct timeval ctv; if (which > ITIMER_PROF) return (EINVAL); if (which == ITIMER_REAL) { /* * Convert from absolute to relative time in .it_value * part of real time timer. If time for real time timer * has passed return 0, else return difference between * current time and time for the timer to go off. */ PROC_LOCK(p); *aitv = p->p_realtimer; PROC_UNLOCK(p); if (timevalisset(&aitv->it_value)) { getmicrouptime(&ctv); if (timevalcmp(&aitv->it_value, &ctv, <)) timevalclear(&aitv->it_value); else timevalsub(&aitv->it_value, &ctv); } } else { mtx_lock_spin(&sched_lock); *aitv = p->p_stats->p_timer[which]; mtx_unlock_spin(&sched_lock); } return (0); } #ifndef _SYS_SYSPROTO_H_ struct setitimer_args { u_int which; struct itimerval *itv, *oitv; }; #endif /* * MPSAFE */ int setitimer(struct thread *td, struct setitimer_args *uap) { struct itimerval aitv, oitv; int error; if (uap->itv == NULL) { uap->itv = uap->oitv; return (getitimer(td, (struct getitimer_args *)uap)); } if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval)))) return (error); error = kern_setitimer(td, uap->which, &aitv, &oitv); if (error != 0 || uap->oitv == NULL) return (error); return (copyout(&oitv, uap->oitv, sizeof(struct itimerval))); } int kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv, struct itimerval *oitv) { struct proc *p = td->td_proc; struct timeval ctv; if (aitv == NULL) return (kern_getitimer(td, which, oitv)); if (which > ITIMER_PROF) return (EINVAL); if (itimerfix(&aitv->it_value)) return (EINVAL); if (!timevalisset(&aitv->it_value)) timevalclear(&aitv->it_interval); else if (itimerfix(&aitv->it_interval)) return (EINVAL); if (which == ITIMER_REAL) { PROC_LOCK(p); if (timevalisset(&p->p_realtimer.it_value)) callout_stop(&p->p_itcallout); getmicrouptime(&ctv); if (timevalisset(&aitv->it_value)) { callout_reset(&p->p_itcallout, tvtohz(&aitv->it_value), realitexpire, p); timevaladd(&aitv->it_value, &ctv); } *oitv = p->p_realtimer; p->p_realtimer = *aitv; PROC_UNLOCK(p); if (timevalisset(&oitv->it_value)) { if (timevalcmp(&oitv->it_value, &ctv, <)) timevalclear(&oitv->it_value); else timevalsub(&oitv->it_value, &ctv); } } else { mtx_lock_spin(&sched_lock); *oitv = p->p_stats->p_timer[which]; p->p_stats->p_timer[which] = *aitv; mtx_unlock_spin(&sched_lock); } return (0); } /* * Real interval timer expired: * send process whose timer expired an alarm signal. * If time is not set up to reload, then just return. 
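As the comment above notes, ITIMER_REAL's it_value is kept as an absolute uptime and kern_getitimer() converts it back to a delta before returning it. A small setitimer(2)/getitimer(2) sketch showing the relative value userland sees:

#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t alarms;

static void
on_alarm(int sig)
{
	alarms++;
}

int
main(void)
{
	struct itimerval itv, cur;

	signal(SIGALRM, on_alarm);
	itv.it_value.tv_sec = 1;		/* first expiry in 1 s */
	itv.it_value.tv_usec = 0;
	itv.it_interval.tv_sec = 1;		/* then every second */
	itv.it_interval.tv_usec = 0;
	setitimer(ITIMER_REAL, &itv, NULL);

	while (alarms < 3)
		pause();

	getitimer(ITIMER_REAL, &cur);		/* absolute -> relative again */
	printf("saw %d SIGALRMs, %ld.%06ld s until the next one\n",
	    (int)alarms, (long)cur.it_value.tv_sec,
	    (long)cur.it_value.tv_usec);
	return (0);
}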
* Else compute next time timer should go off which is > current time. * This is where delay in processing this timeout causes multiple * SIGALRM calls to be compressed into one. * tvtohz() always adds 1 to allow for the time until the next clock * interrupt being strictly less than 1 clock tick, but we don't want * that here since we want to appear to be in sync with the clock * interrupt even when we're delayed. */ void realitexpire(void *arg) { struct proc *p; struct timeval ctv, ntv; p = (struct proc *)arg; PROC_LOCK(p); psignal(p, SIGALRM); if (!timevalisset(&p->p_realtimer.it_interval)) { timevalclear(&p->p_realtimer.it_value); if (p->p_flag & P_WEXIT) wakeup(&p->p_itcallout); PROC_UNLOCK(p); return; } for (;;) { timevaladd(&p->p_realtimer.it_value, &p->p_realtimer.it_interval); getmicrouptime(&ctv); if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) { ntv = p->p_realtimer.it_value; timevalsub(&ntv, &ctv); callout_reset(&p->p_itcallout, tvtohz(&ntv) - 1, realitexpire, p); PROC_UNLOCK(p); return; } } /*NOTREACHED*/ } /* * Check that a proposed value to load into the .it_value or * .it_interval part of an interval timer is acceptable, and * fix it to have at least minimal value (i.e. if it is less * than the resolution of the clock, round it up.) */ int itimerfix(struct timeval *tv) { if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000) return (EINVAL); if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick) tv->tv_usec = tick; return (0); } /* * Decrement an interval timer by a specified number * of microseconds, which must be less than a second, * i.e. < 1000000. If the timer expires, then reload * it. In this case, carry over (usec - old value) to * reduce the value reloaded into the timer so that * the timer does not drift. This routine assumes * that it is called in a context where the timers * on which it is operating cannot change in value. */ int itimerdecr(struct itimerval *itp, int usec) { if (itp->it_value.tv_usec < usec) { if (itp->it_value.tv_sec == 0) { /* expired, and already in next interval */ usec -= itp->it_value.tv_usec; goto expire; } itp->it_value.tv_usec += 1000000; itp->it_value.tv_sec--; } itp->it_value.tv_usec -= usec; usec = 0; if (timevalisset(&itp->it_value)) return (1); /* expired, exactly at end of interval */ expire: if (timevalisset(&itp->it_interval)) { itp->it_value = itp->it_interval; itp->it_value.tv_usec -= usec; if (itp->it_value.tv_usec < 0) { itp->it_value.tv_usec += 1000000; itp->it_value.tv_sec--; } } else itp->it_value.tv_usec = 0; /* sec is already 0 */ return (0); } /* * Add and subtract routines for timevals. * N.B.: subtract routine doesn't deal with * results which are before the beginning, * it just gets very confused in this case. * Caveat emptor. */ void timevaladd(struct timeval *t1, const struct timeval *t2) { t1->tv_sec += t2->tv_sec; t1->tv_usec += t2->tv_usec; timevalfix(t1); } void timevalsub(struct timeval *t1, const struct timeval *t2) { t1->tv_sec -= t2->tv_sec; t1->tv_usec -= t2->tv_usec; timevalfix(t1); } static void timevalfix(struct timeval *t1) { if (t1->tv_usec < 0) { t1->tv_sec--; t1->tv_usec += 1000000; } if (t1->tv_usec >= 1000000) { t1->tv_sec++; t1->tv_usec -= 1000000; } } /* * ratecheck(): simple time-based rate-limit checking. 
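For clarity, a standalone userland copy of the timevaladd()/timevalsub()/timevalfix() normalization shown above (as the original comment warns, the subtract path is not meant for results that end up before the beginning):

#include <sys/time.h>
#include <stdio.h>

static void
tv_fix(struct timeval *t)
{
	if (t->tv_usec < 0) {
		t->tv_sec--;
		t->tv_usec += 1000000;
	}
	if (t->tv_usec >= 1000000) {
		t->tv_sec++;
		t->tv_usec -= 1000000;
	}
}

static void
tv_add(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	tv_fix(t1);
}

static void
tv_sub(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	tv_fix(t1);
}

int
main(void)
{
	struct timeval a = { 1, 700000 }, b = { 0, 600000 };

	tv_add(&a, &b);		/* 1.7 s + 0.6 s -> 2.300000 */
	printf("%ld.%06ld\n", (long)a.tv_sec, (long)a.tv_usec);
	tv_sub(&a, &b);		/* back to 1.700000 */
	printf("%ld.%06ld\n", (long)a.tv_sec, (long)a.tv_usec);
	return (0);
}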
*/ int ratecheck(struct timeval *lasttime, const struct timeval *mininterval) { struct timeval tv, delta; int rv = 0; getmicrouptime(&tv); /* NB: 10ms precision */ delta = tv; timevalsub(&delta, lasttime); /* * check for 0,0 is so that the message will be seen at least once, * even if interval is huge. */ if (timevalcmp(&delta, mininterval, >=) || (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) { *lasttime = tv; rv = 1; } return (rv); } /* * ppsratecheck(): packets (or events) per second limitation. * * Return 0 if the limit is to be enforced (e.g. the caller * should drop a packet because of the rate limitation). * * maxpps of 0 always causes zero to be returned. maxpps of -1 * always causes 1 to be returned; this effectively defeats rate * limiting. * * Note that we maintain the struct timeval for compatibility * with other bsd systems. We reuse the storage and just monitor * clock ticks for minimal overhead. */ int ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps) { int now; /* * Reset the last time and counter if this is the first call * or more than a second has passed since the last update of * lasttime. */ now = ticks; if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) { lasttime->tv_sec = now; *curpps = 1; return (maxpps != 0); } else { (*curpps)++; /* NB: ignore potential overflow */ return (maxpps < 0 || *curpps < maxpps); } } static void itimer_start(void) { struct kclock rt_clock = { .timer_create = realtimer_create, .timer_delete = realtimer_delete, .timer_settime = realtimer_settime, .timer_gettime = realtimer_gettime, .event_hook = realtimer_event_hook }; itimer_zone = uma_zcreate("itimer", sizeof(struct itimer), NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0); register_posix_clock(CLOCK_REALTIME, &rt_clock); register_posix_clock(CLOCK_MONOTONIC, &rt_clock); } int register_posix_clock(int clockid, struct kclock *clk) { if ((unsigned)clockid >= MAX_CLOCKS) { printf("%s: invalid clockid\n", __func__); return (0); } posix_clocks[clockid] = *clk; return (1); } static int itimer_init(void *mem, int size, int flags) { struct itimer *it; it = (struct itimer *)mem; mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF); return (0); } static void itimer_fini(void *mem, int size) { struct itimer *it; it = (struct itimer *)mem; mtx_destroy(&it->it_mtx); } static void itimer_enter(struct itimer *it) { mtx_assert(&it->it_mtx, MA_OWNED); it->it_usecount++; } static void itimer_leave(struct itimer *it) { mtx_assert(&it->it_mtx, MA_OWNED); KASSERT(it->it_usecount > 0, ("invalid it_usecount")); if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0) wakeup(it); } #ifndef _SYS_SYSPROTO_H_ struct timer_create_args { clockid_t clock_id; struct sigevent * evp; timer_t * timerid; }; #endif int timer_create(struct thread *td, struct timer_create_args *uap) { struct sigevent *evp1, ev; timer_t id; int error; if (uap->evp != NULL) { error = copyin(uap->evp, &ev, sizeof(ev)); if (error != 0) return (error); evp1 = &ev; } else evp1 = NULL; error = kern_timer_create(td, uap->clock_id, evp1, &id, -1); if (error == 0) { error = copyout(&id, uap->timerid, sizeof(timer_t)); if (error != 0) kern_timer_delete(td, id); } return (error); } static int kern_timer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp, timer_t *timerid, timer_t preset_id) { struct proc *p = td->td_proc; struct itimer *it; int id; int error; if (clock_id < 0 || clock_id >= MAX_CLOCKS) return (EINVAL); if (posix_clocks[clock_id].timer_create == NULL) return (EINVAL); if 
(evp != NULL) { if (evp->sigev_notify != SIGEV_NONE && evp->sigev_notify != SIGEV_SIGNAL && evp->sigev_notify != SIGEV_THREAD_ID) return (EINVAL); if ((evp->sigev_notify == SIGEV_SIGNAL || evp->sigev_notify == SIGEV_THREAD_ID) && !_SIG_VALID(evp->sigev_signo)) return (EINVAL); } if (p->p_itimers == NULL) itimers_alloc(p); it = uma_zalloc(itimer_zone, M_WAITOK); it->it_flags = 0; it->it_usecount = 0; it->it_active = 0; timespecclear(&it->it_time.it_value); timespecclear(&it->it_time.it_interval); it->it_overrun = 0; it->it_overrun_last = 0; it->it_clockid = clock_id; it->it_timerid = -1; it->it_proc = p; ksiginfo_init(&it->it_ksi); it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT; error = CLOCK_CALL(clock_id, timer_create, (it)); if (error != 0) goto out; PROC_LOCK(p); if (preset_id != -1) { KASSERT(preset_id >= 0 && preset_id < 3, ("invalid preset_id")); id = preset_id; if (p->p_itimers->its_timers[id] != NULL) { PROC_UNLOCK(p); error = 0; goto out; } } else { /* * Find a free timer slot, skipping those reserved * for setitimer(). */ for (id = 3; id < TIMER_MAX; id++) if (p->p_itimers->its_timers[id] == NULL) break; if (id == TIMER_MAX) { PROC_UNLOCK(p); error = EAGAIN; goto out; } } it->it_timerid = id; p->p_itimers->its_timers[id] = it; if (evp != NULL) it->it_sigev = *evp; else { it->it_sigev.sigev_notify = SIGEV_SIGNAL; switch (clock_id) { default: case CLOCK_REALTIME: it->it_sigev.sigev_signo = SIGALRM; break; case CLOCK_VIRTUAL: it->it_sigev.sigev_signo = SIGVTALRM; break; case CLOCK_PROF: it->it_sigev.sigev_signo = SIGPROF; break; } it->it_sigev.sigev_value.sigval_int = id; } if (it->it_sigev.sigev_notify == SIGEV_SIGNAL || it->it_sigev.sigev_notify == SIGEV_THREAD_ID) { it->it_ksi.ksi_signo = it->it_sigev.sigev_signo; it->it_ksi.ksi_code = SI_TIMER; it->it_ksi.ksi_value = it->it_sigev.sigev_value; it->it_ksi.ksi_timerid = id; } PROC_UNLOCK(p); *timerid = id; return (0); out: ITIMER_LOCK(it); CLOCK_CALL(it->it_clockid, timer_delete, (it)); ITIMER_UNLOCK(it); uma_zfree(itimer_zone, it); return (error); } #ifndef _SYS_SYSPROTO_H_ struct timer_delete_args { timer_t timerid; }; #endif int timer_delete(struct thread *td, struct timer_delete_args *uap) { return (kern_timer_delete(td, uap->timerid)); } static struct itimer * itimer_find(struct proc *p, timer_t timerid, int include_deleting) { struct itimer *it; PROC_LOCK_ASSERT(p, MA_OWNED); if ((p->p_itimers == NULL) || (timerid >= TIMER_MAX) || (it = p->p_itimers->its_timers[timerid]) == NULL) { return (NULL); } ITIMER_LOCK(it); if (!include_deleting && (it->it_flags & ITF_DELETING) != 0) { ITIMER_UNLOCK(it); it = NULL; } return (it); } static int kern_timer_delete(struct thread *td, timer_t timerid) { struct proc *p = td->td_proc; struct itimer *it; PROC_LOCK(p); it = itimer_find(p, timerid, 0); if (it == NULL) { PROC_UNLOCK(p); return (EINVAL); } PROC_UNLOCK(p); it->it_flags |= ITF_DELETING; while (it->it_usecount > 0) { it->it_flags |= ITF_WANTED; msleep(it, &it->it_mtx, PPAUSE, "itimer", 0); } it->it_flags &= ~ITF_WANTED; CLOCK_CALL(it->it_clockid, timer_delete, (it)); ITIMER_UNLOCK(it); PROC_LOCK(p); if (KSI_ONQ(&it->it_ksi)) sigqueue_take(&it->it_ksi); p->p_itimers->its_timers[timerid] = NULL; PROC_UNLOCK(p); uma_zfree(itimer_zone, it); return (0); } #ifndef _SYS_SYSPROTO_H_ struct timer_settime_args { timer_t timerid; int flags; const struct itimerspec * value; struct itimerspec * ovalue; }; #endif int timer_settime(struct thread *td, struct timer_settime_args *uap) { struct proc *p = td->td_proc; struct itimer *it; struct itimerspec 
val, oval, *ovalp; int error; error = copyin(uap->value, &val, sizeof(val)); if (error != 0) return (error); if (uap->ovalue != NULL) ovalp = &oval; else ovalp = NULL; PROC_LOCK(p); if (uap->timerid < 3 || (it = itimer_find(p, uap->timerid, 0)) == NULL) { PROC_UNLOCK(p); error = EINVAL; } else { PROC_UNLOCK(p); itimer_enter(it); error = CLOCK_CALL(it->it_clockid, timer_settime, (it, uap->flags, &val, ovalp)); itimer_leave(it); ITIMER_UNLOCK(it); } if (error == 0 && uap->ovalue != NULL) error = copyout(ovalp, uap->ovalue, sizeof(*ovalp)); return (error); } #ifndef _SYS_SYSPROTO_H_ struct timer_gettime_args { timer_t timerid; struct itimerspec * value; }; #endif int timer_gettime(struct thread *td, struct timer_gettime_args *uap) { struct proc *p = td->td_proc; struct itimer *it; struct itimerspec val; int error; PROC_LOCK(p); if (uap->timerid < 3 || (it = itimer_find(p, uap->timerid, 0)) == NULL) { PROC_UNLOCK(p); error = EINVAL; } else { PROC_UNLOCK(p); itimer_enter(it); error = CLOCK_CALL(it->it_clockid, timer_gettime, (it, &val)); itimer_leave(it); ITIMER_UNLOCK(it); } if (error == 0) error = copyout(&val, uap->value, sizeof(val)); return (error); } #ifndef _SYS_SYSPROTO_H_ struct timer_getoverrun_args { timer_t timerid; }; #endif int timer_getoverrun(struct thread *td, struct timer_getoverrun_args *uap) { struct proc *p = td->td_proc; struct itimer *it; int error ; PROC_LOCK(p); if (uap->timerid < 3 || (it = itimer_find(p, uap->timerid, 0)) == NULL) { PROC_UNLOCK(p); error = EINVAL; } else { td->td_retval[0] = it->it_overrun_last; ITIMER_UNLOCK(it); PROC_UNLOCK(p); error = 0; } return (error); } static int realtimer_create(struct itimer *it) { callout_init_mtx(&it->it_callout, &it->it_mtx, 0); return (0); } static int realtimer_delete(struct itimer *it) { mtx_assert(&it->it_mtx, MA_OWNED); callout_stop(&it->it_callout); return (0); } static int realtimer_gettime(struct itimer *it, struct itimerspec *ovalue) { struct timespec cts; mtx_assert(&it->it_mtx, MA_OWNED); realtimer_clocktime(it->it_clockid, &cts); *ovalue = it->it_time; if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) { timespecsub(&ovalue->it_value, &cts); if (ovalue->it_value.tv_sec < 0 || (ovalue->it_value.tv_sec == 0 && ovalue->it_value.tv_nsec == 0)) { ovalue->it_value.tv_sec = 0; ovalue->it_value.tv_nsec = 1; } } return (0); } static int realtimer_settime(struct itimer *it, int flags, struct itimerspec *value, struct itimerspec *ovalue) { struct timespec cts, ts; struct timeval tv; struct itimerspec val; mtx_assert(&it->it_mtx, MA_OWNED); val = *value; if (itimespecfix(&val.it_value)) return (EINVAL); if (timespecisset(&val.it_value)) { if (itimespecfix(&val.it_interval)) return (EINVAL); } else { timespecclear(&val.it_interval); } if (ovalue != NULL) realtimer_gettime(it, ovalue); it->it_time = val; if (timespecisset(&val.it_value)) { realtimer_clocktime(it->it_clockid, &cts); ts = val.it_value; if ((flags & TIMER_ABSTIME) == 0) { /* Convert to absolute time. */ timespecadd(&it->it_time.it_value, &cts); } else { timespecsub(&ts, &cts); /* * We don't care if ts is negative, tztohz will * fix it. 
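kern_timer_create() above reserves ids 0-2 for setitimer() and hands out POSIX timer ids starting at 3, defaulting to SIGALRM/SIGVTALRM/SIGPROF when no sigevent is supplied. A minimal userland sketch using an explicit SIGEV_SIGNAL event; the overrun count is reported once the queued signal is accepted. Link with -lrt:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

int
main(void)
{
	struct sigevent sev;
	struct itimerspec its;
	timer_t tid;
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* collect it with sigwait() */

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGUSR1;
	if (timer_create(CLOCK_REALTIME, &sev, &tid) == -1) {
		perror("timer_create");
		return (1);
	}
	/* Ids 0-2 are reserved for setitimer(), so this id should be >= 3. */
	its.it_value.tv_sec = 0;
	its.it_value.tv_nsec = 100000000;	/* 100 ms */
	its.it_interval.tv_sec = 0;
	its.it_interval.tv_nsec = 100000000;
	timer_settime(tid, 0, &its, NULL);

	sleep(1);				/* several expirations queue up */
	sigwait(&set, &sig);			/* accept the timer signal */
	printf("got signal %d, %d overrun(s) reported\n",
	    sig, timer_getoverrun(tid));
	timer_delete(tid);
	return (0);
}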
			 */
		}
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv),
			realtimer_expire, it);
	} else {
		callout_stop(&it->it_callout);
	}
	return (0);
}

static void
realtimer_clocktime(clockid_t id, struct timespec *ts)
{
	if (id == CLOCK_REALTIME)
		getnanotime(ts);
	else	/* CLOCK_MONOTONIC */
		getnanouptime(ts);
}

int
itimer_accept(struct proc *p, timer_t timerid, ksiginfo_t *ksi)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	it = itimer_find(p, timerid, 0);
	if (it != NULL) {
		ksi->ksi_overrun = it->it_overrun;
		it->it_overrun_last = it->it_overrun;
		it->it_overrun = 0;
		ITIMER_UNLOCK(it);
		return (0);
	}
	return (EINVAL);
}

int
itimespecfix(struct timespec *ts)
{
	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return (0);
}

static void
realtimer_event_hook(struct proc *p, clockid_t clock_id, int event)
{
	struct itimers *its;
	struct itimer *it;
	int i;

	/*
	 * Timer 0 (ITIMER_REAL) is XSI interval timer, according to POSIX
	 * specification, it should be inherited by new process image.
	 */
	if (event == ITIMER_EV_EXEC)
		i = 1;
	else
		i = 0;
	its = p->p_itimers;
	for (; i < TIMER_MAX; i++) {
		if ((it = its->its_timers[i]) != NULL &&
		    it->it_clockid == clock_id) {
			ITIMER_LOCK(it);
			callout_stop(&it->it_callout);
			ITIMER_UNLOCK(it);
		}
	}
}

/* Timeout callback for realtime timer */
static void
realtimer_expire(void *arg)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimer *it;
	struct proc *p;

	it = (struct itimer *)arg;
	p = it->it_proc;

	realtimer_clocktime(it->it_clockid, &cts);
	/* Only fire if time is reached. */
	if (timespeccmp(&cts, &it->it_time.it_value, >=)) {
		if (timespecisset(&it->it_time.it_interval)) {
			timespecadd(&it->it_time.it_value,
			    &it->it_time.it_interval);
			while (timespeccmp(&cts, &it->it_time.it_value, >=)) {
				it->it_overrun++;
				timespecadd(&it->it_time.it_value,
				    &it->it_time.it_interval);
			}
		} else {
			/* single shot timer ? */
			timespecclear(&it->it_time.it_value);
		}
		if (timespecisset(&it->it_time.it_value)) {
			ts = it->it_time.it_value;
			timespecsub(&ts, &cts);
			TIMESPEC_TO_TIMEVAL(&tv, &ts);
			callout_reset(&it->it_callout, tvtohz(&tv),
			    realtimer_expire, it);
		}
		ITIMER_UNLOCK(it);
		itimer_fire(it);
		ITIMER_LOCK(it);
	} else if (timespecisset(&it->it_time.it_value)) {
		ts = it->it_time.it_value;
		timespecsub(&ts, &cts);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv),
		    realtimer_expire, it);
	}
}

void
itimer_fire(struct itimer *it)
{
	struct proc *p = it->it_proc;
-	struct thread *td;
+	int ret;

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		PROC_LOCK(p);
-		if (KSI_ONQ(&it->it_ksi)) {
-			it->it_overrun++;
-		} else {
-			if (it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
-				/* XXX
-				 * This is too slow if there are many threads,
-				 * why the world don't have a thread hash table,
-				 * sigh.
+		if (!KSI_ONQ(&it->it_ksi)) {
+			ret = psignal_event(p, &it->it_sigev, &it->it_ksi);
+			if (__predict_false(ret != 0)) {
+				it->it_overrun++;
+				/*
+				 * Broken userland code, thread went
+				 * away, disarm the timer.
				 */
-				FOREACH_THREAD_IN_PROC(p, td) {
-					if (td->td_tid ==
-					    it->it_sigev.sigev_notify_thread_id)
-						break;
-				}
-				if (td != NULL)
-					tdsignal(td, it->it_ksi.ksi_signo,
-					    &it->it_ksi, SIGTARGET_TD);
-				else {
-					/*
-					 * Broken userland code, thread went
-					 * away, disarm the timer.
-					 */
-#if 0
-					it->it_overrun++;
-#else
+				if (ret == ESRCH) {
					ITIMER_LOCK(it);
					timespecclear(&it->it_time.it_value);
					timespecclear(&it->it_time.it_interval);
					callout_stop(&it->it_callout);
					ITIMER_UNLOCK(it);
-#endif
				}
-			} else {
-				psignal_info(p, &it->it_ksi);
			}
+		} else {
+			it->it_overrun++;
		}
		PROC_UNLOCK(p);
	}
}

static void
itimers_alloc(struct proc *p)
{
	struct itimers *its;
	int i;

	its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO);
	LIST_INIT(&its->its_virtual);
	LIST_INIT(&its->its_prof);
	TAILQ_INIT(&its->its_worklist);
	for (i = 0; i < TIMER_MAX; i++)
		its->its_timers[i] = NULL;
	PROC_LOCK(p);
	if (p->p_itimers == NULL) {
		p->p_itimers = its;
		PROC_UNLOCK(p);
	} else {
		PROC_UNLOCK(p);
		free(its, M_SUBPROC);
	}
}

/* Clean up timers when some process events are being triggered. */
void
itimers_event_hook(struct proc *p, int event)
{
	struct itimers *its;
	struct itimer *it;
	int i;

	if (p->p_itimers != NULL) {
		its = p->p_itimers;
		for (i = 0; i < MAX_CLOCKS; ++i) {
			if (posix_clocks[i].event_hook != NULL)
				CLOCK_CALL(i, event_hook, (p, i, event));
		}
		/*
		 * According to susv3, XSI interval timers should be inherited
		 * by new image.
		 */
		if (event == ITIMER_EV_EXEC)
			i = 3;
		else if (event == ITIMER_EV_EXIT)
			i = 0;
		else
			panic("unhandled event");
		for (; i < TIMER_MAX; ++i) {
			if ((it = its->its_timers[i]) != NULL) {
				PROC_LOCK(p);
				if (KSI_ONQ(&it->it_ksi))
					sigqueue_take(&it->it_ksi);
				PROC_UNLOCK(p);
				uma_zfree(itimer_zone, its->its_timers[i]);
				its->its_timers[i] = NULL;
			}
		}
		if (its->its_timers[0] == NULL &&
		    its->its_timers[1] == NULL &&
		    its->its_timers[2] == NULL) {
			free(its, M_SUBPROC);
			p->p_itimers = NULL;
		}
	}
}
Index: head/sys/sys/signalvar.h
===================================================================
--- head/sys/sys/signalvar.h	(revision 151992)
+++ head/sys/sys/signalvar.h	(revision 151993)
@@ -1,366 +1,359 @@
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
* * @(#)signalvar.h 8.6 (Berkeley) 2/19/95 * $FreeBSD$ */ #ifndef _SYS_SIGNALVAR_H_ #define _SYS_SIGNALVAR_H_ #include #include #include #include /* * Kernel signal definitions and data structures, * not exported to user programs. */ /* * Logical process signal actions and state, needed only within the process * The mapping between sigacts and proc structures is 1:1 except for rfork() * processes masquerading as threads which use one structure for the whole * group. All members are locked by the included mutex. The reference count * and mutex must be last for the bcopy in sigacts_copy() to work. */ struct sigacts { sig_t ps_sigact[_SIG_MAXSIG]; /* Disposition of signals. */ sigset_t ps_catchmask[_SIG_MAXSIG]; /* Signals to be blocked. */ sigset_t ps_sigonstack; /* Signals to take on sigstack. */ sigset_t ps_sigintr; /* Signals that interrupt syscalls. */ sigset_t ps_sigreset; /* Signals that reset when caught. */ sigset_t ps_signodefer; /* Signals not masked while handled. */ sigset_t ps_siginfo; /* Signals that want SA_SIGINFO args. */ sigset_t ps_sigignore; /* Signals being ignored. */ sigset_t ps_sigcatch; /* Signals being caught by user. */ sigset_t ps_freebsd4; /* signals using freebsd4 ucontext. */ sigset_t ps_osigset; /* Signals using <= 3.x osigset_t. */ sigset_t ps_usertramp; /* SunOS compat; libc sigtramp. XXX */ int ps_flag; int ps_refcnt; struct mtx ps_mtx; }; #define PS_NOCLDWAIT 0x0001 /* No zombies if child dies */ #define PS_NOCLDSTOP 0x0002 /* No SIGCHLD when children stop. */ #define PS_CLDSIGIGN 0x0004 /* The SIGCHLD handler is SIG_IGN. */ #if defined(_KERNEL) && defined(COMPAT_43) /* * Compatibility. */ typedef struct { struct osigcontext si_sc; int si_signo; int si_code; union sigval si_value; } osiginfo_t; struct osigaction { union { void (*__sa_handler)(int); void (*__sa_sigaction)(int, osiginfo_t *, void *); } __sigaction_u; /* signal handler */ osigset_t sa_mask; /* signal mask to apply */ int sa_flags; /* see signal options below */ }; typedef void __osiginfohandler_t(int, osiginfo_t *, void *); #endif /* _KERNEL && COMPAT_43 */ /* additional signal action values, used only temporarily/internally */ #define SIG_CATCH ((__sighandler_t *)2) #define SIG_HOLD ((__sighandler_t *)3) /* * get signal action for process and signal; currently only for current process */ #define SIGACTION(p, sig) (p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) /* * sigset_t manipulation macros */ #define SIGADDSET(set, signo) \ ((set).__bits[_SIG_WORD(signo)] |= _SIG_BIT(signo)) #define SIGDELSET(set, signo) \ ((set).__bits[_SIG_WORD(signo)] &= ~_SIG_BIT(signo)) #define SIGEMPTYSET(set) \ do { \ int __i; \ for (__i = 0; __i < _SIG_WORDS; __i++) \ (set).__bits[__i] = 0; \ } while (0) #define SIGFILLSET(set) \ do { \ int __i; \ for (__i = 0; __i < _SIG_WORDS; __i++) \ (set).__bits[__i] = ~0U; \ } while (0) #define SIGISMEMBER(set, signo) \ ((set).__bits[_SIG_WORD(signo)] & _SIG_BIT(signo)) #define SIGISEMPTY(set) (__sigisempty(&(set))) #define SIGNOTEMPTY(set) (!__sigisempty(&(set))) #define SIGSETEQ(set1, set2) (__sigseteq(&(set1), &(set2))) #define SIGSETNEQ(set1, set2) (!__sigseteq(&(set1), &(set2))) #define SIGSETOR(set1, set2) \ do { \ int __i; \ for (__i = 0; __i < _SIG_WORDS; __i++) \ (set1).__bits[__i] |= (set2).__bits[__i]; \ } while (0) #define SIGSETAND(set1, set2) \ do { \ int __i; \ for (__i = 0; __i < _SIG_WORDS; __i++) \ (set1).__bits[__i] &= (set2).__bits[__i]; \ } while (0) #define SIGSETNAND(set1, set2) \ do { \ int __i; \ for (__i = 0; __i < _SIG_WORDS; __i++) \ (set1).__bits[__i] 
&= ~(set2).__bits[__i]; \ } while (0) #define SIGSETLO(set1, set2) ((set1).__bits[0] = (set2).__bits[0]) #define SIGSETOLD(set, oset) ((set).__bits[0] = (oset)) #define SIG_CANTMASK(set) \ SIGDELSET(set, SIGKILL), SIGDELSET(set, SIGSTOP) #define SIG_STOPSIGMASK(set) \ SIGDELSET(set, SIGSTOP), SIGDELSET(set, SIGTSTP), \ SIGDELSET(set, SIGTTIN), SIGDELSET(set, SIGTTOU) #define SIG_CONTSIGMASK(set) \ SIGDELSET(set, SIGCONT) #define sigcantmask (sigmask(SIGKILL) | sigmask(SIGSTOP)) #define SIG2OSIG(sig, osig) (osig = (sig).__bits[0]) #define OSIG2SIG(osig, sig) SIGEMPTYSET(sig); (sig).__bits[0] = osig static __inline int __sigisempty(sigset_t *set) { int i; for (i = 0; i < _SIG_WORDS; i++) { if (set->__bits[i]) return (0); } return (1); } static __inline int __sigseteq(sigset_t *set1, sigset_t *set2) { int i; for (i = 0; i < _SIG_WORDS; i++) { if (set1->__bits[i] != set2->__bits[i]) return (0); } return (1); } struct osigevent { int sigev_notify; /* Notification type */ union { int __sigev_signo; /* Signal number */ int __sigev_notify_kqueue; } __sigev_u; union sigval sigev_value; /* Signal value */ }; typedef struct ksiginfo { TAILQ_ENTRY(ksiginfo) ksi_link; siginfo_t ksi_info; int ksi_flags; struct sigqueue *ksi_sigq; } ksiginfo_t; #define ksi_signo ksi_info.si_signo #define ksi_errno ksi_info.si_errno #define ksi_code ksi_info.si_code #define ksi_pid ksi_info.si_pid #define ksi_uid ksi_info.si_uid #define ksi_status ksi_info.si_status #define ksi_addr ksi_info.si_addr #define ksi_value ksi_info.si_value #define ksi_band ksi_info.si_band #define ksi_trapno ksi_info.si_trapno #define ksi_overrun ksi_info.si_overrun #define ksi_timerid ksi_info.si_timerid /* bits for ksi_flags */ #define KSI_TRAP 0x01 /* Generated by trap. */ #define KSI_EXT 0x02 /* Externally managed ksi. */ #define KSI_INS 0x04 /* Directly insert ksi, not the copy */ #define KSI_COPYMASK KSI_TRAP #define KSI_ONQ(ksi) ((ksi)->ksi_sigq != NULL) typedef struct sigqueue { sigset_t sq_signals; TAILQ_HEAD(, ksiginfo) sq_list; struct proc *sq_proc; int sq_flags; } sigqueue_t; /* Flags for ksi_flags */ #define SQ_INIT 0x01 #ifdef _KERNEL -/* - * Specifies the target of a signal. - * P - Doesn't matter which thread it gets delivered to. - * TD - Must be delivered to a specific thread. - */ -typedef enum sigtarget_enum { SIGTARGET_P, SIGTARGET_TD } sigtarget_t; - /* Return nonzero if process p has an unmasked pending signal. */ #define SIGPENDING(td) \ (!SIGISEMPTY((td)->td_siglist) && \ !sigsetmasked(&(td)->td_siglist, &(td)->td_sigmask)) /* * Return the value of the pseudo-expression ((*set & ~*mask) != 0). This * is an optimized version of SIGISEMPTY() on a temporary variable * containing SIGSETNAND(*set, *mask). */ static __inline int sigsetmasked(sigset_t *set, sigset_t *mask) { int i; for (i = 0; i < _SIG_WORDS; i++) { if (set->__bits[i] & ~mask->__bits[i]) return (0); } return (1); } #define ksiginfo_init(ksi) \ do { \ bzero(ksi, sizeof(ksiginfo_t)); \ } while(0) #define ksiginfo_init_trap(ksi) \ do { \ ksiginfo_t *kp = ksi; \ bzero(kp, sizeof(ksiginfo_t)); \ kp->ksi_flags |= KSI_TRAP; \ } while(0) static __inline void ksiginfo_copy(ksiginfo_t *src, ksiginfo_t *dst) { (dst)->ksi_info = src->ksi_info; (dst)->ksi_flags = (src->ksi_flags & KSI_COPYMASK); } struct pgrp; struct thread; struct proc; struct sigio; struct mtx; extern int sugid_coredump; /* Sysctl variable kern.sugid_coredump */ extern struct mtx sigio_lock; /* * Lock the pointers for a sigio object in the underlying objects of * a file descriptor. 
 */
#define SIGIO_LOCK()	mtx_lock(&sigio_lock)
#define SIGIO_TRYLOCK()	mtx_trylock(&sigio_lock)
#define SIGIO_UNLOCK()	mtx_unlock(&sigio_lock)
#define SIGIO_LOCKED()	mtx_owned(&sigio_lock)
#define SIGIO_ASSERT(type)	mtx_assert(&sigio_lock, type)

/*
 * Machine-independent functions:
 */
int	cursig(struct thread *td);
void	execsigs(struct proc *p);
void	gsignal(int pgid, int sig);
void	killproc(struct proc *p, char *why);
void	pgsigio(struct sigio **, int signum, int checkctty);
void	pgsignal(struct pgrp *pgrp, int sig, int checkctty);
void	postsig(int sig);
void	psignal(struct proc *p, int sig);
-int	psignal_info(struct proc *p, ksiginfo_t *ksi);
+int	psignal_event(struct proc *p, struct sigevent *, ksiginfo_t *);
struct sigacts	*sigacts_alloc(void);
void	sigacts_copy(struct sigacts *dest, struct sigacts *src);
void	sigacts_free(struct sigacts *ps);
struct sigacts	*sigacts_hold(struct sigacts *ps);
int	sigacts_shared(struct sigacts *ps);
void	sigexit(struct thread *td, int signum) __dead2;
int	sig_ffs(sigset_t *set);
void	siginit(struct proc *p);
void	signotify(struct thread *td);
-int	tdsignal(struct thread *td, int sig, ksiginfo_t *ksi,
-	    sigtarget_t target);
+int	tdsignal(struct proc *p, struct thread *td, int sig,
+	    ksiginfo_t *ksi);
void	trapsignal(struct thread *td, ksiginfo_t *);
int	ptracestop(struct thread *td, int sig);
ksiginfo_t *	ksiginfo_alloc(void);
void	ksiginfo_free(ksiginfo_t *);
void	sigqueue_init(struct sigqueue *queue, struct proc *p);
void	sigqueue_flush(struct sigqueue *queue);
void	sigqueue_delete_proc(struct proc *p, int sig);
void	sigqueue_delete_set(struct sigqueue *queue, sigset_t *set);
void	sigqueue_delete(struct sigqueue *queue, int sig);
void	sigqueue_move_set(struct sigqueue *src, sigqueue_t *dst, sigset_t *);
int	sigqueue_get(struct sigqueue *queue, int sig, ksiginfo_t *info);
int	sigqueue_add(struct sigqueue *queue, int sig, ksiginfo_t *info);
void	sigqueue_collect_set(struct sigqueue *queue, sigset_t *set);
void	sigqueue_move(struct sigqueue *, struct sigqueue *, int sig);
void	sigqueue_delete_set_proc(struct proc *, sigset_t *);
void	sigqueue_delete_stopmask_proc(struct proc *);
void	sigqueue_take(ksiginfo_t *ksi);

/*
 * Machine-dependent functions:
 */
void	sendsig(sig_t, ksiginfo_t *, sigset_t *retmask);

#endif /* _KERNEL */

#endif /* !_SYS_SIGNALVAR_H_ */
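
Reviewer note (not part of the patch): the kern_time.c hunk routes SIGEV_SIGNAL and
SIGEV_THREAD_ID notification through the new psignal_event() instead of walking the
thread list in itimer_fire(), and the signalvar.h hunk adjusts the psignal_event() and
tdsignal() prototypes to match. For orientation only, a minimal userland sketch of the
POSIX per-process timer interface that this kernel path services follows; the signal
number (SIGUSR1), the 500 ms interval, the sigev_value of 42, and the handler name are
illustrative assumptions, not something taken from the commit.

/*
 * timer_demo.c -- illustrative userland sketch only; not part of this diff.
 * Exercises timer_create()/timer_settime() with SIGEV_SIGNAL delivery,
 * the case serviced by itimer_fire()/psignal_event() in the change above.
 * Build: cc -o timer_demo timer_demo.c   (older Linux/glibc may need -lrt)
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t got_value;

static void
handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* si_value carries the sigev_value supplied at timer_create() time. */
	got_value = si->si_value.sival_int;
}

int
main(void)
{
	struct sigaction sa;
	struct sigevent sev;
	struct itimerspec its;
	timer_t tid;

	memset(&sa, 0, sizeof(sa));
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = handler;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1) {
		perror("sigaction");
		return (1);
	}

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;	/* queue a signal on expiry */
	sev.sigev_signo = SIGUSR1;
	sev.sigev_value.sival_int = 42;
	if (timer_create(CLOCK_REALTIME, &sev, &tid) == -1) {
		perror("timer_create");
		return (1);
	}

	/* Fire after 500 ms, then every 500 ms. */
	its.it_value.tv_sec = 0;
	its.it_value.tv_nsec = 500000000;
	its.it_interval = its.it_value;
	if (timer_settime(tid, 0, &its, NULL) == -1) {
		perror("timer_settime");
		return (1);
	}

	pause();	/* wait for the first expiration */
	printf("timer fired, sival_int=%d, overruns=%d\n",
	    (int)got_value, timer_getoverrun(tid));
	timer_delete(tid);
	return (0);
}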