diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index d0044dfc19a0..41f6a76c4fa1 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -1,1245 +1,1234 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_ktrace.h" #include "opt_kstack_pages.h" #include #include -#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KDTRACE_HOOKS #include dtrace_fork_func_t dtrace_fasttrap_fork; #endif SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE3(proc, , , create, "struct proc *", "struct proc *", "int"); #ifndef _SYS_SYSPROTO_H_ struct fork_args { int dummy; }; #endif /* ARGSUSED */ int sys_fork(struct thread *td, struct fork_args *uap) { struct fork_req fr; int error, pid; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC; fr.fr_pidp = &pid; error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; } return (error); } /* ARGUSED */ int sys_pdfork(struct thread *td, struct pdfork_args *uap) { struct fork_req fr; int error, fd, pid; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC | RFPROCDESC; fr.fr_pidp = &pid; fr.fr_pd_fd = &fd; fr.fr_pd_flags = uap->flags; AUDIT_ARG_FFLAGS(uap->flags); /* * It is necessary to return fd by reference because 0 is a valid file * descriptor number, and the child needs to be able to distinguish * itself from the parent using the return value. 
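 *
 * Purely as an illustration of that interface (a sketch, not part of this
 * change), a userspace caller of pdfork(2) could look like the lines below.
 * The prototype is pid_t pdfork(int *fdp, int flags); the zero flags value
 * and the SIGTERM choice are only examples.
 *
 *	#include <sys/types.h>
 *	#include <sys/procdesc.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int pd;
 *	pid_t pid = pdfork(&pd, 0);
 *	if (pid == 0)
 *		_exit(0);
 *	else if (pid > 0)
 *		pdkill(pd, SIGTERM);
 *
 * The copyout of fd below runs only in the parent's return path, so the
 * child never sees a descriptor and must rely on the 0 return value.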
*/ error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; error = copyout(&fd, uap->fdp, sizeof(fd)); } return (error); } /* ARGSUSED */ int sys_vfork(struct thread *td, struct vfork_args *uap) { struct fork_req fr; int error, pid; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM; fr.fr_pidp = &pid; error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; } return (error); } int sys_rfork(struct thread *td, struct rfork_args *uap) { struct fork_req fr; int error, pid; /* Don't allow kernel-only flags. */ if ((uap->flags & RFKERNELONLY) != 0) return (EINVAL); /* RFSPAWN must not appear with others */ if ((uap->flags & RFSPAWN) != 0 && uap->flags != RFSPAWN) return (EINVAL); AUDIT_ARG_FFLAGS(uap->flags); bzero(&fr, sizeof(fr)); if ((uap->flags & RFSPAWN) != 0) { fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM; fr.fr_flags2 = FR2_DROPSIG_CAUGHT; } else { fr.fr_flags = uap->flags; } fr.fr_pidp = &pid; error = fork1(td, &fr); if (error == 0) { td->td_retval[0] = pid; td->td_retval[1] = 0; } return (error); } int __exclusive_cache_line nprocs = 1; /* process 0 */ int lastpid = 0; SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0, "Last used PID"); /* * Random component to lastpid generation. We mix in a random factor to make * it a little harder to predict. We sanity check the modulus value to avoid * doing it in critical paths. Don't let it be too small or we pointlessly * waste randomness entropy, and don't let it be impossibly large. Using a * modulus that is too big causes a LOT more process table scans and slows * down fork processing as the pidchecked caching is defeated. */ static int randompid = 0; static int sysctl_kern_randompid(SYSCTL_HANDLER_ARGS) { int error, pid; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error != 0) return(error); sx_xlock(&allproc_lock); pid = randompid; error = sysctl_handle_int(oidp, &pid, 0, req); if (error == 0 && req->newptr != NULL) { if (pid == 0) randompid = 0; else if (pid == 1) /* generate a random PID modulus between 100 and 1123 */ randompid = 100 + arc4random() % 1024; else if (pid < 0 || pid > pid_max - 100) /* out of range */ randompid = pid_max - 100; else if (pid < 100) /* Make it reasonable */ randompid = 100; else randompid = pid; } sx_xunlock(&allproc_lock); return (error); } SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, sysctl_kern_randompid, "I", "Random PID modulus. Special values: 0: disable, 1: choose random value"); extern bitstr_t proc_id_pidmap; extern bitstr_t proc_id_grpidmap; extern bitstr_t proc_id_sessidmap; extern bitstr_t proc_id_reapmap; /* * Find an unused process ID * * If RFHIGHPID is set (used during system boot), do not allocate * low-numbered pids. */ static int fork_findpid(int flags) { pid_t result; int trypid, random; /* * Avoid calling arc4random with procid_lock held. 
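 *
 * The search below starts at lastpid + 1 (plus the random offset when
 * randompid is enabled), scans proc_id_pidmap for the first clear bit,
 * and wraps around to PID 2 once it reaches pid_max.  A candidate that
 * is still in use as a process group, session, or reaper-subtree ID is
 * skipped and the scan resumes just past it.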
*/ random = 0; if (__predict_false(randompid)) random = arc4random() % randompid; mtx_lock(&procid_lock); trypid = lastpid + 1; if (flags & RFHIGHPID) { if (trypid < 10) trypid = 10; } else { trypid += random; } retry: if (trypid >= pid_max) trypid = 2; bit_ffc_at(&proc_id_pidmap, trypid, pid_max, &result); if (result == -1) { KASSERT(trypid != 2, ("unexpectedly ran out of IDs")); trypid = 2; goto retry; } if (bit_test(&proc_id_grpidmap, result) || bit_test(&proc_id_sessidmap, result) || bit_test(&proc_id_reapmap, result)) { trypid = result + 1; goto retry; } /* * RFHIGHPID does not mess with the lastpid counter during boot. */ if ((flags & RFHIGHPID) == 0) lastpid = result; bit_set(&proc_id_pidmap, result); mtx_unlock(&procid_lock); return (result); } static int fork_norfproc(struct thread *td, int flags) { struct proc *p1; int error; KASSERT((flags & RFPROC) == 0, ("fork_norfproc called with RFPROC set")); p1 = td->td_proc; /* * Quiesce other threads if necessary. If RFMEM is not specified we * must ensure that other threads do not concurrently create a second * process sharing the vmspace, see vmspace_unshare(). */ if ((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS && ((flags & (RFCFDG | RFFDG)) != 0 || (flags & RFMEM) == 0)) { PROC_LOCK(p1); if (thread_single(p1, SINGLE_BOUNDARY)) { PROC_UNLOCK(p1); return (ERESTART); } PROC_UNLOCK(p1); } error = vm_forkproc(td, NULL, NULL, NULL, flags); if (error != 0) goto fail; /* * Close all file descriptors. */ if ((flags & RFCFDG) != 0) { struct filedesc *fdtmp; struct pwddesc *pdtmp; pdtmp = pdinit(td->td_proc->p_pd, false); fdtmp = fdinit(); pdescfree(td); fdescfree(td); p1->p_fd = fdtmp; p1->p_pd = pdtmp; } /* * Unshare file descriptors (from parent). */ if ((flags & RFFDG) != 0) { fdunshare(td); pdunshare(td); } fail: if ((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS && ((flags & (RFCFDG | RFFDG)) != 0 || (flags & RFMEM) == 0)) { PROC_LOCK(p1); thread_single_end(p1, SINGLE_BOUNDARY); PROC_UNLOCK(p1); } return (error); } static void do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2, struct vmspace *vm2, struct file *fp_procdesc) { struct proc *p1, *pptr; struct filedesc *fd; struct filedesc_to_leader *fdtol; struct pwddesc *pd; struct sigacts *newsigacts; p1 = td->td_proc; PROC_LOCK(p1); bcopy(&p1->p_startcopy, &p2->p_startcopy, __rangeof(struct proc, p_startcopy, p_endcopy)); pargs_hold(p2->p_args); PROC_UNLOCK(p1); bzero(&p2->p_startzero, __rangeof(struct proc, p_startzero, p_endzero)); /* Tell the prison that we exist. */ prison_proc_hold(p2->p_ucred->cr_prison); p2->p_state = PRS_NEW; /* protect against others */ p2->p_pid = fork_findpid(fr->fr_flags); AUDIT_ARG_PID(p2->p_pid); TSFORK(p2->p_pid, p1->p_pid); sx_xlock(&allproc_lock); LIST_INSERT_HEAD(&allproc, p2, p_list); allproc_gen++; prison_proc_link(p2->p_ucred->cr_prison, p2); sx_xunlock(&allproc_lock); sx_xlock(PIDHASHLOCK(p2->p_pid)); LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash); sx_xunlock(PIDHASHLOCK(p2->p_pid)); tidhash_add(td2); /* * Malloc things while we don't hold any locks. */ if (fr->fr_flags & RFSIGSHARE) newsigacts = NULL; else newsigacts = sigacts_alloc(); /* * Copy filedesc. 
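 *
 * The branches below choose the descriptor-table treatment for the child:
 * RFCFDG creates fresh file and path (pwddesc) tables, RFFDG copies them
 * from the parent, and with neither flag they are shared.  FR2_SHARE_PATHS
 * inverts only the pwddesc choice in the latter two cases.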
*/ if (fr->fr_flags & RFCFDG) { pd = pdinit(p1->p_pd, false); fd = fdinit(); fdtol = NULL; } else if (fr->fr_flags & RFFDG) { if (fr->fr_flags2 & FR2_SHARE_PATHS) pd = pdshare(p1->p_pd); else pd = pdcopy(p1->p_pd); fd = fdcopy(p1->p_fd); fdtol = NULL; } else { if (fr->fr_flags2 & FR2_SHARE_PATHS) pd = pdcopy(p1->p_pd); else pd = pdshare(p1->p_pd); fd = fdshare(p1->p_fd); if (p1->p_fdtol == NULL) p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL, p1->p_leader); if ((fr->fr_flags & RFTHREAD) != 0) { /* * Shared file descriptor table, and shared * process leaders. */ fdtol = filedesc_to_leader_share(p1->p_fdtol, p1->p_fd); } else { /* * Shared file descriptor table, and different * process leaders. */ fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p1->p_fd, p2); } } /* * Make a proc table entry for the new process. * Start by zeroing the section of proc that is zero-initialized, * then copy the section that is copied directly from the parent. */ PROC_LOCK(p2); PROC_LOCK(p1); bzero(&td2->td_startzero, __rangeof(struct thread, td_startzero, td_endzero)); bcopy(&td->td_startcopy, &td2->td_startcopy, __rangeof(struct thread, td_startcopy, td_endcopy)); bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name)); td2->td_sigstk = td->td_sigstk; td2->td_flags = TDF_INMEM; td2->td_lend_user_pri = PRI_MAX; #ifdef VIMAGE td2->td_vnet = NULL; td2->td_vnet_lpush = NULL; #endif /* * Allow the scheduler to initialize the child. */ thread_lock(td); sched_fork(td, td2); /* * Request AST to check for TDP_RFPPWAIT. Do it here * to avoid calling thread_lock() again. */ if ((fr->fr_flags & RFPPWAIT) != 0) ast_sched_locked(td, TDA_VFORK); thread_unlock(td); /* * Duplicate sub-structures as needed. * Increase reference counts on shared objects. */ p2->p_flag = P_INMEM; p2->p_flag2 = p1->p_flag2 & (P2_ASLR_DISABLE | P2_ASLR_ENABLE | P2_ASLR_IGNSTART | P2_NOTRACE | P2_NOTRACE_EXEC | P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE | P2_TRAPCAP | P2_STKGAP_DISABLE | P2_STKGAP_DISABLE_EXEC | P2_NO_NEW_PRIVS | P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC); p2->p_swtick = ticks; if (p1->p_flag & P_PROFIL) startprofclock(p2); if (fr->fr_flags & RFSIGSHARE) { p2->p_sigacts = sigacts_hold(p1->p_sigacts); } else { sigacts_copy(newsigacts, p1->p_sigacts); p2->p_sigacts = newsigacts; if ((fr->fr_flags2 & (FR2_DROPSIG_CAUGHT | FR2_KPROC)) != 0) { mtx_lock(&p2->p_sigacts->ps_mtx); if ((fr->fr_flags2 & FR2_DROPSIG_CAUGHT) != 0) sig_drop_caught(p2); if ((fr->fr_flags2 & FR2_KPROC) != 0) p2->p_sigacts->ps_flag |= PS_NOCLDWAIT; mtx_unlock(&p2->p_sigacts->ps_mtx); } } if (fr->fr_flags & RFTSIGZMB) p2->p_sigparent = RFTSIGNUM(fr->fr_flags); else if (fr->fr_flags & RFLINUXTHPN) p2->p_sigparent = SIGUSR1; else p2->p_sigparent = SIGCHLD; if ((fr->fr_flags2 & FR2_KPROC) != 0) { p2->p_flag |= P_SYSTEM | P_KPROC; td2->td_pflags |= TDP_KTHREAD; } p2->p_textvp = p1->p_textvp; p2->p_textdvp = p1->p_textdvp; p2->p_fd = fd; p2->p_fdtol = fdtol; p2->p_pd = pd; if (p1->p_flag2 & P2_INHERIT_PROTECTED) { p2->p_flag |= P_PROTECTED; p2->p_flag2 |= P2_INHERIT_PROTECTED; } /* * p_limit is copy-on-write. Bump its refcount. */ lim_fork(p1, p2); thread_cow_get_proc(td2, p2); pstats_fork(p1->p_stats, p2->p_stats); PROC_UNLOCK(p1); PROC_UNLOCK(p2); /* * Bump references to the text vnode and directory, and copy * the hardlink name. */ if (p2->p_textvp != NULL) vrefact(p2->p_textvp); if (p2->p_textdvp != NULL) vrefact(p2->p_textdvp); p2->p_binname = p1->p_binname == NULL ? NULL : strdup(p1->p_binname, M_PARGS); /* * Set up linkage for kernel based threading. 
*/ if ((fr->fr_flags & RFTHREAD) != 0) { mtx_lock(&ppeers_lock); p2->p_peers = p1->p_peers; p1->p_peers = p2; p2->p_leader = p1->p_leader; mtx_unlock(&ppeers_lock); PROC_LOCK(p1->p_leader); if ((p1->p_leader->p_flag & P_WEXIT) != 0) { PROC_UNLOCK(p1->p_leader); /* * The task leader is exiting, so process p1 is * going to be killed shortly. Since p1 obviously * isn't dead yet, we know that the leader is either * sending SIGKILL's to all the processes in this * task or is sleeping waiting for all the peers to * exit. We let p1 complete the fork, but we need * to go ahead and kill the new process p2 since * the task leader may not get a chance to send * SIGKILL to it. We leave it on the list so that * the task leader will wait for this new process * to commit suicide. */ PROC_LOCK(p2); kern_psignal(p2, SIGKILL); PROC_UNLOCK(p2); } else PROC_UNLOCK(p1->p_leader); } else { p2->p_peers = NULL; p2->p_leader = p2; } sx_xlock(&proctree_lock); PGRP_LOCK(p1->p_pgrp); PROC_LOCK(p2); PROC_LOCK(p1); /* * Preserve some more flags in subprocess. P_PROFIL has already * been preserved. */ p2->p_flag |= p1->p_flag & P_SUGID; td2->td_pflags |= (td->td_pflags & (TDP_ALTSTACK | TDP_SIGFASTBLOCK)); SESS_LOCK(p1->p_session); if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) p2->p_flag |= P_CONTROLT; SESS_UNLOCK(p1->p_session); if (fr->fr_flags & RFPPWAIT) p2->p_flag |= P_PPWAIT; p2->p_pgrp = p1->p_pgrp; LIST_INSERT_AFTER(p1, p2, p_pglist); PGRP_UNLOCK(p1->p_pgrp); LIST_INIT(&p2->p_children); LIST_INIT(&p2->p_orphans); callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0); /* * This begins the section where we must prevent the parent * from being swapped. */ _PHOLD(p1); PROC_UNLOCK(p1); /* * Attach the new process to its parent. * * If RFNOWAIT is set, the newly created process becomes a child * of init. This effectively disassociates the child from the * parent. */ if ((fr->fr_flags & RFNOWAIT) != 0) { pptr = p1->p_reaper; p2->p_reaper = pptr; } else { p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ? p1 : p1->p_reaper; pptr = p1; } p2->p_pptr = pptr; p2->p_oppid = pptr->p_pid; LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling); LIST_INIT(&p2->p_reaplist); LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling); if (p2->p_reaper == p1 && p1 != initproc) { p2->p_reapsubtree = p2->p_pid; proc_id_set_cond(PROC_ID_REAP, p2->p_pid); } sx_xunlock(&proctree_lock); /* Inform accounting that we have forked. */ p2->p_acflag = AFORK; PROC_UNLOCK(p2); #ifdef KTRACE ktrprocfork(p1, p2); #endif /* * Finish creating the child process. It will return via a different * execution path later. (ie: directly into user mode) */ vm_forkproc(td, p2, td2, vm2, fr->fr_flags); if (fr->fr_flags == (RFFDG | RFPROC)) { VM_CNT_INC(v_forks); VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) { VM_CNT_INC(v_vforks); VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (p1 == &proc0) { VM_CNT_INC(v_kthreads); VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else { VM_CNT_INC(v_rforks); VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } /* * Associate the process descriptor with the process before anything * can happen that might cause that process to need the descriptor. * However, don't do this until after fork(2) can no longer fail. 
*/ if (fr->fr_flags & RFPROCDESC) procdesc_new(p2, fr->fr_pd_flags); /* * Both processes are set up, now check if any loadable modules want * to adjust anything. */ EVENTHANDLER_DIRECT_INVOKE(process_fork, p1, p2, fr->fr_flags); /* * Set the child start time and mark the process as being complete. */ PROC_LOCK(p2); PROC_LOCK(p1); microuptime(&p2->p_stats->p_start); PROC_SLOCK(p2); p2->p_state = PRS_NORMAL; PROC_SUNLOCK(p2); #ifdef KDTRACE_HOOKS /* * Tell the DTrace fasttrap provider about the new process so that any * tracepoints inherited from the parent can be removed. We have to do * this only after p_state is PRS_NORMAL since the fasttrap module will * use pfind() later on. */ if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork) dtrace_fasttrap_fork(p1, p2); #endif if (fr->fr_flags & RFPPWAIT) { td->td_pflags |= TDP_RFPPWAIT; td->td_rfppwait_p = p2; td->td_dbgflags |= TDB_VFORK; } PROC_UNLOCK(p2); /* * Tell any interested parties about the new process. */ knote_fork(p1->p_klist, p2->p_pid); /* * Now can be swapped. */ _PRELE(p1); PROC_UNLOCK(p1); SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags); if (fr->fr_flags & RFPROCDESC) { procdesc_finit(p2->p_procdesc, fp_procdesc); fdrop(fp_procdesc, td); } /* * Speculative check for PTRACE_FORK. PTRACE_FORK is not * synced with forks in progress so it is OK if we miss it * if being set atm. */ if ((p1->p_ptevents & PTRACE_FORK) != 0) { sx_xlock(&proctree_lock); PROC_LOCK(p2); /* * p1->p_ptevents & p1->p_pptr are protected by both * process and proctree locks for modifications, * so owning proctree_lock allows the race-free read. */ if ((p1->p_ptevents & PTRACE_FORK) != 0) { /* * Arrange for debugger to receive the fork event. * * We can report PL_FLAG_FORKED regardless of * P_FOLLOWFORK settings, but it does not make a sense * for runaway child. */ td->td_dbgflags |= TDB_FORK; td->td_dbg_forked = p2->p_pid; td2->td_dbgflags |= TDB_STOPATFORK; proc_set_traced(p2, true); CTR2(KTR_PTRACE, "do_fork: attaching to new child pid %d: oppid %d", p2->p_pid, p2->p_oppid); proc_reparent(p2, p1->p_pptr, false); } PROC_UNLOCK(p2); sx_xunlock(&proctree_lock); } racct_proc_fork_done(p2); if ((fr->fr_flags & RFSTOPPED) == 0) { if (fr->fr_pidp != NULL) *fr->fr_pidp = p2->p_pid; /* * If RFSTOPPED not requested, make child runnable and * add to run queue. */ thread_lock(td2); TD_SET_CAN_RUN(td2); sched_add(td2, SRQ_BORING); } else { *fr->fr_procp = p2; } } static void ast_vfork(struct thread *td, int tda __unused) { struct proc *p, *p2; MPASS(td->td_pflags & TDP_RFPPWAIT); p = td->td_proc; /* * Preserve synchronization semantics of vfork. If * waiting for child to exec or exit, fork set * P_PPWAIT on child, and there we sleep on our proc * (in case of exit). * * Do it after the ptracestop() above is finished, to * not block our debugger until child execs or exits * to finish vfork wait. 
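 *
 * For reference, the userspace contract being preserved is the ordinary
 * vfork(2) one; the fragment below is illustrative only, and /bin/true is
 * just an arbitrary example program.
 *
 *	#include <unistd.h>
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		execl("/bin/true", "true", (char *)NULL);
 *		_exit(127);
 *	}
 *
 * The parent is not allowed to run again until the child has performed
 * the execl() or the _exit(); the P_PPWAIT loop below is what enforces
 * that on the parent side.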
*/ td->td_pflags &= ~TDP_RFPPWAIT; p2 = td->td_rfppwait_p; again: PROC_LOCK(p2); while (p2->p_flag & P_PPWAIT) { PROC_LOCK(p); if (thread_suspend_check_needed()) { PROC_UNLOCK(p2); thread_suspend_check(0); PROC_UNLOCK(p); goto again; } else { PROC_UNLOCK(p); } cv_timedwait(&p2->p_pwait, &p2->p_mtx, hz); } PROC_UNLOCK(p2); if (td->td_dbgflags & TDB_VFORK) { PROC_LOCK(p); if (p->p_ptevents & PTRACE_VFORK) ptracestop(td, SIGTRAP, NULL); td->td_dbgflags &= ~TDB_VFORK; PROC_UNLOCK(p); } } int fork1(struct thread *td, struct fork_req *fr) { struct proc *p1, *newproc; struct thread *td2; struct vmspace *vm2; struct ucred *cred; struct file *fp_procdesc; struct pgrp *pg; vm_ooffset_t mem_charged; int error, nprocs_new; static int curfail; static struct timeval lastfail; int flags, pages; bool killsx_locked, singlethreaded; flags = fr->fr_flags; pages = fr->fr_pages; if ((flags & RFSTOPPED) != 0) MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL); else MPASS(fr->fr_procp == NULL); /* Check for the undefined or unimplemented flags. */ if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0) return (EINVAL); /* Signal value requires RFTSIGZMB. */ if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0) return (EINVAL); /* Can't copy and clear. */ if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG)) return (EINVAL); /* Check the validity of the signal number. */ if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG) return (EINVAL); if ((flags & RFPROCDESC) != 0) { /* Can't not create a process yet get a process descriptor. */ if ((flags & RFPROC) == 0) return (EINVAL); /* Must provide a place to put a procdesc if creating one. */ if (fr->fr_pd_fd == NULL) return (EINVAL); /* Check if we are using supported flags. */ if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0) return (EINVAL); } p1 = td->td_proc; /* * Here we don't create a new process, but we divorce * certain parts of a process from itself. */ if ((flags & RFPROC) == 0) { if (fr->fr_procp != NULL) *fr->fr_procp = NULL; else if (fr->fr_pidp != NULL) *fr->fr_pidp = 0; return (fork_norfproc(td, flags)); } fp_procdesc = NULL; newproc = NULL; vm2 = NULL; killsx_locked = false; singlethreaded = false; /* * Increment the nprocs resource before allocations occur. * Although process entries are dynamically created, we still * keep a global limit on the maximum number we will * create. There are hard-limits as to the number of processes * that can run, established by the KVA and memory usage for * the process data. * * Don't allow a nonprivileged user to use the last ten * processes; don't let root exceed the limit. */ nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1; if (nprocs_new >= maxproc - 10) { if (priv_check_cred(td->td_ucred, PRIV_MAXPROC) != 0 || nprocs_new >= maxproc) { error = EAGAIN; sx_xlock(&allproc_lock); if (ppsratecheck(&lastfail, &curfail, 1)) { printf("maxproc limit exceeded by uid %u " "(pid %d); see tuning(7) and " "login.conf(5)\n", td->td_ucred->cr_ruid, p1->p_pid); } sx_xunlock(&allproc_lock); goto fail2; } } /* * If we are possibly multi-threaded, and there is a process * sending a signal to our group right now, ensure that our * other threads cannot be chosen for the signal queueing. * Otherwise, this might delay signal action, and make the new * child escape the signaling. 
*/ pg = p1->p_pgrp; if (p1->p_numthreads > 1) { if (sx_try_slock(&pg->pg_killsx) != 0) { killsx_locked = true; } else { PROC_LOCK(p1); if (thread_single(p1, SINGLE_BOUNDARY)) { PROC_UNLOCK(p1); error = ERESTART; goto fail2; } PROC_UNLOCK(p1); singlethreaded = true; } } /* * Atomically check for signals and block processes from sending * a signal to our process group until the child is visible. */ if (!killsx_locked && sx_slock_sig(&pg->pg_killsx) != 0) { error = ERESTART; goto fail2; } if (__predict_false(p1->p_pgrp != pg || sig_intr() != 0)) { /* * Either the process was moved to other process * group, or there is pending signal. sx_slock_sig() * does not check for signals if not sleeping for the * lock. */ sx_sunlock(&pg->pg_killsx); killsx_locked = false; error = ERESTART; goto fail2; } else { killsx_locked = true; } /* * If required, create a process descriptor in the parent first; we * will abandon it if something goes wrong. We don't finit() until * later. */ if (flags & RFPROCDESC) { error = procdesc_falloc(td, &fp_procdesc, fr->fr_pd_fd, fr->fr_pd_flags, fr->fr_pd_fcaps); if (error != 0) goto fail2; AUDIT_ARG_FD(*fr->fr_pd_fd); } mem_charged = 0; if (pages == 0) pages = kstack_pages; /* Allocate new proc. */ newproc = uma_zalloc(proc_zone, M_WAITOK); td2 = FIRST_THREAD_IN_PROC(newproc); if (td2 == NULL) { td2 = thread_alloc(pages); if (td2 == NULL) { error = ENOMEM; goto fail2; } proc_linkup(newproc, td2); } else { - kmsan_thread_alloc(td2); - if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) { - if (td2->td_kstack != 0) - vm_thread_dispose(td2); - if (!thread_alloc_stack(td2, pages)) { - error = ENOMEM; - goto fail2; - } - } else { - kasan_mark((void *)td2->td_kstack, - ptoa(td2->td_kstack_pages), - ptoa(td2->td_kstack_pages), 0); - } + error = thread_recycle(td2, pages); + if (error != 0) + goto fail2; } if ((flags & RFMEM) == 0) { vm2 = vmspace_fork(p1->p_vmspace, &mem_charged); if (vm2 == NULL) { error = ENOMEM; goto fail2; } if (!swap_reserve(mem_charged)) { /* * The swap reservation failed. The accounting * from the entries of the copied vm2 will be * subtracted in vmspace_free(), so force the * reservation there. */ swap_reserve_force(mem_charged); error = ENOMEM; goto fail2; } } else vm2 = NULL; /* * XXX: This is ugly; when we copy resource usage, we need to bump * per-cred resource counters. */ newproc->p_ucred = crcowget(td->td_ucred); /* * Initialize resource accounting for the child process. */ error = racct_proc_fork(p1, newproc); if (error != 0) { error = EAGAIN; goto fail1; } #ifdef MAC mac_proc_init(newproc); #endif newproc->p_klist = knlist_alloc(&newproc->p_mtx); STAILQ_INIT(&newproc->p_ktr); /* * Increment the count of procs running with this uid. Don't allow * a nonprivileged user to exceed their current limit. 
*/ cred = td->td_ucred; if (!chgproccnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPROC))) { if (priv_check_cred(cred, PRIV_PROC_LIMIT) != 0) goto fail0; chgproccnt(cred->cr_ruidinfo, 1, 0); } do_fork(td, fr, newproc, td2, vm2, fp_procdesc); error = 0; goto cleanup; fail0: error = EAGAIN; #ifdef MAC mac_proc_destroy(newproc); #endif racct_proc_exit(newproc); fail1: proc_unset_cred(newproc); fail2: if (vm2 != NULL) vmspace_free(vm2); uma_zfree(proc_zone, newproc); if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) { fdclose(td, fp_procdesc, *fr->fr_pd_fd); fdrop(fp_procdesc, td); } atomic_add_int(&nprocs, -1); cleanup: if (killsx_locked) sx_sunlock(&pg->pg_killsx); if (singlethreaded) { PROC_LOCK(p1); thread_single_end(p1, SINGLE_BOUNDARY); PROC_UNLOCK(p1); } if (error != 0) pause("fork", hz / 2); return (error); } /* * Handle the return of a child process from fork1(). This function * is called from the MD fork_trampoline() entry point. */ void fork_exit(void (*callout)(void *, struct trapframe *), void *arg, struct trapframe *frame) { struct proc *p; struct thread *td; struct thread *dtd; kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED); td = curthread; p = td->td_proc; KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new")); CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)", td, td_get_sched(td), p->p_pid, td->td_name); sched_fork_exit(td); /* * Processes normally resume in mi_switch() after being * cpu_switch()'ed to, but when children start up they arrive here * instead, so we must do much the same things as mi_switch() would. */ if ((dtd = PCPU_GET(deadthread))) { PCPU_SET(deadthread, NULL); thread_stash(dtd); } thread_unlock(td); /* * cpu_fork_kthread_handler intercepts this function call to * have this call a non-return function to stay in kernel mode. * initproc has its own fork handler, but it does return. */ KASSERT(callout != NULL, ("NULL callout in fork_exit")); callout(arg, frame); /* * Check if a kernel thread misbehaved and returned from its main * function. */ if (p->p_flag & P_KPROC) { printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n", td->td_name, p->p_pid); kthread_exit(); } mtx_assert(&Giant, MA_NOTOWNED); /* * Now going to return to userland. */ if (p->p_sysent->sv_schedtail != NULL) (p->p_sysent->sv_schedtail)(td); userret(td, frame); } /* * Simplified back end of syscall(), used when returning from fork() * directly into user mode. This function is passed in to fork_exit() * as the first parameter and is called when returning to a new * userland process. */ void fork_return(struct thread *td, struct trapframe *frame) { struct proc *p; p = td->td_proc; if (td->td_dbgflags & TDB_STOPATFORK) { PROC_LOCK(p); if ((p->p_flag & P_TRACED) != 0) { /* * Inform the debugger if one is still present. */ td->td_dbgflags |= TDB_CHILD | TDB_SCX | TDB_FSTP; ptracestop(td, SIGSTOP, NULL); td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX); } else { /* * ... otherwise clear the request. */ td->td_dbgflags &= ~TDB_STOPATFORK; } PROC_UNLOCK(p); } else if (p->p_flag & P_TRACED) { /* * This is the start of a new thread in a traced * process. Report a system call exit event. */ PROC_LOCK(p); td->td_dbgflags |= TDB_SCX; if ((p->p_ptevents & PTRACE_SCX) != 0 || (td->td_dbgflags & TDB_BORN) != 0) ptracestop(td, SIGTRAP, NULL); td->td_dbgflags &= ~(TDB_SCX | TDB_BORN); PROC_UNLOCK(p); } /* * If the prison was killed mid-fork, die along with it. 
*/ if (!prison_isalive(td->td_ucred->cr_prison)) exit1(td, 0, SIGKILL); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) ktrsysret(td->td_sa.code, 0, 0); #endif } static void fork_init(void *arg __unused) { ast_register(TDA_VFORK, ASTR_ASTF_REQUIRED | ASTR_TDP, TDP_RFPPWAIT, ast_vfork); } SYSINIT(fork, SI_SUB_INTRINSIC, SI_ORDER_ANY, fork_init, NULL); diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 0bcd5c2f2add..725791769ac3 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -1,1820 +1,1824 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2001 Julian Elischer . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include "opt_witness.h" #include "opt_hwpmc_hooks.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include #include #include #include #include #include #include #include /* * Asserts below verify the stability of struct thread and struct proc * layout, as exposed by KBI to modules. On head, the KBI is allowed * to drift, change to the structures must be accompanied by the * assert update. * * On the stable branches after KBI freeze, conditions must not be * violated. Typically new fields are moved to the end of the * structures. 
*/ #ifdef __amd64__ _Static_assert(offsetof(struct thread, td_flags) == 0x108, "struct thread KBI td_flags"); _Static_assert(offsetof(struct thread, td_pflags) == 0x114, "struct thread KBI td_pflags"); _Static_assert(offsetof(struct thread, td_frame) == 0x4b8, "struct thread KBI td_frame"); _Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0, "struct thread KBI td_emuldata"); _Static_assert(offsetof(struct proc, p_flag) == 0xb8, "struct proc KBI p_flag"); _Static_assert(offsetof(struct proc, p_pid) == 0xc4, "struct proc KBI p_pid"); _Static_assert(offsetof(struct proc, p_filemon) == 0x3c8, "struct proc KBI p_filemon"); _Static_assert(offsetof(struct proc, p_comm) == 0x3e0, "struct proc KBI p_comm"); _Static_assert(offsetof(struct proc, p_emuldata) == 0x4d0, "struct proc KBI p_emuldata"); #endif #ifdef __i386__ _Static_assert(offsetof(struct thread, td_flags) == 0x9c, "struct thread KBI td_flags"); _Static_assert(offsetof(struct thread, td_pflags) == 0xa8, "struct thread KBI td_pflags"); _Static_assert(offsetof(struct thread, td_frame) == 0x318, "struct thread KBI td_frame"); _Static_assert(offsetof(struct thread, td_emuldata) == 0x35c, "struct thread KBI td_emuldata"); _Static_assert(offsetof(struct proc, p_flag) == 0x6c, "struct proc KBI p_flag"); _Static_assert(offsetof(struct proc, p_pid) == 0x78, "struct proc KBI p_pid"); _Static_assert(offsetof(struct proc, p_filemon) == 0x270, "struct proc KBI p_filemon"); _Static_assert(offsetof(struct proc, p_comm) == 0x284, "struct proc KBI p_comm"); _Static_assert(offsetof(struct proc, p_emuldata) == 0x318, "struct proc KBI p_emuldata"); #endif SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE(proc, , , lwp__exit); /* * thread related storage. */ static uma_zone_t thread_zone; struct thread_domain_data { struct thread *tdd_zombies; int tdd_reapticks; } __aligned(CACHE_LINE_SIZE); static struct thread_domain_data thread_domain_data[MAXMEMDOM]; static struct task thread_reap_task; static struct callout thread_reap_callout; static void thread_zombie(struct thread *); static void thread_reap(void); static void thread_reap_all(void); static void thread_reap_task_cb(void *, int); static void thread_reap_callout_cb(void *); static int thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary); static void thread_free_batched(struct thread *td); static __exclusive_cache_line struct mtx tid_lock; static bitstr_t *tid_bitmap; static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash"); static int maxthread; SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN, &maxthread, 0, "Maximum number of threads"); static __exclusive_cache_line int nthreads; static LIST_HEAD(tidhashhead, thread) *tidhashtbl; static u_long tidhash; static u_long tidhashlock; static struct rwlock *tidhashtbl_lock; #define TIDHASH(tid) (&tidhashtbl[(tid) & tidhash]) #define TIDHASHLOCK(tid) (&tidhashtbl_lock[(tid) & tidhashlock]) EVENTHANDLER_LIST_DEFINE(thread_ctor); EVENTHANDLER_LIST_DEFINE(thread_dtor); EVENTHANDLER_LIST_DEFINE(thread_init); EVENTHANDLER_LIST_DEFINE(thread_fini); static bool thread_count_inc_try(void) { int nthreads_new; nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1; if (nthreads_new >= maxthread - 100) { if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 || nthreads_new >= maxthread) { atomic_subtract_int(&nthreads, 1); return (false); } } return (true); } static bool thread_count_inc(void) { static struct timeval lastfail; static int curfail; thread_reap(); if (thread_count_inc_try()) { return (true); } thread_reap_all(); if 
(thread_count_inc_try()) { return (true); } if (ppsratecheck(&lastfail, &curfail, 1)) { printf("maxthread limit exceeded by uid %u " "(pid %d); consider increasing kern.maxthread\n", curthread->td_ucred->cr_ruid, curproc->p_pid); } return (false); } static void thread_count_sub(int n) { atomic_subtract_int(&nthreads, n); } static void thread_count_dec(void) { thread_count_sub(1); } static lwpid_t tid_alloc(void) { static lwpid_t trytid; lwpid_t tid; mtx_lock(&tid_lock); /* * It is an invariant that the bitmap is big enough to hold maxthread * IDs. If we got to this point there has to be at least one free. */ if (trytid >= maxthread) trytid = 0; bit_ffc_at(tid_bitmap, trytid, maxthread, &tid); if (tid == -1) { KASSERT(trytid != 0, ("unexpectedly ran out of IDs")); trytid = 0; bit_ffc_at(tid_bitmap, trytid, maxthread, &tid); KASSERT(tid != -1, ("unexpectedly ran out of IDs")); } bit_set(tid_bitmap, tid); trytid = tid + 1; mtx_unlock(&tid_lock); return (tid + NO_PID); } static void tid_free_locked(lwpid_t rtid) { lwpid_t tid; mtx_assert(&tid_lock, MA_OWNED); KASSERT(rtid >= NO_PID, ("%s: invalid tid %d\n", __func__, rtid)); tid = rtid - NO_PID; KASSERT(bit_test(tid_bitmap, tid) != 0, ("thread ID %d not allocated\n", rtid)); bit_clear(tid_bitmap, tid); } static void tid_free(lwpid_t rtid) { mtx_lock(&tid_lock); tid_free_locked(rtid); mtx_unlock(&tid_lock); } static void tid_free_batch(lwpid_t *batch, int n) { int i; mtx_lock(&tid_lock); for (i = 0; i < n; i++) { tid_free_locked(batch[i]); } mtx_unlock(&tid_lock); } /* * Batching for thread reapping. */ struct tidbatch { lwpid_t tab[16]; int n; }; static void tidbatch_prep(struct tidbatch *tb) { tb->n = 0; } static void tidbatch_add(struct tidbatch *tb, struct thread *td) { KASSERT(tb->n < nitems(tb->tab), ("%s: count too high %d", __func__, tb->n)); tb->tab[tb->n] = td->td_tid; tb->n++; } static void tidbatch_process(struct tidbatch *tb) { KASSERT(tb->n <= nitems(tb->tab), ("%s: count too high %d", __func__, tb->n)); if (tb->n == nitems(tb->tab)) { tid_free_batch(tb->tab, tb->n); tb->n = 0; } } static void tidbatch_final(struct tidbatch *tb) { KASSERT(tb->n <= nitems(tb->tab), ("%s: count too high %d", __func__, tb->n)); if (tb->n != 0) { tid_free_batch(tb->tab, tb->n); } } /* * Batching thread count free, for consistency */ struct tdcountbatch { int n; }; static void tdcountbatch_prep(struct tdcountbatch *tb) { tb->n = 0; } static void tdcountbatch_add(struct tdcountbatch *tb, struct thread *td __unused) { tb->n++; } static void tdcountbatch_process(struct tdcountbatch *tb) { if (tb->n == 32) { thread_count_sub(tb->n); tb->n = 0; } } static void tdcountbatch_final(struct tdcountbatch *tb) { if (tb->n != 0) { thread_count_sub(tb->n); } } /* * Prepare a thread for use. */ static int thread_ctor(void *mem, int size, void *arg, int flags) { struct thread *td; td = (struct thread *)mem; TD_SET_STATE(td, TDS_INACTIVE); td->td_lastcpu = td->td_oncpu = NOCPU; /* * Note that td_critnest begins life as 1 because the thread is not * running and is thereby implicitly waiting to be on the receiving * end of a context switch. */ td->td_critnest = 1; td->td_lend_user_pri = PRI_MAX; #ifdef AUDIT audit_thread_alloc(td); #endif #ifdef KDTRACE_HOOKS kdtrace_thread_ctor(td); #endif umtx_thread_alloc(td); MPASS(td->td_sel == NULL); return (0); } /* * Reclaim a thread after use. */ static void thread_dtor(void *mem, int size, void *arg) { struct thread *td; td = (struct thread *)mem; #ifdef INVARIANTS /* Verify that this thread is in a safe state to free. 
*/ switch (TD_GET_STATE(td)) { case TDS_INHIBITED: case TDS_RUNNING: case TDS_CAN_RUN: case TDS_RUNQ: /* * We must never unlink a thread that is in one of * these states, because it is currently active. */ panic("bad state for thread unlinking"); /* NOTREACHED */ case TDS_INACTIVE: break; default: panic("bad thread state"); /* NOTREACHED */ } #endif #ifdef AUDIT audit_thread_free(td); #endif #ifdef KDTRACE_HOOKS kdtrace_thread_dtor(td); #endif /* Free all OSD associated to this thread. */ osd_thread_exit(td); ast_kclear(td); seltdfini(td); } /* * Initialize type-stable parts of a thread (when newly created). */ static int thread_init(void *mem, int size, int flags) { struct thread *td; td = (struct thread *)mem; td->td_allocdomain = vm_phys_domain(vtophys(td)); td->td_sleepqueue = sleepq_alloc(); td->td_turnstile = turnstile_alloc(); td->td_rlqe = NULL; EVENTHANDLER_DIRECT_INVOKE(thread_init, td); umtx_thread_init(td); td->td_kstack = 0; td->td_sel = NULL; return (0); } /* * Tear down type-stable parts of a thread (just before being discarded). */ static void thread_fini(void *mem, int size) { struct thread *td; td = (struct thread *)mem; EVENTHANDLER_DIRECT_INVOKE(thread_fini, td); rlqentry_free(td->td_rlqe); turnstile_free(td->td_turnstile); sleepq_free(td->td_sleepqueue); umtx_thread_fini(td); MPASS(td->td_sel == NULL); } /* * For a newly created process, * link up all the structures and its initial threads etc. * called from: * {arch}/{arch}/machdep.c {arch}_init(), init386() etc. * proc_dtor() (should go away) * proc_init() */ void proc_linkup0(struct proc *p, struct thread *td) { TAILQ_INIT(&p->p_threads); /* all threads in proc */ proc_linkup(p, td); } void proc_linkup(struct proc *p, struct thread *td) { sigqueue_init(&p->p_sigqueue, p); p->p_ksi = ksiginfo_alloc(M_WAITOK); if (p->p_ksi != NULL) { /* XXX p_ksi may be null if ksiginfo zone is not ready */ p->p_ksi->ksi_flags = KSI_EXT | KSI_INS; } LIST_INIT(&p->p_mqnotifier); p->p_numthreads = 0; thread_link(td, p); } static void ast_suspend(struct thread *td, int tda __unused) { struct proc *p; p = td->td_proc; /* * We need to check to see if we have to exit or wait due to a * single threading requirement or some other STOP condition. */ PROC_LOCK(p); thread_suspend_check(0); PROC_UNLOCK(p); } extern int max_threads_per_proc; /* * Initialize global thread allocation resources. */ void threadinit(void) { u_long i; lwpid_t tid0; /* * Place an upper limit on threads which can be allocated. * * Note that other factors may make the de facto limit much lower. * * Platform limits are somewhat arbitrary but deemed "more than good * enough" for the foreseable future. */ if (maxthread == 0) { #ifdef _LP64 maxthread = MIN(maxproc * max_threads_per_proc, 1000000); #else maxthread = MIN(maxproc * max_threads_per_proc, 100000); #endif } mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF); tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK); /* * Handle thread0. */ thread_count_inc(); tid0 = tid_alloc(); if (tid0 != THREAD0_TID) panic("tid0 %d != %d\n", tid0, THREAD0_TID); /* * Thread structures are specially aligned so that (at least) the * 5 lower bits of a pointer to 'struct thead' must be 0. These bits * are used by synchronization primitives to store flags in pointers to * such structures. 
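 *
 * As a generic illustration of that trick (a sketch, not the code any
 * particular primitive actually uses), a 32-byte-aligned pointer leaves
 * room for five tag bits, assuming tag < 32:
 *
 *	uintptr_t word = (uintptr_t)td | tag;
 *	struct thread *owner = (struct thread *)(word & ~(uintptr_t)31);
 *	unsigned tagbits = word & 31;
 *
 * which is why the zone below is created with a 32-byte alignment mask.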
*/ thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), thread_ctor, thread_dtor, thread_init, thread_fini, UMA_ALIGN_CACHE_AND_MASK(32 - 1), UMA_ZONE_NOFREE); tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash); tidhashlock = (tidhash + 1) / 64; if (tidhashlock > 0) tidhashlock--; tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1), M_TIDHASH, M_WAITOK | M_ZERO); for (i = 0; i < tidhashlock + 1; i++) rw_init(&tidhashtbl_lock[i], "tidhash"); TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL); callout_init(&thread_reap_callout, 1); callout_reset(&thread_reap_callout, 5 * hz, thread_reap_callout_cb, NULL); ast_register(TDA_SUSPEND, ASTR_ASTF_REQUIRED, 0, ast_suspend); } /* * Place an unused thread on the zombie list. */ void thread_zombie(struct thread *td) { struct thread_domain_data *tdd; struct thread *ztd; tdd = &thread_domain_data[td->td_allocdomain]; ztd = atomic_load_ptr(&tdd->tdd_zombies); for (;;) { td->td_zombie = ztd; if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies, (uintptr_t *)&ztd, (uintptr_t)td)) break; continue; } } /* * Release a thread that has exited after cpu_throw(). */ void thread_stash(struct thread *td) { atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1); thread_zombie(td); } /* * Reap zombies from passed domain. */ static void thread_reap_domain(struct thread_domain_data *tdd) { struct thread *itd, *ntd; struct tidbatch tidbatch; struct credbatch credbatch; struct limbatch limbatch; struct tdcountbatch tdcountbatch; /* * Reading upfront is pessimal if followed by concurrent atomic_swap, * but most of the time the list is empty. */ if (tdd->tdd_zombies == NULL) return; itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies, (uintptr_t)NULL); if (itd == NULL) return; /* * Multiple CPUs can get here, the race is fine as ticks is only * advisory. */ tdd->tdd_reapticks = ticks; tidbatch_prep(&tidbatch); credbatch_prep(&credbatch); limbatch_prep(&limbatch); tdcountbatch_prep(&tdcountbatch); while (itd != NULL) { ntd = itd->td_zombie; EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd); tidbatch_add(&tidbatch, itd); credbatch_add(&credbatch, itd); limbatch_add(&limbatch, itd); tdcountbatch_add(&tdcountbatch, itd); thread_free_batched(itd); tidbatch_process(&tidbatch); credbatch_process(&credbatch); limbatch_process(&limbatch); tdcountbatch_process(&tdcountbatch); itd = ntd; } tidbatch_final(&tidbatch); credbatch_final(&credbatch); limbatch_final(&limbatch); tdcountbatch_final(&tdcountbatch); } /* * Reap zombies from all domains. */ static void thread_reap_all(void) { struct thread_domain_data *tdd; int i, domain; domain = PCPU_GET(domain); for (i = 0; i < vm_ndomains; i++) { tdd = &thread_domain_data[(i + domain) % vm_ndomains]; thread_reap_domain(tdd); } } /* * Reap zombies from local domain. 
*/ static void thread_reap(void) { struct thread_domain_data *tdd; int domain; domain = PCPU_GET(domain); tdd = &thread_domain_data[domain]; thread_reap_domain(tdd); } static void thread_reap_task_cb(void *arg __unused, int pending __unused) { thread_reap_all(); } static void thread_reap_callout_cb(void *arg __unused) { struct thread_domain_data *tdd; int i, cticks, lticks; bool wantreap; wantreap = false; cticks = atomic_load_int(&ticks); for (i = 0; i < vm_ndomains; i++) { tdd = &thread_domain_data[i]; lticks = tdd->tdd_reapticks; if (tdd->tdd_zombies != NULL && (u_int)(cticks - lticks) > 5 * hz) { wantreap = true; break; } } if (wantreap) taskqueue_enqueue(taskqueue_thread, &thread_reap_task); callout_reset(&thread_reap_callout, 5 * hz, thread_reap_callout_cb, NULL); } /* * Calling this function guarantees that any thread that exited before * the call is reaped when the function returns. By 'exited' we mean * a thread removed from the process linkage with thread_unlink(). * Practically this means that caller must lock/unlock corresponding * process lock before the call, to synchronize with thread_exit(). */ void thread_reap_barrier(void) { struct task *t; /* * First do context switches to each CPU to ensure that all * PCPU pc_deadthreads are moved to zombie list. */ quiesce_all_cpus("", PDROP); /* * Second, fire the task in the same thread as normal * thread_reap() is done, to serialize reaping. */ t = malloc(sizeof(*t), M_TEMP, M_WAITOK); TASK_INIT(t, 0, thread_reap_task_cb, t); taskqueue_enqueue(taskqueue_thread, t); taskqueue_drain(taskqueue_thread, t); free(t, M_TEMP); } /* * Allocate a thread. */ struct thread * thread_alloc(int pages) { struct thread *td; lwpid_t tid; if (!thread_count_inc()) { return (NULL); } tid = tid_alloc(); td = uma_zalloc(thread_zone, M_WAITOK); KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); if (!vm_thread_new(td, pages)) { uma_zfree(thread_zone, td); tid_free(tid); thread_count_dec(); return (NULL); } td->td_tid = tid; bzero(&td->td_sa.args, sizeof(td->td_sa.args)); + kasan_thread_alloc(td); kmsan_thread_alloc(td); cpu_thread_alloc(td); EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td); return (td); } int -thread_alloc_stack(struct thread *td, int pages) +thread_recycle(struct thread *td, int pages) { - - KASSERT(td->td_kstack == 0, - ("thread_alloc_stack called on a thread with kstack")); - if (!vm_thread_new(td, pages)) - return (0); - cpu_thread_alloc(td); - return (1); + if (td->td_kstack == 0 || td->td_kstack_pages != pages) { + if (td->td_kstack != 0) + vm_thread_dispose(td); + if (!vm_thread_new(td, pages)) + return (ENOMEM); + cpu_thread_alloc(td); + } + kasan_thread_alloc(td); + kmsan_thread_alloc(td); + return (0); } /* * Deallocate a thread. */ static void thread_free_batched(struct thread *td) { lock_profile_thread_exit(td); if (td->td_cpuset) cpuset_rel(td->td_cpuset); td->td_cpuset = NULL; cpu_thread_free(td); if (td->td_kstack != 0) vm_thread_dispose(td); callout_drain(&td->td_slpcallout); /* * Freeing handled by the caller. 
*/ td->td_tid = -1; kmsan_thread_free(td); uma_zfree(thread_zone, td); } void thread_free(struct thread *td) { lwpid_t tid; EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td); tid = td->td_tid; thread_free_batched(td); tid_free(tid); thread_count_dec(); } void thread_cow_get_proc(struct thread *newtd, struct proc *p) { PROC_LOCK_ASSERT(p, MA_OWNED); newtd->td_realucred = crcowget(p->p_ucred); newtd->td_ucred = newtd->td_realucred; newtd->td_limit = lim_hold(p->p_limit); newtd->td_cowgen = p->p_cowgen; } void thread_cow_get(struct thread *newtd, struct thread *td) { MPASS(td->td_realucred == td->td_ucred); newtd->td_realucred = crcowget(td->td_realucred); newtd->td_ucred = newtd->td_realucred; newtd->td_limit = lim_hold(td->td_limit); newtd->td_cowgen = td->td_cowgen; } void thread_cow_free(struct thread *td) { if (td->td_realucred != NULL) crcowfree(td); if (td->td_limit != NULL) lim_free(td->td_limit); } void thread_cow_update(struct thread *td) { struct proc *p; struct ucred *oldcred; struct plimit *oldlimit; p = td->td_proc; PROC_LOCK(p); oldcred = crcowsync(); oldlimit = lim_cowsync(); td->td_cowgen = p->p_cowgen; PROC_UNLOCK(p); if (oldcred != NULL) crfree(oldcred); if (oldlimit != NULL) lim_free(oldlimit); } void thread_cow_synced(struct thread *td) { struct proc *p; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); MPASS(td->td_cowgen != p->p_cowgen); MPASS(td->td_ucred == p->p_ucred); MPASS(td->td_limit == p->p_limit); td->td_cowgen = p->p_cowgen; } /* * Discard the current thread and exit from its context. * Always called with scheduler locked. * * Because we can't free a thread while we're operating under its context, * push the current thread into our CPU's deadthread holder. This means * we needn't worry about someone else grabbing our context before we * do a cpu_throw(). */ void thread_exit(void) { uint64_t runtime, new_switchtime; struct thread *td; struct thread *td2; struct proc *p; int wakeup_swapper; td = curthread; p = td->td_proc; PROC_SLOCK_ASSERT(p, MA_OWNED); mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT(p != NULL, ("thread exiting without a process")); CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td, (long)p->p_pid, td->td_name); SDT_PROBE0(proc, , , lwp__exit); KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending")); MPASS(td->td_realucred == td->td_ucred); /* * drop FPU & debug register state storage, or any other * architecture specific resources that * would not be on a new untouched process. */ cpu_thread_exit(td); /* * The last thread is left attached to the process * So that the whole bundle gets recycled. Skip * all this stuff if we never had threads. * EXIT clears all sign of other threads when * it goes to single threading, so the last thread always * takes the short path. */ if (p->p_flag & P_HADTHREADS) { if (p->p_numthreads > 1) { atomic_add_int(&td->td_proc->p_exitthreads, 1); thread_unlink(td); td2 = FIRST_THREAD_IN_PROC(p); sched_exit_thread(td2, td); /* * The test below is NOT true if we are the * sole exiting thread. P_STOPPED_SINGLE is unset * in exit1() after it is the only survivor. */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); if (wakeup_swapper) kick_proc0(); } } PCPU_SET(deadthread, td); } else { /* * The last thread is exiting.. 
but not through exit() */ panic ("thread_exit: Last thread exiting on its own"); } } #ifdef HWPMC_HOOKS /* * If this thread is part of a process that is being tracked by hwpmc(4), * inform the module of the thread's impending exit. */ if (PMC_PROC_IS_USING_PMCS(td->td_proc)) { PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL); } else if (PMC_SYSTEM_SAMPLING_ACTIVE()) PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL); #endif PROC_UNLOCK(p); PROC_STATLOCK(p); thread_lock(td); PROC_SUNLOCK(p); /* Do the same timestamp bookkeeping that mi_switch() would do. */ new_switchtime = cpu_ticks(); runtime = new_switchtime - PCPU_GET(switchtime); td->td_runtime += runtime; td->td_incruntime += runtime; PCPU_SET(switchtime, new_switchtime); PCPU_SET(switchticks, ticks); VM_CNT_INC(v_swtch); /* Save our resource usage in our process. */ td->td_ru.ru_nvcsw++; ruxagg_locked(p, td); rucollect(&p->p_ru, &td->td_ru); PROC_STATUNLOCK(p); TD_SET_STATE(td, TDS_INACTIVE); #ifdef WITNESS witness_thread_exit(td); #endif CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td); sched_throw(td); panic("I'm a teapot!"); /* NOTREACHED */ } /* * Do any thread specific cleanups that may be needed in wait() * called with Giant, proc and schedlock not held. */ void thread_wait(struct proc *p) { struct thread *td; mtx_assert(&Giant, MA_NOTOWNED); KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()")); KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking")); td = FIRST_THREAD_IN_PROC(p); /* Lock the last thread so we spin until it exits cpu_throw(). */ thread_lock(td); thread_unlock(td); lock_profile_thread_exit(td); cpuset_rel(td->td_cpuset); td->td_cpuset = NULL; cpu_thread_clean(td); thread_cow_free(td); callout_drain(&td->td_slpcallout); thread_reap(); /* check for zombie threads etc. */ } /* * Link a thread to a process. * set up anything that needs to be initialized for it to * be used by the process. */ void thread_link(struct thread *td, struct proc *p) { /* * XXX This can't be enabled because it's called for proc0 before * its lock has been created. * PROC_LOCK_ASSERT(p, MA_OWNED); */ TD_SET_STATE(td, TDS_INACTIVE); td->td_proc = p; td->td_flags = TDF_INMEM; LIST_INIT(&td->td_contested); LIST_INIT(&td->td_lprof[0]); LIST_INIT(&td->td_lprof[1]); #ifdef EPOCH_TRACE SLIST_INIT(&td->td_epochs); #endif sigqueue_init(&td->td_sigqueue, p); callout_init(&td->td_slpcallout, 1); TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist); p->p_numthreads++; } /* * Called from: * thread_exit() */ void thread_unlink(struct thread *td) { struct proc *p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); #ifdef EPOCH_TRACE MPASS(SLIST_EMPTY(&td->td_epochs)); #endif TAILQ_REMOVE(&p->p_threads, td, td_plist); p->p_numthreads--; /* could clear a few other things here */ /* Must NOT clear links to proc! */ } static int calc_remaining(struct proc *p, int mode) { int remaining; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); if (mode == SINGLE_EXIT) remaining = p->p_numthreads; else if (mode == SINGLE_BOUNDARY) remaining = p->p_numthreads - p->p_boundary_count; else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC) remaining = p->p_numthreads - p->p_suspcount; else panic("calc_remaining: wrong mode %d", mode); return (remaining); } static int remain_for_mode(int mode) { return (mode == SINGLE_ALLPROC ? 
0 : 1); } static int weed_inhib(int mode, struct thread *td2, struct proc *p) { int wakeup_swapper; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); THREAD_LOCK_ASSERT(td2, MA_OWNED); wakeup_swapper = 0; /* * Since the thread lock is dropped by the scheduler we have * to retry to check for races. */ restart: switch (mode) { case SINGLE_EXIT: if (TD_IS_SUSPENDED(td2)) { wakeup_swapper |= thread_unsuspend_one(td2, p, true); thread_lock(td2); goto restart; } if (TD_CAN_ABORT(td2)) { wakeup_swapper |= sleepq_abort(td2, EINTR); return (wakeup_swapper); } break; case SINGLE_BOUNDARY: case SINGLE_NO_EXIT: if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0) { wakeup_swapper |= thread_unsuspend_one(td2, p, false); thread_lock(td2); goto restart; } if (TD_CAN_ABORT(td2)) { wakeup_swapper |= sleepq_abort(td2, ERESTART); return (wakeup_swapper); } break; case SINGLE_ALLPROC: /* * ALLPROC suspend tries to avoid spurious EINTR for * threads sleeping interruptable, by suspending the * thread directly, similarly to sig_suspend_threads(). * Since such sleep is not neccessary performed at the user * boundary, TDF_ALLPROCSUSP is used to avoid immediate * un-suspend. */ if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_ALLPROCSUSP) == 0) { wakeup_swapper |= thread_unsuspend_one(td2, p, false); thread_lock(td2); goto restart; } if (TD_CAN_ABORT(td2)) { td2->td_flags |= TDF_ALLPROCSUSP; wakeup_swapper |= sleepq_abort(td2, ERESTART); return (wakeup_swapper); } break; default: break; } thread_unlock(td2); return (wakeup_swapper); } /* * Enforce single-threading. * * Returns 1 if the caller must abort (another thread is waiting to * exit the process or similar). Process is locked! * Returns 0 when you are successfully the only thread running. * A process has successfully single threaded in the suspend mode when * There are no threads in user mode. Threads in the kernel must be * allowed to continue until they get to the user boundary. They may even * copy out their return values and data before suspending. They may however be * accelerated in reaching the user boundary as we will wake up * any sleeping threads that are interruptable. (PCATCH). */ int thread_single(struct proc *p, int mode) { struct thread *td; struct thread *td2; int remaining, wakeup_swapper; td = curthread; KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, ("invalid mode %d", mode)); /* * If allowing non-ALLPROC singlethreading for non-curproc * callers, calc_remaining() and remain_for_mode() should be * adjusted to also account for td->td_proc != p. For now * this is not implemented because it is not used. */ KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) || (mode != SINGLE_ALLPROC && td->td_proc == p), ("mode %d proc %p curproc %p", mode, p, td->td_proc)); mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); /* * Is someone already single threading? * Or may be singlethreading is not needed at all. 
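 *
 * Recall how the modes differ (compare calc_remaining() and
 * remain_for_mode() above): SINGLE_EXIT expects every other thread to
 * exit, SINGLE_BOUNDARY parks them at the user boundary, SINGLE_NO_EXIT
 * merely suspends them, and SINGLE_ALLPROC suspends the threads of a
 * foreign process, so none of the caller's own threads are among those
 * being waited for.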
*/ if (mode == SINGLE_ALLPROC) { while ((p->p_flag & P_STOPPED_SINGLE) != 0) { if ((p->p_flag2 & P2_WEXIT) != 0) return (1); msleep(&p->p_flag, &p->p_mtx, PCATCH, "thrsgl", 0); } if ((p->p_flag & (P_STOPPED_SIG | P_TRACED)) != 0 || (p->p_flag2 & P2_WEXIT) != 0) return (1); } else if ((p->p_flag & P_HADTHREADS) == 0) return (0); if (p->p_singlethread != NULL && p->p_singlethread != td) return (1); if (mode == SINGLE_EXIT) { p->p_flag |= P_SINGLE_EXIT; p->p_flag &= ~P_SINGLE_BOUNDARY; } else { p->p_flag &= ~P_SINGLE_EXIT; if (mode == SINGLE_BOUNDARY) p->p_flag |= P_SINGLE_BOUNDARY; else p->p_flag &= ~P_SINGLE_BOUNDARY; } if (mode == SINGLE_ALLPROC) p->p_flag |= P_TOTAL_STOP; p->p_flag |= P_STOPPED_SINGLE; PROC_SLOCK(p); p->p_singlethread = td; remaining = calc_remaining(p, mode); while (remaining != remain_for_mode(mode)) { if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) goto stopme; wakeup_swapper = 0; FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; thread_lock(td2); ast_sched_locked(td2, TDA_SUSPEND); if (TD_IS_INHIBITED(td2)) { wakeup_swapper |= weed_inhib(mode, td2, p); #ifdef SMP } else if (TD_IS_RUNNING(td2)) { forward_signal(td2); thread_unlock(td2); #endif } else thread_unlock(td2); } if (wakeup_swapper) kick_proc0(); remaining = calc_remaining(p, mode); /* * Maybe we suspended some threads.. was it enough? */ if (remaining == remain_for_mode(mode)) break; stopme: /* * Wake us up when everyone else has suspended. * In the mean time we suspend as well. */ thread_suspend_switch(td, p); remaining = calc_remaining(p, mode); } if (mode == SINGLE_EXIT) { /* * Convert the process to an unthreaded process. The * SINGLE_EXIT is called by exit1() or execve(), in * both cases other threads must be retired. */ KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads")); p->p_singlethread = NULL; p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS); /* * Wait for any remaining threads to exit cpu_throw(). */ while (p->p_exitthreads != 0) { PROC_SUNLOCK(p); PROC_UNLOCK(p); sched_relinquish(td); PROC_LOCK(p); PROC_SLOCK(p); } } else if (mode == SINGLE_BOUNDARY) { /* * Wait until all suspended threads are removed from * the processors. The thread_suspend_check() * increments p_boundary_count while it is still * running, which makes it possible for the execve() * to destroy vmspace while our other threads are * still using the address space. * * We lock the thread, which is only allowed to * succeed after context switch code finished using * the address space. */ FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; thread_lock(td2); KASSERT((td2->td_flags & TDF_BOUNDARY) != 0, ("td %p not on boundary", td2)); KASSERT(TD_IS_SUSPENDED(td2), ("td %p is not suspended", td2)); thread_unlock(td2); } } PROC_SUNLOCK(p); return (0); } bool thread_suspend_check_needed(void) { struct proc *p; struct thread *td; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 && (td->td_dbgflags & TDB_SUSPEND) != 0)); } /* * Called in from locations that can safely check to see * whether we have to suspend or at least throttle for a * single-thread event (e.g. fork). * * Such locations include userret(). * If the "return_instead" argument is non zero, the thread must be able to * accept 0 (caller may continue), or 1 (caller must abort) as a result. * * The 'return_instead' argument tells the function if it may do a * thread_exit() or suspend, or whether the caller must abort and back * out instead. 
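* As an illustrative note (not an exhaustive list): callers at the return-to-user boundary, such as the userret() path mentioned above, pass return_instead == 0 and are prepared to park or exit here, while callers holding resources they cannot drop pass a non-zero value and unwind with the returned EINTR/ERESTART. 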
* * If the thread that set the single_threading request has set the * P_SINGLE_EXIT bit in the process flags then this call will never return * if 'return_instead' is false, but will exit. * * P_SINGLE_EXIT | return_instead == 0| return_instead != 0 *---------------+--------------------+--------------------- * 0 | returns 0 | returns 0 or 1 * | when ST ends | immediately *---------------+--------------------+--------------------- * 1 | thread exits | returns 1 * | | immediately * 0 = thread_exit() or suspension ok, * other = return error instead of stopping the thread. * * While a full suspension is under effect, even a single threading * thread would be suspended if it made this call (but it shouldn't). * This call should only be made from places where * thread_exit() would be safe as that may be the outcome unless * return_instead is set. */ int thread_suspend_check(int return_instead) { struct thread *td; struct proc *p; int wakeup_swapper; td = curthread; p = td->td_proc; mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); while (thread_suspend_check_needed()) { if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { KASSERT(p->p_singlethread != NULL, ("singlethread not set")); /* * The only suspension in action is a * single-threading. Single threader need not stop. * It is safe to access p->p_singlethread unlocked * because it can only be set to our address by us. */ if (p->p_singlethread == td) return (0); /* Exempt from stopping. */ } if ((p->p_flag & P_SINGLE_EXIT) && return_instead) return (EINTR); /* Should we goto user boundary if we didn't come from there? */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && (p->p_flag & P_SINGLE_BOUNDARY) && return_instead) return (ERESTART); /* * Ignore suspend requests if they are deferred. */ if ((td->td_flags & TDF_SBDRY) != 0) { KASSERT(return_instead, ("TDF_SBDRY set for unsafe thread_suspend_check")); KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) != (TDF_SEINTR | TDF_SERESTART), ("both TDF_SEINTR and TDF_SERESTART")); return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0); } /* * If the process is waiting for us to exit, * this thread should just suicide. * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. */ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { PROC_UNLOCK(p); /* * Allow Linux emulation layer to do some work * before thread suicide. */ if (__predict_false(p->p_sysent->sv_thread_detach != NULL)) (p->p_sysent->sv_thread_detach)(td); umtx_thread_exit(td); kern_thr_exit(td); panic("stopped thread did not exit"); } PROC_SLOCK(p); thread_stopped(p); if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount + 1) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); if (wakeup_swapper) kick_proc0(); } } PROC_UNLOCK(p); thread_lock(td); /* * When a thread suspends, it just * gets taken off all queues. */ thread_suspend_one(td); if (return_instead == 0) { p->p_boundary_count++; td->td_flags |= TDF_BOUNDARY; } PROC_SUNLOCK(p); mi_switch(SW_INVOL | SWT_SUSPEND); PROC_LOCK(p); } return (0); } /* * Check for possible stops and suspensions while executing a * casueword or similar transiently failing operation. * * The sleep argument controls whether the function can handle a stop * request itself or it should return ERESTART and the request is * proceed at the kernel/user boundary in ast. 
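* An illustrative caller sketch (not a function in this file; names are placeholders): for (;;) { rv = casueword32(uaddr, old, &cur, new); if (rv == -1) return (EFAULT); if (rv == 0) break; error = thread_check_susp(td, true); if (error != 0) return (error); } 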
* * Typically, when retrying due to casueword(9) failure (rv == 1), we * should handle the stop requests there, with exception of cases when * the thread owns a kernel resource, for instance busied the umtx * key, or when functions return immediately if thread_check_susp() * returned non-zero. On the other hand, retrying the whole lock * operation, we better not stop there but delegate the handling to * ast. * * If the request is for thread termination P_SINGLE_EXIT, we cannot * handle it at all, and simply return EINTR. */ int thread_check_susp(struct thread *td, bool sleep) { struct proc *p; int error; /* * The check for TDA_SUSPEND is racy, but it is enough to * eventually break the lockstep loop. */ if (!td_ast_pending(td, TDA_SUSPEND)) return (0); error = 0; p = td->td_proc; PROC_LOCK(p); if (p->p_flag & P_SINGLE_EXIT) error = EINTR; else if (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) error = sleep ? thread_suspend_check(0) : ERESTART; PROC_UNLOCK(p); return (error); } void thread_suspend_switch(struct thread *td, struct proc *p) { KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); /* * We implement thread_suspend_one in stages here to avoid * dropping the proc lock while the thread lock is owned. */ if (p == td->td_proc) { thread_stopped(p); p->p_suspcount++; } PROC_UNLOCK(p); thread_lock(td); ast_unsched_locked(td, TDA_SUSPEND); TD_SET_SUSPENDED(td); sched_sleep(td, 0); PROC_SUNLOCK(p); DROP_GIANT(); mi_switch(SW_VOL | SWT_SUSPEND); PICKUP_GIANT(); PROC_LOCK(p); PROC_SLOCK(p); } void thread_suspend_one(struct thread *td) { struct proc *p; p = td->td_proc; PROC_SLOCK_ASSERT(p, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); p->p_suspcount++; ast_unsched_locked(td, TDA_SUSPEND); TD_SET_SUSPENDED(td); sched_sleep(td, 0); } static int thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary) { THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); TD_CLR_SUSPENDED(td); td->td_flags &= ~TDF_ALLPROCSUSP; if (td->td_proc == p) { PROC_SLOCK_ASSERT(p, MA_OWNED); p->p_suspcount--; if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) { td->td_flags &= ~TDF_BOUNDARY; p->p_boundary_count--; } } return (setrunnable(td, 0)); } void thread_run_flash(struct thread *td) { struct proc *p; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); if (TD_ON_SLEEPQ(td)) sleepq_remove_nested(td); else thread_lock(td); THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); TD_CLR_SUSPENDED(td); PROC_SLOCK(p); MPASS(p->p_suspcount > 0); p->p_suspcount--; PROC_SUNLOCK(p); if (setrunnable(td, 0)) kick_proc0(); } /* * Allow all threads blocked by single threading to continue running. */ void thread_unsuspend(struct proc *p) { struct thread *td; int wakeup_swapper; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); wakeup_swapper = 0; if (!P_SHOULDSTOP(p)) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (TD_IS_SUSPENDED(td)) wakeup_swapper |= thread_unsuspend_one(td, p, true); else thread_unlock(td); } } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && p->p_numthreads == p->p_suspcount) { /* * Stopping everything also did the job for the single * threading request. Now we've downgraded to single-threaded, * let it continue. 
*/ if (p->p_singlethread->td_proc == p) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); } } if (wakeup_swapper) kick_proc0(); } /* * End the single threading mode.. */ void thread_single_end(struct proc *p, int mode) { struct thread *td; int wakeup_swapper; KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, ("invalid mode %d", mode)); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) || (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0), ("mode %d does not match P_TOTAL_STOP", mode)); KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread, ("thread_single_end from other thread %p %p", curthread, p->p_singlethread)); KASSERT(mode != SINGLE_BOUNDARY || (p->p_flag & P_SINGLE_BOUNDARY) != 0, ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag)); p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY | P_TOTAL_STOP); PROC_SLOCK(p); p->p_singlethread = NULL; wakeup_swapper = 0; /* * If there are other threads they may now run, * unless of course there is a blanket 'stop order' * on the process. The single threader must be allowed * to continue however as this is a bad place to stop. */ if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (TD_IS_SUSPENDED(td)) { wakeup_swapper |= thread_unsuspend_one(td, p, true); } else thread_unlock(td); } } KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0, ("inconsistent boundary count %d", p->p_boundary_count)); PROC_SUNLOCK(p); if (wakeup_swapper) kick_proc0(); wakeup(&p->p_flag); } /* * Locate a thread by number and return with proc lock held. * * thread exit establishes proc -> tidhash lock ordering, but lookup * takes tidhash first and needs to return locked proc. 
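* In other words, a lookup that acquired the proc lock while still holding the tidhash lock would take the two locks in the reverse order and could deadlock against an exiting thread. 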
* * The problem is worked around by relying on type-safety of both * structures and doing the work in 2 steps: * - tidhash-locked lookup which saves both thread and proc pointers * - proc-locked verification that the found thread still matches */ static bool tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp) { #define RUN_THRESH 16 struct proc *p; struct thread *td; int run; bool locked; run = 0; rw_rlock(TIDHASHLOCK(tid)); locked = true; LIST_FOREACH(td, TIDHASH(tid), td_hash) { if (td->td_tid != tid) { run++; continue; } p = td->td_proc; if (pid != -1 && p->p_pid != pid) { td = NULL; break; } if (run > RUN_THRESH) { if (rw_try_upgrade(TIDHASHLOCK(tid))) { LIST_REMOVE(td, td_hash); LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); rw_wunlock(TIDHASHLOCK(tid)); locked = false; break; } } break; } if (locked) rw_runlock(TIDHASHLOCK(tid)); if (td == NULL) return (false); *pp = p; *tdp = td; return (true); } struct thread * tdfind(lwpid_t tid, pid_t pid) { struct proc *p; struct thread *td; td = curthread; if (td->td_tid == tid) { if (pid != -1 && td->td_proc->p_pid != pid) return (NULL); PROC_LOCK(td->td_proc); return (td); } for (;;) { if (!tdfind_hash(tid, pid, &p, &td)) return (NULL); PROC_LOCK(p); if (td->td_tid != tid) { PROC_UNLOCK(p); continue; } if (td->td_proc != p) { PROC_UNLOCK(p); continue; } if (p->p_state == PRS_NEW) { PROC_UNLOCK(p); return (NULL); } return (td); } } void tidhash_add(struct thread *td) { rw_wlock(TIDHASHLOCK(td->td_tid)); LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); rw_wunlock(TIDHASHLOCK(td->td_tid)); } void tidhash_remove(struct thread *td) { rw_wlock(TIDHASHLOCK(td->td_tid)); LIST_REMOVE(td, td_hash); rw_wunlock(TIDHASHLOCK(td->td_tid)); } diff --git a/sys/kern/subr_asan.c b/sys/kern/subr_asan.c index c3664d9cf6c2..75d9e75c531a 100644 --- a/sys/kern/subr_asan.c +++ b/sys/kern/subr_asan.c @@ -1,1227 +1,1237 @@ /* $NetBSD: subr_asan.c,v 1.26 2020/09/10 14:10:46 maxv Exp $ */ /* * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net * All rights reserved. * * This code is part of the KASAN subsystem of the NetBSD kernel. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #define SAN_RUNTIME #include #if 0 __KERNEL_RCSID(0, "$NetBSD: subr_asan.c,v 1.26 2020/09/10 14:10:46 maxv Exp $"); #endif #include #include #include #include +#include #include #include #include #include /* ASAN constants. Part of the compiler ABI. */ #define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE - 1) #define KASAN_ALLOCA_SCALE_SIZE 32 /* ASAN ABI version. */ #if defined(__clang__) && (__clang_major__ - 0 >= 6) #define ASAN_ABI_VERSION 8 #elif __GNUC_PREREQ__(7, 1) && !defined(__clang__) #define ASAN_ABI_VERSION 8 #elif __GNUC_PREREQ__(6, 1) && !defined(__clang__) #define ASAN_ABI_VERSION 6 #else #error "Unsupported compiler version" #endif #define __RET_ADDR (unsigned long)__builtin_return_address(0) /* Global variable descriptor. Part of the compiler ABI. */ struct __asan_global_source_location { const char *filename; int line_no; int column_no; }; struct __asan_global { const void *beg; /* address of the global variable */ size_t size; /* size of the global variable */ size_t size_with_redzone; /* size with the redzone */ const void *name; /* name of the variable */ const void *module_name; /* name of the module where the var is declared */ unsigned long has_dynamic_init; /* the var has dyn initializer (c++) */ struct __asan_global_source_location *location; #if ASAN_ABI_VERSION >= 7 uintptr_t odr_indicator; /* the address of the ODR indicator symbol */ #endif }; FEATURE(kasan, "Kernel address sanitizer"); static SYSCTL_NODE(_debug, OID_AUTO, kasan, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "KASAN options"); static int panic_on_violation = 1; SYSCTL_INT(_debug_kasan, OID_AUTO, panic_on_violation, CTLFLAG_RDTUN, &panic_on_violation, 0, "Panic if an invalid access is detected"); #define kasan_enabled (!kasan_disabled) static bool kasan_disabled __read_mostly = true; SYSCTL_BOOL(_debug_kasan, OID_AUTO, disabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &kasan_disabled, 0, "KASAN is disabled"); /* -------------------------------------------------------------------------- */ void kasan_shadow_map(vm_offset_t addr, size_t size) { size_t sz, npages, i; vm_offset_t sva, eva; KASSERT(addr % KASAN_SHADOW_SCALE == 0, ("%s: invalid address %#lx", __func__, addr)); sz = roundup(size, KASAN_SHADOW_SCALE) / KASAN_SHADOW_SCALE; sva = kasan_md_addr_to_shad(addr); eva = kasan_md_addr_to_shad(addr) + sz; sva = rounddown(sva, PAGE_SIZE); eva = roundup(eva, PAGE_SIZE); npages = (eva - sva) / PAGE_SIZE; KASSERT(sva >= KASAN_MIN_ADDRESS && eva < KASAN_MAX_ADDRESS, ("%s: invalid address range %#lx-%#lx", __func__, sva, eva)); for (i = 0; i < npages; i++) pmap_san_enter(sva + ptoa(i)); } void kasan_init(void) { int disabled; disabled = 0; TUNABLE_INT_FETCH("debug.kasan.disabled", &disabled); if (disabled) return; /* MD initialization. */ kasan_md_init(); /* Now officially enabled. */ kasan_disabled = false; } void kasan_init_early(vm_offset_t stack, size_t size) { kasan_md_init_early(stack, size); } static inline const char * kasan_code_name(uint8_t code) { switch (code) { case KASAN_GENERIC_REDZONE: return "GenericRedZone"; case KASAN_MALLOC_REDZONE: return "MallocRedZone"; case KASAN_KMEM_REDZONE: return "KmemRedZone"; case KASAN_UMA_FREED: return "UMAUseAfterFree"; case KASAN_KSTACK_FREED: return "KernelStack"; case KASAN_EXEC_ARGS_FREED: return "ExecKVA"; case 1 ... 
7: return "RedZonePartial"; case KASAN_STACK_LEFT: return "StackLeft"; case KASAN_STACK_MID: return "StackMiddle"; case KASAN_STACK_RIGHT: return "StackRight"; case KASAN_USE_AFTER_RET: return "UseAfterRet"; case KASAN_USE_AFTER_SCOPE: return "UseAfterScope"; default: return "Unknown"; } } #define REPORT(f, ...) do { \ if (panic_on_violation) { \ kasan_disabled = true; \ panic(f, __VA_ARGS__); \ } else { \ struct stack st; \ \ stack_save(&st); \ printf(f "\n", __VA_ARGS__); \ stack_print_ddb(&st); \ } \ } while (0) static void kasan_report(unsigned long addr, size_t size, bool write, unsigned long pc, uint8_t code) { REPORT("ASan: Invalid access, %zu-byte %s at %#lx, %s(%x)", size, (write ? "write" : "read"), addr, kasan_code_name(code), code); } static __always_inline void kasan_shadow_1byte_markvalid(unsigned long addr) { int8_t *byte = (int8_t *)kasan_md_addr_to_shad(addr); int8_t last = (addr & KASAN_SHADOW_MASK) + 1; *byte = last; } static __always_inline void kasan_shadow_Nbyte_markvalid(const void *addr, size_t size) { size_t i; for (i = 0; i < size; i++) { kasan_shadow_1byte_markvalid((unsigned long)addr + i); } } static __always_inline void kasan_shadow_Nbyte_fill(const void *addr, size_t size, uint8_t code) { void *shad; if (__predict_false(size == 0)) return; if (__predict_false(kasan_md_unsupported((vm_offset_t)addr))) return; KASSERT((vm_offset_t)addr % KASAN_SHADOW_SCALE == 0, ("%s: invalid address %p", __func__, addr)); KASSERT(size % KASAN_SHADOW_SCALE == 0, ("%s: invalid size %zu", __func__, size)); shad = (void *)kasan_md_addr_to_shad((uintptr_t)addr); size = size >> KASAN_SHADOW_SCALE_SHIFT; __builtin_memset(shad, code, size); } /* * In an area of size 'sz_with_redz', mark the 'size' first bytes as valid, * and the rest as invalid. There are generally two use cases: * * o kasan_mark(addr, origsize, size, code), with origsize < size. This marks * the redzone at the end of the buffer as invalid. If the entire is to be * marked invalid, origsize will be 0. * * o kasan_mark(addr, size, size, 0). This marks the entire buffer as valid. */ void kasan_mark(const void *addr, size_t size, size_t redzsize, uint8_t code) { size_t i, n, redz; int8_t *shad; if (__predict_false(!kasan_enabled)) return; if ((vm_offset_t)addr >= DMAP_MIN_ADDRESS && (vm_offset_t)addr < DMAP_MAX_ADDRESS) return; KASSERT((vm_offset_t)addr >= VM_MIN_KERNEL_ADDRESS && (vm_offset_t)addr < VM_MAX_KERNEL_ADDRESS, ("%s: invalid address %p", __func__, addr)); KASSERT((vm_offset_t)addr % KASAN_SHADOW_SCALE == 0, ("%s: invalid address %p", __func__, addr)); redz = redzsize - roundup(size, KASAN_SHADOW_SCALE); KASSERT(redz % KASAN_SHADOW_SCALE == 0, ("%s: invalid size %zu", __func__, redz)); shad = (int8_t *)kasan_md_addr_to_shad((uintptr_t)addr); /* Chunks of 8 bytes, valid. */ n = size / KASAN_SHADOW_SCALE; for (i = 0; i < n; i++) { *shad++ = 0; } /* Possibly one chunk, mid. */ if ((size & KASAN_SHADOW_MASK) != 0) { *shad++ = (size & KASAN_SHADOW_MASK); } /* Chunks of 8 bytes, invalid. 
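* As a worked example with illustrative numbers: kasan_mark(p, 13, 32, code), with KASAN_SHADOW_SCALE == 8, computes redz = 32 - roundup(13, 8) = 16 and emits four shadow bytes: 0 (first chunk fully valid), 5 (only the first 5 bytes of the second chunk valid), then two bytes equal to the caller-supplied code covering the 16-byte redzone. 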
*/ n = redz / KASAN_SHADOW_SCALE; for (i = 0; i < n; i++) { *shad++ = code; } } +void +kasan_thread_alloc(struct thread *td) +{ + if (td->td_kstack != 0) { + kasan_mark((void *)td->td_kstack, ptoa(td->td_kstack_pages), + ptoa(td->td_kstack_pages), 0); + } +} + /* -------------------------------------------------------------------------- */ #define ADDR_CROSSES_SCALE_BOUNDARY(addr, size) \ (addr >> KASAN_SHADOW_SCALE_SHIFT) != \ ((addr + size - 1) >> KASAN_SHADOW_SCALE_SHIFT) static __always_inline bool kasan_shadow_1byte_isvalid(unsigned long addr, uint8_t *code) { int8_t *byte = (int8_t *)kasan_md_addr_to_shad(addr); int8_t last = (addr & KASAN_SHADOW_MASK) + 1; if (__predict_true(*byte == 0 || last <= *byte)) { return (true); } *code = *byte; return (false); } static __always_inline bool kasan_shadow_2byte_isvalid(unsigned long addr, uint8_t *code) { int8_t *byte, last; if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 2)) { return (kasan_shadow_1byte_isvalid(addr, code) && kasan_shadow_1byte_isvalid(addr+1, code)); } byte = (int8_t *)kasan_md_addr_to_shad(addr); last = ((addr + 1) & KASAN_SHADOW_MASK) + 1; if (__predict_true(*byte == 0 || last <= *byte)) { return (true); } *code = *byte; return (false); } static __always_inline bool kasan_shadow_4byte_isvalid(unsigned long addr, uint8_t *code) { int8_t *byte, last; if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 4)) { return (kasan_shadow_2byte_isvalid(addr, code) && kasan_shadow_2byte_isvalid(addr+2, code)); } byte = (int8_t *)kasan_md_addr_to_shad(addr); last = ((addr + 3) & KASAN_SHADOW_MASK) + 1; if (__predict_true(*byte == 0 || last <= *byte)) { return (true); } *code = *byte; return (false); } static __always_inline bool kasan_shadow_8byte_isvalid(unsigned long addr, uint8_t *code) { int8_t *byte, last; if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 8)) { return (kasan_shadow_4byte_isvalid(addr, code) && kasan_shadow_4byte_isvalid(addr+4, code)); } byte = (int8_t *)kasan_md_addr_to_shad(addr); last = ((addr + 7) & KASAN_SHADOW_MASK) + 1; if (__predict_true(*byte == 0 || last <= *byte)) { return (true); } *code = *byte; return (false); } static __always_inline bool kasan_shadow_Nbyte_isvalid(unsigned long addr, size_t size, uint8_t *code) { size_t i; for (i = 0; i < size; i++) { if (!kasan_shadow_1byte_isvalid(addr+i, code)) return (false); } return (true); } static __always_inline void kasan_shadow_check(unsigned long addr, size_t size, bool write, unsigned long retaddr) { uint8_t code; bool valid; if (__predict_false(!kasan_enabled)) return; if (__predict_false(size == 0)) return; if (__predict_false(kasan_md_unsupported(addr))) return; if (KERNEL_PANICKED()) return; if (__builtin_constant_p(size)) { switch (size) { case 1: valid = kasan_shadow_1byte_isvalid(addr, &code); break; case 2: valid = kasan_shadow_2byte_isvalid(addr, &code); break; case 4: valid = kasan_shadow_4byte_isvalid(addr, &code); break; case 8: valid = kasan_shadow_8byte_isvalid(addr, &code); break; default: valid = kasan_shadow_Nbyte_isvalid(addr, size, &code); break; } } else { valid = kasan_shadow_Nbyte_isvalid(addr, size, &code); } if (__predict_false(!valid)) { kasan_report(addr, size, write, retaddr, code); } } /* -------------------------------------------------------------------------- */ void * kasan_memcpy(void *dst, const void *src, size_t len) { kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR); kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR); return (__builtin_memcpy(dst, src, len)); } int kasan_memcmp(const void *b1, const void *b2, size_t len) { 
kasan_shadow_check((unsigned long)b1, len, false, __RET_ADDR); kasan_shadow_check((unsigned long)b2, len, false, __RET_ADDR); return (__builtin_memcmp(b1, b2, len)); } void * kasan_memset(void *b, int c, size_t len) { kasan_shadow_check((unsigned long)b, len, true, __RET_ADDR); return (__builtin_memset(b, c, len)); } void * kasan_memmove(void *dst, const void *src, size_t len) { kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR); kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR); return (__builtin_memmove(dst, src, len)); } size_t kasan_strlen(const char *str) { const char *s; s = str; while (1) { kasan_shadow_check((unsigned long)s, 1, false, __RET_ADDR); if (*s == '\0') break; s++; } return (s - str); } char * kasan_strcpy(char *dst, const char *src) { char *save = dst; while (1) { kasan_shadow_check((unsigned long)src, 1, false, __RET_ADDR); kasan_shadow_check((unsigned long)dst, 1, true, __RET_ADDR); *dst = *src; if (*src == '\0') break; src++, dst++; } return save; } int kasan_strcmp(const char *s1, const char *s2) { while (1) { kasan_shadow_check((unsigned long)s1, 1, false, __RET_ADDR); kasan_shadow_check((unsigned long)s2, 1, false, __RET_ADDR); if (*s1 != *s2) break; if (*s1 == '\0') return 0; s1++, s2++; } return (*(const unsigned char *)s1 - *(const unsigned char *)s2); } int kasan_copyin(const void *uaddr, void *kaddr, size_t len) { kasan_shadow_check((unsigned long)kaddr, len, true, __RET_ADDR); return (copyin(uaddr, kaddr, len)); } int kasan_copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) { kasan_shadow_check((unsigned long)kaddr, len, true, __RET_ADDR); return (copyinstr(uaddr, kaddr, len, done)); } int kasan_copyout(const void *kaddr, void *uaddr, size_t len) { kasan_shadow_check((unsigned long)kaddr, len, false, __RET_ADDR); return (copyout(kaddr, uaddr, len)); } /* -------------------------------------------------------------------------- */ int kasan_fubyte(volatile const void *base) { return (fubyte(base)); } int kasan_fuword16(volatile const void *base) { return (fuword16(base)); } int kasan_fueword(volatile const void *base, long *val) { kasan_shadow_check((unsigned long)val, sizeof(*val), true, __RET_ADDR); return (fueword(base, val)); } int kasan_fueword32(volatile const void *base, int32_t *val) { kasan_shadow_check((unsigned long)val, sizeof(*val), true, __RET_ADDR); return (fueword32(base, val)); } int kasan_fueword64(volatile const void *base, int64_t *val) { kasan_shadow_check((unsigned long)val, sizeof(*val), true, __RET_ADDR); return (fueword64(base, val)); } int kasan_subyte(volatile void *base, int byte) { return (subyte(base, byte)); } int kasan_suword(volatile void *base, long word) { return (suword(base, word)); } int kasan_suword16(volatile void *base, int word) { return (suword16(base, word)); } int kasan_suword32(volatile void *base, int32_t word) { return (suword32(base, word)); } int kasan_suword64(volatile void *base, int64_t word) { return (suword64(base, word)); } int kasan_casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp, uint32_t newval) { kasan_shadow_check((unsigned long)oldvalp, sizeof(*oldvalp), true, __RET_ADDR); return (casueword32(base, oldval, oldvalp, newval)); } int kasan_casueword(volatile u_long *base, u_long oldval, u_long *oldvalp, u_long newval) { kasan_shadow_check((unsigned long)oldvalp, sizeof(*oldvalp), true, __RET_ADDR); return (casueword(base, oldval, oldvalp, newval)); } /* -------------------------------------------------------------------------- */ 
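/* * The ASAN_ATOMIC_FUNC_* macros below generate KASAN interceptors for the atomic(9) API. As an illustrative expansion (derived from the macro text, not an additional definition), ASAN_ATOMIC_FUNC_ADD(int, u_int) produces kasan_atomic_add_int(), kasan_atomic_add_acq_int() and kasan_atomic_add_rel_int(), each of the form: void kasan_atomic_add_int(volatile u_int *ptr, u_int val) { kasan_shadow_check((uintptr_t)ptr, sizeof(u_int), true, __RET_ADDR); atomic_add_int(ptr, val); } That is, the shadow map is validated for the full width of the object before the real atomic(9) primitive runs. */ 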
#include #include #define _ASAN_ATOMIC_FUNC_ADD(name, type) \ void kasan_atomic_add_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_add_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_ADD(name, type) \ _ASAN_ATOMIC_FUNC_ADD(name, type) \ _ASAN_ATOMIC_FUNC_ADD(acq_##name, type) \ _ASAN_ATOMIC_FUNC_ADD(rel_##name, type) #define _ASAN_ATOMIC_FUNC_SUBTRACT(name, type) \ void kasan_atomic_subtract_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_subtract_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _ASAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _ASAN_ATOMIC_FUNC_SUBTRACT(acq_##name, type) \ _ASAN_ATOMIC_FUNC_SUBTRACT(rel_##name, type) #define _ASAN_ATOMIC_FUNC_SET(name, type) \ void kasan_atomic_set_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_set_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_SET(name, type) \ _ASAN_ATOMIC_FUNC_SET(name, type) \ _ASAN_ATOMIC_FUNC_SET(acq_##name, type) \ _ASAN_ATOMIC_FUNC_SET(rel_##name, type) #define _ASAN_ATOMIC_FUNC_CLEAR(name, type) \ void kasan_atomic_clear_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_clear_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_CLEAR(name, type) \ _ASAN_ATOMIC_FUNC_CLEAR(name, type) \ _ASAN_ATOMIC_FUNC_CLEAR(acq_##name, type) \ _ASAN_ATOMIC_FUNC_CLEAR(rel_##name, type) #define ASAN_ATOMIC_FUNC_FETCHADD(name, type) \ type kasan_atomic_fetchadd_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_fetchadd_##name(ptr, val)); \ } #define ASAN_ATOMIC_FUNC_READANDCLEAR(name, type) \ type kasan_atomic_readandclear_##name(volatile type *ptr) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_readandclear_##name(ptr)); \ } #define ASAN_ATOMIC_FUNC_TESTANDCLEAR(name, type) \ int kasan_atomic_testandclear_##name(volatile type *ptr, u_int v) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_testandclear_##name(ptr, v)); \ } #define ASAN_ATOMIC_FUNC_TESTANDSET(name, type) \ int kasan_atomic_testandset_##name(volatile type *ptr, u_int v) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_testandset_##name(ptr, v)); \ } #define ASAN_ATOMIC_FUNC_SWAP(name, type) \ type kasan_atomic_swap_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_swap_##name(ptr, val)); \ } #define _ASAN_ATOMIC_FUNC_CMPSET(name, type) \ int kasan_atomic_cmpset_##name(volatile type *ptr, type oval, \ type nval) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_cmpset_##name(ptr, oval, nval)); \ } #define ASAN_ATOMIC_FUNC_CMPSET(name, type) \ _ASAN_ATOMIC_FUNC_CMPSET(name, type) \ _ASAN_ATOMIC_FUNC_CMPSET(acq_##name, type) \ _ASAN_ATOMIC_FUNC_CMPSET(rel_##name, type) #define _ASAN_ATOMIC_FUNC_FCMPSET(name, type) \ int kasan_atomic_fcmpset_##name(volatile type *ptr, type *oval, \ type nval) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_fcmpset_##name(ptr, oval, nval)); \ } #define ASAN_ATOMIC_FUNC_FCMPSET(name, type) \ _ASAN_ATOMIC_FUNC_FCMPSET(name, type) \ _ASAN_ATOMIC_FUNC_FCMPSET(acq_##name, 
type) \ _ASAN_ATOMIC_FUNC_FCMPSET(rel_##name, type) #define ASAN_ATOMIC_FUNC_THREAD_FENCE(name) \ void kasan_atomic_thread_fence_##name(void) \ { \ atomic_thread_fence_##name(); \ } #define _ASAN_ATOMIC_FUNC_LOAD(name, type) \ type kasan_atomic_load_##name(volatile type *ptr) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_load_##name(ptr)); \ } #define ASAN_ATOMIC_FUNC_LOAD(name, type) \ _ASAN_ATOMIC_FUNC_LOAD(name, type) \ _ASAN_ATOMIC_FUNC_LOAD(acq_##name, type) #define _ASAN_ATOMIC_FUNC_STORE(name, type) \ void kasan_atomic_store_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_store_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_STORE(name, type) \ _ASAN_ATOMIC_FUNC_STORE(name, type) \ _ASAN_ATOMIC_FUNC_STORE(rel_##name, type) ASAN_ATOMIC_FUNC_ADD(8, uint8_t); ASAN_ATOMIC_FUNC_ADD(16, uint16_t); ASAN_ATOMIC_FUNC_ADD(32, uint32_t); ASAN_ATOMIC_FUNC_ADD(64, uint64_t); ASAN_ATOMIC_FUNC_ADD(int, u_int); ASAN_ATOMIC_FUNC_ADD(long, u_long); ASAN_ATOMIC_FUNC_ADD(ptr, uintptr_t); ASAN_ATOMIC_FUNC_SUBTRACT(8, uint8_t); ASAN_ATOMIC_FUNC_SUBTRACT(16, uint16_t); ASAN_ATOMIC_FUNC_SUBTRACT(32, uint32_t); ASAN_ATOMIC_FUNC_SUBTRACT(64, uint64_t); ASAN_ATOMIC_FUNC_SUBTRACT(int, u_int); ASAN_ATOMIC_FUNC_SUBTRACT(long, u_long); ASAN_ATOMIC_FUNC_SUBTRACT(ptr, uintptr_t); ASAN_ATOMIC_FUNC_SET(8, uint8_t); ASAN_ATOMIC_FUNC_SET(16, uint16_t); ASAN_ATOMIC_FUNC_SET(32, uint32_t); ASAN_ATOMIC_FUNC_SET(64, uint64_t); ASAN_ATOMIC_FUNC_SET(int, u_int); ASAN_ATOMIC_FUNC_SET(long, u_long); ASAN_ATOMIC_FUNC_SET(ptr, uintptr_t); ASAN_ATOMIC_FUNC_CLEAR(8, uint8_t); ASAN_ATOMIC_FUNC_CLEAR(16, uint16_t); ASAN_ATOMIC_FUNC_CLEAR(32, uint32_t); ASAN_ATOMIC_FUNC_CLEAR(64, uint64_t); ASAN_ATOMIC_FUNC_CLEAR(int, u_int); ASAN_ATOMIC_FUNC_CLEAR(long, u_long); ASAN_ATOMIC_FUNC_CLEAR(ptr, uintptr_t); ASAN_ATOMIC_FUNC_FETCHADD(32, uint32_t); ASAN_ATOMIC_FUNC_FETCHADD(64, uint64_t); ASAN_ATOMIC_FUNC_FETCHADD(int, u_int); ASAN_ATOMIC_FUNC_FETCHADD(long, u_long); ASAN_ATOMIC_FUNC_READANDCLEAR(32, uint32_t); ASAN_ATOMIC_FUNC_READANDCLEAR(64, uint64_t); ASAN_ATOMIC_FUNC_READANDCLEAR(int, u_int); ASAN_ATOMIC_FUNC_READANDCLEAR(long, u_long); ASAN_ATOMIC_FUNC_READANDCLEAR(ptr, uintptr_t); ASAN_ATOMIC_FUNC_TESTANDCLEAR(32, uint32_t); ASAN_ATOMIC_FUNC_TESTANDCLEAR(64, uint64_t); ASAN_ATOMIC_FUNC_TESTANDCLEAR(int, u_int); ASAN_ATOMIC_FUNC_TESTANDCLEAR(long, u_long); ASAN_ATOMIC_FUNC_TESTANDSET(32, uint32_t); ASAN_ATOMIC_FUNC_TESTANDSET(64, uint64_t); ASAN_ATOMIC_FUNC_TESTANDSET(int, u_int); ASAN_ATOMIC_FUNC_TESTANDSET(long, u_long); ASAN_ATOMIC_FUNC_SWAP(32, uint32_t); ASAN_ATOMIC_FUNC_SWAP(64, uint64_t); ASAN_ATOMIC_FUNC_SWAP(int, u_int); ASAN_ATOMIC_FUNC_SWAP(long, u_long); ASAN_ATOMIC_FUNC_SWAP(ptr, uintptr_t); ASAN_ATOMIC_FUNC_CMPSET(8, uint8_t); ASAN_ATOMIC_FUNC_CMPSET(16, uint16_t); ASAN_ATOMIC_FUNC_CMPSET(32, uint32_t); ASAN_ATOMIC_FUNC_CMPSET(64, uint64_t); ASAN_ATOMIC_FUNC_CMPSET(int, u_int); ASAN_ATOMIC_FUNC_CMPSET(long, u_long); ASAN_ATOMIC_FUNC_CMPSET(ptr, uintptr_t); ASAN_ATOMIC_FUNC_FCMPSET(8, uint8_t); ASAN_ATOMIC_FUNC_FCMPSET(16, uint16_t); ASAN_ATOMIC_FUNC_FCMPSET(32, uint32_t); ASAN_ATOMIC_FUNC_FCMPSET(64, uint64_t); ASAN_ATOMIC_FUNC_FCMPSET(int, u_int); ASAN_ATOMIC_FUNC_FCMPSET(long, u_long); ASAN_ATOMIC_FUNC_FCMPSET(ptr, uintptr_t); _ASAN_ATOMIC_FUNC_LOAD(bool, bool); ASAN_ATOMIC_FUNC_LOAD(8, uint8_t); ASAN_ATOMIC_FUNC_LOAD(16, uint16_t); ASAN_ATOMIC_FUNC_LOAD(32, uint32_t); ASAN_ATOMIC_FUNC_LOAD(64, 
uint64_t); ASAN_ATOMIC_FUNC_LOAD(char, u_char); ASAN_ATOMIC_FUNC_LOAD(short, u_short); ASAN_ATOMIC_FUNC_LOAD(int, u_int); ASAN_ATOMIC_FUNC_LOAD(long, u_long); ASAN_ATOMIC_FUNC_LOAD(ptr, uintptr_t); _ASAN_ATOMIC_FUNC_STORE(bool, bool); ASAN_ATOMIC_FUNC_STORE(8, uint8_t); ASAN_ATOMIC_FUNC_STORE(16, uint16_t); ASAN_ATOMIC_FUNC_STORE(32, uint32_t); ASAN_ATOMIC_FUNC_STORE(64, uint64_t); ASAN_ATOMIC_FUNC_STORE(char, u_char); ASAN_ATOMIC_FUNC_STORE(short, u_short); ASAN_ATOMIC_FUNC_STORE(int, u_int); ASAN_ATOMIC_FUNC_STORE(long, u_long); ASAN_ATOMIC_FUNC_STORE(ptr, uintptr_t); ASAN_ATOMIC_FUNC_THREAD_FENCE(acq); ASAN_ATOMIC_FUNC_THREAD_FENCE(rel); ASAN_ATOMIC_FUNC_THREAD_FENCE(acq_rel); ASAN_ATOMIC_FUNC_THREAD_FENCE(seq_cst); void kasan_atomic_interrupt_fence(void) { } /* -------------------------------------------------------------------------- */ #include #include #include int kasan_bus_space_map(bus_space_tag_t tag, bus_addr_t hnd, bus_size_t size, int flags, bus_space_handle_t *handlep) { return (bus_space_map(tag, hnd, size, flags, handlep)); } void kasan_bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_unmap(tag, hnd, size); } int kasan_bus_space_subregion(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, bus_space_handle_t *handlep) { return (bus_space_subregion(tag, hnd, offset, size, handlep)); } void kasan_bus_space_free(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_free(tag, hnd, size); } void kasan_bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, int flags) { bus_space_barrier(tag, hnd, offset, size, flags); } #define ASAN_BUS_READ_FUNC(func, width, type) \ type kasan_bus_space_read##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset) \ { \ return (bus_space_read##func##_##width(tag, hnd, \ offset)); \ } \ #define ASAN_BUS_READ_PTR_FUNC(func, width, type) \ void kasan_bus_space_read_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t size, type *buf, \ bus_size_t count) \ { \ kasan_shadow_check((uintptr_t)buf, sizeof(type) * count,\ false, __RET_ADDR); \ bus_space_read_##func##_##width(tag, hnd, size, buf, \ count); \ } ASAN_BUS_READ_FUNC(, 1, uint8_t) ASAN_BUS_READ_FUNC(_stream, 1, uint8_t) ASAN_BUS_READ_PTR_FUNC(multi, 1, uint8_t) ASAN_BUS_READ_PTR_FUNC(multi_stream, 1, uint8_t) ASAN_BUS_READ_PTR_FUNC(region, 1, uint8_t) ASAN_BUS_READ_PTR_FUNC(region_stream, 1, uint8_t) ASAN_BUS_READ_FUNC(, 2, uint16_t) ASAN_BUS_READ_FUNC(_stream, 2, uint16_t) ASAN_BUS_READ_PTR_FUNC(multi, 2, uint16_t) ASAN_BUS_READ_PTR_FUNC(multi_stream, 2, uint16_t) ASAN_BUS_READ_PTR_FUNC(region, 2, uint16_t) ASAN_BUS_READ_PTR_FUNC(region_stream, 2, uint16_t) ASAN_BUS_READ_FUNC(, 4, uint32_t) ASAN_BUS_READ_FUNC(_stream, 4, uint32_t) ASAN_BUS_READ_PTR_FUNC(multi, 4, uint32_t) ASAN_BUS_READ_PTR_FUNC(multi_stream, 4, uint32_t) ASAN_BUS_READ_PTR_FUNC(region, 4, uint32_t) ASAN_BUS_READ_PTR_FUNC(region_stream, 4, uint32_t) ASAN_BUS_READ_FUNC(, 8, uint64_t) #if defined(__aarch64__) ASAN_BUS_READ_FUNC(_stream, 8, uint64_t) ASAN_BUS_READ_PTR_FUNC(multi, 8, uint64_t) ASAN_BUS_READ_PTR_FUNC(multi_stream, 8, uint64_t) ASAN_BUS_READ_PTR_FUNC(region, 8, uint64_t) ASAN_BUS_READ_PTR_FUNC(region_stream, 8, uint64_t) #endif #define ASAN_BUS_WRITE_FUNC(func, width, type) \ void kasan_bus_space_write##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value) \ { \ bus_space_write##func##_##width(tag, 
hnd, offset, value);\ } \ #define ASAN_BUS_WRITE_PTR_FUNC(func, width, type) \ void kasan_bus_space_write_##func##_##width(bus_space_tag_t tag,\ bus_space_handle_t hnd, bus_size_t size, const type *buf, \ bus_size_t count) \ { \ kasan_shadow_check((uintptr_t)buf, sizeof(type) * count,\ true, __RET_ADDR); \ bus_space_write_##func##_##width(tag, hnd, size, buf, \ count); \ } ASAN_BUS_WRITE_FUNC(, 1, uint8_t) ASAN_BUS_WRITE_FUNC(_stream, 1, uint8_t) ASAN_BUS_WRITE_PTR_FUNC(multi, 1, uint8_t) ASAN_BUS_WRITE_PTR_FUNC(multi_stream, 1, uint8_t) ASAN_BUS_WRITE_PTR_FUNC(region, 1, uint8_t) ASAN_BUS_WRITE_PTR_FUNC(region_stream, 1, uint8_t) ASAN_BUS_WRITE_FUNC(, 2, uint16_t) ASAN_BUS_WRITE_FUNC(_stream, 2, uint16_t) ASAN_BUS_WRITE_PTR_FUNC(multi, 2, uint16_t) ASAN_BUS_WRITE_PTR_FUNC(multi_stream, 2, uint16_t) ASAN_BUS_WRITE_PTR_FUNC(region, 2, uint16_t) ASAN_BUS_WRITE_PTR_FUNC(region_stream, 2, uint16_t) ASAN_BUS_WRITE_FUNC(, 4, uint32_t) ASAN_BUS_WRITE_FUNC(_stream, 4, uint32_t) ASAN_BUS_WRITE_PTR_FUNC(multi, 4, uint32_t) ASAN_BUS_WRITE_PTR_FUNC(multi_stream, 4, uint32_t) ASAN_BUS_WRITE_PTR_FUNC(region, 4, uint32_t) ASAN_BUS_WRITE_PTR_FUNC(region_stream, 4, uint32_t) ASAN_BUS_WRITE_FUNC(, 8, uint64_t) #define ASAN_BUS_SET_FUNC(func, width, type) \ void kasan_bus_space_set_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value, \ bus_size_t count) \ { \ bus_space_set_##func##_##width(tag, hnd, offset, value, \ count); \ } ASAN_BUS_SET_FUNC(multi, 1, uint8_t) ASAN_BUS_SET_FUNC(region, 1, uint8_t) ASAN_BUS_SET_FUNC(multi_stream, 1, uint8_t) ASAN_BUS_SET_FUNC(region_stream, 1, uint8_t) ASAN_BUS_SET_FUNC(multi, 2, uint16_t) ASAN_BUS_SET_FUNC(region, 2, uint16_t) ASAN_BUS_SET_FUNC(multi_stream, 2, uint16_t) ASAN_BUS_SET_FUNC(region_stream, 2, uint16_t) ASAN_BUS_SET_FUNC(multi, 4, uint32_t) ASAN_BUS_SET_FUNC(region, 4, uint32_t) ASAN_BUS_SET_FUNC(multi_stream, 4, uint32_t) ASAN_BUS_SET_FUNC(region_stream, 4, uint32_t) #define ASAN_BUS_PEEK_FUNC(width, type) \ int kasan_bus_space_peek_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type *valuep) \ { \ return (bus_space_peek_##width(tag, hnd, offset, \ valuep)); \ } ASAN_BUS_PEEK_FUNC(1, uint8_t) ASAN_BUS_PEEK_FUNC(2, uint16_t) ASAN_BUS_PEEK_FUNC(4, uint32_t) ASAN_BUS_PEEK_FUNC(8, uint64_t) #define ASAN_BUS_POKE_FUNC(width, type) \ int kasan_bus_space_poke_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value) \ { \ return (bus_space_poke_##width(tag, hnd, offset, \ value)); \ } ASAN_BUS_POKE_FUNC(1, uint8_t) ASAN_BUS_POKE_FUNC(2, uint16_t) ASAN_BUS_POKE_FUNC(4, uint32_t) ASAN_BUS_POKE_FUNC(8, uint64_t) /* -------------------------------------------------------------------------- */ void __asan_register_globals(struct __asan_global *, size_t); void __asan_unregister_globals(struct __asan_global *, size_t); void __asan_register_globals(struct __asan_global *globals, size_t n) { size_t i; for (i = 0; i < n; i++) { kasan_mark(globals[i].beg, globals[i].size, globals[i].size_with_redzone, KASAN_GENERIC_REDZONE); } } void __asan_unregister_globals(struct __asan_global *globals, size_t n) { size_t i; for (i = 0; i < n; i++) { kasan_mark(globals[i].beg, globals[i].size_with_redzone, globals[i].size_with_redzone, 0); } } #define ASAN_LOAD_STORE(size) \ void __asan_load##size(unsigned long); \ void __asan_load##size(unsigned long addr) \ { \ kasan_shadow_check(addr, size, false, __RET_ADDR);\ } \ void __asan_load##size##_noabort(unsigned long); \ void 
__asan_load##size##_noabort(unsigned long addr) \ { \ kasan_shadow_check(addr, size, false, __RET_ADDR);\ } \ void __asan_store##size(unsigned long); \ void __asan_store##size(unsigned long addr) \ { \ kasan_shadow_check(addr, size, true, __RET_ADDR);\ } \ void __asan_store##size##_noabort(unsigned long); \ void __asan_store##size##_noabort(unsigned long addr) \ { \ kasan_shadow_check(addr, size, true, __RET_ADDR);\ } ASAN_LOAD_STORE(1); ASAN_LOAD_STORE(2); ASAN_LOAD_STORE(4); ASAN_LOAD_STORE(8); ASAN_LOAD_STORE(16); void __asan_loadN(unsigned long, size_t); void __asan_loadN_noabort(unsigned long, size_t); void __asan_storeN(unsigned long, size_t); void __asan_storeN_noabort(unsigned long, size_t); void __asan_handle_no_return(void); void __asan_loadN(unsigned long addr, size_t size) { kasan_shadow_check(addr, size, false, __RET_ADDR); } void __asan_loadN_noabort(unsigned long addr, size_t size) { kasan_shadow_check(addr, size, false, __RET_ADDR); } void __asan_storeN(unsigned long addr, size_t size) { kasan_shadow_check(addr, size, true, __RET_ADDR); } void __asan_storeN_noabort(unsigned long addr, size_t size) { kasan_shadow_check(addr, size, true, __RET_ADDR); } void __asan_handle_no_return(void) { /* nothing */ } #define ASAN_SET_SHADOW(byte) \ void __asan_set_shadow_##byte(void *, size_t); \ void __asan_set_shadow_##byte(void *addr, size_t size) \ { \ __builtin_memset((void *)addr, 0x##byte, size); \ } ASAN_SET_SHADOW(00); ASAN_SET_SHADOW(f1); ASAN_SET_SHADOW(f2); ASAN_SET_SHADOW(f3); ASAN_SET_SHADOW(f5); ASAN_SET_SHADOW(f8); void __asan_poison_stack_memory(const void *, size_t); void __asan_unpoison_stack_memory(const void *, size_t); void __asan_poison_stack_memory(const void *addr, size_t size) { size = roundup(size, KASAN_SHADOW_SCALE); kasan_shadow_Nbyte_fill(addr, size, KASAN_USE_AFTER_SCOPE); } void __asan_unpoison_stack_memory(const void *addr, size_t size) { kasan_shadow_Nbyte_markvalid(addr, size); } void __asan_alloca_poison(const void *, size_t); void __asan_allocas_unpoison(const void *, const void *); void __asan_alloca_poison(const void *addr, size_t size) { const void *l, *r; KASSERT((vm_offset_t)addr % KASAN_ALLOCA_SCALE_SIZE == 0, ("%s: invalid address %p", __func__, addr)); l = (const uint8_t *)addr - KASAN_ALLOCA_SCALE_SIZE; r = (const uint8_t *)addr + roundup(size, KASAN_ALLOCA_SCALE_SIZE); kasan_shadow_Nbyte_fill(l, KASAN_ALLOCA_SCALE_SIZE, KASAN_STACK_LEFT); kasan_mark(addr, size, roundup(size, KASAN_ALLOCA_SCALE_SIZE), KASAN_STACK_MID); kasan_shadow_Nbyte_fill(r, KASAN_ALLOCA_SCALE_SIZE, KASAN_STACK_RIGHT); } void __asan_allocas_unpoison(const void *stkbegin, const void *stkend) { size_t size; if (__predict_false(!stkbegin)) return; if (__predict_false((uintptr_t)stkbegin > (uintptr_t)stkend)) return; size = (uintptr_t)stkend - (uintptr_t)stkbegin; kasan_shadow_Nbyte_fill(stkbegin, size, 0); } void __asan_poison_memory_region(const void *addr, size_t size); void __asan_unpoison_memory_region(const void *addr, size_t size); void __asan_poison_memory_region(const void *addr, size_t size) { } void __asan_unpoison_memory_region(const void *addr, size_t size) { } diff --git a/sys/sys/asan.h b/sys/sys/asan.h index a3becdef5f57..6a01d0531725 100644 --- a/sys/sys/asan.h +++ b/sys/sys/asan.h @@ -1,66 +1,70 @@ /* $NetBSD: asan.h,v 1.15 2020/09/10 14:10:46 maxv Exp $ */ /* * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net * All rights reserved. * * This code is part of the KASAN subsystem of the NetBSD kernel. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_ASAN_H_ #define _SYS_ASAN_H_ #ifdef KASAN #include /* ASAN constants. Part of the compiler ABI. */ #define KASAN_SHADOW_SCALE 8 #define KASAN_SHADOW_SCALE_SHIFT 3 /* Stack redzone values. Part of the compiler ABI. */ #define KASAN_STACK_LEFT 0xF1 #define KASAN_STACK_MID 0xF2 #define KASAN_STACK_RIGHT 0xF3 #define KASAN_USE_AFTER_RET 0xF5 #define KASAN_USE_AFTER_SCOPE 0xF8 /* Our redzone values. */ #define KASAN_GENERIC_REDZONE 0xFA #define KASAN_MALLOC_REDZONE 0xFB #define KASAN_KMEM_REDZONE 0xFC #define KASAN_UMA_FREED 0xFD #define KASAN_KSTACK_FREED 0xFE #define KASAN_EXEC_ARGS_FREED 0xFF +struct thread; + void kasan_init(void); void kasan_init_early(vm_offset_t, size_t); void kasan_shadow_map(vm_offset_t, size_t); void kasan_mark(const void *, size_t, size_t, uint8_t); +void kasan_thread_alloc(struct thread *); #else /* KASAN */ #define kasan_init() #define kasan_shadow_map(a, s) #define kasan_mark(p, s, l, c) +#define kasan_thread_alloc(t) #endif /* !KASAN */ #endif /* !_SYS_ASAN_H_ */ diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 33b836f4150e..1b542d1374b4 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -1,1367 +1,1367 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_PROC_H_ #define _SYS_PROC_H_ #include /* For struct callout. */ #include /* For struct klist. */ #ifdef _KERNEL #include #endif #include #ifndef _KERNEL #include #endif #include #include #include #include #include #include #include /* XXX. */ #include #include #include #include #include #ifndef _KERNEL #include /* For structs itimerval, timeval. */ #else #include #include #endif #include #include #include #include #include /* Machine-dependent proc substruct. */ #ifdef _KERNEL #include #endif /* * One structure allocated per session. * * List of locks * (m) locked by s_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct session { u_int s_count; /* Ref cnt; pgrps in session - atomic. */ struct proc *s_leader; /* (m + e) Session leader. */ struct vnode *s_ttyvp; /* (m) Vnode of controlling tty. */ struct cdev_priv *s_ttydp; /* (m) Device of controlling tty. */ struct tty *s_ttyp; /* (e) Controlling tty. */ pid_t s_sid; /* (c) Session ID. */ /* (m) Setlogin() name: */ char s_login[roundup(MAXLOGNAME, sizeof(long))]; struct mtx s_mtx; /* Mutex to protect members. */ }; /* * One structure allocated per process group. * * List of locks * (m) locked by pg_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct pgrp { LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */ LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */ struct session *pg_session; /* (c) Pointer to session. */ struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */ pid_t pg_id; /* (c) Process group id. */ struct mtx pg_mtx; /* Mutex to protect members */ int pg_flags; /* (m) PGRP_ flags */ struct sx pg_killsx; /* Mutual exclusion between group member * fork() and killpg() */ }; #define PGRP_ORPHANED 0x00000001 /* Group is orphaned */ /* * pargs, used to hold a copy of the command line, if it had a sane length. */ struct pargs { u_int ar_ref; /* Reference count. */ u_int ar_length; /* Length. */ u_char ar_args[1]; /* Arguments. */ }; /*- * Description of a process. * * This structure contains the information needed to manage a thread of * control, known in UN*X as a process; it has references to substructures * containing descriptions of things that the process uses, but may share * with related processes. The process structure and the substructures * are always addressable except for those marked "(CPU)" below, * which might be addressable only on a processor on which the process * is running. * * Below is a key of locks used to protect each member of struct proc. The * lock is indicated by a reference to a specific character in parens in the * associated comment. 
* * - not yet protected * a - only touched by curproc or parent during fork/wait * b - created at fork, never changes * (exception aiods switch vmspaces, but they are also * marked 'P_SYSTEM' so hopefully it will be left alone) * c - locked by proc mtx * d - locked by allproc_lock lock * e - locked by proctree_lock lock * f - session mtx * g - process group mtx * h - callout_lock mtx * i - by curproc or the master session mtx * j - locked by proc slock * k - only accessed by curthread * k*- only accessed by curthread and from an interrupt * kx- only accessed by curthread and by debugger * l - the attaching proc or attaching proc parent * n - not locked, lazy * o - ktrace lock * q - td_contested lock * r - p_peers lock * s - see sleepq_switch(), sleeping_on_old_rtc(), and sleep(9) * t - thread lock * u - process stat lock * w - process timer lock * x - created at fork, only changes during single threading in exec * y - created at first aio, doesn't change until exit or exec at which * point we are single-threaded and only curthread changes it * * If the locking key specifies two identifiers (for example, p_pptr) then * either lock is sufficient for read access, but both locks must be held * for write access. */ struct cpuset; struct filecaps; struct filemon; struct kaioinfo; struct kaudit_record; struct kcov_info; struct kdtrace_proc; struct kdtrace_thread; struct kmsan_td; struct kq_timer_cb_data; struct mqueue_notifier; struct p_sched; struct proc; struct procdesc; struct racct; struct sbuf; struct sleepqueue; struct socket; struct td_sched; struct thread; struct trapframe; struct turnstile; struct vm_map; struct vm_map_entry; struct epoch_tracker; struct syscall_args { u_int code; u_int original_code; struct sysent *callp; register_t args[8]; }; /* * XXX: Does this belong in resource.h or resourcevar.h instead? * Resource usage extension. The times in rusage structs in the kernel are * never up to date. The actual times are kept as runtimes and tick counts * (with control info in the "previous" times), and are converted when * userland asks for rusage info. Backwards compatibility prevents putting * this directly in the user-visible rusage struct. * * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux. * Locking for td_rux: (t) for all fields. */ struct rusage_ext { uint64_t rux_runtime; /* (cu) Real time. */ uint64_t rux_uticks; /* (cu) Statclock hits in user mode. */ uint64_t rux_sticks; /* (cu) Statclock hits in sys mode. */ uint64_t rux_iticks; /* (cu) Statclock hits in intr mode. */ uint64_t rux_uu; /* (c) Previous user time in usec. */ uint64_t rux_su; /* (c) Previous sys time in usec. */ uint64_t rux_tu; /* (c) Previous total time in usec. */ }; /* * Kernel runnable context (thread). * This is what is put to sleep and reactivated. * Thread context. Processes may have multiple threads. */ struct thread { struct mtx *volatile td_lock; /* replaces sched lock */ struct proc *td_proc; /* (*) Associated process. */ TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */ TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */ union { TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */ struct thread *td_zombie; /* Zombie list linkage */ }; TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */ LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */ struct cpuset *td_cpuset; /* (t) CPU affinity mask. */ struct domainset_ref td_domain; /* (a) NUMA policy */ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. 
*/ struct turnstile *td_turnstile; /* (k) Associated turnstile. */ struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */ struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */ lwpid_t td_tid; /* (b) Thread ID. */ sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */ #define td_siglist td_sigqueue.sq_signals u_char td_lend_user_pri; /* (t) Lend user pri. */ u_char td_allocdomain; /* (b) NUMA domain backing this struct thread. */ u_char td_base_ithread_pri; /* (t) Base ithread pri */ struct kmsan_td *td_kmsan; /* (k) KMSAN state */ /* Cleared during fork1(), thread_create(), or kthread_add(). */ #define td_startzero td_flags int td_flags; /* (t) TDF_* flags. */ int td_ast; /* (t) TDA_* indicators */ int td_inhibitors; /* (t) Why can not run. */ int td_pflags; /* (k) Private thread (TDP_*) flags. */ int td_pflags2; /* (k) Private thread (TDP2_*) flags. */ int td_dupfd; /* (k) Ret value from fdopen. XXX */ int td_sqqueue; /* (t) Sleepqueue queue blocked on. */ const void *td_wchan; /* (t) Sleep address. */ const char *td_wmesg; /* (t) Reason for sleep. */ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */ u_char td_tsqueue; /* (t) Turnstile queue blocked on. */ u_char _td_pad0[2]; /* Available. */ int td_locks; /* (k) Debug: count of non-spin locks */ int td_rw_rlocks; /* (k) Count of rwlock read locks. */ int td_sx_slocks; /* (k) Count of sx shared locks. */ int td_lk_slocks; /* (k) Count of lockmgr shared locks. */ struct lock_object *td_wantedlock; /* (k) Lock we are contending on */ struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */ const char *td_lockname; /* (t) Name of lock blocked on. */ LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */ struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */ int td_intr_nesting_level; /* (k) Interrupt recursion. */ int td_pinned; /* (k) Temporary cpu pin count. */ struct ucred *td_realucred; /* (k) Reference to credentials. */ struct ucred *td_ucred; /* (k) Used credentials, temporarily switchable. */ struct plimit *td_limit; /* (k) Resource limits. */ int td_slptick; /* (t) Time at sleep. */ int td_blktick; /* (t) Time spent blocked. */ int td_swvoltick; /* (t) Time at last SW_VOL switch. */ int td_swinvoltick; /* (t) Time at last SW_INVOL switch. */ u_int td_cow; /* (*) Number of copy-on-write faults */ struct rusage td_ru; /* (t) rusage information. */ struct rusage_ext td_rux; /* (t) Internal rusage information. */ uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */ uint64_t td_runtime; /* (t) How many cpu ticks we've run. */ u_int td_pticks; /* (t) Statclock hits for profiling */ u_int td_sticks; /* (t) Statclock hits in system mode. */ u_int td_iticks; /* (t) Statclock hits in intr mode. */ u_int td_uticks; /* (t) Statclock hits in user mode. */ int td_intrval; /* (t) Return value for sleepq. */ sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */ volatile u_int td_generation; /* (k) For detection of preemption */ stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */ int td_xsig; /* (c) Signal for ptrace */ u_long td_profil_addr; /* (k) Temporary addr until AST. */ u_int td_profil_ticks; /* (k) Temporary ticks until AST. */ char td_name[MAXCOMLEN + 1]; /* (*) Thread name. */ struct file *td_fpop; /* (k) file referencing cdev under op */ int td_dbgflags; /* (c) Userland debugger flags */ siginfo_t td_si; /* (c) For debugger or core file */ int td_ng_outbound; /* (k) Thread entered ng from above. 
*/ struct osd td_osd; /* (k) Object specific data. */ struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */ pid_t td_dbg_forked; /* (c) Child pid for debugger. */ u_int td_no_sleeping; /* (k) Sleeping disabled count. */ struct vnode *td_vp_reserved;/* (k) Preallocated vnode. */ void *td_su; /* (k) FFS SU private */ sbintime_t td_sleeptimo; /* (t) Sleep timeout. */ int td_rtcgen; /* (s) rtc_generation of abs. sleep */ int td_errno; /* (k) Error from last syscall. */ size_t td_vslock_sz; /* (k) amount of vslock-ed space */ struct kcov_info *td_kcov_info; /* (*) Kernel code coverage data */ long td_ucredref; /* (k) references on td_realucred */ #define td_endzero td_sigmask /* Copied during fork1(), thread_create(), or kthread_add(). */ #define td_startcopy td_endzero sigset_t td_sigmask; /* (c) Current signal mask. */ u_char td_rqindex; /* (t) Run queue index. */ u_char td_base_pri; /* (t) Thread base kernel priority. */ u_char td_priority; /* (t) Thread active priority. */ u_char td_pri_class; /* (t) Scheduling class. */ u_char td_user_pri; /* (t) User pri from estcpu and nice. */ u_char td_base_user_pri; /* (t) Base user pri */ uintptr_t td_rb_list; /* (k) Robust list head. */ uintptr_t td_rbp_list; /* (k) Robust priv list head. */ uintptr_t td_rb_inact; /* (k) Current in-action mutex loc. */ struct syscall_args td_sa; /* (kx) Syscall parameters. Copied on fork for child tracing. */ void *td_sigblock_ptr; /* (k) uptr for fast sigblock. */ uint32_t td_sigblock_val; /* (k) fast sigblock value read at td_sigblock_ptr on kern entry */ #define td_endcopy td_pcb /* * Fields that must be manually set in fork1(), thread_create(), kthread_add(), * or already have been set in the allocator, constructor, etc. */ struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */ enum td_states { TDS_INACTIVE = 0x0, TDS_INHIBITED, TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING } td_state; /* (t) thread state */ /* Note: td_state must be accessed using TD_{GET,SET}_STATE(). */ union { syscallarg_t tdu_retval[2]; off_t tdu_off; } td_uretoff; /* (k) Syscall aux returns. */ #define td_retval td_uretoff.tdu_retval u_int td_cowgen; /* (k) Generation of COW pointers. */ /* LP64 hole */ struct callout td_slpcallout; /* (h) Callout for sleep. */ struct trapframe *td_frame; /* (k) */ vm_offset_t td_kstack; /* (a) Kernel VA of kstack. */ u_short td_kstack_pages; /* (a) Size of the kstack. */ u_short td_kstack_domain; /* (a) Domain backing kstack KVA. */ volatile u_int td_critnest; /* (k*) Critical section nest level. */ struct mdthread td_md; /* (k) Any machine-dependent fields. */ struct kaudit_record *td_ar; /* (k) Active audit record, if any. */ struct lpohead td_lprof[2]; /* (a) lock profiling objects. */ struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */ struct vnet *td_vnet; /* (k) Effective vnet. */ const char *td_vnet_lpush; /* (k) Debugging vnet push / pop. */ struct trapframe *td_intr_frame;/* (k) Frame of the current irq */ struct proc *td_rfppwait_p; /* (k) The vforked child */ struct vm_page **td_ma; /* (k) uio pages held */ int td_ma_cnt; /* (k) size of *td_ma */ /* LP64 hole */ void *td_emuldata; /* Emulator state data */ int td_lastcpu; /* (t) Last cpu we were on. */ int td_oncpu; /* (t) Which cpu we are on. */ void *td_lkpi_task; /* LinuxKPI task struct pointer */ int td_pmcpend; void *td_remotereq; /* (c) dbg remote request. 
*/ off_t td_ktr_io_lim; /* (k) limit for ktrace file size */ #ifdef EPOCH_TRACE SLIST_HEAD(, epoch_tracker) td_epochs; #endif }; struct thread0_storage { struct thread t0st_thread; uint64_t t0st_sched[10]; }; struct mtx *thread_lock_block(struct thread *); void thread_lock_block_wait(struct thread *); void thread_lock_set(struct thread *, struct mtx *); void thread_lock_unblock(struct thread *, struct mtx *); #define THREAD_LOCK_ASSERT(td, type) \ mtx_assert((td)->td_lock, (type)) #define THREAD_LOCK_BLOCKED_ASSERT(td, type) \ do { \ struct mtx *__m = (td)->td_lock; \ if (__m != &blocked_lock) \ mtx_assert(__m, (type)); \ } while (0) #ifdef INVARIANTS #define THREAD_LOCKPTR_ASSERT(td, lock) \ do { \ struct mtx *__m; \ __m = (td)->td_lock; \ KASSERT(__m == (lock), \ ("Thread %p lock %p does not match %p", td, __m, (lock))); \ } while (0) #define THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock) \ do { \ struct mtx *__m; \ __m = (td)->td_lock; \ KASSERT(__m == (lock) || __m == &blocked_lock, \ ("Thread %p lock %p does not match %p", td, __m, (lock))); \ } while (0) #define TD_LOCKS_INC(td) ((td)->td_locks++) #define TD_LOCKS_DEC(td) do { \ KASSERT(SCHEDULER_STOPPED() || (td)->td_locks > 0, \ ("Thread %p owns no locks", (td))); \ (td)->td_locks--; \ } while (0) #else #define THREAD_LOCKPTR_ASSERT(td, lock) #define THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock) #define TD_LOCKS_INC(td) #define TD_LOCKS_DEC(td) #endif /* * Flags kept in td_flags: * To change these you MUST have the scheduler lock. */ #define TDF_BORROWING 0x00000001 /* Thread is borrowing pri from another. */ #define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */ #define TDF_INMEM 0x00000004 /* Thread's stack is in memory. */ #define TDF_SINTR 0x00000008 /* Sleep is interruptible. */ #define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */ #define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */ #define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */ #define TDF_SIGWAIT 0x00000080 /* Ignore ignored signals */ #define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */ #define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */ #define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */ #define TDF_UNUSED1 0x00000800 /* Available */ #define TDF_UNUSED2 0x00001000 /* Available */ #define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */ #define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */ #define TDF_UNUSED3 0x00008000 /* Available */ #define TDF_UNUSED4 0x00010000 /* Available */ #define TDF_UNUSED5 0x00020000 /* Available */ #define TDF_NOLOAD 0x00040000 /* Ignore during load avg calculations. */ #define TDF_SERESTART 0x00080000 /* ERESTART on stop attempts. */ #define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */ #define TDF_SEINTR 0x00200000 /* EINTR on stop attempts. */ #define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. 
*/ #define TDF_UNUSED6 0x00800000 /* Available */ #define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */ #define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */ #define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */ #define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */ #define TDF_UNUSED7 0x10000000 /* Available */ #define TDF_UNUSED8 0x20000000 /* Available */ #define TDF_UNUSED9 0x40000000 /* Available */ #define TDF_UNUSED10 0x80000000 /* Available */ enum { TDA_AST = 0, /* Special: call all non-flagged AST handlers */ TDA_OWEUPC, TDA_HWPMC, TDA_VFORK, TDA_ALRM, TDA_PROF, TDA_MAC, TDA_SCHED, TDA_UFS, TDA_GEOM, TDA_KQUEUE, TDA_RACCT, TDA_MOD1, /* For third party use, before signals are */ TAD_MOD2, /* processed .. */ TDA_SIG, TDA_KTRACE, TDA_SUSPEND, TDA_SIGSUSPEND, TDA_MOD3, /* .. and after */ TAD_MOD4, TDA_MAX, }; #define TDAI(tda) (1U << (tda)) #define td_ast_pending(td, tda) ((td->td_ast & TDAI(tda)) != 0) /* Userland debug flags */ #define TDB_SUSPEND 0x00000001 /* Thread is suspended by debugger */ #define TDB_XSIG 0x00000002 /* Thread is exchanging signal under trace */ #define TDB_USERWR 0x00000004 /* Debugger modified memory or registers */ #define TDB_SCE 0x00000008 /* Thread performs syscall enter */ #define TDB_SCX 0x00000010 /* Thread performs syscall exit */ #define TDB_EXEC 0x00000020 /* TDB_SCX from exec(2) family */ #define TDB_FORK 0x00000040 /* TDB_SCX from fork(2) that created new process */ #define TDB_STOPATFORK 0x00000080 /* Stop at the return from fork (child only) */ #define TDB_CHILD 0x00000100 /* New child indicator for ptrace() */ #define TDB_BORN 0x00000200 /* New LWP indicator for ptrace() */ #define TDB_EXIT 0x00000400 /* Exiting LWP indicator for ptrace() */ #define TDB_VFORK 0x00000800 /* vfork indicator for ptrace() */ #define TDB_FSTP 0x00001000 /* The thread is PT_ATTACH leader */ #define TDB_STEP 0x00002000 /* (x86) PSL_T set for PT_STEP */ #define TDB_SSWITCH 0x00004000 /* Suspended in ptracestop */ #define TDB_BOUNDARY 0x00008000 /* ptracestop() at boundary */ #define TDB_COREDUMPREQ 0x00010000 /* Coredump request */ #define TDB_SCREMOTEREQ 0x00020000 /* Remote syscall request */ /* * "Private" flags kept in td_pflags: * These are only written by curthread and thus need no locking. */ #define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */ #define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */ #define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */ #define TDP_BUFNEED 0x00000008 /* Do not recurse into the buf flush */ #define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */ #define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */ #define TDP_DEADLKTREAT 0x00000040 /* Lock acquisition - deadlock treatment. */ #define TDP_NOFAULTING 0x00000080 /* Do not handle page faults. */ #define TDP_SIGFASTBLOCK 0x00000100 /* Fast sigblock active */ #define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */ #define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */ #define TDP_SYNCIO 0x00000800 /* Local override, disable async i/o. 
*/ #define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */ #define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */ #define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */ #define TDP_SCHED4 0x00008000 /* Reserved for scheduler private use */ #define TDP_GEOM 0x00010000 /* Settle GEOM before finishing syscall */ #define TDP_SOFTDEP 0x00020000 /* Stuck processing softdep worklist */ #define TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */ #define TDP_WAKEUP 0x00080000 /* Don't sleep in umtx cond_wait */ #define TDP_INBDFLUSH 0x00100000 /* Already in BO_BDFLUSH, do not recurse */ #define TDP_KTHREAD 0x00200000 /* This is an official kernel thread */ #define TDP_CALLCHAIN 0x00400000 /* Capture thread's callchain */ #define TDP_IGNSUSP 0x00800000 /* Permission to ignore the MNTK_SUSPEND* */ #define TDP_AUDITREC 0x01000000 /* Audit record pending on thread */ #define TDP_RFPPWAIT 0x02000000 /* Handle RFPPWAIT on syscall exit */ #define TDP_RESETSPUR 0x04000000 /* Reset spurious page fault history. */ #define TDP_NERRNO 0x08000000 /* Last errno is already in td_errno */ #define TDP_UIOHELD 0x10000000 /* Current uio has pages held in td_ma */ #define TDP_INTCPCALLOUT 0x20000000 /* used by netinet/tcp_timer.c */ #define TDP_EXECVMSPC 0x40000000 /* Execve destroyed old vmspace */ #define TDP_SIGFASTPENDING 0x80000000 /* Pending signal due to sigfastblock */ #define TDP2_SBPAGES 0x00000001 /* Owns sbusy on some pages */ #define TDP2_COMPAT32RB 0x00000002 /* compat32 ABI for robust lists */ #define TDP2_ACCT 0x00000004 /* Doing accounting */ /* * Reasons that the current thread can not be run yet. * More than one may apply. */ #define TDI_SUSPENDED 0x0001 /* On suspension queue. */ #define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */ #define TDI_SWAPPED 0x0004 /* Stack not in mem. Bad juju if run. */ #define TDI_LOCK 0x0008 /* Stopped on a lock. */ #define TDI_IWAIT 0x0010 /* Awaiting interrupt. */ #define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING) #define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL) #define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED) #define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED) #define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK) #define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT) #ifdef _KERNEL #define TD_GET_STATE(td) atomic_load_int(&(td)->td_state) #else #define TD_GET_STATE(td) ((td)->td_state) #endif #define TD_IS_RUNNING(td) (TD_GET_STATE(td) == TDS_RUNNING) #define TD_ON_RUNQ(td) (TD_GET_STATE(td) == TDS_RUNQ) #define TD_CAN_RUN(td) (TD_GET_STATE(td) == TDS_CAN_RUN) #define TD_IS_INHIBITED(td) (TD_GET_STATE(td) == TDS_INHIBITED) #define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED) #define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD) #define TD_CAN_ABORT(td) (TD_ON_SLEEPQ((td)) && \ ((td)->td_flags & TDF_SINTR) != 0) #define KTDSTATE(td) \ (((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \ ((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \ ((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" : \ ((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \ ((td)->td_inhibitors & TDI_IWAIT) != 0 ? 
"iwait" : "yielding") #define TD_SET_INHIB(td, inhib) do { \ TD_SET_STATE(td, TDS_INHIBITED); \ (td)->td_inhibitors |= (inhib); \ } while (0) #define TD_CLR_INHIB(td, inhib) do { \ if (((td)->td_inhibitors & (inhib)) && \ (((td)->td_inhibitors &= ~(inhib)) == 0)) \ TD_SET_STATE(td, TDS_CAN_RUN); \ } while (0) #define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING) #define TD_SET_SWAPPED(td) TD_SET_INHIB((td), TDI_SWAPPED) #define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK) #define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED) #define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT) #define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING) #define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING) #define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED) #define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK) #define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED) #define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT) #ifdef _KERNEL #define TD_SET_STATE(td, state) atomic_store_int(&(td)->td_state, state) #else #define TD_SET_STATE(td, state) (td)->td_state = state #endif #define TD_SET_RUNNING(td) TD_SET_STATE(td, TDS_RUNNING) #define TD_SET_RUNQ(td) TD_SET_STATE(td, TDS_RUNQ) #define TD_SET_CAN_RUN(td) TD_SET_STATE(td, TDS_CAN_RUN) #define TD_SBDRY_INTR(td) \ (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0) #define TD_SBDRY_ERRNO(td) \ (((td)->td_flags & TDF_SEINTR) != 0 ? EINTR : ERESTART) /* * Process structure. */ struct proc { LIST_ENTRY(proc) p_list; /* (d) List of all processes. */ TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */ struct mtx p_slock; /* process spin lock */ struct ucred *p_ucred; /* (c) Process owner's identity. */ struct filedesc *p_fd; /* (b) Open files. */ struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */ struct pwddesc *p_pd; /* (b) Cwd, chroot, jail, umask */ struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */ struct plimit *p_limit; /* (c) Resource limits. */ struct callout p_limco; /* (c) Limit callout handle */ struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */ int p_flag; /* (c) P_* flags. */ int p_flag2; /* (c) P2_* flags. */ enum p_states { PRS_NEW = 0, /* In creation */ PRS_NORMAL, /* threads can be run. */ PRS_ZOMBIE } p_state; /* (j/c) Process status. */ pid_t p_pid; /* (b) Process identifier. */ LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */ LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */ struct proc *p_pptr; /* (c + e) Pointer to parent process. */ LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */ LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */ struct proc *p_reaper; /* (e) My reaper. */ LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants (if I am reaper). */ LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of the same reaper. */ struct mtx p_mtx; /* (n) Lock for this struct. */ struct mtx p_statmtx; /* Lock for the stats */ struct mtx p_itimmtx; /* Lock for the virt/prof timers */ struct mtx p_profmtx; /* Lock for the profiling */ struct ksiginfo *p_ksi; /* Locked by parent proc lock */ sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */ #define p_siglist p_sigqueue.sq_signals pid_t p_oppid; /* (c + e) Real parent pid. */ /* The following fields are all zeroed upon creation in fork. */ #define p_startzero p_vmspace struct vmspace *p_vmspace; /* (b) Address space. */ u_int p_swtick; /* (c) Tick when swapped in or out. */ u_int p_cowgen; /* (c) Generation of COW pointers. 
*/ struct itimerval p_realtimer; /* (c) Alarm timer. */ struct rusage p_ru; /* (a) Exit information. */ struct rusage_ext p_rux; /* (cu) Internal resource usage. */ struct rusage_ext p_crux; /* (c) Internal child resource usage. */ int p_profthreads; /* (c) Num threads in addupc_task. */ volatile int p_exitthreads; /* (j) Number of threads exiting */ int p_traceflag; /* (o) Kernel trace points. */ struct ktr_io_params *p_ktrioparms; /* (c + o) Params for ktrace. */ struct vnode *p_textvp; /* (b) Vnode of executable. */ struct vnode *p_textdvp; /* (b) Dir containing textvp. */ char *p_binname; /* (b) Binary hardlink name. */ u_int p_lock; /* (c) Proclock (prevent swap) count. */ struct sigiolst p_sigiolst; /* (c) List of sigio sources. */ int p_sigparent; /* (c) Signal to parent on exit. */ int p_sig; /* (n) For core dump/debugger XXX. */ u_int p_ptevents; /* (c + e) ptrace() event mask. */ struct kaioinfo *p_aioinfo; /* (y) ASYNC I/O info. */ struct thread *p_singlethread;/* (c + j) If single threading this is it */ int p_suspcount; /* (j) Num threads in suspended mode. */ struct thread *p_xthread; /* (c) Trap thread */ int p_boundary_count;/* (j) Num threads at user boundary */ int p_pendingcnt; /* how many signals are pending */ struct itimers *p_itimers; /* (c) POSIX interval timers. */ struct procdesc *p_procdesc; /* (e) Process descriptor, if any. */ u_int p_treeflag; /* (e) P_TREE flags */ int p_pendingexits; /* (c) Count of pending thread exits. */ struct filemon *p_filemon; /* (c) filemon-specific data. */ int p_pdeathsig; /* (c) Signal from parent on exit. */ /* End area that is zeroed on creation. */ #define p_endzero p_magic /* The following fields are all copied upon creation in fork. */ #define p_startcopy p_endzero u_int p_magic; /* (b) Magic number. */ int p_osrel; /* (x) osreldate for the binary (from ELF note, if any) */ uint32_t p_fctl0; /* (x) ABI feature control, ELF note */ char p_comm[MAXCOMLEN + 1]; /* (x) Process name. */ struct sysentvec *p_sysent; /* (b) Syscall dispatch info. */ struct pargs *p_args; /* (c) Process arguments. */ rlim_t p_cpulimit; /* (c) Current CPU limit in seconds. */ signed char p_nice; /* (c) Process "nice" value. */ int p_fibnum; /* in this routing domain XXX MRT */ pid_t p_reapsubtree; /* (e) Pid of the direct child of the reaper which spawned our subtree. */ uint64_t p_elf_flags; /* (x) ELF flags */ void *p_elf_brandinfo; /* (x) Elf_Brandinfo, NULL for non ELF binaries. */ sbintime_t p_umtx_min_timeout; /* End area that is copied on creation. */ #define p_endcopy p_xexit u_int p_xexit; /* (c) Exit code. */ u_int p_xsig; /* (c) Stop/kill sig. */ struct pgrp *p_pgrp; /* (c + e) Pointer to process group. */ struct knlist *p_klist; /* (c) Knotes attached to this proc. */ int p_numthreads; /* (c) Number of threads. */ struct mdproc p_md; /* Any machine-dependent fields. */ struct callout p_itcallout; /* (h + c) Interval timer callout. */ u_short p_acflag; /* (c) Accounting flags. */ struct proc *p_peers; /* (r) */ struct proc *p_leader; /* (b) */ void *p_emuldata; /* (c) Emulator state data. */ struct label *p_label; /* (*) Proc (not subject) MAC label. */ STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */ LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/ struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */ struct cv p_pwait; /* (*) wait cv for exit/exec. */ uint64_t p_prev_runtime; /* (c) Resource usage accounting. */ struct racct *p_racct; /* (b) Resource accounting. 
*/ int p_throttled; /* (c) Flag for racct pcpu throttling */ /* * An orphan is the child that has been re-parented to the * debugger as a result of attaching to it. Need to keep * track of them for parent to be able to collect the exit * status of what used to be children. */ LIST_ENTRY(proc) p_orphan; /* (e) List of orphan processes. */ LIST_HEAD(, proc) p_orphans; /* (e) Pointer to list of orphans. */ TAILQ_HEAD(, kq_timer_cb_data) p_kqtim_stop; /* (c) */ LIST_ENTRY(proc) p_jaillist; /* (d) Jail process linkage. */ }; #define p_session p_pgrp->pg_session #define p_pgid p_pgrp->pg_id #define NOCPU (-1) /* For when we aren't on a CPU. */ #define NOCPU_OLD (255) #define MAXCPU_OLD (254) #define PROC_SLOCK(p) mtx_lock_spin(&(p)->p_slock) #define PROC_SUNLOCK(p) mtx_unlock_spin(&(p)->p_slock) #define PROC_SLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type)) #define PROC_STATLOCK(p) mtx_lock_spin(&(p)->p_statmtx) #define PROC_STATUNLOCK(p) mtx_unlock_spin(&(p)->p_statmtx) #define PROC_STATLOCK_ASSERT(p, type) mtx_assert(&(p)->p_statmtx, (type)) #define PROC_ITIMLOCK(p) mtx_lock_spin(&(p)->p_itimmtx) #define PROC_ITIMUNLOCK(p) mtx_unlock_spin(&(p)->p_itimmtx) #define PROC_ITIMLOCK_ASSERT(p, type) mtx_assert(&(p)->p_itimmtx, (type)) #define PROC_PROFLOCK(p) mtx_lock_spin(&(p)->p_profmtx) #define PROC_PROFUNLOCK(p) mtx_unlock_spin(&(p)->p_profmtx) #define PROC_PROFLOCK_ASSERT(p, type) mtx_assert(&(p)->p_profmtx, (type)) /* These flags are kept in p_flag. */ #define P_ADVLOCK 0x00000001 /* Process may hold a POSIX advisory lock. */ #define P_CONTROLT 0x00000002 /* Has a controlling terminal. */ #define P_KPROC 0x00000004 /* Kernel process. */ #define P_UNUSED3 0x00000008 /* --available-- */ #define P_PPWAIT 0x00000010 /* Parent is waiting for child to exec/exit. */ #define P_PROFIL 0x00000020 /* Has started profiling. */ #define P_STOPPROF 0x00000040 /* Has thread requesting to stop profiling. */ #define P_HADTHREADS 0x00000080 /* Has had threads (no cleanup shortcuts) */ #define P_SUGID 0x00000100 /* Had set id privileges since last exec. */ #define P_SYSTEM 0x00000200 /* System proc: no sigs, stats or swapping. */ #define P_SINGLE_EXIT 0x00000400 /* Threads suspending should exit, not wait. */ #define P_TRACED 0x00000800 /* Debugged process being traced. */ #define P_WAITED 0x00001000 /* Someone is waiting for us. */ #define P_WEXIT 0x00002000 /* Working on exiting. */ #define P_EXEC 0x00004000 /* Process called exec. */ #define P_WKILLED 0x00008000 /* Killed, go to kernel/user boundary ASAP. */ #define P_CONTINUED 0x00010000 /* Proc has continued from a stopped state. */ #define P_STOPPED_SIG 0x00020000 /* Stopped due to SIGSTOP/SIGTSTP. */ #define P_STOPPED_TRACE 0x00040000 /* Stopped because of tracing. */ #define P_STOPPED_SINGLE 0x00080000 /* Only 1 thread can continue (not to user). */ #define P_PROTECTED 0x00100000 /* Do not kill on memory overcommit. */ #define P_SIGEVENT 0x00200000 /* Process pending signals changed. */ #define P_SINGLE_BOUNDARY 0x00400000 /* Threads should suspend at user boundary. */ #define P_HWPMC 0x00800000 /* Process is using HWPMCs */ #define P_JAILED 0x01000000 /* Process is in jail. */ #define P_TOTAL_STOP 0x02000000 /* Stopped in stop_all_proc. */ #define P_INEXEC 0x04000000 /* Process is in execve(). */ #define P_STATCHILD 0x08000000 /* Child process stopped or exited. */ #define P_INMEM 0x10000000 /* Loaded into memory. */ #define P_SWAPPINGOUT 0x20000000 /* Process is being swapped out. */ #define P_SWAPPINGIN 0x40000000 /* Process is being swapped in. 
*/ #define P_PPTRACE 0x80000000 /* PT_TRACEME by vforked child. */ #define P_STOPPED (P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE) #define P_SHOULDSTOP(p) ((p)->p_flag & P_STOPPED) #define P_KILLED(p) ((p)->p_flag & P_WKILLED) /* These flags are kept in p_flag2. */ #define P2_INHERIT_PROTECTED 0x00000001 /* New children get P_PROTECTED. */ #define P2_NOTRACE 0x00000002 /* No ptrace(2) attach or coredumps. */ #define P2_NOTRACE_EXEC 0x00000004 /* Keep P2_NOPTRACE on exec(2). */ #define P2_AST_SU 0x00000008 /* Handles SU ast for kthreads. */ #define P2_PTRACE_FSTP 0x00000010 /* SIGSTOP from PT_ATTACH not yet handled. */ #define P2_TRAPCAP 0x00000020 /* SIGTRAP on ENOTCAPABLE */ #define P2_ASLR_ENABLE 0x00000040 /* Force enable ASLR. */ #define P2_ASLR_DISABLE 0x00000080 /* Force disable ASLR. */ #define P2_ASLR_IGNSTART 0x00000100 /* Enable ASLR to consume sbrk area. */ #define P2_PROTMAX_ENABLE 0x00000200 /* Force enable implied PROT_MAX. */ #define P2_PROTMAX_DISABLE 0x00000400 /* Force disable implied PROT_MAX. */ #define P2_STKGAP_DISABLE 0x00000800 /* Disable stack gap for MAP_STACK */ #define P2_STKGAP_DISABLE_EXEC 0x00001000 /* Stack gap disabled after exec */ #define P2_ITSTOPPED 0x00002000 #define P2_PTRACEREQ 0x00004000 /* Active ptrace req */ #define P2_NO_NEW_PRIVS 0x00008000 /* Ignore setuid */ #define P2_WXORX_DISABLE 0x00010000 /* WX mappings enabled */ #define P2_WXORX_ENABLE_EXEC 0x00020000 /* WXORX enabled after exec */ #define P2_WEXIT 0x00040000 /* exit just started, no external thread_single() is permitted */ #define P2_REAPKILLED 0x00080000 #define P2_MEMBAR_PRIVE 0x00100000 /* membar private expedited registered */ #define P2_MEMBAR_PRIVE_SYNCORE 0x00200000 /* membar private expedited sync core registered */ #define P2_MEMBAR_GLOBE 0x00400000 /* membar global expedited registered */ /* Flags protected by proctree_lock, kept in p_treeflags. */ #define P_TREE_ORPHANED 0x00000001 /* Reparented, on orphan list */ #define P_TREE_FIRST_ORPHAN 0x00000002 /* First element of orphan list */ #define P_TREE_REAPER 0x00000004 /* Reaper of subtree */ #define P_TREE_GRPEXITED 0x00000008 /* exit1() done with job ctl */ /* * These were process status values (p_stat), now they are only used in * legacy conversion code. */ #define SIDL 1 /* Process being created by fork. */ #define SRUN 2 /* Currently runnable. */ #define SSLEEP 3 /* Sleeping on an address. */ #define SSTOP 4 /* Process debugging or suspension. */ #define SZOMB 5 /* Awaiting collection by parent. */ #define SWAIT 6 /* Waiting for interrupt. */ #define SLOCK 7 /* Blocked on a lock. */ #define P_MAGIC 0xbeefface #ifdef _KERNEL /* Types and flags for mi_switch(9). */ #define SW_TYPE_MASK 0xff /* First 8 bits are switch type */ #define SWT_OWEPREEMPT 1 /* Switching due to owepreempt. */ #define SWT_TURNSTILE 2 /* Turnstile contention. */ #define SWT_SLEEPQ 3 /* Sleepq wait. */ #define SWT_RELINQUISH 4 /* yield call. */ #define SWT_NEEDRESCHED 5 /* NEEDRESCHED was set. */ #define SWT_IDLE 6 /* Switching from the idle thread. */ #define SWT_IWAIT 7 /* Waiting for interrupts. */ #define SWT_SUSPEND 8 /* Thread suspended. */ #define SWT_REMOTEPREEMPT 9 /* Remote processor preempted. */ #define SWT_REMOTEWAKEIDLE 10 /* Remote processor preempted idle. */ #define SWT_BIND 11 /* Thread bound to a new CPU. */ #define SWT_COUNT 12 /* Number of switch types. */ /* Flags */ #define SW_VOL 0x0100 /* Voluntary switch. */ #define SW_INVOL 0x0200 /* Involuntary switch. 
*/ #define SW_PREEMPT 0x0400 /* The invol switch is a preemption */ /* How values for thread_single(). */ #define SINGLE_NO_EXIT 0 #define SINGLE_EXIT 1 #define SINGLE_BOUNDARY 2 #define SINGLE_ALLPROC 3 #define FOREACH_PROC_IN_SYSTEM(p) \ LIST_FOREACH((p), &allproc, p_list) #define FOREACH_THREAD_IN_PROC(p, td) \ TAILQ_FOREACH((td), &(p)->p_threads, td_plist) #define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads) /* * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit * in a pid_t, as it is used to represent "no process group". */ #define PID_MAX 99999 #define NO_PID (PID_MAX + 1) #define THREAD0_TID NO_PID extern pid_t pid_max; #define SESS_LEADER(p) ((p)->p_session->s_leader == (p)) /* Lock and unlock a process. */ #define PROC_LOCK(p) mtx_lock(&(p)->p_mtx) #define PROC_TRYLOCK(p) mtx_trylock(&(p)->p_mtx) #define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx) #define PROC_LOCKED(p) mtx_owned(&(p)->p_mtx) #define PROC_WAIT_UNLOCKED(p) mtx_wait_unlocked(&(p)->p_mtx) #define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type)) /* Lock and unlock a process group. */ #define PGRP_LOCK(pg) mtx_lock(&(pg)->pg_mtx) #define PGRP_UNLOCK(pg) mtx_unlock(&(pg)->pg_mtx) #define PGRP_LOCKED(pg) mtx_owned(&(pg)->pg_mtx) #define PGRP_LOCK_ASSERT(pg, type) mtx_assert(&(pg)->pg_mtx, (type)) #define PGRP_LOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_LOCK(pg); \ } while (0) #define PGRP_UNLOCK_PGSIGNAL(pg) do { \ if ((pg) != NULL) \ PGRP_UNLOCK(pg); \ } while (0) /* Lock and unlock a session. */ #define SESS_LOCK(s) mtx_lock(&(s)->s_mtx) #define SESS_UNLOCK(s) mtx_unlock(&(s)->s_mtx) #define SESS_LOCKED(s) mtx_owned(&(s)->s_mtx) #define SESS_LOCK_ASSERT(s, type) mtx_assert(&(s)->s_mtx, (type)) /* * Non-zero p_lock ensures that: * - exit1() is not performed until p_lock reaches zero; * - the process' threads stack are not swapped out if they are currently * not (P_INMEM). * * PHOLD() asserts that the process (except the current process) is * not exiting, increments p_lock and swaps threads stacks into memory, * if needed. * _PHOLD() is same as PHOLD(), it takes the process locked. * _PHOLD_LITE() also takes the process locked, but comparing with * _PHOLD(), it only guarantees that exit1() is not executed, * faultin() is not called. 
*/ #define PHOLD(p) do { \ PROC_LOCK(p); \ _PHOLD(p); \ PROC_UNLOCK(p); \ } while (0) #define _PHOLD(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \ ("PHOLD of exiting process %p", p)); \ (p)->p_lock++; \ if (((p)->p_flag & P_INMEM) == 0) \ faultin((p)); \ } while (0) #define _PHOLD_LITE(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \ ("PHOLD of exiting process %p", p)); \ (p)->p_lock++; \ } while (0) #define PROC_ASSERT_HELD(p) do { \ KASSERT((p)->p_lock > 0, ("process %p not held", p)); \ } while (0) #define PRELE(p) do { \ PROC_LOCK((p)); \ _PRELE((p)); \ PROC_UNLOCK((p)); \ } while (0) #define _PRELE(p) do { \ PROC_LOCK_ASSERT((p), MA_OWNED); \ PROC_ASSERT_HELD(p); \ (--(p)->p_lock); \ if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0) \ wakeup(&(p)->p_lock); \ } while (0) #define PROC_ASSERT_NOT_HELD(p) do { \ KASSERT((p)->p_lock == 0, ("process %p held", p)); \ } while (0) #define PROC_UPDATE_COW(p) do { \ struct proc *_p = (p); \ PROC_LOCK_ASSERT((_p), MA_OWNED); \ atomic_store_int(&_p->p_cowgen, _p->p_cowgen + 1); \ } while (0) #define PROC_COW_CHANGECOUNT(td, p) ({ \ struct thread *_td = (td); \ struct proc *_p = (p); \ MPASS(_td == curthread); \ PROC_LOCK_ASSERT(_p, MA_OWNED); \ _p->p_cowgen - _td->td_cowgen; \ }) /* Check whether a thread is safe to be swapped out. */ #define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP) /* Control whether or not it is safe for curthread to sleep. */ #define THREAD_NO_SLEEPING() do { \ curthread->td_no_sleeping++; \ MPASS(curthread->td_no_sleeping > 0); \ } while (0) #define THREAD_SLEEPING_OK() do { \ MPASS(curthread->td_no_sleeping > 0); \ curthread->td_no_sleeping--; \ } while (0) #define THREAD_CAN_SLEEP() ((curthread)->td_no_sleeping == 0) #define THREAD_CONTENDS_ON_LOCK(lo) do { \ MPASS(curthread->td_wantedlock == NULL); \ curthread->td_wantedlock = lo; \ } while (0) #define THREAD_CONTENTION_DONE(lo) do { \ MPASS(curthread->td_wantedlock == lo); \ curthread->td_wantedlock = NULL; \ } while (0) #define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) #define PIDHASHLOCK(pid) (&pidhashtbl_lock[((pid) & pidhashlock)]) extern LIST_HEAD(pidhashhead, proc) *pidhashtbl; extern struct sx *pidhashtbl_lock; extern u_long pidhash; extern u_long pidhashlock; #define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash]) extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl; extern u_long pgrphash; extern struct sx allproc_lock; extern int allproc_gen; extern struct sx proctree_lock; extern struct mtx ppeers_lock; extern struct mtx procid_lock; extern struct proc proc0; /* Process slot for swapper. */ extern struct thread0_storage thread0_st; /* Primary thread in proc0. */ #define thread0 (thread0_st.t0st_thread) extern struct vmspace vmspace0; /* VM space for proc0. */ extern int hogticks; /* Limit on kernel cpu hogs. */ extern int lastpid; extern int nprocs, maxproc; /* Current and max number of procs. */ extern int maxprocperuid; /* Max procs per uid. */ extern u_long ps_arg_cache_limit; LIST_HEAD(proclist, proc); TAILQ_HEAD(procqueue, proc); TAILQ_HEAD(threadqueue, thread); extern struct proclist allproc; /* List of all processes. */ extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */ extern struct uma_zone *proc_zone; extern struct uma_zone *pgrp_zone; struct proc *pfind(pid_t); /* Find process by id. */ struct proc *pfind_any(pid_t); /* Find (zombie) process by id. 
*/ struct proc *pfind_any_locked(pid_t pid); /* Find process by id, locked. */ struct pgrp *pgfind(pid_t); /* Find process group by id. */ void pidhash_slockall(void); /* Shared lock all pid hash lists. */ void pidhash_sunlockall(void); /* Shared unlock all pid hash lists. */ struct fork_req { int fr_flags; int fr_pages; int *fr_pidp; struct proc **fr_procp; int *fr_pd_fd; int fr_pd_flags; struct filecaps *fr_pd_fcaps; int fr_flags2; #define FR2_DROPSIG_CAUGHT 0x00000001 /* Drop caught non-DFL signals */ #define FR2_SHARE_PATHS 0x00000002 /* Invert sense of RFFDG for paths */ #define FR2_KPROC 0x00000004 /* Create a kernel process */ }; /* * pget() flags. */ #define PGET_HOLD 0x00001 /* Hold the process. */ #define PGET_CANSEE 0x00002 /* Check against p_cansee(). */ #define PGET_CANDEBUG 0x00004 /* Check against p_candebug(). */ #define PGET_ISCURRENT 0x00008 /* Check that the found process is current. */ #define PGET_NOTWEXIT 0x00010 /* Check that the process is not in P_WEXIT. */ #define PGET_NOTINEXEC 0x00020 /* Check that the process is not in P_INEXEC. */ #define PGET_NOTID 0x00040 /* Do not assume tid if pid > PID_MAX. */ #define PGET_WANTREAD (PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT) int pget(pid_t pid, int flags, struct proc **pp); /* ast_register() flags */ #define ASTR_ASTF_REQUIRED 0x0001 /* td_ast TDAI(TDA_X) flag set is required for call */ #define ASTR_TDP 0x0002 /* td_pflags flag set is required */ #define ASTR_KCLEAR 0x0004 /* call me on ast_kclear() */ #define ASTR_UNCOND 0x0008 /* call me always */ void ast(struct trapframe *framep); void ast_kclear(struct thread *td); void ast_register(int ast, int ast_flags, int tdp, void (*f)(struct thread *td, int asts)); void ast_deregister(int tda); void ast_sched_locked(struct thread *td, int tda); void ast_sched_mask(struct thread *td, int ast); void ast_sched(struct thread *td, int tda); void ast_unsched_locked(struct thread *td, int tda); struct thread *choosethread(void); int cr_bsd_visible(struct ucred *u1, struct ucred *u2); int cr_cansee(struct ucred *u1, struct ucred *u2); int cr_canseesocket(struct ucred *cred, struct socket *so); int cr_cansignal(struct ucred *cred, struct proc *proc, int signum); int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *sess); int enterthispgrp(struct proc *p, struct pgrp *pgrp); void faultin(struct proc *p); int fork1(struct thread *, struct fork_req *); void fork_exit(void (*)(void *, struct trapframe *), void *, struct trapframe *); void fork_return(struct thread *, struct trapframe *); int inferior(struct proc *p); void itimer_proc_continue(struct proc *p); void kqtimer_proc_continue(struct proc *p); void kern_proc_vmmap_resident(struct vm_map *map, struct vm_map_entry *entry, int *resident_count, bool *super); void kern_yield(int); void kick_proc0(void); void killjobc(void); int leavepgrp(struct proc *p); int maybe_preempt(struct thread *td); void maybe_yield(void); void mi_switch(int flags); int p_candebug(struct thread *td, struct proc *p); int p_cansee(struct thread *td, struct proc *p); int p_cansched(struct thread *td, struct proc *p); int p_cansignal(struct thread *td, struct proc *p, int signum); int p_canwait(struct thread *td, struct proc *p); struct pargs *pargs_alloc(int len); void pargs_drop(struct pargs *pa); void pargs_hold(struct pargs *pa); void proc_add_orphan(struct proc *child, struct proc *parent); int proc_get_binpath(struct proc *p, char *binname, char **fullpath, char **freepath); int proc_getargv(struct thread *td, struct proc *p, struct 
sbuf *sb); int proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb); int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb); void procinit(void); int proc_iterate(int (*cb)(struct proc *, void *), void *cbarg); void proc_linkup0(struct proc *p, struct thread *td); void proc_linkup(struct proc *p, struct thread *td); struct proc *proc_realparent(struct proc *child); void proc_reap(struct thread *td, struct proc *p, int *status, int options); void proc_reparent(struct proc *child, struct proc *newparent, bool set_oppid); void proc_set_p2_wexit(struct proc *p); void proc_set_traced(struct proc *p, bool stop); void proc_wkilled(struct proc *p); struct pstats *pstats_alloc(void); void pstats_fork(struct pstats *src, struct pstats *dst); void pstats_free(struct pstats *ps); void proc_clear_orphan(struct proc *p); void reaper_abandon_children(struct proc *p, bool exiting); int securelevel_ge(struct ucred *cr, int level); int securelevel_gt(struct ucred *cr, int level); void sess_hold(struct session *); void sess_release(struct session *); int setrunnable(struct thread *, int); void setsugid(struct proc *p); bool should_yield(void); int sigonstack(size_t sp); void stopevent(struct proc *, u_int, u_int); struct thread *tdfind(lwpid_t, pid_t); void threadinit(void); void tidhash_add(struct thread *); void tidhash_remove(struct thread *); void cpu_idle(int); int cpu_idle_wakeup(int); extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. */ void cpu_switch(struct thread *, struct thread *, struct mtx *); void cpu_sync_core(void); void cpu_throw(struct thread *, struct thread *) __dead2; bool curproc_sigkilled(void); void userret(struct thread *, struct trapframe *); void cpu_exit(struct thread *); void exit1(struct thread *, int, int) __dead2; void cpu_copy_thread(struct thread *td, struct thread *td0); bool cpu_exec_vmspace_reuse(struct proc *p, struct vm_map *map); int cpu_fetch_syscall_args(struct thread *td); void cpu_fork(struct thread *, struct proc *, struct thread *, int); void cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *); int cpu_procctl(struct thread *td, int idtype, id_t id, int com, void *data); void cpu_set_syscall_retval(struct thread *, int); int cpu_set_upcall(struct thread *, void (*)(void *), void *, stack_t *); int cpu_set_user_tls(struct thread *, void *tls_base); void cpu_thread_alloc(struct thread *); void cpu_thread_clean(struct thread *); void cpu_thread_exit(struct thread *); void cpu_thread_free(struct thread *); void cpu_thread_swapin(struct thread *); void cpu_thread_swapout(struct thread *); struct thread *thread_alloc(int pages); -int thread_alloc_stack(struct thread *, int pages); int thread_check_susp(struct thread *td, bool sleep); void thread_cow_get_proc(struct thread *newtd, struct proc *p); void thread_cow_get(struct thread *newtd, struct thread *td); void thread_cow_free(struct thread *td); void thread_cow_update(struct thread *td); void thread_cow_synced(struct thread *td); int thread_create(struct thread *td, struct rtprio *rtp, int (*initialize_thread)(struct thread *, void *), void *thunk); void thread_exit(void) __dead2; void thread_free(struct thread *td); void thread_link(struct thread *td, struct proc *p); void thread_reap_barrier(void); +int thread_recycle(struct thread *, int pages); int thread_single(struct proc *p, int how); void thread_single_end(struct proc *p, int how); void thread_stash(struct thread *td); void thread_stopped(struct proc *p); void childproc_stopped(struct proc 
*child, int reason); void childproc_continued(struct proc *child); void childproc_exited(struct proc *child); void thread_run_flash(struct thread *td); int thread_suspend_check(int how); bool thread_suspend_check_needed(void); void thread_suspend_switch(struct thread *, struct proc *p); void thread_suspend_one(struct thread *td); void thread_unlink(struct thread *td); void thread_unsuspend(struct proc *p); void thread_wait(struct proc *p); bool stop_all_proc_block(void); void stop_all_proc_unblock(void); void stop_all_proc(void); void resume_all_proc(void); static __inline int curthread_pflags_set(int flags) { struct thread *td; int save; td = curthread; save = ~flags | (td->td_pflags & flags); td->td_pflags |= flags; return (save); } static __inline void curthread_pflags_restore(int save) { curthread->td_pflags &= save; } static __inline int curthread_pflags2_set(int flags) { struct thread *td; int save; td = curthread; save = ~flags | (td->td_pflags2 & flags); td->td_pflags2 |= flags; return (save); } static __inline void curthread_pflags2_restore(int save) { curthread->td_pflags2 &= save; } static __inline __pure2 struct td_sched * td_get_sched(struct thread *td) { return ((struct td_sched *)&td[1]); } #define PROC_ID_PID 0 #define PROC_ID_GROUP 1 #define PROC_ID_SESSION 2 #define PROC_ID_REAP 3 void proc_id_set(int type, pid_t id); void proc_id_set_cond(int type, pid_t id); void proc_id_clear(int type, pid_t id); EVENTHANDLER_LIST_DECLARE(process_ctor); EVENTHANDLER_LIST_DECLARE(process_dtor); EVENTHANDLER_LIST_DECLARE(process_init); EVENTHANDLER_LIST_DECLARE(process_fini); EVENTHANDLER_LIST_DECLARE(process_exit); EVENTHANDLER_LIST_DECLARE(process_fork); EVENTHANDLER_LIST_DECLARE(process_exec); EVENTHANDLER_LIST_DECLARE(thread_ctor); EVENTHANDLER_LIST_DECLARE(thread_dtor); EVENTHANDLER_LIST_DECLARE(thread_init); #endif /* _KERNEL */ #endif /* !_SYS_PROC_H_ */ diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c index 4292a7533503..f9235fe03fab 100644 --- a/sys/vm/vm_glue.c +++ b/sys/vm/vm_glue.c @@ -1,836 +1,835 @@ /*- * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) * * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ #include #include "opt_vm.h" #include "opt_kstack_pages.h" #include "opt_kstack_max_pages.h" #include "opt_kstack_usage_prof.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if VM_NRESERVLEVEL > 0 #define KVA_KSTACK_QUANTUM_SHIFT (VM_LEVEL_0_ORDER + PAGE_SHIFT) #else #define KVA_KSTACK_QUANTUM_SHIFT (8 + PAGE_SHIFT) #endif #define KVA_KSTACK_QUANTUM (1ul << KVA_KSTACK_QUANTUM_SHIFT) /* * MPSAFE * * WARNING! This code calls vm_map_check_protection() which only checks * the associated vm_map_entry range. It does not determine whether the * contents of the memory is actually readable or writable. In most cases * just checking the vm_map_entry is sufficient within the kernel's address * space. */ int kernacc(void *addr, int len, int rw) { boolean_t rv; vm_offset_t saddr, eaddr; vm_prot_t prot; KASSERT((rw & ~VM_PROT_ALL) == 0, ("illegal ``rw'' argument to kernacc (%x)\n", rw)); if ((vm_offset_t)addr + len > vm_map_max(kernel_map) || (vm_offset_t)addr + len < (vm_offset_t)addr) return (FALSE); prot = rw; saddr = trunc_page((vm_offset_t)addr); eaddr = round_page((vm_offset_t)addr + len); vm_map_lock_read(kernel_map); rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot); vm_map_unlock_read(kernel_map); return (rv == TRUE); } /* * MPSAFE * * WARNING! This code calls vm_map_check_protection() which only checks * the associated vm_map_entry range. It does not determine whether the * contents of the memory is actually readable or writable. vmapbuf(), * vm_fault_quick(), or copyin()/copout()/su*()/fu*() functions should be * used in conjunction with this call. 
*/ int useracc(void *addr, int len, int rw) { boolean_t rv; vm_prot_t prot; vm_map_t map; KASSERT((rw & ~VM_PROT_ALL) == 0, ("illegal ``rw'' argument to useracc (%x)\n", rw)); prot = rw; map = &curproc->p_vmspace->vm_map; if ((vm_offset_t)addr + len > vm_map_max(map) || (vm_offset_t)addr + len < (vm_offset_t)addr) { return (FALSE); } vm_map_lock_read(map); rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot); vm_map_unlock_read(map); return (rv == TRUE); } int vslock(void *addr, size_t len) { vm_offset_t end, last, start; vm_size_t npages; int error; last = (vm_offset_t)addr + len; start = trunc_page((vm_offset_t)addr); end = round_page(last); if (last < (vm_offset_t)addr || end < (vm_offset_t)addr) return (EINVAL); npages = atop(end - start); if (npages > vm_page_max_user_wired) return (ENOMEM); error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end, VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); if (error == KERN_SUCCESS) { curthread->td_vslock_sz += len; return (0); } /* * Return EFAULT on error to match copy{in,out}() behaviour * rather than returning ENOMEM like mlock() would. */ return (EFAULT); } void vsunlock(void *addr, size_t len) { /* Rely on the parameter sanity checks performed by vslock(). */ MPASS(curthread->td_vslock_sz >= len); curthread->td_vslock_sz -= len; (void)vm_map_unwire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); } /* * Pin the page contained within the given object at the given offset. If the * page is not resident, allocate and load it using the given object's pager. * Return the pinned page if successful; otherwise, return NULL. */ static vm_page_t vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset) { vm_page_t m; vm_pindex_t pindex; pindex = OFF_TO_IDX(offset); (void)vm_page_grab_valid_unlocked(&m, object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED); return (m); } /* * Return a CPU private mapping to the page at the given offset within the * given object. The page is pinned before it is mapped. */ struct sf_buf * vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset) { vm_page_t m; m = vm_imgact_hold_page(object, offset); if (m == NULL) return (NULL); sched_pin(); return (sf_buf_alloc(m, SFB_CPUPRIVATE)); } /* * Destroy the given CPU private mapping and unpin the page that it mapped. */ void vm_imgact_unmap_page(struct sf_buf *sf) { vm_page_t m; m = sf_buf_page(sf); sf_buf_free(sf); sched_unpin(); vm_page_unwire(m, PQ_ACTIVE); } void vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz) { pmap_sync_icache(map->pmap, va, sz); } static vm_object_t kstack_object; static vm_object_t kstack_alt_object; static uma_zone_t kstack_cache; static int kstack_cache_size; static vmem_t *vmd_kstack_arena[MAXMEMDOM]; static int sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS) { int error, oldsize; oldsize = kstack_cache_size; error = sysctl_handle_int(oidp, arg1, arg2, req); if (error == 0 && req->newptr && oldsize != kstack_cache_size) uma_zone_set_maxcache(kstack_cache, kstack_cache_size); return (error); } SYSCTL_PROC(_vm, OID_AUTO, kstack_cache_size, CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &kstack_cache_size, 0, sysctl_kstack_cache_size, "IU", "Maximum number of cached kernel stacks"); /* * Allocate a virtual address range from a domain kstack arena, following * the specified NUMA policy. 
*/ static vm_offset_t vm_thread_alloc_kstack_kva(vm_size_t size, int domain) { #ifndef __ILP32__ int rv; vmem_t *arena; vm_offset_t addr = 0; size = round_page(size); /* Allocate from the kernel arena for non-standard kstack sizes. */ if (size != ptoa(kstack_pages + KSTACK_GUARD_PAGES)) { arena = vm_dom[domain].vmd_kernel_arena; } else { arena = vmd_kstack_arena[domain]; } rv = vmem_alloc(arena, size, M_BESTFIT | M_NOWAIT, &addr); if (rv == ENOMEM) return (0); KASSERT(atop(addr - VM_MIN_KERNEL_ADDRESS) % (kstack_pages + KSTACK_GUARD_PAGES) == 0, ("%s: allocated kstack KVA not aligned to multiple of kstack size", __func__)); return (addr); #else return (kva_alloc(size)); #endif } /* * Release a region of kernel virtual memory * allocated from the kstack arena. */ static __noinline void vm_thread_free_kstack_kva(vm_offset_t addr, vm_size_t size, int domain) { vmem_t *arena; size = round_page(size); #ifdef __ILP32__ arena = kernel_arena; #else arena = vmd_kstack_arena[domain]; if (size != ptoa(kstack_pages + KSTACK_GUARD_PAGES)) { arena = vm_dom[domain].vmd_kernel_arena; } #endif vmem_free(arena, addr, size); } static vmem_size_t vm_thread_kstack_import_quantum(void) { #ifndef __ILP32__ /* * The kstack_quantum is larger than KVA_QUANTUM to account * for holes induced by guard pages. */ return (KVA_KSTACK_QUANTUM * (kstack_pages + KSTACK_GUARD_PAGES)); #else return (KVA_KSTACK_QUANTUM); #endif } /* * Import KVA from a parent arena into the kstack arena. Imports must be * a multiple of kernel stack pages + guard pages in size. * * Kstack VA allocations need to be aligned so that the linear KVA pindex * is divisible by the total number of kstack VA pages. This is necessary to * make vm_kstack_pindex work properly. * * We import a multiple of KVA_KSTACK_QUANTUM-sized region from the parent * arena. The actual size used by the kstack arena is one kstack smaller to * allow for the necessary alignment adjustments to be made. */ static int vm_thread_kstack_arena_import(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp) { int error, rem; size_t kpages = kstack_pages + KSTACK_GUARD_PAGES; KASSERT(atop(size) % kpages == 0, ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__, (intmax_t)size, (int)kpages)); error = vmem_xalloc(arena, vm_thread_kstack_import_quantum(), KVA_KSTACK_QUANTUM, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, addrp); if (error) { return (error); } rem = atop(*addrp - VM_MIN_KERNEL_ADDRESS) % kpages; if (rem != 0) { /* Bump addr to next aligned address */ *addrp = *addrp + (kpages - rem) * PAGE_SIZE; } return (0); } /* * Release KVA from a parent arena into the kstack arena. Released imports must * be a multiple of kernel stack pages + guard pages in size. */ static void vm_thread_kstack_arena_release(void *arena, vmem_addr_t addr, vmem_size_t size) { int rem; size_t kpages __diagused = kstack_pages + KSTACK_GUARD_PAGES; KASSERT(size % kpages == 0, ("%s: Size %jd is not a multiple of kstack pages (%d)", __func__, (intmax_t)size, (int)kpages)); KASSERT((addr - VM_MIN_KERNEL_ADDRESS) % kpages == 0, ("%s: Address %p is not properly aligned (%p)", __func__, (void *)addr, (void *)VM_MIN_KERNEL_ADDRESS)); /* * If the address is not KVA_KSTACK_QUANTUM-aligned we have to decrement * it to account for the shift in kva_import_kstack. 
*/ rem = addr % KVA_KSTACK_QUANTUM; if (rem) { KASSERT(rem <= ptoa(kpages), ("%s: rem > kpages (%d), (%d)", __func__, rem, (int)kpages)); addr -= rem; } vmem_xfree(arena, addr, vm_thread_kstack_import_quantum()); } /* * Create the kernel stack for a new thread. */ static vm_offset_t vm_thread_stack_create(struct domainset *ds, int pages) { vm_page_t ma[KSTACK_MAX_PAGES]; struct vm_domainset_iter di; int req = VM_ALLOC_NORMAL; vm_object_t obj; vm_offset_t ks; int domain, i; obj = vm_thread_kstack_size_to_obj(pages); if (vm_ndomains > 1) obj->domain.dr_policy = ds; vm_domainset_iter_page_init(&di, obj, 0, &domain, &req); do { /* * Get a kernel virtual address for this thread's kstack. */ ks = vm_thread_alloc_kstack_kva(ptoa(pages + KSTACK_GUARD_PAGES), domain); if (ks == 0) continue; ks += ptoa(KSTACK_GUARD_PAGES); /* * Allocate physical pages to back the stack. */ if (vm_thread_stack_back(ks, ma, pages, req, domain) != 0) { vm_thread_free_kstack_kva(ks - ptoa(KSTACK_GUARD_PAGES), ptoa(pages + KSTACK_GUARD_PAGES), domain); continue; } if (KSTACK_GUARD_PAGES != 0) { pmap_qremove(ks - ptoa(KSTACK_GUARD_PAGES), KSTACK_GUARD_PAGES); } for (i = 0; i < pages; i++) vm_page_valid(ma[i]); pmap_qenter(ks, ma, pages); return (ks); } while (vm_domainset_iter_page(&di, obj, &domain) == 0); return (0); } static __noinline void vm_thread_stack_dispose(vm_offset_t ks, int pages) { vm_page_t m; vm_pindex_t pindex; int i, domain; vm_object_t obj = vm_thread_kstack_size_to_obj(pages); pindex = vm_kstack_pindex(ks, pages); domain = vm_phys_domain(vtophys(ks)); pmap_qremove(ks, pages); VM_OBJECT_WLOCK(obj); for (i = 0; i < pages; i++) { m = vm_page_lookup(obj, pindex + i); if (m == NULL) panic("%s: kstack already missing?", __func__); KASSERT(vm_page_domain(m) == domain, ("%s: page %p domain mismatch, expected %d got %d", __func__, m, domain, vm_page_domain(m))); vm_page_xbusy_claim(m); vm_page_unwire_noq(m); vm_page_free(m); } VM_OBJECT_WUNLOCK(obj); kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0); vm_thread_free_kstack_kva(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE), ptoa(pages + KSTACK_GUARD_PAGES), domain); } /* * Allocate the kernel stack for a new thread. */ int vm_thread_new(struct thread *td, int pages) { vm_offset_t ks; u_short ks_domain; /* Bounds check */ if (pages <= 1) pages = kstack_pages; else if (pages > KSTACK_MAX_PAGES) pages = KSTACK_MAX_PAGES; ks = 0; if (pages == kstack_pages && kstack_cache != NULL) ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT); /* * Ensure that kstack objects can draw pages from any memory * domain. Otherwise a local memory shortage can block a process * swap-in. */ if (ks == 0) ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)), pages); if (ks == 0) return (0); ks_domain = vm_phys_domain(vtophys(ks)); KASSERT(ks_domain >= 0 && ks_domain < vm_ndomains, ("%s: invalid domain for kstack %p", __func__, (void *)ks)); td->td_kstack = ks; td->td_kstack_pages = pages; td->td_kstack_domain = ks_domain; - kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0); - kmsan_mark((void *)ks, ptoa(pages), KMSAN_STATE_UNINIT); return (1); } /* * Dispose of a thread's kernel stack. 
*/ void vm_thread_dispose(struct thread *td) { vm_offset_t ks; int pages; pages = td->td_kstack_pages; ks = td->td_kstack; td->td_kstack = 0; td->td_kstack_pages = 0; td->td_kstack_domain = MAXMEMDOM; - kasan_mark((void *)ks, 0, ptoa(pages), KASAN_KSTACK_FREED); - if (pages == kstack_pages) + if (pages == kstack_pages) { + kasan_mark((void *)ks, 0, ptoa(pages), KASAN_KSTACK_FREED); uma_zfree(kstack_cache, (void *)ks); - else + } else { vm_thread_stack_dispose(ks, pages); + } } /* * Calculate kstack pindex. * * Uses a non-identity mapping if guard pages are * active to avoid pindex holes in the kstack object. */ vm_pindex_t vm_kstack_pindex(vm_offset_t ks, int kpages) { vm_pindex_t pindex = atop(ks - VM_MIN_KERNEL_ADDRESS); #ifdef __ILP32__ return (pindex); #else /* * Return the linear pindex if guard pages aren't active or if we are * allocating a non-standard kstack size. */ if (KSTACK_GUARD_PAGES == 0 || kpages != kstack_pages) { return (pindex); } KASSERT(pindex % (kpages + KSTACK_GUARD_PAGES) >= KSTACK_GUARD_PAGES, ("%s: Attempting to calculate kstack guard page pindex", __func__)); return (pindex - (pindex / (kpages + KSTACK_GUARD_PAGES) + 1) * KSTACK_GUARD_PAGES); #endif } /* * Allocate physical pages, following the specified NUMA policy, to back a * kernel stack. */ int vm_thread_stack_back(vm_offset_t ks, vm_page_t ma[], int npages, int req_class, int domain) { vm_object_t obj = vm_thread_kstack_size_to_obj(npages); vm_pindex_t pindex; vm_page_t m; int n; pindex = vm_kstack_pindex(ks, npages); VM_OBJECT_WLOCK(obj); for (n = 0; n < npages;) { m = vm_page_grab(obj, pindex + n, VM_ALLOC_NOCREAT | VM_ALLOC_WIRED); if (m == NULL) { m = vm_page_alloc_domain(obj, pindex + n, domain, req_class | VM_ALLOC_WIRED); } if (m == NULL) break; ma[n++] = m; } if (n < npages) goto cleanup; VM_OBJECT_WUNLOCK(obj); return (0); cleanup: vm_object_page_remove(obj, pindex, pindex + n, 0); VM_OBJECT_WUNLOCK(obj); return (ENOMEM); } vm_object_t vm_thread_kstack_size_to_obj(int npages) { return (npages == kstack_pages ? kstack_object : kstack_alt_object); } static int kstack_import(void *arg, void **store, int cnt, int domain, int flags) { struct domainset *ds; int i; if (domain == UMA_ANYDOMAIN) ds = DOMAINSET_RR(); else ds = DOMAINSET_PREF(domain); for (i = 0; i < cnt; i++) { store[i] = (void *)vm_thread_stack_create(ds, kstack_pages); if (store[i] == NULL) break; } return (i); } static void kstack_release(void *arg, void **store, int cnt) { vm_offset_t ks; int i; for (i = 0; i < cnt; i++) { ks = (vm_offset_t)store[i]; vm_thread_stack_dispose(ks, kstack_pages); } } static void kstack_cache_init(void *null) { vm_size_t kstack_quantum; int domain; kstack_object = vm_object_allocate(OBJT_SWAP, atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)); kstack_cache = uma_zcache_create("kstack_cache", kstack_pages * PAGE_SIZE, NULL, NULL, NULL, NULL, kstack_import, kstack_release, NULL, UMA_ZONE_FIRSTTOUCH); kstack_cache_size = imax(128, mp_ncpus * 4); uma_zone_set_maxcache(kstack_cache, kstack_cache_size); kstack_alt_object = vm_object_allocate(OBJT_SWAP, atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)); kstack_quantum = vm_thread_kstack_import_quantum(); /* * Reduce size used by the kstack arena to allow for * alignment adjustments in vm_thread_kstack_arena_import. */ kstack_quantum -= (kstack_pages + KSTACK_GUARD_PAGES) * PAGE_SIZE; /* * Create the kstack_arena for each domain and set kernel_arena as * parent. 
*/ for (domain = 0; domain < vm_ndomains; domain++) { vmd_kstack_arena[domain] = vmem_create("kstack arena", 0, 0, PAGE_SIZE, 0, M_WAITOK); KASSERT(vmd_kstack_arena[domain] != NULL, ("%s: failed to create domain %d kstack_arena", __func__, domain)); vmem_set_import(vmd_kstack_arena[domain], vm_thread_kstack_arena_import, vm_thread_kstack_arena_release, vm_dom[domain].vmd_kernel_arena, kstack_quantum); } } SYSINIT(vm_kstacks, SI_SUB_KMEM, SI_ORDER_ANY, kstack_cache_init, NULL); #ifdef KSTACK_USAGE_PROF /* * Track maximum stack used by a thread in kernel. */ static int max_kstack_used; SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD, &max_kstack_used, 0, "Maximum stack depth used by a thread in kernel"); void intr_prof_stack_use(struct thread *td, struct trapframe *frame) { vm_offset_t stack_top; vm_offset_t current; int used, prev_used; /* * Testing for interrupted kernel mode isn't strictly * needed. It optimizes the execution, since interrupts from * usermode will have only the trap frame on the stack. */ if (TRAPF_USERMODE(frame)) return; stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE; current = (vm_offset_t)(uintptr_t)&stack_top; /* * Try to detect if interrupt is using kernel thread stack. * Hardware could use a dedicated stack for interrupt handling. */ if (stack_top <= current || current < td->td_kstack) return; used = stack_top - current; for (;;) { prev_used = max_kstack_used; if (prev_used >= used) break; if (atomic_cmpset_int(&max_kstack_used, prev_used, used)) break; } } #endif /* KSTACK_USAGE_PROF */ /* * Implement fork's actions on an address space. * Here we arrange for the address space to be copied or referenced, * allocate a user struct (pcb and kernel stack), then call the * machine-dependent layer to fill those in and make the new process * ready to run. The new process is set up so that it returns directly * to user mode to avoid stack copying and relocation problems. */ int vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2, struct vmspace *vm2, int flags) { struct proc *p1 = td->td_proc; struct domainset *dset; int error; if ((flags & RFPROC) == 0) { /* * Divorce the memory, if it is shared, essentially * this changes shared memory amongst threads, into * COW locally. */ if ((flags & RFMEM) == 0) { error = vmspace_unshare(p1); if (error) return (error); } cpu_fork(td, p2, td2, flags); return (0); } if (flags & RFMEM) { p2->p_vmspace = p1->p_vmspace; refcount_acquire(&p1->p_vmspace->vm_refcnt); } dset = td2->td_domain.dr_policy; while (vm_page_count_severe_set(&dset->ds_mask)) { vm_wait_doms(&dset->ds_mask, 0); } if ((flags & RFMEM) == 0) { p2->p_vmspace = vm2; if (p1->p_vmspace->vm_shm) shmfork(p1, p2); } /* * cpu_fork will copy and update the pcb, set up the kernel stack, * and make the child ready to run. */ cpu_fork(td, p2, td2, flags); return (0); } /* * Called after process has been wait(2)'ed upon and is being reaped. * The idea is to reclaim resources that we could not reclaim while * the process was still executing. */ void vm_waitproc(struct proc *p) { vmspace_exitfree(p); /* and clean-out the vmspace */ } void kick_proc0(void) { wakeup(&proc0); }
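For reference, the alignment requirement described above vm_thread_kstack_arena_import() comes down to a single adjustment: bump the imported address forward until its page index, counted from the bottom of the kernel map, divides evenly by the kstack size including guard pages. A minimal userspace sketch of that arithmetic follows; the page size, stack and guard page counts, and the stand-in kernel base address are assumptions chosen purely for the demonstration, not values taken from this change.

/*
 * Illustrative sketch only (not part of this change): models the pindex
 * alignment bump performed by vm_thread_kstack_arena_import(). PAGE_SIZE,
 * the stack/guard page counts, and KERNEL_BASE are assumed values.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define KSTACK_PAGES	4ULL			/* assumed kstack_pages */
#define GUARD_PAGES	1ULL			/* assumed KSTACK_GUARD_PAGES */
#define KERNEL_BASE	0xffff000000000000ULL	/* stand-in for VM_MIN_KERNEL_ADDRESS */

/* atop(): convert a byte offset into a page index. */
static uint64_t
atop(uint64_t x)
{
	return (x >> PAGE_SHIFT);
}

/*
 * Bump an imported address forward until its page index relative to
 * KERNEL_BASE is a multiple of (KSTACK_PAGES + GUARD_PAGES), mirroring
 * the "rem" adjustment in the import function.
 */
static uint64_t
align_kstack_import(uint64_t addr)
{
	uint64_t kpages = KSTACK_PAGES + GUARD_PAGES;
	uint64_t rem = atop(addr - KERNEL_BASE) % kpages;

	if (rem != 0)
		addr += (kpages - rem) * PAGE_SIZE;
	return (addr);
}

int
main(void)
{
	uint64_t kpages = KSTACK_PAGES + GUARD_PAGES;
	/* An import that lands three pages past an aligned boundary. */
	uint64_t raw = KERNEL_BASE + 3 * PAGE_SIZE;
	uint64_t fixed = align_kstack_import(raw);

	printf("raw:   pindex %% kpages = %ju\n",
	    (uintmax_t)(atop(raw - KERNEL_BASE) % kpages));
	printf("fixed: pindex %% kpages = %ju\n",
	    (uintmax_t)(atop(fixed - KERNEL_BASE) % kpages));
	return (0);
}

As the surrounding comments note, the region handed to the kstack arena is one stack smaller than what is actually taken from the parent arena, which is exactly the slack this bump can consume.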
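vm_thread_new() and vm_thread_dispose() above route only default-sized stacks through the UMA-backed kstack_cache and construct or tear down everything else directly. The userspace sketch below imitates that "cache the common size, build the rest on demand" split; the free list, its slot count, and the malloc()/free() backing are invented for the illustration and are not part of the patch.

/*
 * Illustrative sketch only (not part of this change): a toy allocator that
 * caches the common stack size and builds non-standard sizes on demand,
 * loosely analogous to the vm_thread_new()/vm_thread_dispose() split.
 */
#include <stdio.h>
#include <stdlib.h>

#define STD_PAGES	4	/* assumed analogue of kstack_pages */
#define PAGE_SIZE	4096
#define CACHE_SLOTS	8

static void	*cache[CACHE_SLOTS];
static int	 cache_cnt;

static void *
stack_alloc(int pages)
{

	/* Fast path: default-sized stacks may come from the cache. */
	if (pages == STD_PAGES && cache_cnt > 0)
		return (cache[--cache_cnt]);
	/* Slow path: construct a stack of the requested size. */
	return (malloc((size_t)pages * PAGE_SIZE));
}

static void
stack_free(void *ks, int pages)
{

	/* Only default-sized stacks are worth keeping for reuse. */
	if (pages == STD_PAGES && cache_cnt < CACHE_SLOTS) {
		cache[cache_cnt++] = ks;
		return;
	}
	free(ks);
}

int
main(void)
{
	void *a, *b;

	a = stack_alloc(STD_PAGES);	/* slow path: cache starts empty */
	stack_free(a, STD_PAGES);	/* parked in the cache */
	b = stack_alloc(STD_PAGES);	/* fast path: reuses the cached stack */
	printf("reused cached stack: %s\n", a == b ? "yes" : "no");
	stack_free(b, STD_PAGES);
	while (cache_cnt > 0)		/* drain before exit */
		free(cache[--cache_cnt]);
	return (0);
}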
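The non-identity mapping in vm_kstack_pindex() exists because guard pages consume KVA but never receive backing pages, so using the linear pindex directly would leave a KSTACK_GUARD_PAGES-sized hole in kstack_object for every stack. The sketch below reproduces that compression for a few consecutive stacks; the page counts are assumptions chosen for the example.

/*
 * Illustrative sketch only (not part of this change): shows how the
 * guard-page holes are squeezed out of the object pindex, as
 * vm_kstack_pindex() does for standard-size stacks.
 */
#include <stdint.h>
#include <stdio.h>

#define KSTACK_PAGES		4ULL	/* assumed kstack_pages */
#define KSTACK_GUARD_PAGES	1ULL	/* assumed guard page count */

/*
 * 'lin' is the linear page index of a stack's first non-guard page,
 * measured from the start of the kernel map. Each stack spans
 * KSTACK_PAGES + KSTACK_GUARD_PAGES linear pages; subtracting one guard's
 * worth of pages per stack seen so far (including this one) removes the
 * holes and yields a dense object index.
 */
static uint64_t
kstack_pindex(uint64_t lin)
{
	uint64_t span = KSTACK_PAGES + KSTACK_GUARD_PAGES;

	return (lin - (lin / span + 1) * KSTACK_GUARD_PAGES);
}

int
main(void)
{
	uint64_t span = KSTACK_PAGES + KSTACK_GUARD_PAGES;
	uint64_t n, lin;

	/* Stack n starts at linear pindex n * span + KSTACK_GUARD_PAGES. */
	for (n = 0; n < 4; n++) {
		lin = n * span + KSTACK_GUARD_PAGES;
		printf("stack %ju: linear pindex %ju -> object pindex %ju\n",
		    (uintmax_t)n, (uintmax_t)lin,
		    (uintmax_t)kstack_pindex(lin));
	}
	return (0);
}

With these assumed constants the output is 0, 4, 8, 12: each standard-size stack owns exactly KSTACK_PAGES consecutive pindexes, which keeps kstack_object dense.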
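intr_prof_stack_use() maintains max_kstack_used with a compare-and-swap retry loop rather than a lock, which keeps it safe to run from interrupt context. A small sketch of that lossless atomic-maximum pattern, written with C11 atomics instead of the kernel's atomic_cmpset_int(), is given below.

/*
 * Illustrative sketch only (not part of this change): the CAS retry loop
 * used to maintain a monotonically increasing maximum, in the style of
 * intr_prof_stack_use(), demonstrated with C11 atomics in userspace.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int max_used;

static void
record_usage(int used)
{
	int prev = atomic_load(&max_used);

	for (;;) {
		if (prev >= used)
			break;	/* a deeper use has already been recorded */
		/*
		 * Try to publish our value; on failure 'prev' is reloaded
		 * with the current maximum and the check is retried.
		 */
		if (atomic_compare_exchange_weak(&max_used, &prev, used))
			break;
	}
}

int
main(void)
{

	record_usage(100);
	record_usage(40);	/* ignored: smaller than the current max */
	record_usage(250);
	printf("max stack use observed: %d\n", atomic_load(&max_used));
	return (0);
}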