Index: head/sys/kern/init_main.c
===================================================================
--- head/sys/kern/init_main.c	(revision 113449)
+++ head/sys/kern/init_main.c	(revision 113450)
@@ -1,719 +1,719 @@
/*
 * Copyright (c) 1995 Terrence R. Lambert
 * All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)init_main.c	8.9 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include "opt_init_path.h"
#include "opt_mac.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

void mi_startup(void);				/* Should be elsewhere */

/*
 * Components of the first process -- never freed.
 */
static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
struct thread thread0;
struct kse kse0;
struct ksegrp ksegrp0;
static struct procsig procsig0;
static struct filedesc0 filedesc0;
static struct plimit limit0;
-static struct vmspace vmspace0;
+struct vmspace vmspace0;
struct proc *initproc;

int cmask = CMASK;

struct vnode *rootvp;
int boothowto = 0;		/* initialized so that it can be patched */
SYSCTL_INT(_debug, OID_AUTO, boothowto, CTLFLAG_RD, &boothowto, 0, "");
int bootverbose;
SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW, &bootverbose, 0, "");

/*
 * This ensures that there is at least one entry so that the sysinit_set
 * symbol is not undefined.  A subsystem ID of SI_SUB_DUMMY is never
 * executed.
 */
SYSINIT(placeholder, SI_SUB_DUMMY, SI_ORDER_ANY, NULL, NULL)

/*
 * The sysinit table itself.  Items are checked off as they are run.
 * If we want to register new sysinit types, add them to newsysinit.
 */
SET_DECLARE(sysinit_set, struct sysinit);
struct sysinit **sysinit, **sysinit_end;
struct sysinit **newsysinit, **newsysinit_end;

/*
 * Merge a new sysinit set into the current set, reallocating it if
 * necessary.  This can only be called after malloc is running.
 */
void
sysinit_add(struct sysinit **set, struct sysinit **set_end)
{
	struct sysinit **newset;
	struct sysinit **sipp;
	struct sysinit **xipp;
	int count;

	count = set_end - set;
	if (newsysinit)
		count += newsysinit_end - newsysinit;
	else
		count += sysinit_end - sysinit;
	newset = malloc(count * sizeof(*sipp), M_TEMP, M_NOWAIT);
	if (newset == NULL)
		panic("cannot malloc for sysinit");
	xipp = newset;
	if (newsysinit)
		for (sipp = newsysinit; sipp < newsysinit_end; sipp++)
			*xipp++ = *sipp;
	else
		for (sipp = sysinit; sipp < sysinit_end; sipp++)
			*xipp++ = *sipp;
	for (sipp = set; sipp < set_end; sipp++)
		*xipp++ = *sipp;
	if (newsysinit)
		free(newsysinit, M_TEMP);
	newsysinit = newset;
	newsysinit_end = newset + count;
}
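/*
 * A minimal sketch of how a subsystem hooks into this mechanism: define
 * an initialization function and announce it with SYSINIT().  The names
 * mysub and mysub_init below are illustrative only, not part of the tree;
 * the subsystem/order pair determines where the sort in mi_startup()
 * places the entry.
 *
 *	static void
 *	mysub_init(void *arg __unused)
 *	{
 *		... runs once, after everything earlier in SI_SUB_* order ...
 *	}
 *	SYSINIT(mysub, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, mysub_init, NULL)
 */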
/*
 * System startup; initialize the world, create process 0, mount root
 * filesystem, and fork to create init and pagedaemon.  Most of the
 * hard work is done in the lower-level initialization routines including
 * startup(), which does memory initialization and autoconfiguration.
 *
 * This allows simple addition of new kernel subsystems that require
 * boot time initialization.  It also allows substitution of subsystem
 * (for instance, a scheduler, kernel profiler, or VM system) by object
 * module.  Finally, it allows for optional "kernel threads".
 */
void
mi_startup(void)
{

	register struct sysinit **sipp;	/* system initialization*/
	register struct sysinit **xipp;	/* interior loop of sort*/
	register struct sysinit *save;	/* bubble*/

	if (sysinit == NULL) {
		sysinit = SET_BEGIN(sysinit_set);
		sysinit_end = SET_LIMIT(sysinit_set);
	}

restart:
	/*
	 * Perform a bubble sort of the system initialization objects by
	 * their subsystem (primary key) and order (secondary key).
	 */
	for (sipp = sysinit; sipp < sysinit_end; sipp++) {
		for (xipp = sipp + 1; xipp < sysinit_end; xipp++) {
			if ((*sipp)->subsystem < (*xipp)->subsystem ||
			     ((*sipp)->subsystem == (*xipp)->subsystem &&
			      (*sipp)->order <= (*xipp)->order))
				continue;	/* skip*/
			save = *sipp;
			*sipp = *xipp;
			*xipp = save;
		}
	}

	/*
	 * Traverse the (now) ordered list of system initialization tasks.
	 * Perform each task, and continue on to the next task.
	 *
	 * The last item on the list is expected to be the scheduler,
	 * which will not return.
	 */
	for (sipp = sysinit; sipp < sysinit_end; sipp++) {

		if ((*sipp)->subsystem == SI_SUB_DUMMY)
			continue;	/* skip dummy task(s)*/

		if ((*sipp)->subsystem == SI_SUB_DONE)
			continue;

		/* Call function */
		(*((*sipp)->func))((*sipp)->udata);

		/* Check off the one we've just done */
		(*sipp)->subsystem = SI_SUB_DONE;

		/* Check if we've installed more sysinit items via KLD */
		if (newsysinit != NULL) {
			if (sysinit != SET_BEGIN(sysinit_set))
				free(sysinit, M_TEMP);
			sysinit = newsysinit;
			sysinit_end = newsysinit_end;
			newsysinit = NULL;
			newsysinit_end = NULL;
			goto restart;
		}
	}

	panic("Shouldn't get here!");
	/* NOTREACHED*/
}


/*
 ***************************************************************************
 ****
 **** The following SYSINIT's belong elsewhere, but have not yet
 **** been moved.
 ****
 ***************************************************************************
 */
static void
print_caddr_t(void *data __unused)
{
	printf("%s", (char *)data);
}
SYSINIT(announce, SI_SUB_COPYRIGHT, SI_ORDER_FIRST, print_caddr_t, copyright)
SYSINIT(version, SI_SUB_COPYRIGHT, SI_ORDER_SECOND, print_caddr_t, version)

static void
set_boot_verbose(void *data __unused)
{

	if (boothowto & RB_VERBOSE)
		bootverbose++;
}
SYSINIT(boot_verbose, SI_SUB_TUNABLES, SI_ORDER_ANY, set_boot_verbose, NULL)

struct sysentvec null_sysvec = {
	0,
	NULL,
	0,
	0,
	NULL,
	0,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"null",
	NULL,
	NULL,
	0,
	PAGE_SIZE,
	VM_MIN_ADDRESS,
	VM_MAXUSER_ADDRESS,
	USRSTACK,
	PS_STRINGS,
	VM_PROT_ALL,
	NULL,
	NULL
};

/*
 ***************************************************************************
 ****
 **** The two following SYSINIT's are proc0 specific glue code.  I am not
 **** convinced that they cannot be safely combined, but their order of
 **** operation has been maintained as the same as the original init_main.c
 **** for right now.
 ****
 **** These probably belong in init_proc.c or kern_proc.c, since they
 **** deal with proc0 (the fork template process).
 ****
 ***************************************************************************
 */
/* ARGSUSED*/
static void
proc0_init(void *dummy __unused)
{
	register struct proc *p;
	register struct filedesc0 *fdp;
	register unsigned i;
	struct thread *td;
	struct ksegrp *kg;
	struct kse *ke;

	GIANT_REQUIRED;
	p = &proc0;
	td = &thread0;
	ke = &kse0;
	kg = &ksegrp0;

	ke->ke_sched = kse0_sched;
	kg->kg_sched = ksegrp0_sched;
	p->p_sched = proc0_sched;
	td->td_sched = thread0_sched;

	/*
	 * Initialize magic number.
	 */
	p->p_magic = P_MAGIC;

	/*
	 * Initialize thread, process and pgrp structures.
	 */
	procinit();
	threadinit();

	/*
	 * Initialize sleep queue hash table
	 */
	sleepinit();

	/*
	 * additional VM structures
	 */
	vm_init2();

	/*
	 * Create process 0 (the swapper).
	 */
	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(PIDHASH(0), p, p_hash);
	mtx_init(&pgrp0.pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
	p->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
	LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist);

	pgrp0.pg_session = &session0;
	mtx_init(&session0.s_mtx, "session", NULL, MTX_DEF);
	session0.s_count = 1;
	session0.s_leader = p;

	p->p_sysent = &null_sysvec;

	/*
	 * proc_linkup was already done in init_i386() or alphainit() etc.
	 * because the earlier code needed to follow td->td_proc.  Otherwise
	 * I would have done it here.. maybe this means this should be
	 * done earlier too.
	 */
	p->p_flag = P_SYSTEM;
	p->p_sflag = PS_INMEM;
	p->p_state = PRS_NORMAL;
	td->td_state = TDS_RUNNING;
	kg->kg_nice = NZERO;
	kg->kg_pri_class = PRI_TIMESHARE;
	kg->kg_user_pri = PUSER;
	td->td_priority = PVM;
	td->td_base_pri = PUSER;
	td->td_kse = ke;	/* XXXKSE */
	td->td_oncpu = 0;
	ke->ke_state = KES_THREAD;
	ke->ke_thread = td;
	p->p_peers = 0;
	p->p_leader = p;

	bcopy("swapper", p->p_comm, sizeof ("swapper"));

	callout_init(&p->p_itcallout, 1);
	callout_init(&td->td_slpcallout, 1);

	/* Create credentials. */
	p->p_ucred = crget();
	p->p_ucred->cr_ngroups = 1;	/* group 0 */
	p->p_ucred->cr_uidinfo = uifind(0);
	p->p_ucred->cr_ruidinfo = uifind(0);
	p->p_ucred->cr_prison = NULL;	/* Don't jail it. */
#ifdef MAC
	mac_create_proc0(p->p_ucred);
#endif
	td->td_ucred = crhold(p->p_ucred);

	/* Create procsig. */
	p->p_procsig = &procsig0;
	p->p_procsig->ps_refcnt = 1;

	/* Initialize signal state for process 0. */
	siginit(&proc0);

	/* Create the file descriptor table. */
	fdp = &filedesc0;
	p->p_fd = &fdp->fd_fd;
	mtx_init(&fdp->fd_fd.fd_mtx, FILEDESC_LOCK_DESC, NULL, MTX_DEF);
	fdp->fd_fd.fd_refcnt = 1;
	fdp->fd_fd.fd_cmask = cmask;
	fdp->fd_fd.fd_ofiles = fdp->fd_dfiles;
	fdp->fd_fd.fd_ofileflags = fdp->fd_dfileflags;
	fdp->fd_fd.fd_nfiles = NDFILE;

	/* Create the limits structures. */
	p->p_limit = &limit0;
	for (i = 0; i < sizeof(p->p_rlimit)/sizeof(p->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur =
		    limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur =
	    limit0.pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur =
	    limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	i = ptoa(cnt.v_free_count);
	limit0.pl_rlimit[RLIMIT_RSS].rlim_max = i;
	limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_max = i;
	limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
	limit0.p_refcnt = 1;
	p->p_cpulimit = RLIM_INFINITY;

	/* Allocate a prototype map so we have something to fork. */
	pmap_pinit0(vmspace_pmap(&vmspace0));
	p->p_vmspace = &vmspace0;
	vmspace0.vm_refcnt = 1;
	vm_map_init(&vmspace0.vm_map, p->p_sysent->sv_minuser,
	    p->p_sysent->sv_maxuser);
	vmspace0.vm_map.pmap = vmspace_pmap(&vmspace0);

	/*
	 * We continue to place resource usage info and signal
	 * actions in the user struct so they're pageable.
	 */
	p->p_stats = &p->p_uarea->u_stats;
	p->p_sigacts = &p->p_uarea->u_sigacts;

	/*
	 * Charge root for one process.
	 */
	(void)chgproccnt(p->p_ucred->cr_ruidinfo, 1, 0);
}
SYSINIT(p0init, SI_SUB_INTRINSIC, SI_ORDER_FIRST, proc0_init, NULL)
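/*
 * With vmspace0 no longer static (the change in this revision), other
 * kernel files can declare `extern struct vmspace vmspace0;' (see the
 * matching proc.h hunk below) and, for instance, recognize a process
 * that is still attached to the initial prototype VM space.  A hedged
 * sketch of such a consumer, not code from this commit:
 *
 *	if (p->p_vmspace == &vmspace0) {
 *		... p still shares proc0's prototype address space ...
 *	}
 */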
/* ARGSUSED*/
static void
proc0_post(void *dummy __unused)
{
	struct timespec ts;
	struct proc *p;

	/*
	 * Now we can look at the time, having had a chance to verify the
	 * time from the filesystem.  Pretend that proc0 started now.
	 */
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		microtime(&p->p_stats->p_start);
		p->p_runtime.sec = 0;
		p->p_runtime.frac = 0;
	}
	sx_sunlock(&allproc_lock);
	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/*
	 * Give the ``random'' number generator a thump.
	 */
	nanotime(&ts);
	srandom(ts.tv_sec ^ ts.tv_nsec);
}
SYSINIT(p0post, SI_SUB_INTRINSIC_POST, SI_ORDER_FIRST, proc0_post, NULL)

/*
 ***************************************************************************
 ****
 **** The following SYSINIT's and glue code should be moved to the
 **** respective files on a per subsystem basis.
 ****
 ***************************************************************************
 */

/*
 ***************************************************************************
 ****
 **** The following code probably belongs in another file, like
 **** kern/init_init.c.
 ****
 ***************************************************************************
 */

/*
 * List of paths to try when searching for "init".
 */
static char init_path[MAXPATHLEN] =
#ifdef	INIT_PATH
    __XSTRING(INIT_PATH);
#else
    "/sbin/init:/sbin/oinit:/sbin/init.bak:/stand/sysinstall";
#endif
SYSCTL_STRING(_kern, OID_AUTO, init_path, CTLFLAG_RD, init_path, 0,
	"Path used to search the init process");

/*
 * Start the initial user process; try exec'ing each pathname in init_path.
 * The program is invoked with one argument containing the boot flags.
 */
static void
start_init(void *dummy)
{
	vm_offset_t addr;
	struct execve_args args;
	int options, error;
	char *var, *path, *next, *s;
	char *ucp, **uap, *arg0, *arg1;
	struct thread *td;
	struct proc *p;
	int init_does_devfs = 0;

	mtx_lock(&Giant);

	GIANT_REQUIRED;

	td = curthread;
	p = td->td_proc;

	vfs_mountroot();

	/* Get the vnode for '/'.  Set p->p_fd->fd_cdir to reference it. */
	if (VFS_ROOT(TAILQ_FIRST(&mountlist), &rootvnode))
		panic("cannot find root vnode");
	FILEDESC_LOCK(p->p_fd);
	p->p_fd->fd_cdir = rootvnode;
	VREF(p->p_fd->fd_cdir);
	p->p_fd->fd_rdir = rootvnode;
	VREF(p->p_fd->fd_rdir);
	FILEDESC_UNLOCK(p->p_fd);
	VOP_UNLOCK(rootvnode, 0, td);
#ifdef MAC
	mac_create_root_mount(td->td_ucred, TAILQ_FIRST(&mountlist));
#endif

	/*
	 * For disk based systems, we probably cannot do this yet
	 * since the fs will be read-only.  But a NFS root
	 * might be ok.  It is worth a shot.
	 */
	error = kern_mkdir(td, "/dev", UIO_SYSSPACE, 0700);
	if (error == EEXIST)
		error = 0;
	if (error == 0)
		error = kernel_vmount(0, "fstype", "devfs", "fspath", "/dev",
		    NULL);
	if (error != 0)
		init_does_devfs = 1;

	/*
	 * Need just enough stack to hold the faked-up "execve()" arguments.
	 */
	addr = p->p_sysent->sv_usrstack - PAGE_SIZE;
	if (vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE,
	    FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != 0)
		panic("init: couldn't allocate argument space");
	p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
	p->p_vmspace->vm_ssize = 1;

	if ((var = getenv("init_path")) != NULL) {
		strlcpy(init_path, var, sizeof(init_path));
		freeenv(var);
	}

	for (path = init_path; *path != '\0'; path = next) {
		while (*path == ':')
			path++;
		if (*path == '\0')
			break;
		for (next = path; *next != '\0' && *next != ':'; next++)
			/* nothing */ ;
		if (bootverbose)
			printf("start_init: trying %.*s\n", (int)(next - path),
			    path);

		/*
		 * Move out the boot flag argument.
		 */
		options = 0;
		ucp = (char *)p->p_sysent->sv_usrstack;
		(void)subyte(--ucp, 0);		/* trailing zero */
		if (boothowto & RB_SINGLE) {
			(void)subyte(--ucp, 's');
			options = 1;
		}
#ifdef notyet
		if (boothowto & RB_FASTBOOT) {
			(void)subyte(--ucp, 'f');
			options = 1;
		}
#endif

#ifdef BOOTCDROM
		(void)subyte(--ucp, 'C');
		options = 1;
#endif
		if (init_does_devfs) {
			(void)subyte(--ucp, 'd');
			options = 1;
		}

		if (options == 0)
			(void)subyte(--ucp, '-');
		(void)subyte(--ucp, '-');	/* leading hyphen */
		arg1 = ucp;

		/*
		 * Move out the file name (also arg 0).
		 */
		(void)subyte(--ucp, 0);
		for (s = next - 1; s >= path; s--)
			(void)subyte(--ucp, *s);
		arg0 = ucp;

		/*
		 * Move out the arg pointers.
		 */
		uap = (char **)((intptr_t)ucp & ~(sizeof(intptr_t)-1));
		(void)suword((caddr_t)--uap, (long)0);	/* terminator */
		(void)suword((caddr_t)--uap, (long)(intptr_t)arg1);
		(void)suword((caddr_t)--uap, (long)(intptr_t)arg0);

		/*
		 * Point at the arguments.
		 */
		args.fname = arg0;
		args.argv = uap;
		args.envv = NULL;
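		/*
		 * For concreteness: the faked-up stack built above makes init
		 * enter user mode roughly as if a shell had exec'd it.  With
		 * RB_SINGLE set and the first path in init_path, the result
		 * resembles (illustrative only):
		 *
		 *	argv[0] = "/sbin/init"
		 *	argv[1] = "-s"	("--" when no option letters were set)
		 *	argv[2] = NULL
		 *	envp    = NULL
		 */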
		/*
		 * Now try to exec the program.  If can't for any reason
		 * other than it doesn't exist, complain.
		 *
		 * Otherwise, return via fork_trampoline() all the way
		 * to user mode as init!
		 */
		if ((error = execve(td, &args)) == 0) {
			mtx_unlock(&Giant);
			return;
		}
		if (error != ENOENT)
			printf("exec %.*s: error %d\n", (int)(next - path),
			    path, error);
	}
	printf("init: not found in path %s\n", init_path);
	panic("no init");
}

/*
 * Like kthread_create(), but runs in its own address space.
 * We do this early to reserve pid 1.
 *
 * Note special case - do not make it runnable yet.  Other work
 * in progress will change this more.
 */
static void
create_init(const void *udata __unused)
{
	struct ucred *newcred, *oldcred;
	int error;

	error = fork1(&thread0, RFFDG | RFPROC | RFSTOPPED, 0, &initproc);
	if (error)
		panic("cannot fork init: %d\n", error);
	/* divorce init's credentials from the kernel's */
	newcred = crget();
	PROC_LOCK(initproc);
	initproc->p_flag |= P_SYSTEM;
	oldcred = initproc->p_ucred;
	crcopy(newcred, oldcred);
#ifdef MAC
	mac_create_proc1(newcred);
#endif
	initproc->p_ucred = newcred;
	PROC_UNLOCK(initproc);
	crfree(oldcred);
	cred_update_thread(FIRST_THREAD_IN_PROC(initproc));
	mtx_lock_spin(&sched_lock);
	initproc->p_sflag |= PS_INMEM;
	mtx_unlock_spin(&sched_lock);
	cpu_set_fork_handler(FIRST_THREAD_IN_PROC(initproc), start_init, NULL);
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL)

/*
 * Make it runnable now.
 */
static void
kick_init(const void *udata __unused)
{
	struct thread *td;

	td = FIRST_THREAD_IN_PROC(initproc);
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td);
	setrunqueue(td);	/* XXXKSE */
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
Index: head/sys/sys/proc.h
===================================================================
--- head/sys/sys/proc.h	(revision 113449)
+++ head/sys/sys/proc.h	(revision 113450)
@@ -1,963 +1,964 @@
/*-
 * Copyright (c) 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)proc.h	8.15 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#ifndef _SYS_PROC_H_
#define	_SYS_PROC_H_

#include			/* For struct callout. */
#include			/* For struct klist. */
#ifndef _KERNEL
#include
#endif
#include
#include
#include
#include
#include			/* XXX */
#include
#include
#include
#include
#ifndef _KERNEL
#include			/* For structs itimerval, timeval. */
#else
#include
#endif
#include
#include
#include			/* Machine-dependent proc substruct. */

/*
 * One structure allocated per session.
 *
 * List of locks
 * (m)		locked by s_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct session {
	int		s_count;	/* (m) Ref cnt; pgrps in session. */
	struct proc	*s_leader;	/* (m + e) Session leader. */
	struct vnode	*s_ttyvp;	/* (m) Vnode of controlling tty. */
	struct tty	*s_ttyp;	/* (m) Controlling tty. */
	pid_t		s_sid;		/* (c) Session ID. */
					/* (m) Setlogin() name: */
	char		s_login[roundup(MAXLOGNAME, sizeof(long))];
	struct mtx	s_mtx;		/* Mutex to protect members */
};

/*
 * One structure allocated per process group.
 *
 * List of locks
 * (m)		locked by pg_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct pgrp {
	LIST_ENTRY(pgrp) pg_hash;	/* (e) Hash chain. */
	LIST_HEAD(, proc) pg_members;	/* (m + e) Pointer to pgrp members. */
	struct session	*pg_session;	/* (c) Pointer to session. */
	struct sigiolst	pg_sigiolst;	/* (m) List of sigio sources. */
	pid_t		pg_id;		/* (c) Pgrp id. */
	int		pg_jobc;	/* (m) job cntl proc count */
	struct mtx	pg_mtx;		/* Mutex to protect members */
};

struct procsig {
	sigset_t ps_sigignore;		/* Signals being ignored. */
	sigset_t ps_sigcatch;		/* Signals being caught by user. */
	int	 ps_flag;
	struct	 sigacts *ps_sigacts;	/* Signal actions, state. */
	int	 ps_refcnt;
};

#define	PS_NOCLDWAIT	0x0001	/* No zombies if child dies */
#define	PS_NOCLDSTOP	0x0002	/* No SIGCHLD when children stop. */
#define	PS_CLDSIGIGN	0x0004	/* The SIGCHLD handler is SIG_IGN. */

/*
 * pargs, used to hold a copy of the command line, if it had a sane length.
 */
struct pargs {
	u_int	ar_ref;		/* Reference count. */
	u_int	ar_length;	/* Length. */
	u_char	ar_args[1];	/* Arguments. */
};

/*-
 * Description of a process.
 *
 * This structure contains the information needed to manage a thread of
 * control, known in UN*X as a process; it has references to substructures
 * containing descriptions of things that the process uses, but may share
 * with related processes.  The process structure and the substructures
 * are always addressable except for those marked "(CPU)" below,
 * which might be addressable only on a processor on which the process
 * is running.
 *
 * Below is a key of locks used to protect each member of struct proc.  The
 * lock is indicated by a reference to a specific character in parens in the
 * associated comment.
 *      - not yet protected
 *      a - only touched by curproc or parent during fork/wait
 *      b - created at fork, never changes
 *		(exception aiods switch vmspaces, but they are also
 *		marked 'P_SYSTEM' so hopefully it will be left alone)
 *      c - locked by proc mtx
 *      d - locked by allproc_lock lock
 *      e - locked by proctree_lock lock
 *      f - session mtx
 *      g - process group mtx
 *      h - callout_lock mtx
 *      i - by curproc or the master session mtx
 *      j - locked by sched_lock mtx
 *      k - only accessed by curthread
 *      l - the attaching proc or attaching proc parent
 *      m - Giant
 *      n - not locked, lazy
 *      o - ktrace lock
 *      p - select lock (sellock)
 *      r - p_peers lock
 *
 * If the locking key specifies two identifiers (for example, p_pptr) then
 * either lock is sufficient for read access, but both locks must be held
 * for write access.
 */
struct ithd;
struct ke_sched;
struct kg_sched;
struct nlminfo;
struct p_sched;
struct td_sched;
struct trapframe;

/*
 * Here we define the four structures used for process information.
 *
 * The first is the thread.  It might be thought of as a "Kernel
 * Schedulable Entity Context".
 * This structure contains all the information as to where a thread of
 * execution is now, or was when it was suspended, why it was suspended,
 * and anything else that will be needed to restart it when it is
 * rescheduled.  Always associated with a KSE when running, but can be
 * reassigned to an equivalent KSE when being restarted for
 * load balancing.  Each of these is associated with a kernel stack
 * and a pcb.
 *
 * It is important to remember that a particular thread structure only
 * exists as long as the system call or kernel entrance (e.g. by pagefault)
 * which it is currently executing.  It should therefore NEVER be referenced
 * by pointers in long lived structures that live longer than a single
 * request.  If several threads complete their work at the same time,
 * they will all rewind their stacks to the user boundary, report their
 * completion state, and all but one will be freed.  That last one will
 * be kept to provide a kernel stack and pcb for the NEXT syscall or kernel
 * entrance.  (basically to save freeing and then re-allocating it)  The KSE
 * keeps a cached thread available to allow it to quickly
 * get one when it needs a new one.  There is also a system
 * cache of free threads.  Threads have priority and partake in priority
 * inheritance schemes.
 */
struct thread;

/*
 * The second structure is the Kernel Schedulable Entity. (KSE)
 * It represents the ability to take a slot in the scheduler queue.
 * As long as this is scheduled, it could continue to run any threads that
 * are assigned to the KSEGRP (see later) until either it runs out
 * of runnable threads of high enough priority, or CPU.
 * It runs on one CPU and is assigned a quantum of time.  When a thread is
 * blocked, the KSE continues to run and will search for another thread
 * in a runnable state amongst those it has.  It may decide to return to user
 * mode with a new 'empty' thread if there are no runnable threads.
 * Threads are temporarily associated with a KSE for scheduling reasons.
 */
struct kse;

/*
 * The KSEGRP is allocated resources across a number of CPUs.
 * (Including a number of CPUxQUANTA.)  It parcels these QUANTA up among
 * its KSEs, each of which should be running in a different CPU.
 * BASE priority and total available quanta are properties of a KSEGRP.
 * Multiple KSEGRPs in a single process compete against each other
 * for total quanta in the same way that a forked child competes against
 * its parent process.
 */
struct ksegrp;

/*
 * A process is the owner of all system resources allocated to a task
 * except CPU quanta.
 * All KSEGs under one process see, and have the same access to, these
 * resources (e.g. files, memory, sockets, permissions, kqueues).
 * A process may compete for CPU cycles on the same basis as a
 * forked process cluster by spawning several KSEGRPs.
 */
struct proc;

/***************
 * In pictures:
 With a single run queue used by all processors:

 RUNQ: --->KSE---KSE--...   SLEEPQ:[]---THREAD---THREAD---THREAD
             |   /                 []---THREAD
             KSEG---THREAD--THREAD--THREAD  []
                                            []---THREAD---THREAD

  (processors run THREADs from the KSEG until they are exhausted or
  the KSEG exhausts its quantum)

With PER-CPU run queues:
KSEs on the separate run queues directly
They would be given priorities calculated from the KSEG.

 *
 *****************/

/*
 * Kernel runnable context (thread).
 * This is what is put to sleep and reactivated.
 * The first KSE available in the correct group will run this thread.
 * If several are available, use the one on the same CPU as last time.
 * When waiting to be run, threads are hung off the KSEGRP in priority order.
 * With N runnable and queued KSEs in the KSEGRP, the first N threads
 * are linked to them.  Other threads are not yet assigned.
 */
struct thread {
	struct proc	*td_proc;	/* Associated process. */
	struct ksegrp	*td_ksegrp;	/* Associated KSEG. */
	TAILQ_ENTRY(thread) td_plist;	/* All threads in this proc */
	TAILQ_ENTRY(thread) td_kglist;	/* All threads in this ksegrp */

	/* The two queues below should someday be merged */
	TAILQ_ENTRY(thread) td_slpq;	/* (j) Sleep queue. XXXKSE */
	TAILQ_ENTRY(thread) td_lockq;	/* (j) Lock queue. XXXKSE */
	TAILQ_ENTRY(thread) td_runq;	/* (j) Run queue(s). XXXKSE */

	TAILQ_HEAD(, selinfo) td_selq;	/* (p) List of selinfos. */

/* Cleared during fork1() or thread_sched_upcall() */
#define	td_startzero td_flags
	int		td_flags;	/* (j) TDF_* flags. */
	int		td_inhibitors;	/* (j) Why can not run */
	struct kse	*td_last_kse;	/* (j) Previous value of td_kse */
	struct kse	*td_kse;	/* (j) Current KSE if running. */
	int		td_dupfd;	/* (k) Ret value from fdopen. XXX */
	void		*td_wchan;	/* (j) Sleep address. */
	const char	*td_wmesg;	/* (j) Reason for sleep. */
	u_char		td_lastcpu;	/* (j) Last cpu we were on. */
	u_char		td_inktr;	/* (k) Currently handling a KTR. */
	u_char		td_inktrace;	/* (k) Currently handling a KTRACE. */
	u_char		td_oncpu;	/* (j) Which cpu we are on. */
	short		td_locks;	/* (k) DEBUG: lockmgr count of locks */
	struct mtx	*td_blocked;	/* (j) Mutex process is blocked on. */
	struct ithd	*td_ithd;	/* (b) For interrupt threads only. */
	const char	*td_lockname;	/* (j) Name of lock blocked on. */
	LIST_HEAD(, mtx) td_contested;	/* (j) Contested locks. */
	struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
	int		td_intr_nesting_level; /* (k) Interrupt recursion. */
	struct kse_thr_mailbox *td_mailbox; /* The userland mailbox address */
	struct ucred	*td_ucred;	/* (k) Reference to credentials. */
	void		(*td_switchin)(void); /* (k) Switchin special func. */
	struct thread	*td_standin;	/* (?) Use this for an upcall */
	u_int		td_prticks;	/* (?) Profclock hits in sys for user */
	struct kse_upcall *td_upcall;	/* our upcall structure. */
	u_int64_t	td_sticks;	/* (j) Statclock hits in system mode. */
	u_int		td_uuticks;	/* Statclock hits in user, for UTS */
	u_int		td_usticks;	/* Statclock hits in kernel, for UTS */
	u_int		td_critnest;	/* (k) Critical section nest level. */
	sigset_t	td_oldsigmask;	/* (c) Saved mask from pre sigpause. */
	sigset_t	td_sigmask;	/* (c) Current signal mask. */
	sigset_t	td_siglist;	/* (c) Sigs arrived, not delivered. */
	STAILQ_HEAD(, thread) td_umtxq;	/* (p) List of threads blocked by us. */
	STAILQ_ENTRY(thread) td_umtx;	/* (p) Link for when we're blocked. */
#define	td_endzero td_base_pri

/* Copied during fork1() or thread_sched_upcall() */
#define	td_startcopy td_endzero
	u_char		td_base_pri;	/* (j) Thread base kernel priority. */
	u_char		td_priority;	/* (j) Thread active priority. */
#define	td_endcopy td_pcb

/*
 * fields that must be manually set in fork1() or thread_sched_upcall()
 * or already have been set in the allocator, constructor, etc..
 */
	struct pcb	*td_pcb;	/* (k) Kernel VA of pcb and kstack. */
	enum {
		TDS_INACTIVE = 0x0,
		TDS_INHIBITED,
		TDS_CAN_RUN,
		TDS_RUNQ,
		TDS_RUNNING
	} td_state;
	register_t	td_retval[2];	/* (k) Syscall aux returns. */
	struct callout	td_slpcallout;	/* (h) Callout for sleep. */
	struct trapframe *td_frame;	/* (k) */
	struct vm_object *td_kstack_obj; /* (a) Kstack object. */
	vm_offset_t	td_kstack;	/* Kernel VA of kstack. */
	int		td_kstack_pages; /* Size of the kstack */
	struct vm_object *td_altkstack_obj; /* (a) Alternate kstack object. */
	vm_offset_t	td_altkstack;	/* Kernel VA of alternate kstack. */
	int		td_altkstack_pages; /* Size of the alternate kstack */
	struct mdthread td_md;		/* (k) Any machine-dependent fields. */
	struct td_sched	*td_sched;	/* Scheduler specific data */
};
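/*
 * The td_startzero/td_endzero and td_startcopy/td_endcopy markers above
 * let the fork and upcall paths treat whole runs of fields as single
 * blocks.  A hedged sketch of the idiom (the tree's own code is
 * equivalent in spirit, with td the template and td2 the new thread):
 *
 *	bzero(&td2->td_startzero,
 *	    (unsigned)((caddr_t)&td2->td_endzero -
 *	    (caddr_t)&td2->td_startzero));
 *	bcopy(&td->td_startcopy, &td2->td_startcopy,
 *	    (unsigned)((caddr_t)&td2->td_endcopy -
 *	    (caddr_t)&td2->td_startcopy));
 */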
/* flags kept in td_flags */
#define	TDF_OLDMASK	0x000001 /* Need to restore mask after suspend. */
#define	TDF_INPANIC	0x000002 /* Caused a panic, let it drive crashdump. */
#define	TDF_CAN_UNBIND	0x000004 /* Only temporarily bound. */
#define	TDF_SINTR	0x000008 /* Sleep is interruptible. */
#define	TDF_TIMEOUT	0x000010 /* Timing out during sleep. */
#define	TDF_SELECT	0x000040 /* Selecting; wakeup/waiting danger. */
#define	TDF_CVWAITQ	0x000080 /* Thread is on a cv_waitq (not slpq). */
#define	TDF_UPCALLING	0x000100 /* This thread is doing an upcall. */
#define	TDF_ONSLEEPQ	0x000200 /* On the sleep queue. */
#define	TDF_INMSLEEP	0x000400 /* Don't recurse in msleep(). */
#define	TDF_ASTPENDING	0x000800 /* Thread has some asynchronous events. */
#define	TDF_TIMOFAIL	0x001000 /* Timeout from sleep after we were awake. */
#define	TDF_INTERRUPT	0x002000 /* Thread is marked as interrupted. */
#define	TDF_USTATCLOCK	0x004000 /* Stat clock hits in userland. */
#define	TDF_OWEUPC	0x008000 /* Owe thread an addupc() call at next AST. */
#define	TDF_NEEDRESCHED	0x010000 /* Thread needs to yield. */
#define	TDF_NEEDSIGCHK	0x020000 /* Thread may need signal delivery. */
#define	TDF_DEADLKTREAT	0x800000 /* Lock acquisition - deadlock treatment. */

#define	TDI_SUSPENDED	0x0001	/* On suspension queue. */
#define	TDI_SLEEPING	0x0002	/* Actually asleep! (tricky). */
#define	TDI_SWAPPED	0x0004	/* Stack not in mem.. bad juju if run. */
#define	TDI_LOCK	0x0008	/* Stopped on a lock. */
#define	TDI_IWAIT	0x0010	/* Awaiting interrupt. */

#define	TD_CAN_UNBIND(td)						\
    (((td)->td_flags & TDF_CAN_UNBIND) == TDF_CAN_UNBIND &&		\
     ((td)->td_upcall != NULL))

#define	TD_IS_SLEEPING(td)	((td)->td_inhibitors & TDI_SLEEPING)
#define	TD_ON_SLEEPQ(td)	((td)->td_wchan != NULL)
#define	TD_IS_SUSPENDED(td)	((td)->td_inhibitors & TDI_SUSPENDED)
#define	TD_IS_SWAPPED(td)	((td)->td_inhibitors & TDI_SWAPPED)
#define	TD_ON_LOCK(td)		((td)->td_inhibitors & TDI_LOCK)
#define	TD_AWAITING_INTR(td)	((td)->td_inhibitors & TDI_IWAIT)
#define	TD_IS_RUNNING(td)	((td)->td_state == TDS_RUNNING)
#define	TD_ON_RUNQ(td)		((td)->td_state == TDS_RUNQ)
#define	TD_CAN_RUN(td)		((td)->td_state == TDS_CAN_RUN)
#define	TD_IS_INHIBITED(td)	((td)->td_state == TDS_INHIBITED)

#define	TD_SET_INHIB(td, inhib) do {		\
	(td)->td_state = TDS_INHIBITED;		\
	(td)->td_inhibitors |= (inhib);		\
} while (0)

#define	TD_CLR_INHIB(td, inhib) do {			\
	if (((td)->td_inhibitors & (inhib)) &&		\
	    (((td)->td_inhibitors &= ~(inhib)) == 0))	\
		(td)->td_state = TDS_CAN_RUN;		\
} while (0)

#define	TD_SET_SLEEPING(td)	TD_SET_INHIB((td), TDI_SLEEPING)
#define	TD_SET_SWAPPED(td)	TD_SET_INHIB((td), TDI_SWAPPED)
#define	TD_SET_LOCK(td)		TD_SET_INHIB((td), TDI_LOCK)
#define	TD_SET_SUSPENDED(td)	TD_SET_INHIB((td), TDI_SUSPENDED)
#define	TD_SET_IWAIT(td)	TD_SET_INHIB((td), TDI_IWAIT)
#define	TD_SET_EXITING(td)	TD_SET_INHIB((td), TDI_EXITING)

#define	TD_CLR_SLEEPING(td)	TD_CLR_INHIB((td), TDI_SLEEPING)
#define	TD_CLR_SWAPPED(td)	TD_CLR_INHIB((td), TDI_SWAPPED)
#define	TD_CLR_LOCK(td)		TD_CLR_INHIB((td), TDI_LOCK)
#define	TD_CLR_SUSPENDED(td)	TD_CLR_INHIB((td), TDI_SUSPENDED)
#define	TD_CLR_IWAIT(td)	TD_CLR_INHIB((td), TDI_IWAIT)

#define	TD_SET_RUNNING(td)	do {(td)->td_state = TDS_RUNNING; } while (0)
#define	TD_SET_RUNQ(td)		do {(td)->td_state = TDS_RUNQ; } while (0)
#define	TD_SET_CAN_RUN(td)	do {(td)->td_state = TDS_CAN_RUN; } while (0)
#define	TD_SET_ON_SLEEPQ(td)	do {(td)->td_flags |= TDF_ONSLEEPQ; } while (0)
#define	TD_CLR_ON_SLEEPQ(td)	do {			\
	(td)->td_flags &= ~TDF_ONSLEEPQ;		\
	(td)->td_wchan = NULL;				\
} while (0)
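/*
 * How the state macros above are meant to be used, in outline: a thread
 * going to sleep is inhibited and switched away from, and the wakeup
 * side clears the inhibitor, which makes the thread runnable again once
 * the last inhibitor is gone.  A hedged sketch of the pattern, not code
 * lifted from the tree (locking as in this era's sleep/wakeup code):
 *
 *	mtx_lock_spin(&sched_lock);
 *	TD_SET_SLEEPING(td);	... TDS_INHIBITED + TDI_SLEEPING ...
 *	mi_switch();		... run something else ...
 *	...
 *	TD_CLR_SLEEPING(td);	... last inhibitor -> TDS_CAN_RUN ...
 *	setrunqueue(td);
 *	mtx_unlock_spin(&sched_lock);
 */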
/*
 * The schedulable entity that can be given a context to run.
 * A process may have several of these.  Probably one per processor
 * but possibly a few more.  In this universe they are grouped
 * with a KSEG that contains the priority and niceness
 * for the group.
 */
struct kse {
	struct proc	*ke_proc;	/* Associated process. */
	struct ksegrp	*ke_ksegrp;	/* Associated KSEG. */
	TAILQ_ENTRY(kse) ke_kglist;	/* Queue of all KSEs in ke_ksegrp. */
	TAILQ_ENTRY(kse) ke_kgrlist;	/* Queue of all KSEs in this state. */
	TAILQ_ENTRY(kse) ke_procq;	/* (j) Run queue. */
#define	ke_startzero ke_flags
	int		ke_flags;	/* (j) KEF_* flags. */
	struct thread	*ke_thread;	/* Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ke_oncpu;	/* (j) Which cpu we are on. */
	char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_UNUSED = 0x0,
		KES_IDLE,
		KES_ONRUNQ,
		KES_UNQUEUED,		/* in transit */
		KES_THREAD		/* slaved to thread state */
	} ke_state;			/* (j) S* process status. */
#define	ke_endzero ke_dummy
	u_char		ke_dummy;
	struct ke_sched	*ke_sched;	/* Scheduler specific data */
};

/* flags kept in ke_flags */
#define	KEF_IDLEKSE	0x00004	/* A 'Per CPU idle process'.. has one thread */
#define	KEF_DIDRUN	0x02000	/* KSE actually ran. */
#define	KEF_EXIT	0x04000	/* KSE is being killed. */

/*
 * The upcall management structure.
 * The upcall is used when returning to userland.  If a thread does not have
 * an upcall on return to userland the thread exports its context and exits.
 */
struct kse_upcall {
	TAILQ_ENTRY(kse_upcall) ku_link;	/* List of upcalls in KSEG. */
	struct ksegrp		*ku_ksegrp;	/* Associated KSEG. */
	struct thread		*ku_owner;	/* owning thread */
	int			ku_flags;	/* KUF_* flags. */
	struct kse_mailbox	*ku_mailbox;	/* userland mailbox address. */
	stack_t			ku_stack;	/* userland upcall stack. */
	void			*ku_func;	/* userland upcall function. */
};
#define	KUF_DOUPCALL	0x00001		/* Do upcall now, don't wait */

/*
 * Kernel-scheduled entity group (KSEG).  The scheduler considers each KSEG to
 * be an indivisible unit from a time-sharing perspective, though each KSEG may
 * contain multiple KSEs.
 */
struct ksegrp {
	struct proc	*kg_proc;	/* Process that contains this KSEG. */
	TAILQ_ENTRY(ksegrp) kg_ksegrp;	/* Queue of KSEGs in kg_proc. */
	TAILQ_HEAD(, kse) kg_kseq;	/* (ke_kglist) All KSEs. */
	TAILQ_HEAD(, kse) kg_iq;	/* (ke_kgrlist) All idle KSEs. */
	TAILQ_HEAD(, thread) kg_threads; /* (td_kglist) All threads. */
	TAILQ_HEAD(, thread) kg_runq;	/* (td_runq) waiting RUNNABLE threads */
	TAILQ_HEAD(, thread) kg_slpq;	/* (td_runq) NONRUNNABLE threads. */
	TAILQ_HEAD(, kse_upcall) kg_upcalls; /* All upcalls in the group */
#define	kg_startzero kg_estcpu
	u_int		kg_estcpu;	/* Sum of the same field in KSEs. */
	u_int		kg_slptime;	/* (j) How long completely blocked. */
	struct thread	*kg_last_assigned; /* (j) Last thread assigned to a KSE */
	int		kg_runnable;	/* (j) Num runnable threads on queue. */
	int		kg_runq_kses;	/* (j) Num KSEs on runq. */
	int		kg_idle_kses;	/* (j) Num KSEs on iq */
	int		kg_numupcalls;	/* (j) Num upcalls */
	int		kg_upsleeps;	/* (c) Num threads in kse_release() */
	struct kse_thr_mailbox *kg_completed; /* (c) completed thread mboxes */
	int		kg_nextupcall;	/* next upcall time */
	int		kg_upquantum;	/* quantum to schedule an upcall */
#define	kg_endzero kg_pri_class

#define	kg_startcopy kg_endzero
	u_char		kg_pri_class;	/* (j) Scheduling class. */
	u_char		kg_user_pri;	/* (j) User pri from estcpu and nice. */
	char		kg_nice;	/* (j?/k?) Process "nice" value. */
#define	kg_endcopy kg_numthreads
	int		kg_numthreads;	/* (j) Num threads in total */
	int		kg_kses;	/* (j) Num KSEs in group. */
	struct kg_sched	*kg_sched;	/* Scheduler specific data */
};

/*
 * The old-fashioned process.  May have multiple threads, KSEGRPs
 * and KSEs.  Starts off with a single embedded KSEGRP, KSE and THREAD.
 */
struct proc {
	LIST_ENTRY(proc) p_list;	/* (d) List of all processes. */
	TAILQ_HEAD(, ksegrp) p_ksegrps;	/* (kg_ksegrp) All KSEGs. */
	TAILQ_HEAD(, thread) p_threads;	/* (td_plist) Threads. (shortcut) */
	TAILQ_HEAD(, thread) p_suspended; /* (td_runq) suspended threads */
	struct ucred	*p_ucred;	/* (c) Process owner's identity. */
	struct filedesc	*p_fd;		/* (b) Ptr to open files structure. */
					/* Accumulated stats for all KSEs? */
	struct pstats	*p_stats;	/* (b) Accounting/statistics (CPU). */
	struct plimit	*p_limit;	/* (m) Process limits. */
	struct vm_object *p_upages_obj;	/* (a) Upages object. */
	struct procsig	*p_procsig;	/* (c) Signal actions, state (CPU). */

	/*struct ksegrp	p_ksegrp;
	struct kse	p_kse; */

	/*
	 * The following don't make too much sense..
	 * See the td_ or ke_ versions of the same flags
	 */
	int		p_flag;		/* (c) P_* flags. */
	int		p_sflag;	/* (j) PS_* flags. */
	enum {
		PRS_NEW = 0,		/* In creation */
		PRS_NORMAL,		/* KSEs can be run */
		PRS_ZOMBIE
	} p_state;			/* (j) S* process status. */
	pid_t		p_pid;		/* (b) Process identifier. */
	LIST_ENTRY(proc) p_hash;	/* (d) Hash chain. */
	LIST_ENTRY(proc) p_pglist;	/* (g + e) List of processes in pgrp. */
	struct proc	*p_pptr;	/* (c + e) Pointer to parent process. */
	LIST_ENTRY(proc) p_sibling;	/* (e) List of sibling processes. */
	LIST_HEAD(, proc) p_children;	/* (e) Pointer to list of children. */
	struct mtx	p_mtx;		/* (k) Lock for this struct. */

/* The following fields are all zeroed upon creation in fork. */
#define	p_startzero	p_oppid
	pid_t		p_oppid;	/* (c + e) Save ppid in ptrace. XXX */
	struct vmspace	*p_vmspace;	/* (b) Address space. */
	u_int		p_swtime;	/* (j) Time swapped in or out. */
	struct itimerval p_realtimer;	/* (c) Alarm timer. */
	struct bintime	p_runtime;	/* (j) Real time. */
	u_int64_t	p_uu;		/* (j) Previous user time in usec. */
	u_int64_t	p_su;		/* (j) Previous system time in usec. */
	u_int64_t	p_iu;		/* (j) Previous intr time in usec. */
	u_int64_t	p_uticks;	/* (j) Statclock hits in user mode. */
	u_int64_t	p_sticks;	/* (j) Statclock hits in system mode. */
	u_int64_t	p_iticks;	/* (j) Statclock hits in intr. */
	int		p_profthreads;	/* (c) Num threads in addupc_task */
	int		p_maxthrwaits;	/* (c) Max threads num waiters */
	int		p_traceflag;	/* (o) Kernel trace points. */
	struct vnode	*p_tracevp;	/* (c + o) Trace to vnode. */
	struct ucred	*p_tracecred;	/* (o) Credentials to trace with. */
	struct vnode	*p_textvp;	/* (b) Vnode of executable. */
	sigset_t	p_siglist;	/* (c) Sigs not delivered to a td. */
	char		p_lock;		/* (c) Proclock (prevent swap) count. */
	struct klist	p_klist;	/* (c) Knotes attached to this proc. */
	struct sigiolst	p_sigiolst;	/* (c) List of sigio sources. */
	int		p_sigparent;	/* (c) Signal to parent on exit. */
	int		p_sig;		/* (n) For core dump/debugger XXX. */
	u_long		p_code;		/* (n) For core dump/debugger XXX. */
	u_int		p_stops;	/* (c) Stop event bitmask. */
	u_int		p_stype;	/* (c) Stop event type. */
	char		p_step;		/* (c) Process is stopped. */
	u_char		p_pfsflags;	/* (c) Procfs flags. */
	struct nlminfo	*p_nlminfo;	/* (?) Only used by/for lockd. */
	void		*p_aioinfo;	/* (c) ASYNC I/O info. */
	struct thread	*p_singlethread; /* (j) If single threading this is it */
	int		p_suspcount;	/* (j) # threads in suspended mode */
/* End area that is zeroed on creation. */
#define	p_endzero	p_sigstk

/* The following fields are all copied upon creation in fork. */
#define	p_startcopy	p_endzero
	stack_t		p_sigstk;	/* (c) Stack ptr and on-stack flag. */
	u_int		p_magic;	/* (b) Magic number. */
	char		p_comm[MAXCOMLEN + 1];	/* (b) Process name. */
	struct pgrp	*p_pgrp;	/* (c + e) Pointer to process group. */
	struct sysentvec *p_sysent;	/* (b) Syscall dispatch info. */
	struct pargs	*p_args;	/* (c) Process arguments. */
	rlim_t		p_cpulimit;	/* (j) Current CPU limit in seconds. */
/* End area that is copied on creation. */
#define	p_endcopy	p_xstat
	u_short		p_xstat;	/* (c) Exit status; also stop sig. */
	int		p_numthreads;	/* (?) number of threads */
	int		p_numksegrps;	/* (?) number of ksegrps */
	struct mdproc	p_md;		/* (c) Any machine-dependent fields. */
	struct callout	p_itcallout;	/* (h) Interval timer callout. */
	struct user	*p_uarea;	/* (k) Kernel VA of u-area (CPU) */
	u_short		p_acflag;	/* (c) Accounting flags. */
	struct rusage	*p_ru;		/* (a) Exit information. XXX */
	struct proc	*p_peers;	/* (r) */
	struct proc	*p_leader;	/* (b) */
	void		*p_emuldata;	/* (c) Emulator state data. */
	struct label	p_label;	/* process (not subject) MAC label */
	struct p_sched	*p_sched;	/* Scheduler specific data */
};

#define	p_rlimit	p_limit->pl_rlimit
#define	p_sigacts	p_procsig->ps_sigacts
#define	p_sigignore	p_procsig->ps_sigignore
#define	p_sigcatch	p_procsig->ps_sigcatch
#define	p_session	p_pgrp->pg_session
#define	p_pgid		p_pgrp->pg_id

#define	NOCPU	0xff		/* For when we aren't on a CPU. (SMP) */

/* Status values (p_stat). */

/* These flags are kept in p_flag. */
#define	P_ADVLOCK	0x00001	/* Process may hold a POSIX advisory lock. */
#define	P_CONTROLT	0x00002	/* Has a controlling terminal. */
#define	P_KTHREAD	0x00004	/* Kernel thread. (*)*/
#define	P_NOLOAD	0x00008	/* Ignore during load avg calculations. */
#define	P_PPWAIT	0x00010	/* Parent is waiting for child to exec/exit. */
#define	P_SUGID		0x00100	/* Had set id privileges since last exec. */
#define	P_SYSTEM	0x00200	/* System proc: no sigs, stats or swapping. */
#define	P_WAITED	0x01000	/* Someone is waiting for us */
#define	P_WEXIT		0x02000	/* Working on exiting. */
#define	P_EXEC		0x04000	/* Process called exec. */
#define	P_THREADED	0x08000	/* Process is using threads. */
#define	P_CONTINUED	0x10000	/* Proc has continued from a stopped state. */
#define	P_PROTECTED	0x20000	/* Do not kill on memory overcommit. */

/* flags that control how threads may be suspended for some reason */
#define	P_STOPPED_SIG	0x20000	/* Stopped due to SIGSTOP/SIGTSTP */
#define	P_STOPPED_TRACE	0x40000	/* Stopped because of tracing */
#define	P_STOPPED_SINGLE 0x80000 /* Only one thread can continue */
				/* (not to user) */
#define	P_SINGLE_EXIT	0x00400	/* Threads suspending should exit, */
				/* not wait */
#define	P_TRACED	0x00800	/* Debugged process being traced. */
#define	P_STOPPED	(P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
#define	P_SHOULDSTOP(p)	((p)->p_flag & P_STOPPED)

/* Should be moved to machine-dependent areas. */
#define	P_UNUSED100000	0x100000
#define	P_COWINPROGRESS	0x400000 /* Snapshot copy-on-write in progress. */

#define	P_JAILED	0x1000000 /* Process is in jail. */
#define	P_ALTSTACK	0x2000000 /* Have alternate signal stack. */
#define	P_INEXEC	0x4000000 /* Process is in execve(). */

/* These flags are kept in p_sflag and are protected with sched_lock. */
#define	PS_INMEM	0x00001	/* Loaded into memory. */
#define	PS_XCPU		0x00002	/* Exceeded CPU limit. */
#define	PS_PROFIL	0x00004	/* Has started profiling. */
#define	PS_STOPPROF	0x00008	/* Has thread in requesting to stop prof */
#define	PS_ALRMPEND	0x00020	/* Pending SIGVTALRM needs to be posted. */
#define	PS_PROFPEND	0x00040	/* Pending SIGPROF needs to be posted. */
#define	PS_SWAPINREQ	0x00100	/* Swapin request due to wakeup. */
#define	PS_SWAPPING	0x00200	/* Process is being swapped. */
#define	PS_SWAPPINGIN	0x04000	/* Swapin in progress. */
#define	PS_MACPEND	0x08000	/* Ast()-based MAC event pending. */

/* used only in legacy conversion code */
#define	SIDL	1		/* Process being created by fork. */
#define	SRUN	2		/* Currently runnable. */
#define	SSLEEP	3		/* Sleeping on an address. */
#define	SSTOP	4		/* Process debugging or suspension. */
#define	SZOMB	5		/* Awaiting collection by parent. */
#define	SWAIT	6		/* Waiting for interrupt. */
#define	SLOCK	7		/* Blocked on a lock. */
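/*
 * Per the locking key, p_flag (c) is protected by the per-process mutex
 * and p_sflag (j) by sched_lock.  A hedged sketch of the corresponding
 * access pattern, not code from the tree:
 *
 *	PROC_LOCK(p);
 *	if ((p->p_flag & P_WEXIT) == 0) {
 *		... p is not exiting; safe to act on it ...
 *	}
 *	PROC_UNLOCK(p);
 *
 *	mtx_lock_spin(&sched_lock);
 *	p->p_sflag |= PS_ALRMPEND;
 *	mtx_unlock_spin(&sched_lock);
 */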
#define	P_MAGIC		0xbeefface

#ifdef	_KERNEL

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
MALLOC_DECLARE(M_PGRP);
MALLOC_DECLARE(M_SESSION);
MALLOC_DECLARE(M_SUBPROC);
MALLOC_DECLARE(M_ZOMBIE);
#endif

#define	FOREACH_PROC_IN_SYSTEM(p)					\
	LIST_FOREACH((p), &allproc, p_list)
#define	FOREACH_KSEGRP_IN_PROC(p, kg)					\
	TAILQ_FOREACH((kg), &(p)->p_ksegrps, kg_ksegrp)
#define	FOREACH_THREAD_IN_GROUP(kg, td)					\
	TAILQ_FOREACH((td), &(kg)->kg_threads, td_kglist)
#define	FOREACH_KSE_IN_GROUP(kg, ke)					\
	TAILQ_FOREACH((ke), &(kg)->kg_kseq, ke_kglist)
#define	FOREACH_UPCALL_IN_GROUP(kg, ku)					\
	TAILQ_FOREACH((ku), &(kg)->kg_upcalls, ku_link)
#define	FOREACH_THREAD_IN_PROC(p, td)					\
	TAILQ_FOREACH((td), &(p)->p_threads, td_plist)

/* XXXKSE the lines below should probably only be used in 1:1 code */
#define	FIRST_THREAD_IN_PROC(p)	TAILQ_FIRST(&p->p_threads)
#define	FIRST_KSEGRP_IN_PROC(p)	TAILQ_FIRST(&p->p_ksegrps)
#define	FIRST_KSE_IN_KSEGRP(kg)	TAILQ_FIRST(&kg->kg_kseq)
#define	FIRST_KSE_IN_PROC(p)	FIRST_KSE_IN_KSEGRP(FIRST_KSEGRP_IN_PROC(p))

static __inline int
sigonstack(size_t sp)
{
	register struct thread *td = curthread;
	struct proc *p = td->td_proc;

	return ((p->p_flag & P_ALTSTACK) ?
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	    ((p->p_sigstk.ss_size == 0) ?
		(p->p_sigstk.ss_flags & SS_ONSTACK) :
		((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size))
#else
	    ((sp - (size_t)p->p_sigstk.ss_sp) < p->p_sigstk.ss_size)
#endif
	    : 0);
}

/*
 * We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t,
 * as it is used to represent "no process group".
 */
#define	PID_MAX		99999
#define	NO_PID		100000

#define	SESS_LEADER(p)	((p)->p_session->s_leader == (p))
#define	SESSHOLD(s)	((s)->s_count++)
#define	SESSRELE(s) {							\
	if (--(s)->s_count == 0)					\
		FREE(s, M_SESSION);					\
}

#define	STOPEVENT(p, e, v) do {						\
	PROC_LOCK(p);							\
	_STOPEVENT((p), (e), (v));					\
	PROC_UNLOCK(p);							\
} while (0)
#define	_STOPEVENT(p, e, v) do {					\
	PROC_LOCK_ASSERT(p, MA_OWNED);					\
	if ((p)->p_stops & (e)) {					\
		stopevent((p), (e), (v));				\
	}								\
} while (0)

/* Lock and unlock a process. */
#define	PROC_LOCK(p)	mtx_lock(&(p)->p_mtx)
#define	PROC_TRYLOCK(p)	mtx_trylock(&(p)->p_mtx)
#define	PROC_UNLOCK(p)	mtx_unlock(&(p)->p_mtx)
#define	PROC_LOCKED(p)	mtx_owned(&(p)->p_mtx)
#define	PROC_LOCK_ASSERT(p, type)	mtx_assert(&(p)->p_mtx, (type))

/* Lock and unlock a process group. */
#define	PGRP_LOCK(pg)	mtx_lock(&(pg)->pg_mtx)
#define	PGRP_UNLOCK(pg)	mtx_unlock(&(pg)->pg_mtx)
#define	PGRP_LOCKED(pg)	mtx_owned(&(pg)->pg_mtx)
#define	PGRP_LOCK_ASSERT(pg, type)	mtx_assert(&(pg)->pg_mtx, (type))

#define	PGRP_LOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_LOCK(pg);						\
} while (0);
#define	PGRP_UNLOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_UNLOCK(pg);					\
} while (0);

/* Lock and unlock a session. */
#define	SESS_LOCK(s)	mtx_lock(&(s)->s_mtx)
#define	SESS_UNLOCK(s)	mtx_unlock(&(s)->s_mtx)
#define	SESS_LOCKED(s)	mtx_owned(&(s)->s_mtx)
#define	SESS_LOCK_ASSERT(s, type)	mtx_assert(&(s)->s_mtx, (type))

/* Hold process U-area in memory, normally for ptrace/procfs work. */
#define	PHOLD(p) do {							\
	PROC_LOCK(p);							\
	_PHOLD(p);							\
	PROC_UNLOCK(p);							\
} while (0)
#define	_PHOLD(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	if ((p)->p_lock++ == 0) {					\
		mtx_lock_spin(&sched_lock);				\
		faultin((p));						\
		mtx_unlock_spin(&sched_lock);				\
	}								\
} while (0)

#define	PRELE(p) do {							\
	PROC_LOCK((p));							\
	_PRELE((p));							\
	PROC_UNLOCK((p));						\
} while (0)
#define	_PRELE(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	(--(p)->p_lock);						\
} while (0)
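/*
 * Typical use of the iteration and locking macros above, as a hedged
 * sketch rather than code lifted from the tree: hold the process lock
 * while walking the process's thread list.
 *
 *	struct thread *td;
 *
 *	PROC_LOCK(p);
 *	FOREACH_THREAD_IN_PROC(p, td) {
 *		... examine or mark td ...
 *	}
 *	PROC_UNLOCK(p);
 */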
/* Check whether a thread is safe to be swapped out. */
#define	thread_safetoswapout(td) (TD_IS_SLEEPING(td) || TD_IS_SUSPENDED(td))

/* Lock and unlock process arguments. */
#define	PARGS_LOCK(p)	mtx_lock(&pargs_ref_lock)
#define	PARGS_UNLOCK(p)	mtx_unlock(&pargs_ref_lock)

#define	PIDHASH(pid)	(&pidhashtbl[(pid) & pidhash])
extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
extern u_long pidhash;

#define	PGRPHASH(pgid)	(&pgrphashtbl[(pgid) & pgrphash])
extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;

extern struct sx allproc_lock;
extern struct sx proctree_lock;
extern struct mtx pargs_ref_lock;
extern struct mtx ppeers_lock;
extern struct proc proc0;		/* Process slot for swapper. */
extern struct thread thread0;		/* Primary thread in proc0 */
extern struct ksegrp ksegrp0;		/* Primary ksegrp in proc0 */
extern struct kse kse0;			/* Primary kse in proc0 */
+extern struct vmspace vmspace0;		/* VM space for proc0. */
extern int hogticks;			/* Limit on kernel cpu hogs. */
extern int nprocs, maxproc;		/* Current and max number of procs. */
extern int maxprocperuid;		/* Max procs per uid. */
extern u_long ps_arg_cache_limit;
extern int ps_argsopen;
extern int ps_showallprocs;
extern int sched_quantum;		/* Scheduling quantum in ticks. */

LIST_HEAD(proclist, proc);
TAILQ_HEAD(procqueue, proc);
TAILQ_HEAD(threadqueue, thread);
extern struct proclist allproc;		/* List of all processes. */
extern struct proclist zombproc;	/* List of zombie processes. */
extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */
extern struct proc *updateproc;		/* Process slot for syncer (sic). */

extern struct uma_zone *proc_zone;

extern int lastpid;

struct	proc *pfind(pid_t);		/* Find process by id. */
struct	pgrp *pgfind(pid_t);		/* Find process group by id. */
struct	proc *zpfind(pid_t);		/* Find zombie process by id. */

void	adjustrunqueue(struct thread *, int newpri);
void	ast(struct trapframe *framep);
struct	thread *choosethread(void);
int	cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
int	enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
	    struct session *sess);
int	enterthispgrp(struct proc *p, struct pgrp *pgrp);
void	faultin(struct proc *p);
void	fixjobc(struct proc *p, struct pgrp *pgrp, int entering);
int	fork1(struct thread *, int, int, struct proc **);
void	fork_exit(void (*)(void *, struct trapframe *), void *,
	    struct trapframe *);
void	fork_return(struct thread *, struct trapframe *);
int	inferior(struct proc *p);
int	leavepgrp(struct proc *p);
void	mi_switch(void);
int	p_candebug(struct thread *td, struct proc *p);
int	p_cansee(struct thread *td, struct proc *p);
int	p_cansched(struct thread *td, struct proc *p);
int	p_cansignal(struct thread *td, struct proc *p, int signum);
struct	pargs *pargs_alloc(int len);
void	pargs_drop(struct pargs *pa);
void	pargs_free(struct pargs *pa);
void	pargs_hold(struct pargs *pa);
void	procinit(void);
void	threadinit(void);
void	proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td);
void	proc_reparent(struct proc *child, struct proc *newparent);
int	securelevel_ge(struct ucred *cr, int level);
int	securelevel_gt(struct ucred *cr, int level);
void	setrunnable(struct thread *);
void	setrunqueue(struct thread *);
void	setsugid(struct proc *p);
void	sleepinit(void);
void	stopevent(struct proc *, u_int, u_int);
void	cpu_idle(void);
#if defined(__i386__) || defined(__sparc64__)
void	cpu_switch(struct thread *old, struct thread *new);
void	cpu_throw(struct thread *old, struct thread *new) __dead2;
#else
void	cpu_switch(void);
void	cpu_throw(void) __dead2;
#endif
void	unsleep(struct thread *);
void	userret(struct thread *, struct trapframe *, u_int);

void	cpu_exit(struct thread *);
void	cpu_sched_exit(struct thread *);
void	exit1(struct thread *, int) __dead2;
void	cpu_fork(struct thread *, struct proc *, struct thread *, int);
void	cpu_set_fork_handler(struct thread *, void (*)(void *), void *);
void	cpu_wait(struct proc *);

/* New in KSE. */
struct	ksegrp *ksegrp_alloc(void);
void	ksegrp_free(struct ksegrp *kg);
void	ksegrp_stash(struct ksegrp *kg);
struct	kse *kse_alloc(void);
void	kse_free(struct kse *ke);
void	kse_stash(struct kse *ke);
void	cpu_set_upcall(struct thread *td, void *pcb);
void	cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku);
void	cpu_thread_clean(struct thread *);
void	cpu_thread_exit(struct thread *);
void	cpu_thread_setup(struct thread *td);
void	kse_reassign(struct kse *ke);
void	kse_link(struct kse *ke, struct ksegrp *kg);
void	kse_unlink(struct kse *ke);
void	ksegrp_link(struct ksegrp *kg, struct proc *p);
void	ksegrp_unlink(struct ksegrp *kg);
void	thread_signal_add(struct thread *td, int sig);
void	thread_signal_upcall(struct thread *td);
struct	thread *thread_alloc(void);
void	thread_exit(void) __dead2;
int	thread_export_context(struct thread *td);
void	thread_free(struct thread *td);
void	thread_getcontext(struct thread *td, ucontext_t *uc);
void	thread_link(struct thread *td, struct ksegrp *kg);
void	thread_reap(void);
struct	thread *thread_schedule_upcall(struct thread *td,
	    struct kse_upcall *ku);
int	thread_setcontext(struct thread *td, ucontext_t *uc);
int	thread_single(int how);
#define	SINGLE_NO_EXIT	0	/* values for 'how' */
#define	SINGLE_EXIT	1
void	thread_single_end(void);
void	thread_stash(struct thread *td);
int	thread_suspend_check(int how);
void	thread_suspend_one(struct thread *td);
void	thread_unsuspend(struct proc *p);
void	thread_unsuspend_one(struct thread *td);
int	thread_userret(struct thread *td, struct trapframe *frame);
void	thread_user_enter(struct proc *p, struct thread *td);
void	thread_wait(struct proc *p);
int	thread_statclock(int user);
struct	kse_upcall *upcall_alloc(void);
void	upcall_free(struct kse_upcall *ku);
void	upcall_link(struct kse_upcall *ku, struct ksegrp *kg);
void	upcall_unlink(struct kse_upcall *ku);
void	upcall_remove(struct thread *td);
void	upcall_stash(struct kse_upcall *ke);
void	thread_sanity_check(struct thread *td, char *);
void	thread_stopped(struct proc *p);
void	thread_switchout(struct thread *td);
void	thr_exit1(void);
#endif	/* _KERNEL */

#endif	/* !_SYS_PROC_H_ */