Index: head/sys/kern/imgact_elf.c =================================================================== --- head/sys/kern/imgact_elf.c (revision 131148) +++ head/sys/kern/imgact_elf.c (revision 131149) @@ -1,1300 +1,1298 @@ /*- * Copyright (c) 2000 David O'Brien * Copyright (c) 1995-1996 Søren Schmidt * Copyright (c) 1996 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define OLD_EI_BRAND 8 static int __elfN(check_header)(const Elf_Ehdr *hdr); static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp); static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry, size_t pagesize); static int __elfN(load_section)(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_object_t object, vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot, size_t pagesize); static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0, ""); int __elfN(fallback_brand) = -1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort"); TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand", &__elfN(fallback_brand)); static int elf_trace = 0; SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, ""); static int elf_legacy_coredump = 0; SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, &elf_legacy_coredump, 0, ""); static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; int __elfN(insert_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == NULL) { elf_brand_list[i] = entry; break; } } if (i == MAX_BRANDS) return (-1); return (0); } int 
__elfN(remove_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == entry) { elf_brand_list[i] = NULL; break; } } if (i == MAX_BRANDS) return (-1); return (0); } int __elfN(brand_inuse)(Elf_Brandinfo *entry) { struct proc *p; int rval = FALSE; sx_slock(&allproc_lock); LIST_FOREACH(p, &allproc, p_list) { if (p->p_sysent == entry->sysvec) { rval = TRUE; break; } } sx_sunlock(&allproc_lock); return (rval); } static Elf_Brandinfo * __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp) { Elf_Brandinfo *bi; int i; /* * We support three types of branding -- (1) the ELF EI_OSABI field * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string * branding w/in the ELF header, and (3) path of the `interp_path' * field. We should also look for an ".note.ABI-tag" ELF section now * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones. */ /* If the executable has a brand, search for it in the brand list. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi != NULL && hdr->e_machine == bi->machine && (hdr->e_ident[EI_OSABI] == bi->brand || strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND], bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0)) return (bi); } /* Lacking a known brand, search for a recognized interpreter. */ if (interp != NULL) { for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi != NULL && hdr->e_machine == bi->machine && strcmp(interp, bi->interp_path) == 0) return (bi); } } /* Lacking a recognized interpreter, try the default brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi != NULL && hdr->e_machine == bi->machine && __elfN(fallback_brand) == bi->brand) return (bi); } return (NULL); } static int __elfN(check_header)(const Elf_Ehdr *hdr) { Elf_Brandinfo *bi; int i; if (!IS_ELF(*hdr) || hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || hdr->e_ident[EI_DATA] != ELF_TARG_DATA || hdr->e_ident[EI_VERSION] != EV_CURRENT || hdr->e_phentsize != sizeof(Elf_Phdr) || hdr->e_version != ELF_TARG_VER) return (ENOEXEC); /* * Make sure we have at least one brand for this machine. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi != NULL && bi->machine == hdr->e_machine) break; } if (i == MAX_BRANDS) return (ENOEXEC); return (0); } static int __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max) { int error, rv; vm_offset_t off; vm_offset_t data_buf = 0; /* * Create the page if it doesn't exist yet. Ignore errors. */ vm_map_lock(map); vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max, max, 0); vm_map_unlock(map); /* * Find the page from the underlying object. 
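 * The strategy: take a temporary read-only mapping of the backing
 * page in the kernel's exec_map, copyout() the sub-page range into
 * the user mapping created above, and then remove the temporary
 * kernel mapping again.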
*/ if (object) { vm_object_reference(object); rv = vm_map_find(exec_map, object, trunc_page(offset), &data_buf, PAGE_SIZE, TRUE, VM_PROT_READ, VM_PROT_ALL, MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL); if (rv != KERN_SUCCESS) { vm_object_deallocate(object); return (rv); } off = offset - trunc_page(offset); error = copyout((caddr_t)data_buf + off, (caddr_t)start, end - start); vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE); if (error) { return (KERN_FAILURE); } } return (KERN_SUCCESS); } static int __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow) { vm_offset_t data_buf, off; vm_size_t sz; int error, rv; if (start != trunc_page(start)) { rv = __elfN(map_partial)(map, object, offset, start, round_page(start), prot, max); if (rv) return (rv); offset += round_page(start) - start; start = round_page(start); } if (end != round_page(end)) { rv = __elfN(map_partial)(map, object, offset + trunc_page(end) - start, trunc_page(end), end, prot, max); if (rv) return (rv); end = trunc_page(end); } if (end > start) { if (offset & PAGE_MASK) { /* * The mapping is not page aligned. This means we have * to copy the data. Sigh. */ rv = vm_map_find(map, 0, 0, &start, end - start, FALSE, prot, max, 0); if (rv) return (rv); data_buf = 0; while (start < end) { vm_object_reference(object); rv = vm_map_find(exec_map, object, trunc_page(offset), &data_buf, 2 * PAGE_SIZE, TRUE, VM_PROT_READ, VM_PROT_ALL, (MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL)); if (rv != KERN_SUCCESS) { vm_object_deallocate(object); return (rv); } off = offset - trunc_page(offset); sz = end - start; if (sz > PAGE_SIZE) sz = PAGE_SIZE; error = copyout((caddr_t)data_buf + off, (caddr_t)start, sz); vm_map_remove(exec_map, data_buf, data_buf + 2 * PAGE_SIZE); if (error) { return (KERN_FAILURE); } start += sz; } rv = KERN_SUCCESS; } else { vm_map_lock(map); rv = vm_map_insert(map, object, offset, start, end, prot, max, cow); vm_map_unlock(map); } return (rv); } else { return (KERN_SUCCESS); } } static int __elfN(load_section)(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_object_t object, vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot, size_t pagesize) { size_t map_len; vm_offset_t map_addr; int error, rv, cow; size_t copy_len; vm_offset_t file_addr; vm_offset_t data_buf = 0; GIANT_REQUIRED; error = 0; /* * It's necessary to fail if the filsz + offset taken from the * header is greater than the actual file pager object's size. * If we were to allow this, then the vm_map_find() below would * walk right off the end of the file object and into the ether. * * While I'm here, might as well check for something else that * is invalid: filsz cannot be greater than memsz. */ if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size || filsz > memsz) { uprintf("elf_load_section: truncated ELF file\n"); return (ENOEXEC); } #define trunc_page_ps(va, ps) ((va) & ~(ps - 1)) #define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1)) map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize); file_addr = trunc_page_ps(offset, pagesize); /* * We have two choices. We can either clear the data in the last page * of an oversized mapping, or we can start the anon mapping a page * early and copy the initialized data into that first page. We * choose the second.. 
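 * Illustrative example (values assumed, not from this file): with a
 * 0x1000-byte page size, offset 0, filsz 0x1800 and memsz 0x3000,
 * the file-backed mapping below covers only the first page, and the
 * remaining 0x800 initialized bytes are copied into the first page
 * of the anonymous (bss) mapping created further down.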
*/ if (memsz > filsz) map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr; else map_len = round_page_ps(offset + filsz, pagesize) - file_addr; if (map_len != 0) { vm_object_reference(object); /* cow flags: don't dump readonly sections in core */ cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); rv = __elfN(map_insert)(&vmspace->vm_map, object, file_addr, /* file offset */ map_addr, /* virtual start */ map_addr + map_len,/* virtual end */ prot, VM_PROT_ALL, cow); if (rv != KERN_SUCCESS) { vm_object_deallocate(object); return (EINVAL); } /* we can stop now if we've covered it all */ if (memsz == filsz) { return (0); } } /* * We have to get the remaining bit of the file into the first part * of the oversized map segment. This is normally because the .data * segment in the file is extended to provide bss. It's a neat idea * to try and save a page, but it's a pain in the behind to implement. */ copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize); map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize); map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) - map_addr; /* This had damn well better be true! */ if (map_len != 0) { rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr, map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0); if (rv != KERN_SUCCESS) { return (EINVAL); } } if (copy_len != 0) { vm_offset_t off; vm_object_reference(object); rv = vm_map_find(exec_map, object, trunc_page(offset + filsz), &data_buf, PAGE_SIZE, TRUE, VM_PROT_READ, VM_PROT_ALL, MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL); if (rv != KERN_SUCCESS) { vm_object_deallocate(object); return (EINVAL); } /* send the page fragment to user space */ off = trunc_page_ps(offset + filsz, pagesize) - trunc_page(offset + filsz); error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr, copy_len); vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE); if (error) { return (error); } } /* * set it to the specified protection. * XXX had better undo the damage from pasting over the cracks here! */ vm_map_protect(&vmspace->vm_map, trunc_page(map_addr), round_page(map_addr + map_len), prot, FALSE); return (error); } /* * Load the file "file" into memory. It may be either a shared object * or an executable. * * The "addr" reference parameter is in/out. On entry, it specifies * the address where a shared object should be loaded. If the file is * an executable, this value is ignored. On exit, "addr" specifies * where the file was actually loaded. * * The "entry" reference parameter is out only. On exit, it specifies * the entry point for the loaded file. 
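 * On failure an errno is returned and neither output parameter is
 * updated.
 *
 * Illustrative call, as made for the program interpreter (the path
 * is an assumed example):
 *
 *	error = __elfN(load_file)(imgp->proc, "/libexec/ld-elf.so.1",
 *	    &addr, &imgp->entry_addr, sv->sv_pagesize);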
*/ static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry, size_t pagesize) { struct { struct nameidata nd; struct vattr attr; struct image_params image_params; } *tempdata; const Elf_Ehdr *hdr = NULL; const Elf_Phdr *phdr = NULL; struct nameidata *nd; struct vmspace *vmspace = p->p_vmspace; struct vattr *attr; struct image_params *imgp; vm_prot_t prot; u_long rbase; u_long base_addr = 0; int error, i, numsegs; if (curthread->td_proc != p) panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */ tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK); nd = &tempdata->nd; attr = &tempdata->attr; imgp = &tempdata->image_params; /* * Initialize part of the common data */ imgp->proc = p; imgp->userspace_argv = NULL; imgp->userspace_envv = NULL; imgp->attr = attr; imgp->firstpage = NULL; imgp->image_header = NULL; imgp->object = NULL; imgp->execlabel = NULL; /* XXXKSE */ NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread); if ((error = namei(nd)) != 0) { nd->ni_vp = NULL; goto fail; } NDFREE(nd, NDF_ONLY_PNBUF); imgp->vp = nd->ni_vp; /* * Check permissions, modes, uid, etc on the file, and "open" it. */ error = exec_check_permissions(imgp); if (error) { VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */ goto fail; } error = exec_map_first_page(imgp); /* * Also make certain that the interpreter stays the same, so set * its VV_TEXT flag, too. */ if (error == 0) nd->ni_vp->v_vflag |= VV_TEXT; VOP_GETVOBJECT(nd->ni_vp, &imgp->object); vm_object_reference(imgp->object); VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */ if (error) goto fail; hdr = (const Elf_Ehdr *)imgp->image_header; if ((error = __elfN(check_header)(hdr)) != 0) goto fail; if (hdr->e_type == ET_DYN) rbase = *addr; else if (hdr->e_type == ET_EXEC) rbase = 0; else { error = ENOEXEC; goto fail; } /* Only support headers that fit within first page for now */ /* (multiplication of two Elf_Half fields will not overflow) */ if ((hdr->e_phoff > PAGE_SIZE) || (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) { error = ENOEXEC; goto fail; } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type == PT_LOAD) { /* Loadable segment */ prot = 0; if (phdr[i].p_flags & PF_X) prot |= VM_PROT_EXECUTE; if (phdr[i].p_flags & PF_W) prot |= VM_PROT_WRITE; if (phdr[i].p_flags & PF_R) prot |= VM_PROT_READ; if ((error = __elfN(load_section)(p, vmspace, nd->ni_vp, imgp->object, phdr[i].p_offset, (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, phdr[i].p_memsz, phdr[i].p_filesz, prot, pagesize)) != 0) goto fail; /* * Establish the base address if this is the * first segment. 
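 * For ET_DYN objects the base address thus reflects the load bias
 * (rbase) requested by the caller; for ET_EXEC it is simply the
 * first segment's linked address, truncated to a page boundary.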
*/ if (numsegs == 0) base_addr = trunc_page(phdr[i].p_vaddr + rbase); numsegs++; } } *addr = base_addr; *entry = (unsigned long)hdr->e_entry + rbase; fail: if (imgp->firstpage) exec_unmap_first_page(imgp); if (imgp->object) vm_object_deallocate(imgp->object); if (nd->ni_vp) vrele(nd->ni_vp); free(tempdata, M_TEMP); return (error); } static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) { const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; const Elf_Phdr *phdr; Elf_Auxargs *elf_auxargs = NULL; struct vmspace *vmspace; vm_prot_t prot; u_long text_size = 0, data_size = 0, total_size = 0; u_long text_addr = 0, data_addr = 0; u_long seg_size, seg_addr; u_long addr, entry = 0, proghdr = 0; int error, i; const char *interp = NULL; Elf_Brandinfo *brand_info; char *path; struct thread *td = curthread; struct sysentvec *sv; GIANT_REQUIRED; /* * Do we have a valid ELF header ? */ if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC) return (-1); /* * From here on down, we return an errno, not -1, as we've * detected an ELF file. */ if ((hdr->e_phoff > PAGE_SIZE) || (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) { /* Only support headers in first page for now */ return (ENOEXEC); } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); /* * From this point on, we may have resources that need to be freed. */ VOP_UNLOCK(imgp->vp, 0, td); for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_INTERP: /* Path to interpreter */ if (phdr[i].p_filesz > MAXPATHLEN || phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) { error = ENOEXEC; goto fail; } interp = imgp->image_header + phdr[i].p_offset; break; default: break; } } brand_info = __elfN(get_brandinfo)(hdr, interp); if (brand_info == NULL) { uprintf("ELF binary type \"%u\" not known.\n", hdr->e_ident[EI_OSABI]); error = ENOEXEC; goto fail; } sv = brand_info->sysvec; if (interp != NULL && brand_info->interp_newpath != NULL) interp = brand_info->interp_newpath; if ((error = exec_extract_strings(imgp)) != 0) goto fail; exec_new_vmspace(imgp, sv); vmspace = imgp->proc->p_vmspace; for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_LOAD: /* Loadable segment */ prot = 0; if (phdr[i].p_flags & PF_X) prot |= VM_PROT_EXECUTE; if (phdr[i].p_flags & PF_W) prot |= VM_PROT_WRITE; if (phdr[i].p_flags & PF_R) prot |= VM_PROT_READ; #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER) /* * Some x86 binaries assume read == executable, * notably the M3 runtime and therefore cvsup */ if (prot & VM_PROT_READ) prot |= VM_PROT_EXECUTE; #endif if ((error = __elfN(load_section)(imgp->proc, vmspace, imgp->vp, imgp->object, phdr[i].p_offset, (caddr_t)(uintptr_t)phdr[i].p_vaddr, phdr[i].p_memsz, phdr[i].p_filesz, prot, sv->sv_pagesize)) != 0) goto fail; seg_addr = trunc_page(phdr[i].p_vaddr); seg_size = round_page(phdr[i].p_memsz + phdr[i].p_vaddr - seg_addr); /* * Is this .text or .data? We can't use * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the * alpha terribly and possibly does other bad * things so we stick to the old way of figuring * it out: If the segment contains the program * entry point, it's a text segment, otherwise it * is a data segment. * * Note that obreak() assumes that data_addr + * data_size == end of data load area, and the ELF * file format expects segments to be sorted by * address. If multiple data segments exist, the * last one will be used. 
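 * Illustrative example (addresses assumed): with e_entry at 0x1000,
 * a segment covering [0x1000, 0x5000) is counted as text below,
 * while one covering [0x8000, 0xa000) is counted as data.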
*/ if (hdr->e_entry >= phdr[i].p_vaddr && hdr->e_entry < (phdr[i].p_vaddr + phdr[i].p_memsz)) { text_size = seg_size; text_addr = seg_addr; entry = (u_long)hdr->e_entry; } else { data_size = seg_size; data_addr = seg_addr; } total_size += seg_size; break; case PT_PHDR: /* Program header table info */ proghdr = phdr[i].p_vaddr; break; default: break; } } if (data_addr == 0 && data_size == 0) { data_addr = text_addr; data_size = text_size; } /* * Check limits. It should be safe to check the * limits after loading the segments since we do * not actually fault in all the segments pages. */ PROC_LOCK(imgp->proc); if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) || text_size > maxtsiz || total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) { PROC_UNLOCK(imgp->proc); error = ENOMEM; goto fail; } vmspace->vm_tsize = text_size >> PAGE_SHIFT; vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; vmspace->vm_dsize = data_size >> PAGE_SHIFT; vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr; /* * We load the dynamic linker where a userland call * to mmap(0, ...) would put it. The rationale behind this * calculation is that it leaves room for the heap to grow to * its maximum allowed size. */ addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr + lim_max(imgp->proc, RLIMIT_DATA)); PROC_UNLOCK(imgp->proc); imgp->entry_addr = entry; imgp->proc->p_sysent = sv; if (interp != NULL && brand_info->emul_path != NULL && brand_info->emul_path[0] != '\0') { path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path, interp); error = __elfN(load_file)(imgp->proc, path, &addr, &imgp->entry_addr, sv->sv_pagesize); free(path, M_TEMP); if (error == 0) interp = NULL; } if (interp != NULL) { error = __elfN(load_file)(imgp->proc, interp, &addr, &imgp->entry_addr, sv->sv_pagesize); if (error != 0) { uprintf("ELF interpreter %s not found\n", interp); goto fail; } } /* * Construct auxargs table (used by the fixup routine) */ elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); elf_auxargs->execfd = -1; elf_auxargs->phdr = proghdr; elf_auxargs->phent = hdr->e_phentsize; elf_auxargs->phnum = hdr->e_phnum; elf_auxargs->pagesz = PAGE_SIZE; elf_auxargs->base = addr; elf_auxargs->flags = 0; elf_auxargs->entry = entry; elf_auxargs->trace = elf_trace; imgp->auxargs = elf_auxargs; imgp->interpreted = 0; fail: vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); return (error); } #define suword __CONCAT(suword, __ELF_WORD_SIZE) int __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp) { Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs; Elf_Addr *base; Elf_Addr *pos; base = (Elf_Addr *)*stack_base; pos = base + (imgp->argc + imgp->envc + 2); if (args->trace) { AUXARGS_ENTRY(pos, AT_DEBUG, 1); } if (args->execfd != -1) { AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); } AUXARGS_ENTRY(pos, AT_PHDR, args->phdr); AUXARGS_ENTRY(pos, AT_PHENT, args->phent); AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); AUXARGS_ENTRY(pos, AT_BASE, args->base); AUXARGS_ENTRY(pos, AT_NULL, 0); free(imgp->auxargs, M_TEMP); imgp->auxargs = NULL; base--; suword(base, (long)imgp->argc); *stack_base = (register_t *)base; return (0); } /* * Code for generating ELF core dumps. */ typedef void (*segment_callback)(vm_map_entry_t, void *); /* Closure for cb_put_phdr(). 
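 * It carries the next program header slot to fill in and the running
 * file offset of the segment data while each_writable_segment()
 * walks the map.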
*/ struct phdr_closure { Elf_Phdr *phdr; /* Program header to fill in */ Elf_Off offset; /* Offset of segment in core file */ }; /* Closure for cb_size_segment(). */ struct sseg_closure { int count; /* Count of writable segments. */ size_t size; /* Total size of all writable segments. */ }; static void cb_put_phdr(vm_map_entry_t, void *); static void cb_size_segment(vm_map_entry_t, void *); -static void each_writable_segment(struct proc *, segment_callback, void *); +static void each_writable_segment(struct thread *, segment_callback, void *); static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *, int, void *, size_t); -static void __elfN(puthdr)(struct proc *, void *, size_t *, int); +static void __elfN(puthdr)(struct thread *, void *, size_t *, int); static void __elfN(putnote)(void *, size_t *, const char *, int, const void *, size_t); extern int osreldate; int __elfN(coredump)(td, vp, limit) struct thread *td; - register struct vnode *vp; + struct vnode *vp; off_t limit; { - register struct proc *p = td->td_proc; - register struct ucred *cred = td->td_ucred; + struct ucred *cred = td->td_ucred; int error = 0; struct sseg_closure seginfo; void *hdr; size_t hdrsize; /* Size the program segments. */ seginfo.count = 0; seginfo.size = 0; - each_writable_segment(p, cb_size_segment, &seginfo); + each_writable_segment(td, cb_size_segment, &seginfo); /* * Calculate the size of the core file header area by making * a dry run of generating it. Nothing is written, but the * size is calculated. */ hdrsize = 0; - __elfN(puthdr)(p, (void *)NULL, &hdrsize, seginfo.count); + __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count); if (hdrsize + seginfo.size >= limit) return (EFAULT); /* * Allocate memory for building the header, fill it up, * and write it out. */ hdr = malloc(hdrsize, M_TEMP, M_WAITOK); if (hdr == NULL) { return (EINVAL); } error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize); /* Write the contents of all of the writable segments. */ if (error == 0) { Elf_Phdr *php; off_t offset; int i; php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; offset = hdrsize; for (i = 0; i < seginfo.count; i++) { error = vn_rdwr_inchunks(UIO_WRITE, vp, (caddr_t)(uintptr_t)php->p_vaddr, php->p_filesz, offset, UIO_USERSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL, curthread); /* XXXKSE */ if (error != 0) break; offset += php->p_filesz; php++; } } free(hdr, M_TEMP); return (error); } /* * A callback for each_writable_segment() to write out the segment's * program header entry. */ static void cb_put_phdr(entry, closure) vm_map_entry_t entry; void *closure; { struct phdr_closure *phc = (struct phdr_closure *)closure; Elf_Phdr *phdr = phc->phdr; phc->offset = round_page(phc->offset); phdr->p_type = PT_LOAD; phdr->p_offset = phc->offset; phdr->p_vaddr = entry->start; phdr->p_paddr = 0; phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; phdr->p_align = PAGE_SIZE; phdr->p_flags = 0; if (entry->protection & VM_PROT_READ) phdr->p_flags |= PF_R; if (entry->protection & VM_PROT_WRITE) phdr->p_flags |= PF_W; if (entry->protection & VM_PROT_EXECUTE) phdr->p_flags |= PF_X; phc->offset += phdr->p_filesz; phc->phdr++; } /* * A callback for each_writable_segment() to gather information about * the number of segments and their total size. 
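 * This is the sizing half of the two-pass scheme: the writable
 * segments are measured first so that the header area can be
 * allocated before anything is written out.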
*/ static void cb_size_segment(entry, closure) vm_map_entry_t entry; void *closure; { struct sseg_closure *ssc = (struct sseg_closure *)closure; ssc->count++; ssc->size += entry->end - entry->start; } /* * For each writable segment in the process's memory map, call the given * function with a pointer to the map entry and some arbitrary * caller-supplied data. */ static void -each_writable_segment(p, func, closure) - struct proc *p; +each_writable_segment(td, func, closure) + struct thread *td; segment_callback func; void *closure; { + struct proc *p = td->td_proc; vm_map_t map = &p->p_vmspace->vm_map; vm_map_entry_t entry; for (entry = map->header.next; entry != &map->header; entry = entry->next) { vm_object_t obj; /* * Don't dump inaccessible mappings, deal with legacy * coredump mode. * * Note that read-only segments related to the elf binary * are marked MAP_ENTRY_NOCOREDUMP now so we no longer * need to arbitrarily ignore such segments. */ if (elf_legacy_coredump) { if ((entry->protection & VM_PROT_RW) != VM_PROT_RW) continue; } else { if ((entry->protection & VM_PROT_ALL) == 0) continue; } /* * Dont include memory segment in the coredump if * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in * madvise(2). Do not dump submaps (i.e. parts of the * kernel map). */ if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP)) continue; if ((obj = entry->object.vm_object) == NULL) continue; /* Find the deepest backing object. */ while (obj->backing_object != NULL) obj = obj->backing_object; /* Ignore memory-mapped devices and such things. */ if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP && obj->type != OBJT_VNODE) continue; (*func)(entry, closure); } } /* * Write the core file header to the file, including padding up to * the page boundary. */ static int __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize) struct thread *td; struct vnode *vp; struct ucred *cred; int numsegs; size_t hdrsize; void *hdr; { - struct proc *p = td->td_proc; size_t off; /* Fill in the header. */ bzero(hdr, hdrsize); off = 0; - __elfN(puthdr)(p, hdr, &off, numsegs); + __elfN(puthdr)(td, hdr, &off, numsegs); /* Write it to the core file. */ return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0, UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL, td)); /* XXXKSE */ } static void -__elfN(puthdr)(struct proc *p, void *dst, size_t *off, int numsegs) +__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs) { struct { prstatus_t status; prfpregset_t fpregset; prpsinfo_t psinfo; } *tempdata; prstatus_t *status; prfpregset_t *fpregset; prpsinfo_t *psinfo; - struct thread *first, *thr; + struct proc *p; + struct thread *thr; size_t ehoff, noteoff, notesz, phoff; + p = td->td_proc; + ehoff = *off; *off += sizeof(Elf_Ehdr); phoff = *off; *off += (numsegs + 1) * sizeof(Elf_Phdr); noteoff = *off; /* * Don't allocate space for the notes if we're just calculating * the size of the header. We also don't collect the data. */ if (dst != NULL) { tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK); status = &tempdata->status; fpregset = &tempdata->fpregset; psinfo = &tempdata->psinfo; } else { tempdata = NULL; status = NULL; fpregset = NULL; psinfo = NULL; } if (dst != NULL) { psinfo->pr_version = PRPSINFO_VERSION; psinfo->pr_psinfosz = sizeof(prpsinfo_t); strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname)); /* * XXX - We don't fill in the command line arguments properly * yet. 
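 * (The process name from p_comm is used as a stand-in below.)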
*/ strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs)); } __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo, sizeof *psinfo); /* - * We want to start with the registers of the initial thread in the - * process so that the .reg and .reg2 pseudo-sections created by bfd - * will be identical to the .reg/$PID and .reg2/$PID pseudo-sections. - * This makes sure that any tool that only looks for .reg and .reg2 - * and not for .reg/$PID and .reg2/$PID will behave the same as - * before. The first thread is the thread with an ID equal to the - * process' ID. - * Note that the initial thread may already be gone. In that case - * 'first' is NULL. + * For backward compatibility, we dump the registers of the current + * thread (as passed to us in td) first and set pr_pid to the PID of + * the process. We then dump the other threads, but with pr_pid set + * to the TID of the thread itself. This has two advantages: + * 1) We preserve the meaning of pr_pid for as much as is possible. + * 2) The debugger will select the current thread as its initial + * "thread", which is likely what we want. */ - thr = first = TAILQ_FIRST(&p->p_threads); - while (first != NULL && first->td_tid > PID_MAX) - first = TAILQ_NEXT(first, td_plist); - if (first != NULL) - thr = first; - do { + thr = td; + while (thr != NULL) { if (dst != NULL) { status->pr_version = PRSTATUS_VERSION; status->pr_statussz = sizeof(prstatus_t); status->pr_gregsetsz = sizeof(gregset_t); status->pr_fpregsetsz = sizeof(fpregset_t); status->pr_osreldate = osreldate; status->pr_cursig = p->p_sig; - status->pr_pid = thr->td_tid; + status->pr_pid = (thr == td) ? p->p_pid : thr->td_tid; fill_regs(thr, &status->pr_reg); fill_fpregs(thr, fpregset); } __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status, sizeof *status); __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset, sizeof *fpregset); + /* XXX allow for MD specific notes. */ - thr = (thr == first) ? TAILQ_FIRST(&p->p_threads) : + + thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) : TAILQ_NEXT(thr, td_plist); - if (thr == first && thr != NULL) + if (thr == td) thr = TAILQ_NEXT(thr, td_plist); - } while (thr != NULL); + } notesz = *off - noteoff; if (dst != NULL) free(tempdata, M_TEMP); /* Align up to a page boundary for the program segments. */ *off = round_page(*off); if (dst != NULL) { Elf_Ehdr *ehdr; Elf_Phdr *phdr; struct phdr_closure phc; /* * Fill in the ELF header. */ ehdr = (Elf_Ehdr *)((char *)dst + ehoff); ehdr->e_ident[EI_MAG0] = ELFMAG0; ehdr->e_ident[EI_MAG1] = ELFMAG1; ehdr->e_ident[EI_MAG2] = ELFMAG2; ehdr->e_ident[EI_MAG3] = ELFMAG3; ehdr->e_ident[EI_CLASS] = ELF_CLASS; ehdr->e_ident[EI_DATA] = ELF_DATA; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD; ehdr->e_ident[EI_ABIVERSION] = 0; ehdr->e_ident[EI_PAD] = 0; ehdr->e_type = ET_CORE; ehdr->e_machine = ELF_ARCH; ehdr->e_version = EV_CURRENT; ehdr->e_entry = 0; ehdr->e_phoff = phoff; ehdr->e_flags = 0; ehdr->e_ehsize = sizeof(Elf_Ehdr); ehdr->e_phentsize = sizeof(Elf_Phdr); ehdr->e_phnum = numsegs + 1; ehdr->e_shentsize = sizeof(Elf_Shdr); ehdr->e_shnum = 0; ehdr->e_shstrndx = SHN_UNDEF; /* * Fill in the program header entries. */ phdr = (Elf_Phdr *)((char *)dst + phoff); /* The note segement. */ phdr->p_type = PT_NOTE; phdr->p_offset = noteoff; phdr->p_vaddr = 0; phdr->p_paddr = 0; phdr->p_filesz = notesz; phdr->p_memsz = 0; phdr->p_flags = 0; phdr->p_align = 0; phdr++; /* All the writable segments from the program. 
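 * Each call into cb_put_phdr() fills in one PT_LOAD entry and
 * advances both phc.phdr and phc.offset, so these entries land
 * directly after the PT_NOTE header built above.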
*/ phc.phdr = phdr; phc.offset = *off; - each_writable_segment(p, cb_put_phdr, &phc); + each_writable_segment(td, cb_put_phdr, &phc); } } static void __elfN(putnote)(void *dst, size_t *off, const char *name, int type, const void *desc, size_t descsz) { Elf_Note note; note.n_namesz = strlen(name) + 1; note.n_descsz = descsz; note.n_type = type; if (dst != NULL) bcopy(¬e, (char *)dst + *off, sizeof note); *off += sizeof note; if (dst != NULL) bcopy(name, (char *)dst + *off, note.n_namesz); *off += roundup2(note.n_namesz, sizeof(Elf_Size)); if (dst != NULL) bcopy(desc, (char *)dst + *off, note.n_descsz); *off += roundup2(note.n_descsz, sizeof(Elf_Size)); } /* * Tell kern_execve.c about it, with a little help from the linker. */ static struct execsw __elfN(execsw) = { __CONCAT(exec_, __elfN(imgact)), __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) }; EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw)); Index: head/sys/kern/kern_fork.c =================================================================== --- head/sys/kern/kern_fork.c (revision 131148) +++ head/sys/kern/kern_fork.c (revision 131149) @@ -1,854 +1,853 @@ /* * Copyright (c) 1982, 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_ktrace.h" #include "opt_mac.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef _SYS_SYSPROTO_H_ struct fork_args { int dummy; }; #endif static int forksleep; /* Place for fork1() to sleep on. 
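 * On hitting the process or per-uid limits, fork1() does a bounded
 * tsleep() on this address before failing with EAGAIN, which
 * throttles runaway fork loops.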
*/ /* * MPSAFE */ /* ARGSUSED */ int fork(td, uap) struct thread *td; struct fork_args *uap; { int error; struct proc *p2; error = fork1(td, RFFDG | RFPROC, 0, &p2); if (error == 0) { td->td_retval[0] = p2->p_pid; td->td_retval[1] = 0; } return (error); } /* * MPSAFE */ /* ARGSUSED */ int vfork(td, uap) struct thread *td; struct vfork_args *uap; { int error; struct proc *p2; error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2); if (error == 0) { td->td_retval[0] = p2->p_pid; td->td_retval[1] = 0; } return (error); } /* * MPSAFE */ int rfork(td, uap) struct thread *td; struct rfork_args *uap; { struct proc *p2; int error; /* Don't allow kernel-only flags. */ if ((uap->flags & RFKERNELONLY) != 0) return (EINVAL); error = fork1(td, uap->flags, 0, &p2); if (error == 0) { td->td_retval[0] = p2 ? p2->p_pid : 0; td->td_retval[1] = 0; } return (error); } int nprocs = 1; /* process 0 */ int lastpid = 0; SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0, "Last used PID"); /* * Random component to lastpid generation. We mix in a random factor to make * it a little harder to predict. We sanity check the modulus value to avoid * doing it in critical paths. Don't let it be too small or we pointlessly * waste randomness entropy, and don't let it be impossibly large. Using a * modulus that is too big causes a LOT more process table scans and slows * down fork processing as the pidchecked caching is defeated. */ static int randompid = 0; static int sysctl_kern_randompid(SYSCTL_HANDLER_ARGS) { int error, pid; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error != 0) return(error); sx_xlock(&allproc_lock); pid = randompid; error = sysctl_handle_int(oidp, &pid, 0, req); if (error == 0 && req->newptr != NULL) { if (pid < 0 || pid > PID_MAX - 100) /* out of range */ pid = PID_MAX - 100; else if (pid < 2) /* NOP */ pid = 0; else if (pid < 100) /* Make it reasonable */ pid = 100; randompid = pid; } sx_xunlock(&allproc_lock); return (error); } SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_kern_randompid, "I", "Random PID modulus"); int fork1(td, flags, pages, procp) struct thread *td; int flags; int pages; struct proc **procp; { struct proc *p1, *p2, *pptr; uid_t uid; struct proc *newproc; int ok, trypid; static int curfail, pidchecked = 0; static struct timeval lastfail; struct filedesc *fd; struct filedesc_to_leader *fdtol; struct thread *td2; struct kse *ke2; struct ksegrp *kg2; struct sigacts *newsigacts; int error; /* Can't copy and clear. */ if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG)) return (EINVAL); p1 = td->td_proc; /* * Here we don't create a new process, but we divorce * certain parts of a process from itself. */ if ((flags & RFPROC) == 0) { mtx_lock(&Giant); vm_forkproc(td, NULL, NULL, flags); mtx_unlock(&Giant); /* * Close all file descriptors. */ if (flags & RFCFDG) { struct filedesc *fdtmp; FILEDESC_LOCK(td->td_proc->p_fd); fdtmp = fdinit(td->td_proc->p_fd); FILEDESC_UNLOCK(td->td_proc->p_fd); fdfree(td); p1->p_fd = fdtmp; } /* * Unshare file descriptors (from parent). */ if (flags & RFFDG) { FILEDESC_LOCK(p1->p_fd); if (p1->p_fd->fd_refcnt > 1) { struct filedesc *newfd; newfd = fdcopy(td->td_proc->p_fd); FILEDESC_UNLOCK(p1->p_fd); fdfree(td); p1->p_fd = newfd; } else FILEDESC_UNLOCK(p1->p_fd); } *procp = NULL; return (0); } /* * Note 1:1 allows for forking with one thread coming out on the * other side with the expectation that the process is about to * exec. */ if (p1->p_flag & P_SA) { /* * Idle the other threads for a second. 
* Since the user space is copied, it must remain stable. * In addition, all threads (from the user perspective) * need to either be suspended or in the kernel, * where they will try restart in the parent and will * be aborted in the child. */ PROC_LOCK(p1); if (thread_single(SINGLE_NO_EXIT)) { /* Abort. Someone else is single threading before us. */ PROC_UNLOCK(p1); return (ERESTART); } PROC_UNLOCK(p1); /* * All other activity in this process * is now suspended at the user boundary, * (or other safe places if we think of any). */ } /* Allocate new proc. */ newproc = uma_zalloc(proc_zone, M_WAITOK); #ifdef MAC mac_init_proc(newproc); #endif /* We have to lock the process tree while we look for a pid. */ sx_slock(&proctree_lock); /* * Although process entries are dynamically created, we still keep * a global limit on the maximum number we will create. Don't allow * a nonprivileged user to use the last ten processes; don't let root * exceed the limit. The variable nprocs is the current number of * processes, maxproc is the limit. */ sx_xlock(&allproc_lock); uid = td->td_ucred->cr_ruid; if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) { error = EAGAIN; goto fail; } /* * Increment the count of procs running with this uid. Don't allow * a nonprivileged user to exceed their current limit. */ PROC_LOCK(p1); ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, (uid != 0) ? lim_cur(p1, RLIMIT_NPROC) : 0); PROC_UNLOCK(p1); if (!ok) { error = EAGAIN; goto fail; } /* * Increment the nprocs resource before blocking can occur. There * are hard-limits as to the number of processes that can run. */ nprocs++; /* * Find an unused process ID. We remember a range of unused IDs * ready to use (from lastpid+1 through pidchecked-1). * * If RFHIGHPID is set (used during system boot), do not allocate * low-numbered pids. */ trypid = lastpid + 1; if (flags & RFHIGHPID) { if (trypid < 10) trypid = 10; } else { if (randompid) trypid += arc4random() % randompid; } retry: /* * If the process ID prototype has wrapped around, * restart somewhat above 0, as the low-numbered procs * tend to include daemons that don't exit. */ if (trypid >= PID_MAX) { trypid = trypid % PID_MAX; if (trypid < 100) trypid += 100; pidchecked = 0; } if (trypid >= pidchecked) { int doingzomb = 0; pidchecked = PID_MAX; /* * Scan the active and zombie procs to check whether this pid * is in use. Remember the lowest pid that's greater * than trypid, so we can avoid checking for a while. */ p2 = LIST_FIRST(&allproc); again: for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) { PROC_LOCK(p2); while (p2->p_pid == trypid || (p2->p_pgrp != NULL && (p2->p_pgrp->pg_id == trypid || (p2->p_session != NULL && p2->p_session->s_sid == trypid)))) { trypid++; if (trypid >= pidchecked) { PROC_UNLOCK(p2); goto retry; } } if (p2->p_pid > trypid && pidchecked > p2->p_pid) pidchecked = p2->p_pid; if (p2->p_pgrp != NULL) { if (p2->p_pgrp->pg_id > trypid && pidchecked > p2->p_pgrp->pg_id) pidchecked = p2->p_pgrp->pg_id; if (p2->p_session != NULL && p2->p_session->s_sid > trypid && pidchecked > p2->p_session->s_sid) pidchecked = p2->p_session->s_sid; } PROC_UNLOCK(p2); } if (!doingzomb) { doingzomb = 1; p2 = LIST_FIRST(&zombproc); goto again; } } sx_sunlock(&proctree_lock); /* * RFHIGHPID does not mess with the lastpid counter during boot. 
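 * (When RFHIGHPID is set, pidchecked is cleared below instead,
 * forcing the cached range of unused PIDs to be recomputed on the
 * next ordinary fork.)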
*/ if (flags & RFHIGHPID) pidchecked = 0; else lastpid = trypid; p2 = newproc; p2->p_state = PRS_NEW; /* protect against others */ p2->p_pid = trypid; LIST_INSERT_HEAD(&allproc, p2, p_list); LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash); sx_xunlock(&allproc_lock); /* * Malloc things while we don't hold any locks. */ if (flags & RFSIGSHARE) newsigacts = NULL; else newsigacts = sigacts_alloc(); /* * Copy filedesc. */ if (flags & RFCFDG) { FILEDESC_LOCK(td->td_proc->p_fd); fd = fdinit(td->td_proc->p_fd); FILEDESC_UNLOCK(td->td_proc->p_fd); fdtol = NULL; } else if (flags & RFFDG) { FILEDESC_LOCK(p1->p_fd); fd = fdcopy(td->td_proc->p_fd); FILEDESC_UNLOCK(p1->p_fd); fdtol = NULL; } else { fd = fdshare(p1->p_fd); if (p1->p_fdtol == NULL) p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL, p1->p_leader); if ((flags & RFTHREAD) != 0) { /* * Shared file descriptor table and * shared process leaders. */ fdtol = p1->p_fdtol; FILEDESC_LOCK(p1->p_fd); fdtol->fdl_refcount++; FILEDESC_UNLOCK(p1->p_fd); } else { /* * Shared file descriptor table, and * different process leaders */ fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p1->p_fd, p2); } } /* * Make a proc table entry for the new process. * Start by zeroing the section of proc that is zero-initialized, * then copy the section that is copied directly from the parent. */ td2 = FIRST_THREAD_IN_PROC(p2); kg2 = FIRST_KSEGRP_IN_PROC(p2); ke2 = FIRST_KSE_IN_KSEGRP(kg2); /* Allocate and switch to an alternate kstack if specified. */ if (pages != 0) vm_thread_new_altkstack(td2, pages); PROC_LOCK(p2); PROC_LOCK(p1); #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) bzero(&p2->p_startzero, (unsigned) RANGEOF(struct proc, p_startzero, p_endzero)); bzero(&ke2->ke_startzero, (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero)); bzero(&td2->td_startzero, (unsigned) RANGEOF(struct thread, td_startzero, td_endzero)); bzero(&kg2->kg_startzero, (unsigned) RANGEOF(struct ksegrp, kg_startzero, kg_endzero)); bcopy(&p1->p_startcopy, &p2->p_startcopy, (unsigned) RANGEOF(struct proc, p_startcopy, p_endcopy)); bcopy(&td->td_startcopy, &td2->td_startcopy, (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy)); bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy, (unsigned) RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy)); #undef RANGEOF - td2->td_tid = p2->p_pid; td2->td_sigstk = td->td_sigstk; /* Set up the thread as an active thread (as if runnable). */ ke2->ke_state = KES_THREAD; ke2->ke_thread = td2; td2->td_kse = ke2; /* * Duplicate sub-structures as needed. * Increase reference counts on shared objects. * The p_stats substruct is set in vm_forkproc. */ p2->p_flag = 0; if (p1->p_flag & P_PROFIL) startprofclock(p2); mtx_lock_spin(&sched_lock); p2->p_sflag = PS_INMEM; /* * Allow the scheduler to adjust the priority of the child and * parent while we hold the sched_lock. */ sched_fork(p1, p2); mtx_unlock_spin(&sched_lock); p2->p_ucred = crhold(td->td_ucred); td2->td_ucred = crhold(p2->p_ucred); /* XXXKSE */ pargs_hold(p2->p_args); if (flags & RFSIGSHARE) { p2->p_sigacts = sigacts_hold(p1->p_sigacts); } else { sigacts_copy(newsigacts, p1->p_sigacts); p2->p_sigacts = newsigacts; } if (flags & RFLINUXTHPN) p2->p_sigparent = SIGUSR1; else p2->p_sigparent = SIGCHLD; p2->p_textvp = p1->p_textvp; p2->p_fd = fd; p2->p_fdtol = fdtol; /* * p_limit is copy-on-write. Bump its refcount. 
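 * lim_hold() only takes another reference here; the structure is
 * actually copied later, if and when one of the two processes
 * modifies a limit.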
*/ p2->p_limit = lim_hold(p1->p_limit); PROC_UNLOCK(p1); PROC_UNLOCK(p2); /* Bump references to the text vnode (for procfs) */ if (p2->p_textvp) vref(p2->p_textvp); /* * Set up linkage for kernel based threading. */ if ((flags & RFTHREAD) != 0) { mtx_lock(&ppeers_lock); p2->p_peers = p1->p_peers; p1->p_peers = p2; p2->p_leader = p1->p_leader; mtx_unlock(&ppeers_lock); PROC_LOCK(p1->p_leader); if ((p1->p_leader->p_flag & P_WEXIT) != 0) { PROC_UNLOCK(p1->p_leader); /* * The task leader is exiting, so process p1 is * going to be killed shortly. Since p1 obviously * isn't dead yet, we know that the leader is either * sending SIGKILL's to all the processes in this * task or is sleeping waiting for all the peers to * exit. We let p1 complete the fork, but we need * to go ahead and kill the new process p2 since * the task leader may not get a chance to send * SIGKILL to it. We leave it on the list so that * the task leader will wait for this new process * to commit suicide. */ PROC_LOCK(p2); psignal(p2, SIGKILL); PROC_UNLOCK(p2); } else PROC_UNLOCK(p1->p_leader); } else { p2->p_peers = NULL; p2->p_leader = p2; } sx_xlock(&proctree_lock); PGRP_LOCK(p1->p_pgrp); PROC_LOCK(p2); PROC_LOCK(p1); /* * Preserve some more flags in subprocess. P_PROFIL has already * been preserved. */ p2->p_flag |= p1->p_flag & P_SUGID; td2->td_pflags |= td->td_pflags & TDP_ALTSTACK; SESS_LOCK(p1->p_session); if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) p2->p_flag |= P_CONTROLT; SESS_UNLOCK(p1->p_session); if (flags & RFPPWAIT) p2->p_flag |= P_PPWAIT; p2->p_pgrp = p1->p_pgrp; LIST_INSERT_AFTER(p1, p2, p_pglist); PGRP_UNLOCK(p1->p_pgrp); LIST_INIT(&p2->p_children); callout_init(&p2->p_itcallout, CALLOUT_MPSAFE); #ifdef KTRACE /* * Copy traceflag and tracefile if enabled. */ mtx_lock(&ktrace_mtx); KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode")); if (p1->p_traceflag & KTRFAC_INHERIT) { p2->p_traceflag = p1->p_traceflag; if ((p2->p_tracevp = p1->p_tracevp) != NULL) { VREF(p2->p_tracevp); KASSERT(p1->p_tracecred != NULL, ("ktrace vnode with no cred")); p2->p_tracecred = crhold(p1->p_tracecred); } } mtx_unlock(&ktrace_mtx); #endif /* * If PF_FORK is set, the child process inherits the * procfs ioctl flags from its parent. */ if (p1->p_pfsflags & PF_FORK) { p2->p_stops = p1->p_stops; p2->p_pfsflags = p1->p_pfsflags; } /* * This begins the section where we must prevent the parent * from being swapped. */ _PHOLD(p1); PROC_UNLOCK(p1); /* * Attach the new process to its parent. * * If RFNOWAIT is set, the newly created process becomes a child * of init. This effectively disassociates the child from the * parent. */ if (flags & RFNOWAIT) pptr = initproc; else pptr = p1; p2->p_pptr = pptr; LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling); sx_xunlock(&proctree_lock); /* Inform accounting that we have forked. */ p2->p_acflag = AFORK; PROC_UNLOCK(p2); /* * Finish creating the child process. It will return via a different * execution path later. 
(ie: directly into user mode) */ mtx_lock(&Giant); vm_forkproc(td, p2, td2, flags); if (flags == (RFFDG | RFPROC)) { cnt.v_forks++; cnt.v_forkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize; } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) { cnt.v_vforks++; cnt.v_vforkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize; } else if (p1 == &proc0) { cnt.v_kthreads++; cnt.v_kthreadpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize; } else { cnt.v_rforks++; cnt.v_rforkpages += p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize; } mtx_unlock(&Giant); /* * Both processes are set up, now check if any loadable modules want * to adjust anything. * What if they have an error? XXX */ EVENTHANDLER_INVOKE(process_fork, p1, p2, flags); /* * Set the child start time and mark the process as being complete. */ microuptime(&p2->p_stats->p_start); mtx_lock_spin(&sched_lock); p2->p_state = PRS_NORMAL; /* * If RFSTOPPED not requested, make child runnable and add to * run queue. */ if ((flags & RFSTOPPED) == 0) { TD_SET_CAN_RUN(td2); setrunqueue(td2); } mtx_unlock_spin(&sched_lock); /* * Now can be swapped. */ PROC_LOCK(p1); _PRELE(p1); /* * Tell any interested parties about the new process. */ KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid); PROC_UNLOCK(p1); /* * Preserve synchronization semantics of vfork. If waiting for * child to exec or exit, set P_PPWAIT on child, and sleep on our * proc (in case of exit). */ PROC_LOCK(p2); while (p2->p_flag & P_PPWAIT) msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0); PROC_UNLOCK(p2); /* * If other threads are waiting, let them continue now. */ if (p1->p_flag & P_SA) { PROC_LOCK(p1); thread_single_end(); PROC_UNLOCK(p1); } /* * Return child proc pointer to parent. */ *procp = p2; return (0); fail: sx_sunlock(&proctree_lock); if (ppsratecheck(&lastfail, &curfail, 1)) printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n", uid); sx_xunlock(&allproc_lock); #ifdef MAC mac_destroy_proc(newproc); #endif uma_zfree(proc_zone, newproc); if (p1->p_flag & P_SA) { PROC_LOCK(p1); thread_single_end(); PROC_UNLOCK(p1); } tsleep(&forksleep, PUSER, "fork", hz / 2); return (error); } /* * Handle the return of a child process from fork1(). This function * is called from the MD fork_trampoline() entry point. */ void fork_exit(callout, arg, frame) void (*callout)(void *, struct trapframe *); void *arg; struct trapframe *frame; { struct proc *p; struct thread *td; /* * Processes normally resume in mi_switch() after being * cpu_switch()'ed to, but when children start up they arrive here * instead, so we must do much the same things as mi_switch() would. */ if ((td = PCPU_GET(deadthread))) { PCPU_SET(deadthread, NULL); thread_stash(td); } td = curthread; p = td->td_proc; td->td_oncpu = PCPU_GET(cpuid); KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new")); /* * Finish setting up thread glue so that it begins execution in a * non-nested critical section with sched_lock held but not recursed. */ sched_lock.mtx_lock = (uintptr_t)td; mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED); cpu_critical_fork_exit(); CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid, p->p_comm); mtx_unlock_spin(&sched_lock); /* * cpu_set_fork_handler intercepts this function call to * have this call a non-return function to stay in kernel mode. * initproc has its own fork handler, but it does return. 
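 * For a normal fork the callout is fork_return(), which finishes
 * the trip to user mode via userret().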
*/ KASSERT(callout != NULL, ("NULL callout in fork_exit")); callout(arg, frame); /* * Check if a kernel thread misbehaved and returned from its main * function. */ PROC_LOCK(p); if (p->p_flag & P_KTHREAD) { PROC_UNLOCK(p); printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n", p->p_comm, p->p_pid); kthread_exit(0); } PROC_UNLOCK(p); #ifdef DIAGNOSTIC cred_free_thread(td); #endif mtx_assert(&Giant, MA_NOTOWNED); } /* * Simplified back end of syscall(), used when returning from fork() * directly into user mode. Giant is not held on entry, and must not * be held on return. This function is passed in to fork_exit() as the * first parameter and is called when returning to a new userland process. */ void fork_return(td, frame) struct thread *td; struct trapframe *frame; { userret(td, frame, 0); #ifdef KTRACE if (KTRPOINT(td, KTR_SYSRET)) ktrsysret(SYS_fork, 0, 0); #endif mtx_assert(&Giant, MA_NOTOWNED); } Index: head/sys/kern/kern_kse.c =================================================================== --- head/sys/kern/kern_kse.c (revision 131148) +++ head/sys/kern/kern_kse.c (revision 131149) @@ -1,1270 +1,1268 @@ /* * Copyright (C) 2001 Julian Elischer . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * KSEGRP related storage. 
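 * upcall_zone below backs struct kse_upcall allocations; unlinked
 * upcalls are stashed on the zombie_upcalls list for later
 * collection.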
*/ static uma_zone_t upcall_zone; /* DEBUG ONLY */ extern int virtual_cpu; extern int thread_debug; extern int max_threads_per_proc; extern int max_groups_per_proc; extern int max_threads_hits; extern struct mtx kse_zombie_lock; #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) TAILQ_HEAD(, kse_upcall) zombie_upcalls = TAILQ_HEAD_INITIALIZER(zombie_upcalls); static int thread_update_usr_ticks(struct thread *td, int user); static void thread_alloc_spare(struct thread *td, struct thread *spare); /* move to proc.h */ extern void kse_purge(struct proc *p, struct thread *td); extern void kse_purge_group(struct thread *td); void kseinit(void); void kse_GC(void); struct kse_upcall * upcall_alloc(void) { struct kse_upcall *ku; ku = uma_zalloc(upcall_zone, M_WAITOK); bzero(ku, sizeof(*ku)); return (ku); } void upcall_free(struct kse_upcall *ku) { uma_zfree(upcall_zone, ku); } void upcall_link(struct kse_upcall *ku, struct ksegrp *kg) { mtx_assert(&sched_lock, MA_OWNED); TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link); ku->ku_ksegrp = kg; kg->kg_numupcalls++; } void upcall_unlink(struct kse_upcall *ku) { struct ksegrp *kg = ku->ku_ksegrp; mtx_assert(&sched_lock, MA_OWNED); KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__)); TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link); kg->kg_numupcalls--; upcall_stash(ku); } void upcall_remove(struct thread *td) { if (td->td_upcall) { td->td_upcall->ku_owner = NULL; upcall_unlink(td->td_upcall); td->td_upcall = 0; } } #ifndef _SYS_SYSPROTO_H_ struct kse_switchin_args { const struct __mcontext *mcp; long val; long *loc; }; #endif int kse_switchin(struct thread *td, struct kse_switchin_args *uap) { mcontext_t mc; int error; error = (uap->mcp == NULL) ? EINVAL : 0; if (!error) error = copyin(uap->mcp, &mc, sizeof(mc)); if (!error && uap->loc != NULL) error = (suword(uap->loc, uap->val) != 0) ? EINVAL : 0; if (!error) error = set_mcontext(td, &mc); return ((error == 0) ? 
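/* EJUSTRETURN: set_mcontext() has already installed the register state, so the normal syscall return-value handling must be skipped. */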
EJUSTRETURN : error); } /* struct kse_thr_interrupt_args { struct kse_thr_mailbox * tmbx; int cmd; long data; }; */ int kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) { struct proc *p; struct thread *td2; p = td->td_proc; if (!(p->p_flag & P_SA)) return (EINVAL); switch (uap->cmd) { case KSE_INTR_SENDSIG: if (uap->data < 0 || uap->data > _SIG_MAXSIG) return (EINVAL); case KSE_INTR_INTERRUPT: case KSE_INTR_RESTART: PROC_LOCK(p); mtx_lock_spin(&sched_lock); FOREACH_THREAD_IN_PROC(p, td2) { if (td2->td_mailbox == uap->tmbx) break; } if (td2 == NULL) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (ESRCH); } if (uap->cmd == KSE_INTR_SENDSIG) { if (uap->data > 0) { td2->td_flags &= ~TDF_INTERRUPT; mtx_unlock_spin(&sched_lock); tdsignal(td2, (int)uap->data, SIGTARGET_TD); } else { mtx_unlock_spin(&sched_lock); } } else { td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING; if (TD_CAN_UNBIND(td2)) td2->td_upcall->ku_flags |= KUF_DOUPCALL; if (uap->cmd == KSE_INTR_INTERRUPT) td2->td_intrval = EINTR; else td2->td_intrval = ERESTART; if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) sleepq_abort(td2); mtx_unlock_spin(&sched_lock); } PROC_UNLOCK(p); break; case KSE_INTR_SIGEXIT: if (uap->data < 1 || uap->data > _SIG_MAXSIG) return (EINVAL); PROC_LOCK(p); sigexit(td, (int)uap->data); break; default: return (EINVAL); } return (0); } /* struct kse_exit_args { register_t dummy; }; */ int kse_exit(struct thread *td, struct kse_exit_args *uap) { struct proc *p; struct ksegrp *kg; struct kse *ke; struct kse_upcall *ku, *ku2; int error, count; p = td->td_proc; /* * Ensure that this is only called from the UTS. */ if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) return (EINVAL); kg = td->td_ksegrp; count = 0; /* * Count the existing non-exiting upcalls in this ksegrp. * If we are the last upcall but there are still other threads, * then do not exit. We need the other threads to be able to * complete whatever they are doing. * XXX This relies on the userland knowing what to do if we return. * It may be a better choice to convert ourselves into a kse_release * (or similar) and wait in the kernel to be needed. */ PROC_LOCK(p); mtx_lock_spin(&sched_lock); FOREACH_UPCALL_IN_GROUP(kg, ku2) { if (ku2->ku_flags & KUF_EXITING) count++; } if ((kg->kg_numupcalls - count) == 1 && (kg->kg_numthreads > 1)) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (EDEADLK); } ku->ku_flags |= KUF_EXITING; mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); /* * Mark the UTS mailbox as having been finished with. * If that fails then just go for a segfault. * XXX need to check that it can be delivered without a mailbox. */ error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE); PROC_LOCK(p); if (error) psignal(p, SIGSEGV); mtx_lock_spin(&sched_lock); upcall_remove(td); ke = td->td_kse; if (p->p_numthreads == 1) { kse_purge(p, td); p->p_flag &= ~P_SA; mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } else { if (kg->kg_numthreads == 1) { /* Shutdown a group */ kse_purge_group(td); ke->ke_flags |= KEF_EXIT; } thread_stopped(p); thread_exit(); /* NOTREACHED */ } return (0); } /* * Either becomes an upcall or waits for an awakening event and * then becomes an upcall. Only error cases return.
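 */

/*
 * Editor's note: kse_release() below copies in an optional user
 * timespec and converts it to scheduler ticks for msleep() via
 * TIMESPEC_TO_TIMEVAL() and tvtohz().  A minimal userland model of
 * that conversion, under stated assumptions: "HZ_MODEL" stands in
 * for the kernel's hz variable, and the round-up mirrors tvtohz()'s
 * guarantee that a non-zero timeout never collapses to zero ticks.
 */
#if 0	/* illustrative sketch, not part of the patch */
#include <sys/time.h>

#define	HZ_MODEL	100	/* assumption: 100 ticks per second */

static int
timespec_to_ticks(const struct timespec *ts)
{
	long nsper, t;

	/* Round the subsecond part up so short timeouts still sleep. */
	nsper = 1000000000L / HZ_MODEL;		/* nanoseconds per tick */
	t = ts->tv_sec * HZ_MODEL + (ts->tv_nsec + nsper - 1) / nsper;
	if (t <= 0)
		t = 1;
	return ((int)t);
}
#endif

/*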
*/ /* struct kse_release_args { struct timespec *timeout; }; */ int kse_release(struct thread *td, struct kse_release_args *uap) { struct proc *p; struct ksegrp *kg; struct kse_upcall *ku; struct timespec timeout; struct timeval tv; sigset_t sigset; int error; p = td->td_proc; kg = td->td_ksegrp; if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) return (EINVAL); if (uap->timeout != NULL) { if ((error = copyin(uap->timeout, &timeout, sizeof(timeout)))) return (error); TIMESPEC_TO_TIMEVAL(&tv, &timeout); } if (td->td_pflags & TDP_SA) td->td_pflags |= TDP_UPCALLING; else { ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags); if (ku->ku_mflags == -1) { PROC_LOCK(p); sigexit(td, SIGSEGV); } } PROC_LOCK(p); if (ku->ku_mflags & KMF_WAITSIGEVENT) { /* UTS wants to wait for signal event */ if (!(p->p_flag & P_SIGEVENT) && !(ku->ku_flags & KUF_DOUPCALL)) { td->td_kflags |= TDK_KSERELSIG; error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH, "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0)); td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP); } p->p_flag &= ~P_SIGEVENT; sigset = p->p_siglist; PROC_UNLOCK(p); error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught, sizeof(sigset)); } else { if (! kg->kg_completed && !(ku->ku_flags & KUF_DOUPCALL)) { kg->kg_upsleeps++; td->td_kflags |= TDK_KSEREL; error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, "kserel", (uap->timeout ? tvtohz(&tv) : 0)); td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP); kg->kg_upsleeps--; } PROC_UNLOCK(p); } if (ku->ku_flags & KUF_DOUPCALL) { mtx_lock_spin(&sched_lock); ku->ku_flags &= ~KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } return (0); } /* struct kse_wakeup_args { struct kse_mailbox *mbx; }; */ int kse_wakeup(struct thread *td, struct kse_wakeup_args *uap) { struct proc *p; struct ksegrp *kg; struct kse_upcall *ku; struct thread *td2; p = td->td_proc; td2 = NULL; ku = NULL; /* KSE-enabled processes only, please. */ if (!(p->p_flag & P_SA)) return (EINVAL); PROC_LOCK(p); mtx_lock_spin(&sched_lock); if (uap->mbx) { FOREACH_KSEGRP_IN_PROC(p, kg) { FOREACH_UPCALL_IN_GROUP(kg, ku) { if (ku->ku_mailbox == uap->mbx) break; } if (ku) break; } } else { kg = td->td_ksegrp; if (kg->kg_upsleeps) { mtx_unlock_spin(&sched_lock); wakeup_one(&kg->kg_completed); PROC_UNLOCK(p); return (0); } ku = TAILQ_FIRST(&kg->kg_upcalls); } if (ku == NULL) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return (ESRCH); } if ((td2 = ku->ku_owner) == NULL) { mtx_unlock_spin(&sched_lock); panic("%s: no owner", __func__); } else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) { mtx_unlock_spin(&sched_lock); if (!(td2->td_kflags & TDK_WAKEUP)) { td2->td_kflags |= TDK_WAKEUP; if (td2->td_kflags & TDK_KSEREL) sleepq_remove(td2, &kg->kg_completed); else sleepq_remove(td2, &p->p_siglist); } } else { ku->ku_flags |= KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } PROC_UNLOCK(p); return (0); } /* * No new KSEG: first call: use current KSE, don't schedule an upcall * All other situations, do allocate max new KSEs and schedule an upcall. 
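 */

/*
 * Editor's note: kse_create() below initializes new ksegrps and KSEs
 * with the RANGEOF() idiom defined near the top of this file: members
 * that must start zeroed sit between two marker fields and are
 * cleared with a single bzero(), and members inherited from the
 * parent sit between another pair of markers and are copied with a
 * single bcopy().  A self-contained sketch with a hypothetical struct
 * showing the layout trick:
 */
#if 0	/* illustrative sketch, not part of the patch */
#include <stddef.h>
#include <strings.h>

#define	RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

struct demo {
	int	d_refcnt;	/* preserved across reuse */
	int	d_startzero;	/* marker: first member to be zeroed */
	int	d_count;
	void	*d_ptr;
	int	d_endzero;	/* marker: first member past the zeroed range */
	int	d_startcopy;	/* marker: first member copied from the parent */
	int	d_prio;
	int	d_endcopy;	/* marker: first member past the copied range */
};

static void
demo_init(struct demo *new, const struct demo *parent)
{
	bzero(&new->d_startzero,
	    RANGEOF(struct demo, d_startzero, d_endzero));
	bcopy(&parent->d_startcopy, &new->d_startcopy,
	    RANGEOF(struct demo, d_startcopy, d_endcopy));
}
#endif

/*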
*/ /* struct kse_create_args { struct kse_mailbox *mbx; int newgroup; }; */ int kse_create(struct thread *td, struct kse_create_args *uap) { struct kse *newke; struct ksegrp *newkg; struct ksegrp *kg; struct proc *p; struct kse_mailbox mbx; struct kse_upcall *newku; int err, ncpus, sa = 0, first = 0; struct thread *newtd; p = td->td_proc; if ((err = copyin(uap->mbx, &mbx, sizeof(mbx)))) return (err); ncpus = mp_ncpus; if (virtual_cpu != 0) ncpus = virtual_cpu; if (!(mbx.km_flags & KMF_BOUND)) sa = TDP_SA; else ncpus = 1; PROC_LOCK(p); if (!(p->p_flag & P_SA)) { first = 1; p->p_flag |= P_SA; } PROC_UNLOCK(p); if (!sa && !uap->newgroup && !first) return (EINVAL); kg = td->td_ksegrp; if (uap->newgroup) { /* There is a race condition here, but it is cheap */ if (p->p_numksegrps >= max_groups_per_proc) return (EPROCLIM); /* * If we want a new KSEGRP it doesn't matter whether * we have already fired up KSE mode before or not. * We put the process in KSE mode and create a new KSEGRP. */ newkg = ksegrp_alloc(); bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp, kg_startzero, kg_endzero)); bcopy(&kg->kg_startcopy, &newkg->kg_startcopy, RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy)); PROC_LOCK(p); mtx_lock_spin(&sched_lock); if (p->p_numksegrps >= max_groups_per_proc) { mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); ksegrp_free(newkg); return (EPROCLIM); } ksegrp_link(newkg, p); sched_fork_ksegrp(kg, newkg); mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } else { if (!first && ((td->td_pflags & TDP_SA) ^ sa) != 0) return (EINVAL); newkg = kg; } /* * Creating more upcalls than the number of physical cpus does * not help performance. */ if (newkg->kg_numupcalls >= ncpus) return (EPROCLIM); if (newkg->kg_numupcalls == 0) { /* * Initialize KSE group * * For a multiplexed group, create as many KSEs as there are * physical cpus. This increases concurrency even if userland * is not MP safe and can only run on a single CPU. * In an ideal world, every physical cpu should execute a thread. * If there are enough KSEs, threads in the kernel can be * executed in parallel on different cpus at full speed; * concurrency in the kernel shouldn't be restricted by the number * of upcalls userland provides. Adding more upcall structures * only increases concurrency in userland. * * For a bound thread group, because there is only one thread in the * group, we only create one KSE for the group. A thread in this * kind of group will never schedule an upcall when blocked; * this is intended to simulate a pthread system scope thread.
*/ while (newkg->kg_kses < ncpus) { newke = kse_alloc(); bzero(&newke->ke_startzero, RANGEOF(struct kse, ke_startzero, ke_endzero)); #if 0 mtx_lock_spin(&sched_lock); bcopy(&ke->ke_startcopy, &newke->ke_startcopy, RANGEOF(struct kse, ke_startcopy, ke_endcopy)); mtx_unlock_spin(&sched_lock); #endif mtx_lock_spin(&sched_lock); kse_link(newke, newkg); sched_fork_kse(td->td_kse, newke); /* Add engine */ kse_reassign(newke); mtx_unlock_spin(&sched_lock); } } newku = upcall_alloc(); newku->ku_mailbox = uap->mbx; newku->ku_func = mbx.km_func; bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t)); /* For the first call this may not have been set */ if (td->td_standin == NULL) thread_alloc_spare(td, NULL); PROC_LOCK(p); if (newkg->kg_numupcalls >= ncpus) { PROC_UNLOCK(p); upcall_free(newku); return (EPROCLIM); } if (first && sa) { SIGSETOR(p->p_siglist, td->td_siglist); SIGEMPTYSET(td->td_siglist); SIGFILLSET(td->td_sigmask); SIG_CANTMASK(td->td_sigmask); } mtx_lock_spin(&sched_lock); PROC_UNLOCK(p); upcall_link(newku, newkg); if (mbx.km_quantum) newkg->kg_upquantum = max(1, mbx.km_quantum/tick); /* * Each upcall structure has an owner thread; find which * one owns it. */ if (uap->newgroup) { /* * Because the new ksegrp has no thread yet, * create an initial upcall thread to own it. */ newtd = thread_schedule_upcall(td, newku); } else { /* * If the current thread doesn't have an upcall structure, * just assign the upcall to it. */ if (td->td_upcall == NULL) { newku->ku_owner = td; td->td_upcall = newku; newtd = td; } else { /* * Create a new upcall thread to own it. */ newtd = thread_schedule_upcall(td, newku); } } if (!sa) { newtd->td_mailbox = mbx.km_curthread; newtd->td_pflags &= ~TDP_SA; if (newtd != td) { mtx_unlock_spin(&sched_lock); cpu_set_upcall_kse(newtd, newku); mtx_lock_spin(&sched_lock); } } else { newtd->td_pflags |= TDP_SA; } if (newtd != td) setrunqueue(newtd); mtx_unlock_spin(&sched_lock); return (0); } /* * Initialize global upcall allocation resources. */ void kseinit(void) { upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall), NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); } /* * Stash an embarrassingly extra upcall into the zombie upcall queue. */ void upcall_stash(struct kse_upcall *ku) { mtx_lock_spin(&kse_zombie_lock); TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link); mtx_unlock_spin(&kse_zombie_lock); } /* * Reap zombie upcall resources. */ void kse_GC(void) { struct kse_upcall *ku_first, *ku_next; /* * Don't even bother to lock if none at this instant; * we really don't care about the next instant. */ if (!TAILQ_EMPTY(&zombie_upcalls)) { mtx_lock_spin(&kse_zombie_lock); ku_first = TAILQ_FIRST(&zombie_upcalls); if (ku_first) TAILQ_INIT(&zombie_upcalls); mtx_unlock_spin(&kse_zombie_lock); while (ku_first) { ku_next = TAILQ_NEXT(ku_first, ku_link); upcall_free(ku_first); ku_first = ku_next; } } } /* * Store the thread context in the UTS's mailbox, * then add the mailbox at the head of a list we are building in user space. * The list is anchored in the ksegrp structure. */ int thread_export_context(struct thread *td, int willexit) { struct proc *p; struct ksegrp *kg; uintptr_t mbx; void *addr; int error = 0, temp, sig; mcontext_t mc; p = td->td_proc; kg = td->td_ksegrp; /* Export the user/machine context.
*/ get_mcontext(td, &mc, 0); addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext); error = copyout(&mc, addr, sizeof(mcontext_t)); if (error) goto bad; /* Export the clock ticks spent in kernel mode */ addr = (caddr_t)(&td->td_mailbox->tm_sticks); temp = fuword32(addr) + td->td_usticks; if (suword32(addr, temp)) { error = EFAULT; goto bad; } /* * Post a sync signal, or process SIGKILL and SIGSTOP. * For a sync signal, this is only possible when the signal is not * caught by userland or the process is being debugged. */ PROC_LOCK(p); if (td->td_flags & TDF_NEEDSIGCHK) { mtx_lock_spin(&sched_lock); td->td_flags &= ~TDF_NEEDSIGCHK; mtx_unlock_spin(&sched_lock); mtx_lock(&p->p_sigacts->ps_mtx); while ((sig = cursig(td)) != 0) postsig(sig); mtx_unlock(&p->p_sigacts->ps_mtx); } if (willexit) SIGFILLSET(td->td_sigmask); PROC_UNLOCK(p); /* Get the address of the list pointer in the latest mailbox */ addr = (void *)(&td->td_mailbox->tm_next); /* * Put the saved address of the previous first * entry into this one */ for (;;) { mbx = (uintptr_t)kg->kg_completed; if (suword(addr, mbx)) { error = EFAULT; goto bad; } PROC_LOCK(p); if (mbx == (uintptr_t)kg->kg_completed) { kg->kg_completed = td->td_mailbox; /* * The thread context may be taken away by * other upcall threads when we unlock the * process lock; it's no longer valid to * use it anywhere else. */ td->td_mailbox = NULL; PROC_UNLOCK(p); break; } PROC_UNLOCK(p); } td->td_usticks = 0; return (0); bad: PROC_LOCK(p); sigexit(td, SIGILL); return (error); } /* * Take the list of completed mailboxes for this KSEGRP and put them on this * upcall's mailbox as it's the next one going up. */ static int thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku) { struct proc *p = kg->kg_proc; void *addr; uintptr_t mbx; addr = (void *)(&ku->ku_mailbox->km_completed); for (;;) { mbx = (uintptr_t)kg->kg_completed; if (suword(addr, mbx)) { PROC_LOCK(p); psignal(p, SIGSEGV); PROC_UNLOCK(p); return (EFAULT); } PROC_LOCK(p); if (mbx == (uintptr_t)kg->kg_completed) { kg->kg_completed = NULL; PROC_UNLOCK(p); break; } PROC_UNLOCK(p); } return (0); } /* * This function should be called at statclock interrupt time. */ int thread_statclock(int user) { struct thread *td = curthread; struct ksegrp *kg = td->td_ksegrp; if (kg->kg_numupcalls == 0 || !(td->td_pflags & TDP_SA)) return (0); if (user) { /* Currently always done via ast() */ mtx_lock_spin(&sched_lock); td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING); mtx_unlock_spin(&sched_lock); td->td_uuticks++; } else { if (td->td_mailbox != NULL) td->td_usticks++; else { /* XXXKSE * We will call thread_user_enter() for every * kernel entry in the future, so if the thread mailbox * is NULL, it must be the UTS; don't account * clock ticks for it.
*/ } } return (0); } /* * Export stat clock ticks for userland */ static int thread_update_usr_ticks(struct thread *td, int user) { struct proc *p = td->td_proc; struct kse_thr_mailbox *tmbx; struct kse_upcall *ku; struct ksegrp *kg; caddr_t addr; u_int uticks; if ((ku = td->td_upcall) == NULL) return (-1); tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); if ((tmbx == NULL) || (tmbx == (void *)-1)) return (-1); if (user) { uticks = td->td_uuticks; td->td_uuticks = 0; addr = (caddr_t)&tmbx->tm_uticks; } else { uticks = td->td_usticks; td->td_usticks = 0; addr = (caddr_t)&tmbx->tm_sticks; } if (uticks) { if (suword32(addr, uticks+fuword32(addr))) { PROC_LOCK(p); psignal(p, SIGSEGV); PROC_UNLOCK(p); return (-2); } } kg = td->td_ksegrp; if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) { mtx_lock_spin(&sched_lock); td->td_upcall->ku_flags |= KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } return (0); } /* * This function is intended to be used to initialize a spare thread * for an upcall. Initialize the thread's large data area outside sched_lock * for thread_schedule_upcall(). */ void thread_alloc_spare(struct thread *td, struct thread *spare) { if (td->td_standin) return; - if (spare == NULL) { + if (spare == NULL) spare = thread_alloc(); - spare->td_tid = thread_new_tid(); - } td->td_standin = spare; bzero(&spare->td_startzero, (unsigned)RANGEOF(struct thread, td_startzero, td_endzero)); spare->td_proc = td->td_proc; spare->td_ucred = crhold(td->td_ucred); } /* * Create a thread and schedule it for an upcall on the given KSE. * Use our thread's standin so that we don't have to allocate one. */ struct thread * thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) { struct thread *td2; mtx_assert(&sched_lock, MA_OWNED); /* * Schedule an upcall thread on the specified kse_upcall; * the kse_upcall must be free. * td must have a spare thread. */ KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__)); if ((td2 = td->td_standin) != NULL) { td->td_standin = NULL; } else { panic("no reserve thread when scheduling an upcall"); return (NULL); } CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)", td2, td->td_proc->p_pid, td->td_proc->p_comm); bcopy(&td->td_startcopy, &td2->td_startcopy, (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy)); thread_link(td2, ku->ku_ksegrp); /* inherit parts of the blocked thread's context as a good template */ cpu_set_upcall(td2, td); /* Let the new thread become owner of the upcall */ ku->ku_owner = td2; td2->td_upcall = ku; td2->td_flags = 0; td2->td_pflags = TDP_SA|TDP_UPCALLING; td2->td_kse = NULL; td2->td_state = TDS_CAN_RUN; td2->td_inhibitors = 0; SIGFILLSET(td2->td_sigmask); SIG_CANTMASK(td2->td_sigmask); sched_fork_thread(td, td2); return (td2); /* bogus.. should be a void function */ } /* * This is only used when a thread has generated a trap and the process is * being debugged.
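 */

/*
 * Editor's note: thread_export_context() and thread_link_mboxes()
 * above both publish onto a user-visible singly linked list with an
 * optimistic retry loop: snapshot the in-kernel head, store it into
 * the new element's next pointer in user memory (the suword() call),
 * then take the proc lock and re-check that the head is unchanged
 * before swinging it; if another thread raced in, loop and re-link.
 * A minimal userland model of that protocol, with a pthread mutex
 * standing in for the proc lock (all names here are illustrative):
 */
#if 0	/* illustrative sketch, not part of the patch */
#include <pthread.h>
#include <stddef.h>

struct mbox {
	struct mbox *m_next;
};

static struct mbox *completed_head;
static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;

static void
mbox_publish(struct mbox *m)
{
	struct mbox *head;

	for (;;) {
		head = completed_head;		/* unlocked snapshot */
		m->m_next = head;		/* plays the suword() role */
		pthread_mutex_lock(&head_lock);
		if (completed_head == head) {	/* nobody raced us */
			completed_head = m;
			pthread_mutex_unlock(&head_lock);
			return;
		}
		pthread_mutex_unlock(&head_lock);
	}
}
#endif

/*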
*/ void thread_signal_add(struct thread *td, int sig) { struct proc *p; siginfo_t siginfo; struct sigacts *ps; int error; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); ps = p->p_sigacts; mtx_assert(&ps->ps_mtx, MA_OWNED); cpu_thread_siginfo(sig, 0, &siginfo); mtx_unlock(&ps->ps_mtx); PROC_UNLOCK(p); error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo)); if (error) { PROC_LOCK(p); sigexit(td, SIGILL); } PROC_LOCK(p); SIGADDSET(td->td_sigmask, sig); mtx_lock(&ps->ps_mtx); } void thread_switchout(struct thread *td) { struct kse_upcall *ku; struct thread *td2; mtx_assert(&sched_lock, MA_OWNED); /* * If the outgoing thread is in a threaded group and has never * scheduled an upcall, decide whether this is a short * or long term event and thus whether or not to schedule * an upcall. * If it is a short term event, just suspend it in * a way that takes its KSE with it. * Select the events for which we want to schedule upcalls. * For now it's just sleep. * XXXKSE eventually almost any inhibition could do. */ if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) { /* * Release ownership of the upcall and schedule an upcall * thread; this new upcall thread becomes the owner of * the upcall structure. */ ku = td->td_upcall; ku->ku_owner = NULL; td->td_upcall = NULL; td->td_flags &= ~TDF_CAN_UNBIND; td2 = thread_schedule_upcall(td, ku); setrunqueue(td2); } } /* * Setup done on the thread when it enters the kernel. * XXXKSE Presently only for syscalls but eventually all kernel entries. */ void thread_user_enter(struct proc *p, struct thread *td) { struct ksegrp *kg; struct kse_upcall *ku; struct kse_thr_mailbox *tmbx; uint32_t tflags; kg = td->td_ksegrp; /* * First check that we shouldn't just abort. * But check if we are the single thread first! */ if (p->p_flag & P_SINGLE_EXIT) { PROC_LOCK(p); mtx_lock_spin(&sched_lock); thread_stopped(p); thread_exit(); /* NOTREACHED */ } /* * If we are doing a syscall in a KSE environment, * note where our mailbox is. There is always the * possibility that we could do this lazily (in kse_reassign()), * but for now do it every time. */ kg = td->td_ksegrp; if (td->td_pflags & TDP_SA) { ku = td->td_upcall; KASSERT(ku, ("%s: no upcall owned", __func__)); KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__)); KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__)); ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags); tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); if ((tmbx == NULL) || (tmbx == (void *)-1L) || (ku->ku_mflags & KMF_NOUPCALL)) { td->td_mailbox = NULL; } else { if (td->td_standin == NULL) thread_alloc_spare(td, NULL); tflags = fuword32(&tmbx->tm_flags); /* * On some architectures, the TP register points to the thread * mailbox but not to the kse mailbox, and userland * cannot atomically clear km_curthread; it can, however, * use the TP register and set TMF_NOUPCALL in the thread * flags to indicate a critical region. */ if (tflags & TMF_NOUPCALL) { td->td_mailbox = NULL; } else { td->td_mailbox = tmbx; mtx_lock_spin(&sched_lock); td->td_flags |= TDF_CAN_UNBIND; mtx_unlock_spin(&sched_lock); } } } } /* * The extra work we go through if we are a threaded process when we * return to userland. * * If we are a KSE process and returning to user mode, check for * extra work to do before we return (e.g. for more syscalls * to complete first). If we were in a critical section, we should * just return to let it finish. Same if we were in the UTS (in * which case the mailbox's context's busy indicator will be set).
* The only traps we support will have set the mailbox. * We will clear it here. */ int thread_userret(struct thread *td, struct trapframe *frame) { int error = 0, upcalls, uts_crit; struct kse_upcall *ku; struct ksegrp *kg, *kg2; struct proc *p; struct timespec ts; p = td->td_proc; kg = td->td_ksegrp; ku = td->td_upcall; /* Nothing to do for a bound thread */ if (!(td->td_pflags & TDP_SA)) return (0); /* * A stat clock interrupt hit in userland; we are returning * from the interrupt, so charge the thread's * userland time to the UTS. */ if (td->td_flags & TDF_USTATCLOCK) { thread_update_usr_ticks(td, 1); mtx_lock_spin(&sched_lock); td->td_flags &= ~TDF_USTATCLOCK; mtx_unlock_spin(&sched_lock); if (kg->kg_completed || (td->td_upcall->ku_flags & KUF_DOUPCALL)) thread_user_enter(p, td); } uts_crit = (td->td_mailbox == NULL); /* * Optimisation: * This thread has not started any upcall. * If there is no work to report other than ourselves, * then it can return directly to userland. */ if (TD_CAN_UNBIND(td)) { mtx_lock_spin(&sched_lock); td->td_flags &= ~TDF_CAN_UNBIND; if ((td->td_flags & TDF_NEEDSIGCHK) == 0 && (kg->kg_completed == NULL) && (ku->ku_flags & KUF_DOUPCALL) == 0 && (kg->kg_upquantum && ticks < kg->kg_nextupcall)) { mtx_unlock_spin(&sched_lock); thread_update_usr_ticks(td, 0); nanotime(&ts); error = copyout(&ts, (caddr_t)&ku->ku_mailbox->km_timeofday, sizeof(ts)); td->td_mailbox = 0; ku->ku_mflags = 0; if (error) goto out; return (0); } mtx_unlock_spin(&sched_lock); thread_export_context(td, 0); /* * There is something to report, and we own an upcall * structure, so we can go to userland. * Turn ourselves into an upcall thread. */ td->td_pflags |= TDP_UPCALLING; } else if (td->td_mailbox && (ku == NULL)) { thread_export_context(td, 1); PROC_LOCK(p); /* * There are upcall threads waiting for * work to do, wake one of them up. * XXXKSE Maybe wake all of them up. */ if (kg->kg_upsleeps) wakeup_one(&kg->kg_completed); mtx_lock_spin(&sched_lock); thread_stopped(p); thread_exit(); /* NOTREACHED */ } KASSERT(ku != NULL, ("upcall is NULL\n")); KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind")); if (p->p_numthreads > max_threads_per_proc) { max_threads_hits++; PROC_LOCK(p); mtx_lock_spin(&sched_lock); p->p_maxthrwaits++; while (p->p_numthreads > max_threads_per_proc) { upcalls = 0; FOREACH_KSEGRP_IN_PROC(p, kg2) { if (kg2->kg_numupcalls == 0) upcalls++; else upcalls += kg2->kg_numupcalls; } if (upcalls >= max_threads_per_proc) break; mtx_unlock_spin(&sched_lock); if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH, "maxthreads", 0)) { mtx_lock_spin(&sched_lock); break; } else { mtx_lock_spin(&sched_lock); } } p->p_maxthrwaits--; mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } if (td->td_pflags & TDP_UPCALLING) { uts_crit = 0; kg->kg_nextupcall = ticks+kg->kg_upquantum; /* * There is no more work to do and we are going to ride * this thread up to userland as an upcall. * Do the last parts of the setup needed for the upcall. */ CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)", td, td->td_proc->p_pid, td->td_proc->p_comm); td->td_pflags &= ~TDP_UPCALLING; if (ku->ku_flags & KUF_DOUPCALL) { mtx_lock_spin(&sched_lock); ku->ku_flags &= ~KUF_DOUPCALL; mtx_unlock_spin(&sched_lock); } /* * Set the user context to the UTS */ if (!(ku->ku_mflags & KMF_NOUPCALL)) { cpu_set_upcall_kse(td, ku); error = suword(&ku->ku_mailbox->km_curthread, 0); if (error) goto out; } /* * Unhook the list of completed threads; * anything that completes after this gets to * come in next time.
* Put the list of completed thread mailboxes on * this KSE's mailbox. */ if (!(ku->ku_mflags & KMF_NOCOMPLETED) && (error = thread_link_mboxes(kg, ku)) != 0) goto out; } if (!uts_crit) { nanotime(&ts); error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts)); } out: if (error) { /* * Things are going to be so screwed we should just kill * the process. * How do we do that? */ PROC_LOCK(td->td_proc); psignal(td->td_proc, SIGSEGV); PROC_UNLOCK(td->td_proc); } else { /* * Optimisation: * Ensure that we have a spare thread available * for when we re-enter the kernel. */ if (td->td_standin == NULL) thread_alloc_spare(td, NULL); } ku->ku_mflags = 0; /* * Clear the thread mailbox first, then clear the system tick count. * The order is important because thread_statclock() uses the * mailbox pointer to see if it is a userland thread or * a UTS kernel thread. */ td->td_mailbox = NULL; td->td_usticks = 0; return (error); /* go sync */ } int thread_upcall_check(struct thread *td) { PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); if (td->td_kflags & TDK_WAKEUP) return (1); else return (0); } Index: head/sys/kern/kern_thr.c =================================================================== --- head/sys/kern/kern_thr.c (revision 131148) +++ head/sys/kern/kern_thr.c (revision 131149) @@ -1,315 +1,314 @@ /* * Copyright (c) 2003, Jeffrey Roberson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Back end support functions. */ void thr_exit1(void) { struct ksegrp *kg; struct thread *td; struct kse *ke; struct proc *p; td = curthread; p = td->td_proc; kg = td->td_ksegrp; ke = td->td_kse; mtx_assert(&sched_lock, MA_OWNED); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT(!mtx_owned(&Giant), ("dying thread owns giant")); /* * Shutting down the last thread in the proc. This will actually * call exit() in the trampoline when it returns. */ if (p->p_numthreads == 1) { PROC_UNLOCK(p); return; } /* * XXX Undelivered process wide signals should be reposted to the * proc. */ /* Clean up cpu resources. */ cpu_thread_exit(td); /* Unlink the thread from the process and ksegrp.
*/ thread_unlink(td); ke->ke_state = KES_UNQUEUED; ke->ke_thread = NULL; kse_unlink(ke); sched_exit_kse(TAILQ_NEXT(ke, ke_kglist), ke); /* * If we were stopped while waiting for all threads to exit and this * is the last thread wakeup the exiting thread. */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) if (p->p_numthreads == 1) thread_unsuspend_one(p->p_singlethread); PROC_UNLOCK(p); td->td_kse = NULL; td->td_state = TDS_INACTIVE; #if 0 td->td_proc = NULL; #endif td->td_ksegrp = NULL; td->td_last_kse = NULL; sched_exit_thread(TAILQ_NEXT(td, td_kglist), td); thread_stash(td); cpu_throw(td, choosethread()); } #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) /* * System call interface. */ int thr_create(struct thread *td, struct thr_create_args *uap) /* ucontext_t *ctx, thr_id_t *id, int flags */ { struct kse *ke0; struct thread *td0; ucontext_t ctx; int error; if ((error = copyin(uap->ctx, &ctx, sizeof(ctx)))) return (error); /* Initialize our td. */ td0 = thread_alloc(); - td0->td_tid = thread_new_tid(); /* * Try the copyout as soon as we allocate the td so we don't have to * tear things down in a failure case below. */ if ((error = copyout(&td0, uap->id, sizeof(thr_id_t)))) { thread_free(td0); return (error); } bzero(&td0->td_startzero, (unsigned)RANGEOF(struct thread, td_startzero, td_endzero)); bcopy(&td->td_startcopy, &td0->td_startcopy, (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy)); td0->td_proc = td->td_proc; PROC_LOCK(td->td_proc); td0->td_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); td0->td_ucred = crhold(td->td_ucred); /* Initialize our kse structure. */ ke0 = kse_alloc(); bzero(&ke0->ke_startzero, RANGEOF(struct kse, ke_startzero, ke_endzero)); /* Set up our machine context. */ cpu_set_upcall(td0, td); error = set_mcontext(td0, &ctx.uc_mcontext); if (error != 0) { kse_free(ke0); thread_free(td0); goto out; } /* Link the thread and kse into the ksegrp and make it runnable. */ mtx_lock_spin(&sched_lock); thread_link(td0, td->td_ksegrp); kse_link(ke0, td->td_ksegrp); /* Bind this thread and kse together. */ td0->td_kse = ke0; ke0->ke_thread = td0; sched_fork_kse(td->td_kse, ke0); sched_fork_thread(td, td0); TD_SET_CAN_RUN(td0); if ((uap->flags & THR_SUSPENDED) == 0) setrunqueue(td0); mtx_unlock_spin(&sched_lock); out: return (error); } int thr_self(struct thread *td, struct thr_self_args *uap) /* thr_id_t *id */ { int error; if ((error = copyout(&td, uap->id, sizeof(thr_id_t)))) return (error); return (0); } int thr_exit(struct thread *td, struct thr_exit_args *uap) /* NULL */ { struct proc *p; p = td->td_proc; PROC_LOCK(p); mtx_lock_spin(&sched_lock); /* * This unlocks proc and doesn't return unless this is the last * thread. 
*/ thr_exit1(); mtx_unlock_spin(&sched_lock); return (0); } int thr_kill(struct thread *td, struct thr_kill_args *uap) /* thr_id_t id, int sig */ { struct thread *ttd; struct proc *p; int error; p = td->td_proc; error = 0; PROC_LOCK(p); FOREACH_THREAD_IN_PROC(p, ttd) { if (ttd == uap->id) break; } if (ttd == NULL) { error = ESRCH; goto out; } if (uap->sig == 0) goto out; if (!_SIG_VALID(uap->sig)) { error = EINVAL; goto out; } tdsignal(ttd, uap->sig, SIGTARGET_TD); out: PROC_UNLOCK(p); return (error); } int thr_suspend(struct thread *td, struct thr_suspend_args *uap) /* const struct timespec *timeout */ { struct timespec ts; struct timeval tv; int error; int hz; hz = 0; error = 0; if (uap->timeout != NULL) { error = copyin((const void *)uap->timeout, (void *)&ts, sizeof(struct timespec)); if (error != 0) return (error); if (ts.tv_nsec < 0 || ts.tv_nsec > 1000000000) return (EINVAL); if (ts.tv_sec == 0 && ts.tv_nsec == 0) return (ETIMEDOUT); TIMESPEC_TO_TIMEVAL(&tv, &ts); hz = tvtohz(&tv); } PROC_LOCK(td->td_proc); mtx_lock_spin(&sched_lock); if ((td->td_flags & TDF_THRWAKEUP) == 0) { mtx_unlock_spin(&sched_lock); error = msleep((void *)td, &td->td_proc->p_mtx, td->td_priority | PCATCH, "lthr", hz); mtx_lock_spin(&sched_lock); } td->td_flags &= ~TDF_THRWAKEUP; mtx_unlock_spin(&sched_lock); PROC_UNLOCK(td->td_proc); return (error == EWOULDBLOCK ? ETIMEDOUT : error); } int thr_wake(struct thread *td, struct thr_wake_args *uap) /* thr_id_t id */ { struct thread *tdsleeper, *ttd; tdsleeper = ((struct thread *)uap->id); PROC_LOCK(td->td_proc); FOREACH_THREAD_IN_PROC(td->td_proc, ttd) { if (ttd == tdsleeper) break; } if (ttd == NULL) { PROC_UNLOCK(td->td_proc); return (ESRCH); } mtx_lock_spin(&sched_lock); tdsleeper->td_flags |= TDF_THRWAKEUP; mtx_unlock_spin(&sched_lock); wakeup_one((void *)tdsleeper); PROC_UNLOCK(td->td_proc); return (0); } Index: head/sys/kern/kern_thread.c =================================================================== --- head/sys/kern/kern_thread.c (revision 131148) +++ head/sys/kern/kern_thread.c (revision 131149) @@ -1,1119 +1,1105 @@ /* * Copyright (C) 2001 Julian Elischer . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * KSEGRP related storage. */ static uma_zone_t ksegrp_zone; static uma_zone_t kse_zone; static uma_zone_t thread_zone; /* DEBUG ONLY */ SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation"); static int thread_debug = 0; SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW, &thread_debug, 0, "thread debug"); int max_threads_per_proc = 1500; SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW, &max_threads_per_proc, 0, "Limit on threads per proc"); int max_groups_per_proc = 500; SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW, &max_groups_per_proc, 0, "Limit on thread groups per proc"); int max_threads_hits; SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD, &max_threads_hits, 0, ""); int virtual_cpu; #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses); TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps); struct mtx kse_zombie_lock; MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN); void kse_purge(struct proc *p, struct thread *td); void kse_purge_group(struct thread *td); /* move to proc.h */ extern void kseinit(void); extern void kse_GC(void); static int sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS) { int error, new_val; int def_val; def_val = mp_ncpus; if (virtual_cpu == 0) new_val = def_val; else new_val = virtual_cpu; error = sysctl_handle_int(oidp, &new_val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (new_val < 0) return (EINVAL); virtual_cpu = new_val; return (0); } /* DEBUG ONLY */ SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW, 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I", "debug virtual cpus"); /* * Thread ID allocator. The allocator keeps track of assigned IDs by * using a bitmap. The bitmap is created in parts. The parts are linked * together. */ typedef u_long tid_bitmap_word; #define TID_IDS_PER_PART 1024 #define TID_IDS_PER_IDX (sizeof(tid_bitmap_word) << 3) #define TID_BITMAP_SIZE (TID_IDS_PER_PART / TID_IDS_PER_IDX) #define TID_MIN (PID_MAX + 1) struct tid_bitmap_part { STAILQ_ENTRY(tid_bitmap_part) bmp_next; tid_bitmap_word bmp_bitmap[TID_BITMAP_SIZE]; lwpid_t bmp_base; int bmp_free; }; static STAILQ_HEAD(, tid_bitmap_part) tid_bitmap = STAILQ_HEAD_INITIALIZER(tid_bitmap); static uma_zone_t tid_zone; struct mtx tid_lock; MTX_SYSINIT(tid_lock, &tid_lock, "TID lock", MTX_DEF); /* * Prepare a thread for use. */ static void thread_ctor(void *mem, int size, void *arg) { struct thread *td; td = (struct thread *)mem; - td->td_tid = 0; td->td_state = TDS_INACTIVE; td->td_oncpu = NOCPU; /* * Note that td_critnest begins life as 1 because the thread is not * running and is thereby implicitly waiting to be on the receiving * end of a context switch. A context switch must occur inside a * critical section, and in fact, includes hand-off of the sched_lock. * After a context switch to a newly created thread, it will release * sched_lock for the first time, and its td_critnest will hit 0 for * the first time. 
This happens on the far end of a context switch, * and when it context switches away from itself, it will in fact go * back into a critical section, and hand off the sched lock to the * next thread. */ td->td_critnest = 1; } /* * Reclaim a thread after use. */ static void thread_dtor(void *mem, int size, void *arg) { struct thread *td; - struct tid_bitmap_part *bmp; - lwpid_t tid; - int bit, idx; td = (struct thread *)mem; - if (td->td_tid > PID_MAX) { - STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) { - if (td->td_tid >= bmp->bmp_base && - td->td_tid < bmp->bmp_base + TID_IDS_PER_PART) - break; - } - KASSERT(bmp != NULL, ("No TID bitmap?")); - mtx_lock(&tid_lock); - tid = td->td_tid - bmp->bmp_base; - idx = tid / TID_IDS_PER_IDX; - bit = 1UL << (tid % TID_IDS_PER_IDX); - bmp->bmp_bitmap[idx] |= bit; - bmp->bmp_free++; - mtx_unlock(&tid_lock); - } - #ifdef INVARIANTS /* Verify that this thread is in a safe state to free. */ switch (td->td_state) { case TDS_INHIBITED: case TDS_RUNNING: case TDS_CAN_RUN: case TDS_RUNQ: /* * We must never unlink a thread that is in one of * these states, because it is currently active. */ panic("bad state for thread unlinking"); /* NOTREACHED */ case TDS_INACTIVE: break; default: panic("bad thread state"); /* NOTREACHED */ } #endif } /* * Initialize type-stable parts of a thread (when newly created). */ static void thread_init(void *mem, int size) { - struct thread *td; + struct thread *td; + struct tid_bitmap_part *bmp, *new; + int bit, idx; td = (struct thread *)mem; + + mtx_lock(&tid_lock); + STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) { + if (bmp->bmp_free) + break; + } + /* Create a new bitmap if we run out of free bits. */ + if (bmp == NULL) { + mtx_unlock(&tid_lock); + new = uma_zalloc(tid_zone, M_WAITOK); + mtx_lock(&tid_lock); + bmp = STAILQ_LAST(&tid_bitmap, tid_bitmap_part, bmp_next); + if (bmp == NULL || bmp->bmp_free < TID_IDS_PER_PART/2) { + /* 1=free, 0=assigned. This way we can use ffsl(). */ + memset(new->bmp_bitmap, ~0U, sizeof(new->bmp_bitmap)); + new->bmp_base = (bmp == NULL) ? TID_MIN : + bmp->bmp_base + TID_IDS_PER_PART; + new->bmp_free = TID_IDS_PER_PART; + STAILQ_INSERT_TAIL(&tid_bitmap, new, bmp_next); + bmp = new; + new = NULL; + } + } else + new = NULL; + /* We have a bitmap with available IDs. */ + idx = 0; + while (idx < TID_BITMAP_SIZE && bmp->bmp_bitmap[idx] == 0UL) + idx++; + bit = ffsl(bmp->bmp_bitmap[idx]) - 1; + td->td_tid = bmp->bmp_base + idx * TID_IDS_PER_IDX + bit; + bmp->bmp_bitmap[idx] &= ~(1UL << bit); + bmp->bmp_free--; + mtx_unlock(&tid_lock); + if (new != NULL) + uma_zfree(tid_zone, new); + vm_thread_new(td, 0); cpu_thread_setup(td); td->td_sleepqueue = sleepq_alloc(); td->td_turnstile = turnstile_alloc(); td->td_sched = (struct td_sched *)&td[1]; } /* * Tear down type-stable parts of a thread (just before being discarded). 
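 */

/*
 * Editor's note: a compact userland model of the TID bitmap scan that
 * thread_init() now performs above (and that thread_fini() below
 * undoes).  Set bits mean "free", so ffsl() finds an available ID
 * directly; allocating clears the bit, releasing sets it again.  The
 * sizes and base here are illustrative; the kernel chains bitmap
 * parts together when one fills up.
 */
#if 0	/* illustrative sketch, not part of the patch */
#include <string.h>
#include <strings.h>		/* ffsl(), as in BSD libc */

#define	IDS_PER_WORD	(sizeof(unsigned long) << 3)
#define	NWORDS		16

static unsigned long id_bitmap[NWORDS];

static void
id_bitmap_init(void)
{
	/* 1 = free, 0 = assigned, matching the kernel bitmap. */
	memset(id_bitmap, 0xff, sizeof(id_bitmap));
}

static long
id_alloc(long base)
{
	int bit, idx;

	for (idx = 0; idx < NWORDS && id_bitmap[idx] == 0UL; idx++)
		;
	if (idx == NWORDS)
		return (-1);	/* this part is full */
	bit = ffsl((long)id_bitmap[idx]) - 1;
	id_bitmap[idx] &= ~(1UL << bit);
	return (base + idx * (long)IDS_PER_WORD + bit);
}

static void
id_free(long base, long id)
{
	long off = id - base;

	id_bitmap[off / IDS_PER_WORD] |= 1UL << (off % IDS_PER_WORD);
}
#endif

/*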
*/ static void thread_fini(void *mem, int size) { - struct thread *td; + struct thread *td; + struct tid_bitmap_part *bmp; + lwpid_t tid; + int bit, idx; td = (struct thread *)mem; turnstile_free(td->td_turnstile); sleepq_free(td->td_sleepqueue); vm_thread_dispose(td); + + STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) { + if (td->td_tid >= bmp->bmp_base && + td->td_tid < bmp->bmp_base + TID_IDS_PER_PART) + break; + } + KASSERT(bmp != NULL, ("No TID bitmap?")); + mtx_lock(&tid_lock); + tid = td->td_tid - bmp->bmp_base; + idx = tid / TID_IDS_PER_IDX; + bit = 1UL << (tid % TID_IDS_PER_IDX); + bmp->bmp_bitmap[idx] |= bit; + bmp->bmp_free++; + mtx_unlock(&tid_lock); } /* * Initialize type-stable parts of a kse (when newly created). */ static void kse_init(void *mem, int size) { struct kse *ke; ke = (struct kse *)mem; ke->ke_sched = (struct ke_sched *)&ke[1]; } /* * Initialize type-stable parts of a ksegrp (when newly created). */ static void ksegrp_init(void *mem, int size) { struct ksegrp *kg; kg = (struct ksegrp *)mem; kg->kg_sched = (struct kg_sched *)&kg[1]; } /* * KSE is linked into kse group. */ void kse_link(struct kse *ke, struct ksegrp *kg) { struct proc *p = kg->kg_proc; TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist); kg->kg_kses++; ke->ke_state = KES_UNQUEUED; ke->ke_proc = p; ke->ke_ksegrp = kg; ke->ke_thread = NULL; ke->ke_oncpu = NOCPU; ke->ke_flags = 0; } void kse_unlink(struct kse *ke) { struct ksegrp *kg; mtx_assert(&sched_lock, MA_OWNED); kg = ke->ke_ksegrp; TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); if (ke->ke_state == KES_IDLE) { TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); kg->kg_idle_kses--; } --kg->kg_kses; /* * Aggregate stats from the KSE */ kse_stash(ke); } void ksegrp_link(struct ksegrp *kg, struct proc *p) { TAILQ_INIT(&kg->kg_threads); TAILQ_INIT(&kg->kg_runq); /* links with td_runq */ TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */ TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */ TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */ TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */ kg->kg_proc = p; /* * the following counters are in the -zero- section * and may not need clearing */ kg->kg_numthreads = 0; kg->kg_runnable = 0; kg->kg_kses = 0; kg->kg_runq_kses = 0; /* XXXKSE change name */ kg->kg_idle_kses = 0; kg->kg_numupcalls = 0; /* link it in now that it's consistent */ p->p_numksegrps++; TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp); } void ksegrp_unlink(struct ksegrp *kg) { struct proc *p; mtx_assert(&sched_lock, MA_OWNED); KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads")); KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses")); KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls")); p = kg->kg_proc; TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); p->p_numksegrps--; /* * Aggregate stats from the KSE */ ksegrp_stash(kg); } /* * For a newly created process, * link up all the structures and its initial threads etc. */ void proc_linkup(struct proc *p, struct ksegrp *kg, struct kse *ke, struct thread *td) { TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */ TAILQ_INIT(&p->p_threads); /* all threads in proc */ TAILQ_INIT(&p->p_suspended); /* Threads suspended */ p->p_numksegrps = 0; p->p_numthreads = 0; ksegrp_link(kg, p); kse_link(ke, kg); thread_link(td, kg); } /* * Initialize global thread allocation resources. 
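 */

/*
 * Editor's note: the point of this revision is visible in threadinit()
 * below.  With UMA, the ctor/dtor callbacks run on every uma_zalloc()
 * and uma_zfree(), while the init/fini callbacks run only when an item
 * enters or leaves the zone's backing store.  Moving TID assignment
 * from thread_ctor() (via the removed thread_new_tid()) into
 * thread_init()/thread_fini() therefore lets a cached thread keep its
 * TID across reuse instead of paying for a bitmap scan on every
 * allocation.  A sketch of the lifecycle split (the zone, callbacks
 * and ID helpers here are hypothetical):
 */
#if 0	/* illustrative sketch, not part of the patch */
struct demo_item {
	int	di_id;		/* survives caching, like td_tid */
	int	di_state;	/* reset on every allocation */
};

static int demo_id_alloc(void);		/* hypothetical ID allocator */
static void demo_id_free(int);

static uma_zone_t demo_zone;

static void
demo_ctor(void *mem, int size, void *arg)
{
	/* Runs on every uma_zalloc(): reset per-use state only. */
	((struct demo_item *)mem)->di_state = 0;
}

static void
demo_init(void *mem, int size)
{
	/* Runs once when the item is created: assign the stable ID. */
	((struct demo_item *)mem)->di_id = demo_id_alloc();
}

static void
demo_fini(void *mem, int size)
{
	/* Runs once when the item is destroyed: return the ID. */
	demo_id_free(((struct demo_item *)mem)->di_id);
}

static void
demo_zone_setup(void)
{
	demo_zone = uma_zcreate("DEMO", sizeof(struct demo_item),
	    demo_ctor, NULL, demo_init, demo_fini, UMA_ALIGN_CACHE, 0);
}
#endif

/*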
*/ void threadinit(void) { thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), thread_ctor, thread_dtor, thread_init, thread_fini, UMA_ALIGN_CACHE, 0); tid_zone = uma_zcreate("TID", sizeof(struct tid_bitmap_part), NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(), NULL, NULL, ksegrp_init, NULL, UMA_ALIGN_CACHE, 0); kse_zone = uma_zcreate("KSE", sched_sizeof_kse(), NULL, NULL, kse_init, NULL, UMA_ALIGN_CACHE, 0); kseinit(); } /* * Stash an embarrassingly extra thread into the zombie thread queue. */ void thread_stash(struct thread *td) { mtx_lock_spin(&kse_zombie_lock); TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq); mtx_unlock_spin(&kse_zombie_lock); } /* * Stash an embarrassingly extra kse into the zombie kse queue. */ void kse_stash(struct kse *ke) { mtx_lock_spin(&kse_zombie_lock); TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq); mtx_unlock_spin(&kse_zombie_lock); } /* * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue. */ void ksegrp_stash(struct ksegrp *kg) { mtx_lock_spin(&kse_zombie_lock); TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp); mtx_unlock_spin(&kse_zombie_lock); } /* * Reap zombie thread, kse and ksegrp resources. */ void thread_reap(void) { struct thread *td_first, *td_next; struct kse *ke_first, *ke_next; struct ksegrp *kg_first, *kg_next; /* * Don't even bother to lock if none at this instant; * we really don't care about the next instant. */ if ((!TAILQ_EMPTY(&zombie_threads)) || (!TAILQ_EMPTY(&zombie_kses)) || (!TAILQ_EMPTY(&zombie_ksegrps))) { mtx_lock_spin(&kse_zombie_lock); td_first = TAILQ_FIRST(&zombie_threads); ke_first = TAILQ_FIRST(&zombie_kses); kg_first = TAILQ_FIRST(&zombie_ksegrps); if (td_first) TAILQ_INIT(&zombie_threads); if (ke_first) TAILQ_INIT(&zombie_kses); if (kg_first) TAILQ_INIT(&zombie_ksegrps); mtx_unlock_spin(&kse_zombie_lock); while (td_first) { td_next = TAILQ_NEXT(td_first, td_runq); if (td_first->td_ucred) crfree(td_first->td_ucred); thread_free(td_first); td_first = td_next; } while (ke_first) { ke_next = TAILQ_NEXT(ke_first, ke_procq); kse_free(ke_first); ke_first = ke_next; } while (kg_first) { kg_next = TAILQ_NEXT(kg_first, kg_ksegrp); ksegrp_free(kg_first); kg_first = kg_next; } } kse_GC(); } /* * Allocate a ksegrp. */ struct ksegrp * ksegrp_alloc(void) { return (uma_zalloc(ksegrp_zone, M_WAITOK)); } /* * Allocate a kse. */ struct kse * kse_alloc(void) { return (uma_zalloc(kse_zone, M_WAITOK)); } /* * Allocate a thread. */ struct thread * thread_alloc(void) { thread_reap(); /* check if any zombies to get */ return (uma_zalloc(thread_zone, M_WAITOK)); } /* * Deallocate a ksegrp. */ void ksegrp_free(struct ksegrp *td) { uma_zfree(ksegrp_zone, td); } /* * Deallocate a kse. */ void kse_free(struct kse *td) { uma_zfree(kse_zone, td); } /* * Deallocate a thread. */ void thread_free(struct thread *td) { cpu_thread_clean(td); uma_zfree(thread_zone, td); } - -/* - * Assign a thread ID. - */ -lwpid_t -thread_new_tid(void) -{ - struct tid_bitmap_part *bmp, *new; - lwpid_t tid; - int bit, idx; - - mtx_lock(&tid_lock); - STAILQ_FOREACH(bmp, &tid_bitmap, bmp_next) { - if (bmp->bmp_free) - break; - } - /* Create a new bitmap if we run out of free bits. */ - if (bmp == NULL) { - mtx_unlock(&tid_lock); - new = uma_zalloc(tid_zone, M_WAITOK); - mtx_lock(&tid_lock); - bmp = STAILQ_LAST(&tid_bitmap, tid_bitmap_part, bmp_next); - if (bmp == NULL || bmp->bmp_free < TID_IDS_PER_PART/2) { - /* 1=free, 0=assigned. This way we can use ffsl().
*/ - memset(new->bmp_bitmap, ~0U, sizeof(new->bmp_bitmap)); - new->bmp_base = (bmp == NULL) ? TID_MIN : - bmp->bmp_base + TID_IDS_PER_PART; - new->bmp_free = TID_IDS_PER_PART; - STAILQ_INSERT_TAIL(&tid_bitmap, new, bmp_next); - bmp = new; - new = NULL; - } - } else - new = NULL; - /* We have a bitmap with available IDs. */ - idx = 0; - while (idx < TID_BITMAP_SIZE && bmp->bmp_bitmap[idx] == 0UL) - idx++; - bit = ffsl(bmp->bmp_bitmap[idx]) - 1; - tid = bmp->bmp_base + idx * TID_IDS_PER_IDX + bit; - bmp->bmp_bitmap[idx] &= ~(1UL << bit); - bmp->bmp_free--; - mtx_unlock(&tid_lock); - - if (new != NULL) - uma_zfree(tid_zone, new); - return (tid); -} - /* * Discard the current thread and exit from its context. * Always called with scheduler locked. * * Because we can't free a thread while we're operating under its context, * push the current thread into our CPU's deadthread holder. This means * we needn't worry about someone else grabbing our context before we * do a cpu_throw(). This may not be needed now as we are under schedlock. * Maybe we can just do a thread_stash() as thr_exit1 does. */ /* XXX * libthr expects its thread exit to return for the last * thread, meaning that the program is back to non-threaded * mode I guess. Because we do this (cpu_throw) unconditionally * here, they have their own version of it (thr_exit1()) * that doesn't do it all if this was the last thread. * It is also called from thread_suspend_check(). * Of course in the end, they end up coming here through exit1 * anyhow. After fixing 'thr' to play by the rules we should be able * to merge these two functions together. */ void thread_exit(void) { struct thread *td; struct kse *ke; struct proc *p; struct ksegrp *kg; td = curthread; kg = td->td_ksegrp; p = td->td_proc; ke = td->td_kse; mtx_assert(&sched_lock, MA_OWNED); KASSERT(p != NULL, ("thread exiting without a process")); KASSERT(ke != NULL, ("thread exiting without a kse")); KASSERT(kg != NULL, ("thread exiting without a kse group")); PROC_LOCK_ASSERT(p, MA_OWNED); CTR1(KTR_PROC, "thread_exit: thread %p", td); mtx_assert(&Giant, MA_NOTOWNED); if (td->td_standin != NULL) { thread_stash(td->td_standin); td->td_standin = NULL; } cpu_thread_exit(td); /* XXXSMP */ /* * The last thread is left attached to the process * so that the whole bundle gets recycled. Skip * all this stuff. */ if (p->p_numthreads > 1) { thread_unlink(td); if (p->p_maxthrwaits) wakeup(&p->p_numthreads); /* * The test below is NOT true if we are the * sole exiting thread. P_STOPPED_SINGLE is unset * in exit1() after it is the only survivor. */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount) { thread_unsuspend_one(p->p_singlethread); } } /* * Because each upcall structure has an owner thread, * and that thread exits only when the process is * exiting, upcalls to userland are no longer needed * and deleting the upcall structure is safe here. * So when all threads in a group have exited, all upcalls * in the group should be automatically freed. */ if (td->td_upcall) upcall_remove(td); sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); sched_exit_kse(FIRST_KSE_IN_PROC(p), ke); ke->ke_state = KES_UNQUEUED; ke->ke_thread = NULL; /* * Decide what to do with the KSE attached to this thread.
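 * If it was marked for exit (KEF_EXIT), unlink it, and unlink the
 * ksegrp as well once its last KSE is gone; otherwise hand the KSE
 * to another runnable thread via kse_reassign().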
*/ if (ke->ke_flags & KEF_EXIT) { kse_unlink(ke); if (kg->kg_kses == 0) { sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), kg); ksegrp_unlink(kg); } } else kse_reassign(ke); PROC_UNLOCK(p); td->td_kse = NULL; #if 0 td->td_proc = NULL; #endif td->td_ksegrp = NULL; td->td_last_kse = NULL; PCPU_SET(deadthread, td); } else { PROC_UNLOCK(p); } td->td_state = TDS_INACTIVE; /* XXX Shouldn't cpu_throw() here. */ mtx_assert(&sched_lock, MA_OWNED); cpu_throw(td, choosethread()); panic("I'm a teapot!"); /* NOTREACHED */ } /* * Do any thread specific cleanups that may be needed in wait() * called with Giant, proc and schedlock not held. */ void thread_wait(struct proc *p) { struct thread *td; mtx_assert(&Giant, MA_NOTOWNED); KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()")); KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()")); FOREACH_THREAD_IN_PROC(p, td) { if (td->td_standin != NULL) { thread_free(td->td_standin); td->td_standin = NULL; } cpu_thread_clean(td); } thread_reap(); /* check for zombie threads etc. */ } /* * Link a thread to a process. * set up anything that needs to be initialized for it to * be used by the process. * * Note that we do not link to the proc's ucred here. * The thread is linked as if running but no KSE assigned. */ void thread_link(struct thread *td, struct ksegrp *kg) { struct proc *p; p = kg->kg_proc; td->td_state = TDS_INACTIVE; td->td_proc = p; td->td_ksegrp = kg; td->td_last_kse = NULL; td->td_flags = 0; td->td_kflags = 0; td->td_kse = NULL; LIST_INIT(&td->td_contested); callout_init(&td->td_slpcallout, CALLOUT_MPSAFE); TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist); TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist); p->p_numthreads++; kg->kg_numthreads++; } void thread_unlink(struct thread *td) { struct proc *p = td->td_proc; struct ksegrp *kg = td->td_ksegrp; mtx_assert(&sched_lock, MA_OWNED); TAILQ_REMOVE(&p->p_threads, td, td_plist); p->p_numthreads--; TAILQ_REMOVE(&kg->kg_threads, td, td_kglist); kg->kg_numthreads--; /* could clear a few other things here */ } /* * Purge a ksegrp resource. When a ksegrp is preparing to * exit, it calls this function. */ void kse_purge_group(struct thread *td) { struct ksegrp *kg; struct kse *ke; kg = td->td_ksegrp; KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__)); while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) { KASSERT(ke->ke_state == KES_IDLE, ("%s: wrong idle KSE state", __func__)); kse_unlink(ke); } KASSERT((kg->kg_kses == 1), ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses)); KASSERT((kg->kg_numupcalls == 0), ("%s: ksegrp still has %d upcall datas", __func__, kg->kg_numupcalls)); } /* * Purge a process's KSE resource. When a process is preparing to * exit, it calls kse_purge to release any extra KSE resources in * the process. */ void kse_purge(struct proc *p, struct thread *td) { struct ksegrp *kg; struct kse *ke; KASSERT(p->p_numthreads == 1, ("bad thread number")); while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) { TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); p->p_numksegrps--; /* * There is no ownership for KSE, after all threads * in the group exited, it is possible that some KSEs * were left in idle queue, gc them now. 
*/ while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) { KASSERT(ke->ke_state == KES_IDLE, ("%s: wrong idle KSE state", __func__)); TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); kg->kg_idle_kses--; TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); kg->kg_kses--; kse_stash(ke); } KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) || ((kg->kg_kses == 1) && (kg == td->td_ksegrp)), ("ksegrp has wrong kg_kses: %d", kg->kg_kses)); KASSERT((kg->kg_numupcalls == 0), ("%s: ksegrp still has %d upcall datas", __func__, kg->kg_numupcalls)); if (kg != td->td_ksegrp) ksegrp_stash(kg); } TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp); p->p_numksegrps++; } /* * Enforce single-threading. * * Returns 1 if the caller must abort (another thread is waiting to * exit the process or similar). Process is locked! * Returns 0 when you are successfully the only thread running. * A process has successfully single threaded in the suspend mode when * there are no threads in user mode. Threads in the kernel must be * allowed to continue until they get to the user boundary. They may even * copy out their return values and data before suspending. They may, * however, be accelerated in reaching the user boundary as we will wake up * any sleeping threads that are interruptible (PCATCH). */ int thread_single(int force_exit) { struct thread *td; struct thread *td2; struct proc *p; int remaining; td = curthread; p = td->td_proc; mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT((td != NULL), ("curthread is NULL")); if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1) return (0); /* Is someone already single threading? */ if (p->p_singlethread) return (1); if (force_exit == SINGLE_EXIT) { p->p_flag |= P_SINGLE_EXIT; } else p->p_flag &= ~P_SINGLE_EXIT; p->p_flag |= P_STOPPED_SINGLE; mtx_lock_spin(&sched_lock); p->p_singlethread = td; if (force_exit == SINGLE_EXIT) remaining = p->p_numthreads; else remaining = p->p_numthreads - p->p_suspcount; while (remaining != 1) { FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; td2->td_flags |= TDF_ASTPENDING; if (TD_IS_INHIBITED(td2)) { if (force_exit == SINGLE_EXIT) { if (TD_IS_SUSPENDED(td2)) { thread_unsuspend_one(td2); } if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) { sleepq_abort(td2); } } else { if (TD_IS_SUSPENDED(td2)) continue; /* * maybe other inhibited states too? * XXXKSE Is it totally safe to * suspend a non-interruptible thread? */ if (td2->td_inhibitors & (TDI_SLEEPING | TDI_SWAPPED)) thread_suspend_one(td2); } } } if (force_exit == SINGLE_EXIT) remaining = p->p_numthreads; else remaining = p->p_numthreads - p->p_suspcount; /* * Maybe we suspended some threads; was it enough? */ if (remaining == 1) break; /* * Wake us up when everyone else has suspended. * In the meantime we suspend as well. */ thread_suspend_one(td); PROC_UNLOCK(p); mi_switch(SW_VOL); mtx_unlock_spin(&sched_lock); PROC_LOCK(p); mtx_lock_spin(&sched_lock); if (force_exit == SINGLE_EXIT) remaining = p->p_numthreads; else remaining = p->p_numthreads - p->p_suspcount; } if (force_exit == SINGLE_EXIT) { if (td->td_upcall) upcall_remove(td); kse_purge(p, td); } mtx_unlock_spin(&sched_lock); return (0); } /* * Called in from locations that can safely check to see * whether we have to suspend or at least throttle for a * single-thread event (e.g. fork). * * Such locations include userret(). * If the "return_instead" argument is non-zero, the thread must be able to * accept 0 (caller may continue), or 1 (caller must abort) as a result.
/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0 | return_instead != 0
 *---------------+---------------------+---------------------
 *       0       | returns 0           | returns 0 or 1
 *               | when ST ends        | immediately
 *---------------+---------------------+---------------------
 *       1       | thread exits        | returns 1
 *               |                     | immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			if (p->p_flag & P_SA)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		mi_switch(SW_INVOL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}
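[Editor's note: the decision table in the comment above is compact enough to execute. Below is a hedged userland rendering of just the return-value logic of thread_suspend_check(); the flag values and the suspend_check_model name are invented for illustration, and the real function also stops, suspends, or exits the calling thread as a side effect rather than merely returning.]

#include <stdio.h>

#define	M_SHOULDSTOP	0x1	/* models P_SHOULDSTOP(p) being true */
#define	M_SINGLE_EXIT	0x2	/* models P_SINGLE_EXIT */

/* Returns 0 (caller may continue) or 1 (caller must abort). */
static int
suspend_check_model(int flags, int return_instead, int is_single_threader)
{
	if ((flags & M_SHOULDSTOP) == 0)
		return (0);	/* nothing to do */
	if (is_single_threader)
		return (0);	/* the single threader is exempt */
	if (return_instead)
		return (1);	/* caller must back out immediately */
	if (flags & M_SINGLE_EXIT)
		printf("kernel would call thread_exit() here\n");
	/* Otherwise: suspend until single-threading ends, then continue. */
	return (0);
}

int
main(void)
{
	printf("%d\n", suspend_check_model(M_SHOULDSTOP, 1, 0));	/* 1 */
	printf("%d\n", suspend_check_model(M_SHOULDSTOP, 0, 1));	/* 0 */
	return (0);
}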
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}

Index: head/sys/sys/proc.h
===================================================================
--- head/sys/sys/proc.h	(revision 131148)
+++ head/sys/sys/proc.h	(revision 131149)
@@ -1,936 +1,935 @@
/*-
 * Copyright (c) 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)proc.h	8.15 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#ifndef _SYS_PROC_H_
#define	_SYS_PROC_H_

#include			/* For struct callout. */
#include			/* For struct klist. */
#ifndef _KERNEL
#include
#endif
#include
#include
#include
#include
#include			/* XXX. */
#include
#include
#include
#ifndef _KERNEL
#include			/* For structs itimerval, timeval. */
#else
#include
#endif
#include
#include
#include			/* Machine-dependent proc substruct. */

/*
 * One structure allocated per session.
 *
 * List of locks
 * (m)		locked by s_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct session {
	int		s_count;	/* (m) Ref cnt; pgrps in session. */
	struct proc	*s_leader;	/* (m + e) Session leader. */
	struct vnode	*s_ttyvp;	/* (m) Vnode of controlling tty. */
	struct tty	*s_ttyp;	/* (m) Controlling tty. */
	pid_t		s_sid;		/* (c) Session ID. */
	/* (m) Setlogin() name: */
	char		s_login[roundup(MAXLOGNAME, sizeof(long))];
	struct mtx	s_mtx;		/* Mutex to protect members. */
};

/*
 * One structure allocated per process group.
 *
 * List of locks
 * (m)		locked by pg_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct pgrp {
	LIST_ENTRY(pgrp) pg_hash;	/* (e) Hash chain. */
	LIST_HEAD(, proc) pg_members;	/* (m + e) Pointer to pgrp members. */
	struct session	*pg_session;	/* (c) Pointer to session. */
	struct sigiolst	pg_sigiolst;	/* (m) List of sigio sources. */
	pid_t		pg_id;		/* (c) Pgrp id. */
	int		pg_jobc;	/* (m) job cntl proc count */
	struct mtx	pg_mtx;		/* Mutex to protect members */
};

/*
 * pargs, used to hold a copy of the command line, if it had a sane length.
 */
struct pargs {
	u_int	ar_ref;		/* Reference count. */
	u_int	ar_length;	/* Length. */
	u_char	ar_args[1];	/* Arguments. */
};

/*-
 * Description of a process.
 *
 * This structure contains the information needed to manage a thread of
 * control, known in UN*X as a process; it has references to substructures
 * containing descriptions of things that the process uses, but may share
 * with related processes.  The process structure and the substructures
 * are always addressable except for those marked "(CPU)" below,
 * which might be addressable only on a processor on which the process
 * is running.
 *
 * Below is a key of locks used to protect each member of struct proc.  The
 * lock is indicated by a reference to a specific character in parens in the
 * associated comment.
 *
 *	  - not yet protected
 *	a - only touched by curproc or parent during fork/wait
 *	b - created at fork, never changes
 *		(exception aiods switch vmspaces, but they are also
 *		marked 'P_SYSTEM' so hopefully it will be left alone)
 *	c - locked by proc mtx
 *	d - locked by allproc_lock lock
 *	e - locked by proctree_lock lock
 *	f - session mtx
 *	g - process group mtx
 *	h - callout_lock mtx
 *	i - by curproc or the master session mtx
 *	j - locked by sched_lock mtx
 *	k - only accessed by curthread
 *	l - the attaching proc or attaching proc parent
 *	m - Giant
 *	n - not locked, lazy
 *	o - ktrace lock
 *	p - select lock (sellock)
 *	q - td_contested lock
 *	r - p_peers lock
 *	x - created at fork, only changes during single threading in exec
 *	z - zombie threads/kse/ksegroup lock
 *
 * If the locking key specifies two identifiers (for example, p_pptr) then
 * either lock is sufficient for read access, but both locks must be held
 * for write access.
 */
struct ithd;
struct ke_sched;
struct kg_sched;
struct nlminfo;
struct p_sched;
struct sleepqueue;
struct td_sched;
struct trapframe;
struct turnstile;
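[Editor's note: the one-character lock keys above are documentation only; nothing enforces them at compile time. As a hedged illustration of the discipline they describe, here is a small userland model of a '(c)' member, i.e. one protected by the per-process mutex. The proc_model and set_flag names are invented; in the kernel the lock/unlock pair would be the PROC_LOCK()/PROC_UNLOCK() macros defined further down in this header.]

#include <pthread.h>
#include <stdio.h>

struct proc_model {
	pthread_mutex_t	p_mtx;	/* models struct mtx p_mtx, key (n) */
	int		p_flag;	/* models int p_flag, key (c) */
};

/* A (c) member may only be read-modify-written with p_mtx held. */
static void
set_flag(struct proc_model *p, int flag)
{
	pthread_mutex_lock(&p->p_mtx);		/* kernel: PROC_LOCK(p) */
	p->p_flag |= flag;
	pthread_mutex_unlock(&p->p_mtx);	/* kernel: PROC_UNLOCK(p) */
}

int
main(void)
{
	struct proc_model p = { PTHREAD_MUTEX_INITIALIZER, 0 };

	set_flag(&p, 0x1);
	printf("p_flag = %#x\n", p.p_flag);
	return (0);
}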
/*
 * Here we define the four structures used for process information.
 *
 * The first is the thread.  It might be thought of as a "Kernel
 * Schedulable Entity Context".
 * This structure contains all the information as to where a thread of
 * execution is now, or was when it was suspended, why it was suspended,
 * and anything else that will be needed to restart it when it is
 * rescheduled.  Always associated with a KSE when running, but can be
 * reassigned to an equivalent KSE when being restarted for
 * load balancing.  Each of these is associated with a kernel stack
 * and a pcb.
 *
 * It is important to remember that a particular thread structure only
 * exists as long as the system call or kernel entrance (e.g. by pagefault)
 * which it is currently executing.  It should therefore NEVER be referenced
 * by pointers in long lived structures that live longer than a single
 * request.  If several threads complete their work at the same time,
 * they will all rewind their stacks to the user boundary, report their
 * completion state, and all but one will be freed.  That last one will
 * be kept to provide a kernel stack and pcb for the NEXT syscall or kernel
 * entrance (basically to save freeing and then re-allocating it).  The KSE
 * keeps a cached thread available to allow it to quickly
 * get one when it needs a new one.  There is also a system
 * cache of free threads.  Threads have priority and partake in priority
 * inheritance schemes.
 */
struct thread;

/*
 * The second structure is the Kernel Schedulable Entity (KSE).
 * It represents the ability to take a slot in the scheduler queue.
 * As long as this is scheduled, it could continue to run any threads that
 * are assigned to the KSEGRP (see later) until either it runs out
 * of runnable threads of high enough priority, or CPU.
 * It runs on one CPU and is assigned a quantum of time.  When a thread is
 * blocked, the KSE continues to run and will search for another thread
 * in a runnable state amongst those it has.  It may decide to return to user
 * mode with a new 'empty' thread if there are no runnable threads.
 * Threads are temporarily associated with a KSE for scheduling reasons.
 */
struct kse;

/*
 * The KSEGRP is allocated resources across a number of CPUs
 * (including a number of CPU x QUANTA).  It parcels these QUANTA up among
 * its KSEs, each of which should be running on a different CPU.
 * BASE priority and total available quanta are properties of a KSEGRP.
 * Multiple KSEGRPs in a single process compete against each other
 * for total quanta in the same way that a forked child competes against
 * its parent process.
 */
struct ksegrp;

/*
 * A process is the owner of all system resources allocated to a task
 * except CPU quanta.
 * All KSEGs under one process see, and have the same access to, these
 * resources (e.g. files, memory, sockets, permissions, kqueues).
 * A process may compete for CPU cycles on the same basis as a
 * forked process cluster by spawning several KSEGRPs.
 */
struct proc;

/***************
 * In pictures:

 With a single run queue used by all processors:

 RUNQ: --->KSE---KSE--...          SLEEPQ:[]---THREAD---THREAD---THREAD
            |   /                         []---THREAD
           KSEG---THREAD--THREAD--THREAD  []
                                          []---THREAD---THREAD

  (processors run THREADs from the KSEG until they are exhausted or
  the KSEG exhausts its quantum)

 With PER-CPU run queues:
 KSEs on the separate run queues directly.
 They would be given priorities calculated from the KSEG.
 *
 *****************/
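[Editor's note: before the member-by-member definitions, it may help to see the containment the pictures imply as bare queue linkage. This is a hedged structural sketch, assuming a BSD-style <sys/queue.h> and invented *_model type names; only the TAILQ relationships mirror the real structures that follow.]

#include <sys/queue.h>
#include <stdio.h>

struct thread_model {
	TAILQ_ENTRY(thread_model) td_kglist;	/* threads in one ksegrp */
};
struct kse_model {
	TAILQ_ENTRY(kse_model) ke_kglist;	/* KSEs in one ksegrp */
};
struct ksegrp_model {
	TAILQ_ENTRY(ksegrp_model) kg_ksegrp;	/* ksegrps in one proc */
	TAILQ_HEAD(, thread_model) kg_threads;
	TAILQ_HEAD(, kse_model) kg_kseq;
};
struct proc_model {
	TAILQ_HEAD(, ksegrp_model) p_ksegrps;
};

int
main(void)
{
	struct proc_model p;
	struct ksegrp_model kg;
	struct kse_model ke;
	struct thread_model td;

	/* A new process starts with one embedded ksegrp, KSE and thread. */
	TAILQ_INIT(&p.p_ksegrps);
	TAILQ_INIT(&kg.kg_threads);
	TAILQ_INIT(&kg.kg_kseq);
	TAILQ_INSERT_HEAD(&p.p_ksegrps, &kg, kg_ksegrp);
	TAILQ_INSERT_HEAD(&kg.kg_kseq, &ke, ke_kglist);
	TAILQ_INSERT_HEAD(&kg.kg_threads, &td, td_kglist);
	printf("proc -> ksegrp -> { kse, thread } linked\n");
	return (0);
}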
/*
 * Kernel runnable context (thread).
 * This is what is put to sleep and reactivated.
 * The first KSE available in the correct group will run this thread.
 * If several are available, use the one on the same CPU as last time.
 * When waiting to be run, threads are hung off the KSEGRP in priority order.
 * With N runnable and queued KSEs in the KSEGRP, the first N threads
 * are linked to them.  Other threads are not yet assigned.
 */
struct thread {
	struct proc	*td_proc;	/* (*) Associated process. */
	struct ksegrp	*td_ksegrp;	/* (*) Associated KSEG. */
	TAILQ_ENTRY(thread) td_plist;	/* (*) All threads in this proc. */
	TAILQ_ENTRY(thread) td_kglist;	/* (*) All threads in this ksegrp. */

	/* The two queues below should someday be merged. */
	TAILQ_ENTRY(thread) td_slpq;	/* (j) Sleep queue. */
	TAILQ_ENTRY(thread) td_lockq;	/* (j) Lock queue. */
	TAILQ_ENTRY(thread) td_runq;	/* (j/z) Run queue(s). XXXKSE */

	TAILQ_HEAD(, selinfo) td_selq;	/* (p) List of selinfos. */
	struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
	struct turnstile *td_turnstile;	/* (k) Associated turnstile. */
	lwpid_t		td_tid;		/* (b) Thread ID. */

	/* Cleared during fork1() or thread_sched_upcall(). */
#define	td_startzero td_flags
	int		td_flags;	/* (j) TDF_* flags. */
	int		td_inhibitors;	/* (j) Why can not run. */
	int		td_pflags;	/* (k) Private thread (TDP_*) flags. */
	struct kse	*td_last_kse;	/* (j) Previous value of td_kse. */
	struct kse	*td_kse;	/* (j) Current KSE if running. */
	int		td_dupfd;	/* (k) Ret value from fdopen. XXX */
	void		*td_wchan;	/* (j) Sleep address. */
	const char	*td_wmesg;	/* (j) Reason for sleep. */
	u_char		td_lastcpu;	/* (j) Last cpu we were on. */
	u_char		td_oncpu;	/* (j) Which cpu we are on. */
	short		td_locks;	/* (k) DEBUG: lockmgr count of locks. */
	struct turnstile *td_blocked;	/* (j) Lock process is blocked on. */
	struct ithd	*td_ithd;	/* (b) For interrupt threads only. */
	const char	*td_lockname;	/* (j) Name of lock blocked on. */
	LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */
	struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
	int		td_intr_nesting_level; /* (k) Interrupt recursion. */
	int		td_pinned;	/* (k) Temporary cpu pin count. */
	struct kse_thr_mailbox *td_mailbox; /* (*) Userland mailbox address. */
	struct ucred	*td_ucred;	/* (k) Reference to credentials. */
	struct thread	*td_standin;	/* (*) Use this for an upcall. */
	u_int		td_prticks;	/* (*) Profclock hits in sys for user. */
	struct kse_upcall *td_upcall;	/* (*) Upcall structure. */
	u_int64_t	td_sticks;	/* (j) Statclock hits in system mode. */
	u_int		td_uuticks;	/* (*) Statclock in user, for UTS. */
	u_int		td_usticks;	/* (*) Statclock in kernel, for UTS. */
	int		td_intrval;	/* (*) Return value of TDF_INTERRUPT. */
	sigset_t	td_oldsigmask;	/* (k) Saved mask from pre sigpause. */
	sigset_t	td_sigmask;	/* (c) Current signal mask. */
	sigset_t	td_siglist;	/* (c) Sigs arrived, not delivered. */
	sigset_t	*td_waitset;	/* (c) Wait set for sigwait. */
	TAILQ_ENTRY(thread) td_umtx;	/* (c?) Link for when we're blocked. */
	volatile u_int	td_generation;	/* (k) Enable detection of preemption. */
	stack_t		td_sigstk;	/* (k) Stack ptr and on-stack flag. */
	int		td_kflags;	/* (c) Flags for KSE threading. */
#define	td_endzero td_base_pri

	/* Copied during fork1() or thread_sched_upcall(). */
#define	td_startcopy td_endzero
	u_char		td_base_pri;	/* (j) Thread base kernel priority. */
	u_char		td_priority;	/* (j) Thread active priority. */
#define	td_endcopy td_pcb

	/*
	 * Fields that must be manually set in fork1() or thread_sched_upcall()
	 * or already have been set in the allocator, constructor, etc.
	 */
	struct pcb	*td_pcb;	/* (k) Kernel VA of pcb and kstack. */
	enum {
		TDS_INACTIVE = 0x0,
		TDS_INHIBITED,
		TDS_CAN_RUN,
		TDS_RUNQ,
		TDS_RUNNING
	} td_state;
	register_t	td_retval[2];	/* (k) Syscall aux returns. */
	struct callout	td_slpcallout;	/* (h) Callout for sleep. */
	struct trapframe *td_frame;	/* (k) */
	struct vm_object *td_kstack_obj; /* (a) Kstack object. */
	vm_offset_t	td_kstack;	/* (a) Kernel VA of kstack. */
	int		td_kstack_pages; /* (a) Size of the kstack. */
	struct vm_object *td_altkstack_obj; /* (a) Alternate kstack object. */
	vm_offset_t	td_altkstack;	/* (a) Kernel VA of alternate kstack. */
	int		td_altkstack_pages; /* (a) Size of the alternate kstack. */
	u_int		td_critnest;	/* (k) Critical section nest level. */
	struct mdthread td_md;		/* (k) Any machine-dependent fields. */
	struct td_sched	*td_sched;	/* (*) Scheduler-specific data. */
};
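[Editor's note: the td_startzero/td_endzero and td_startcopy/td_endcopy markers let fork and upcall code bzero or bcopy a whole run of members at once instead of naming each field. A hedged userland sketch of the idiom follows; the five-member struct is invented, and FreeBSD expresses the same offset arithmetic with its __rangeof() macro.]

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct td_model {
	int	td_flags;	/* first zeroed member */
	int	td_inhibitors;
	char	td_base_pri;	/* first copied member */
	char	td_priority;
	void	*td_pcb;	/* "must be manually set" region */
};
#define	m_startzero	td_flags
#define	m_endzero	td_base_pri
#define	m_startcopy	m_endzero
#define	m_endcopy	td_pcb

int
main(void)
{
	struct td_model parent = { 7, 3, 120, 130, NULL };
	struct td_model child;

	/* Fork-time treatment: zero one span, copy the next. */
	memset((char *)&child + offsetof(struct td_model, m_startzero), 0,
	    offsetof(struct td_model, m_endzero) -
	    offsetof(struct td_model, m_startzero));
	memcpy((char *)&child + offsetof(struct td_model, m_startcopy),
	    (char *)&parent + offsetof(struct td_model, m_startcopy),
	    offsetof(struct td_model, m_endcopy) -
	    offsetof(struct td_model, m_startcopy));
	child.td_pcb = NULL;	/* the manually-set region */
	printf("flags=%d base_pri=%d\n", child.td_flags, child.td_base_pri);
	return (0);
}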
/* Flags kept in td_flags: */
#define	TDF_INPANIC	0x000002 /* Caused a panic, let it drive crashdump. */
#define	TDF_CAN_UNBIND	0x000004 /* Only temporarily bound. */
#define	TDF_SINTR	0x000008 /* Sleep is interruptible. */
#define	TDF_TIMEOUT	0x000010 /* Timing out during sleep. */
#define	TDF_IDLETD	0x000020 /* This is one of the per-CPU idle threads. */
#define	TDF_SELECT	0x000040 /* Selecting; wakeup/waiting danger. */
#define	TDF_TSNOBLOCK	0x000100 /* Don't block on a turnstile due to race. */
#define	TDF_ASTPENDING	0x000800 /* Thread has some asynchronous events. */
#define	TDF_TIMOFAIL	0x001000 /* Timeout from sleep after we were awake. */
#define	TDF_INTERRUPT	0x002000 /* Thread is marked as interrupted. */
#define	TDF_USTATCLOCK	0x004000 /* Finish user statclock hit at next AST. */
#define	TDF_OWEUPC	0x008000 /* Owe thread an addupc() call at next AST. */
#define	TDF_NEEDRESCHED	0x010000 /* Thread needs to yield. */
#define	TDF_NEEDSIGCHK	0x020000 /* Thread may need signal delivery. */
#define	TDF_UMTXWAKEUP	0x080000 /* Libthr thread must not sleep on a umtx. */
#define	TDF_THRWAKEUP	0x100000 /* Libthr thread must not suspend itself. */

/* "Private" flags kept in td_pflags: */
#define	TDP_OLDMASK	0x0001	/* Need to restore mask after suspend. */
#define	TDP_INKTR	0x0002	/* Thread is currently in KTR code. */
#define	TDP_INKTRACE	0x0004	/* Thread is currently in KTRACE code. */
#define	TDP_UPCALLING	0x0008	/* This thread is doing an upcall. */
#define	TDP_COWINPROGRESS 0x0010 /* Snapshot copy-on-write in progress. */
#define	TDP_ALTSTACK	0x0020	/* Have alternate signal stack. */
#define	TDP_DEADLKTREAT	0x0040	/* Lock acquisition - deadlock treatment. */
#define	TDP_SA		0x0080	/* A scheduler activation based thread. */

/* Inhibitor flags kept in td_inhibitors: */
#define	TDI_SUSPENDED	0x0001	/* On suspension queue. */
#define	TDI_SLEEPING	0x0002	/* Actually asleep! (tricky). */
#define	TDI_SWAPPED	0x0004	/* Stack not in mem.. bad juju if run. */
#define	TDI_LOCK	0x0008	/* Stopped on a lock. */
#define	TDI_IWAIT	0x0010	/* Awaiting interrupt. */

/* KSE-threading flags kept in td_kflags: */
#define	TDK_KSEREL	0x0001	/* Blocked in msleep on kg->kg_completed. */
#define	TDK_KSERELSIG	0x0002	/* Blocked in msleep on p->p_siglist. */
#define	TDK_WAKEUP	0x0004	/* Thread has been woken by kse_wakeup. */
#define	TD_CAN_UNBIND(td)					\
    (((td)->td_flags & TDF_CAN_UNBIND) == TDF_CAN_UNBIND &&	\
     ((td)->td_upcall != NULL))

#define	TD_IS_SLEEPING(td)	((td)->td_inhibitors & TDI_SLEEPING)
#define	TD_ON_SLEEPQ(td)	((td)->td_wchan != NULL)
#define	TD_IS_SUSPENDED(td)	((td)->td_inhibitors & TDI_SUSPENDED)
#define	TD_IS_SWAPPED(td)	((td)->td_inhibitors & TDI_SWAPPED)
#define	TD_ON_LOCK(td)		((td)->td_inhibitors & TDI_LOCK)
#define	TD_AWAITING_INTR(td)	((td)->td_inhibitors & TDI_IWAIT)
#define	TD_IS_RUNNING(td)	((td)->td_state == TDS_RUNNING)
#define	TD_ON_RUNQ(td)		((td)->td_state == TDS_RUNQ)
#define	TD_CAN_RUN(td)		((td)->td_state == TDS_CAN_RUN)
#define	TD_IS_INHIBITED(td)	((td)->td_state == TDS_INHIBITED)

#define	TD_SET_INHIB(td, inhib) do {		\
	(td)->td_state = TDS_INHIBITED;		\
	(td)->td_inhibitors |= (inhib);		\
} while (0)

#define	TD_CLR_INHIB(td, inhib) do {			\
	if (((td)->td_inhibitors & (inhib)) &&		\
	    (((td)->td_inhibitors &= ~(inhib)) == 0))	\
		(td)->td_state = TDS_CAN_RUN;		\
} while (0)

#define	TD_SET_SLEEPING(td)	TD_SET_INHIB((td), TDI_SLEEPING)
#define	TD_SET_SWAPPED(td)	TD_SET_INHIB((td), TDI_SWAPPED)
#define	TD_SET_LOCK(td)		TD_SET_INHIB((td), TDI_LOCK)
#define	TD_SET_SUSPENDED(td)	TD_SET_INHIB((td), TDI_SUSPENDED)
#define	TD_SET_IWAIT(td)	TD_SET_INHIB((td), TDI_IWAIT)
#define	TD_SET_EXITING(td)	TD_SET_INHIB((td), TDI_EXITING)

#define	TD_CLR_SLEEPING(td)	TD_CLR_INHIB((td), TDI_SLEEPING)
#define	TD_CLR_SWAPPED(td)	TD_CLR_INHIB((td), TDI_SWAPPED)
#define	TD_CLR_LOCK(td)		TD_CLR_INHIB((td), TDI_LOCK)
#define	TD_CLR_SUSPENDED(td)	TD_CLR_INHIB((td), TDI_SUSPENDED)
#define	TD_CLR_IWAIT(td)	TD_CLR_INHIB((td), TDI_IWAIT)

#define	TD_SET_RUNNING(td)	(td)->td_state = TDS_RUNNING
#define	TD_SET_RUNQ(td)		(td)->td_state = TDS_RUNQ
#define	TD_SET_CAN_RUN(td)	(td)->td_state = TDS_CAN_RUN

/*
 * The schedulable entity that can be given a context to run.
 * A process may have several of these.  Probably one per processor
 * but possibly a few more.  In this universe they are grouped
 * with a KSEG that contains the priority and niceness
 * for the group.
 */
struct kse {
	struct proc	*ke_proc;	/* (*) Associated process. */
	struct ksegrp	*ke_ksegrp;	/* (*) Associated KSEG. */
	TAILQ_ENTRY(kse) ke_kglist;	/* (*) Queue of KSEs in ke_ksegrp. */
	TAILQ_ENTRY(kse) ke_kgrlist;	/* (*) Queue of KSEs in this state. */
	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
#define	ke_startzero ke_flags
	int		ke_flags;	/* (j) KEF_* flags. */
	struct thread	*ke_thread;	/* (*) Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ke_oncpu;	/* (j) Which cpu we are on. */
	char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_UNUSED = 0x0,
		KES_IDLE,
		KES_ONRUNQ,
		KES_UNQUEUED,		/* in transit */
		KES_THREAD		/* slaved to thread state */
	} ke_state;			/* (j) KSE status. */
#define	ke_endzero ke_dummy
	u_char		ke_dummy;
	struct ke_sched	*ke_sched;	/* (*) Scheduler-specific data. */
};

/* flags kept in ke_flags */
#define	KEF_SCHED0	0x00001	/* For scheduler-specific use. */
#define	KEF_SCHED1	0x00002	/* For scheduler-specific use. */
#define	KEF_SCHED2	0x00004	/* For scheduler-specific use. */
#define	KEF_SCHED3	0x00008	/* For scheduler-specific use. */
#define	KEF_DIDRUN	0x02000	/* KSE actually ran. */
#define	KEF_EXIT	0x04000	/* KSE is being killed. */
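[Editor's note: TD_SET_INHIB() and TD_CLR_INHIB() above encode one invariant worth spelling out: a thread leaves TDS_INHIBITED only when its *last* inhibitor bit is cleared. The self-contained model below demonstrates that with the "two inhibitors" situation thread_suspend_one() deliberately creates for a thread that is both suspended and sleeping; the bit values and names are invented to avoid clashing with the real header.]

#include <stdio.h>

enum st { ST_CAN_RUN, ST_INHIBITED };
#define	I_SUSPENDED	0x1
#define	I_SLEEPING	0x2

struct td_model {
	enum st	state;
	int	inhibitors;
};

static void
set_inhib(struct td_model *td, int i)
{
	td->state = ST_INHIBITED;
	td->inhibitors |= i;
}

/* Mirrors TD_CLR_INHIB: only the final clear makes the thread runnable. */
static void
clr_inhib(struct td_model *td, int i)
{
	if ((td->inhibitors & i) && (td->inhibitors &= ~i) == 0)
		td->state = ST_CAN_RUN;
}

int
main(void)
{
	struct td_model td = { ST_CAN_RUN, 0 };

	set_inhib(&td, I_SUSPENDED);	/* thread_suspend_one() */
	set_inhib(&td, I_SLEEPING);	/* the sleep-queue hack */
	clr_inhib(&td, I_SLEEPING);	/* wakeup: still suspended */
	printf("after wakeup: %s\n",
	    td.state == ST_INHIBITED ? "inhibited" : "can run");
	clr_inhib(&td, I_SUSPENDED);	/* thread_unsuspend_one() */
	printf("after unsuspend: %s\n",
	    td.state == ST_CAN_RUN ? "can run" : "inhibited");
	return (0);
}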
/*
 * The upcall management structure.
 * The upcall is used when returning to userland.  If a thread does not have
 * an upcall on return to userland the thread exports its context and exits.
 */
struct kse_upcall {
	TAILQ_ENTRY(kse_upcall) ku_link; /* List of upcalls in KSEG. */
	struct ksegrp	*ku_ksegrp;	/* Associated KSEG. */
	struct thread	*ku_owner;	/* Owning thread. */
	int		ku_flags;	/* KUF_* flags. */
	struct kse_mailbox *ku_mailbox;	/* Userland mailbox address. */
	stack_t		ku_stack;	/* Userland upcall stack. */
	void		*ku_func;	/* Userland upcall function. */
	unsigned int	ku_mflags;	/* Cached upcall mailbox flags. */
};

#define	KUF_DOUPCALL	0x00001	/* Do upcall now, don't wait. */
#define	KUF_EXITING	0x00002	/* Upcall structure is exiting. */

/*
 * Kernel-scheduled entity group (KSEG).  The scheduler considers each KSEG
 * to be an indivisible unit from a time-sharing perspective, though each
 * KSEG may contain multiple KSEs.
 */
struct ksegrp {
	struct proc	*kg_proc;	/* (*) Process that contains this KSEG. */
	TAILQ_ENTRY(ksegrp) kg_ksegrp;	/* (*) Queue of KSEGs in kg_proc. */
	TAILQ_HEAD(, kse) kg_kseq;	/* (ke_kglist) All KSEs. */
	TAILQ_HEAD(, kse) kg_iq;	/* (ke_kgrlist) All idle KSEs. */
	TAILQ_HEAD(, thread) kg_threads; /* (td_kglist) All threads. */
	TAILQ_HEAD(, thread) kg_runq;	/* (td_runq) Waiting RUNNABLE threads. */
	TAILQ_HEAD(, thread) kg_slpq;	/* (td_runq) NONRUNNABLE threads. */
	TAILQ_HEAD(, kse_upcall) kg_upcalls; /* All upcalls in the group. */
#define	kg_startzero kg_estcpu
	u_int		kg_estcpu;	/* (j) Sum of the same field in KSEs. */
	u_int		kg_slptime;	/* (j) How long completely blocked. */
	struct thread	*kg_last_assigned; /* (j) Last thread assigned to a KSE. */
	int		kg_runnable;	/* (j) Num runnable threads on queue. */
	int		kg_runq_kses;	/* (j) Num KSEs on runq. */
	int		kg_idle_kses;	/* (j) Num KSEs on iq. */
	int		kg_numupcalls;	/* (j) Num upcalls. */
	int		kg_upsleeps;	/* (c) Num threads in kse_release(). */
	struct kse_thr_mailbox *kg_completed; /* (c) Completed thread mboxes. */
	int		kg_nextupcall;	/* (*) Next upcall time. */
	int		kg_upquantum;	/* (*) Quantum to schedule an upcall. */
#define	kg_endzero kg_pri_class

#define	kg_startcopy kg_endzero
	u_char		kg_pri_class;	/* (j) Scheduling class. */
	u_char		kg_user_pri;	/* (j) User pri from estcpu and nice. */
#define	kg_endcopy kg_numthreads
	int		kg_numthreads;	/* (j) Num threads in total. */
	int		kg_kses;	/* (j) Num KSEs in group. */
	struct kg_sched	*kg_sched;	/* (*) Scheduler-specific data. */
};

/*
 * The old-fashioned process.  May have multiple threads, KSEGRPs
 * and KSEs.  Starts off with a single embedded KSEGRP, KSE and THREAD.
 */
struct proc {
	LIST_ENTRY(proc) p_list;	/* (d) List of all processes. */
	TAILQ_HEAD(, ksegrp) p_ksegrps;	/* (kg_ksegrp) All KSEGs. */
	TAILQ_HEAD(, thread) p_threads;	/* (td_plist) Threads. (shortcut) */
	TAILQ_HEAD(, thread) p_suspended; /* (td_runq) Suspended threads. */
	struct ucred	*p_ucred;	/* (c) Process owner's identity. */
	struct filedesc	*p_fd;		/* (b) Ptr to open files structure. */
	struct filedesc_to_leader *p_fdtol; /* (b) Ptr to tracking node. */
	/* Accumulated stats for all KSEs? */
	struct pstats	*p_stats;	/* (b) Accounting/statistics (CPU). */
	struct plimit	*p_limit;	/* (c) Process limits. */
	struct vm_object *p_upages_obj;	/* (a) Upages object. */
	struct sigacts	*p_sigacts;	/* (x) Signal actions, state (CPU). */
	/*
	 * The following don't make too much sense.
	 * See the td_ or ke_ versions of the same flags.
	 */
	int		p_flag;		/* (c) P_* flags. */
	int		p_sflag;	/* (j) PS_* flags. */
	enum {
		PRS_NEW = 0,		/* In creation */
		PRS_NORMAL,		/* KSEs can be run. */
		PRS_ZOMBIE
	} p_state;			/* (j/c) S* process status. */
	pid_t		p_pid;		/* (b) Process identifier. */
	LIST_ENTRY(proc) p_hash;	/* (d) Hash chain. */
	LIST_ENTRY(proc) p_pglist;	/* (g + e) List of processes in pgrp. */
	struct proc	*p_pptr;	/* (c + e) Pointer to parent process. */
	LIST_ENTRY(proc) p_sibling;	/* (e) List of sibling processes. */
	LIST_HEAD(, proc) p_children;	/* (e) Pointer to list of children. */
	struct mtx	p_mtx;		/* (n) Lock for this struct. */

	/* The following fields are all zeroed upon creation in fork. */
#define	p_startzero p_oppid
	pid_t		p_oppid;	/* (c + e) Save ppid in ptrace. XXX */
	struct vmspace	*p_vmspace;	/* (b) Address space. */
	u_int		p_swtime;	/* (j) Time swapped in or out. */
	struct itimerval p_realtimer;	/* (c) Alarm timer. */
	struct bintime	p_runtime;	/* (j) Real time. */
	u_int64_t	p_uu;		/* (j) Previous user time in usec. */
	u_int64_t	p_su;		/* (j) Previous system time in usec. */
	u_int64_t	p_iu;		/* (j) Previous intr time in usec. */
	u_int64_t	p_uticks;	/* (j) Statclock hits in user mode. */
	u_int64_t	p_sticks;	/* (j) Statclock hits in system mode. */
	u_int64_t	p_iticks;	/* (j) Statclock hits in intr. */
	int		p_profthreads;	/* (c) Num threads in addupc_task. */
	int		p_maxthrwaits;	/* (c) Max threads num waiters */
	int		p_traceflag;	/* (o) Kernel trace points. */
	struct vnode	*p_tracevp;	/* (c + o) Trace to vnode. */
	struct ucred	*p_tracecred;	/* (o) Credentials to trace with. */
	struct vnode	*p_textvp;	/* (b) Vnode of executable. */
	sigset_t	p_siglist;	/* (c) Sigs not delivered to a td. */
	char		p_lock;		/* (c) Proclock (prevent swap) count. */
	struct klist	p_klist;	/* (c) Knotes attached to this proc. */
	struct sigiolst	p_sigiolst;	/* (c) List of sigio sources. */
	int		p_sigparent;	/* (c) Signal to parent on exit. */
	int		p_sig;		/* (n) For core dump/debugger XXX. */
	u_long		p_code;		/* (n) For core dump/debugger XXX. */
	u_int		p_stops;	/* (c) Stop event bitmask. */
	u_int		p_stype;	/* (c) Stop event type. */
	char		p_step;		/* (c) Process is stopped. */
	u_char		p_pfsflags;	/* (c) Procfs flags. */
	struct nlminfo	*p_nlminfo;	/* (?) Only used by/for lockd. */
	void		*p_aioinfo;	/* (?) ASYNC I/O info. */
	struct thread	*p_singlethread; /* (c + j) If single threading this is it */
	int		p_suspcount;	/* (c) # threads in suspended mode */
	/* End area that is zeroed on creation. */
#define	p_endzero p_magic

	/* The following fields are all copied upon creation in fork. */
#define	p_startcopy p_endzero
	u_int		p_magic;	/* (b) Magic number. */
	char		p_comm[MAXCOMLEN + 1];	/* (b) Process name. */
	struct pgrp	*p_pgrp;	/* (c + e) Pointer to process group. */
	struct sysentvec *p_sysent;	/* (b) Syscall dispatch info. */
	struct pargs	*p_args;	/* (c) Process arguments. */
	rlim_t		p_cpulimit;	/* (j) Current CPU limit in seconds. */
	signed char	p_nice;		/* (c + j) Process "nice" value. */
	/* End area that is copied on creation. */
#define	p_endcopy p_xstat
	u_short		p_xstat;	/* (c) Exit status; also stop sig. */
	int		p_numthreads;	/* (j) Number of threads. */
	int		p_numksegrps;	/* (?) Number of ksegrps. */
	struct mdproc	p_md;		/* Any machine-dependent fields. */
	struct callout	p_itcallout;	/* (h + c) Interval timer callout. */
	struct user	*p_uarea;	/* (k) Kernel VA of u-area (CPU). */
	u_short		p_acflag;	/* (c) Accounting flags. */
	struct rusage	*p_ru;		/* (a) Exit information. XXX */
	struct proc	*p_peers;	/* (r) */
	struct proc	*p_leader;	/* (b) */
	void		*p_emuldata;	/* (c) Emulator state data. */
	struct label	*p_label;	/* (*) Proc (not subject) MAC label. */
	struct p_sched	*p_sched;	/* (*) Scheduler-specific data. */
};

#define	p_session	p_pgrp->pg_session
#define	p_pgid		p_pgrp->pg_id

#define	NOCPU	0xff		/* For when we aren't on a CPU. (SMP) */

/* Status values (p_stat). */

/* These flags are kept in p_flag. */
#define	P_ADVLOCK	0x00001	/* Process may hold a POSIX advisory lock. */
#define	P_CONTROLT	0x00002	/* Has a controlling terminal. */
#define	P_KTHREAD	0x00004	/* Kernel thread. (*) */
#define	P_NOLOAD	0x00008	/* Ignore during load avg calculations. */
#define	P_PPWAIT	0x00010	/* Parent is waiting for child to exec/exit. */
#define	P_PROFIL	0x00020	/* Has started profiling. */
#define	P_STOPPROF	0x00040	/* Has a thread requesting to stop profiling. */
#define	P_SUGID		0x00100	/* Had set id privileges since last exec. */
#define	P_SYSTEM	0x00200	/* System proc: no sigs, stats or swapping. */
#define	P_SINGLE_EXIT	0x00400	/* Threads suspending should exit, not wait. */
#define	P_TRACED	0x00800	/* Debugged process being traced. */
#define	P_WAITED	0x01000	/* Someone is waiting for us. */
#define	P_WEXIT		0x02000	/* Working on exiting. */
#define	P_EXEC		0x04000	/* Process called exec. */
#define	P_SA		0x08000	/* Using scheduler activations. */
#define	P_CONTINUED	0x10000	/* Proc has continued from a stopped state. */
#define	P_STOPPED_SIG	0x20000	/* Stopped due to SIGSTOP/SIGTSTP. */
#define	P_STOPPED_TRACE	0x40000	/* Stopped because of tracing. */
#define	P_STOPPED_SINGLE 0x80000 /* Only one thread can continue */
				 /* (not to user) */
#define	P_PROTECTED	0x100000 /* Do not kill on memory overcommit. */
#define	P_SIGEVENT	0x200000 /* Process pending signals changed. */
#define	P_JAILED	0x1000000 /* Process is in jail. */
#define	P_INEXEC	0x4000000 /* Process is in execve(). */

#define	P_STOPPED	(P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
#define	P_SHOULDSTOP(p)	((p)->p_flag & P_STOPPED)

/* These flags are kept in p_sflag and are protected with sched_lock. */
#define	PS_INMEM	0x00001	/* Loaded into memory. */
#define	PS_XCPU		0x00002	/* Exceeded CPU limit. */
#define	PS_ALRMPEND	0x00020	/* Pending SIGVTALRM needs to be posted. */
#define	PS_PROFPEND	0x00040	/* Pending SIGPROF needs to be posted. */
#define	PS_SWAPINREQ	0x00100	/* Swapin request due to wakeup. */
#define	PS_SWAPPINGOUT	0x00200	/* Process is being swapped out. */
#define	PS_SWAPPINGIN	0x04000	/* Process is being swapped in. */
#define	PS_MACPEND	0x08000	/* Ast()-based MAC event pending. */

/* used only in legacy conversion code */
#define	SIDL	1		/* Process being created by fork. */
#define	SRUN	2		/* Currently runnable. */
#define	SSLEEP	3		/* Sleeping on an address. */
#define	SSTOP	4		/* Process debugging or suspension. */
#define	SZOMB	5		/* Awaiting collection by parent. */
#define	SWAIT	6		/* Waiting for interrupt. */
#define	SLOCK	7		/* Blocked on a lock. */
#define	P_MAGIC	0xbeefface

#ifdef _KERNEL

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
MALLOC_DECLARE(M_PGRP);
MALLOC_DECLARE(M_SESSION);
MALLOC_DECLARE(M_SUBPROC);
MALLOC_DECLARE(M_ZOMBIE);
#endif

#define	FOREACH_PROC_IN_SYSTEM(p)					\
	LIST_FOREACH((p), &allproc, p_list)
#define	FOREACH_KSEGRP_IN_PROC(p, kg)					\
	TAILQ_FOREACH((kg), &(p)->p_ksegrps, kg_ksegrp)
#define	FOREACH_THREAD_IN_GROUP(kg, td)					\
	TAILQ_FOREACH((td), &(kg)->kg_threads, td_kglist)
#define	FOREACH_KSE_IN_GROUP(kg, ke)					\
	TAILQ_FOREACH((ke), &(kg)->kg_kseq, ke_kglist)
#define	FOREACH_UPCALL_IN_GROUP(kg, ku)					\
	TAILQ_FOREACH((ku), &(kg)->kg_upcalls, ku_link)
#define	FOREACH_THREAD_IN_PROC(p, td)					\
	TAILQ_FOREACH((td), &(p)->p_threads, td_plist)

/* XXXKSE the lines below should probably only be used in 1:1 code */
#define	FIRST_THREAD_IN_PROC(p)	TAILQ_FIRST(&(p)->p_threads)
#define	FIRST_KSEGRP_IN_PROC(p)	TAILQ_FIRST(&(p)->p_ksegrps)
#define	FIRST_KSE_IN_KSEGRP(kg)	TAILQ_FIRST(&(kg)->kg_kseq)
#define	FIRST_KSE_IN_PROC(p)	FIRST_KSE_IN_KSEGRP(FIRST_KSEGRP_IN_PROC(p))

/*
 * We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t,
 * as it is used to represent "no process group".
 */
#define	PID_MAX		99999
#define	NO_PID		100000

#define	SESS_LEADER(p)	((p)->p_session->s_leader == (p))
#define	SESSHOLD(s)	((s)->s_count++)
#define	SESSRELE(s) {							\
	if (--(s)->s_count == 0)					\
		FREE(s, M_SESSION);					\
}

#define	STOPEVENT(p, e, v) do {						\
	if ((p)->p_stops & (e)) {					\
		PROC_LOCK(p);						\
		stopevent((p), (e), (v));				\
		PROC_UNLOCK(p);						\
	}								\
} while (0)
#define	_STOPEVENT(p, e, v) do {					\
	PROC_LOCK_ASSERT(p, MA_OWNED);					\
	if ((p)->p_stops & (e))						\
		stopevent((p), (e), (v));				\
} while (0)

/* Lock and unlock a process. */
#define	PROC_LOCK(p)	mtx_lock(&(p)->p_mtx)
#define	PROC_TRYLOCK(p)	mtx_trylock(&(p)->p_mtx)
#define	PROC_UNLOCK(p)	mtx_unlock(&(p)->p_mtx)
#define	PROC_LOCKED(p)	mtx_owned(&(p)->p_mtx)
#define	PROC_LOCK_ASSERT(p, type)	mtx_assert(&(p)->p_mtx, (type))

/* Lock and unlock a process group. */
#define	PGRP_LOCK(pg)	mtx_lock(&(pg)->pg_mtx)
#define	PGRP_UNLOCK(pg)	mtx_unlock(&(pg)->pg_mtx)
#define	PGRP_LOCKED(pg)	mtx_owned(&(pg)->pg_mtx)
#define	PGRP_LOCK_ASSERT(pg, type)	mtx_assert(&(pg)->pg_mtx, (type))

#define	PGRP_LOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_LOCK(pg);						\
} while (0)
#define	PGRP_UNLOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_UNLOCK(pg);					\
} while (0)

/* Lock and unlock a session. */
#define	SESS_LOCK(s)	mtx_lock(&(s)->s_mtx)
#define	SESS_UNLOCK(s)	mtx_unlock(&(s)->s_mtx)
#define	SESS_LOCKED(s)	mtx_owned(&(s)->s_mtx)
#define	SESS_LOCK_ASSERT(s, type)	mtx_assert(&(s)->s_mtx, (type))

/* Hold process U-area in memory, normally for ptrace/procfs work. */
#define	PHOLD(p) do {							\
	PROC_LOCK(p);							\
	_PHOLD(p);							\
	PROC_UNLOCK(p);							\
} while (0)
#define	_PHOLD(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	(p)->p_lock++;							\
	if (((p)->p_sflag & PS_INMEM) == 0)				\
		faultin((p));						\
} while (0)

#define	PRELE(p) do {							\
	PROC_LOCK((p));							\
	_PRELE((p));							\
	PROC_UNLOCK((p));						\
} while (0)
#define	_PRELE(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	(--(p)->p_lock);						\
} while (0)

/* Check whether a thread is safe to be swapped out. */
#define	thread_safetoswapout(td) (TD_IS_SLEEPING(td) || TD_IS_SUSPENDED(td))
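[Editor's note: PHOLD/_PHOLD and PRELE/_PRELE illustrate a naming convention used throughout this header: the leading-underscore variant asserts that the caller already holds the proc lock, while the plain variant acquires and drops it around the underscore version. Below is a hedged userland model of that pairing, with invented names, a pthread mutex standing in for the proc mutex, and a bare counter standing in for p_lock plus the faultin() side effect.]

#include <pthread.h>
#include <stdio.h>

struct proc_model {
	pthread_mutex_t	mtx;	/* stands in for p_mtx */
	int		holdcnt; /* stands in for p_lock */
};

static void
_phold_model(struct proc_model *p)
{
	/* kernel: PROC_LOCK_ASSERT(p, MA_OWNED) */
	p->holdcnt++;
}

static void
phold_model(struct proc_model *p)
{
	pthread_mutex_lock(&p->mtx);	/* kernel: PROC_LOCK(p) */
	_phold_model(p);
	pthread_mutex_unlock(&p->mtx);	/* kernel: PROC_UNLOCK(p) */
}

int
main(void)
{
	struct proc_model p = { PTHREAD_MUTEX_INITIALIZER, 0 };

	phold_model(&p);		/* unlocked caller */
	pthread_mutex_lock(&p.mtx);
	_phold_model(&p);		/* caller already holds the lock */
	pthread_mutex_unlock(&p.mtx);
	printf("holdcnt = %d\n", p.holdcnt);
	return (0);
}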
/* Lock and unlock process arguments. */
#define	PARGS_LOCK(p)	mtx_lock(&pargs_ref_lock)
#define	PARGS_UNLOCK(p)	mtx_unlock(&pargs_ref_lock)

#define	PIDHASH(pid)	(&pidhashtbl[(pid) & pidhash])
extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
extern u_long pidhash;

#define	PGRPHASH(pgid)	(&pgrphashtbl[(pgid) & pgrphash])
extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;

extern struct sx allproc_lock;
extern struct sx proctree_lock;
extern struct mtx pargs_ref_lock;
extern struct mtx ppeers_lock;
extern struct proc proc0;		/* Process slot for swapper. */
extern struct thread thread0;		/* Primary thread in proc0. */
extern struct ksegrp ksegrp0;		/* Primary ksegrp in proc0. */
extern struct kse kse0;			/* Primary kse in proc0. */
extern struct vmspace vmspace0;		/* VM space for proc0. */
extern int hogticks;			/* Limit on kernel cpu hogs. */
extern int nprocs, maxproc;		/* Current and max number of procs. */
extern int maxprocperuid;		/* Max procs per uid. */
extern u_long ps_arg_cache_limit;
extern int sched_quantum;		/* Scheduling quantum in ticks. */

LIST_HEAD(proclist, proc);
TAILQ_HEAD(procqueue, proc);
TAILQ_HEAD(threadqueue, thread);
extern struct proclist allproc;		/* List of all processes. */
extern struct proclist zombproc;	/* List of zombie processes. */
extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */
extern struct proc *updateproc;		/* Process slot for syncer (sic). */

extern struct uma_zone *proc_zone;

extern int lastpid;

struct proc *pfind(pid_t);		/* Find process by id. */
struct pgrp *pgfind(pid_t);		/* Find process group by id. */
struct proc *zpfind(pid_t);		/* Find zombie process by id. */

void	adjustrunqueue(struct thread *, int newpri);
void	ast(struct trapframe *framep);
struct thread *choosethread(void);
int	cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
int	enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
	    struct session *sess);
int	enterthispgrp(struct proc *p, struct pgrp *pgrp);
void	faultin(struct proc *p);
void	fixjobc(struct proc *p, struct pgrp *pgrp, int entering);
int	fork1(struct thread *, int, int, struct proc **);
void	fork_exit(void (*)(void *, struct trapframe *), void *,
	    struct trapframe *);
void	fork_return(struct thread *, struct trapframe *);
int	inferior(struct proc *p);
int	leavepgrp(struct proc *p);
void	mi_switch(int flags);
/* Flags for mi_switch(). */
#define	SW_VOL		0x0001	/* Voluntary switch. */
#define	SW_INVOL	0x0002	/* Involuntary switch. */
int	p_candebug(struct thread *td, struct proc *p);
int	p_cansee(struct thread *td, struct proc *p);
int	p_cansched(struct thread *td, struct proc *p);
int	p_cansignal(struct thread *td, struct proc *p, int signum);
struct pargs *pargs_alloc(int len);
void	pargs_drop(struct pargs *pa);
void	pargs_free(struct pargs *pa);
void	pargs_hold(struct pargs *pa);
void	procinit(void);
void	threadinit(void);
void	proc_linkup(struct proc *p, struct ksegrp *kg, struct kse *ke,
	    struct thread *td);
void	proc_reparent(struct proc *child, struct proc *newparent);
int	securelevel_ge(struct ucred *cr, int level);
int	securelevel_gt(struct ucred *cr, int level);
void	setrunnable(struct thread *);
void	setrunqueue(struct thread *);
void	setsugid(struct proc *p);
int	sigonstack(size_t sp);
void	sleepinit(void);
void	stopevent(struct proc *, u_int, u_int);
void	cpu_idle(void);
extern	void (*cpu_idle_hook)(void);	/* Hook to machdep CPU idler. */
void	cpu_switch(struct thread *old, struct thread *new);
void	cpu_throw(struct thread *old, struct thread *new) __dead2;
void	unsleep(struct thread *);
void	userret(struct thread *, struct trapframe *, u_int);

void	cpu_exit(struct thread *);
void	exit1(struct thread *, int) __dead2;
void	cpu_fork(struct thread *, struct proc *, struct thread *, int);
void	cpu_set_fork_handler(struct thread *, void (*)(void *), void *);

/* New in KSE. */
struct ksegrp *ksegrp_alloc(void);
void	ksegrp_free(struct ksegrp *kg);
void	ksegrp_stash(struct ksegrp *kg);
struct kse *kse_alloc(void);
void	kse_free(struct kse *ke);
void	kse_stash(struct kse *ke);
void	cpu_set_upcall(struct thread *td, struct thread *td0);
void	cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku);
void	cpu_thread_clean(struct thread *);
void	cpu_thread_exit(struct thread *);
void	cpu_thread_setup(struct thread *td);
void	cpu_thread_siginfo(int sig, u_long code, siginfo_t *si);
void	cpu_thread_swapin(struct thread *);
void	cpu_thread_swapout(struct thread *);
void	kse_reassign(struct kse *ke);
void	kse_link(struct kse *ke, struct ksegrp *kg);
void	kse_unlink(struct kse *ke);
void	ksegrp_link(struct ksegrp *kg, struct proc *p);
void	ksegrp_unlink(struct ksegrp *kg);
void	thread_signal_add(struct thread *td, int sig);
struct thread *thread_alloc(void);
void	thread_exit(void) __dead2;
int	thread_export_context(struct thread *td, int willexit);
void	thread_free(struct thread *td);
void	thread_link(struct thread *td, struct ksegrp *kg);
-lwpid_t thread_new_tid(void);
void	thread_reap(void);
struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
int	thread_single(int how);
#define	SINGLE_NO_EXIT	0	/* values for 'how' */
#define	SINGLE_EXIT	1
void	thread_single_end(void);
void	thread_stash(struct thread *td);
int	thread_suspend_check(int how);
void	thread_suspend_one(struct thread *td);
void	thread_unlink(struct thread *td);
void	thread_unsuspend(struct proc *p);
void	thread_unsuspend_one(struct thread *td);
int	thread_userret(struct thread *td, struct trapframe *frame);
int	thread_upcall_check(struct thread *td);
void	thread_user_enter(struct proc *p, struct thread *td);
void	thread_wait(struct proc *p);
int	thread_statclock(int user);
struct kse_upcall *upcall_alloc(void);
void	upcall_free(struct kse_upcall *ku);
void	upcall_link(struct kse_upcall *ku, struct ksegrp *kg);
void	upcall_unlink(struct kse_upcall *ku);
void	upcall_remove(struct thread *td);
void	upcall_stash(struct kse_upcall *ke);
void	thread_sanity_check(struct thread *td, char *);
void	thread_stopped(struct proc *p);
void	thread_switchout(struct thread *td);
void	thr_exit1(void);

#endif	/* _KERNEL */

#endif	/* !_SYS_PROC_H_ */
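[Editor's note: to close, here is a hedged sketch of the caller protocol for the single-threading entry points declared above. The *_model functions are stand-ins invented for this example; a real consumer such as exit or exec calls the kernel functions with the process lock held. Only the check-then-end shape of the pattern is the point.]

#include <stdio.h>

#define	SINGLE_NO_EXIT	0
#define	SINGLE_EXIT	1

static int another_single_threader = 0;	/* pretend state of the race */

static int
thread_single_model(int how)
{
	(void)how;
	return (another_single_threader ? 1 : 0);	/* 1 = must abort */
}

static void
thread_single_end_model(void)
{
}

int
main(void)
{
	/* Canonical shape: try to win the race, quiesce, work, release. */
	if (thread_single_model(SINGLE_NO_EXIT) != 0) {
		fprintf(stderr, "lost the single-threading race; back out\n");
		return (1);
	}
	/* ... operate on the quiesced process here ... */
	thread_single_end_model();
	printf("done\n");
	return (0);
}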