diff --git a/sys/arm/arm/elf_machdep.c b/sys/arm/arm/elf_machdep.c index d993ba57c7d0..735c00766f9b 100644 --- a/sys/arm/arm/elf_machdep.c +++ b/sys/arm/arm/elf_machdep.c @@ -1,345 +1,335 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright 1996-1998 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #include "opt_ddb.h" /* for OPT_DDB */ #include "opt_global.h" /* for OPT_KDTRACE_HOOKS */ #include "opt_stack.h" /* for OPT_STACK */ static boolean_t elf32_arm_abi_supported(struct image_params *, int32_t *, uint32_t *); u_long elf_hwcap; u_long elf_hwcap2; struct sysentvec elf32_freebsd_sysvec = { .sv_size = SYS_MAXSYSCALL, .sv_table = sysent, .sv_transtrap = NULL, .sv_fixup = __elfN(freebsd_fixup), .sv_sendsig = sendsig, .sv_sigcode = sigcode, .sv_szsigcode = &szsigcode, .sv_name = "FreeBSD ELF32", .sv_coredump = __elfN(coredump), .sv_elf_core_osabi = ELFOSABI_FREEBSD, .sv_elf_core_abi_vendor = FREEBSD_ABI_VENDOR, .sv_elf_core_prepare_notes = __elfN(prepare_notes), .sv_imgact_try = NULL, .sv_minsigstksz = MINSIGSTKSZ, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_psstrings = PS_STRINGS, .sv_psstringssz = sizeof(struct ps_strings), .sv_stackprot = VM_PROT_ALL, .sv_copyout_auxargs = __elfN(freebsd_copyout_auxargs), .sv_copyout_strings = exec_copyout_strings, .sv_setregs = exec_setregs, .sv_fixlimit = NULL, .sv_maxssiz = NULL, .sv_flags = SV_ASLR | SV_SHP | SV_TIMEKEEP | SV_RNG_SEED_VER | SV_ABI_FREEBSD | SV_ILP32, .sv_set_syscall_retval = cpu_set_syscall_retval, .sv_fetch_syscall_args = cpu_fetch_syscall_args, .sv_syscallnames = syscallnames, .sv_shared_page_base = SHAREDPAGE, .sv_shared_page_len = PAGE_SIZE, .sv_schedtail = NULL, .sv_thread_detach = NULL, .sv_trap = NULL, .sv_hwcap = &elf_hwcap, .sv_hwcap2 = &elf_hwcap2, .sv_onexec_old = exec_onexec_old, .sv_onexit = exit_onexit, .sv_regset_begin = SET_BEGIN(__elfN(regset)), .sv_regset_end = SET_LIMIT(__elfN(regset)), }; INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec); static Elf32_Brandinfo freebsd_brand_info = { .brand = ELFOSABI_FREEBSD, .machine = EM_ARM, 
.compat_3_brand = "FreeBSD", .emul_path = NULL, .interp_path = "/libexec/ld-elf.so.1", .sysvec = &elf32_freebsd_sysvec, .interp_newpath = NULL, .brand_note = &elf32_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE, .header_supported= elf32_arm_abi_supported, }; SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t) elf32_insert_brand_entry, &freebsd_brand_info); static boolean_t elf32_arm_abi_supported(struct image_params *imgp, int32_t *osrel __unused, uint32_t *fctl0 __unused) { const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; /* * When configured for EABI, FreeBSD supports EABI vesions 4 and 5. */ if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) { if (bootverbose) uprintf("Attempting to execute non EABI binary (rev %d) image %s", EF_ARM_EABI_VERSION(hdr->e_flags), imgp->args->fname); return (FALSE); } return (TRUE); } void -elf32_dump_thread(struct thread *td, void *dst, size_t *off) +elf32_dump_thread(struct thread *td __unused, void *dst __unused, + size_t *off __unused) { -#ifdef VFP - mcontext_vfp_t vfp; - - if (dst != NULL) { - get_vfpcontext(td, &vfp); - *off = elf32_populate_note(NT_ARM_VFP, &vfp, dst, sizeof(vfp), - NULL); - } else - *off = elf32_populate_note(NT_ARM_VFP, NULL, NULL, sizeof(vfp), - NULL); -#endif } bool elf_is_ifunc_reloc(Elf_Size r_info __unused) { return (false); } /* * It is possible for the compiler to emit relocations for unaligned data. * We handle this situation with these inlines. */ #define RELOC_ALIGNED_P(x) \ (((uintptr_t)(x) & (sizeof(void *) - 1)) == 0) static __inline Elf_Addr load_ptr(Elf_Addr *where) { Elf_Addr res; if (RELOC_ALIGNED_P(where)) return *where; memcpy(&res, where, sizeof(res)); return (res); } static __inline void store_ptr(Elf_Addr *where, Elf_Addr val) { if (RELOC_ALIGNED_P(where)) *where = val; else memcpy(where, &val, sizeof(val)); } #undef RELOC_ALIGNED_P /* Process one elf relocation with addend. */ static int elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, int local, elf_lookup_fn lookup) { Elf_Addr *where; Elf_Addr addr; Elf_Addr addend; Elf_Word rtype, symidx; const Elf_Rel *rel; const Elf_Rela *rela; int error; switch (type) { case ELF_RELOC_REL: rel = (const Elf_Rel *)data; where = (Elf_Addr *) (relocbase + rel->r_offset); addend = load_ptr(where); rtype = ELF_R_TYPE(rel->r_info); symidx = ELF_R_SYM(rel->r_info); break; case ELF_RELOC_RELA: rela = (const Elf_Rela *)data; where = (Elf_Addr *) (relocbase + rela->r_offset); addend = rela->r_addend; rtype = ELF_R_TYPE(rela->r_info); symidx = ELF_R_SYM(rela->r_info); break; default: panic("unknown reloc type %d\n", type); } if (local) { if (rtype == R_ARM_RELATIVE) { /* A + B */ addr = elf_relocaddr(lf, relocbase + addend); if (load_ptr(where) != addr) store_ptr(where, addr); } return (0); } switch (rtype) { case R_ARM_NONE: /* none */ break; case R_ARM_ABS32: error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); store_ptr(where, addr + load_ptr(where)); break; case R_ARM_COPY: /* none */ /* * There shouldn't be copy relocations in kernel * objects. 
*/ printf("kldload: unexpected R_COPY relocation, " "symbol index %d\n", symidx); return (-1); break; case R_ARM_JUMP_SLOT: error = lookup(lf, symidx, 1, &addr); if (error == 0) { store_ptr(where, addr); return (0); } return (-1); case R_ARM_RELATIVE: break; default: printf("kldload: unexpected relocation type %d, " "symbol index %d\n", rtype, symidx); return (-1); } return(0); } int elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup)); } int elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup)); } int elf_cpu_load_file(linker_file_t lf) { /* * The pmap code does not do an icache sync upon establishing executable * mappings in the kernel pmap. It's an optimization based on the fact * that kernel memory allocations always have EXECUTABLE protection even * when the memory isn't going to hold executable code. The only time * kernel memory holding instructions does need a sync is after loading * a kernel module, and that's when this function gets called. * * This syncs data and instruction caches after loading a module. We * don't worry about the kernel itself (lf->id is 1) as locore.S did * that on entry. Even if data cache maintenance was done by IO code, * the relocation fixup process creates dirty cache entries that we must * write back before doing icache sync. The instruction cache sync also * invalidates the branch predictor cache on platforms that have one. */ if (lf->id == 1) return (0); dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size); icache_inv_all(); #if defined(DDB) || defined(KDTRACE_HOOKS) || defined(STACK) /* * Inform the stack(9) code of the new module, so it can acquire its * per-module unwind data. */ unwind_module_loaded(lf); #endif return (0); } int elf_cpu_parse_dynamic(caddr_t loadbase __unused, Elf_Dyn *dynamic __unused) { return (0); } int elf_cpu_unload_file(linker_file_t lf) { #if defined(DDB) || defined(KDTRACE_HOOKS) || defined(STACK) /* Inform the stack(9) code that this module is gone. */ unwind_module_unloaded(lf); #endif return (0); } diff --git a/sys/arm/arm/ptrace_machdep.c b/sys/arm/arm/ptrace_machdep.c index 563f962dc473..d8acc58d0fd0 100644 --- a/sys/arm/arm/ptrace_machdep.c +++ b/sys/arm/arm/ptrace_machdep.c @@ -1,62 +1,95 @@ /*- * Copyright (c) 2017 John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include +#include #include #include +#include #ifdef VFP #include #endif +#ifdef VFP +static bool +get_arm_vfp(struct regset *rs, struct thread *td, void *buf, size_t *sizep) +{ + if (buf != NULL) { + KASSERT(*sizep == sizeof(mcontext_vfp_t), + ("%s: invalid size", __func__)); + get_vfpcontext(td, buf); + } + *sizep = sizeof(mcontext_vfp_t); + return (true); +} + +static bool +set_arm_vfp(struct regset *rs, struct thread *td, void *buf, + size_t size) +{ + KASSERT(size == sizeof(mcontext_vfp_t), ("%s: invalid size", __func__)); + set_vfpcontext(td, buf); + return (true); +} + +static struct regset regset_arm_vfp = { + .note = NT_ARM_VFP, + .size = sizeof(mcontext_vfp_t), + .get = get_arm_vfp, + .set = set_arm_vfp, +}; +ELF_REGSET(regset_arm_vfp); +#endif + int cpu_ptrace(struct thread *td, int req, void *addr, int data) { #ifdef VFP mcontext_vfp_t vfp; #endif int error; switch (req) { #ifdef VFP case PT_GETVFPREGS: get_vfpcontext(td, &vfp); error = copyout(&vfp, addr, sizeof(vfp)); break; case PT_SETVFPREGS: error = copyin(addr, &vfp, sizeof(vfp)); if (error == 0) set_vfpcontext(td, &vfp); break; #endif default: error = EINVAL; } return (error); } diff --git a/sys/arm64/arm64/elf32_machdep.c b/sys/arm64/arm64/elf32_machdep.c index 4322c07d6e82..4123c45e1e36 100644 --- a/sys/arm64/arm64/elf32_machdep.c +++ b/sys/arm64/arm64/elf32_machdep.c @@ -1,289 +1,288 @@ /*- * Copyright (c) 2014, 2015 The FreeBSD Foundation. * Copyright (c) 2014, 2017 Andrew Turner. * Copyright (c) 2018 Olivier Houchard * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define __ELF_WORD_SIZE 32 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #include #define FREEBSD32_MINUSER 0x00001000 #define FREEBSD32_MAXUSER ((1ul << 32) - PAGE_SIZE) #define FREEBSD32_SHAREDPAGE (FREEBSD32_MAXUSER - PAGE_SIZE) #define FREEBSD32_USRSTACK FREEBSD32_SHAREDPAGE extern const char *freebsd32_syscallnames[]; extern char aarch32_sigcode[]; extern int sz_aarch32_sigcode; static int freebsd32_fetch_syscall_args(struct thread *td); static void freebsd32_setregs(struct thread *td, struct image_params *imgp, u_long stack); static void freebsd32_set_syscall_retval(struct thread *, int); static boolean_t elf32_arm_abi_supported(struct image_params *, int32_t *, uint32_t *); extern void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask); u_long __read_frequently elf32_hwcap; u_long __read_frequently elf32_hwcap2; static struct sysentvec elf32_freebsd_sysvec = { .sv_size = SYS_MAXSYSCALL, .sv_table = freebsd32_sysent, .sv_transtrap = NULL, .sv_fixup = elf32_freebsd_fixup, .sv_sendsig = freebsd32_sendsig, .sv_sigcode = aarch32_sigcode, .sv_szsigcode = &sz_aarch32_sigcode, .sv_name = "FreeBSD ELF32", .sv_coredump = elf32_coredump, .sv_elf_core_osabi = ELFOSABI_FREEBSD, .sv_elf_core_abi_vendor = FREEBSD_ABI_VENDOR, .sv_elf_core_prepare_notes = elf32_prepare_notes, .sv_imgact_try = NULL, .sv_minsigstksz = MINSIGSTKSZ, .sv_minuser = FREEBSD32_MINUSER, .sv_maxuser = FREEBSD32_MAXUSER, .sv_usrstack = FREEBSD32_USRSTACK, .sv_psstrings = FREEBSD32_PS_STRINGS, .sv_psstringssz = sizeof(struct freebsd32_ps_strings), .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE, .sv_copyout_auxargs = elf32_freebsd_copyout_auxargs, .sv_copyout_strings = freebsd32_copyout_strings, .sv_setregs = freebsd32_setregs, .sv_fixlimit = NULL, // XXX .sv_maxssiz = NULL, .sv_flags = SV_ABI_FREEBSD | SV_ILP32 | SV_SHP | SV_TIMEKEEP | SV_RNG_SEED_VER, .sv_set_syscall_retval = freebsd32_set_syscall_retval, .sv_fetch_syscall_args = freebsd32_fetch_syscall_args, .sv_syscallnames = freebsd32_syscallnames, .sv_shared_page_base = FREEBSD32_SHAREDPAGE, .sv_shared_page_len = PAGE_SIZE, .sv_schedtail = NULL, .sv_thread_detach = NULL, .sv_trap = NULL, .sv_hwcap = &elf32_hwcap, .sv_hwcap2 = &elf32_hwcap2, .sv_onexec_old = exec_onexec_old, .sv_onexit = exit_onexit, .sv_regset_begin = SET_BEGIN(__elfN(regset)), .sv_regset_end = SET_LIMIT(__elfN(regset)), }; INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec); static Elf32_Brandinfo freebsd32_brand_info = { .brand = ELFOSABI_FREEBSD, .machine = EM_ARM, .compat_3_brand = "FreeBSD", .emul_path = NULL, .interp_path = "/libexec/ld-elf.so.1", .sysvec = &elf32_freebsd_sysvec, .interp_newpath = "/libexec/ld-elf32.so.1", .brand_note = &elf32_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE, .header_supported= elf32_arm_abi_supported, }; SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)elf32_insert_brand_entry, 
&freebsd32_brand_info); static boolean_t elf32_arm_abi_supported(struct image_params *imgp, int32_t *osrel __unused, uint32_t *fctl0 __unused) { const Elf32_Ehdr *hdr; /* Check if we support AArch32 */ if (ID_AA64PFR0_EL0_VAL(READ_SPECIALREG(id_aa64pfr0_el1)) != ID_AA64PFR0_EL0_64_32) return (FALSE); #define EF_ARM_EABI_VERSION(x) (((x) & EF_ARM_EABIMASK) >> 24) #define EF_ARM_EABI_FREEBSD_MIN 4 hdr = (const Elf32_Ehdr *)imgp->image_header; if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) { if (bootverbose) uprintf("Attempting to execute non EABI binary " "(rev %d) image %s", EF_ARM_EABI_VERSION(hdr->e_flags), imgp->args->fname); return (FALSE); } return (TRUE); } static int freebsd32_fetch_syscall_args(struct thread *td) { struct proc *p; register_t *ap; struct syscall_args *sa; int error, i, nap, narg; unsigned int args[4]; nap = 4; p = td->td_proc; ap = td->td_frame->tf_x; sa = &td->td_sa; /* r7 is the syscall id */ sa->code = td->td_frame->tf_x[7]; if (sa->code == SYS_syscall) { sa->code = *ap++; nap--; } else if (sa->code == SYS___syscall) { sa->code = ap[1]; nap -= 2; ap += 2; } if (sa->code >= p->p_sysent->sv_size) sa->callp = &p->p_sysent->sv_table[0]; else sa->callp = &p->p_sysent->sv_table[sa->code]; narg = sa->callp->sy_narg; for (i = 0; i < nap; i++) sa->args[i] = ap[i]; if (narg > nap) { if (narg - nap > nitems(args)) panic("Too many system call arguiments"); error = copyin((void *)td->td_frame->tf_x[13], args, (narg - nap) * sizeof(int)); for (i = 0; i < (narg - nap); i++) sa->args[i + nap] = args[i]; } td->td_retval[0] = 0; td->td_retval[1] = 0; return (0); } static void freebsd32_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; frame = td->td_frame; switch (error) { case 0: frame->tf_x[0] = td->td_retval[0]; frame->tf_x[1] = td->td_retval[1]; frame->tf_spsr &= ~PSR_C; break; case ERESTART: /* * Reconstruct the pc to point at the swi. */ if ((frame->tf_spsr & PSR_T) != 0) frame->tf_elr -= 2; //THUMB_INSN_SIZE; else frame->tf_elr -= 4; //INSN_SIZE; break; case EJUSTRETURN: /* nothing to do */ break; default: frame->tf_x[0] = error; frame->tf_spsr |= PSR_C; break; } } static void freebsd32_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack) { struct trapframe *tf = td->td_frame; struct pcb *pcb = td->td_pcb; memset(tf, 0, sizeof(struct trapframe)); /* * We need to set x0 for init as it doesn't call * cpu_set_syscall_retval to copy the value. We also * need to set td_retval for the cases where we do. */ tf->tf_x[0] = stack; /* SP_usr is mapped to x13 */ tf->tf_x[13] = stack; /* LR_usr is mapped to x14 */ tf->tf_x[14] = imgp->entry_addr; tf->tf_elr = imgp->entry_addr; tf->tf_spsr = PSR_M_32; if ((uint32_t)imgp->entry_addr & 1) tf->tf_spsr |= PSR_T; #ifdef VFP vfp_reset_state(td, pcb); #endif /* * Clear debug register state. It is not applicable to the new process. */ bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs)); } void elf32_dump_thread(struct thread *td, void *dst, size_t *off) { - /* XXX: VFP */ } diff --git a/sys/arm64/arm64/freebsd32_machdep.c b/sys/arm64/arm64/freebsd32_machdep.c index f9b847d8b658..7338e2b410f9 100644 --- a/sys/arm64/arm64/freebsd32_machdep.c +++ b/sys/arm64/arm64/freebsd32_machdep.c @@ -1,476 +1,473 @@ /*- * Copyright (c) 2018 Olivier Houchard * Copyright (c) 2017 Nuxi, https://nuxi.nl/ * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #include #include #include #include #include #include extern void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask); /* * The first two fields of a ucontext_t are the signal mask and the machine * context. The next field is uc_link; we want to avoid destroying the link * when copying out contexts. */ #define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link) -#ifdef VFP -static void get_fpcontext32(struct thread *td, mcontext32_vfp_t *); -#endif - /* * Stubs for machine dependent 32-bits system calls. */ int freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap) { int error; #define ARM_SYNC_ICACHE 0 #define ARM_DRAIN_WRITEBUF 1 #define ARM_SET_TP 2 #define ARM_GET_TP 3 #define ARM_GET_VFPSTATE 4 switch(uap->op) { case ARM_SET_TP: WRITE_SPECIALREG(tpidr_el0, uap->parms); WRITE_SPECIALREG(tpidrro_el0, uap->parms); return 0; case ARM_SYNC_ICACHE: { struct { uint32_t addr; uint32_t size; } args; if ((error = copyin(uap->parms, &args, sizeof(args))) != 0) return (error); if ((uint64_t)args.addr + (uint64_t)args.size > 0xffffffff) return (EINVAL); cpu_icache_sync_range_checked(args.addr, args.size); return 0; } case ARM_GET_VFPSTATE: { mcontext32_vfp_t mcontext_vfp; struct { uint32_t mc_vfp_size; uint32_t mc_vfp; } args; if ((error = copyin(uap->parms, &args, sizeof(args))) != 0) return (error); if (args.mc_vfp_size != sizeof(mcontext_vfp)) return (EINVAL); #ifdef VFP get_fpcontext32(td, &mcontext_vfp); #else bzero(&mcontext_vfp, sizeof(mcontext_vfp)); #endif error = copyout(&mcontext_vfp, (void *)(uintptr_t)args.mc_vfp, sizeof(mcontext_vfp)); return error; } } return (EINVAL); } #ifdef VFP -static void +void get_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp) { struct pcb *pcb; int i; KASSERT(td == curthread || TD_IS_SUSPENDED(td) || P_SHOULDSTOP(td->td_proc), ("not suspended thread %p", td)); memset(mcp, 0, sizeof(*mcp)); pcb = td->td_pcb; if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. 
*/ if (td == curthread) vfp_save_state(td, pcb); KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called get_fpcontext32 while the kernel is using the VFP")); KASSERT((pcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0, ("Non-userspace FPU flags set in get_fpcontext32")); for (i = 0; i < 32; i++) mcp->mcv_reg[i] = (uint64_t)pcb->pcb_fpustate.vfp_regs[i]; mcp->mcv_fpscr = VFP_FPSCR_FROM_SRCR(pcb->pcb_fpustate.vfp_fpcr, pcb->pcb_fpustate.vfp_fpsr); } } -static void +void set_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp) { struct pcb *pcb; int i; critical_enter(); pcb = td->td_pcb; if (td == curthread) vfp_discard(td); for (i = 0; i < 32; i++) pcb->pcb_fpustate.vfp_regs[i] = mcp->mcv_reg[i]; pcb->pcb_fpustate.vfp_fpsr = VFP_FPSR_FROM_FPSCR(mcp->mcv_fpscr); pcb->pcb_fpustate.vfp_fpcr = VFP_FPCR_FROM_FPSCR(mcp->mcv_fpscr); critical_exit(); } #endif + static void get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags) { struct pcb *pcb; struct trapframe *tf; int i; pcb = td->td_pcb; tf = td->td_frame; if ((flags & GET_MC_CLEAR_RET) != 0) { mcp->mc_gregset[0] = 0; mcp->mc_gregset[16] = tf->tf_spsr & ~PSR_C; } else { mcp->mc_gregset[0] = tf->tf_x[0]; mcp->mc_gregset[16] = tf->tf_spsr; } for (i = 1; i < 15; i++) mcp->mc_gregset[i] = tf->tf_x[i]; mcp->mc_gregset[15] = tf->tf_elr; mcp->mc_vfp_size = 0; mcp->mc_vfp_ptr = 0; memset(mcp->mc_spare, 0, sizeof(mcp->mc_spare)); } static int set_mcontext32(struct thread *td, mcontext32_t *mcp) { struct trapframe *tf; mcontext32_vfp_t mc_vfp; uint32_t spsr; int i; tf = td->td_frame; spsr = mcp->mc_gregset[16]; /* * There is no PSR_SS in the 32-bit kernel so ignore it if it's set * as we will set it later if needed. */ if ((spsr & ~(PSR_SETTABLE_32 | PSR_SS)) != (tf->tf_spsr & ~(PSR_SETTABLE_32 | PSR_SS))) return (EINVAL); spsr &= PSR_SETTABLE_32; spsr |= tf->tf_spsr & ~PSR_SETTABLE_32; if ((td->td_dbgflags & TDB_STEP) != 0) { spsr |= PSR_SS; td->td_pcb->pcb_flags |= PCB_SINGLE_STEP; WRITE_SPECIALREG(mdscr_el1, READ_SPECIALREG(mdscr_el1) | MDSCR_SS); } for (i = 0; i < 15; i++) tf->tf_x[i] = mcp->mc_gregset[i]; tf->tf_elr = mcp->mc_gregset[15]; tf->tf_spsr = spsr; #ifdef VFP if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != 0) { if (copyin((void *)(uintptr_t)mcp->mc_vfp_ptr, &mc_vfp, sizeof(mc_vfp)) != 0) return (EFAULT); set_fpcontext32(td, &mc_vfp); } #endif return (0); } #define UC_COPY_SIZE offsetof(ucontext32_t, uc_link) int freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { memset(&uc, 0, sizeof(uc)); get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->ucp, UC_COPY_SIZE); } return (ret); } int freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { ret = copyin(uap->ucp, &uc, UC_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); if (ret == 0) kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } return (ret); } int freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap) { ucontext32_t uc; int error; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); error = set_mcontext32(td, &uc.uc_mcontext); if (error != 0) return (error); /* Restore signal mask.
*/ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } int freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap) { ucontext32_t uc; int ret; if (uap->oucp == NULL || uap->ucp == NULL) ret = EINVAL; else { bzero(&uc, sizeof(uc)); get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE); if (ret == 0) { ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } } return (ret); } void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe32 *fp, frame; struct sigacts *psp; struct siginfo32 siginfo; struct sysentvec *sysent; int onstack; int sig; int code; siginfo_to_siginfo32(&ksi->ksi_info, &siginfo); td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_x[13]); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe32 *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else fp = (struct sigframe32 *)td->td_frame->tf_x[13]; /* make room on the stack */ fp--; /* make the stack aligned */ fp = (struct sigframe32 *)((unsigned long)(fp) &~ (8 - 1)); /* Populate the siginfo frame. */ get_mcontext32(td, &frame.sf_uc.uc_mcontext, 0); #ifdef VFP get_fpcontext32(td, &frame.sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp); frame.sf_uc.uc_mcontext.mc_vfp_ptr = (uint32_t)(uintptr_t)&fp->sf_vfp; #else frame.sf_uc.uc_mcontext.mc_vfp_size = 0; frame.sf_uc.uc_mcontext.mc_vfp_ptr = (uint32_t)NULL; #endif frame.sf_si = siginfo; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK ) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp; frame.sf_uc.uc_stack.ss_size = td->td_sigstk.ss_size; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } /* * Build context to run handler in. We invoke the handler * directly, only returning via the trampoline. Note the * trampoline version numbers are coordinated with machine- * dependent code in libc. 
tf->tf_x[0] = sig; tf->tf_x[1] = (register_t)&fp->sf_si; tf->tf_x[2] = (register_t)&fp->sf_uc; /* the trampoline uses r5 as the uc address */ tf->tf_x[5] = (register_t)&fp->sf_uc; tf->tf_elr = (register_t)catcher; tf->tf_x[13] = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_x[14] = (register_t)sysent->sv_sigcode_base; else tf->tf_x[14] = (register_t)(PROC_PS_STRINGS(p) - *(sysent->sv_szsigcode)); /* Set the mode to enter in the signal handler */ if ((register_t)catcher & 1) tf->tf_spsr |= PSR_T; else tf->tf_spsr &= ~PSR_T; /* Clear the single step flag while in the signal handler */ if ((td->td_pcb->pcb_flags & PCB_SINGLE_STEP) != 0) { td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; WRITE_SPECIALREG(mdscr_el1, READ_SPECIALREG(mdscr_el1) & ~MDSCR_SS); isb(); } CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_x[14], tf->tf_x[13]); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } #ifdef COMPAT_43 /* * COMPAT_FREEBSD32 assumes we have this system call when COMPAT_43 is defined. * FreeBSD/arm provides a similar getpagesize() syscall. */ #define ARM32_PAGE_SIZE 4096 int ofreebsd32_getpagesize(struct thread *td, struct ofreebsd32_getpagesize_args *uap) { td->td_retval[0] = ARM32_PAGE_SIZE; return (0); } /* * Mirror the osigreturn definition in kern_sig.c for !i386 platforms. This * mirrors what's connected to the FreeBSD/arm syscall. */ int ofreebsd32_sigreturn(struct thread *td, struct ofreebsd32_sigreturn_args *uap) { return (nosys(td, (struct nosys_args *)uap)); } #endif diff --git a/sys/arm64/arm64/ptrace_machdep.c b/sys/arm64/arm64/ptrace_machdep.c index 144ff29aff47..abf1991a51a6 100644 --- a/sys/arm64/arm64/ptrace_machdep.c +++ b/sys/arm64/arm64/ptrace_machdep.c @@ -1,78 +1,112 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* */ #include __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include +#if defined(VFP) && defined(COMPAT_FREEBSD32) +static bool +get_arm_vfp(struct regset *rs, struct thread *td, void *buf, size_t *sizep) +{ + if (buf != NULL) { + KASSERT(*sizep == sizeof(mcontext32_vfp_t), + ("%s: invalid size", __func__)); + get_fpcontext32(td, buf); + } + *sizep = sizeof(mcontext32_vfp_t); + return (true); +} + +static bool +set_arm_vfp(struct regset *rs, struct thread *td, void *buf, + size_t size) +{ + KASSERT(size == sizeof(mcontext32_vfp_t), ("%s: invalid size", + __func__)); + set_fpcontext32(td, buf); + return (true); +} + +static struct regset regset_arm_vfp = { + .note = NT_ARM_VFP, + .size = sizeof(mcontext32_vfp_t), + .get = get_arm_vfp, + .set = set_arm_vfp, +}; +ELF32_REGSET(regset_arm_vfp); +#endif + int ptrace_set_pc(struct thread *td, u_long addr) { td->td_frame->tf_elr = addr; return (0); } int ptrace_single_step(struct thread *td) { PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); if ((td->td_frame->tf_spsr & PSR_SS) == 0) { td->td_frame->tf_spsr |= PSR_SS; td->td_pcb->pcb_flags |= PCB_SINGLE_STEP; td->td_dbgflags |= TDB_STEP; } return (0); } int ptrace_clear_single_step(struct thread *td) { PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); td->td_frame->tf_spsr &= ~PSR_SS; td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; td->td_dbgflags &= ~TDB_STEP; return (0); } diff --git a/sys/arm64/include/vfp.h b/sys/arm64/include/vfp.h index 9808b3a8d831..650eb6938f5e 100644 --- a/sys/arm64/include/vfp.h +++ b/sys/arm64/include/vfp.h @@ -1,100 +1,105 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_VFP_H_ #define _MACHINE_VFP_H_ /* VFPCR */ #define VFPCR_AHP (0x04000000) /* alt. 
half-precision: */ #define VFPCR_DN (0x02000000) /* default NaN enable */ #define VFPCR_FZ (0x01000000) /* flush to zero enabled */ #define VFPCR_RMODE_OFF 22 /* rounding mode offset */ #define VFPCR_RMODE_MASK (0x00c00000) /* rounding mode mask */ #define VFPCR_RMODE_RN (0x00000000) /* round nearest */ #define VFPCR_RMODE_RPI (0x00400000) /* round to plus infinity */ #define VFPCR_RMODE_RNI (0x00800000) /* round to neg infinity */ #define VFPCR_RMODE_RM (0x00c00000) /* round to zero */ #define VFPCR_STRIDE_OFF 20 /* vector stride -1 */ #define VFPCR_STRIDE_MASK (0x00300000) #define VFPCR_LEN_OFF 16 /* vector length -1 */ #define VFPCR_LEN_MASK (0x00070000) #define VFPCR_IDE (0x00008000) /* input subnormal exc enable */ #define VFPCR_IXE (0x00001000) /* inexact exception enable */ #define VFPCR_UFE (0x00000800) /* underflow exception enable */ #define VFPCR_OFE (0x00000400) /* overflow exception enable */ #define VFPCR_DZE (0x00000200) /* div by zero exception en */ #define VFPCR_IOE (0x00000100) /* invalid op exec enable */ #ifndef LOCORE struct vfpstate { __uint128_t vfp_regs[32]; uint32_t vfp_fpcr; uint32_t vfp_fpsr; }; #ifdef _KERNEL struct pcb; struct thread; void vfp_init(void); void vfp_discard(struct thread *); void vfp_reset_state(struct thread *, struct pcb *); void vfp_restore_state(void); void vfp_save_state(struct thread *, struct pcb *); struct fpu_kern_ctx; /* * Flags for fpu_kern_alloc_ctx(), fpu_kern_enter() and fpu_kern_thread(). */ #define FPU_KERN_NORMAL 0x0000 #define FPU_KERN_NOWAIT 0x0001 #define FPU_KERN_KTHR 0x0002 #define FPU_KERN_NOCTX 0x0004 struct fpu_kern_ctx *fpu_kern_alloc_ctx(u_int); void fpu_kern_free_ctx(struct fpu_kern_ctx *); void fpu_kern_enter(struct thread *, struct fpu_kern_ctx *, u_int); int fpu_kern_leave(struct thread *, struct fpu_kern_ctx *); int fpu_kern_thread(u_int); int is_fpu_kern_thread(u_int); /* Convert to and from Aarch32 FPSCR to Aarch64 FPCR/FPSR */ #define VFP_FPSCR_FROM_SRCR(vpsr, vpcr) ((vpsr) | ((vpcr) & 0x7c00000)) #define VFP_FPSR_FROM_FPSCR(vpscr) ((vpscr) &~ 0x7c00000) #define VFP_FPCR_FROM_FPSCR(vpsrc) ((vpsrc) & 0x7c00000) +#ifdef COMPAT_FREEBSD32 +void get_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp); +void set_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp); +#endif + #endif #endif #endif /* !_MACHINE_VFP_H_ */ diff --git a/usr.bin/gcore/elfcore.c b/usr.bin/gcore/elfcore.c index b93ef7ed92d6..42d90aaaf155 100644 --- a/usr.bin/gcore/elfcore.c +++ b/usr.bin/gcore/elfcore.c @@ -1,954 +1,951 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017 Dell EMC * Copyright (c) 2007 Sandvine Incorporated * Copyright (c) 1998 John D. Polstra * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "extern.h" /* * Code for generating ELF core dumps. */ struct map_entry { struct map_entry *next; vm_offset_t start; vm_offset_t end; vm_prot_t protection; }; typedef void (*segment_callback)(struct map_entry *, void *); /* Closure for cb_put_phdr(). */ struct phdr_closure { Elf_Phdr *phdr; /* Program header to fill in */ Elf_Off offset; /* Offset of segment in core file */ }; /* Closure for cb_size_segment(). */ struct sseg_closure { int count; /* Count of writable segments. */ size_t size; /* Total size of all writable segments. */ }; #ifdef ELFCORE_COMPAT_32 typedef struct fpreg32 elfcore_fpregset_t; typedef struct reg32 elfcore_gregset_t; typedef struct prpsinfo32 elfcore_prpsinfo_t; typedef struct prstatus32 elfcore_prstatus_t; typedef struct ptrace_lwpinfo32 elfcore_lwpinfo_t; static void elf_convert_lwpinfo(struct ptrace_lwpinfo32 *pld, struct ptrace_lwpinfo *pls); #else typedef fpregset_t elfcore_fpregset_t; typedef gregset_t elfcore_gregset_t; typedef prpsinfo_t elfcore_prpsinfo_t; typedef prstatus_t elfcore_prstatus_t; typedef struct ptrace_lwpinfo elfcore_lwpinfo_t; #define elf_convert_lwpinfo(d,s) *d = *s #endif typedef void* (*notefunc_t)(void *, size_t *); static void cb_put_phdr(struct map_entry *, void *); static void cb_size_segment(struct map_entry *, void *); static void each_dumpable_segment(struct map_entry *, segment_callback, void *closure); static void elf_detach(void); /* atexit() handler. 
*/ static void *elf_note_prpsinfo(void *, size_t *); static void *elf_note_thrmisc(void *, size_t *); static void *elf_note_ptlwpinfo(void *, size_t *); -#if defined(__arm__) -static void *elf_note_arm_vfp(void *, size_t *); -#endif #if defined(__i386__) || defined(__amd64__) static void *elf_note_x86_xstate(void *, size_t *); #endif #if defined(__powerpc__) static void *elf_note_powerpc_vmx(void *, size_t *); static void *elf_note_powerpc_vsx(void *, size_t *); #endif static void *elf_note_procstat_auxv(void *, size_t *); static void *elf_note_procstat_files(void *, size_t *); static void *elf_note_procstat_groups(void *, size_t *); static void *elf_note_procstat_osrel(void *, size_t *); static void *elf_note_procstat_proc(void *, size_t *); static void *elf_note_procstat_psstrings(void *, size_t *); static void *elf_note_procstat_rlimit(void *, size_t *); static void *elf_note_procstat_umask(void *, size_t *); static void *elf_note_procstat_vmmap(void *, size_t *); static void elf_puthdr(int, pid_t, struct map_entry *, void *, size_t, size_t, size_t, int); static void elf_putnote(int, notefunc_t, void *, struct sbuf *); static void elf_putnotes(pid_t, struct sbuf *, size_t *); static void elf_putregnote(int, lwpid_t, struct sbuf *); static void freemap(struct map_entry *); static struct map_entry *readmap(pid_t); static void *procstat_sysctl(void *, int, size_t, size_t *sizep); static pid_t g_pid; /* Pid being dumped, global for elf_detach */ static int g_status; /* proc status after ptrace attach */ static int elf_ident(int efd, pid_t pid __unused, char *binfile __unused) { Elf_Ehdr hdr; int cnt; uint16_t machine; cnt = read(efd, &hdr, sizeof(hdr)); if (cnt != sizeof(hdr)) return (0); if (!IS_ELF(hdr)) return (0); switch (hdr.e_ident[EI_DATA]) { case ELFDATA2LSB: machine = le16toh(hdr.e_machine); break; case ELFDATA2MSB: machine = be16toh(hdr.e_machine); break; default: return (0); } if (!ELF_MACHINE_OK(machine)) return (0); /* Looks good. */ return (1); } static void elf_detach(void) { int sig; if (g_pid != 0) { /* * Forward any pending signals. SIGSTOP is generated by ptrace * itself, so ignore it. */ sig = WIFSTOPPED(g_status) ? WSTOPSIG(g_status) : 0; if (sig == SIGSTOP) sig = 0; ptrace(PT_DETACH, g_pid, (caddr_t)1, sig); } } /* * Write an ELF coredump for the given pid to the given fd. */ static void elf_coredump(int efd, int fd, pid_t pid) { struct map_entry *map; struct sseg_closure seginfo; struct sbuf *sb; void *hdr; size_t hdrsize, notesz, segoff; ssize_t n, old_len; Elf_Phdr *php; int i; /* Attach to process to dump. */ g_pid = pid; if (atexit(elf_detach) != 0) err(1, "atexit"); errno = 0; ptrace(PT_ATTACH, pid, NULL, 0); if (errno) err(1, "PT_ATTACH"); if (waitpid(pid, &g_status, 0) == -1) err(1, "waitpid"); /* Get the program's memory map. */ map = readmap(pid); /* Size the program segments. */ seginfo.count = 0; seginfo.size = 0; each_dumpable_segment(map, cb_size_segment, &seginfo); /* * Build the header and the notes using sbuf and write to the file. */ sb = sbuf_new_auto(); hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count); if (seginfo.count + 1 >= PN_XNUM) hdrsize += sizeof(Elf_Shdr); /* Start header + notes section. */ sbuf_start_section(sb, NULL); /* Make empty header subsection. */ sbuf_start_section(sb, &old_len); sbuf_putc(sb, 0); sbuf_end_section(sb, old_len, hdrsize, 0); /* Put notes. */ elf_putnotes(pid, sb, ¬esz); /* Align up to a page boundary for the program segments. 
*/ sbuf_end_section(sb, -1, PAGE_SIZE, 0); if (sbuf_finish(sb) != 0) err(1, "sbuf_finish"); hdr = sbuf_data(sb); segoff = sbuf_len(sb); /* Fill in the header. */ elf_puthdr(efd, pid, map, hdr, hdrsize, notesz, segoff, seginfo.count); n = write(fd, hdr, segoff); if (n == -1) err(1, "write"); if (n < segoff) errx(1, "short write"); /* Write the contents of all of the writable segments. */ php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; for (i = 0; i < seginfo.count; i++) { struct ptrace_io_desc iorequest; uintmax_t nleft = php->p_filesz; iorequest.piod_op = PIOD_READ_D; iorequest.piod_offs = (caddr_t)(uintptr_t)php->p_vaddr; while (nleft > 0) { char buf[8*1024]; size_t nwant; ssize_t ngot; if (nleft > sizeof(buf)) nwant = sizeof buf; else nwant = nleft; iorequest.piod_addr = buf; iorequest.piod_len = nwant; ptrace(PT_IO, pid, (caddr_t)&iorequest, 0); ngot = iorequest.piod_len; if ((size_t)ngot < nwant) errx(1, "short read wanted %zu, got %zd", nwant, ngot); ngot = write(fd, buf, nwant); if (ngot == -1) err(1, "write of segment %d failed", i); if ((size_t)ngot != nwant) errx(1, "short write"); nleft -= nwant; iorequest.piod_offs += ngot; } php++; } sbuf_delete(sb); freemap(map); } /* * A callback for each_dumpable_segment() to write out the segment's * program header entry. */ static void cb_put_phdr(struct map_entry *entry, void *closure) { struct phdr_closure *phc = (struct phdr_closure *)closure; Elf_Phdr *phdr = phc->phdr; phc->offset = round_page(phc->offset); phdr->p_type = PT_LOAD; phdr->p_offset = phc->offset; phdr->p_vaddr = entry->start; phdr->p_paddr = 0; phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; phdr->p_align = PAGE_SIZE; phdr->p_flags = 0; if (entry->protection & VM_PROT_READ) phdr->p_flags |= PF_R; if (entry->protection & VM_PROT_WRITE) phdr->p_flags |= PF_W; if (entry->protection & VM_PROT_EXECUTE) phdr->p_flags |= PF_X; phc->offset += phdr->p_filesz; phc->phdr++; } /* * A callback for each_dumpable_segment() to gather information about * the number of segments and their total size. */ static void cb_size_segment(struct map_entry *entry, void *closure) { struct sseg_closure *ssc = (struct sseg_closure *)closure; ssc->count++; ssc->size += entry->end - entry->start; } /* * For each segment in the given memory map, call the given function * with a pointer to the map entry and some arbitrary caller-supplied * data. 
*/ static void each_dumpable_segment(struct map_entry *map, segment_callback func, void *closure) { struct map_entry *entry; for (entry = map; entry != NULL; entry = entry->next) (*func)(entry, closure); } static void elf_putnotes(pid_t pid, struct sbuf *sb, size_t *sizep) { lwpid_t *tids; size_t threads, old_len; ssize_t size; int i; errno = 0; threads = ptrace(PT_GETNUMLWPS, pid, NULL, 0); if (errno) err(1, "PT_GETNUMLWPS"); tids = malloc(threads * sizeof(*tids)); if (tids == NULL) errx(1, "out of memory"); errno = 0; ptrace(PT_GETLWPLIST, pid, (void *)tids, threads); if (errno) err(1, "PT_GETLWPLIST"); sbuf_start_section(sb, &old_len); elf_putnote(NT_PRPSINFO, elf_note_prpsinfo, &pid, sb); for (i = 0; i < threads; ++i) { elf_putregnote(NT_PRSTATUS, tids[i], sb); elf_putregnote(NT_FPREGSET, tids[i], sb); elf_putnote(NT_THRMISC, elf_note_thrmisc, tids + i, sb); elf_putnote(NT_PTLWPINFO, elf_note_ptlwpinfo, tids + i, sb); -#if defined(__arm__) - elf_putnote(NT_ARM_VFP, elf_note_arm_vfp, tids + i, sb); +#if (defined(ELFCORE_COMPAT_32) && defined(__aarch64__)) || defined(__arm__) + elf_putregnote(NT_ARM_VFP, tids[i], sb); #endif #if defined(__i386__) || defined(__amd64__) elf_putnote(NT_X86_XSTATE, elf_note_x86_xstate, tids + i, sb); #endif #if defined(__powerpc__) elf_putnote(NT_PPC_VMX, elf_note_powerpc_vmx, tids + i, sb); #ifndef __SPE__ elf_putnote(NT_PPC_VSX, elf_note_powerpc_vsx, tids + i, sb); #endif #endif } #ifndef ELFCORE_COMPAT_32 elf_putnote(NT_PROCSTAT_PROC, elf_note_procstat_proc, &pid, sb); elf_putnote(NT_PROCSTAT_FILES, elf_note_procstat_files, &pid, sb); elf_putnote(NT_PROCSTAT_VMMAP, elf_note_procstat_vmmap, &pid, sb); elf_putnote(NT_PROCSTAT_GROUPS, elf_note_procstat_groups, &pid, sb); elf_putnote(NT_PROCSTAT_UMASK, elf_note_procstat_umask, &pid, sb); elf_putnote(NT_PROCSTAT_RLIMIT, elf_note_procstat_rlimit, &pid, sb); elf_putnote(NT_PROCSTAT_OSREL, elf_note_procstat_osrel, &pid, sb); elf_putnote(NT_PROCSTAT_PSSTRINGS, elf_note_procstat_psstrings, &pid, sb); elf_putnote(NT_PROCSTAT_AUXV, elf_note_procstat_auxv, &pid, sb); #endif size = sbuf_end_section(sb, old_len, 1, 0); if (size == -1) err(1, "sbuf_end_section"); free(tids); *sizep = size; } /* * Emit one register set note section to sbuf. */ static void elf_putregnote(int type, lwpid_t tid, struct sbuf *sb) { Elf_Note note; struct iovec iov; ssize_t old_len; iov.iov_base = NULL; iov.iov_len = 0; if (ptrace(PT_GETREGSET, tid, (void *)&iov, type) != 0) return; iov.iov_base = calloc(1, iov.iov_len); if (iov.iov_base == NULL) errx(1, "out of memory"); if (ptrace(PT_GETREGSET, tid, (void *)&iov, type) != 0) errx(1, "failed to fetch register set %d", type); note.n_namesz = 8; /* strlen("FreeBSD") + 1 */ note.n_descsz = iov.iov_len; note.n_type = type; sbuf_bcat(sb, ¬e, sizeof(note)); sbuf_start_section(sb, &old_len); sbuf_bcat(sb, "FreeBSD", note.n_namesz); sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0); sbuf_start_section(sb, &old_len); sbuf_bcat(sb, iov.iov_base, iov.iov_len); sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0); free(iov.iov_base); } /* * Emit one note section to sbuf. 
*/ static void elf_putnote(int type, notefunc_t notefunc, void *arg, struct sbuf *sb) { Elf_Note note; size_t descsz; ssize_t old_len; void *desc; desc = notefunc(arg, &descsz); note.n_namesz = 8; /* strlen("FreeBSD") + 1 */ note.n_descsz = descsz; note.n_type = type; sbuf_bcat(sb, ¬e, sizeof(note)); sbuf_start_section(sb, &old_len); sbuf_bcat(sb, "FreeBSD", note.n_namesz); sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0); if (descsz == 0) return; sbuf_start_section(sb, &old_len); sbuf_bcat(sb, desc, descsz); sbuf_end_section(sb, old_len, sizeof(Elf32_Size), 0); free(desc); } /* * Generate the ELF coredump header. */ static void elf_puthdr(int efd, pid_t pid, struct map_entry *map, void *hdr, size_t hdrsize, size_t notesz, size_t segoff, int numsegs) { Elf_Ehdr *ehdr, binhdr; Elf_Phdr *phdr; Elf_Shdr *shdr; struct phdr_closure phc; ssize_t cnt; cnt = read(efd, &binhdr, sizeof(binhdr)); if (cnt < 0) err(1, "Failed to re-read ELF header"); else if (cnt != sizeof(binhdr)) errx(1, "Failed to re-read ELF header"); ehdr = (Elf_Ehdr *)hdr; ehdr->e_ident[EI_MAG0] = ELFMAG0; ehdr->e_ident[EI_MAG1] = ELFMAG1; ehdr->e_ident[EI_MAG2] = ELFMAG2; ehdr->e_ident[EI_MAG3] = ELFMAG3; ehdr->e_ident[EI_CLASS] = ELF_CLASS; ehdr->e_ident[EI_DATA] = ELF_DATA; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD; ehdr->e_ident[EI_ABIVERSION] = 0; ehdr->e_ident[EI_PAD] = 0; ehdr->e_type = ET_CORE; ehdr->e_machine = binhdr.e_machine; ehdr->e_version = EV_CURRENT; ehdr->e_entry = 0; ehdr->e_phoff = sizeof(Elf_Ehdr); ehdr->e_flags = binhdr.e_flags; ehdr->e_ehsize = sizeof(Elf_Ehdr); ehdr->e_phentsize = sizeof(Elf_Phdr); ehdr->e_shentsize = sizeof(Elf_Shdr); ehdr->e_shstrndx = SHN_UNDEF; if (numsegs + 1 < PN_XNUM) { ehdr->e_phnum = numsegs + 1; ehdr->e_shnum = 0; } else { ehdr->e_phnum = PN_XNUM; ehdr->e_shnum = 1; ehdr->e_shoff = ehdr->e_phoff + (numsegs + 1) * ehdr->e_phentsize; shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff); memset(shdr, 0, sizeof(*shdr)); /* * A special first section is used to hold large segment and * section counts. This was proposed by Sun Microsystems in * Solaris and has been adopted by Linux; the standard ELF * tools are already familiar with the technique. * * See table 7-7 of the Solaris "Linker and Libraries Guide" * (or 12-7 depending on the version of the document) for more * details. */ shdr->sh_type = SHT_NULL; shdr->sh_size = ehdr->e_shnum; shdr->sh_link = ehdr->e_shstrndx; shdr->sh_info = numsegs + 1; } /* * Fill in the program header entries. */ phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff); /* The note segment. */ phdr->p_type = PT_NOTE; phdr->p_offset = hdrsize; phdr->p_vaddr = 0; phdr->p_paddr = 0; phdr->p_filesz = notesz; phdr->p_memsz = 0; phdr->p_flags = PF_R; phdr->p_align = sizeof(Elf32_Size); phdr++; /* All the writable segments from the program. */ phc.phdr = phdr; phc.offset = segoff; each_dumpable_segment(map, cb_put_phdr, &phc); } /* * Free the memory map. */ static void freemap(struct map_entry *map) { struct map_entry *next; while (map != NULL) { next = map->next; free(map); map = next; } } /* * Read the process's memory map using kinfo_getvmmap(), and return a list of * VM map entries. Only the non-device read/writable segments are * returned. The map entries in the list aren't fully filled in; only * the items we need are present. 
*/ static struct map_entry * readmap(pid_t pid) { struct map_entry *ent, **linkp, *map; struct kinfo_vmentry *vmentl, *kve; int i, nitems; vmentl = kinfo_getvmmap(pid, &nitems); if (vmentl == NULL) err(1, "cannot retrieve mappings for %u process", pid); map = NULL; linkp = ↦ for (i = 0; i < nitems; i++) { kve = &vmentl[i]; /* * Ignore 'malformed' segments or ones representing memory * mapping with MAP_NOCORE on. * If the 'full' support is disabled, just dump the most * meaningful data segments. */ if ((kve->kve_protection & KVME_PROT_READ) == 0 || (kve->kve_flags & KVME_FLAG_NOCOREDUMP) != 0 || kve->kve_type == KVME_TYPE_DEAD || kve->kve_type == KVME_TYPE_UNKNOWN || ((pflags & PFLAGS_FULL) == 0 && kve->kve_type != KVME_TYPE_DEFAULT && kve->kve_type != KVME_TYPE_VNODE && kve->kve_type != KVME_TYPE_SWAP && kve->kve_type != KVME_TYPE_PHYS)) continue; ent = calloc(1, sizeof(*ent)); if (ent == NULL) errx(1, "out of memory"); ent->start = (vm_offset_t)kve->kve_start; ent->end = (vm_offset_t)kve->kve_end; ent->protection = VM_PROT_READ | VM_PROT_WRITE; if ((kve->kve_protection & KVME_PROT_EXEC) != 0) ent->protection |= VM_PROT_EXECUTE; *linkp = ent; linkp = &ent->next; } free(vmentl); return (map); } /* * Miscellaneous note out functions. */ static void * elf_note_prpsinfo(void *arg, size_t *sizep) { char *cp, *end; pid_t pid; elfcore_prpsinfo_t *psinfo; struct kinfo_proc kip; size_t len; int name[4]; pid = *(pid_t *)arg; psinfo = calloc(1, sizeof(*psinfo)); if (psinfo == NULL) errx(1, "out of memory"); psinfo->pr_version = PRPSINFO_VERSION; psinfo->pr_psinfosz = sizeof(*psinfo); name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = KERN_PROC_PID; name[3] = pid; len = sizeof(kip); if (sysctl(name, 4, &kip, &len, NULL, 0) == -1) err(1, "kern.proc.pid.%u", pid); if (kip.ki_pid != pid) err(1, "kern.proc.pid.%u", pid); strlcpy(psinfo->pr_fname, kip.ki_comm, sizeof(psinfo->pr_fname)); name[2] = KERN_PROC_ARGS; len = sizeof(psinfo->pr_psargs) - 1; if (sysctl(name, 4, psinfo->pr_psargs, &len, NULL, 0) == 0 && len > 0) { cp = psinfo->pr_psargs; end = cp + len - 1; for (;;) { cp = memchr(cp, '\0', end - cp); if (cp == NULL) break; *cp = ' '; } } else strlcpy(psinfo->pr_psargs, kip.ki_comm, sizeof(psinfo->pr_psargs)); psinfo->pr_pid = pid; *sizep = sizeof(*psinfo); return (psinfo); } static void * elf_note_thrmisc(void *arg, size_t *sizep) { lwpid_t tid; struct ptrace_lwpinfo lwpinfo; thrmisc_t *thrmisc; tid = *(lwpid_t *)arg; thrmisc = calloc(1, sizeof(*thrmisc)); if (thrmisc == NULL) errx(1, "out of memory"); ptrace(PT_LWPINFO, tid, (void *)&lwpinfo, sizeof(lwpinfo)); memset(&thrmisc->_pad, 0, sizeof(thrmisc->_pad)); strcpy(thrmisc->pr_tname, lwpinfo.pl_tdname); *sizep = sizeof(*thrmisc); return (thrmisc); } static void * elf_note_ptlwpinfo(void *arg, size_t *sizep) { lwpid_t tid; elfcore_lwpinfo_t *elf_info; struct ptrace_lwpinfo lwpinfo; void *p; tid = *(lwpid_t *)arg; p = calloc(1, sizeof(int) + sizeof(elfcore_lwpinfo_t)); if (p == NULL) errx(1, "out of memory"); *(int *)p = sizeof(elfcore_lwpinfo_t); elf_info = (void *)((int *)p + 1); ptrace(PT_LWPINFO, tid, (void *)&lwpinfo, sizeof(lwpinfo)); elf_convert_lwpinfo(elf_info, &lwpinfo); *sizep = sizeof(int) + sizeof(struct ptrace_lwpinfo); return (p); } #if defined(__arm__) static void * elf_note_arm_vfp(void *arg, size_t *sizep) { lwpid_t tid; struct vfpreg *vfp; static bool has_vfp = true; struct vfpreg info; tid = *(lwpid_t *)arg; if (has_vfp) { if (ptrace(PT_GETVFPREGS, tid, (void *)&info, 0) != 0) has_vfp = false; } if (!has_vfp) { *sizep = 0; return 
(NULL); } vfp = calloc(1, sizeof(*vfp)); memcpy(vfp, &info, sizeof(*vfp)); *sizep = sizeof(*vfp); return (vfp); } #endif #if defined(__i386__) || defined(__amd64__) static void * elf_note_x86_xstate(void *arg, size_t *sizep) { lwpid_t tid; char *xstate; static bool xsave_checked = false; static struct ptrace_xstate_info info; tid = *(lwpid_t *)arg; if (!xsave_checked) { if (ptrace(PT_GETXSTATE_INFO, tid, (void *)&info, sizeof(info)) != 0) info.xsave_len = 0; xsave_checked = true; } if (info.xsave_len == 0) { *sizep = 0; return (NULL); } xstate = calloc(1, info.xsave_len); ptrace(PT_GETXSTATE, tid, xstate, 0); *(uint64_t *)(xstate + X86_XSTATE_XCR0_OFFSET) = info.xsave_mask; *sizep = info.xsave_len; return (xstate); } #endif #if defined(__powerpc__) static void * elf_note_powerpc_vmx(void *arg, size_t *sizep) { lwpid_t tid; struct vmxreg *vmx; static bool has_vmx = true; struct vmxreg info; tid = *(lwpid_t *)arg; if (has_vmx) { if (ptrace(PT_GETVRREGS, tid, (void *)&info, sizeof(info)) != 0) has_vmx = false; } if (!has_vmx) { *sizep = 0; return (NULL); } vmx = calloc(1, sizeof(*vmx)); memcpy(vmx, &info, sizeof(*vmx)); *sizep = sizeof(*vmx); return (vmx); } static void * elf_note_powerpc_vsx(void *arg, size_t *sizep) { lwpid_t tid; char *vshr_data; static bool has_vsx = true; uint64_t vshr[32]; tid = *(lwpid_t *)arg; if (has_vsx) { if (ptrace(PT_GETVSRREGS, tid, (void *)vshr, sizeof(vshr)) != 0) has_vsx = false; } if (!has_vsx) { *sizep = 0; return (NULL); } vshr_data = calloc(1, sizeof(vshr)); memcpy(vshr_data, vshr, sizeof(vshr)); *sizep = sizeof(vshr); return (vshr_data); } #endif static void * procstat_sysctl(void *arg, int what, size_t structsz, size_t *sizep) { size_t len; pid_t pid; int name[4], structsize; void *buf, *p; pid = *(pid_t *)arg; structsize = structsz; name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = what; name[3] = pid; len = 0; if (sysctl(name, 4, NULL, &len, NULL, 0) == -1) err(1, "kern.proc.%d.%u", what, pid); buf = calloc(1, sizeof(structsize) + len * 4 / 3); if (buf == NULL) errx(1, "out of memory"); bcopy(&structsize, buf, sizeof(structsize)); p = (char *)buf + sizeof(structsize); if (sysctl(name, 4, p, &len, NULL, 0) == -1) err(1, "kern.proc.%d.%u", what, pid); *sizep = sizeof(structsize) + len; return (buf); } static void * elf_note_procstat_proc(void *arg, size_t *sizep) { return (procstat_sysctl(arg, KERN_PROC_PID | KERN_PROC_INC_THREAD, sizeof(struct kinfo_proc), sizep)); } static void * elf_note_procstat_files(void *arg, size_t *sizep) { return (procstat_sysctl(arg, KERN_PROC_FILEDESC, sizeof(struct kinfo_file), sizep)); } static void * elf_note_procstat_vmmap(void *arg, size_t *sizep) { return (procstat_sysctl(arg, KERN_PROC_VMMAP, sizeof(struct kinfo_vmentry), sizep)); } static void * elf_note_procstat_groups(void *arg, size_t *sizep) { return (procstat_sysctl(arg, KERN_PROC_GROUPS, sizeof(gid_t), sizep)); } static void * elf_note_procstat_umask(void *arg, size_t *sizep) { return (procstat_sysctl(arg, KERN_PROC_UMASK, sizeof(u_short), sizep)); } static void * elf_note_procstat_osrel(void *arg, size_t *sizep) { return (procstat_sysctl(arg, KERN_PROC_OSREL, sizeof(int), sizep)); } static void * elf_note_procstat_psstrings(void *arg, size_t *sizep) { return (procstat_sysctl(arg, KERN_PROC_PS_STRINGS, sizeof(vm_offset_t), sizep)); } static void * elf_note_procstat_auxv(void *arg, size_t *sizep) { return (procstat_sysctl(arg, KERN_PROC_AUXV, sizeof(Elf_Auxinfo), sizep)); } static void * elf_note_procstat_rlimit(void *arg, size_t *sizep) { pid_t pid; size_t 
len; int i, name[5], structsize; void *buf, *p; pid = *(pid_t *)arg; structsize = sizeof(struct rlimit) * RLIM_NLIMITS; buf = calloc(1, sizeof(structsize) + structsize); if (buf == NULL) errx(1, "out of memory"); bcopy(&structsize, buf, sizeof(structsize)); p = (char *)buf + sizeof(structsize); name[0] = CTL_KERN; name[1] = KERN_PROC; name[2] = KERN_PROC_RLIMIT; name[3] = pid; len = sizeof(struct rlimit); for (i = 0; i < RLIM_NLIMITS; i++) { name[4] = i; if (sysctl(name, 5, p, &len, NULL, 0) == -1) err(1, "kern.proc.rlimit.%u", pid); if (len != sizeof(struct rlimit)) errx(1, "kern.proc.rlimit.%u: short read", pid); p += len; } *sizep = sizeof(structsize) + structsize; return (buf); } struct dumpers __elfN(dump) = { elf_ident, elf_coredump }; TEXT_SET(dumpset, __elfN(dump));
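
The gcore change above switches NT_ARM_VFP dumping from the hand-rolled elf_note_arm_vfp() helper to elf_putregnote(), which talks to the kernel regsets added in sys/arm/arm/ptrace_machdep.c and sys/arm64/arm64/ptrace_machdep.c. For reference, a minimal userspace sketch of that consumer side follows; fetch_vfp_regset() and its error handling are illustrative only and not part of the patch, but the two-pass PT_GETREGSET protocol is the same one elf_putregnote() uses: a first call with a NULL buffer to learn the regset size, then a second call to fetch the data (an mcontext_vfp_t on arm, an mcontext32_vfp_t for 32-bit processes on arm64).

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>
#include <err.h>
#include <stdlib.h>

/* Fetch the NT_ARM_VFP register set of a stopped thread (illustrative). */
static void *
fetch_vfp_regset(lwpid_t tid, size_t *lenp)
{
	struct iovec iov;

	iov.iov_base = NULL;
	iov.iov_len = 0;
	/* First pass: the kernel reports the regset size in iov_len. */
	if (ptrace(PT_GETREGSET, tid, (caddr_t)&iov, NT_ARM_VFP) != 0)
		return (NULL);		/* regset not supported here */
	iov.iov_base = calloc(1, iov.iov_len);
	if (iov.iov_base == NULL)
		errx(1, "out of memory");
	/* Second pass: fetch the VFP state itself. */
	if (ptrace(PT_GETREGSET, tid, (caddr_t)&iov, NT_ARM_VFP) != 0)
		err(1, "PT_GETREGSET(NT_ARM_VFP)");
	*lenp = iov.iov_len;
	return (iov.iov_base);
}

The target thread must already be stopped under ptrace (PT_ATTACH plus waitpid) before this runs, exactly as elf_coredump() arranges before elf_putnotes() emits the notes.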
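
The get_fpcontext32()/set_fpcontext32() pair exported above round-trips the AArch32 FPSCR through the split AArch64 FPSR/FPCR view using the VFP_FPSCR_FROM_SRCR, VFP_FPSR_FROM_FPSCR and VFP_FPCR_FROM_FPSCR macros from sys/arm64/include/vfp.h. A small self-contained check of that round trip is sketched below; it only copies the three macro definitions shown in the header above, and the test values and harness are illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copies of the conversion macros from sys/arm64/include/vfp.h. */
#define	VFP_FPSCR_FROM_SRCR(vpsr, vpcr)	((vpsr) | ((vpcr) & 0x7c00000))
#define	VFP_FPSR_FROM_FPSCR(vpscr)	((vpscr) & ~0x7c00000)
#define	VFP_FPCR_FROM_FPSCR(vpsrc)	((vpsrc) & 0x7c00000)

int
main(void)
{
	/* FPSCR with the DN and FZ control bits plus the cumulative IOC status flag. */
	uint32_t fpscr = 0x03000001;
	uint32_t fpsr = VFP_FPSR_FROM_FPSCR(fpscr);	/* 0x00000001: status bits */
	uint32_t fpcr = VFP_FPCR_FROM_FPSCR(fpscr);	/* 0x03000000: control bits */

	/* Recombining the two halves must give back the original FPSCR. */
	assert(VFP_FPSCR_FROM_SRCR(fpsr, fpcr) == fpscr);
	printf("fpsr=%#x fpcr=%#x fpscr=%#x\n", fpsr, fpcr,
	    VFP_FPSCR_FROM_SRCR(fpsr, fpcr));
	return (0);
}

This is the same mapping set_fpcontext32() applies when it stores a user-supplied FPSCR into pcb_fpustate.vfp_fpsr and vfp_fpcr, and that get_fpcontext32() reverses when it builds mcv_fpscr.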