Index: head/sys/mips/include/proc.h =================================================================== --- head/sys/mips/include/proc.h (revision 362121) +++ head/sys/mips/include/proc.h (revision 362122) @@ -1,101 +1,99 @@ /* $OpenBSD: proc.h,v 1.2 1998/09/15 10:50:12 pefo Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Ralph Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)proc.h 8.1 (Berkeley) 6/10/93 * JNPR: proc.h,v 1.7.2.1 2007/09/10 06:25:24 girish * $FreeBSD$ */ #ifndef _MACHINE_PROC_H_ #define _MACHINE_PROC_H_ #ifdef CPU_CNMIPS #include #endif /* * Machine-dependent part of the proc structure. */ struct mdthread { int md_flags; /* machine-dependent flags */ #if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */ uint64_t md_upte[KSTACK_PAGES]; /* ptes for mapping u pcb */ #else int md_upte[KSTACK_PAGES]; #endif uintptr_t md_ss_addr; /* single step address for ptrace */ int md_ss_instr; /* single step instruction for ptrace */ register_t md_saved_intr; u_int md_spinlock_count; /* The following is CPU dependent, but kept in for compatibility */ int md_pc_ctrl; /* performance counter control */ int md_pc_count; /* performance counter */ int md_pc_spill; /* performance counter spill */ void *md_tls; - size_t md_tls_tcb_offset; /* TCB offset */ #ifdef CPU_CNMIPS struct octeon_cop2_state *md_cop2; /* kernel context */ struct octeon_cop2_state *md_ucop2; /* userland context */ #define COP2_OWNER_USERLAND 0x0000 /* Userland owns COP2 */ #define COP2_OWNER_KERNEL 0x0001 /* Kernel owns COP2 */ int md_cop2owner; #endif }; /* md_flags */ #define MDTD_FPUSED 0x0001 /* Process used the FPU */ #define MDTD_COP2USED 0x0002 /* Process used the COP2 */ struct mdproc { - /* Avoid empty structs because they are undefined behavior. 
*/ - long md_spare; + size_t md_tls_tcb_offset; /* TCB offset */ }; struct syscall_args { u_int code; struct sysent *callp; register_t args[8]; int narg; struct trapframe *trapframe; }; #ifdef __mips_n64 #define KINFO_PROC_SIZE 1088 #define KINFO_PROC32_SIZE 816 #else #define KINFO_PROC_SIZE 816 #endif #endif /* !_MACHINE_PROC_H_ */ Index: head/sys/mips/mips/genassym.c =================================================================== --- head/sys/mips/mips/genassym.c (revision 362121) +++ head/sys/mips/mips/genassym.c (revision 362122) @@ -1,175 +1,177 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
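
The substantive change in proc.h above moves md_tls_tcb_offset out of the per-thread struct mdthread and into the formerly empty struct mdproc: the offset depends only on the process ABI, not on the individual thread, and exec_setregs() later in this diff initializes it once per image (TLS_TP_OFFSET + TLS_TCB_SIZE32 for 32-bit images under COMPAT_FREEBSD32, TLS_TP_OFFSET + TLS_TCB_SIZE otherwise). The value the kernel ultimately exposes through the UserLocal register is the per-thread TLS base plus this per-process offset; a minimal sketch of that arithmetic, using stand-in struct names rather than the real kernel types:

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-ins for the relevant struct mdthread / struct mdproc members. */
    struct mdthread_sketch { void *md_tls; };              /* per-thread TLS base        */
    struct mdproc_sketch { size_t md_tls_tcb_offset; };    /* per-process, set at exec() */

    static uintptr_t
    userlocal_value(const struct mdthread_sketch *td_md,
        const struct mdproc_sketch *p_md)
    {
            /* Same sum that sysarch(MIPS_SET_TLS) and cpu_switch compute. */
            return ((uintptr_t)td_md->md_tls + p_md->md_tls_tcb_offset);
    }
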
* * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 * from: src/sys/i386/i386/genassym.c,v 1.86.2.1 2000/05/16 06:58:06 dillon * JNPR: genassym.c,v 1.4 2007/08/09 11:23:32 katta */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CPU_CNMIPS #include #endif #ifndef offsetof #define offsetof(t,m) (int)((&((t *)0L)->m)) #endif ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); +ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_UPTE, offsetof(struct thread, td_md.md_upte)); ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); ASSYM(TD_MDFLAGS, offsetof(struct thread, td_md.md_flags)); ASSYM(TD_MDTLS, offsetof(struct thread, td_md.md_tls)); -ASSYM(TD_MDTLS_TCB_OFFSET, offsetof(struct thread, td_md.md_tls_tcb_offset)); + +ASSYM(P_MDTLS_TCB_OFFSET, offsetof(struct proc, p_md.md_tls_tcb_offset)); ASSYM(U_PCB_REGS, offsetof(struct pcb, pcb_regs.zero)); ASSYM(U_PCB_CONTEXT, offsetof(struct pcb, pcb_context)); ASSYM(U_PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); ASSYM(U_PCB_FPREGS, offsetof(struct pcb, pcb_regs.f0)); ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_SEGBASE, offsetof(struct pcpu, pc_segbase)); ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(PC_FPCURTHREAD, offsetof(struct pcpu, pc_fpcurthread)); ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid)); ASSYM(VM_MAX_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS); ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS); ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc)); #ifdef COMPAT_FREEBSD32 ASSYM(SIGF32_UC, offsetof(struct sigframe32, sf_uc)); #endif ASSYM(SIGFPE, SIGFPE); ASSYM(PAGE_SHIFT, PAGE_SHIFT); ASSYM(PAGE_SIZE, PAGE_SIZE); ASSYM(PDRSHIFT, PDRSHIFT); ASSYM(SEGSHIFT, SEGSHIFT); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(MAXCOMLEN, MAXCOMLEN); ASSYM(MDTD_COP2USED, MDTD_COP2USED); ASSYM(MIPS_KSEG0_START, MIPS_KSEG0_START); ASSYM(MIPS_KSEG1_START, MIPS_KSEG1_START); ASSYM(MIPS_KSEG2_START, MIPS_KSEG2_START); ASSYM(MIPS_XKSEG_START, MIPS_XKSEG_START); #ifdef CPU_CNMIPS ASSYM(TD_COP2OWNER, offsetof(struct thread, td_md.md_cop2owner)); ASSYM(TD_COP2, offsetof(struct thread, td_md.md_cop2)); ASSYM(TD_UCOP2, offsetof(struct thread, td_md.md_ucop2)); ASSYM(COP2_CRC_IV_OFFSET, offsetof(struct octeon_cop2_state, crc_iv)); ASSYM(COP2_CRC_LENGTH_OFFSET, offsetof(struct octeon_cop2_state, crc_length)); ASSYM(COP2_CRC_POLY_OFFSET, offsetof(struct octeon_cop2_state, crc_poly)); ASSYM(COP2_LLM_DAT0_OFFSET, offsetof(struct octeon_cop2_state, llm_dat)); ASSYM(COP2_LLM_DAT1_OFFSET, offsetof(struct octeon_cop2_state, llm_dat) + 8); ASSYM(COP2_3DES_IV_OFFSET, offsetof(struct octeon_cop2_state, _3des_iv)); ASSYM(COP2_3DES_KEY0_OFFSET, offsetof(struct octeon_cop2_state, _3des_key)); ASSYM(COP2_3DES_KEY1_OFFSET, offsetof(struct octeon_cop2_state, _3des_key) + 8); ASSYM(COP2_3DES_KEY2_OFFSET, offsetof(struct octeon_cop2_state, _3des_key) + 16); ASSYM(COP2_3DES_RESULT_OFFSET, offsetof(struct octeon_cop2_state, _3des_result)); ASSYM(COP2_AES_INP0_OFFSET, offsetof(struct octeon_cop2_state, aes_inp0)); ASSYM(COP2_AES_IV0_OFFSET, offsetof(struct octeon_cop2_state, aes_iv)); ASSYM(COP2_AES_IV1_OFFSET, offsetof(struct octeon_cop2_state, aes_iv) + 8); ASSYM(COP2_AES_KEY0_OFFSET, offsetof(struct octeon_cop2_state, aes_key)); 
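
The genassym.c additions above are what make the new layout reachable from assembly: each ASSYM() entry is emitted into assym.inc as an assembler constant holding a member's byte offset, so swtch.S can first load td_proc from the thread and then load md_tls_tcb_offset from the proc. A rough illustration of the pattern with placeholder layouts (only the offsetof() idiom mirrors the real file):

    #include <stddef.h>

    /* Placeholder layouts, not the real struct proc / struct thread. */
    struct proc_sketch { size_t md_tls_tcb_offset; };
    struct thread_sketch { void *td_pcb; struct proc_sketch *td_proc; };

    /* In the real build these become TD_PROC and P_MDTLS_TCB_OFFSET in assym.inc. */
    enum {
            TD_PROC_SKETCH = offsetof(struct thread_sketch, td_proc),
            P_MDTLS_TCB_OFFSET_SKETCH = offsetof(struct proc_sketch, md_tls_tcb_offset)
    };
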
ASSYM(COP2_AES_KEY1_OFFSET, offsetof(struct octeon_cop2_state, aes_key) + 8); ASSYM(COP2_AES_KEY2_OFFSET, offsetof(struct octeon_cop2_state, aes_key) + 16); ASSYM(COP2_AES_KEY3_OFFSET, offsetof(struct octeon_cop2_state, aes_key) + 24); ASSYM(COP2_AES_KEYLEN_OFFSET, offsetof(struct octeon_cop2_state, aes_keylen)); ASSYM(COP2_AES_RESULT0_OFFSET, offsetof(struct octeon_cop2_state, aes_result)); ASSYM(COP2_AES_RESULT1_OFFSET, offsetof(struct octeon_cop2_state, aes_result) + 8); ASSYM(COP2_HSH_DATW0_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw)); ASSYM(COP2_HSH_DATW1_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 8); ASSYM(COP2_HSH_DATW2_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 16); ASSYM(COP2_HSH_DATW3_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 24); ASSYM(COP2_HSH_DATW4_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 32); ASSYM(COP2_HSH_DATW5_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 40); ASSYM(COP2_HSH_DATW6_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 48); ASSYM(COP2_HSH_DATW7_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 56); ASSYM(COP2_HSH_DATW8_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 64); ASSYM(COP2_HSH_DATW9_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 72); ASSYM(COP2_HSH_DATW10_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 80); ASSYM(COP2_HSH_DATW11_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 88); ASSYM(COP2_HSH_DATW12_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 96); ASSYM(COP2_HSH_DATW13_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 104); ASSYM(COP2_HSH_DATW14_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 112); ASSYM(COP2_HSH_IVW0_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw)); ASSYM(COP2_HSH_IVW1_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw) + 8); ASSYM(COP2_HSH_IVW2_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw) + 16); ASSYM(COP2_HSH_IVW3_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw) + 24); ASSYM(COP2_HSH_IVW4_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw) + 32); ASSYM(COP2_HSH_IVW5_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw) + 40); ASSYM(COP2_HSH_IVW6_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw) + 48); ASSYM(COP2_HSH_IVW7_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw) + 56); ASSYM(COP2_GFM_MULT0_OFFSET, offsetof(struct octeon_cop2_state, gfm_mult)); ASSYM(COP2_GFM_MULT1_OFFSET, offsetof(struct octeon_cop2_state, gfm_mult) + 8); ASSYM(COP2_GFM_POLY_OFFSET, offsetof(struct octeon_cop2_state, gfm_poly)); ASSYM(COP2_GFM_RESULT0_OFFSET, offsetof(struct octeon_cop2_state, gfm_result)); ASSYM(COP2_GFM_RESULT1_OFFSET, offsetof(struct octeon_cop2_state, gfm_result) + 8); ASSYM(COP2_HSH_DATW0_PASS1_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw)); ASSYM(COP2_HSH_DATW1_PASS1_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 8); ASSYM(COP2_HSH_DATW2_PASS1_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 16); ASSYM(COP2_HSH_DATW3_PASS1_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 24); ASSYM(COP2_HSH_DATW4_PASS1_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 32); ASSYM(COP2_HSH_DATW5_PASS1_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 40); ASSYM(COP2_HSH_DATW6_PASS1_OFFSET, offsetof(struct octeon_cop2_state, hsh_datw) + 48); ASSYM(COP2_HSH_IVW0_PASS1_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw)); ASSYM(COP2_HSH_IVW1_PASS1_OFFSET, offsetof(struct octeon_cop2_state, hsh_ivw) + 8); ASSYM(COP2_HSH_IVW2_PASS1_OFFSET, offsetof(struct octeon_cop2_state, 
hsh_ivw) + 16); #endif Index: head/sys/mips/mips/pm_machdep.c =================================================================== --- head/sys/mips/mips/pm_machdep.c (revision 362121) +++ head/sys/mips/mips/pm_machdep.c (revision 362122) @@ -1,516 +1,519 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * from: src/sys/i386/i386/machdep.c,v 1.385.2.3 2000/05/10 02:04:46 obrien * JNPR: pm_machdep.c,v 1.9.2.1 2007/08/16 15:59:10 girish */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define UCONTEXT_MAGIC 0xACEDBADE /* * Send an interrupt to process. * * Stack is set up to allow sigcode stored * at top to call routine, followed by kcall * to sigreturn routine below. After sigreturn * resets the signal mask, the stack, and the * frame pointer, it returns to the user * specified pc, psl. */ void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct proc *p; struct thread *td; struct trapframe *regs; struct sigacts *psp; struct sigframe sf, *sfp; int sig; int oonstack; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); regs = td->td_frame; oonstack = sigonstack(regs->sp); /* save user context */ bzero(&sf, sizeof(struct sigframe)); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 
1 : 0; sf.sf_uc.uc_mcontext.mc_pc = regs->pc; sf.sf_uc.uc_mcontext.mullo = regs->mullo; sf.sf_uc.uc_mcontext.mulhi = regs->mulhi; sf.sf_uc.uc_mcontext.mc_tls = td->td_md.md_tls; sf.sf_uc.uc_mcontext.mc_regs[0] = UCONTEXT_MAGIC; /* magic number */ bcopy((void *)®s->ast, (void *)&sf.sf_uc.uc_mcontext.mc_regs[1], sizeof(sf.sf_uc.uc_mcontext.mc_regs) - sizeof(register_t)); sf.sf_uc.uc_mcontext.mc_fpused = td->td_md.md_flags & MDTD_FPUSED; if (sf.sf_uc.uc_mcontext.mc_fpused) { /* if FPU has current state, save it first */ if (td == PCPU_GET(fpcurthread)) MipsSaveCurFPState(td); bcopy((void *)&td->td_frame->f0, (void *)sf.sf_uc.uc_mcontext.mc_fpregs, sizeof(sf.sf_uc.uc_mcontext.mc_fpregs)); } /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sfp = (struct sigframe *)(((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size - sizeof(struct sigframe)) & ~(STACK_ALIGN - 1)); } else sfp = (struct sigframe *)((vm_offset_t)(regs->sp - sizeof(struct sigframe)) & ~(STACK_ALIGN - 1)); /* Build the argument list for the signal handler. */ regs->a0 = sig; regs->a2 = (register_t)(intptr_t)&sfp->sf_uc; if (SIGISMEMBER(psp->ps_siginfo, sig)) { /* Signal handler installed with SA_SIGINFO. */ regs->a1 = (register_t)(intptr_t)&sfp->sf_si; /* sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; */ /* fill siginfo structure */ sf.sf_si = ksi->ksi_info; sf.sf_si.si_signo = sig; } else { /* Old FreeBSD-style arguments. */ regs->a1 = ksi->ksi_code; regs->a3 = (uintptr_t)ksi->ksi_addr; /* sf.sf_ahu.sf_handler = catcher; */ } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); /* * Copy the sigframe out to the user's stack. */ if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) { /* * Something is wrong with the stack pointer. * ...Kill the process. */ PROC_LOCK(p); sigexit(td, SIGILL); } regs->pc = (register_t)(intptr_t)catcher; regs->t9 = (register_t)(intptr_t)catcher; regs->sp = (register_t)(intptr_t)sfp; /* * Signal trampoline code is at base of user stack. */ regs->ra = (register_t)(intptr_t)PS_STRINGS - *(p->p_sysent->sv_szsigcode); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } /* * System call to cleanup state after a signal * has been taken. Reset signal mask and * stack state from context left by sendsig (above). * Return to previous pc as specified by * context left by sendsig. */ int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { ucontext_t uc; int error; error = copyin(uap->sigcntxp, &uc, sizeof(uc)); if (error != 0) return (error); error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } int ptrace_set_pc(struct thread *td, unsigned long addr) { td->td_frame->pc = (register_t) addr; return 0; } static int ptrace_read_int(struct thread *td, uintptr_t addr, int *v) { if (proc_readmem(td, td->td_proc, addr, v, sizeof(*v)) != sizeof(*v)) return (EFAULT); return (0); } static int ptrace_write_int(struct thread *td, uintptr_t addr, int v) { if (proc_writemem(td, td->td_proc, addr, &v, sizeof(v)) != sizeof(v)) return (EFAULT); return (0); } int ptrace_single_step(struct thread *td) { uintptr_t va; struct trapframe *locr0 = td->td_frame; int error; int bpinstr = MIPS_BREAK_SSTEP; int curinstr; struct proc *p; p = td->td_proc; PROC_UNLOCK(p); /* * Fetch what's at the current location. 
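
ptrace_single_step(), which begins here, implements single-stepping in software: it computes the address the CPU will execute next (the branch target, via MipsEmulateBranch(), if the trap happened in a branch-delay slot, otherwise pc + 4), saves the instruction found there, and overwrites it with a break instruction. A schematic C rendering of that strategy, with hypothetical read_insn/write_insn callbacks standing in for ptrace_read_int()/ptrace_write_int() and a placeholder break opcode:

    #include <stdint.h>

    #define BREAK_SSTEP_SKETCH 0xdeadbeefu  /* placeholder; the real code uses MIPS_BREAK_SSTEP */

    struct sstep_sketch {
            uintptr_t addr;         /* where the breakpoint instruction was planted */
            uint32_t saved;         /* original instruction, restored on clear      */
    };

    static int
    plant_sstep_sketch(uintptr_t pc, int in_delay_slot, uintptr_t branch_target,
        struct sstep_sketch *ss,
        int (*read_insn)(uintptr_t, uint32_t *), int (*write_insn)(uintptr_t, uint32_t))
    {
            /* Next PC: the branch target if we stopped in a delay slot, else pc + 4. */
            uintptr_t next = in_delay_slot ? branch_target : pc + 4;
            int error;

            if ((error = read_insn(next, &ss->saved)) != 0)
                    return (error);                         /* remember what we overwrite */
            ss->addr = next;
            return (write_insn(next, BREAK_SSTEP_SKETCH));  /* fault on the next step */
    }
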
*/ error = ptrace_read_int(td, locr0->pc, &curinstr); if (error) goto out; CTR3(KTR_PTRACE, "ptrace_single_step: tid %d, current instr at %#lx: %#08x", td->td_tid, locr0->pc, curinstr); /* compute next address after current location */ if (locr0->cause & MIPS_CR_BR_DELAY) { va = MipsEmulateBranch(locr0, locr0->pc, locr0->fsr, (uintptr_t)&curinstr); } else { va = locr0->pc + 4; } if (td->td_md.md_ss_addr) { printf("SS %s (%d): breakpoint already set at %p (va %p)\n", p->p_comm, p->p_pid, (void *)td->td_md.md_ss_addr, (void *)va); /* XXX */ error = EFAULT; goto out; } td->td_md.md_ss_addr = va; /* * Fetch what's at the current location. */ error = ptrace_read_int(td, (off_t)va, &td->td_md.md_ss_instr); if (error) goto out; /* * Store breakpoint instruction at the "next" location now. */ error = ptrace_write_int(td, va, bpinstr); /* * The sync'ing of I & D caches is done by proc_rwmem() * through proc_writemem(). */ out: PROC_LOCK(p); if (error == 0) CTR3(KTR_PTRACE, "ptrace_single_step: tid %d, break set at %#lx: (%#08x)", td->td_tid, va, td->td_md.md_ss_instr); return (error); } void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_context[PCB_REG_RA] = tf->ra; pcb->pcb_context[PCB_REG_PC] = tf->pc; pcb->pcb_context[PCB_REG_SP] = tf->sp; } int fill_regs(struct thread *td, struct reg *regs) { memcpy(regs, td->td_frame, sizeof(struct reg)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *f; register_t sr; f = (struct trapframe *) td->td_frame; /* * Don't allow the user to change SR */ sr = f->sr; memcpy(td->td_frame, regs, sizeof(struct reg)); f->sr = sr; return (0); } int get_mcontext(struct thread *td, mcontext_t *mcp, int flags) { struct trapframe *tp; tp = td->td_frame; PROC_LOCK(curthread->td_proc); mcp->mc_onstack = sigonstack(tp->sp); PROC_UNLOCK(curthread->td_proc); bcopy((void *)&td->td_frame->zero, (void *)&mcp->mc_regs, sizeof(mcp->mc_regs)); mcp->mc_fpused = td->td_md.md_flags & MDTD_FPUSED; if (mcp->mc_fpused) { bcopy((void *)&td->td_frame->f0, (void *)&mcp->mc_fpregs, sizeof(mcp->mc_fpregs)); } if (flags & GET_MC_CLEAR_RET) { mcp->mc_regs[V0] = 0; mcp->mc_regs[V1] = 0; mcp->mc_regs[A3] = 0; } mcp->mc_pc = td->td_frame->pc; mcp->mullo = td->td_frame->mullo; mcp->mulhi = td->td_frame->mulhi; mcp->mc_tls = td->td_md.md_tls; return (0); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tp; tp = td->td_frame; bcopy((void *)&mcp->mc_regs, (void *)&td->td_frame->zero, sizeof(mcp->mc_regs)); td->td_md.md_flags = mcp->mc_fpused & MDTD_FPUSED; if (mcp->mc_fpused) { bcopy((void *)&mcp->mc_fpregs, (void *)&td->td_frame->f0, sizeof(mcp->mc_fpregs)); } td->td_frame->pc = mcp->mc_pc; td->td_frame->mullo = mcp->mullo; td->td_frame->mulhi = mcp->mulhi; td->td_md.md_tls = mcp->mc_tls; /* Dont let user to set any bits in status and cause registers. */ return (0); } int fill_fpregs(struct thread *td, struct fpreg *fpregs) { if (td == PCPU_GET(fpcurthread)) MipsSaveCurFPState(td); memcpy(fpregs, &td->td_frame->f0, sizeof(struct fpreg)); fpregs->r_regs[FIR_NUM] = cpuinfo.fpu_id; return 0; } int set_fpregs(struct thread *td, struct fpreg *fpregs) { if (PCPU_GET(fpcurthread) == td) PCPU_SET(fpcurthread, (struct thread *)0); memcpy(&td->td_frame->f0, fpregs, sizeof(struct fpreg)); return 0; } /* * Clear registers on exec * $sp is set to the stack pointer passed in. $pc is set to the entry * point given by the exec_package passed in, as is $t9 (used for PIC * code by the MIPS elf abi). 
*/ void exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack) { bzero((caddr_t)td->td_frame, sizeof(struct trapframe)); td->td_frame->sp = ((register_t)stack) & ~(STACK_ALIGN - 1); /* * If we're running o32 or n32 programs but have 64-bit registers, * GCC may use stack-relative addressing near the top of user * address space that, due to sign extension, will yield an * invalid address. For instance, if sp is 0x7fffff00 then GCC * might do something like this to load a word from 0x7ffffff0: * * addu sp, sp, 32768 * lw t0, -32528(sp) * * On systems with 64-bit registers, sp is sign-extended to * 0xffffffff80007f00 and the load is instead done from * 0xffffffff7ffffff0. * * To prevent this, we subtract 64K from the stack pointer here * for processes with 32-bit pointers. */ #if defined(__mips_n32) || defined(__mips_n64) if (!SV_PROC_FLAG(td->td_proc, SV_LP64)) td->td_frame->sp -= 65536; #endif td->td_frame->pc = imgp->entry_addr & ~3; td->td_frame->t9 = imgp->entry_addr & ~3; /* abicall req */ td->td_frame->sr = MIPS_SR_KSU_USER | MIPS_SR_EXL | MIPS_SR_INT_IE | (mips_rd_status() & MIPS_SR_INT_MASK); #if defined(__mips_n32) || defined(__mips_n64) td->td_frame->sr |= MIPS_SR_PX; #endif #if defined(__mips_n64) if (SV_PROC_FLAG(td->td_proc, SV_LP64)) td->td_frame->sr |= MIPS_SR_UX; td->td_frame->sr |= MIPS_SR_KX; #endif /* * FREEBSD_DEVELOPERS_FIXME: * Setup any other CPU-Specific registers (Not MIPS Standard) * and/or bits in other standard MIPS registers (if CPU-Specific) * that are needed. */ /* * Set up arguments for the rtld-capable crt0: * a0 stack pointer * a1 rtld cleanup (filled in by dynamic loader) * a2 rtld object (filled in by dynamic loader) * a3 ps_strings */ td->td_frame->a0 = (register_t) stack; td->td_frame->a1 = 0; td->td_frame->a2 = 0; td->td_frame->a3 = (register_t)imgp->ps_strings; td->td_md.md_flags &= ~MDTD_FPUSED; if (PCPU_GET(fpcurthread) == td) PCPU_SET(fpcurthread, (struct thread *)0); td->td_md.md_ss_addr = 0; + td->td_md.md_tls = NULL; #ifdef COMPAT_FREEBSD32 if (!SV_PROC_FLAG(td->td_proc, SV_LP64)) - td->td_md.md_tls_tcb_offset = TLS_TP_OFFSET + TLS_TCB_SIZE32; + td->td_proc->p_md.md_tls_tcb_offset = TLS_TP_OFFSET + + TLS_TCB_SIZE32; else #endif - td->td_md.md_tls_tcb_offset = TLS_TP_OFFSET + TLS_TCB_SIZE; + td->td_proc->p_md.md_tls_tcb_offset = TLS_TP_OFFSET + + TLS_TCB_SIZE; } int ptrace_clear_single_step(struct thread *td) { struct proc *p; int error; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); if (!td->td_md.md_ss_addr) return EINVAL; /* * Restore original instruction and clear BP */ PROC_UNLOCK(p); CTR3(KTR_PTRACE, "ptrace_clear_single_step: tid %d, restore instr at %#lx: %#08x", td->td_tid, td->td_md.md_ss_addr, td->td_md.md_ss_instr); error = ptrace_write_int(td, td->td_md.md_ss_addr, td->td_md.md_ss_instr); PROC_LOCK(p); /* The sync'ing of I & D caches is done by proc_rwmem(). */ if (error != 0) { log(LOG_ERR, "SS %s %d: can't restore instruction at %p: %x\n", p->p_comm, p->p_pid, (void *)td->td_md.md_ss_addr, td->td_md.md_ss_instr); } td->td_md.md_ss_addr = 0; return 0; } Index: head/sys/mips/mips/swtch.S =================================================================== --- head/sys/mips/mips/swtch.S (revision 362121) +++ head/sys/mips/mips/swtch.S (revision 362122) @@ -1,690 +1,691 @@ /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. 
* * This code is derived from software contributed to Berkeley by * Digital Equipment Corporation and Ralph Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Copyright (C) 1989 Digital Equipment Corporation. * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby granted, * provided that the above copyright notice appears in all copies. * Digital Equipment Corporation makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s, * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL) * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s, * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL) * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s, * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL) * * from: @(#)locore.s 8.5 (Berkeley) 1/4/94 * JNPR: swtch.S,v 1.6.2.1 2007/09/10 10:36:50 girish * $FreeBSD$ */ /* * Contains code that is the first executed at boot time plus * assembly language support routines. */ #include #include #include #include #include #include #include #include "assym.inc" .set noreorder # Noreorder is default style! /* * Setup for and return to user. */ LEAF(fork_trampoline) move a0,s0 move a1,s1 jal _C_LABEL(fork_exit) move a2,s2 #BDSlot DO_AST mfc0 v0, MIPS_COP_0_STATUS and v0, ~(MIPS_SR_INT_IE) mtc0 v0, MIPS_COP_0_STATUS # disable interrupts COP0_SYNC /* * The use of k1 for storing the PCB pointer must be done only * after interrupts are disabled. Otherwise it will get overwritten * by the interrupt code. 
*/ .set noat GET_CPU_PCPU(k1) PTR_L k1, PC_CURPCB(k1) RESTORE_U_PCB_REG(t0, MULLO, k1) RESTORE_U_PCB_REG(t1, MULHI, k1) mtlo t0 mthi t1 RESTORE_U_PCB_REG(a0, PC, k1) RESTORE_U_PCB_REG(AT, AST, k1) RESTORE_U_PCB_REG(v0, V0, k1) MTC0 a0, MIPS_COP_0_EXC_PC # set return address RESTORE_U_PCB_REG(v1, V1, k1) RESTORE_U_PCB_REG(a0, A0, k1) RESTORE_U_PCB_REG(a1, A1, k1) RESTORE_U_PCB_REG(a2, A2, k1) RESTORE_U_PCB_REG(a3, A3, k1) RESTORE_U_PCB_REG(t0, T0, k1) RESTORE_U_PCB_REG(t1, T1, k1) RESTORE_U_PCB_REG(t2, T2, k1) RESTORE_U_PCB_REG(t3, T3, k1) RESTORE_U_PCB_REG(ta0, TA0, k1) RESTORE_U_PCB_REG(ta1, TA1, k1) RESTORE_U_PCB_REG(ta2, TA2, k1) RESTORE_U_PCB_REG(ta3, TA3, k1) RESTORE_U_PCB_REG(s0, S0, k1) RESTORE_U_PCB_REG(s1, S1, k1) RESTORE_U_PCB_REG(s2, S2, k1) RESTORE_U_PCB_REG(s3, S3, k1) RESTORE_U_PCB_REG(s4, S4, k1) RESTORE_U_PCB_REG(s5, S5, k1) RESTORE_U_PCB_REG(s6, S6, k1) RESTORE_U_PCB_REG(s7, S7, k1) RESTORE_U_PCB_REG(t8, T8, k1) RESTORE_U_PCB_REG(t9, T9, k1) RESTORE_U_PCB_REG(k0, SR, k1) RESTORE_U_PCB_REG(gp, GP, k1) RESTORE_U_PCB_REG(s8, S8, k1) RESTORE_U_PCB_REG(ra, RA, k1) RESTORE_U_PCB_REG(sp, SP, k1) li k1, ~MIPS_SR_INT_MASK and k0, k0, k1 mfc0 k1, MIPS_COP_0_STATUS and k1, k1, MIPS_SR_INT_MASK or k0, k0, k1 mtc0 k0, MIPS_COP_0_STATUS # switch to user mode (when eret...) HAZARD_DELAY sync eret .set at END(fork_trampoline) /* * Update pcb, saving current processor state. * Note: this only works if pcbp != curproc's pcb since * cpu_switch() will copy over pcb_context. * * savectx(struct pcb *pcbp); */ LEAF(savectx) SAVE_U_PCB_CONTEXT(s0, PCB_REG_S0, a0) SAVE_U_PCB_CONTEXT(s1, PCB_REG_S1, a0) SAVE_U_PCB_CONTEXT(s2, PCB_REG_S2, a0) SAVE_U_PCB_CONTEXT(s3, PCB_REG_S3, a0) mfc0 v0, MIPS_COP_0_STATUS SAVE_U_PCB_CONTEXT(s4, PCB_REG_S4, a0) SAVE_U_PCB_CONTEXT(s5, PCB_REG_S5, a0) SAVE_U_PCB_CONTEXT(s6, PCB_REG_S6, a0) SAVE_U_PCB_CONTEXT(s7, PCB_REG_S7, a0) SAVE_U_PCB_CONTEXT(sp, PCB_REG_SP, a0) SAVE_U_PCB_CONTEXT(s8, PCB_REG_S8, a0) SAVE_U_PCB_CONTEXT(ra, PCB_REG_RA, a0) SAVE_U_PCB_CONTEXT(v0, PCB_REG_SR, a0) SAVE_U_PCB_CONTEXT(gp, PCB_REG_GP, a0) move v0, ra /* save 'ra' before we trash it */ jal 1f nop 1: SAVE_U_PCB_CONTEXT(ra, PCB_REG_PC, a0) move ra, v0 /* restore 'ra' before returning */ j ra move v0, zero END(savectx) NESTED(cpu_throw, CALLFRAME_SIZ, ra) mfc0 t0, MIPS_COP_0_STATUS # t0 = saved status register nop nop and a3, t0, ~(MIPS_SR_INT_IE) mtc0 a3, MIPS_COP_0_STATUS # Disable all interrupts ITLBNOPFIX j mips_sw1 # We're not interested in old # thread's context, so jump # right to action nop # BDSLOT END(cpu_throw) /* * cpu_switch(struct thread *old, struct thread *new, struct mutex *mtx); * a0 - old * a1 - new * a2 - mtx * Find the highest priority process and resume it. 
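
In outline, the switch path that follows saves the outgoing thread's callee-saved context into its PCB, hands off td_lock, wires the incoming kernel stack into TLB entry 0, restores the incoming PCB, and finally reloads the UserLocal register for the new thread. A rough C rendering of that shape; the helper names are illustrative and the assembly below is authoritative, but the UserLocal arithmetic matches the change made in this revision:

    #include <stddef.h>

    struct pcb_sketch { unsigned long pcb_context[12]; };
    struct proc_sketch { unsigned long md_tls_tcb_offset; };
    struct thread_sketch {
            struct pcb_sketch *td_pcb;
            struct proc_sketch *td_proc;
            char *md_tls;
    };

    extern void save_callee_saved(struct pcb_sketch *);     /* savectx()-style save */
    extern void restore_callee_saved(struct pcb_sketch *);  /* sp, s0-s8, ra, SR    */
    extern void write_userlocal(unsigned long);             /* MTC0 ..., USERLOCAL  */

    static void
    cpu_switch_sketch(struct thread_sketch *oldtd, struct thread_sketch *newtd)
    {
            if (oldtd != NULL)                      /* cpu_throw enters without an old thread */
                    save_callee_saved(oldtd->td_pcb);
            restore_callee_saved(newtd->td_pcb);
            /* New in this revision: the TCB offset is reached through td_proc. */
            write_userlocal((unsigned long)newtd->md_tls +
                newtd->td_proc->md_tls_tcb_offset);
    }
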
*/ NESTED(cpu_switch, CALLFRAME_SIZ, ra) mfc0 t0, MIPS_COP_0_STATUS # t0 = saved status register nop nop and a3, t0, ~(MIPS_SR_INT_IE) mtc0 a3, MIPS_COP_0_STATUS # Disable all interrupts ITLBNOPFIX beqz a0, mips_sw1 move a3, a0 PTR_L a0, TD_PCB(a0) # load PCB addr of curproc SAVE_U_PCB_CONTEXT(sp, PCB_REG_SP, a0) # save old sp PTR_SUBU sp, sp, CALLFRAME_SIZ REG_S ra, CALLFRAME_RA(sp) .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ) SAVE_U_PCB_CONTEXT(s0, PCB_REG_S0, a0) # do a 'savectx()' SAVE_U_PCB_CONTEXT(s1, PCB_REG_S1, a0) SAVE_U_PCB_CONTEXT(s2, PCB_REG_S2, a0) SAVE_U_PCB_CONTEXT(s3, PCB_REG_S3, a0) SAVE_U_PCB_CONTEXT(s4, PCB_REG_S4, a0) SAVE_U_PCB_CONTEXT(s5, PCB_REG_S5, a0) SAVE_U_PCB_CONTEXT(s6, PCB_REG_S6, a0) SAVE_U_PCB_CONTEXT(s7, PCB_REG_S7, a0) SAVE_U_PCB_CONTEXT(s8, PCB_REG_S8, a0) SAVE_U_PCB_CONTEXT(ra, PCB_REG_RA, a0) # save return address SAVE_U_PCB_CONTEXT(t0, PCB_REG_SR, a0) # save status register SAVE_U_PCB_CONTEXT(gp, PCB_REG_GP, a0) jal getpc nop getpc: SAVE_U_PCB_CONTEXT(ra, PCB_REG_PC, a0) # save return address #ifdef CPU_CNMIPS lw t2, TD_MDFLAGS(a3) # get md_flags and t1, t2, MDTD_COP2USED beqz t1, cop2_untouched nop /* Clear cop2used flag */ and t2, t2, ~MDTD_COP2USED sw t2, TD_MDFLAGS(a3) and t2, t0, ~MIPS_SR_COP_2_BIT # clear COP_2 enable bit SAVE_U_PCB_CONTEXT(t2, PCB_REG_SR, a0) # save status register RESTORE_U_PCB_REG(t0, PS, a0) # get CPU status register and t2, t0, ~MIPS_SR_COP_2_BIT # clear COP_2 enable bit SAVE_U_PCB_REG(t2, PS, a0) # save stratus register /* preserve a0..a3 */ move s0, a0 move s1, a1 move s2, a2 move s3, a3 /* does kernel own COP2 context? */ lw t1, TD_COP2OWNER(a3) # get md_cop2owner beqz t1, userland_cop2 # 0 - it's userland context nop PTR_L a0, TD_COP2(a3) beqz a0, no_cop2_context nop j do_cop2_save nop userland_cop2: PTR_L a0, TD_UCOP2(a3) beqz a0, no_cop2_context nop do_cop2_save: jal octeon_cop2_save nop no_cop2_context: move a3, s3 move a2, s2 move a1, s1 move a0, s0 cop2_untouched: #endif PTR_S a2, TD_LOCK(a3) # Switchout td_lock mips_sw1: #if defined(SMP) && defined(SCHED_ULE) PTR_LA t0, _C_LABEL(blocked_lock) blocked_loop: PTR_L t1, TD_LOCK(a1) beq t0, t1, blocked_loop nop #endif move s7, a1 # Store newthread /* * Switch to new context. */ GET_CPU_PCPU(a3) PTR_S a1, PC_CURTHREAD(a3) PTR_L a2, TD_PCB(a1) PTR_S a2, PC_CURPCB(a3) PTR_L v0, TD_KSTACK(a1) #if defined(__mips_n64) PTR_LI s0, MIPS_XKSEG_START #else PTR_LI s0, MIPS_KSEG2_START # If Uarea addr is below kseg2, #endif bltu v0, s0, sw2 # no need to insert in TLB. PTE_L a1, TD_UPTE + 0(s7) # a1 = u. pte #0 PTE_L a2, TD_UPTE + PTESIZE(s7) # a2 = u. pte #1 /* * Wiredown the USPACE of newproc in TLB entry#0. Check whether target * USPACE is already in another place of TLB before that, and if so * invalidate that TLB entry. * NOTE: This is hard coded to UPAGES == 2. * Also, there should be no TLB faults at this point. */ MTC0 v0, MIPS_COP_0_TLB_HI # VPN = va HAZARD_DELAY tlbp # probe VPN HAZARD_DELAY mfc0 s0, MIPS_COP_0_TLB_INDEX HAZARD_DELAY PTR_LI t1, MIPS_KSEG0_START # invalidate tlb entry bltz s0, entry0set nop sll s0, PAGE_SHIFT + 1 addu t1, s0 MTC0 t1, MIPS_COP_0_TLB_HI PTE_MTC0 zero, MIPS_COP_0_TLB_LO0 PTE_MTC0 zero, MIPS_COP_0_TLB_LO1 HAZARD_DELAY tlbwi HAZARD_DELAY MTC0 v0, MIPS_COP_0_TLB_HI # set VPN again entry0set: /* SMP!! - Works only for unshared TLB case - i.e. 
no v-cpus */ mtc0 zero, MIPS_COP_0_TLB_INDEX # TLB entry #0 HAZARD_DELAY PTE_MTC0 a1, MIPS_COP_0_TLB_LO0 # upte[0] HAZARD_DELAY PTE_MTC0 a2, MIPS_COP_0_TLB_LO1 # upte[1] HAZARD_DELAY tlbwi # set TLB entry #0 HAZARD_DELAY /* * Now running on new u struct. */ sw2: PTR_L s0, TD_PCB(s7) RESTORE_U_PCB_CONTEXT(sp, PCB_REG_SP, s0) PTR_LA t1, _C_LABEL(pmap_activate) # s7 = new proc pointer jalr t1 # s7 = new proc pointer move a0, s7 # BDSLOT /* * Restore registers and return. */ move a0, s0 move a1, s7 RESTORE_U_PCB_CONTEXT(gp, PCB_REG_GP, a0) RESTORE_U_PCB_CONTEXT(v0, PCB_REG_SR, a0) # restore kernel context RESTORE_U_PCB_CONTEXT(ra, PCB_REG_RA, a0) RESTORE_U_PCB_CONTEXT(s0, PCB_REG_S0, a0) RESTORE_U_PCB_CONTEXT(s1, PCB_REG_S1, a0) RESTORE_U_PCB_CONTEXT(s2, PCB_REG_S2, a0) RESTORE_U_PCB_CONTEXT(s3, PCB_REG_S3, a0) RESTORE_U_PCB_CONTEXT(s4, PCB_REG_S4, a0) RESTORE_U_PCB_CONTEXT(s5, PCB_REG_S5, a0) RESTORE_U_PCB_CONTEXT(s6, PCB_REG_S6, a0) RESTORE_U_PCB_CONTEXT(s7, PCB_REG_S7, a0) RESTORE_U_PCB_CONTEXT(s8, PCB_REG_S8, a0) mfc0 t0, MIPS_COP_0_STATUS and t0, t0, MIPS_SR_INT_MASK and v0, v0, ~MIPS_SR_INT_MASK or v0, v0, t0 mtc0 v0, MIPS_COP_0_STATUS ITLBNOPFIX /* * Set the new thread's TLS pointer. * * Note that this code is removed if the CPU doesn't support ULRI by * remove_userlocal_code() in cpu.c. */ .globl cpu_switch_set_userlocal cpu_switch_set_userlocal: PTR_L t0, TD_MDTLS(a1) # Get TLS pointer - PTR_L t1, TD_MDTLS_TCB_OFFSET(a1) # Get TLS/TCB offset + PTR_L t1, TD_PROC(a1) + PTR_L t1, P_MDTLS_TCB_OFFSET(t1) # Get TLS/TCB offset PTR_ADDU v0, t0, t1 MTC0 v0, MIPS_COP_0_USERLOCAL, 2 # write it to ULR for rdhwr j ra nop END(cpu_switch) /*---------------------------------------------------------------------------- * * MipsSwitchFPState -- * * Save the current state into 'from' and restore it from 'to'. * * MipsSwitchFPState(from, to) * struct thread *from; * struct trapframe *to; * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------------- */ LEAF(MipsSwitchFPState) .set push .set hardfloat mfc0 t1, MIPS_COP_0_STATUS # Save old SR HAZARD_DELAY #if defined(__mips_n32) || defined(__mips_n64) or t0, t1, MIPS_SR_COP_1_BIT | MIPS_SR_FR # enable the coprocessor #else or t0, t1, MIPS_SR_COP_1_BIT # enable the coprocessor #endif mtc0 t0, MIPS_COP_0_STATUS HAZARD_DELAY ITLBNOPFIX beq a0, zero, 1f # skip save if NULL pointer nop /* * First read out the status register to make sure that all FP operations * have completed. */ PTR_L a0, TD_PCB(a0) # get pointer to pcb for proc cfc1 t0, MIPS_FPU_CSR # stall til FP done cfc1 t0, MIPS_FPU_CSR # now get status li t3, ~MIPS_SR_COP_1_BIT RESTORE_U_PCB_REG(t2, PS, a0) # get CPU status register SAVE_U_PCB_FPSR(t0, FSR_NUM, a0) # save FP status and t2, t2, t3 # clear COP_1 enable bit SAVE_U_PCB_REG(t2, PS, a0) # save new status register /* * Save the floating point registers. 
*/ SAVE_U_PCB_FPREG($f0, F0_NUM, a0) SAVE_U_PCB_FPREG($f1, F1_NUM, a0) SAVE_U_PCB_FPREG($f2, F2_NUM, a0) SAVE_U_PCB_FPREG($f3, F3_NUM, a0) SAVE_U_PCB_FPREG($f4, F4_NUM, a0) SAVE_U_PCB_FPREG($f5, F5_NUM, a0) SAVE_U_PCB_FPREG($f6, F6_NUM, a0) SAVE_U_PCB_FPREG($f7, F7_NUM, a0) SAVE_U_PCB_FPREG($f8, F8_NUM, a0) SAVE_U_PCB_FPREG($f9, F9_NUM, a0) SAVE_U_PCB_FPREG($f10, F10_NUM, a0) SAVE_U_PCB_FPREG($f11, F11_NUM, a0) SAVE_U_PCB_FPREG($f12, F12_NUM, a0) SAVE_U_PCB_FPREG($f13, F13_NUM, a0) SAVE_U_PCB_FPREG($f14, F14_NUM, a0) SAVE_U_PCB_FPREG($f15, F15_NUM, a0) SAVE_U_PCB_FPREG($f16, F16_NUM, a0) SAVE_U_PCB_FPREG($f17, F17_NUM, a0) SAVE_U_PCB_FPREG($f18, F18_NUM, a0) SAVE_U_PCB_FPREG($f19, F19_NUM, a0) SAVE_U_PCB_FPREG($f20, F20_NUM, a0) SAVE_U_PCB_FPREG($f21, F21_NUM, a0) SAVE_U_PCB_FPREG($f22, F22_NUM, a0) SAVE_U_PCB_FPREG($f23, F23_NUM, a0) SAVE_U_PCB_FPREG($f24, F24_NUM, a0) SAVE_U_PCB_FPREG($f25, F25_NUM, a0) SAVE_U_PCB_FPREG($f26, F26_NUM, a0) SAVE_U_PCB_FPREG($f27, F27_NUM, a0) SAVE_U_PCB_FPREG($f28, F28_NUM, a0) SAVE_U_PCB_FPREG($f29, F29_NUM, a0) SAVE_U_PCB_FPREG($f30, F30_NUM, a0) SAVE_U_PCB_FPREG($f31, F31_NUM, a0) 1: /* * Restore the floating point registers. */ RESTORE_U_PCB_FPSR(t0, FSR_NUM, a1) # get status register RESTORE_U_PCB_FPREG($f0, F0_NUM, a1) RESTORE_U_PCB_FPREG($f1, F1_NUM, a1) RESTORE_U_PCB_FPREG($f2, F2_NUM, a1) RESTORE_U_PCB_FPREG($f3, F3_NUM, a1) RESTORE_U_PCB_FPREG($f4, F4_NUM, a1) RESTORE_U_PCB_FPREG($f5, F5_NUM, a1) RESTORE_U_PCB_FPREG($f6, F6_NUM, a1) RESTORE_U_PCB_FPREG($f7, F7_NUM, a1) RESTORE_U_PCB_FPREG($f8, F8_NUM, a1) RESTORE_U_PCB_FPREG($f9, F9_NUM, a1) RESTORE_U_PCB_FPREG($f10, F10_NUM, a1) RESTORE_U_PCB_FPREG($f11, F11_NUM, a1) RESTORE_U_PCB_FPREG($f12, F12_NUM, a1) RESTORE_U_PCB_FPREG($f13, F13_NUM, a1) RESTORE_U_PCB_FPREG($f14, F14_NUM, a1) RESTORE_U_PCB_FPREG($f15, F15_NUM, a1) RESTORE_U_PCB_FPREG($f16, F16_NUM, a1) RESTORE_U_PCB_FPREG($f17, F17_NUM, a1) RESTORE_U_PCB_FPREG($f18, F18_NUM, a1) RESTORE_U_PCB_FPREG($f19, F19_NUM, a1) RESTORE_U_PCB_FPREG($f20, F20_NUM, a1) RESTORE_U_PCB_FPREG($f21, F21_NUM, a1) RESTORE_U_PCB_FPREG($f22, F22_NUM, a1) RESTORE_U_PCB_FPREG($f23, F23_NUM, a1) RESTORE_U_PCB_FPREG($f24, F24_NUM, a1) RESTORE_U_PCB_FPREG($f25, F25_NUM, a1) RESTORE_U_PCB_FPREG($f26, F26_NUM, a1) RESTORE_U_PCB_FPREG($f27, F27_NUM, a1) RESTORE_U_PCB_FPREG($f28, F28_NUM, a1) RESTORE_U_PCB_FPREG($f29, F29_NUM, a1) RESTORE_U_PCB_FPREG($f30, F30_NUM, a1) RESTORE_U_PCB_FPREG($f31, F31_NUM, a1) and t0, t0, ~MIPS_FPU_EXCEPTION_BITS ctc1 t0, MIPS_FPU_CSR nop mtc0 t1, MIPS_COP_0_STATUS # Restore the status register. ITLBNOPFIX j ra nop .set pop END(MipsSwitchFPState) /*---------------------------------------------------------------------------- * * MipsFPID -- * * Read and return the floating point implementation register. * * uint32_t * MipsFPID(void) * * Results: * Floating point implementation register. * * Side effects: * None. * *---------------------------------------------------------------------------- */ LEAF(MipsFPID) .set push .set hardfloat mfc0 t1, MIPS_COP_0_STATUS # Save the status register. HAZARD_DELAY #if defined(__mips_n32) || defined(__mips_n64) or t0, t1, MIPS_SR_COP_1_BIT | MIPS_SR_FR #else or t0, t1, MIPS_SR_COP_1_BIT #endif mtc0 t0, MIPS_COP_0_STATUS # Enable the coprocessor HAZARD_DELAY ITLBNOPFIX cfc1 v0, MIPS_FPU_ID mtc0 t1, MIPS_COP_0_STATUS # Restore the status register. 
ITLBNOPFIX j ra nop .set pop END(MipsFPID) /*---------------------------------------------------------------------------- * * MipsSaveCurFPState -- * * Save the current floating point coprocessor state. * * MipsSaveCurFPState(td) * struct thread *td; * * Results: * None. * * Side effects: * machFPCurProcPtr is cleared. * *---------------------------------------------------------------------------- */ LEAF(MipsSaveCurFPState) .set push .set hardfloat PTR_L a0, TD_PCB(a0) # get pointer to pcb for thread mfc0 t1, MIPS_COP_0_STATUS # Disable interrupts and HAZARD_DELAY #if defined(__mips_n32) || defined(__mips_n64) or t0, t1, MIPS_SR_COP_1_BIT | MIPS_SR_FR # enable the coprocessor #else or t0, t1, MIPS_SR_COP_1_BIT # enable the coprocessor #endif mtc0 t0, MIPS_COP_0_STATUS HAZARD_DELAY ITLBNOPFIX GET_CPU_PCPU(a1) PTR_S zero, PC_FPCURTHREAD(a1) # indicate state has been saved /* * First read out the status register to make sure that all FP operations * have completed. */ RESTORE_U_PCB_REG(t2, PS, a0) # get CPU status register li t3, ~MIPS_SR_COP_1_BIT and t2, t2, t3 # clear COP_1 enable bit cfc1 t0, MIPS_FPU_CSR # stall til FP done cfc1 t0, MIPS_FPU_CSR # now get status SAVE_U_PCB_REG(t2, PS, a0) # save new status register SAVE_U_PCB_FPSR(t0, FSR_NUM, a0) # save FP status /* * Save the floating point registers. */ SAVE_U_PCB_FPREG($f0, F0_NUM, a0) SAVE_U_PCB_FPREG($f1, F1_NUM, a0) SAVE_U_PCB_FPREG($f2, F2_NUM, a0) SAVE_U_PCB_FPREG($f3, F3_NUM, a0) SAVE_U_PCB_FPREG($f4, F4_NUM, a0) SAVE_U_PCB_FPREG($f5, F5_NUM, a0) SAVE_U_PCB_FPREG($f6, F6_NUM, a0) SAVE_U_PCB_FPREG($f7, F7_NUM, a0) SAVE_U_PCB_FPREG($f8, F8_NUM, a0) SAVE_U_PCB_FPREG($f9, F9_NUM, a0) SAVE_U_PCB_FPREG($f10, F10_NUM, a0) SAVE_U_PCB_FPREG($f11, F11_NUM, a0) SAVE_U_PCB_FPREG($f12, F12_NUM, a0) SAVE_U_PCB_FPREG($f13, F13_NUM, a0) SAVE_U_PCB_FPREG($f14, F14_NUM, a0) SAVE_U_PCB_FPREG($f15, F15_NUM, a0) SAVE_U_PCB_FPREG($f16, F16_NUM, a0) SAVE_U_PCB_FPREG($f17, F17_NUM, a0) SAVE_U_PCB_FPREG($f18, F18_NUM, a0) SAVE_U_PCB_FPREG($f19, F19_NUM, a0) SAVE_U_PCB_FPREG($f20, F20_NUM, a0) SAVE_U_PCB_FPREG($f21, F21_NUM, a0) SAVE_U_PCB_FPREG($f22, F22_NUM, a0) SAVE_U_PCB_FPREG($f23, F23_NUM, a0) SAVE_U_PCB_FPREG($f24, F24_NUM, a0) SAVE_U_PCB_FPREG($f25, F25_NUM, a0) SAVE_U_PCB_FPREG($f26, F26_NUM, a0) SAVE_U_PCB_FPREG($f27, F27_NUM, a0) SAVE_U_PCB_FPREG($f28, F28_NUM, a0) SAVE_U_PCB_FPREG($f29, F29_NUM, a0) SAVE_U_PCB_FPREG($f30, F30_NUM, a0) SAVE_U_PCB_FPREG($f31, F31_NUM, a0) mtc0 t1, MIPS_COP_0_STATUS # Restore the status register. ITLBNOPFIX j ra nop .set pop END(MipsSaveCurFPState) /* * This code is copied the user's stack for returning from signal handlers * (see sendsig() and sigreturn()). We have to compute the address * of the sigcontext struct for the sigreturn call. 
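
sendsig() earlier in this diff points the handler's return address at the trampoline below, which sits at the base of the user stack; when the handler returns, the trampoline's only job is to hand the saved ucontext back to the kernel. A C-level sketch of what those few instructions amount to, with an abbreviated stand-in for struct sigframe:

    #include <signal.h>
    #include <ucontext.h>

    struct sigframe_sketch {        /* abbreviated stand-in for struct sigframe */
            ucontext_t sf_uc;
            siginfo_t sf_si;
    };

    static void
    trampoline_sketch(struct sigframe_sketch *sfp)
    {
            /* "addu a0, sp, SIGF_UC; li v0, SYS_sigreturn; syscall" */
            sigreturn(&sfp->sf_uc);
            __builtin_trap();       /* the trailing "break 0", in case sigreturn fails */
    }
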
*/ .globl _C_LABEL(sigcode) _C_LABEL(sigcode): PTR_ADDU a0, sp, SIGF_UC # address of ucontext li v0, SYS_sigreturn # sigreturn (ucp) syscall break 0 # just in case sigreturn fails .globl _C_LABEL(esigcode) _C_LABEL(esigcode): .data .globl szsigcode szsigcode: .long esigcode-sigcode .text #if (defined(__mips_n32) || defined(__mips_n64)) && defined(COMPAT_FREEBSD32) .globl _C_LABEL(sigcode32) _C_LABEL(sigcode32): addu a0, sp, SIGF32_UC # address of ucontext li v0, SYS_sigreturn # sigreturn (ucp) syscall break 0 # just in case sigreturn fails .globl _C_LABEL(esigcode32) _C_LABEL(esigcode32): .data .globl szsigcode32 szsigcode32: .long esigcode32-sigcode32 .text #endif Index: head/sys/mips/mips/sys_machdep.c =================================================================== --- head/sys/mips/mips/sys_machdep.c (revision 362121) +++ head/sys/mips/mips/sys_machdep.c (revision 362122) @@ -1,86 +1,86 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #ifndef _SYS_SYSPROTO_H_ struct sysarch_args { int op; char *parms; }; #endif int sysarch(struct thread *td, struct sysarch_args *uap) { int error; void *tlsbase; switch (uap->op) { case MIPS_SET_TLS: td->td_md.md_tls = uap->parms; /* * If there is an user local register implementation (ULRI) * update it as well. Add the TLS and TCB offsets so the * value in this register is adjusted like in the case of the * rdhwr trap() instruction handler. 
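
For completeness, the userland side of the MIPS_SET_TLS case here and the MIPS_GET_TLS case that follows: a minimal sketch assuming the userland sysarch(2) prototype from <machine/sysarch.h>, with error handling omitted and "tcb" being whatever pointer the threading runtime allocated:

    #include <machine/sysarch.h>    /* sysarch(2) prototype and MIPS_*_TLS */

    static void
    set_tls_sketch(void *tcb)
    {
            /* The kernel stores tcb in td_md.md_tls and, when ULRI is present,
             * writes tcb + p_md.md_tls_tcb_offset into the UserLocal register. */
            sysarch(MIPS_SET_TLS, tcb);
    }

    static void *
    get_tls_sketch(void)
    {
            void *tcb;

            sysarch(MIPS_GET_TLS, &tcb);
            return (tcb);
    }
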
*/ if (cpuinfo.userlocal_reg == true) { mips_wr_userlocal((unsigned long)(uap->parms + - td->td_md.md_tls_tcb_offset)); + td->td_proc->p_md.md_tls_tcb_offset)); } return (0); case MIPS_GET_TLS: tlsbase = td->td_md.md_tls; error = copyout(&tlsbase, uap->parms, sizeof(tlsbase)); return (error); default: break; } return (EINVAL); } Index: head/sys/mips/mips/trap.c =================================================================== --- head/sys/mips/mips/trap.c (revision 362121) +++ head/sys/mips/mips/trap.c (revision 362122) @@ -1,1704 +1,1704 @@ /* $OpenBSD: trap.c,v 1.19 1998/09/30 12:40:41 pefo Exp $ */ /* tracked to 1.23 */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1988 University of Utah. * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and Ralph Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: Utah Hdr: trap.c 1.32 91/04/06 * * from: @(#)trap.c 8.5 (Berkeley) 1/11/94 * JNPR: trap.c,v 1.13.2.2 2007/08/29 10:03:49 girish */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_ktrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #include #include #include #endif #ifdef KDTRACE_HOOKS #include #endif #ifdef TRAP_DEBUG int trap_debug = 0; SYSCTL_INT(_machdep, OID_AUTO, trap_debug, CTLFLAG_RW, &trap_debug, 0, "Debug information on all traps"); #endif #define lbu_macro(data, addr) \ __asm __volatile ("lbu %0, 0x0(%1)" \ : "=r" (data) /* outputs */ \ : "r" (addr)); /* inputs */ #define lb_macro(data, addr) \ __asm __volatile ("lb %0, 0x0(%1)" \ : "=r" (data) /* outputs */ \ : "r" (addr)); /* inputs */ #define lwl_macro(data, addr) \ __asm __volatile ("lwl %0, 0x0(%1)" \ : "+r" (data) /* outputs */ \ : "r" (addr)); /* inputs */ #define lwr_macro(data, addr) \ __asm __volatile ("lwr %0, 0x0(%1)" \ : "+r" (data) /* outputs */ \ : "r" (addr)); /* inputs */ #define ldl_macro(data, addr) \ __asm __volatile ("ldl %0, 0x0(%1)" \ : "+r" (data) /* outputs */ \ : "r" (addr)); /* inputs */ #define ldr_macro(data, addr) \ __asm __volatile ("ldr %0, 0x0(%1)" \ : "+r" (data) /* outputs */ \ : "r" (addr)); /* inputs */ #define sb_macro(data, addr) \ __asm __volatile ("sb %0, 0x0(%1)" \ : /* outputs */ \ : "r" (data), "r" (addr)); /* inputs */ #define swl_macro(data, addr) \ __asm __volatile ("swl %0, 0x0(%1)" \ : /* outputs */ \ : "r" (data), "r" (addr)); /* inputs */ #define swr_macro(data, addr) \ __asm __volatile ("swr %0, 0x0(%1)" \ : /* outputs */ \ : "r" (data), "r" (addr)); /* inputs */ #define sdl_macro(data, addr) \ __asm __volatile ("sdl %0, 0x0(%1)" \ : /* outputs */ \ : "r" (data), "r" (addr)); /* inputs */ #define sdr_macro(data, addr) \ __asm __volatile ("sdr %0, 0x0(%1)" \ : /* outputs */ \ : "r" (data), "r" (addr)); /* inputs */ static void log_illegal_instruction(const char *, struct trapframe *); static void log_bad_page_fault(char *, struct trapframe *, int); static void log_frame_dump(struct trapframe *frame); static void get_mapping_info(vm_offset_t, pd_entry_t **, pt_entry_t **); int (*dtrace_invop_jump_addr)(struct trapframe *); #ifdef TRAP_DEBUG static void trap_frame_dump(struct trapframe *frame); #endif void (*machExceptionTable[]) (void)= { /* * The kernel exception handlers. */ MipsKernIntr, /* external interrupt */ MipsKernGenException, /* TLB modification */ MipsTLBInvalidException,/* TLB miss (load or instr. 
fetch) */ MipsTLBInvalidException,/* TLB miss (store) */ MipsKernGenException, /* address error (load or I-fetch) */ MipsKernGenException, /* address error (store) */ MipsKernGenException, /* bus error (I-fetch) */ MipsKernGenException, /* bus error (load or store) */ MipsKernGenException, /* system call */ MipsKernGenException, /* breakpoint */ MipsKernGenException, /* reserved instruction */ MipsKernGenException, /* coprocessor unusable */ MipsKernGenException, /* arithmetic overflow */ MipsKernGenException, /* trap exception */ MipsKernGenException, /* virtual coherence exception inst */ MipsKernGenException, /* floating point exception */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* watch exception */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* reserved */ MipsKernGenException, /* virtual coherence exception data */ /* * The user exception handlers. */ MipsUserIntr, /* 0 */ MipsUserGenException, /* 1 */ MipsTLBInvalidException,/* 2 */ MipsTLBInvalidException,/* 3 */ MipsUserGenException, /* 4 */ MipsUserGenException, /* 5 */ MipsUserGenException, /* 6 */ MipsUserGenException, /* 7 */ MipsUserGenException, /* 8 */ MipsUserGenException, /* 9 */ MipsUserGenException, /* 10 */ MipsUserGenException, /* 11 */ MipsUserGenException, /* 12 */ MipsUserGenException, /* 13 */ MipsUserGenException, /* 14 */ MipsUserGenException, /* 15 */ MipsUserGenException, /* 16 */ MipsUserGenException, /* 17 */ MipsUserGenException, /* 18 */ MipsUserGenException, /* 19 */ MipsUserGenException, /* 20 */ MipsUserGenException, /* 21 */ MipsUserGenException, /* 22 */ MipsUserGenException, /* 23 */ MipsUserGenException, /* 24 */ MipsUserGenException, /* 25 */ MipsUserGenException, /* 26 */ MipsUserGenException, /* 27 */ MipsUserGenException, /* 28 */ MipsUserGenException, /* 29 */ MipsUserGenException, /* 20 */ MipsUserGenException, /* 31 */ }; char *trap_type[] = { "external interrupt", "TLB modification", "TLB miss (load or instr. 
fetch)", "TLB miss (store)", "address error (load or I-fetch)", "address error (store)", "bus error (I-fetch)", "bus error (load or store)", "system call", "breakpoint", "reserved instruction", "coprocessor unusable", "arithmetic overflow", "trap", "virtual coherency instruction", "floating point", "reserved 16", "reserved 17", "reserved 18", "reserved 19", "reserved 20", "reserved 21", "reserved 22", "watch", "reserved 24", "reserved 25", "reserved 26", "reserved 27", "reserved 28", "reserved 29", "reserved 30", "virtual coherency data", }; #if !defined(SMP) && (defined(DDB) || defined(DEBUG)) struct trapdebug trapdebug[TRAPSIZE], *trp = trapdebug; #endif #define KERNLAND(x) ((vm_offset_t)(x) >= VM_MIN_KERNEL_ADDRESS && (vm_offset_t)(x) < VM_MAX_KERNEL_ADDRESS) #define DELAYBRANCH(x) ((x) & MIPS_CR_BR_DELAY) /* * MIPS load/store access type */ enum { MIPS_LHU_ACCESS = 1, MIPS_LH_ACCESS, MIPS_LWU_ACCESS, MIPS_LW_ACCESS, MIPS_LD_ACCESS, MIPS_SH_ACCESS, MIPS_SW_ACCESS, MIPS_SD_ACCESS }; char *access_name[] = { "Load Halfword Unsigned", "Load Halfword", "Load Word Unsigned", "Load Word", "Load Doubleword", "Store Halfword", "Store Word", "Store Doubleword" }; #ifdef CPU_CNMIPS #include #endif static int allow_unaligned_acc = 1; SYSCTL_INT(_vm, OID_AUTO, allow_unaligned_acc, CTLFLAG_RW, &allow_unaligned_acc, 0, "Allow unaligned accesses"); /* * FP emulation is assumed to work on O32, but the code is outdated and crufty * enough that it's a more sensible default to have it disabled when using * other ABIs. At the very least, it needs a lot of help in using * type-semantic ABI-oblivious macros for everything it does. */ #if defined(__mips_o32) static int emulate_fp = 1; #else static int emulate_fp = 0; #endif SYSCTL_INT(_machdep, OID_AUTO, emulate_fp, CTLFLAG_RW, &emulate_fp, 0, "Emulate unimplemented FPU instructions"); static int emulate_unaligned_access(struct trapframe *frame, int mode); extern void fswintrberr(void); /* XXX */ int cpu_fetch_syscall_args(struct thread *td) { struct trapframe *locr0; struct sysentvec *se; struct syscall_args *sa; int error, nsaved; locr0 = td->td_frame; sa = &td->td_sa; bzero(sa->args, sizeof(sa->args)); /* compute next PC after syscall instruction */ td->td_pcb->pcb_tpc = sa->trapframe->pc; /* Remember if restart */ if (DELAYBRANCH(sa->trapframe->cause)) /* Check BD bit */ locr0->pc = MipsEmulateBranch(locr0, sa->trapframe->pc, 0, 0); else locr0->pc += sizeof(int); sa->code = locr0->v0; switch (sa->code) { case SYS___syscall: case SYS_syscall: /* * This is an indirect syscall, in which the code is the first argument. */ #if (!defined(__mips_n32) && !defined(__mips_n64)) || defined(COMPAT_FREEBSD32) if (sa->code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32)) { /* * Like syscall, but code is a quad, so as to maintain alignment * for the rest of the arguments. */ if (_QUAD_LOWWORD == 0) sa->code = locr0->a0; else sa->code = locr0->a1; sa->args[0] = locr0->a2; sa->args[1] = locr0->a3; nsaved = 2; break; } #endif /* * This is either not a quad syscall, or is a quad syscall with a * new ABI in which quads fit in a single register. */ sa->code = locr0->a0; sa->args[0] = locr0->a1; sa->args[1] = locr0->a2; sa->args[2] = locr0->a3; nsaved = 3; #if defined(__mips_n32) || defined(__mips_n64) #ifdef COMPAT_FREEBSD32 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) { #endif /* * Non-o32 ABIs support more arguments in registers. 
*/ sa->args[3] = locr0->a4; sa->args[4] = locr0->a5; sa->args[5] = locr0->a6; sa->args[6] = locr0->a7; nsaved += 4; #ifdef COMPAT_FREEBSD32 } #endif #endif break; default: /* * A direct syscall, arguments are just parameters to the syscall. */ sa->args[0] = locr0->a0; sa->args[1] = locr0->a1; sa->args[2] = locr0->a2; sa->args[3] = locr0->a3; nsaved = 4; #if defined (__mips_n32) || defined(__mips_n64) #ifdef COMPAT_FREEBSD32 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) { #endif /* * Non-o32 ABIs support more arguments in registers. */ sa->args[4] = locr0->a4; sa->args[5] = locr0->a5; sa->args[6] = locr0->a6; sa->args[7] = locr0->a7; nsaved += 4; #ifdef COMPAT_FREEBSD32 } #endif #endif break; } #ifdef TRAP_DEBUG if (trap_debug) printf("SYSCALL #%d pid:%u\n", sa->code, td->td_proc->p_pid); #endif se = td->td_proc->p_sysent; /* * XXX * Shouldn't this go before switching on the code? */ if (sa->code >= se->sv_size) sa->callp = &se->sv_table[0]; else sa->callp = &se->sv_table[sa->code]; sa->narg = sa->callp->sy_narg; if (sa->narg > nsaved) { #if defined(__mips_n32) || defined(__mips_n64) /* * XXX * Is this right for new ABIs? I think the 4 there * should be 8, since there are 8 registers to skip, * not 4, but I'm not certain. */ #ifdef COMPAT_FREEBSD32 if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) #endif printf("SYSCALL #%u pid:%u, narg (%u) > nsaved (%u).\n", sa->code, td->td_proc->p_pid, sa->narg, nsaved); #endif #if (defined(__mips_n32) || defined(__mips_n64)) && defined(COMPAT_FREEBSD32) if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { unsigned i; int32_t arg; error = 0; /* XXX GCC is awful. */ for (i = nsaved; i < sa->narg; i++) { error = copyin((caddr_t)(intptr_t)(locr0->sp + (4 + (i - nsaved)) * sizeof(int32_t)), (caddr_t)&arg, sizeof arg); if (error != 0) break; sa->args[i] = arg; } } else #endif error = copyin((caddr_t)(intptr_t)(locr0->sp + 4 * sizeof(register_t)), (caddr_t)&sa->args[nsaved], (u_int)(sa->narg - nsaved) * sizeof(register_t)); if (error != 0) { locr0->v0 = error; locr0->a3 = 1; } } else error = 0; if (error == 0) { td->td_retval[0] = 0; td->td_retval[1] = locr0->v1; } return (error); } #undef __FBSDID #define __FBSDID(x) #include "../../kern/subr_syscall.c" /* * Handle an exception. * Called from MipsKernGenException() or MipsUserGenException() * when a processor trap occurs. * In the case of a kernel trap, we return the pc where to resume if * p->p_addr->u_pcb.pcb_onfault is set, otherwise, return old pc. */ register_t trap(struct trapframe *trapframe) { int type, usermode; int i = 0; unsigned ucode = 0; struct thread *td = curthread; struct proc *p = curproc; vm_prot_t ftype; pmap_t pmap; int access_type; ksiginfo_t ksi; char *msg = NULL; intptr_t addr = 0; register_t pc; int cop, error; register_t *frame_regs; trapdebug_enter(trapframe, 0); #ifdef KDB if (kdb_active) { kdb_reenter(); return (0); } #endif type = (trapframe->cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT; if (TRAPF_USERMODE(trapframe)) { type |= T_USER; usermode = 1; } else { usermode = 0; } /* * Enable hardware interrupts if they were on before the trap. If they * were off, disable all so we don't accidentally enable them when doing a * return to userland.
*/ if (trapframe->sr & MIPS_SR_INT_IE) { set_intr_mask(trapframe->sr & MIPS_SR_INT_MASK); intr_enable(); } else { intr_disable(); } #ifdef TRAP_DEBUG if (trap_debug) { static vm_offset_t last_badvaddr = 0; static vm_offset_t this_badvaddr = 0; static int count = 0; u_int32_t pid; printf("trap type %x (%s - ", type, trap_type[type & (~T_USER)]); if (type & T_USER) printf("user mode)\n"); else printf("kernel mode)\n"); #ifdef SMP printf("cpuid = %d\n", PCPU_GET(cpuid)); #endif pid = mips_rd_entryhi() & TLBHI_ASID_MASK; printf("badaddr = %#jx, pc = %#jx, ra = %#jx, sp = %#jx, sr = %jx, pid = %d, ASID = %u\n", (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra, (intmax_t)trapframe->sp, (intmax_t)trapframe->sr, (curproc ? curproc->p_pid : -1), pid); switch (type & ~T_USER) { case T_TLB_MOD: case T_TLB_LD_MISS: case T_TLB_ST_MISS: case T_ADDR_ERR_LD: case T_ADDR_ERR_ST: this_badvaddr = trapframe->badvaddr; break; case T_SYSCALL: this_badvaddr = trapframe->ra; break; default: this_badvaddr = trapframe->pc; break; } if ((last_badvaddr == this_badvaddr) && ((type & ~T_USER) != T_SYSCALL) && ((type & ~T_USER) != T_COP_UNUSABLE)) { if (++count == 3) { trap_frame_dump(trapframe); panic("too many faults at %p\n", (void *)last_badvaddr); } } else { last_badvaddr = this_badvaddr; count = 0; } } #endif #ifdef KDTRACE_HOOKS /* * A trap can occur while DTrace executes a probe. Before * executing the probe, DTrace blocks re-scheduling and sets * a flag in its per-cpu flags to indicate that it doesn't * want to fault. On returning from the probe, the no-fault * flag is cleared and finally re-scheduling is enabled. * * If the DTrace kernel module has registered a trap handler, * call it and if it returns non-zero, assume that it has * handled the trap and modified the trap frame so that this * function can return normally. */ /* * XXXDTRACE: add pid probe handler here (if ever) */ if (!usermode) { if (dtrace_trap_func != NULL && (*dtrace_trap_func)(trapframe, type) != 0) return (trapframe->pc); } #endif switch (type) { case T_MCHECK: #ifdef DDB kdb_trap(type, 0, trapframe); #endif panic("MCHECK\n"); break; case T_TLB_MOD: /* check for kernel address */ if (KERNLAND(trapframe->badvaddr)) { if (pmap_emulate_modified(kernel_pmap, trapframe->badvaddr) != 0) { ftype = VM_PROT_WRITE; goto kernel_fault; } return (trapframe->pc); } /* FALLTHROUGH */ case T_TLB_MOD + T_USER: pmap = &p->p_vmspace->vm_pmap; if (pmap_emulate_modified(pmap, trapframe->badvaddr) != 0) { ftype = VM_PROT_WRITE; goto dofault; } if (!usermode) return (trapframe->pc); goto out; case T_TLB_LD_MISS: case T_TLB_ST_MISS: ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ; /* check for kernel address */ if (KERNLAND(trapframe->badvaddr)) { vm_offset_t va; int rv; kernel_fault: va = (vm_offset_t)trapframe->badvaddr; rv = vm_fault_trap(kernel_map, va, ftype, VM_FAULT_NORMAL, NULL, NULL); if (rv == KERN_SUCCESS) return (trapframe->pc); if (td->td_pcb->pcb_onfault != NULL) { pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault; td->td_pcb->pcb_onfault = NULL; return (pc); } goto err; } /* * It is an error for the kernel to access user space except * through the copyin/copyout routines. 
*/ if (td->td_pcb->pcb_onfault == NULL) goto err; goto dofault; case T_TLB_LD_MISS + T_USER: ftype = VM_PROT_READ; goto dofault; case T_TLB_ST_MISS + T_USER: ftype = VM_PROT_WRITE; dofault: { vm_offset_t va; struct vmspace *vm; vm_map_t map; int rv = 0; vm = p->p_vmspace; map = &vm->vm_map; va = (vm_offset_t)trapframe->badvaddr; if (KERNLAND(trapframe->badvaddr)) { /* * Don't allow user-mode faults in kernel * address space. */ goto nogo; } rv = vm_fault_trap(map, va, ftype, VM_FAULT_NORMAL, &i, &ucode); /* * XXXDTRACE: add dtrace_doubletrap_func here? */ #ifdef VMFAULT_TRACE printf("vm_fault(%p (pmap %p), %p (%p), %x, %d) -> %x at pc %p\n", map, &vm->vm_pmap, (void *)va, (void *)(intptr_t)trapframe->badvaddr, ftype, VM_FAULT_NORMAL, rv, (void *)(intptr_t)trapframe->pc); #endif if (rv == KERN_SUCCESS) { if (!usermode) { return (trapframe->pc); } goto out; } nogo: if (!usermode) { if (td->td_pcb->pcb_onfault != NULL) { pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault; td->td_pcb->pcb_onfault = NULL; return (pc); } goto err; } addr = trapframe->pc; msg = "BAD_PAGE_FAULT"; log_bad_page_fault(msg, trapframe, type); break; } case T_ADDR_ERR_LD + T_USER: /* misaligned or kseg access */ case T_ADDR_ERR_ST + T_USER: /* misaligned or kseg access */ if (trapframe->badvaddr < 0 || trapframe->badvaddr >= VM_MAXUSER_ADDRESS) { msg = "ADDRESS_SPACE_ERR"; } else if (allow_unaligned_acc) { int mode; if (type == (T_ADDR_ERR_LD + T_USER)) mode = VM_PROT_READ; else mode = VM_PROT_WRITE; access_type = emulate_unaligned_access(trapframe, mode); if (access_type != 0) goto out; msg = "ALIGNMENT_FIX_ERR"; } else { msg = "ADDRESS_ERR"; } /* FALL THROUGH */ case T_BUS_ERR_IFETCH + T_USER: /* BERR asserted to cpu */ case T_BUS_ERR_LD_ST + T_USER: /* BERR asserted to cpu */ ucode = 0; /* XXX should be VM_PROT_something */ i = SIGBUS; addr = trapframe->pc; if (!msg) msg = "BUS_ERR"; log_bad_page_fault(msg, trapframe, type); break; case T_SYSCALL + T_USER: { td->td_sa.trapframe = trapframe; syscallenter(td); #if !defined(SMP) && (defined(DDB) || defined(DEBUG)) if (trp == trapdebug) trapdebug[TRAPSIZE - 1].code = td->td_sa.code; else trp[-1].code = td->td_sa.code; #endif trapdebug_enter(td->td_frame, -td->td_sa.code); /* * The sync'ing of I & D caches for SYS_ptrace() is * done by procfs_domem() through procfs_rwmem() * instead of being done here under a special check * for SYS_ptrace(). 
*/ syscallret(td); return (trapframe->pc); } #if defined(KDTRACE_HOOKS) || defined(DDB) case T_BREAK: #ifdef KDTRACE_HOOKS if (!usermode && dtrace_invop_jump_addr != NULL && dtrace_invop_jump_addr(trapframe) == 0) return (trapframe->pc); #endif #ifdef DDB kdb_trap(type, 0, trapframe); return (trapframe->pc); #endif #endif case T_BREAK + T_USER: { intptr_t va; uint32_t instr; i = SIGTRAP; ucode = TRAP_BRKPT; addr = trapframe->pc; /* compute address of break instruction */ va = trapframe->pc; if (DELAYBRANCH(trapframe->cause)) va += sizeof(int); if (td->td_md.md_ss_addr != va) break; /* read break instruction */ instr = fuword32((caddr_t)va); if (instr != MIPS_BREAK_SSTEP) break; CTR3(KTR_PTRACE, "trap: tid %d, single step at %#lx: %#08x", td->td_tid, va, instr); PROC_LOCK(p); _PHOLD(p); error = ptrace_clear_single_step(td); _PRELE(p); PROC_UNLOCK(p); if (error == 0) ucode = TRAP_TRACE; break; } case T_IWATCH + T_USER: case T_DWATCH + T_USER: { intptr_t va; /* compute address of trapped instruction */ va = trapframe->pc; if (DELAYBRANCH(trapframe->cause)) va += sizeof(int); printf("watch exception @ %p\n", (void *)va); i = SIGTRAP; ucode = TRAP_BRKPT; addr = va; break; } case T_TRAP + T_USER: { intptr_t va; struct trapframe *locr0 = td->td_frame; /* compute address of trap instruction */ va = trapframe->pc; if (DELAYBRANCH(trapframe->cause)) va += sizeof(int); if (DELAYBRANCH(trapframe->cause)) { /* Check BD bit */ locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0); } else { locr0->pc += sizeof(int); } addr = va; i = SIGEMT; /* Stuff it with something for now */ break; } case T_RES_INST + T_USER: { InstFmt inst; inst = *(InstFmt *)(intptr_t)trapframe->pc; switch (inst.RType.op) { case OP_SPECIAL3: switch (inst.RType.func) { case OP_RDHWR: /* Register 29 used for TLS */ if (inst.RType.rd == 29) { frame_regs = &(trapframe->zero); frame_regs[inst.RType.rt] = (register_t)(intptr_t)td->td_md.md_tls; - frame_regs[inst.RType.rt] += td->td_md.md_tls_tcb_offset; + frame_regs[inst.RType.rt] += td->td_proc->p_md.md_tls_tcb_offset; trapframe->pc += sizeof(int); goto out; } break; } break; } log_illegal_instruction("RES_INST", trapframe); i = SIGILL; addr = trapframe->pc; } break; case T_C2E: case T_C2E + T_USER: goto err; break; case T_COP_UNUSABLE: #ifdef CPU_CNMIPS cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT; /* Handle only COP2 exception */ if (cop != 2) goto err; addr = trapframe->pc; /* save userland cop2 context if it has been touched */ if ((td->td_md.md_flags & MDTD_COP2USED) && (td->td_md.md_cop2owner == COP2_OWNER_USERLAND)) { if (td->td_md.md_ucop2) octeon_cop2_save(td->td_md.md_ucop2); else panic("COP2 was used in user mode but md_ucop2 is NULL"); } if (td->td_md.md_cop2 == NULL) { td->td_md.md_cop2 = octeon_cop2_alloc_ctx(); if (td->td_md.md_cop2 == NULL) panic("Failed to allocate COP2 context"); memset(td->td_md.md_cop2, 0, sizeof(*td->td_md.md_cop2)); } octeon_cop2_restore(td->td_md.md_cop2); /* Make userland re-request its context */ td->td_frame->sr &= ~MIPS_SR_COP_2_BIT; td->td_md.md_flags |= MDTD_COP2USED; td->td_md.md_cop2owner = COP2_OWNER_KERNEL; /* Enable COP2, it will be disabled in cpu_switch */ mips_wr_status(mips_rd_status() | MIPS_SR_COP_2_BIT); return (trapframe->pc); #else goto err; break; #endif case T_COP_UNUSABLE + T_USER: cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT; if (cop == 1) { /* FP (COP1) instruction */ if (cpuinfo.fpu_id == 0) { log_illegal_instruction("COP1_UNUSABLE", trapframe); i = SIGILL; break; } addr = 
trapframe->pc; MipsSwitchFPState(PCPU_GET(fpcurthread), td->td_frame); PCPU_SET(fpcurthread, td); #if defined(__mips_n32) || defined(__mips_n64) td->td_frame->sr |= MIPS_SR_COP_1_BIT | MIPS_SR_FR; #else td->td_frame->sr |= MIPS_SR_COP_1_BIT; #endif td->td_md.md_flags |= MDTD_FPUSED; goto out; } #ifdef CPU_CNMIPS else if (cop == 2) { addr = trapframe->pc; if ((td->td_md.md_flags & MDTD_COP2USED) && (td->td_md.md_cop2owner == COP2_OWNER_KERNEL)) { if (td->td_md.md_cop2) octeon_cop2_save(td->td_md.md_cop2); else panic("COP2 was used in kernel mode but md_cop2 is NULL"); } if (td->td_md.md_ucop2 == NULL) { td->td_md.md_ucop2 = octeon_cop2_alloc_ctx(); if (td->td_md.md_ucop2 == NULL) panic("Failed to allocate userland COP2 context"); memset(td->td_md.md_ucop2, 0, sizeof(*td->td_md.md_ucop2)); } octeon_cop2_restore(td->td_md.md_ucop2); td->td_frame->sr |= MIPS_SR_COP_2_BIT; td->td_md.md_flags |= MDTD_COP2USED; td->td_md.md_cop2owner = COP2_OWNER_USERLAND; goto out; } #endif else { log_illegal_instruction("COPn_UNUSABLE", trapframe); i = SIGILL; /* only FPU instructions allowed */ break; } case T_FPE: #if !defined(SMP) && (defined(DDB) || defined(DEBUG)) trapDump("fpintr"); #else printf("FPU Trap: PC %#jx CR %x SR %x\n", (intmax_t)trapframe->pc, (unsigned)trapframe->cause, (unsigned)trapframe->sr); goto err; #endif case T_FPE + T_USER: if (!emulate_fp) { i = SIGFPE; addr = trapframe->pc; break; } MipsFPTrap(trapframe->sr, trapframe->cause, trapframe->pc); goto out; case T_OVFLOW + T_USER: i = SIGFPE; addr = trapframe->pc; break; case T_ADDR_ERR_LD: /* misaligned access */ case T_ADDR_ERR_ST: /* misaligned access */ #ifdef TRAP_DEBUG if (trap_debug) { printf("+++ ADDR_ERR: type = %d, badvaddr = %#jx\n", type, (intmax_t)trapframe->badvaddr); } #endif /* Only allow emulation on a user address */ if (allow_unaligned_acc && ((vm_offset_t)trapframe->badvaddr < VM_MAXUSER_ADDRESS)) { int mode; if (type == T_ADDR_ERR_LD) mode = VM_PROT_READ; else mode = VM_PROT_WRITE; access_type = emulate_unaligned_access(trapframe, mode); if (access_type != 0) return (trapframe->pc); } /* FALLTHROUGH */ case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */ if (td->td_pcb->pcb_onfault != NULL) { pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault; td->td_pcb->pcb_onfault = NULL; return (pc); } /* FALLTHROUGH */ default: err: #if !defined(SMP) && defined(DEBUG) trapDump("trap"); #endif #ifdef SMP printf("cpu:%d-", PCPU_GET(cpuid)); #endif printf("Trap cause = %d (%s - ", type, trap_type[type & (~T_USER)]); if (type & T_USER) printf("user mode)\n"); else printf("kernel mode)\n"); #ifdef TRAP_DEBUG if (trap_debug) printf("badvaddr = %#jx, pc = %#jx, ra = %#jx, sr = %#jx\n", (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra, (intmax_t)trapframe->sr); #endif #ifdef KDB if (debugger_on_trap) { kdb_why = KDB_WHY_TRAP; kdb_trap(type, 0, trapframe); kdb_why = KDB_WHY_UNSET; } #endif panic("trap"); } td->td_frame->pc = trapframe->pc; td->td_frame->cause = trapframe->cause; td->td_frame->badvaddr = trapframe->badvaddr; ksiginfo_init_trap(&ksi); ksi.ksi_signo = i; ksi.ksi_code = ucode; ksi.ksi_addr = (void *)addr; ksi.ksi_trapno = type; trapsignal(td, &ksi); out: /* * Note: we should only get here if returning to user mode.
*/ userret(td, trapframe); return (trapframe->pc); } #if !defined(SMP) && (defined(DDB) || defined(DEBUG)) void trapDump(char *msg) { register_t s; int i; s = intr_disable(); printf("trapDump(%s)\n", msg); for (i = 0; i < TRAPSIZE; i++) { if (trp == trapdebug) { trp = &trapdebug[TRAPSIZE - 1]; } else { trp--; } if (trp->cause == 0) break; printf("%s: ADR %jx PC %jx CR %jx SR %jx\n", trap_type[(trp->cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT], (intmax_t)trp->vadr, (intmax_t)trp->pc, (intmax_t)trp->cause, (intmax_t)trp->status); printf(" RA %jx SP %jx code %d\n", (intmax_t)trp->ra, (intmax_t)trp->sp, (int)trp->code); } intr_restore(s); } #endif /* * Return the resulting PC as if the branch was executed. */ uintptr_t MipsEmulateBranch(struct trapframe *framePtr, uintptr_t instPC, int fpcCSR, uintptr_t instptr) { InstFmt inst; register_t *regsPtr = (register_t *) framePtr; uintptr_t retAddr = 0; int condition; #define GetBranchDest(InstPtr, inst) \ (InstPtr + 4 + ((short)inst.IType.imm << 2)) if (instptr) { if (instptr < MIPS_KSEG0_START) inst.word = fuword32((void *)instptr); else inst = *(InstFmt *) instptr; } else { if ((vm_offset_t)instPC < MIPS_KSEG0_START) inst.word = fuword32((void *)instPC); else inst = *(InstFmt *) instPC; } switch ((int)inst.JType.op) { case OP_SPECIAL: switch ((int)inst.RType.func) { case OP_JR: case OP_JALR: retAddr = regsPtr[inst.RType.rs]; break; default: retAddr = instPC + 4; break; } break; case OP_BCOND: switch ((int)inst.IType.rt) { case OP_BLTZ: case OP_BLTZL: case OP_BLTZAL: case OP_BLTZALL: if ((int)(regsPtr[inst.RType.rs]) < 0) retAddr = GetBranchDest(instPC, inst); else retAddr = instPC + 8; break; case OP_BGEZ: case OP_BGEZL: case OP_BGEZAL: case OP_BGEZALL: if ((int)(regsPtr[inst.RType.rs]) >= 0) retAddr = GetBranchDest(instPC, inst); else retAddr = instPC + 8; break; case OP_TGEI: case OP_TGEIU: case OP_TLTI: case OP_TLTIU: case OP_TEQI: case OP_TNEI: retAddr = instPC + 4; /* Like syscall... 
*/ break; default: panic("MipsEmulateBranch: Bad branch cond"); } break; case OP_J: case OP_JAL: retAddr = (inst.JType.target << 2) | ((unsigned)(instPC + 4) & 0xF0000000); break; case OP_BEQ: case OP_BEQL: if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt]) retAddr = GetBranchDest(instPC, inst); else retAddr = instPC + 8; break; case OP_BNE: case OP_BNEL: if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt]) retAddr = GetBranchDest(instPC, inst); else retAddr = instPC + 8; break; case OP_BLEZ: case OP_BLEZL: if ((int)(regsPtr[inst.RType.rs]) <= 0) retAddr = GetBranchDest(instPC, inst); else retAddr = instPC + 8; break; case OP_BGTZ: case OP_BGTZL: if ((int)(regsPtr[inst.RType.rs]) > 0) retAddr = GetBranchDest(instPC, inst); else retAddr = instPC + 8; break; case OP_COP1: switch (inst.RType.rs) { case OP_BCx: case OP_BCy: if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE) condition = fpcCSR & MIPS_FPU_COND_BIT; else condition = !(fpcCSR & MIPS_FPU_COND_BIT); if (condition) retAddr = GetBranchDest(instPC, inst); else retAddr = instPC + 8; break; default: retAddr = instPC + 4; } break; default: retAddr = instPC + 4; } return (retAddr); } static void log_frame_dump(struct trapframe *frame) { log(LOG_ERR, "Trapframe Register Dump:\n"); log(LOG_ERR, "\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n", (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1); log(LOG_ERR, "\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n", (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3); #if defined(__mips_n32) || defined(__mips_n64) log(LOG_ERR, "\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n", (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7); log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n", (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3); #else log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n", (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3); log(LOG_ERR, "\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n", (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7); #endif log(LOG_ERR, "\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n", (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1); log(LOG_ERR, "\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n", (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5); log(LOG_ERR, "\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n", (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1); log(LOG_ERR, "\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n", (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra); log(LOG_ERR, "\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n", (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr); log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\n", (intmax_t)frame->cause, (intmax_t)frame->pc); } #ifdef TRAP_DEBUG static void trap_frame_dump(struct trapframe *frame) { printf("Trapframe Register Dump:\n"); printf("\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n", (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1); printf("\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n", (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3); #if defined(__mips_n32) || defined(__mips_n64) printf("\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n", (intmax_t)frame->a4, (intmax_t)frame->a5,
(intmax_t)frame->a6, (intmax_t)frame->a7); printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n", (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3); #else printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n", (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3); printf("\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n", (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7); #endif printf("\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n", (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1); printf("\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n", (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5); printf("\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n", (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1); printf("\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n", (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra); printf("\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n", (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr); printf("\tcause: %#jx\tpc: %#jx\n", (intmax_t)frame->cause, (intmax_t)frame->pc); } #endif static void get_mapping_info(vm_offset_t va, pd_entry_t **pdepp, pt_entry_t **ptepp) { pt_entry_t *ptep; pd_entry_t *pdep; struct proc *p = curproc; pdep = (&(p->p_vmspace->vm_pmap.pm_segtab[(va >> SEGSHIFT) & (NPDEPG - 1)])); if (*pdep) ptep = pmap_pte(&p->p_vmspace->vm_pmap, va); else ptep = (pt_entry_t *)0; *pdepp = pdep; *ptepp = ptep; } static void log_illegal_instruction(const char *msg, struct trapframe *frame) { pt_entry_t *ptep; pd_entry_t *pdep; unsigned int *addr, instr[4]; struct thread *td; struct proc *p; register_t pc; td = curthread; p = td->td_proc; #ifdef SMP printf("cpuid = %d\n", PCPU_GET(cpuid)); #endif pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0); log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx ra %#jx\n", msg, p->p_pid, (long)td->td_tid, p->p_comm, p->p_ucred ? p->p_ucred->cr_uid : -1, (intmax_t)pc, (intmax_t)frame->ra); /* log registers in trap frame */ log_frame_dump(frame); get_mapping_info((vm_offset_t)pc, &pdep, &ptep); /* * Dump a few words around faulting instruction, if the addres is * valid. */ addr = (unsigned int *)(intptr_t)pc; if ((pc & 3) == 0 && copyin(addr, instr, sizeof(instr)) == 0) { /* dump page table entry for faulting instruction */ log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n", (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0)); log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n", addr); log(LOG_ERR, "%08x %08x %08x %08x\n", instr[0], instr[1], instr[2], instr[3]); } else { log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n", (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? 
*ptep : 0)); } } static void log_bad_page_fault(char *msg, struct trapframe *frame, int trap_type) { pt_entry_t *ptep; pd_entry_t *pdep; unsigned int *addr, instr[4]; struct thread *td; struct proc *p; char *read_or_write; register_t pc; trap_type &= ~T_USER; td = curthread; p = td->td_proc; #ifdef SMP printf("cpuid = %d\n", PCPU_GET(cpuid)); #endif switch (trap_type) { case T_TLB_MOD: case T_TLB_ST_MISS: case T_ADDR_ERR_ST: read_or_write = "write"; break; case T_TLB_LD_MISS: case T_ADDR_ERR_LD: case T_BUS_ERR_IFETCH: read_or_write = "read"; break; default: read_or_write = "unknown"; } pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0); log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx got a %s fault " "(type %#x) at %#jx\n", msg, p->p_pid, (long)td->td_tid, p->p_comm, p->p_ucred ? p->p_ucred->cr_uid : -1, (intmax_t)pc, read_or_write, trap_type, (intmax_t)frame->badvaddr); /* log registers in trap frame */ log_frame_dump(frame); get_mapping_info((vm_offset_t)pc, &pdep, &ptep); /* * Dump a few words around faulting instruction, if the addres is * valid. */ addr = (unsigned int *)(intptr_t)pc; if ((pc & 3) == 0 && pc != frame->badvaddr && trap_type != T_BUS_ERR_IFETCH && copyin((caddr_t)(intptr_t)pc, instr, sizeof(instr)) == 0) { /* dump page table entry for faulting instruction */ log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n", (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0)); log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n", addr); log(LOG_ERR, "%08x %08x %08x %08x\n", instr[0], instr[1], instr[2], instr[3]); } else { log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n", (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0)); } get_mapping_info((vm_offset_t)frame->badvaddr, &pdep, &ptep); log(LOG_ERR, "Page table info for bad address %#jx: pde = %p, pte = %#jx\n", (intmax_t)frame->badvaddr, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0)); } /* * Unaligned load/store emulation */ static int mips_unaligned_load_store(struct trapframe *frame, int mode, register_t addr, register_t pc) { register_t *reg = (register_t *) frame; u_int32_t inst = *((u_int32_t *)(intptr_t)pc); register_t value_msb = 0, value = 0; unsigned size; /* * ADDR_ERR faults have higher priority than TLB * Miss faults. Therefore, it is necessary to * verify that the faulting address is a valid * virtual address within the process' address space * before trying to emulate the unaligned access. */ switch (MIPS_INST_OPCODE(inst)) { case OP_LHU: case OP_LH: case OP_SH: size = 2; break; case OP_LWU: case OP_LW: case OP_SW: size = 4; break; case OP_LD: case OP_SD: size = 8; break; default: printf("%s: unhandled opcode in address error: %#x\n", __func__, MIPS_INST_OPCODE(inst)); return (0); } if (!useracc((void *)rounddown2((vm_offset_t)addr, size), size * 2, mode)) return (0); /* * XXX * Handle LL/SC LLD/SCD. 
*/ switch (MIPS_INST_OPCODE(inst)) { case OP_LHU: KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction.")); lbu_macro(value_msb, addr); addr += 1; lbu_macro(value, addr); value |= value_msb << 8; reg[MIPS_INST_RT(inst)] = value; return (MIPS_LHU_ACCESS); case OP_LH: KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction.")); lb_macro(value_msb, addr); addr += 1; lbu_macro(value, addr); value |= value_msb << 8; reg[MIPS_INST_RT(inst)] = value; return (MIPS_LH_ACCESS); case OP_LWU: KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction.")); lwl_macro(value, addr); addr += 3; lwr_macro(value, addr); value &= 0xffffffff; reg[MIPS_INST_RT(inst)] = value; return (MIPS_LWU_ACCESS); case OP_LW: KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction.")); lwl_macro(value, addr); addr += 3; lwr_macro(value, addr); reg[MIPS_INST_RT(inst)] = value; return (MIPS_LW_ACCESS); #if defined(__mips_n32) || defined(__mips_n64) case OP_LD: KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction.")); ldl_macro(value, addr); addr += 7; ldr_macro(value, addr); reg[MIPS_INST_RT(inst)] = value; return (MIPS_LD_ACCESS); #endif case OP_SH: KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction.")); value = reg[MIPS_INST_RT(inst)]; value_msb = value >> 8; sb_macro(value_msb, addr); addr += 1; sb_macro(value, addr); return (MIPS_SH_ACCESS); case OP_SW: KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction.")); value = reg[MIPS_INST_RT(inst)]; swl_macro(value, addr); addr += 3; swr_macro(value, addr); return (MIPS_SW_ACCESS); #if defined(__mips_n32) || defined(__mips_n64) case OP_SD: KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction.")); value = reg[MIPS_INST_RT(inst)]; sdl_macro(value, addr); addr += 7; sdr_macro(value, addr); return (MIPS_SD_ACCESS); #endif } panic("%s: should not be reached.", __func__); } /* * XXX TODO: SMP? */ static struct timeval unaligned_lasterr; static int unaligned_curerr; static int unaligned_pps_log_limit = 4; SYSCTL_INT(_machdep, OID_AUTO, unaligned_log_pps_limit, CTLFLAG_RWTUN, &unaligned_pps_log_limit, 0, "limit number of userland unaligned log messages per second"); static int emulate_unaligned_access(struct trapframe *frame, int mode) { register_t pc; int access_type = 0; struct thread *td = curthread; struct proc *p = curproc; pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0); /* * Fall through if it's instruction fetch exception */ if (!((pc & 3) || (pc == frame->badvaddr))) { /* * Handle unaligned load and store */ /* * Return access type if the instruction was emulated. * Otherwise restore pc and fall through. */ access_type = mips_unaligned_load_store(frame, mode, frame->badvaddr, pc); if (access_type) { if (DELAYBRANCH(frame->cause)) frame->pc = MipsEmulateBranch(frame, frame->pc, 0, 0); else frame->pc += 4; if (ppsratecheck(&unaligned_lasterr, &unaligned_curerr, unaligned_pps_log_limit)) { /* XXX TODO: keep global/tid/pid counters? 
*/ log(LOG_INFO, "Unaligned %s: pid=%ld (%s), tid=%ld, " "pc=%#jx, badvaddr=%#jx\n", access_name[access_type - 1], (long) p->p_pid, p->p_comm, (long) td->td_tid, (intmax_t)pc, (intmax_t)frame->badvaddr); } } } return access_type; } Index: head/sys/mips/mips/vm_machdep.c =================================================================== --- head/sys/mips/mips/vm_machdep.c (revision 362121) +++ head/sys/mips/mips/vm_machdep.c (revision 362122) @@ -1,616 +1,611 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ * from: src/sys/i386/i386/vm_machdep.c,v 1.132.2.2 2000/08/26 04:19:26 yokota * JNPR: vm_machdep.c,v 1.8.2.2 2007/08/16 15:59:17 girish */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; if ((flags & RFPROC) == 0) return; /* It is assumed that the vm_thread_alloc called * cpu_thread_alloc() before cpu_fork is called. */ /* Point the pcb to the top of the stack */ pcb2 = td2->td_pcb; /* Copy td1's pcb, note that in this case * our pcb also includes the td_frame being copied * too. 
The older mips2 code did an additional copy * of the td_frame, for us that's not needed any * longer (this copy does them both) */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Point mdproc and then copy over td1's contents * md_proc is empty for MIPS */ td2->td_md.md_flags = td1->td_md.md_flags & MDTD_FPUSED; /* * Set up return-value registers as fork() libc stub expects. */ td2->td_frame->v0 = 0; td2->td_frame->v1 = 1; td2->td_frame->a3 = 0; if (td1 == PCPU_GET(fpcurthread)) MipsSaveCurFPState(td1); pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline; /* Make sp 64-bit aligned */ pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td2->td_pcb & ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ); pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return; pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td2; pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td2->td_frame; pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() & (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK); /* * FREEBSD_DEVELOPERS_FIXME: * Setup any other CPU-Specific registers (Not MIPS Standard) * and/or bits in other standard MIPS registers (if CPU-Specific) * that are needed. */ td2->td_md.md_tls = td1->td_md.md_tls; - td2->td_md.md_tls_tcb_offset = td1->td_md.md_tls_tcb_offset; + p2->p_md.md_tls_tcb_offset = td1->td_proc->p_md.md_tls_tcb_offset; td2->td_md.md_saved_intr = MIPS_SR_INT_IE; td2->td_md.md_spinlock_count = 1; #ifdef CPU_CNMIPS if (td1->td_md.md_flags & MDTD_COP2USED) { if (td1->td_md.md_cop2owner == COP2_OWNER_USERLAND) { if (td1->td_md.md_ucop2) octeon_cop2_save(td1->td_md.md_ucop2); else panic("cpu_fork: ucop2 is NULL but COP2 is enabled"); } else { if (td1->td_md.md_cop2) octeon_cop2_save(td1->td_md.md_cop2); else panic("cpu_fork: cop2 is NULL but COP2 is enabled"); } } if (td1->td_md.md_cop2) { td2->td_md.md_cop2 = octeon_cop2_alloc_ctx(); memcpy(td2->td_md.md_cop2, td1->td_md.md_cop2, sizeof(*td1->td_md.md_cop2)); } if (td1->td_md.md_ucop2) { td2->td_md.md_ucop2 = octeon_cop2_alloc_ctx(); memcpy(td2->td_md.md_ucop2, td1->td_md.md_ucop2, sizeof(*td1->td_md.md_ucop2)); } td2->td_md.md_cop2owner = td1->td_md.md_cop2owner; pcb2->pcb_context[PCB_REG_SR] |= MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX; /* Clear COP2 bits for userland & kernel */ td2->td_frame->sr &= ~MIPS_SR_COP_2_BIT; pcb2->pcb_context[PCB_REG_SR] &= ~MIPS_SR_COP_2_BIT; #endif } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. 
*/ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { /* * Note that the trap frame follows the args, so the function * is really called like this: func(arg, frame); */ td->td_pcb->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)func; td->td_pcb->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)arg; } void cpu_exit(struct thread *td) { } void cpu_thread_exit(struct thread *td) { if (PCPU_GET(fpcurthread) == td) PCPU_GET(fpcurthread) = (struct thread *)0; #ifdef CPU_CNMIPS if (td->td_md.md_cop2) memset(td->td_md.md_cop2, 0, sizeof(*td->td_md.md_cop2)); if (td->td_md.md_ucop2) memset(td->td_md.md_ucop2, 0, sizeof(*td->td_md.md_ucop2)); #endif } void cpu_thread_free(struct thread *td) { #ifdef CPU_CNMIPS if (td->td_md.md_cop2) octeon_cop2_free_ctx(td->td_md.md_cop2); if (td->td_md.md_ucop2) octeon_cop2_free_ctx(td->td_md.md_ucop2); td->td_md.md_cop2 = NULL; td->td_md.md_ucop2 = NULL; #endif } void cpu_thread_clean(struct thread *td) { } void cpu_thread_swapin(struct thread *td) { pt_entry_t *pte; int i; /* * The kstack may be at a different physical address now. * Cache the PTEs for the Kernel stack in the machine dependent * part of the thread struct so cpu_switch() can quickly map in * the pcb struct and kernel stack. */ for (i = 0; i < KSTACK_PAGES; i++) { pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE); td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK; } } void cpu_thread_swapout(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { pt_entry_t *pte; int i; KASSERT((td->td_kstack & (1 << PAGE_SHIFT)) == 0, ("kernel stack must be aligned.")); td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; td->td_frame = &td->td_pcb->pcb_regs; for (i = 0; i < KSTACK_PAGES; i++) { pte = pmap_pte(kernel_pmap, td->td_kstack + i * PAGE_SIZE); td->td_md.md_upte[i] = *pte & ~TLBLO_SWBITS_MASK; } } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *locr0 = td->td_frame; unsigned int code; int quad_syscall; code = locr0->v0; quad_syscall = 0; #if defined(__mips_n32) || defined(__mips_n64) #ifdef COMPAT_FREEBSD32 if (code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32)) quad_syscall = 1; #endif #else if (code == SYS___syscall) quad_syscall = 1; #endif if (code == SYS_syscall) code = locr0->a0; else if (code == SYS___syscall) { if (quad_syscall) code = _QUAD_LOWWORD ? locr0->a1 : locr0->a0; else code = locr0->a0; } switch (error) { case 0: if (quad_syscall && code != SYS_lseek) { /* * System call invoked through the * SYS___syscall interface but the * return value is really just 32 * bits. */ locr0->v0 = td->td_retval[0]; if (_QUAD_LOWWORD) locr0->v1 = td->td_retval[0]; locr0->a3 = 0; } else { locr0->v0 = td->td_retval[0]; locr0->v1 = td->td_retval[1]; locr0->a3 = 0; } break; case ERESTART: locr0->pc = td->td_pcb->pcb_tpc; break; case EJUSTRETURN: break; /* nothing to do */ default: if (quad_syscall && code != SYS_lseek) { locr0->v0 = error; if (_QUAD_LOWWORD) locr0->v1 = error; locr0->a3 = 1; } else { locr0->v0 = error; locr0->a3 = 1; } } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to the fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. */ void cpu_copy_thread(struct thread *td, struct thread *td0) { struct pcb *pcb2; /* Point the pcb to the top of the stack. 
pcb2 = td->td_pcb; /* * Copy the upcall pcb. This loads kernel regs. * Those not loaded individually below get their default * values here. * * XXXKSE It might be a good idea to simply skip this as * the values of the other registers may be unimportant. * This would remove any requirement for knowing the KSE * at this time (see the matching comment below for * more analysis) (need a good safe default). * In MIPS, the trapframe is the first element of the PCB * and gets copied when we copy the PCB. No separate copy * is needed. */ bcopy(td0->td_pcb, pcb2, sizeof(*pcb2)); /* * Set registers for trampoline to user mode. */ pcb2->pcb_context[PCB_REG_RA] = (register_t)(intptr_t)fork_trampoline; /* Make sp 64-bit aligned */ pcb2->pcb_context[PCB_REG_SP] = (register_t)(((vm_offset_t)td->td_pcb & ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ); pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return; pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td; pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td->td_frame; /* Don't set IE bit in SR. sched lock release will take care of it */ pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() & (MIPS_SR_PX | MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK); /* * FREEBSD_DEVELOPERS_FIXME: * Setup any other CPU-Specific registers (Not MIPS Standard) * that are needed. */ /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_intr = MIPS_SR_INT_IE; #if 0 /* Maybe we need to fix this? */ td->td_md.md_saved_sr = ( (MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT) | (MIPS_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX) | (MIPS_SR_INT_IE | MIPS_HARD_INT_MASK)); #endif + td->td_md.md_tls = NULL; } /* * Set the machine state for performing an upcall that starts * the entry function with the given argument. */ void cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf; register_t sp, sr; sp = (((intptr_t)stack->ss_sp + stack->ss_size) & ~(STACK_ALIGN - 1)) - CALLFRAME_SIZ; /* * Set the trap frame to point at the beginning of the uts * function. */ tf = td->td_frame; sr = tf->sr; bzero(tf, sizeof(struct trapframe)); tf->sp = sp; tf->sr = sr; tf->pc = (register_t)(intptr_t)entry; /* * MIPS ABI requires T9 to be the same as PC * in subroutine entry point */ tf->t9 = (register_t)(intptr_t)entry; tf->a0 = (register_t)(intptr_t)arg; /* * FREEBSD_DEVELOPERS_FIXME: * Setup any other CPU-Specific registers (Not MIPS Standard) * that are needed. */ } bool cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused) { return (true); } int cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused, int com __unused, void *data __unused) { return (EINVAL); } /* * Software interrupt handler for queued VM system processing.
*/ void swi_vm(void *dummy) { if (busdma_swi_pending) busdma_swi(); } int cpu_set_user_tls(struct thread *td, void *tls_base) { -#if defined(__mips_n64) && defined(COMPAT_FREEBSD32) - if (td->td_proc && SV_PROC_FLAG(td->td_proc, SV_ILP32)) - td->td_md.md_tls_tcb_offset = TLS_TP_OFFSET + TLS_TCB_SIZE32; - else -#endif - td->td_md.md_tls_tcb_offset = TLS_TP_OFFSET + TLS_TCB_SIZE; td->td_md.md_tls = (char*)tls_base; if (td == curthread && cpuinfo.userlocal_reg == true) { mips_wr_userlocal((unsigned long)tls_base + - td->td_md.md_tls_tcb_offset); + td->td_proc->p_md.md_tls_tcb_offset); } return (0); } #ifdef DDB #include #define DB_PRINT_REG(ptr, regname) \ db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->regname)) #define DB_PRINT_REG_ARRAY(ptr, arrname, regname) \ db_printf(" %-12s %p\n", #regname, (void *)(intptr_t)((ptr)->arrname[regname])) static void dump_trapframe(struct trapframe *trapframe) { db_printf("Trapframe at %p\n", trapframe); DB_PRINT_REG(trapframe, zero); DB_PRINT_REG(trapframe, ast); DB_PRINT_REG(trapframe, v0); DB_PRINT_REG(trapframe, v1); DB_PRINT_REG(trapframe, a0); DB_PRINT_REG(trapframe, a1); DB_PRINT_REG(trapframe, a2); DB_PRINT_REG(trapframe, a3); #if defined(__mips_n32) || defined(__mips_n64) DB_PRINT_REG(trapframe, a4); DB_PRINT_REG(trapframe, a5); DB_PRINT_REG(trapframe, a6); DB_PRINT_REG(trapframe, a7); DB_PRINT_REG(trapframe, t0); DB_PRINT_REG(trapframe, t1); DB_PRINT_REG(trapframe, t2); DB_PRINT_REG(trapframe, t3); #else DB_PRINT_REG(trapframe, t0); DB_PRINT_REG(trapframe, t1); DB_PRINT_REG(trapframe, t2); DB_PRINT_REG(trapframe, t3); DB_PRINT_REG(trapframe, t4); DB_PRINT_REG(trapframe, t5); DB_PRINT_REG(trapframe, t6); DB_PRINT_REG(trapframe, t7); #endif DB_PRINT_REG(trapframe, s0); DB_PRINT_REG(trapframe, s1); DB_PRINT_REG(trapframe, s2); DB_PRINT_REG(trapframe, s3); DB_PRINT_REG(trapframe, s4); DB_PRINT_REG(trapframe, s5); DB_PRINT_REG(trapframe, s6); DB_PRINT_REG(trapframe, s7); DB_PRINT_REG(trapframe, t8); DB_PRINT_REG(trapframe, t9); DB_PRINT_REG(trapframe, k0); DB_PRINT_REG(trapframe, k1); DB_PRINT_REG(trapframe, gp); DB_PRINT_REG(trapframe, sp); DB_PRINT_REG(trapframe, s8); DB_PRINT_REG(trapframe, ra); DB_PRINT_REG(trapframe, sr); DB_PRINT_REG(trapframe, mullo); DB_PRINT_REG(trapframe, mulhi); DB_PRINT_REG(trapframe, badvaddr); DB_PRINT_REG(trapframe, cause); DB_PRINT_REG(trapframe, pc); } DB_SHOW_COMMAND(pcb, ddb_dump_pcb) { struct thread *td; struct pcb *pcb; struct trapframe *trapframe; /* Determine which thread to examine. 
*/ if (have_addr) td = db_lookup_thread(addr, true); else td = curthread; pcb = td->td_pcb; db_printf("Thread %d at %p\n", td->td_tid, td); db_printf("PCB at %p\n", pcb); trapframe = &pcb->pcb_regs; dump_trapframe(trapframe); db_printf("PCB Context:\n"); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S0); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S1); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S2); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S3); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S4); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S5); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S6); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S7); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SP); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_S8); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_RA); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_SR); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_GP); DB_PRINT_REG_ARRAY(pcb, pcb_context, PCB_REG_PC); db_printf("PCB onfault = %p\n", pcb->pcb_onfault); db_printf("md_saved_intr = 0x%0lx\n", (long)td->td_md.md_saved_intr); db_printf("md_spinlock_count = %d\n", td->td_md.md_spinlock_count); if (td->td_frame != trapframe) { db_printf("td->td_frame %p is not the same as pcb_regs %p\n", td->td_frame, trapframe); } } /* * Dump the trapframe beginning at address specified by first argument. */ DB_SHOW_COMMAND(trapframe, ddb_dump_trapframe) { if (!have_addr) return; dump_trapframe((struct trapframe *)addr); } #endif /* DDB */
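
The userland-visible contract is unchanged by this diff: "rdhwr $rt, $29" (and the UserLocal register, where present) still yields the thread's TLS base plus an ABI-dependent TCB offset; what moves is only where that offset is stored, from the per-thread td_md.md_tls_tcb_offset to the per-process p_md.md_tls_tcb_offset. The standalone sketch below illustrates that arithmetic, mirroring the offset selection removed from cpu_set_user_tls(); the constant values and the helper name tls_tcb_offset() are assumptions for illustration only, not part of this patch or of the kernel headers.

/*
 * Standalone sketch (not part of the patch): the value the kernel exposes
 * through UserLocal / RDHWR $29 is the per-thread TLS base plus a TCB
 * offset that is now chosen once per process.  Constants are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define TLS_TP_OFFSET	0x7000			/* assumed MIPS thread-pointer bias */
#define TLS_TCB_SIZE	(2 * sizeof(uint64_t))	/* assumed native (n64) TCB size */
#define TLS_TCB_SIZE32	(2 * sizeof(uint32_t))	/* assumed 32-bit TCB size */

/* Chosen per process, as with the new struct mdproc field. */
static size_t
tls_tcb_offset(int ilp32_on_n64)
{
	return (TLS_TP_OFFSET + (ilp32_on_n64 ? TLS_TCB_SIZE32 : TLS_TCB_SIZE));
}

int
main(void)
{
	uint64_t tls_base = 0x120000000ULL;	/* example md_tls value */

	printf("n64 process : rdhwr $29 -> %#jx\n",
	    (uintmax_t)(tls_base + tls_tcb_offset(0)));
	printf("ilp32 on n64: rdhwr $29 -> %#jx\n",
	    (uintmax_t)(tls_base + tls_tcb_offset(1)));
	return (0);
}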