Index: head/sys/powerpc/aim/trap.c =================================================================== --- head/sys/powerpc/aim/trap.c (revision 281095) +++ head/sys/powerpc/aim/trap.c (nonexistent) @@ -1,752 +0,0 @@ -/*- - * Copyright (C) 1995, 1996 Wolfgang Solfrank. - * Copyright (C) 1995, 1996 TooLs GmbH. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by TooLs GmbH. - * 4. The name of TooLs GmbH may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $ - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static void trap_fatal(struct trapframe *frame); -static void printtrap(u_int vector, struct trapframe *frame, int isfatal, - int user); -static int trap_pfault(struct trapframe *frame, int user); -static int fix_unaligned(struct thread *td, struct trapframe *frame); -static int handle_onfault(struct trapframe *frame); -static void syscall(struct trapframe *frame); - -#ifdef __powerpc64__ - void handle_kernel_slb_spill(int, register_t, register_t); -static int handle_user_slb_spill(pmap_t pm, vm_offset_t addr); -extern int n_slbs; -#endif - -struct powerpc_exception { - u_int vector; - char *name; -}; - -#ifdef KDTRACE_HOOKS -#include - -int (*dtrace_invop_jump_addr)(struct trapframe *); -#endif - -static struct powerpc_exception powerpc_exceptions[] = { - { 0x0100, "system reset" }, - { 0x0200, "machine check" }, - { 0x0300, "data storage interrupt" }, - { 0x0380, "data segment exception" }, - { 0x0400, "instruction storage interrupt" }, - { 0x0480, "instruction segment exception" }, - { 0x0500, "external interrupt" }, - { 0x0600, "alignment" }, - { 0x0700, "program" }, - { 0x0800, "floating-point unavailable" }, - { 0x0900, "decrementer" }, - { 0x0c00, "system call" }, - { 0x0d00, "trace" }, - { 0x0e00, "floating-point assist" }, - { 0x0f00, "performance monitoring" }, - { 0x0f20, "altivec unavailable" }, - { 0x0f40, "vsx unavailable" }, - { 0x1000, "instruction tlb miss" }, - { 0x1100, "data load tlb miss" }, - { 0x1200, "data store tlb miss" }, - { 0x1300, "instruction breakpoint" }, - { 0x1400, 
"system management" }, - { 0x1600, "altivec assist" }, - { 0x1700, "thermal management" }, - { 0x2000, "run mode/trace" }, - { 0x3000, NULL } -}; - -static const char * -trapname(u_int vector) -{ - struct powerpc_exception *pe; - - for (pe = powerpc_exceptions; pe->vector != 0x3000; pe++) { - if (pe->vector == vector) - return (pe->name); - } - - return ("unknown"); -} - -void -trap(struct trapframe *frame) -{ - struct thread *td; - struct proc *p; -#ifdef KDTRACE_HOOKS - uint32_t inst; -#endif - int sig, type, user; - u_int ucode; - ksiginfo_t ksi; - - PCPU_INC(cnt.v_trap); - - td = curthread; - p = td->td_proc; - - type = ucode = frame->exc; - sig = 0; - user = frame->srr1 & PSL_PR; - - CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name, - trapname(type), user ? "user" : "kernel"); - -#ifdef KDTRACE_HOOKS - /* - * A trap can occur while DTrace executes a probe. Before - * executing the probe, DTrace blocks re-scheduling and sets - * a flag in its per-cpu flags to indicate that it doesn't - * want to fault. On returning from the probe, the no-fault - * flag is cleared and finally re-scheduling is enabled. - * - * If the DTrace kernel module has registered a trap handler, - * call it and if it returns non-zero, assume that it has - * handled the trap and modified the trap frame so that this - * function can return normally. - */ - if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0) - return; -#endif - - if (user) { - td->td_pticks = 0; - td->td_frame = frame; - if (td->td_ucred != p->p_ucred) - cred_update_thread(td); - - /* User Mode Traps */ - switch (type) { - case EXC_RUNMODETRC: - case EXC_TRC: - frame->srr1 &= ~PSL_SE; - sig = SIGTRAP; - ucode = TRAP_TRACE; - break; - -#ifdef __powerpc64__ - case EXC_ISE: - case EXC_DSE: - if (handle_user_slb_spill(&p->p_vmspace->vm_pmap, - (type == EXC_ISE) ? 
frame->srr0 : frame->dar) != 0){ - sig = SIGSEGV; - ucode = SEGV_MAPERR; - } - break; -#endif - case EXC_DSI: - case EXC_ISI: - sig = trap_pfault(frame, 1); - if (sig == SIGSEGV) - ucode = SEGV_MAPERR; - break; - - case EXC_SC: - syscall(frame); - break; - - case EXC_FPU: - KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU, - ("FPU already enabled for thread")); - enable_fpu(td); - break; - - case EXC_VEC: - KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC, - ("Altivec already enabled for thread")); - enable_vec(td); - break; - - case EXC_VSX: - KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX, - ("VSX already enabled for thread")); - if (!(td->td_pcb->pcb_flags & PCB_VEC)) - enable_vec(td); - if (!(td->td_pcb->pcb_flags & PCB_FPU)) - save_fpu(td); - td->td_pcb->pcb_flags |= PCB_VSX; - enable_fpu(td); - break; - - case EXC_VECAST_G4: - case EXC_VECAST_G5: - /* - * We get a VPU assist exception for IEEE mode - * vector operations on denormalized floats. - * Emulating this is a giant pain, so for now, - * just switch off IEEE mode and treat them as - * zero. - */ - - save_vec(td); - td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ; - enable_vec(td); - break; - - case EXC_ALI: - if (fix_unaligned(td, frame) != 0) { - sig = SIGBUS; - ucode = BUS_ADRALN; - } - else - frame->srr0 += 4; - break; - - case EXC_PGM: - /* Identify the trap reason */ - if (frame->srr1 & EXC_PGM_TRAP) { -#ifdef KDTRACE_HOOKS - inst = fuword32((const void *)frame->srr0); - if (inst == 0x0FFFDDDD && - dtrace_pid_probe_ptr != NULL) { - struct reg regs; - fill_regs(td, ®s); - (*dtrace_pid_probe_ptr)(®s); - break; - } -#endif - sig = SIGTRAP; - ucode = TRAP_BRKPT; - } else { - sig = ppc_instr_emulate(frame, td->td_pcb); - if (sig == SIGILL) { - if (frame->srr1 & EXC_PGM_PRIV) - ucode = ILL_PRVOPC; - else if (frame->srr1 & EXC_PGM_ILLEGAL) - ucode = ILL_ILLOPC; - } else if (sig == SIGFPE) - ucode = FPE_FLTINV; /* Punt for now, invalid operation. 
*/ - } - break; - - case EXC_MCHK: - /* - * Note that this may not be recoverable for the user - * process, depending on the type of machine check, - * but it at least prevents the kernel from dying. - */ - sig = SIGBUS; - ucode = BUS_OBJERR; - break; - - default: - trap_fatal(frame); - } - } else { - /* Kernel Mode Traps */ - - KASSERT(cold || td->td_ucred != NULL, - ("kernel trap doesn't have ucred")); - switch (type) { -#ifdef KDTRACE_HOOKS - case EXC_PGM: - if (frame->srr1 & EXC_PGM_TRAP) { - if (*(uint32_t *)frame->srr0 == EXC_DTRACE) { - if (dtrace_invop_jump_addr != NULL) { - dtrace_invop_jump_addr(frame); - return; - } - } - } - break; -#endif -#ifdef __powerpc64__ - case EXC_DSE: - if ((frame->dar & SEGMENT_MASK) == USER_ADDR) { - __asm __volatile ("slbmte %0, %1" :: - "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), - "r"(USER_SLB_SLBE)); - return; - } - break; -#endif - case EXC_DSI: - if (trap_pfault(frame, 0) == 0) - return; - break; - case EXC_MCHK: - if (handle_onfault(frame)) - return; - break; - default: - break; - } - trap_fatal(frame); - } - - if (sig != 0) { - if (p->p_sysent->sv_transtrap != NULL) - sig = (p->p_sysent->sv_transtrap)(sig, type); - ksiginfo_init_trap(&ksi); - ksi.ksi_signo = sig; - ksi.ksi_code = (int) ucode; /* XXX, not POSIX */ - /* ksi.ksi_addr = ? */ - ksi.ksi_trapno = type; - trapsignal(td, &ksi); - } - - userret(td, frame); -} - -static void -trap_fatal(struct trapframe *frame) -{ - - printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR)); -#ifdef KDB - if ((debugger_on_panic || kdb_active) && - kdb_trap(frame->exc, 0, frame)) - return; -#endif - panic("%s trap", trapname(frame->exc)); -} - -static void -printtrap(u_int vector, struct trapframe *frame, int isfatal, int user) -{ - - printf("\n"); - printf("%s %s trap:\n", isfatal ? "fatal" : "handled", - user ? 
"user" : "kernel"); - printf("\n"); - printf(" exception = 0x%x (%s)\n", vector, trapname(vector)); - switch (vector) { - case EXC_DSE: - case EXC_DSI: - printf(" virtual address = 0x%" PRIxPTR "\n", frame->dar); - printf(" dsisr = 0x%" PRIxPTR "\n", - frame->cpu.aim.dsisr); - break; - case EXC_ISE: - case EXC_ISI: - printf(" virtual address = 0x%" PRIxPTR "\n", frame->srr0); - break; - } - printf(" srr0 = 0x%" PRIxPTR "\n", frame->srr0); - printf(" srr1 = 0x%" PRIxPTR "\n", frame->srr1); - printf(" lr = 0x%" PRIxPTR "\n", frame->lr); - printf(" curthread = %p\n", curthread); - if (curthread != NULL) - printf(" pid = %d, comm = %s\n", - curthread->td_proc->p_pid, curthread->td_name); - printf("\n"); -} - -/* - * Handles a fatal fault when we have onfault state to recover. Returns - * non-zero if there was onfault recovery state available. - */ -static int -handle_onfault(struct trapframe *frame) -{ - struct thread *td; - faultbuf *fb; - - td = curthread; - fb = td->td_pcb->pcb_onfault; - if (fb != NULL) { - frame->srr0 = (*fb)[0]; - frame->fixreg[1] = (*fb)[1]; - frame->fixreg[2] = (*fb)[2]; - frame->fixreg[3] = 1; - frame->cr = (*fb)[3]; - bcopy(&(*fb)[4], &frame->fixreg[13], - 19 * sizeof(register_t)); - return (1); - } - return (0); -} - -int -cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa) -{ - struct proc *p; - struct trapframe *frame; - caddr_t params; - size_t argsz; - int error, n, i; - - p = td->td_proc; - frame = td->td_frame; - - sa->code = frame->fixreg[0]; - params = (caddr_t)(frame->fixreg + FIRSTARG); - n = NARGREG; - - if (sa->code == SYS_syscall) { - /* - * code is first argument, - * followed by actual args. - */ - sa->code = *(register_t *) params; - params += sizeof(register_t); - n -= 1; - } else if (sa->code == SYS___syscall) { - /* - * Like syscall, but code is a quad, - * so as to maintain quad alignment - * for the rest of the args. 
- */ - if (SV_PROC_FLAG(p, SV_ILP32)) { - params += sizeof(register_t); - sa->code = *(register_t *) params; - params += sizeof(register_t); - n -= 2; - } else { - sa->code = *(register_t *) params; - params += sizeof(register_t); - n -= 1; - } - } - - if (p->p_sysent->sv_mask) - sa->code &= p->p_sysent->sv_mask; - if (sa->code >= p->p_sysent->sv_size) - sa->callp = &p->p_sysent->sv_table[0]; - else - sa->callp = &p->p_sysent->sv_table[sa->code]; - - sa->narg = sa->callp->sy_narg; - - if (SV_PROC_FLAG(p, SV_ILP32)) { - argsz = sizeof(uint32_t); - - for (i = 0; i < n; i++) - sa->args[i] = ((u_register_t *)(params))[i] & - 0xffffffff; - } else { - argsz = sizeof(uint64_t); - - for (i = 0; i < n; i++) - sa->args[i] = ((u_register_t *)(params))[i]; - } - - if (sa->narg > n) - error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n, - (sa->narg - n) * argsz); - else - error = 0; - -#ifdef __powerpc64__ - if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) { - /* Expand the size of arguments copied from the stack */ - - for (i = sa->narg; i >= n; i--) - sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n]; - } -#endif - - if (error == 0) { - td->td_retval[0] = 0; - td->td_retval[1] = frame->fixreg[FIRSTARG + 1]; - } - return (error); -} - -#include "../../kern/subr_syscall.c" - -void -syscall(struct trapframe *frame) -{ - struct thread *td; - struct syscall_args sa; - int error; - - td = curthread; - td->td_frame = frame; - -#ifdef __powerpc64__ - /* - * Speculatively restore last user SLB segment, which we know is - * invalid already, since we are likely to do copyin()/copyout(). 
- */ - __asm __volatile ("slbmte %0, %1; isync" :: - "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); -#endif - - error = syscallenter(td, &sa); - syscallret(td, error, &sa); -} - -#ifdef __powerpc64__ -/* Handle kernel SLB faults -- runs in real mode, all seat belts off */ -void -handle_kernel_slb_spill(int type, register_t dar, register_t srr0) -{ - struct slb *slbcache; - uint64_t slbe, slbv; - uint64_t esid, addr; - int i; - - addr = (type == EXC_ISE) ? srr0 : dar; - slbcache = PCPU_GET(slb); - esid = (uintptr_t)addr >> ADDR_SR_SHFT; - slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; - - /* See if the hardware flushed this somehow (can happen in LPARs) */ - for (i = 0; i < n_slbs; i++) - if (slbcache[i].slbe == (slbe | (uint64_t)i)) - return; - - /* Not in the map, needs to actually be added */ - slbv = kernel_va_to_slbv(addr); - if (slbcache[USER_SLB_SLOT].slbe == 0) { - for (i = 0; i < n_slbs; i++) { - if (i == USER_SLB_SLOT) - continue; - if (!(slbcache[i].slbe & SLBE_VALID)) - goto fillkernslb; - } - - if (i == n_slbs) - slbcache[USER_SLB_SLOT].slbe = 1; - } - - /* Sacrifice a random SLB entry that is not the user entry */ - i = mftb() % n_slbs; - if (i == USER_SLB_SLOT) - i = (i+1) % n_slbs; - -fillkernslb: - /* Write new entry */ - slbcache[i].slbv = slbv; - slbcache[i].slbe = slbe | (uint64_t)i; - - /* Trap handler will restore from cache on exit */ -} - -static int -handle_user_slb_spill(pmap_t pm, vm_offset_t addr) -{ - struct slb *user_entry; - uint64_t esid; - int i; - - esid = (uintptr_t)addr >> ADDR_SR_SHFT; - - PMAP_LOCK(pm); - user_entry = user_va_to_slb_entry(pm, addr); - - if (user_entry == NULL) { - /* allocate_vsid auto-spills it */ - (void)allocate_user_vsid(pm, esid, 0); - } else { - /* - * Check that another CPU has not already mapped this. - * XXX: Per-thread SLB caches would be better. 
- */ - for (i = 0; i < pm->pm_slb_len; i++) - if (pm->pm_slb[i] == user_entry) - break; - - if (i == pm->pm_slb_len) - slb_insert_user(pm, user_entry); - } - PMAP_UNLOCK(pm); - - return (0); -} -#endif - -static int -trap_pfault(struct trapframe *frame, int user) -{ - vm_offset_t eva, va; - struct thread *td; - struct proc *p; - vm_map_t map; - vm_prot_t ftype; - int rv; - register_t user_sr; - - td = curthread; - p = td->td_proc; - if (frame->exc == EXC_ISI) { - eva = frame->srr0; - ftype = VM_PROT_EXECUTE; - if (frame->srr1 & SRR1_ISI_PFAULT) - ftype |= VM_PROT_READ; - } else { - eva = frame->dar; - if (frame->cpu.aim.dsisr & DSISR_STORE) - ftype = VM_PROT_WRITE; - else - ftype = VM_PROT_READ; - } - - if (user) { - map = &p->p_vmspace->vm_map; - } else { - if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) { - if (p->p_vmspace == NULL) - return (SIGSEGV); - - map = &p->p_vmspace->vm_map; - - user_sr = td->td_pcb->pcb_cpu.aim.usr_segm; - eva &= ADDR_PIDX | ADDR_POFF; - eva |= user_sr << ADDR_SR_SHFT; - } else { - map = kernel_map; - } - } - va = trunc_page(eva); - - if (map != kernel_map) { - /* - * Keep swapout from messing with us during this - * critical time. - */ - PROC_LOCK(p); - ++p->p_lock; - PROC_UNLOCK(p); - - /* Fault in the user page: */ - rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); - - PROC_LOCK(p); - --p->p_lock; - PROC_UNLOCK(p); - /* - * XXXDTRACE: add dtrace_doubletrap_func here? - */ - } else { - /* - * Don't have to worry about process locking or stacks in the - * kernel. - */ - rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); - } - - if (rv == KERN_SUCCESS) - return (0); - - if (!user && handle_onfault(frame)) - return (0); - - return (SIGSEGV); -} - -/* - * For now, this only deals with the particular unaligned access case - * that gcc tends to generate. Eventually it should handle all of the - * possibilities that can happen on a 32-bit PowerPC in big-endian mode. 
- */ - -static int -fix_unaligned(struct thread *td, struct trapframe *frame) -{ - struct thread *fputhread; - int indicator, reg; - double *fpr; - - indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr); - - switch (indicator) { - case EXC_ALI_LFD: - case EXC_ALI_STFD: - reg = EXC_ALI_RST(frame->cpu.aim.dsisr); - fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr; - fputhread = PCPU_GET(fputhread); - - /* Juggle the FPU to ensure that we've initialized - * the FPRs, and that their current state is in - * the PCB. - */ - if (fputhread != td) { - if (fputhread) - save_fpu(fputhread); - enable_fpu(td); - } - save_fpu(td); - - if (indicator == EXC_ALI_LFD) { - if (copyin((void *)frame->dar, fpr, - sizeof(double)) != 0) - return -1; - enable_fpu(td); - } else { - if (copyout(fpr, (void *)frame->dar, - sizeof(double)) != 0) - return -1; - } - return 0; - break; - } - - return -1; -} - Property changes on: head/sys/powerpc/aim/trap.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/powerpc/aim/machdep.c =================================================================== --- head/sys/powerpc/aim/machdep.c (revision 281095) +++ head/sys/powerpc/aim/machdep.c (revision 281096) @@ -1,972 +1,947 @@ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef __powerpc64__ #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int cold = 1; #ifdef __powerpc64__ extern int n_slbs; int cacheline_size = 128; #else int cacheline_size = 32; #endif int hw_direct_map = 1; extern void *ap_pcpu; struct pcpu __pcpu[MAXCPU]; static struct trapframe frame0; char machine[] = "powerpc"; SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, ""); static void cpu_startup(void *); SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size, CTLFLAG_RD, &cacheline_size, 0, ""); uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *); long Maxmem = 0; long realmem = 0; #ifndef __powerpc64__ struct bat battable[16]; #endif struct kva_md_info kmi; static void cpu_startup(void *dummy) { /* * 
Initialise the decrementer-based clock. */ decr_init(); /* * Good {morning,afternoon,evening,night}. */ cpu_setup(PCPU_GET(cpuid)); #ifdef PERFMON perfmon_init(); #endif printf("real memory = %ld (%ld MB)\n", ptoa(physmem), ptoa(physmem) / 1048576); realmem = physmem; if (bootverbose) printf("available KVA = %zd (%zd MB)\n", virtual_end - virtual_avail, (virtual_end - virtual_avail) / 1048576); /* * Display any holes after the first chunk of extended memory. */ if (bootverbose) { int indx; printf("Physical memory chunk(s):\n"); for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { vm_offset_t size1 = phys_avail[indx + 1] - phys_avail[indx]; #ifdef __powerpc64__ printf("0x%016lx - 0x%016lx, %ld bytes (%ld pages)\n", #else printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n", #endif phys_avail[indx], phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE); } } vm_ksubmap_init(&kmi); printf("avail memory = %ld (%ld MB)\n", ptoa(vm_cnt.v_free_count), ptoa(vm_cnt.v_free_count) / 1048576); /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); vm_pager_bufferinit(); } extern vm_offset_t __startkernel, __endkernel; extern unsigned char __bss_start[]; extern unsigned char __sbss_start[]; extern unsigned char __sbss_end[]; extern unsigned char _end[]; #ifndef __powerpc64__ /* Bits for running on 64-bit systems in 32-bit mode. 
*/ extern void *testppc64, *testppc64size; extern void *restorebridge, *restorebridgesize; extern void *rfid_patch, *rfi_patch1, *rfi_patch2; extern void *trapcode64; extern Elf_Addr _GLOBAL_OFFSET_TABLE_[]; #endif extern void *rstcode, *rstcodeend; extern void *trapcode, *trapcodeend; extern void *generictrap, *generictrap64; extern void *slbtrap, *slbtrapend; extern void *alitrap, *aliend; extern void *dsitrap, *dsiend; extern void *decrint, *decrsize; extern void *extint, *extsize; extern void *dblow, *dbend; extern void *imisstrap, *imisssize; extern void *dlmisstrap, *dlmisssize; extern void *dsmisstrap, *dsmisssize; uintptr_t powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp) { struct pcpu *pc; vm_offset_t startkernel, endkernel; size_t trap_offset, trapsize; vm_offset_t trap; void *kmdp; char *env; register_t msr, scratch; uint8_t *cache_check; int cacheline_warn; #ifndef __powerpc64__ int ppc64; #endif #ifdef DDB vm_offset_t ksym_start; vm_offset_t ksym_end; #endif kmdp = NULL; trap_offset = 0; cacheline_warn = 0; /* First guess at start/end kernel positions */ startkernel = __startkernel; endkernel = __endkernel; /* Check for ePAPR loader, which puts a magic value into r6 */ if (mdp == (void *)0x65504150) mdp = NULL; /* * Parse metadata if present and fetch parameters. Must be done * before console is inited so cninit gets the right value of * boothowto. 
*/ if (mdp != NULL) { preload_metadata = mdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp != NULL) { boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *); endkernel = ulmax(endkernel, MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t)); #ifdef DDB ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); db_fetch_ksymtab(ksym_start, ksym_end); #endif } } else { bzero(__sbss_start, __sbss_end - __sbss_start); bzero(__bss_start, _end - __bss_start); } /* Store boot environment state */ OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry); /* * Init params/tunables that can be overridden by the loader */ init_param1(); /* * Start initializing proc0 and thread0. */ proc_linkup0(&proc0, &thread0); thread0.td_frame = &frame0; /* * Set up per-cpu data. */ pc = __pcpu; pcpu_init(pc, 0, sizeof(struct pcpu)); pc->pc_curthread = &thread0; #ifdef __powerpc64__ __asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread)); #else __asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread)); #endif pc->pc_cpuid = 0; __asm __volatile("mtsprg 0, %0" :: "r"(pc)); /* * Init mutexes, which we use heavily in PMAP */ mutex_init(); /* * Install the OF client interface */ OF_bootstrap(); /* * Initialize the console before printing anything. */ cninit(); /* * Complain if there is no metadata. */ if (mdp == NULL || kmdp == NULL) { printf("powerpc_init: no loader metadata.\n"); } /* * Init KDB */ kdb_init(); /* Various very early CPU fix ups */ switch (mfpvr() >> 16) { /* * PowerPC 970 CPUs have a misfeature requested by Apple that * makes them pretend they have a 32-byte cacheline. Turn this * off before we measure the cacheline size. 
*/ case IBM970: case IBM970FX: case IBM970MP: case IBM970GX: scratch = mfspr(SPR_HID5); scratch &= ~HID5_970_DCBZ_SIZE_HI; mtspr(SPR_HID5, scratch); break; #ifdef __powerpc64__ case IBMPOWER7: case IBMPOWER7PLUS: case IBMPOWER8: case IBMPOWER8E: /* XXX: get from ibm,slb-size in device tree */ n_slbs = 32; break; #endif } /* * Initialize the interrupt tables and figure out our cache line * size and whether or not we need the 64-bit bridge code. */ /* * Disable translation in case the vector area hasn't been * mapped (G5). Note that no OFW calls can be made until * translation is re-enabled. */ msr = mfmsr(); mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI); /* * Measure the cacheline size using dcbz * * Use EXC_PGM as a playground. We are about to overwrite it * anyway, we know it exists, and we know it is cache-aligned. */ cache_check = (void *)EXC_PGM; for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++) cache_check[cacheline_size] = 0xff; __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory"); /* Find the first byte dcbz did not zero to get the cache line size */ for (cacheline_size = 0; cacheline_size < 0x100 && cache_check[cacheline_size] == 0; cacheline_size++); /* Work around psim bug */ if (cacheline_size == 0) { cacheline_warn = 1; cacheline_size = 32; } /* Make sure the kernel icache is valid before we go too much further */ __syncicache((caddr_t)startkernel, endkernel - startkernel); #ifndef __powerpc64__ /* * Figure out whether we need to use the 64 bit PMAP. This works by * executing an instruction that is only legal on 64-bit PPC (mtmsrd), * and setting ppc64 = 0 if that causes a trap. 
*/ ppc64 = 1; bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size); __syncicache((void *)EXC_PGM, (size_t)&testppc64size); __asm __volatile("\ mfmsr %0; \ mtsprg2 %1; \ \ mtmsrd %0; \ mfsprg2 %1;" : "=r"(scratch), "=r"(ppc64)); if (ppc64) cpu_features |= PPC_FEATURE_64; /* * Now copy restorebridge into all the handlers, if necessary, * and set up the trap tables. */ if (cpu_features & PPC_FEATURE_64) { /* Patch the two instances of rfi -> rfid */ bcopy(&rfid_patch,&rfi_patch1,4); #ifdef KDB /* rfi_patch2 is at the end of dbleave */ bcopy(&rfid_patch,&rfi_patch2,4); #endif } #else /* powerpc64 */ cpu_features |= PPC_FEATURE_64; #endif trapsize = (size_t)&trapcodeend - (size_t)&trapcode; /* * Copy generic handler into every possible trap. Special cases will get * different ones in a minute. */ for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20) bcopy(&trapcode, (void *)trap, trapsize); #ifndef __powerpc64__ if (cpu_features & PPC_FEATURE_64) { /* * Copy a code snippet to restore 32-bit bridge mode * to the top of every non-generic trap handler */ trap_offset += (size_t)&restorebridgesize; bcopy(&restorebridge, (void *)EXC_RST, trap_offset); bcopy(&restorebridge, (void *)EXC_DSI, trap_offset); bcopy(&restorebridge, (void *)EXC_ALI, trap_offset); bcopy(&restorebridge, (void *)EXC_PGM, trap_offset); bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset); bcopy(&restorebridge, (void *)EXC_TRC, trap_offset); bcopy(&restorebridge, (void *)EXC_BPT, trap_offset); } #endif bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend - (size_t)&rstcode); #ifdef KDB bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend - (size_t)&dblow); #endif bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend - 
(size_t)&alitrap); bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend - (size_t)&dsitrap); #ifdef __powerpc64__ /* Set TOC base so that the interrupt code can get at it */ *((void **)TRAP_GENTRAP) = &generictrap; *((register_t *)TRAP_TOCBASE) = toc; bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap); bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap); #else /* Set branch address for trap code */ if (cpu_features & PPC_FEATURE_64) *((void **)TRAP_GENTRAP) = &generictrap64; else *((void **)TRAP_GENTRAP) = &generictrap; *((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_; /* G2-specific TLB miss helper handlers */ bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize); bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize); bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize); #endif __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD); /* * Restore MSR */ mtmsr(msr); /* Warn if cachline size was not determined */ if (cacheline_warn == 1) { printf("WARNING: cacheline size undetermined, setting to 32\n"); } /* * Choose a platform module so we can get the physical memory map. */ platform_probe_and_attach(); /* * Initialise virtual memory. Use BUS_PROBE_GENERIC priority * in case the platform module had a better idea of what we * should do. */ if (cpu_features & PPC_FEATURE_64) pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC); else pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC); pmap_bootstrap(startkernel, endkernel); mtmsr(PSL_KERNSET & ~PSL_EE); /* * Initialize params/tunables that are derived from memsize */ init_param2(physmem); /* * Grab booted kernel's name */ env = kern_getenv("kernelname"); if (env != NULL) { strlcpy(kernelname, env, sizeof(kernelname)); freeenv(env); } /* * Finish setting up thread0. 
*/ thread0.td_pcb = (struct pcb *) ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~15UL); bzero((void *)thread0.td_pcb, sizeof(struct pcb)); pc->pc_curpcb = thread0.td_pcb; /* Initialise the message buffer. */ msgbufinit(msgbufp, msgbufsize); #ifdef KDB if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); #endif return (((uintptr_t)thread0.td_pcb - (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL); } void bzero(void *buf, size_t len) { caddr_t p; p = buf; while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) { *p++ = 0; len--; } while (len >= sizeof(u_long) * 8) { *(u_long*) p = 0; *((u_long*) p + 1) = 0; *((u_long*) p + 2) = 0; *((u_long*) p + 3) = 0; len -= sizeof(u_long) * 8; *((u_long*) p + 4) = 0; *((u_long*) p + 5) = 0; *((u_long*) p + 6) = 0; *((u_long*) p + 7) = 0; p += sizeof(u_long) * 8; } while (len >= sizeof(u_long)) { *(u_long*) p = 0; len -= sizeof(u_long); p += sizeof(u_long); } while (len) { *p++ = 0; len--; } } void cpu_boot(int howto) { } /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { /* TBD */ } /* * Shutdown the CPU as much as possible. */ void cpu_halt(void) { OF_exit(); } int ptrace_set_pc(struct thread *td, unsigned long addr) { struct trapframe *tf; tf = td->td_frame; tf->srr0 = (register_t)addr; return (0); } int ptrace_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 |= PSL_SE; return (0); } int ptrace_clear_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 &= ~PSL_SE; return (0); } void kdb_cpu_clear_singlestep(void) { kdb_frame->srr1 &= ~PSL_SE; } void kdb_cpu_set_singlestep(void) { kdb_frame->srr1 |= PSL_SE; } /* * Initialise a struct pcpu. 
*/ void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz) { #ifdef __powerpc64__ /* Copy the SLB contents from the current CPU */ memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb)); #endif } void spinlock_enter(void) { struct thread *td; register_t msr; td = curthread; if (td->td_md.md_spinlock_count == 0) { __asm __volatile("or 2,2,2"); /* Set high thread priority */ msr = intr_disable(); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_msr = msr; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t msr; td = curthread; critical_exit(); msr = td->td_md.md_saved_msr; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) { intr_restore(msr); __asm __volatile("or 6,6,6"); /* Set normal thread priority */ } } -int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */ - -int -db_trap_glue(struct trapframe *frame) -{ - if (!(frame->srr1 & PSL_PR) - && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC - || (frame->exc == EXC_PGM - && (frame->srr1 & 0x20000)) - || frame->exc == EXC_BPT - || frame->exc == EXC_DSI)) { - int type = frame->exc; - - /* Ignore DTrace traps. 
*/ - if (*(uint32_t *)frame->srr0 == EXC_DTRACE) - return (0); - if (type == EXC_PGM && (frame->srr1 & 0x20000)) { - type = T_BREAKPOINT; - } - return (kdb_trap(type, 0, frame)); - } - - return (0); -} - #ifndef __powerpc64__ uint64_t va_to_vsid(pmap_t pm, vm_offset_t va) { return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK); } #endif vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size) { return (pa); } /* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */ void flush_disable_caches(void) { register_t msr; register_t msscr0; register_t cache_reg; volatile uint32_t *memp; uint32_t temp; int i; int x; msr = mfmsr(); powerpc_sync(); mtmsr(msr & ~(PSL_EE | PSL_DR)); msscr0 = mfspr(SPR_MSSCR0); msscr0 &= ~MSSCR0_L2PFE; mtspr(SPR_MSSCR0, msscr0); powerpc_sync(); isync(); __asm__ __volatile__("dssall; sync"); powerpc_sync(); isync(); __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); /* Lock the L1 Data cache. */ mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF); powerpc_sync(); isync(); mtspr(SPR_LDSTCR, 0); /* * Perform this in two stages: Flush the cache starting in RAM, then do it * from ROM. 
*/ memp = (volatile uint32_t *)0x00000000; for (i = 0; i < 128 * 1024; i++) { temp = *memp; __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); memp += 32/sizeof(*memp); } memp = (volatile uint32_t *)0xfff00000; x = 0xfe; for (; x != 0xff;) { mtspr(SPR_LDSTCR, x); for (i = 0; i < 128; i++) { temp = *memp; __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); memp += 32/sizeof(*memp); } x = ((x << 1) | 1) & 0xff; } mtspr(SPR_LDSTCR, 0); cache_reg = mfspr(SPR_L2CR); if (cache_reg & L2CR_L2E) { cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450); mtspr(SPR_L2CR, cache_reg); powerpc_sync(); mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF); while (mfspr(SPR_L2CR) & L2CR_L2HWF) ; /* Busy wait for cache to flush */ powerpc_sync(); cache_reg &= ~L2CR_L2E; mtspr(SPR_L2CR, cache_reg); powerpc_sync(); mtspr(SPR_L2CR, cache_reg | L2CR_L2I); powerpc_sync(); while (mfspr(SPR_L2CR) & L2CR_L2I) ; /* Busy wait for L2 cache invalidate */ powerpc_sync(); } cache_reg = mfspr(SPR_L3CR); if (cache_reg & L3CR_L3E) { cache_reg &= ~(L3CR_L3IO | L3CR_L3DO); mtspr(SPR_L3CR, cache_reg); powerpc_sync(); mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF); while (mfspr(SPR_L3CR) & L3CR_L3HWF) ; /* Busy wait for cache to flush */ powerpc_sync(); cache_reg &= ~L3CR_L3E; mtspr(SPR_L3CR, cache_reg); powerpc_sync(); mtspr(SPR_L3CR, cache_reg | L3CR_L3I); powerpc_sync(); while (mfspr(SPR_L3CR) & L3CR_L3I) ; /* Busy wait for L3 cache invalidate */ powerpc_sync(); } mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE); powerpc_sync(); isync(); mtmsr(msr); } void cpu_sleep() { static u_quad_t timebase = 0; static register_t sprgs[4]; static register_t srrs[2]; jmp_buf resetjb; struct thread *fputd; struct thread *vectd; register_t hid0; register_t msr; register_t saved_msr; ap_pcpu = pcpup; PCPU_SET(restore, &resetjb); saved_msr = mfmsr(); fputd = PCPU_GET(fputhread); vectd = PCPU_GET(vecthread); if (fputd != NULL) save_fpu(fputd); if (vectd != NULL) save_vec(vectd); if (setjmp(resetjb) == 0) { sprgs[0] = mfspr(SPR_SPRG0); sprgs[1] = 
mfspr(SPR_SPRG1); sprgs[2] = mfspr(SPR_SPRG2); sprgs[3] = mfspr(SPR_SPRG3); srrs[0] = mfspr(SPR_SRR0); srrs[1] = mfspr(SPR_SRR1); timebase = mftb(); powerpc_sync(); flush_disable_caches(); hid0 = mfspr(SPR_HID0); hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP; powerpc_sync(); isync(); msr = mfmsr() | PSL_POW; mtspr(SPR_HID0, hid0); powerpc_sync(); while (1) mtmsr(msr); } mttb(timebase); PCPU_SET(curthread, curthread); PCPU_SET(curpcb, curthread->td_pcb); pmap_activate(curthread); powerpc_sync(); mtspr(SPR_SPRG0, sprgs[0]); mtspr(SPR_SPRG1, sprgs[1]); mtspr(SPR_SPRG2, sprgs[2]); mtspr(SPR_SPRG3, sprgs[3]); mtspr(SPR_SRR0, srrs[0]); mtspr(SPR_SRR1, srrs[1]); mtmsr(saved_msr); if (fputd == curthread) enable_fpu(curthread); if (vectd == curthread) enable_vec(curthread); powerpc_sync(); } Index: head/sys/powerpc/booke/trap.c =================================================================== --- head/sys/powerpc/booke/trap.c (revision 281095) +++ head/sys/powerpc/booke/trap.c (nonexistent) @@ -1,519 +0,0 @@ -/*- - * Copyright (C) 1995, 1996 Wolfgang Solfrank. - * Copyright (C) 1995, 1996 TooLs GmbH. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by TooLs GmbH. - * 4. The name of TooLs GmbH may not be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $ - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_fpu_emu.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#define FAULTBUF_LR 0 -#define FAULTBUF_R1 1 -#define FAULTBUF_R2 2 -#define FAULTBUF_CR 3 -#define FAULTBUF_CTR 4 -#define FAULTBUF_XER 5 -#define FAULTBUF_R13 6 - -static void trap_fatal(struct trapframe *frame); -static void printtrap(u_int vector, struct trapframe *frame, int isfatal, - int user); -static int trap_pfault(struct trapframe *frame, int user); -static int fix_unaligned(struct thread *td, struct trapframe *frame); -static int handle_onfault(struct trapframe *frame); -static void syscall(struct trapframe *frame); - -struct powerpc_exception { - u_int vector; - char *name; -}; - -static struct powerpc_exception powerpc_exceptions[] = { - { EXC_CRIT, "critical input" }, - { EXC_MCHK, "machine check" }, - { EXC_DSI, "data storage interrupt" }, - { EXC_ISI, 
"instruction storage interrupt" }, - { EXC_EXI, "external interrupt" }, - { EXC_ALI, "alignment" }, - { EXC_PGM, "program" }, - { EXC_SC, "system call" }, - { EXC_APU, "auxiliary proc unavailable" }, - { EXC_DECR, "decrementer" }, - { EXC_FIT, "fixed-interval timer" }, - { EXC_WDOG, "watchdog timer" }, - { EXC_DTMISS, "data tlb miss" }, - { EXC_ITMISS, "instruction tlb miss" }, - { EXC_DEBUG, "debug" }, - { EXC_PERF, "performance monitoring" }, - { EXC_LAST, NULL } -}; - -static const char * -trapname(u_int vector) -{ - struct powerpc_exception *pe; - - for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) { - if (pe->vector == vector) - return (pe->name); - } - - return ("unknown"); -} - -void -trap(struct trapframe *frame) -{ - struct thread *td; - struct proc *p; - int sig, type, user; - ksiginfo_t ksi; - -#ifdef KDB - if (kdb_active) { - kdb_reenter(); - return; - } -#endif - - PCPU_INC(cnt.v_trap); - - td = curthread; - p = td->td_proc; - - type = frame->exc; - sig = 0; - user = (frame->srr1 & PSL_PR) ? 1 : 0; - - CTR3(KTR_TRAP, "trap: %s type=%s (%s)", p->p_comm, - trapname(type), user ? 
"user" : "kernel"); - - if (user) { - td->td_frame = frame; - if (td->td_ucred != p->p_ucred) - cred_update_thread(td); - - /* User Mode Traps */ - switch (type) { - case EXC_DSI: - case EXC_ISI: - sig = trap_pfault(frame, 1); - break; - - case EXC_SC: - syscall(frame); - break; - - case EXC_ALI: - if (fix_unaligned(td, frame) != 0) - sig = SIGBUS; - else - frame->srr0 += 4; - break; - - case EXC_DEBUG: /* Single stepping */ - mtspr(SPR_DBSR, mfspr(SPR_DBSR)); - frame->srr1 &= ~PSL_DE; - frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM || DBCR0_IC); - sig = SIGTRAP; - break; - - case EXC_PGM: /* Program exception */ - sig = ppc_instr_emulate(frame, td->td_pcb); - break; - - default: - trap_fatal(frame); - } - } else { - /* Kernel Mode Traps */ - KASSERT(cold || td->td_ucred != NULL, - ("kernel trap doesn't have ucred")); - - switch (type) { - case EXC_DEBUG: - mtspr(SPR_DBSR, mfspr(SPR_DBSR)); - kdb_trap(frame->exc, 0, frame); - return; - - case EXC_DSI: - if (trap_pfault(frame, 0) == 0) - return; - break; - - case EXC_MCHK: - if (handle_onfault(frame)) - return; - break; -#ifdef KDB - case EXC_PGM: - if (frame->cpu.booke.esr & ESR_PTR) - kdb_trap(EXC_PGM, 0, frame); - return; -#endif - default: - break; - } - trap_fatal(frame); - } - - if (sig != 0) { - if (p->p_sysent->sv_transtrap != NULL) - sig = (p->p_sysent->sv_transtrap)(sig, type); - ksiginfo_init_trap(&ksi); - ksi.ksi_signo = sig; - ksi.ksi_code = type; /* XXX, not POSIX */ - /* ksi.ksi_addr = ? */ - ksi.ksi_trapno = type; - trapsignal(td, &ksi); - } - - userret(td, frame); -} - -static void -trap_fatal(struct trapframe *frame) -{ - - printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR)); -#ifdef KDB - if ((debugger_on_panic || kdb_active) && - kdb_trap(frame->exc, 0, frame)) - return; -#endif - panic("%s trap", trapname(frame->exc)); -} - -static void -printtrap(u_int vector, struct trapframe *frame, int isfatal, int user) -{ - register_t va = 0; - - printf("\n"); - printf("%s %s trap:\n", isfatal ? 
"fatal" : "handled", - user ? "user" : "kernel"); - printf("\n"); - printf(" exception = 0x%x (%s)\n", vector, trapname(vector)); - - switch (vector) { - case EXC_DTMISS: - case EXC_DSI: - va = frame->dar; - break; - - case EXC_ITMISS: - case EXC_ISI: - va = frame->srr0; - break; - } - - printf(" virtual address = 0x%08x\n", va); - printf(" srr0 = 0x%08x\n", frame->srr0); - printf(" srr1 = 0x%08x\n", frame->srr1); - printf(" curthread = %p\n", curthread); - if (curthread != NULL) - printf(" pid = %d, comm = %s\n", - curthread->td_proc->p_pid, curthread->td_proc->p_comm); - printf("\n"); -} - -/* - * Handles a fatal fault when we have onfault state to recover. Returns - * non-zero if there was onfault recovery state available. - */ -static int -handle_onfault(struct trapframe *frame) -{ - struct thread *td; - faultbuf *fb; - - td = curthread; - fb = td->td_pcb->pcb_onfault; - if (fb != NULL) { - frame->srr0 = (*fb)[FAULTBUF_LR]; - frame->fixreg[1] = (*fb)[FAULTBUF_R1]; - frame->fixreg[2] = (*fb)[FAULTBUF_R2]; - frame->fixreg[3] = 1; - frame->cr = (*fb)[FAULTBUF_CR]; - frame->ctr = (*fb)[FAULTBUF_CTR]; - frame->xer = (*fb)[FAULTBUF_XER]; - bcopy(&(*fb)[FAULTBUF_R13], &frame->fixreg[13], - 19 * sizeof(register_t)); - return (1); - } - return (0); -} - -int -cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa) -{ - struct proc *p; - struct trapframe *frame; - caddr_t params; - int error, n; - - p = td->td_proc; - frame = td->td_frame; - - sa->code = frame->fixreg[0]; - params = (caddr_t)(frame->fixreg + FIRSTARG); - n = NARGREG; - - if (sa->code == SYS_syscall) { - /* - * code is first argument, - * followed by actual args. - */ - sa->code = *(u_int *) params; - params += sizeof(register_t); - n -= 1; - } else if (sa->code == SYS___syscall) { - /* - * Like syscall, but code is a quad, - * so as to maintain quad alignment - * for the rest of the args. 
- */ - params += sizeof(register_t); - sa->code = *(u_int *) params; - params += sizeof(register_t); - n -= 2; - } - - if (p->p_sysent->sv_mask) - sa->code &= p->p_sysent->sv_mask; - if (sa->code >= p->p_sysent->sv_size) - sa->callp = &p->p_sysent->sv_table[0]; - else - sa->callp = &p->p_sysent->sv_table[sa->code]; - sa->narg = sa->callp->sy_narg; - - bcopy(params, sa->args, n * sizeof(register_t)); - if (sa->narg > n) { - error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n, - (sa->narg - n) * sizeof(register_t)); - } else - error = 0; - - if (error == 0) { - td->td_retval[0] = 0; - td->td_retval[1] = frame->fixreg[FIRSTARG + 1]; - } - return (error); -} - -#include "../../kern/subr_syscall.c" - -void -syscall(struct trapframe *frame) -{ - struct thread *td; - struct syscall_args sa; - int error; - - td = curthread; - td->td_frame = frame; - - error = syscallenter(td, &sa); - syscallret(td, error, &sa); -} - -static int -trap_pfault(struct trapframe *frame, int user) -{ - vm_offset_t eva, va; - struct thread *td; - struct proc *p; - vm_map_t map; - vm_prot_t ftype; - int rv; - - td = curthread; - p = td->td_proc; - - if (frame->exc == EXC_ISI) { - eva = frame->srr0; - ftype = VM_PROT_READ | VM_PROT_EXECUTE; - - } else { - eva = frame->dar; - if (frame->cpu.booke.esr & ESR_ST) - ftype = VM_PROT_WRITE; - else - ftype = VM_PROT_READ; - } - - if (user) { - KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL")); - map = &p->p_vmspace->vm_map; - } else { - if (eva < VM_MAXUSER_ADDRESS) { - - if (p->p_vmspace == NULL) - return (SIGSEGV); - - map = &p->p_vmspace->vm_map; - - } else { - map = kernel_map; - } - } - va = trunc_page(eva); - - if (map != kernel_map) { - /* - * Keep swapout from messing with us during this - * critical time. 
- */ - PROC_LOCK(p); - ++p->p_lock; - PROC_UNLOCK(p); - - /* Fault in the user page: */ - rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); - - PROC_LOCK(p); - --p->p_lock; - PROC_UNLOCK(p); - } else { - /* - * Don't have to worry about process locking or stacks in the - * kernel. - */ - rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); - } - - if (rv == KERN_SUCCESS) - return (0); - - if (!user && handle_onfault(frame)) - return (0); - - return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV); -} - -/* - * For now, this only deals with the particular unaligned access case - * that gcc tends to generate. Eventually it should handle all of the - * possibilities that can happen on a 32-bit PowerPC in big-endian mode. - */ - -static int -fix_unaligned(struct thread *td, struct trapframe *frame) -{ -#if 0 - struct thread *fputhread; - int indicator, reg; - double *fpr; - - indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr); - - switch (indicator) { - case EXC_ALI_LFD: - case EXC_ALI_STFD: - reg = EXC_ALI_RST(frame->dsisr); - fpr = &td->td_pcb->pcb_fpu.fpr[reg]; - fputhread = PCPU_GET(fputhread); - /* Juggle the FPU to ensure that we've initialized - * the FPRs, and that their current state is in - * the PCB. 
- */ - if (fputhread != td) { - if (fputhread) - save_fpu(fputhread); - enable_fpu(td); - } - save_fpu(td); - - if (indicator == EXC_ALI_LFD) { - if (copyin((void *)frame->dar, fpr, - sizeof(double)) != 0) - return -1; - enable_fpu(td); - } else { - if (copyout(fpr, (void *)frame->dar, - sizeof(double)) != 0) - return -1; - } - return 0; - break; - } - -#endif - return (-1); -} - -#ifdef KDB -int db_trap_glue(struct trapframe *); -int -db_trap_glue(struct trapframe *tf) -{ - if (!(tf->srr1 & PSL_PR)) - return (kdb_trap(tf->exc, 0, tf)); - return (0); -} -#endif Property changes on: head/sys/powerpc/booke/trap.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/powerpc/powerpc/trap.c =================================================================== --- head/sys/powerpc/powerpc/trap.c (nonexistent) +++ head/sys/powerpc/powerpc/trap.c (revision 281096) @@ -0,0 +1,816 @@ +/*- + * Copyright (C) 1995, 1996 Wolfgang Solfrank. + * Copyright (C) 1995, 1996 TooLs GmbH. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by TooLs GmbH. + * 4. The name of TooLs GmbH may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $ + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FAULTBUF_LR 0 +#define FAULTBUF_R1 1 +#define FAULTBUF_R2 2 +#define FAULTBUF_CR 3 +#define FAULTBUF_R13 6 + +static void trap_fatal(struct trapframe *frame); +static void printtrap(u_int vector, struct trapframe *frame, int isfatal, + int user); +static int trap_pfault(struct trapframe *frame, int user); +static int fix_unaligned(struct thread *td, struct trapframe *frame); +static int handle_onfault(struct trapframe *frame); +static void syscall(struct trapframe *frame); + +#ifdef __powerpc64__ + void handle_kernel_slb_spill(int, register_t, register_t); +static int handle_user_slb_spill(pmap_t pm, vm_offset_t addr); +extern int n_slbs; +#endif + +struct powerpc_exception { + u_int vector; + char *name; +}; + +#ifdef KDTRACE_HOOKS +#include + +int 
(*dtrace_invop_jump_addr)(struct trapframe *); +#endif + +static struct powerpc_exception powerpc_exceptions[] = { + { EXC_CRIT, "critical input" }, + { EXC_RST, "system reset" }, + { EXC_MCHK, "machine check" }, + { EXC_DSI, "data storage interrupt" }, + { EXC_DSE, "data segment exception" }, + { EXC_ISI, "instruction storage interrupt" }, + { EXC_ISE, "instruction segment exception" }, + { EXC_EXI, "external interrupt" }, + { EXC_ALI, "alignment" }, + { EXC_PGM, "program" }, + { EXC_FPU, "floating-point unavailable" }, + { EXC_APU, "auxiliary proc unavailable" }, + { EXC_DECR, "decrementer" }, + { EXC_FIT, "fixed-interval timer" }, + { EXC_WDOG, "watchdog timer" }, + { EXC_SC, "system call" }, + { EXC_TRC, "trace" }, + { EXC_FPA, "floating-point assist" }, + { EXC_DEBUG, "debug" }, + { EXC_PERF, "performance monitoring" }, + { EXC_VEC, "altivec unavailable" }, + { EXC_VSX, "vsx unavailable" }, + { EXC_ITMISS, "instruction tlb miss" }, + { EXC_DLMISS, "data load tlb miss" }, + { EXC_DSMISS, "data store tlb miss" }, + { EXC_BPT, "instruction breakpoint" }, + { EXC_SMI, "system management" }, + { EXC_VECAST_G4, "altivec assist" }, + { EXC_THRM, "thermal management" }, + { EXC_RUNMODETRC, "run mode/trace" }, + { EXC_LAST, NULL } +}; + +static const char * +trapname(u_int vector) +{ + struct powerpc_exception *pe; + + for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) { + if (pe->vector == vector) + return (pe->name); + } + + return ("unknown"); +} + +void +trap(struct trapframe *frame) +{ + struct thread *td; + struct proc *p; +#ifdef KDTRACE_HOOKS + uint32_t inst; +#endif + int sig, type, user; + u_int ucode; + ksiginfo_t ksi; + + PCPU_INC(cnt.v_trap); + + td = curthread; + p = td->td_proc; + + type = ucode = frame->exc; + sig = 0; + user = frame->srr1 & PSL_PR; + + CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name, + trapname(type), user ? "user" : "kernel"); + +#ifdef KDTRACE_HOOKS + /* + * A trap can occur while DTrace executes a probe. 
Before + * executing the probe, DTrace blocks re-scheduling and sets + * a flag in its per-cpu flags to indicate that it doesn't + * want to fault. On returning from the probe, the no-fault + * flag is cleared and finally re-scheduling is enabled. + * + * If the DTrace kernel module has registered a trap handler, + * call it and if it returns non-zero, assume that it has + * handled the trap and modified the trap frame so that this + * function can return normally. + */ + if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0) + return; +#endif + + if (user) { + td->td_pticks = 0; + td->td_frame = frame; + if (td->td_ucred != p->p_ucred) + cred_update_thread(td); + + /* User Mode Traps */ + switch (type) { + case EXC_RUNMODETRC: + case EXC_TRC: + frame->srr1 &= ~PSL_SE; + sig = SIGTRAP; + ucode = TRAP_TRACE; + break; + +#ifdef __powerpc64__ + case EXC_ISE: + case EXC_DSE: + if (handle_user_slb_spill(&p->p_vmspace->vm_pmap, + (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){ + sig = SIGSEGV; + ucode = SEGV_MAPERR; + } + break; +#endif + case EXC_DSI: + case EXC_ISI: + sig = trap_pfault(frame, 1); + if (sig == SIGSEGV) + ucode = SEGV_MAPERR; + break; + + case EXC_SC: + syscall(frame); + break; + + case EXC_FPU: + KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU, + ("FPU already enabled for thread")); + enable_fpu(td); + break; + + case EXC_VEC: + KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC, + ("Altivec already enabled for thread")); + enable_vec(td); + break; + + case EXC_VSX: + KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX, + ("VSX already enabled for thread")); + if (!(td->td_pcb->pcb_flags & PCB_VEC)) + enable_vec(td); + if (!(td->td_pcb->pcb_flags & PCB_FPU)) + save_fpu(td); + td->td_pcb->pcb_flags |= PCB_VSX; + enable_fpu(td); + break; + + case EXC_VECAST_G4: + case EXC_VECAST_G5: + /* + * We get a VPU assist exception for IEEE mode + * vector operations on denormalized floats. 
+ * Emulating this is a giant pain, so for now, + * just switch off IEEE mode and treat them as + * zero. + */ + + save_vec(td); + td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ; + enable_vec(td); + break; + + case EXC_ALI: + if (fix_unaligned(td, frame) != 0) { + sig = SIGBUS; + ucode = BUS_ADRALN; + } + else + frame->srr0 += 4; + break; + + case EXC_DEBUG: /* Single stepping */ + mtspr(SPR_DBSR, mfspr(SPR_DBSR)); + frame->srr1 &= ~PSL_DE; + frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM || DBCR0_IC); + sig = SIGTRAP; + ucode = TRAP_TRACE; + break; + + case EXC_PGM: + /* Identify the trap reason */ +#ifdef AIM + if (frame->srr1 & EXC_PGM_TRAP) { +#else + if (frame->cpu.booke.esr & ESR_PTR) { +#endif +#ifdef KDTRACE_HOOKS + inst = fuword32((const void *)frame->srr0); + if (inst == 0x0FFFDDDD && + dtrace_pid_probe_ptr != NULL) { + struct reg regs; + fill_regs(td, ®s); + (*dtrace_pid_probe_ptr)(®s); + break; + } +#endif + sig = SIGTRAP; + ucode = TRAP_BRKPT; + } else { + sig = ppc_instr_emulate(frame, td->td_pcb); + if (sig == SIGILL) { + if (frame->srr1 & EXC_PGM_PRIV) + ucode = ILL_PRVOPC; + else if (frame->srr1 & EXC_PGM_ILLEGAL) + ucode = ILL_ILLOPC; + } else if (sig == SIGFPE) + ucode = FPE_FLTINV; /* Punt for now, invalid operation. */ + } + break; + + case EXC_MCHK: + /* + * Note that this may not be recoverable for the user + * process, depending on the type of machine check, + * but it at least prevents the kernel from dying. 
+ */ + sig = SIGBUS; + ucode = BUS_OBJERR; + break; + + default: + trap_fatal(frame); + } + } else { + /* Kernel Mode Traps */ + + KASSERT(cold || td->td_ucred != NULL, + ("kernel trap doesn't have ucred")); + switch (type) { +#ifdef KDTRACE_HOOKS + case EXC_PGM: + if (frame->srr1 & EXC_PGM_TRAP) { + if (*(uint32_t *)frame->srr0 == EXC_DTRACE) { + if (dtrace_invop_jump_addr != NULL) { + dtrace_invop_jump_addr(frame); + return; + } + } + } + break; +#endif +#ifdef __powerpc64__ + case EXC_DSE: + if ((frame->dar & SEGMENT_MASK) == USER_ADDR) { + __asm __volatile ("slbmte %0, %1" :: + "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), + "r"(USER_SLB_SLBE)); + return; + } + break; +#endif + case EXC_DSI: + if (trap_pfault(frame, 0) == 0) + return; + break; + case EXC_MCHK: + if (handle_onfault(frame)) + return; + break; + default: + break; + } + trap_fatal(frame); + } + + if (sig != 0) { + if (p->p_sysent->sv_transtrap != NULL) + sig = (p->p_sysent->sv_transtrap)(sig, type); + ksiginfo_init_trap(&ksi); + ksi.ksi_signo = sig; + ksi.ksi_code = (int) ucode; /* XXX, not POSIX */ + /* ksi.ksi_addr = ? */ + ksi.ksi_trapno = type; + trapsignal(td, &ksi); + } + + userret(td, frame); +} + +static void +trap_fatal(struct trapframe *frame) +{ + + printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR)); +#ifdef KDB + if ((debugger_on_panic || kdb_active) && + kdb_trap(frame->exc, 0, frame)) + return; +#endif + panic("%s trap", trapname(frame->exc)); +} + +static void +printtrap(u_int vector, struct trapframe *frame, int isfatal, int user) +{ + + printf("\n"); + printf("%s %s trap:\n", isfatal ? "fatal" : "handled", + user ? 
"user" : "kernel"); + printf("\n"); + printf(" exception = 0x%x (%s)\n", vector, trapname(vector)); + switch (vector) { + case EXC_DTMISS: + case EXC_DSE: + case EXC_DSI: + printf(" virtual address = 0x%" PRIxPTR "\n", frame->dar); + printf(" dsisr = 0x%" PRIxPTR "\n", + frame->cpu.aim.dsisr); + break; + case EXC_ITMISS: + case EXC_ISE: + case EXC_ISI: + printf(" virtual address = 0x%" PRIxPTR "\n", frame->srr0); + break; + } + printf(" srr0 = 0x%" PRIxPTR "\n", frame->srr0); + printf(" srr1 = 0x%" PRIxPTR "\n", frame->srr1); + printf(" lr = 0x%" PRIxPTR "\n", frame->lr); + printf(" curthread = %p\n", curthread); + if (curthread != NULL) + printf(" pid = %d, comm = %s\n", + curthread->td_proc->p_pid, curthread->td_name); + printf("\n"); +} + +/* + * Handles a fatal fault when we have onfault state to recover. Returns + * non-zero if there was onfault recovery state available. + */ +static int +handle_onfault(struct trapframe *frame) +{ + struct thread *td; + faultbuf *fb; + + td = curthread; + fb = td->td_pcb->pcb_onfault; + if (fb != NULL) { + frame->srr0 = (*fb)[FAULTBUF_LR]; + frame->fixreg[1] = (*fb)[FAULTBUF_R1]; + frame->fixreg[2] = (*fb)[FAULTBUF_R2]; + frame->fixreg[3] = 1; + frame->cr = (*fb)[FAULTBUF_CR]; + bcopy(&(*fb)[FAULTBUF_R13], &frame->fixreg[13], + 19 * sizeof(register_t)); + return (1); + } + return (0); +} + +int +cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa) +{ + struct proc *p; + struct trapframe *frame; + caddr_t params; + size_t argsz; + int error, n, i; + + p = td->td_proc; + frame = td->td_frame; + + sa->code = frame->fixreg[0]; + params = (caddr_t)(frame->fixreg + FIRSTARG); + n = NARGREG; + + if (sa->code == SYS_syscall) { + /* + * code is first argument, + * followed by actual args. + */ + sa->code = *(register_t *) params; + params += sizeof(register_t); + n -= 1; + } else if (sa->code == SYS___syscall) { + /* + * Like syscall, but code is a quad, + * so as to maintain quad alignment + * for the rest of the args. 
+ */ + if (SV_PROC_FLAG(p, SV_ILP32)) { + params += sizeof(register_t); + sa->code = *(register_t *) params; + params += sizeof(register_t); + n -= 2; + } else { + sa->code = *(register_t *) params; + params += sizeof(register_t); + n -= 1; + } + } + + if (p->p_sysent->sv_mask) + sa->code &= p->p_sysent->sv_mask; + if (sa->code >= p->p_sysent->sv_size) + sa->callp = &p->p_sysent->sv_table[0]; + else + sa->callp = &p->p_sysent->sv_table[sa->code]; + + sa->narg = sa->callp->sy_narg; + + if (SV_PROC_FLAG(p, SV_ILP32)) { + argsz = sizeof(uint32_t); + + for (i = 0; i < n; i++) + sa->args[i] = ((u_register_t *)(params))[i] & + 0xffffffff; + } else { + argsz = sizeof(uint64_t); + + for (i = 0; i < n; i++) + sa->args[i] = ((u_register_t *)(params))[i]; + } + + if (sa->narg > n) + error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n, + (sa->narg - n) * argsz); + else + error = 0; + +#ifdef __powerpc64__ + if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) { + /* Expand the size of arguments copied from the stack */ + + for (i = sa->narg; i >= n; i--) + sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n]; + } +#endif + + if (error == 0) { + td->td_retval[0] = 0; + td->td_retval[1] = frame->fixreg[FIRSTARG + 1]; + } + return (error); +} + +#include "../../kern/subr_syscall.c" + +void +syscall(struct trapframe *frame) +{ + struct thread *td; + struct syscall_args sa; + int error; + + td = curthread; + td->td_frame = frame; + +#ifdef __powerpc64__ + /* + * Speculatively restore last user SLB segment, which we know is + * invalid already, since we are likely to do copyin()/copyout(). 
	 */
	__asm __volatile ("slbmte %0, %1; isync" ::
	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

	error = syscallenter(td, &sa);
	syscallret(td, error, &sa);
}

#ifdef __powerpc64__
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	/* Instruction-segment faults miss on the PC (SRR0); data-segment
	 * faults miss on the data address (DAR). */
	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		/*
		 * Prefer any invalid entry other than the user slot.
		 * NOTE(review): if none is free, the user slot's slbe is
		 * set to 1 -- non-zero but without SLBE_VALID --
		 * apparently to reserve the slot so this branch is not
		 * re-entered; confirm against slb.c's conventions.
		 */
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}

/*
 * Handle a user-mode SLB miss on 'addr': allocate a VSID for the segment
 * if the pmap has never mapped it, or re-insert the existing entry into
 * the pmap's SLB cache.  Always returns 0.
 */
static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}
#endif

/*
 * Resolve a page fault through the VM system.  'user' is non-zero when
 * the fault came from user mode.  Returns 0 if the fault was serviced
 * (or recovered through onfault state for kernel faults), otherwise the
 * signal to deliver (SIGSEGV).
 */
static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t eva, va;
	struct thread *td;
	struct proc *p;
	vm_map_t map;
	vm_prot_t ftype;
	int rv;
#ifdef AIM
	register_t user_sr;
#endif

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		/* Instruction fault: faulting address is the PC; SRR1
		 * distinguishes a plain page fault (add read access). */
		eva = frame->srr0;
		ftype = VM_PROT_EXECUTE;
		if (frame->srr1 & SRR1_ISI_PFAULT)
			ftype |= VM_PROT_READ;
	} else {
		/* Data fault: the MD status register says whether the
		 * access was a store. */
		eva = frame->dar;
#ifdef BOOKE
		if (frame->cpu.booke.esr & ESR_ST)
#else
		if (frame->cpu.aim.dsisr & DSISR_STORE)
#endif
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	if (user) {
		KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));
		map = &p->p_vmspace->vm_map;
	} else {
#ifdef BOOKE
		if (eva < VM_MAXUSER_ADDRESS) {
#else
		/*
		 * On AIM, a kernel-mode fault in the USER_ADDR segment is
		 * a copyin()/copyout()-style access through the user
		 * segment window: fault it against the process's map.
		 */
		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
#endif
			if (p->p_vmspace == NULL)
				return (SIGSEGV);

			map = &p->p_vmspace->vm_map;

#ifdef AIM
			/* Translate the window address back into the real
			 * user segment recorded in the PCB. */
			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
			eva &= ADDR_PIDX | ADDR_POFF;
			eva |= user_sr << ADDR_SR_SHFT;
#endif
		} else {
			map = kernel_map;
		}
	}
	va = trunc_page(eva);

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
		/*
		 * XXXDTRACE: add dtrace_doubletrap_func here?
		 */
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
+ */ + rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); + } + + if (rv == KERN_SUCCESS) + return (0); + + if (!user && handle_onfault(frame)) + return (0); + + return (SIGSEGV); +} + +/* + * For now, this only deals with the particular unaligned access case + * that gcc tends to generate. Eventually it should handle all of the + * possibilities that can happen on a 32-bit PowerPC in big-endian mode. + */ + +static int +fix_unaligned(struct thread *td, struct trapframe *frame) +{ + struct thread *fputhread; + int indicator, reg; + double *fpr; + + indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr); + + switch (indicator) { + case EXC_ALI_LFD: + case EXC_ALI_STFD: + reg = EXC_ALI_RST(frame->cpu.aim.dsisr); + fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr; + fputhread = PCPU_GET(fputhread); + + /* Juggle the FPU to ensure that we've initialized + * the FPRs, and that their current state is in + * the PCB. + */ + if (fputhread != td) { + if (fputhread) + save_fpu(fputhread); + enable_fpu(td); + } + save_fpu(td); + + if (indicator == EXC_ALI_LFD) { + if (copyin((void *)frame->dar, fpr, + sizeof(double)) != 0) + return (-1); + enable_fpu(td); + } else { + if (copyout(fpr, (void *)frame->dar, + sizeof(double)) != 0) + return (-1); + } + return (0); + break; + } + + return (-1); +} + +#ifdef KDB +int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */ + +int +db_trap_glue(struct trapframe *frame) +{ + if (!(frame->srr1 & PSL_PR) + && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC + || (frame->exc == EXC_PGM + && (frame->srr1 & 0x20000)) + || frame->exc == EXC_BPT + || frame->exc == EXC_DSI)) { + int type = frame->exc; + + /* Ignore DTrace traps. 
		 */
		if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
			return (0);
		if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
			/*
			 * Program exception with SRR1 bit 0x20000 set --
			 * presumably a trap instruction; report it to the
			 * debugger as a breakpoint.  NOTE(review): confirm
			 * the bit's meaning against the SRR1 definitions.
			 */
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}
#endif

Property changes on: head/sys/powerpc/powerpc/trap.c
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property