Index: head/sys/powerpc/booke/booke_machdep.c =================================================================== --- head/sys/powerpc/booke/booke_machdep.c (revision 339513) +++ head/sys/powerpc/booke/booke_machdep.c (revision 339514) @@ -1,446 +1,454 @@ /*- * Copyright (C) 2006-2012 Semihalf * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #ifdef DEBUG #define debugf(fmt, args...) printf(fmt, ##args) #else #define debugf(fmt, args...) #endif extern unsigned char _etext[]; extern unsigned char _edata[]; extern unsigned char __bss_start[]; extern unsigned char __sbss_start[]; extern unsigned char __sbss_end[]; extern unsigned char _end[]; extern vm_offset_t __endkernel; /* * Bootinfo is passed to us by legacy loaders. Save the address of the * structure to handle backward compatibility. 
*/ uint32_t *bootinfo; void print_kernel_section_addr(void); void print_kenv(void); uintptr_t booke_init(u_long, u_long); void ivor_setup(void); extern void *interrupt_vector_base; extern void *int_critical_input; extern void *int_machine_check; extern void *int_data_storage; extern void *int_instr_storage; extern void *int_external_input; extern void *int_alignment; extern void *int_fpu; extern void *int_program; extern void *int_syscall; extern void *int_decrementer; extern void *int_fixed_interval_timer; extern void *int_watchdog; extern void *int_data_tlb_error; extern void *int_inst_tlb_error; extern void *int_debug; extern void *int_debug_ed; extern void *int_vec; extern void *int_vecast; +#ifdef __SPE__ +extern void *int_spe_fpdata; +extern void *int_spe_fpround; +#endif #ifdef HWPMC_HOOKS extern void *int_performance_counter; #endif #define SET_TRAP(ivor, handler) \ KASSERT(((uintptr_t)(&handler) & ~0xffffUL) == \ ((uintptr_t)(&interrupt_vector_base) & ~0xffffUL), \ ("Handler " #handler " too far from interrupt vector base")); \ mtspr(ivor, (uintptr_t)(&handler) & 0xffffUL); uintptr_t powerpc_init(vm_offset_t fdt, vm_offset_t, vm_offset_t, void *mdp, uint32_t mdp_cookie); void booke_cpu_init(void); void booke_cpu_init(void) { cpu_features |= PPC_FEATURE_BOOKE; psl_kernset = PSL_CE | PSL_ME | PSL_EE; #ifdef __powerpc64__ psl_kernset |= PSL_CM; #endif psl_userset = psl_kernset | PSL_PR; #ifdef __powerpc64__ psl_userset32 = psl_userset & ~PSL_CM; #endif psl_userstatic = ~(PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1); pmap_mmu_install(MMU_TYPE_BOOKE, BUS_PROBE_GENERIC); } void ivor_setup(void) { mtspr(SPR_IVPR, ((uintptr_t)&interrupt_vector_base) & ~0xffffUL); SET_TRAP(SPR_IVOR0, int_critical_input); SET_TRAP(SPR_IVOR1, int_machine_check); SET_TRAP(SPR_IVOR2, int_data_storage); SET_TRAP(SPR_IVOR3, int_instr_storage); SET_TRAP(SPR_IVOR4, int_external_input); SET_TRAP(SPR_IVOR5, int_alignment); SET_TRAP(SPR_IVOR6, int_program); SET_TRAP(SPR_IVOR8, int_syscall); SET_TRAP(SPR_IVOR10, int_decrementer); SET_TRAP(SPR_IVOR11, int_fixed_interval_timer); SET_TRAP(SPR_IVOR12, int_watchdog); SET_TRAP(SPR_IVOR13, int_data_tlb_error); SET_TRAP(SPR_IVOR14, int_inst_tlb_error); SET_TRAP(SPR_IVOR15, int_debug); #ifdef HWPMC_HOOKS SET_TRAP(SPR_IVOR35, int_performance_counter); #endif switch ((mfpvr() >> 16) & 0xffff) { case FSL_E6500: SET_TRAP(SPR_IVOR32, int_vec); SET_TRAP(SPR_IVOR33, int_vecast); /* FALLTHROUGH */ case FSL_E500mc: case FSL_E5500: SET_TRAP(SPR_IVOR7, int_fpu); SET_TRAP(SPR_IVOR15, int_debug_ed); break; case FSL_E500v1: case FSL_E500v2: SET_TRAP(SPR_IVOR32, int_vec); +#ifdef __SPE__ + SET_TRAP(SPR_IVOR33, int_spe_fpdata); + SET_TRAP(SPR_IVOR34, int_spe_fpround); +#endif break; } #ifdef __powerpc64__ /* Set 64-bit interrupt mode. */ mtspr(SPR_EPCR, mfspr(SPR_EPCR) | EPCR_ICM); #endif } static int booke_check_for_fdt(uint32_t arg1, vm_offset_t *dtbp) { void *ptr; int fdt_size; if (arg1 % 8 != 0) return (-1); ptr = (void *)pmap_early_io_map(arg1, PAGE_SIZE); if (fdt_check_header(ptr) != 0) return (-1); /* * Read FDT total size from the header of FDT. * This for sure hits within first page which is * already mapped. */ fdt_size = fdt_totalsize((void *)ptr); /* * Ok, arg1 points to FDT, so we need to map it in. 
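/*
 * Illustrative sketch, not part of this commit: on these Book-E cores the
 * effective address of vector n is formed from the upper bits held in IVPR
 * and the 16-byte-aligned offset programmed into IVORn, which is why
 * SET_TRAP() above asserts that every handler lies in the same 64KB region
 * as interrupt_vector_base.
 */
#if 0	/* illustration only */
static uintptr_t
ivor_effective_address(uintptr_t ivpr, uintptr_t ivorn)
{
	/*
	 * IVPR supplies the upper bits, IVORn the offset within the 64KB
	 * region (the hardware ignores the low four bits of the offset).
	 */
	return ((ivpr & ~0xffffUL) | (ivorn & 0xffffUL));
}
#endif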
* First, unmap this page and then map FDT again with full size */ pmap_early_io_unmap((vm_offset_t)ptr, PAGE_SIZE); ptr = (void *)pmap_early_io_map(arg1, fdt_size); *dtbp = (vm_offset_t)ptr; return (0); } uintptr_t booke_init(u_long arg1, u_long arg2) { uintptr_t ret; void *mdp; vm_offset_t dtbp, end; end = (uintptr_t)_end; dtbp = (vm_offset_t)NULL; /* Set up TLB initially */ bootinfo = NULL; bzero(__sbss_start, __sbss_end - __sbss_start); bzero(__bss_start, _end - __bss_start); tlb1_init(); /* * Handle the various ways we can get loaded and started: * - FreeBSD's loader passes the pointer to the metadata * in arg1, with arg2 undefined. arg1 has a value that's * relative to the kernel's link address (i.e. larger * than 0xc0000000). * - Juniper's loader passes the metadata pointer in arg2 * and sets arg1 to zero. This is to signal that the * loader maps the kernel and starts it at its link * address (unlike the FreeBSD loader). * - U-Boot passes the standard argc and argv parameters * in arg1 and arg2 (resp). arg1 is between 1 and some * relatively small number, such as 64K. arg2 is the * physical address of the argv vector. * - ePAPR loaders pass an FDT blob in r3 (arg1) and the magic hex * string 0x45504150 ('EPAP') in r6 (which has been lost by now). * r4 (arg2) is supposed to be set to zero, but is not always. */ if (arg1 == 0) /* Juniper loader */ mdp = (void *)arg2; else if (booke_check_for_fdt(arg1, &dtbp) == 0) { /* ePAPR */ end = roundup(end, 8); memmove((void *)end, (void *)dtbp, fdt_totalsize((void *)dtbp)); dtbp = end; end += fdt_totalsize((void *)dtbp); __endkernel = end; mdp = NULL; } else if (arg1 > (uintptr_t)btext) /* FreeBSD loader */ mdp = (void *)arg1; else /* U-Boot */ mdp = NULL; /* Default to 32 byte cache line size. */ switch ((mfpvr()) >> 16) { case FSL_E500mc: case FSL_E5500: case FSL_E6500: cacheline_size = 64; break; } /* * Last element is a magic cookie that indicates that the metadata * pointer is meaningful. */ ret = powerpc_init(dtbp, 0, 0, mdp, (mdp == NULL) ? 0 : 0xfb5d104d); /* Enable caches */ booke_enable_l1_cache(); booke_enable_l2_cache(); booke_enable_bpred(); return (ret); } #define RES_GRANULE cacheline_size extern uintptr_t tlb0_miss_locks[]; /* Initialise a struct pcpu. */ void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz) { pcpu->pc_booke.tid_next = TID_MIN; #ifdef SMP uintptr_t *ptr; int words_per_gran = RES_GRANULE / sizeof(uintptr_t); ptr = &tlb0_miss_locks[cpuid * words_per_gran]; pcpu->pc_booke.tlb_lock = ptr; *ptr = TLB_UNLOCKED; *(ptr + 1) = 0; /* recurse counter */ #endif } /* Shutdown the CPU as much as possible. 
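/*
 * Illustrative sketch, not part of this commit: each CPU owns one
 * RES_GRANULE-sized (cache-line-sized) slot of tlb0_miss_locks[];
 * cpu_pcpu_init() above points pc_booke.tlb_lock at it, with word 0 holding
 * TLB_UNLOCKED or the owning thread and word 1 the TLB-miss recursion count,
 * matching the RES_LOCK/RES_RECURSE offsets used by TLB_LOCK/TLB_UNLOCK in
 * trap_subr.S.
 */
#if 0	/* illustration only */
struct tlb_miss_lock_slot {
	uintptr_t	owner;		/* TLB_UNLOCKED or curthread */
	uintptr_t	recurse;	/* nested TLB-miss depth */
	/* remainder of the cache line is padding */
};
#endif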
*/ void cpu_halt(void) { mtmsr(mfmsr() & ~(PSL_CE | PSL_EE | PSL_ME | PSL_DE)); while (1) ; } int ptrace_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 |= PSL_DE; tf->cpu.booke.dbcr0 |= (DBCR0_IDM | DBCR0_IC); return (0); } int ptrace_clear_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 &= ~PSL_DE; tf->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC); return (0); } void kdb_cpu_clear_singlestep(void) { register_t r; r = mfspr(SPR_DBCR0); mtspr(SPR_DBCR0, r & ~DBCR0_IC); kdb_frame->srr1 &= ~PSL_DE; } void kdb_cpu_set_singlestep(void) { register_t r; r = mfspr(SPR_DBCR0); mtspr(SPR_DBCR0, r | DBCR0_IC | DBCR0_IDM); kdb_frame->srr1 |= PSL_DE; } Index: head/sys/powerpc/booke/spe.c =================================================================== --- head/sys/powerpc/booke/spe.c (revision 339513) +++ head/sys/powerpc/booke/spe.c (revision 339514) @@ -1,183 +1,634 @@ /*- * Copyright (C) 1996 Wolfgang Solfrank. * Copyright (C) 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $NetBSD: fpu.c,v 1.5 2001/07/22 11:29:46 wiz Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include +#include #include #include +#include +#include +#include + +void spe_handle_fpdata(struct trapframe *); +void spe_handle_fpround(struct trapframe *); +static int spe_emu_instr(uint32_t, struct fpemu *, struct fpn **, uint32_t *); + static void save_vec_int(struct thread *td) { int msr; struct pcb *pcb; pcb = td->td_pcb; /* * Temporarily re-enable the vector unit during the save */ msr = mfmsr(); mtmsr(msr | PSL_VEC); isync(); /* * Save the vector registers and SPEFSCR to the PCB */ #define EVSTDW(n) __asm ("evstdw %1,0(%0)" \ :: "b"(pcb->pcb_vec.vr[n]), "n"(n)); EVSTDW(0); EVSTDW(1); EVSTDW(2); EVSTDW(3); EVSTDW(4); EVSTDW(5); EVSTDW(6); EVSTDW(7); EVSTDW(8); EVSTDW(9); EVSTDW(10); EVSTDW(11); EVSTDW(12); EVSTDW(13); EVSTDW(14); EVSTDW(15); EVSTDW(16); EVSTDW(17); EVSTDW(18); EVSTDW(19); EVSTDW(20); EVSTDW(21); EVSTDW(22); EVSTDW(23); EVSTDW(24); EVSTDW(25); EVSTDW(26); EVSTDW(27); EVSTDW(28); EVSTDW(29); EVSTDW(30); EVSTDW(31); #undef EVSTDW __asm ( "evxor 0,0,0\n" "evaddumiaaw 0,0\n" - "evstdd 0,0(%0)" :: "b"(&pcb->pcb_vec.vr[17][0])); + "evstdd 0,0(%0)" :: "b"(&pcb->pcb_vec.spare[0])); pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR); /* * Disable vector unit again */ isync(); mtmsr(msr); } void enable_vec(struct thread *td) { int msr; struct pcb *pcb; struct trapframe *tf; pcb = td->td_pcb; tf = trapframe(td); /* * Save the thread's SPE CPU number, and set the CPU's current * vector thread */ td->td_pcb->pcb_veccpu = PCPU_GET(cpuid); PCPU_SET(vecthread, td); /* * Enable the vector unit for when the thread returns from the * exception. If this is the first time the unit has been used by * the thread, initialise the vector registers and VSCR to 0, and * set the flag to indicate that the vector unit is in use. */ tf->srr1 |= PSL_VEC; if (!(pcb->pcb_flags & PCB_VEC)) { memset(&pcb->pcb_vec, 0, sizeof pcb->pcb_vec); pcb->pcb_flags |= PCB_VEC; + pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR); } /* * Temporarily enable the vector unit so the registers * can be restored. */ msr = mfmsr(); mtmsr(msr | PSL_VEC); - isync(); /* Restore SPEFSCR and ACC. Use %r0 as the scratch for ACC. */ mtspr(SPR_SPEFSCR, pcb->pcb_vec.vscr); __asm __volatile("evldd 0, 0(%0); evmra 0,0\n" - :: "b"(&pcb->pcb_vec.vr[17][0])); + :: "b"(&pcb->pcb_vec.spare[0])); /* * The lower half of each register will be restored on trap return. Use * %r0 as a scratch register, and restore it last. */ #define EVLDW(n) __asm __volatile("evldw 0, 0(%0); evmergehilo "#n",0,"#n \ :: "b"(&pcb->pcb_vec.vr[n])); EVLDW(1); EVLDW(2); EVLDW(3); EVLDW(4); EVLDW(5); EVLDW(6); EVLDW(7); EVLDW(8); EVLDW(9); EVLDW(10); EVLDW(11); EVLDW(12); EVLDW(13); EVLDW(14); EVLDW(15); EVLDW(16); EVLDW(17); EVLDW(18); EVLDW(19); EVLDW(20); EVLDW(21); EVLDW(22); EVLDW(23); EVLDW(24); EVLDW(25); EVLDW(26); EVLDW(27); EVLDW(28); EVLDW(29); EVLDW(30); EVLDW(31); EVLDW(0); #undef EVLDW isync(); mtmsr(msr); } void save_vec(struct thread *td) { struct pcb *pcb; pcb = td->td_pcb; save_vec_int(td); /* * Clear the current vec thread and pcb's CPU id * XXX should this be left clear to allow lazy save/restore ? */ pcb->pcb_veccpu = INT_MAX; PCPU_SET(vecthread, NULL); } /* * Save SPE state without dropping ownership. This will only save state if * the current vector-thread is `td'. 
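/*
 * Illustrative sketch, an assumption rather than code from this commit: SPE
 * general-purpose registers are 64 bits wide, but only the low 32 bits live
 * in the trapframe; save_vec_int() above spills the high halves into
 * pcb_vec.vr[].  The SPE FP data handler below therefore reassembles a
 * 64-bit operand from both places, roughly:
 */
#if 0	/* illustration only */
static uint64_t
spe_gpr64(struct pcb *pcb, struct trapframe *frame, int n)
{
	/* High word from the PCB save area, low word from the trapframe. */
	return (((uint64_t)pcb->pcb_vec.vr[n][0] << 32) | frame->fixreg[n]);
}
#endif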
*/ void save_vec_nodrop(struct thread *td) { struct thread *vtd; vtd = PCPU_GET(vecthread); if (td != vtd) { return; } save_vec_int(td); +} + + +#define SPE_INST_MASK 0x31f +#define EADD 0x200 +#define ESUB 0x201 +#define EABS 0x204 +#define ENABS 0x205 +#define ENEG 0x206 +#define EMUL 0x208 +#define EDIV 0x209 +#define ECMPGT 0x20c +#define ECMPLT 0x20d +#define ECMPEQ 0x20e +#define ECFUI 0x210 +#define ECFSI 0x211 +#define ECTUI 0x214 +#define ECTSI 0x215 +#define ECTUF 0x216 +#define ECTSF 0x217 +#define ECTUIZ 0x218 +#define ECTSIZ 0x21a + +#define SPE 0x4 +#define SPFP 0x6 +#define DPFP 0x7 + +#define SPE_OPC 4 +#define OPC_SHIFT 26 + +#define EVFSADD 0x280 +#define EVFSSUB 0x281 +#define EVFSABS 0x284 +#define EVFSNABS 0x285 +#define EVFSNEG 0x286 +#define EVFSMUL 0x288 +#define EVFSDIV 0x289 +#define EVFSCMPGT 0x28c +#define EVFSCMPLT 0x28d +#define EVFSCMPEQ 0x28e +#define EVFSCFUI 0x290 +#define EVFSCFSI 0x291 +#define EVFSCTUI 0x294 +#define EVFSCTSI 0x295 +#define EVFSCTUF 0x296 +#define EVFSCTSF 0x297 +#define EVFSCTUIZ 0x298 +#define EVFSCTSIZ 0x29a + +#define EFSADD 0x2c0 +#define EFSSUB 0x2c1 +#define EFSABS 0x2c4 +#define EFSNABS 0x2c5 +#define EFSNEG 0x2c6 +#define EFSMUL 0x2c8 +#define EFSDIV 0x2c9 +#define EFSCMPGT 0x2cc +#define EFSCMPLT 0x2cd +#define EFSCMPEQ 0x2ce +#define EFSCFD 0x2cf +#define EFSCFUI 0x2d0 +#define EFSCFSI 0x2d1 +#define EFSCTUI 0x2d4 +#define EFSCTSI 0x2d5 +#define EFSCTUF 0x2d6 +#define EFSCTSF 0x2d7 +#define EFSCTUIZ 0x2d8 +#define EFSCTSIZ 0x2da + +#define EFDADD 0x2e0 +#define EFDSUB 0x2e1 +#define EFDABS 0x2e4 +#define EFDNABS 0x2e5 +#define EFDNEG 0x2e6 +#define EFDMUL 0x2e8 +#define EFDDIV 0x2e9 +#define EFDCMPGT 0x2ec +#define EFDCMPLT 0x2ed +#define EFDCMPEQ 0x2ee +#define EFDCFS 0x2ef +#define EFDCFUI 0x2f0 +#define EFDCFSI 0x2f1 +#define EFDCTUI 0x2f4 +#define EFDCTSI 0x2f5 +#define EFDCTUF 0x2f6 +#define EFDCTSF 0x2f7 +#define EFDCTUIZ 0x2f8 +#define EFDCTSIZ 0x2fa + +enum { + NONE, + SINGLE, + DOUBLE, + VECTOR, +}; + +static uint32_t fpscr_to_spefscr(uint32_t fpscr) +{ + uint32_t spefscr; + + spefscr = 0; + + if (fpscr & FPSCR_VX) + spefscr |= SPEFSCR_FINV; + if (fpscr & FPSCR_OX) + spefscr |= SPEFSCR_FOVF; + if (fpscr & FPSCR_UX) + spefscr |= SPEFSCR_FUNF; + if (fpscr & FPSCR_ZX) + spefscr |= SPEFSCR_FDBZ; + if (fpscr & FPSCR_XX) + spefscr |= SPEFSCR_FX; + + return (spefscr); +} + +/* Sign is 0 for unsigned, 1 for signed. */ +static int +spe_to_int(struct fpemu *fpemu, struct fpn *fpn, uint32_t *val, int sign) +{ + uint32_t res[2]; + + res[0] = fpu_ftox(fpemu, fpn, res); + if (res[0] != UINT_MAX && res[0] != 0) + fpemu->fe_cx |= FPSCR_OX; + else if (sign == 0 && res[0] != 0) + fpemu->fe_cx |= FPSCR_UX; + else + *val = res[1]; + + return (0); +} + +/* Masked instruction */ +/* + * For compare instructions, returns 1 if success, 0 if not. For all others, + * returns -1, or -2 if no result needs recorded. + */ +static int +spe_emu_instr(uint32_t instr, struct fpemu *fpemu, + struct fpn **result, uint32_t *iresult) +{ + switch (instr & SPE_INST_MASK) { + case EABS: + case ENABS: + case ENEG: + /* Taken care of elsewhere. 
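	 * (spe_handle_fpdata() implements abs, nabs and neg directly by
	 * clearing, setting or flipping the sign bit of the operand, so no
	 * fpemu round trip is needed for them.)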
*/ + break; + case ECTUIZ: + fpemu->fe_cx &= ~FPSCR_RN; + fpemu->fe_cx |= FP_RZ; + case ECTUI: + spe_to_int(fpemu, &fpemu->fe_f2, iresult, 0); + return (-1); + case ECTSIZ: + fpemu->fe_cx &= ~FPSCR_RN; + fpemu->fe_cx |= FP_RZ; + case ECTSI: + spe_to_int(fpemu, &fpemu->fe_f2, iresult, 1); + return (-1); + case EADD: + *result = fpu_add(fpemu); + break; + case ESUB: + *result = fpu_sub(fpemu); + break; + case EMUL: + *result = fpu_mul(fpemu); + break; + case EDIV: + *result = fpu_div(fpemu); + break; + case ECMPGT: + fpu_compare(fpemu, 0); + if (fpemu->fe_cx & FPSCR_FG) + return (1); + return (0); + case ECMPLT: + fpu_compare(fpemu, 0); + if (fpemu->fe_cx & FPSCR_FL) + return (1); + return (0); + case ECMPEQ: + fpu_compare(fpemu, 0); + if (fpemu->fe_cx & FPSCR_FE) + return (1); + return (0); + default: + printf("Unknown instruction %x\n", instr); + } + + return (-1); +} + +static int +spe_explode(struct fpemu *fe, struct fpn *fp, uint32_t type, + uint32_t hi, uint32_t lo) +{ + uint32_t s; + + fp->fp_sign = hi >> 31; + fp->fp_sticky = 0; + switch (type) { + case SINGLE: + s = fpu_stof(fp, hi); + break; + + case DOUBLE: + s = fpu_dtof(fp, hi, lo); + break; + } + + if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) { + /* + * Input is a signalling NaN. All operations that return + * an input NaN operand put it through a ``NaN conversion'', + * which basically just means ``turn on the quiet bit''. + * We do this here so that all NaNs internally look quiet + * (we can tell signalling ones by their class). + */ + fp->fp_mant[0] |= FP_QUIETBIT; + fe->fe_cx = FPSCR_VXSNAN; /* assert invalid operand */ + s = FPC_SNAN; + } + fp->fp_class = s; + + return (0); +} + +void +spe_handle_fpdata(struct trapframe *frame) +{ + struct fpemu fpemu; + struct fpn *result; + uint32_t instr, instr_sec_op; + uint32_t cr_shift, ra, rb, rd, src; + uint32_t high, low, res; /* For vector operations. */ + uint32_t spefscr = 0; + uint32_t ftod_res[2]; + int width; /* Single, Double, Vector, Integer */ + int err; + + err = fueword32((void *)frame->srr0, &instr); + + if (err != 0) + return; + /* Fault. */; + + if ((instr >> OPC_SHIFT) != SPE_OPC) + return; + + /* + * 'cr' field is the upper 3 bits of rd. Magically, since a) rd is 5 + * bits, b) each 'cr' field is 4 bits, and c) Only the 'GT' bit is + * modified for most compare operations, the full value of rd can be + * used as a shift value. 
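	 * For example, efscmpgt with crD = 1 encodes rd = 0b00100 = 4,
	 * giving cr_shift = 28 - 4 = 24, the shift for CR field 1; the
	 * compare result is later shifted left by 2 so that it lands on
	 * that field's GT bit.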
+ */ + rd = (instr >> 21) & 0x1f; + ra = (instr >> 16) & 0x1f; + rb = (instr >> 11) & 0x1f; + src = (instr >> 5) & 0x7; + cr_shift = 28 - (rd & 0x1f); + + instr_sec_op = (instr & 0x7ff); + + memset(&fpemu, 0, sizeof(fpemu)); + + width = NONE; + switch (src) { + case SPE: + save_vec_nodrop(curthread); + switch (instr_sec_op) { + case EVFSABS: + curthread->td_pcb->pcb_vec.vr[rd][0] = + curthread->td_pcb->pcb_vec.vr[ra][0] & ~(1U << 31); + frame->fixreg[rd] = frame->fixreg[ra] & ~(1U << 31); + break; + case EVFSNABS: + curthread->td_pcb->pcb_vec.vr[rd][0] = + curthread->td_pcb->pcb_vec.vr[ra][0] | (1U << 31); + frame->fixreg[rd] = frame->fixreg[ra] | (1U << 31); + break; + case EVFSNEG: + curthread->td_pcb->pcb_vec.vr[rd][0] = + curthread->td_pcb->pcb_vec.vr[ra][0] ^ (1U << 31); + frame->fixreg[rd] = frame->fixreg[ra] ^ (1U << 31); + break; + default: + /* High word */ + spe_explode(&fpemu, &fpemu.fe_f1, SINGLE, + curthread->td_pcb->pcb_vec.vr[ra][0], 0); + spe_explode(&fpemu, &fpemu.fe_f2, SINGLE, + curthread->td_pcb->pcb_vec.vr[rb][0], 0); + high = spe_emu_instr(instr_sec_op, &fpemu, &result, + &curthread->td_pcb->pcb_vec.vr[rd][0]); + + spefscr = fpscr_to_spefscr(fpemu.fe_cx) << 16; + /* Clear the fpemu to start over on the lower bits. */ + memset(&fpemu, 0, sizeof(fpemu)); + + /* Now low word */ + spe_explode(&fpemu, &fpemu.fe_f1, SINGLE, + frame->fixreg[ra], 0); + spe_explode(&fpemu, &fpemu.fe_f2, SINGLE, + frame->fixreg[rb], 0); + spefscr |= fpscr_to_spefscr(fpemu.fe_cx); + low = spe_emu_instr(instr_sec_op, &fpemu, &result, + &frame->fixreg[rd]); + if (instr_sec_op == EVFSCMPEQ || + instr_sec_op == EVFSCMPGT || + instr_sec_op == EVFSCMPLT) { + res = (high << 3) | (low << 2) | + ((high | low) << 1) | (high & low); + width = NONE; + } else + width = VECTOR; + break; + } + enable_vec(curthread); + goto end; + + case SPFP: + switch (instr_sec_op) { + case EFSABS: + frame->fixreg[rd] = frame->fixreg[ra] & ~(1U << 31); + break; + case EFSNABS: + frame->fixreg[rd] = frame->fixreg[ra] | (1U << 31); + break; + case EFSNEG: + frame->fixreg[rd] = frame->fixreg[ra] ^ (1U << 31); + break; + case EFSCFD: + spe_explode(&fpemu, &fpemu.fe_f3, DOUBLE, + curthread->td_pcb->pcb_vec.vr[rb][0], + frame->fixreg[rb]); + result = &fpemu.fe_f3; + width = SINGLE; + break; + default: + spe_explode(&fpemu, &fpemu.fe_f1, SINGLE, + frame->fixreg[ra], 0); + spe_explode(&fpemu, &fpemu.fe_f2, SINGLE, + frame->fixreg[rb], 0); + width = SINGLE; + } + break; + case DPFP: + save_vec_nodrop(curthread); + switch (instr_sec_op) { + case EFDABS: + curthread->td_pcb->pcb_vec.vr[rd][0] = + curthread->td_pcb->pcb_vec.vr[ra][0] & ~(1U << 31); + break; + case EFDNABS: + curthread->td_pcb->pcb_vec.vr[rd][0] = + curthread->td_pcb->pcb_vec.vr[ra][0] | (1U << 31); + break; + case EFDNEG: + curthread->td_pcb->pcb_vec.vr[rd][0] = + curthread->td_pcb->pcb_vec.vr[ra][0] ^ (1U << 31); + break; + case EFDCFS: + spe_explode(&fpemu, &fpemu.fe_f3, SINGLE, + frame->fixreg[rb], 0); + result = &fpemu.fe_f3; + width = DOUBLE; + break; + default: + spe_explode(&fpemu, &fpemu.fe_f1, DOUBLE, + curthread->td_pcb->pcb_vec.vr[ra][0], + frame->fixreg[ra]); + spe_explode(&fpemu, &fpemu.fe_f2, DOUBLE, + curthread->td_pcb->pcb_vec.vr[rb][0], + frame->fixreg[rb]); + width = DOUBLE; + } + break; + } + switch (instr_sec_op) { + case EFDCFS: + case EFSCFD: + /* Already handled. 
*/ + break; + default: + res = spe_emu_instr(instr_sec_op, &fpemu, &result, + &frame->fixreg[rd]); + if (res != -1) + res <<= 2; + break; + } + + switch (instr_sec_op & SPE_INST_MASK) { + case ECMPEQ: + case ECMPGT: + case ECMPLT: + frame->cr &= ~(0xf << cr_shift); + frame->cr |= (res << cr_shift); + break; + case ECTUI: + case ECTUIZ: + case ECTSI: + case ECTSIZ: + break; + default: + switch (width) { + case NONE: + case VECTOR: + break; + case SINGLE: + frame->fixreg[rd] = fpu_ftos(&fpemu, result); + break; + case DOUBLE: + curthread->td_pcb->pcb_vec.vr[rd][0] = + fpu_ftod(&fpemu, result, ftod_res); + frame->fixreg[rd] = ftod_res[1]; + enable_vec(curthread); + break; + default: + panic("Unknown storage width %d", width); + break; + } + } + +end: + spefscr |= (mfspr(SPR_SPEFSCR) & ~SPEFSCR_FINVS); + mtspr(SPR_SPEFSCR, spefscr); + frame->srr0 += 4; + + return; +} + +void +spe_handle_fpround(struct trapframe *frame) +{ + + /* + * Punt fpround exceptions for now. This leaves the truncated result in + * the register. We'll deal with overflow/underflow later. + */ + return; } Index: head/sys/powerpc/booke/trap_subr.S =================================================================== --- head/sys/powerpc/booke/trap_subr.S (revision 339513) +++ head/sys/powerpc/booke/trap_subr.S (revision 339514) @@ -1,1114 +1,1136 @@ /*- * Copyright (C) 2006-2009 Semihalf, Rafal Jaworowski * Copyright (C) 2006 Semihalf, Marian Balakowicz * Copyright (C) 2006 Juniper Networks, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */ /* * NOTICE: This is not a standalone file. to use it, #include it in * your port's locore.S, like so: * * #include */ /* * SPRG usage notes * * SPRG0 - pcpu pointer * SPRG1 - all interrupts except TLB miss, critical, machine check * SPRG2 - critical * SPRG3 - machine check * SPRG4-6 - scratch * */ /* Get the per-CPU data structure */ #define GET_CPUINFO(r) mfsprg0 r #define RES_GRANULE 64 #define RES_LOCK 0 /* offset to the 'lock' word */ #ifdef __powerpc64__ #define RES_RECURSE 8 /* offset to the 'recurse' word */ #else #define RES_RECURSE 4 /* offset to the 'recurse' word */ #endif /* * Standard interrupt prolog * * sprg_sp - SPRG{1-3} reg used to temporarily store the SP * savearea - temp save area (pc_{tempsave, disisave, critsave, mchksave}) * isrr0-1 - save restore registers with CPU state at interrupt time (may be * SRR0-1, CSRR0-1, MCSRR0-1 * * 1. saves in the given savearea: * - R30-31 * - DEAR, ESR * - xSRR0-1 * * 2. saves CR -> R30 * * 3. switches to kstack if needed * * 4. notes: * - R31 can be used as scratch register until a new frame is layed on * the stack with FRAME_SETUP * * - potential TLB miss: NO. 
Saveareas are always acessible via TLB1 * permanent entries, and within this prolog we do not dereference any * locations potentially not in the TLB */ #define STANDARD_PROLOG(sprg_sp, savearea, isrr0, isrr1) \ mtspr sprg_sp, %r1; /* Save SP */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \ mfdear %r30; \ mfesr %r31; \ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \ mfspr %r30, isrr0; \ mfspr %r31, isrr1; /* MSR at interrupt time */ \ STORE %r30, (savearea+CPUSAVE_SRR0)(%r1); \ STORE %r31, (savearea+CPUSAVE_SRR1)(%r1); \ isync; \ mfspr %r1, sprg_sp; /* Restore SP */ \ mfcr %r30; /* Save CR */ \ /* switch to per-thread kstack if intr taken in user mode */ \ mtcr %r31; /* MSR at interrupt time */ \ bf 17, 1f; \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ LOAD %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \ 1: #define STANDARD_CRIT_PROLOG(sprg_sp, savearea, isrr0, isrr1) \ mtspr sprg_sp, %r1; /* Save SP */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \ mfdear %r30; \ mfesr %r31; \ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \ mfspr %r30, isrr0; \ mfspr %r31, isrr1; /* MSR at interrupt time */ \ STORE %r30, (savearea+CPUSAVE_SRR0)(%r1); \ STORE %r31, (savearea+CPUSAVE_SRR1)(%r1); \ mfspr %r30, SPR_SRR0; \ mfspr %r31, SPR_SRR1; /* MSR at interrupt time */ \ STORE %r30, (savearea+BOOKE_CRITSAVE_SRR0)(%r1); \ STORE %r31, (savearea+BOOKE_CRITSAVE_SRR1)(%r1); \ isync; \ mfspr %r1, sprg_sp; /* Restore SP */ \ mfcr %r30; /* Save CR */ \ /* switch to per-thread kstack if intr taken in user mode */ \ mtcr %r31; /* MSR at interrupt time */ \ bf 17, 1f; \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ LOAD %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \ 1: /* * FRAME_SETUP assumes: * SPRG{1-3} SP at the time interrupt occured * savearea r30-r31, DEAR, ESR, xSRR0-1 * r30 CR * r31 scratch * r1 kernel stack * * sprg_sp - SPRG reg containing SP at the time interrupt occured * savearea - temp save * exc - exception number (EXC_xxx) * * 1. sets a new frame * 2. saves in the frame: * - R0, R1 (SP at the time of interrupt), R2, LR, CR * - R3-31 (R30-31 first restored from savearea) * - XER, CTR, DEAR, ESR (from savearea), xSRR0-1 * * Notes: * - potential TLB miss: YES, since we make dereferences to kstack, which * can happen not covered (we can have up to two DTLB misses if fortunate * enough i.e. 
when kstack crosses page boundary and both pages are * untranslated) */ #ifdef __powerpc64__ #define SAVE_REGS(r) \ std %r3, FRAME_3+CALLSIZE(r); \ std %r4, FRAME_4+CALLSIZE(r); \ std %r5, FRAME_5+CALLSIZE(r); \ std %r6, FRAME_6+CALLSIZE(r); \ std %r7, FRAME_7+CALLSIZE(r); \ std %r8, FRAME_8+CALLSIZE(r); \ std %r9, FRAME_9+CALLSIZE(r); \ std %r10, FRAME_10+CALLSIZE(r); \ std %r11, FRAME_11+CALLSIZE(r); \ std %r12, FRAME_12+CALLSIZE(r); \ std %r13, FRAME_13+CALLSIZE(r); \ std %r14, FRAME_14+CALLSIZE(r); \ std %r15, FRAME_15+CALLSIZE(r); \ std %r16, FRAME_16+CALLSIZE(r); \ std %r17, FRAME_17+CALLSIZE(r); \ std %r18, FRAME_18+CALLSIZE(r); \ std %r19, FRAME_19+CALLSIZE(r); \ std %r20, FRAME_20+CALLSIZE(r); \ std %r21, FRAME_21+CALLSIZE(r); \ std %r22, FRAME_22+CALLSIZE(r); \ std %r23, FRAME_23+CALLSIZE(r); \ std %r24, FRAME_24+CALLSIZE(r); \ std %r25, FRAME_25+CALLSIZE(r); \ std %r26, FRAME_26+CALLSIZE(r); \ std %r27, FRAME_27+CALLSIZE(r); \ std %r28, FRAME_28+CALLSIZE(r); \ std %r29, FRAME_29+CALLSIZE(r); \ std %r30, FRAME_30+CALLSIZE(r); \ std %r31, FRAME_31+CALLSIZE(r) #define LD_REGS(r) \ ld %r3, FRAME_3+CALLSIZE(r); \ ld %r4, FRAME_4+CALLSIZE(r); \ ld %r5, FRAME_5+CALLSIZE(r); \ ld %r6, FRAME_6+CALLSIZE(r); \ ld %r7, FRAME_7+CALLSIZE(r); \ ld %r8, FRAME_8+CALLSIZE(r); \ ld %r9, FRAME_9+CALLSIZE(r); \ ld %r10, FRAME_10+CALLSIZE(r); \ ld %r11, FRAME_11+CALLSIZE(r); \ ld %r12, FRAME_12+CALLSIZE(r); \ ld %r13, FRAME_13+CALLSIZE(r); \ ld %r14, FRAME_14+CALLSIZE(r); \ ld %r15, FRAME_15+CALLSIZE(r); \ ld %r16, FRAME_16+CALLSIZE(r); \ ld %r17, FRAME_17+CALLSIZE(r); \ ld %r18, FRAME_18+CALLSIZE(r); \ ld %r19, FRAME_19+CALLSIZE(r); \ ld %r20, FRAME_20+CALLSIZE(r); \ ld %r21, FRAME_21+CALLSIZE(r); \ ld %r22, FRAME_22+CALLSIZE(r); \ ld %r23, FRAME_23+CALLSIZE(r); \ ld %r24, FRAME_24+CALLSIZE(r); \ ld %r25, FRAME_25+CALLSIZE(r); \ ld %r26, FRAME_26+CALLSIZE(r); \ ld %r27, FRAME_27+CALLSIZE(r); \ ld %r28, FRAME_28+CALLSIZE(r); \ ld %r29, FRAME_29+CALLSIZE(r); \ ld %r30, FRAME_30+CALLSIZE(r); \ ld %r31, FRAME_31+CALLSIZE(r) #else #define SAVE_REGS(r) \ stmw %r3, FRAME_3+CALLSIZE(r) #define LD_REGS(r) \ lmw %r3, FRAME_3+CALLSIZE(r) #endif #define FRAME_SETUP(sprg_sp, savearea, exc) \ mfspr %r31, sprg_sp; /* get saved SP */ \ /* establish a new stack frame and put everything on it */ \ STU %r31, -(FRAMELEN+REDZONE)(%r1); \ STORE %r0, FRAME_0+CALLSIZE(%r1); /* save r0 in the trapframe */ \ STORE %r31, FRAME_1+CALLSIZE(%r1); /* save SP " " */ \ STORE %r2, FRAME_2+CALLSIZE(%r1); /* save r2 " " */ \ mflr %r31; \ STORE %r31, FRAME_LR+CALLSIZE(%r1); /* save LR " " */ \ STORE %r30, FRAME_CR+CALLSIZE(%r1); /* save CR " " */ \ GET_CPUINFO(%r2); \ LOAD %r30, (savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \ LOAD %r31, (savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \ /* save R3-31 */ \ SAVE_REGS(%r1); \ /* save DEAR, ESR */ \ LOAD %r28, (savearea+CPUSAVE_BOOKE_DEAR)(%r2); \ LOAD %r29, (savearea+CPUSAVE_BOOKE_ESR)(%r2); \ STORE %r28, FRAME_BOOKE_DEAR+CALLSIZE(%r1); \ STORE %r29, FRAME_BOOKE_ESR+CALLSIZE(%r1); \ /* save XER, CTR, exc number */ \ mfxer %r3; \ mfctr %r4; \ STORE %r3, FRAME_XER+CALLSIZE(%r1); \ STORE %r4, FRAME_CTR+CALLSIZE(%r1); \ li %r5, exc; \ STORE %r5, FRAME_EXC+CALLSIZE(%r1); \ /* save DBCR0 */ \ mfspr %r3, SPR_DBCR0; \ STORE %r3, FRAME_BOOKE_DBCR0+CALLSIZE(%r1); \ /* save xSSR0-1 */ \ LOAD %r30, (savearea+CPUSAVE_SRR0)(%r2); \ LOAD %r31, (savearea+CPUSAVE_SRR1)(%r2); \ STORE %r30, FRAME_SRR0+CALLSIZE(%r1); \ STORE %r31, FRAME_SRR1+CALLSIZE(%r1); \ LOAD THREAD_REG, PC_CURTHREAD(%r2); \ /* * * 
isrr0-1 - save restore registers to restore CPU state to (may be * SRR0-1, CSRR0-1, MCSRR0-1 * * Notes: * - potential TLB miss: YES. The deref'd kstack may be not covered */ #define FRAME_LEAVE(isrr0, isrr1) \ wrteei 0; \ /* restore CTR, XER, LR, CR */ \ LOAD %r4, FRAME_CTR+CALLSIZE(%r1); \ LOAD %r5, FRAME_XER+CALLSIZE(%r1); \ LOAD %r6, FRAME_LR+CALLSIZE(%r1); \ LOAD %r7, FRAME_CR+CALLSIZE(%r1); \ mtctr %r4; \ mtxer %r5; \ mtlr %r6; \ mtcr %r7; \ /* restore DBCR0 */ \ LOAD %r4, FRAME_BOOKE_DBCR0+CALLSIZE(%r1); \ mtspr SPR_DBCR0, %r4; \ /* restore xSRR0-1 */ \ LOAD %r30, FRAME_SRR0+CALLSIZE(%r1); \ LOAD %r31, FRAME_SRR1+CALLSIZE(%r1); \ mtspr isrr0, %r30; \ mtspr isrr1, %r31; \ /* restore R2-31, SP */ \ LD_REGS(%r1); \ LOAD %r2, FRAME_2+CALLSIZE(%r1); \ LOAD %r0, FRAME_0+CALLSIZE(%r1); \ LOAD %r1, FRAME_1+CALLSIZE(%r1); \ isync /* * TLB miss prolog * * saves LR, CR, SRR0-1, R20-31 in the TLBSAVE area * * Notes: * - potential TLB miss: NO. It is crucial that we do not generate a TLB * miss within the TLB prolog itself! * - TLBSAVE is always translated */ #ifdef __powerpc64__ #define TLB_SAVE_REGS(br) \ std %r20, (TLBSAVE_BOOKE_R20)(br); \ std %r21, (TLBSAVE_BOOKE_R21)(br); \ std %r22, (TLBSAVE_BOOKE_R22)(br); \ std %r23, (TLBSAVE_BOOKE_R23)(br); \ std %r24, (TLBSAVE_BOOKE_R24)(br); \ std %r25, (TLBSAVE_BOOKE_R25)(br); \ std %r26, (TLBSAVE_BOOKE_R26)(br); \ std %r27, (TLBSAVE_BOOKE_R27)(br); \ std %r28, (TLBSAVE_BOOKE_R28)(br); \ std %r29, (TLBSAVE_BOOKE_R29)(br); \ std %r30, (TLBSAVE_BOOKE_R30)(br); \ std %r31, (TLBSAVE_BOOKE_R31)(br); #define TLB_RESTORE_REGS(br) \ ld %r20, (TLBSAVE_BOOKE_R20)(br); \ ld %r21, (TLBSAVE_BOOKE_R21)(br); \ ld %r22, (TLBSAVE_BOOKE_R22)(br); \ ld %r23, (TLBSAVE_BOOKE_R23)(br); \ ld %r24, (TLBSAVE_BOOKE_R24)(br); \ ld %r25, (TLBSAVE_BOOKE_R25)(br); \ ld %r26, (TLBSAVE_BOOKE_R26)(br); \ ld %r27, (TLBSAVE_BOOKE_R27)(br); \ ld %r28, (TLBSAVE_BOOKE_R28)(br); \ ld %r29, (TLBSAVE_BOOKE_R29)(br); \ ld %r30, (TLBSAVE_BOOKE_R30)(br); \ ld %r31, (TLBSAVE_BOOKE_R31)(br); #define TLB_NEST(outr,inr) \ rlwinm outr, inr, 7, 22, 24; /* 8 x TLBSAVE_LEN */ #else #define TLB_SAVE_REGS(br) \ stmw %r20, TLBSAVE_BOOKE_R20(br) #define TLB_RESTORE_REGS(br) \ lmw %r20, TLBSAVE_BOOKE_R20(br) #define TLB_NEST(outr,inr) \ rlwinm outr, inr, 6, 23, 25; /* 4 x TLBSAVE_LEN */ #endif #define TLB_PROLOG \ mtsprg4 %r1; /* Save SP */ \ mtsprg5 %r28; \ mtsprg6 %r29; \ /* calculate TLB nesting level and TLBSAVE instance address */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \ TLB_NEST(%r29,%r28); \ addi %r28, %r28, 1; \ STORE %r28, PC_BOOKE_TLB_LEVEL(%r1); \ addi %r29, %r29, PC_BOOKE_TLBSAVE@l; \ add %r1, %r1, %r29; /* current TLBSAVE ptr */ \ \ /* save R20-31 */ \ mfsprg5 %r28; \ mfsprg6 %r29; \ TLB_SAVE_REGS(%r1); \ /* save LR, CR */ \ mflr %r30; \ mfcr %r31; \ STORE %r30, (TLBSAVE_BOOKE_LR)(%r1); \ STORE %r31, (TLBSAVE_BOOKE_CR)(%r1); \ /* save SRR0-1 */ \ mfsrr0 %r30; /* execution addr at interrupt time */ \ mfsrr1 %r31; /* MSR at interrupt time*/ \ STORE %r30, (TLBSAVE_BOOKE_SRR0)(%r1); /* save SRR0 */ \ STORE %r31, (TLBSAVE_BOOKE_SRR1)(%r1); /* save SRR1 */ \ isync; \ mfsprg4 %r1 /* * restores LR, CR, SRR0-1, R20-31 from the TLBSAVE area * * same notes as for the TLB_PROLOG */ #define TLB_RESTORE \ mtsprg4 %r1; /* Save SP */ \ GET_CPUINFO(%r1); /* Per-cpu structure */ \ /* calculate TLB nesting level and TLBSAVE instance addr */ \ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \ subi %r28, %r28, 1; \ STORE %r28, PC_BOOKE_TLB_LEVEL(%r1); \ TLB_NEST(%r29,%r28); \ addi 
%r29, %r29, PC_BOOKE_TLBSAVE@l; \ add %r1, %r1, %r29; \ \ /* restore LR, CR */ \ LOAD %r30, (TLBSAVE_BOOKE_LR)(%r1); \ LOAD %r31, (TLBSAVE_BOOKE_CR)(%r1); \ mtlr %r30; \ mtcr %r31; \ /* restore SRR0-1 */ \ LOAD %r30, (TLBSAVE_BOOKE_SRR0)(%r1); \ LOAD %r31, (TLBSAVE_BOOKE_SRR1)(%r1); \ mtsrr0 %r30; \ mtsrr1 %r31; \ /* restore R20-31 */ \ TLB_RESTORE_REGS(%r1); \ mfsprg4 %r1 #ifdef SMP #define TLB_LOCK \ GET_CPUINFO(%r20); \ LOAD %r21, PC_CURTHREAD(%r20); \ LOAD %r22, PC_BOOKE_TLB_LOCK(%r20); \ \ 1: LOADX %r23, 0, %r22; \ CMPI %r23, TLB_UNLOCKED; \ beq 2f; \ \ /* check if this is recursion */ \ CMPL cr0, %r21, %r23; \ bne- 1b; \ \ 2: /* try to acquire lock */ \ STOREX %r21, 0, %r22; \ bne- 1b; \ \ /* got it, update recursion counter */ \ lwz %r21, RES_RECURSE(%r22); \ addi %r21, %r21, 1; \ stw %r21, RES_RECURSE(%r22); \ isync; \ msync #define TLB_UNLOCK \ GET_CPUINFO(%r20); \ LOAD %r21, PC_CURTHREAD(%r20); \ LOAD %r22, PC_BOOKE_TLB_LOCK(%r20); \ \ /* update recursion counter */ \ lwz %r23, RES_RECURSE(%r22); \ subi %r23, %r23, 1; \ stw %r23, RES_RECURSE(%r22); \ \ cmplwi %r23, 0; \ bne 1f; \ isync; \ msync; \ \ /* release the lock */ \ li %r23, TLB_UNLOCKED; \ STORE %r23, 0(%r22); \ 1: isync; \ msync #else #define TLB_LOCK #define TLB_UNLOCK #endif /* SMP */ #define INTERRUPT(label) \ .globl label; \ .align 5; \ CNAME(label): /* * Interrupt handling routines in BookE can be flexibly placed and do not have * to live in pre-defined vectors location. Note they need to be TLB-mapped at * all times in order to be able to handle exceptions. We thus arrange for * them to be part of kernel text which is always TLB-accessible. * * The interrupt handling routines have to be 16 bytes aligned: we align them * to 32 bytes (cache line length) which supposedly performs better. 
* */ .text .globl CNAME(interrupt_vector_base) .align 5 interrupt_vector_base: /***************************************************************************** * Catch-all handler to handle uninstalled IVORs ****************************************************************************/ INTERRUPT(int_unknown) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_RSVD) b trap_common /***************************************************************************** * Critical input interrupt ****************************************************************************/ INTERRUPT(int_critical_input) STANDARD_CRIT_PROLOG(SPR_SPRG2, PC_BOOKE_CRITSAVE, SPR_CSRR0, SPR_CSRR1) FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_CRIT) GET_TOCBASE(%r2) addi %r3, %r1, CALLSIZE bl CNAME(powerpc_interrupt) TOC_RESTORE FRAME_LEAVE(SPR_CSRR0, SPR_CSRR1) rfci /***************************************************************************** * Machine check interrupt ****************************************************************************/ INTERRUPT(int_machine_check) STANDARD_PROLOG(SPR_SPRG3, PC_BOOKE_MCHKSAVE, SPR_MCSRR0, SPR_MCSRR1) FRAME_SETUP(SPR_SPRG3, PC_BOOKE_MCHKSAVE, EXC_MCHK) GET_TOCBASE(%r2) addi %r3, %r1, CALLSIZE bl CNAME(powerpc_interrupt) TOC_RESTORE FRAME_LEAVE(SPR_MCSRR0, SPR_MCSRR1) rfmci /***************************************************************************** * Data storage interrupt ****************************************************************************/ INTERRUPT(int_data_storage) STANDARD_PROLOG(SPR_SPRG1, PC_DISISAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_DISISAVE, EXC_DSI) b trap_common /***************************************************************************** * Instruction storage interrupt ****************************************************************************/ INTERRUPT(int_instr_storage) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_ISI) b trap_common /***************************************************************************** * External input interrupt ****************************************************************************/ INTERRUPT(int_external_input) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_EXI) b trap_common INTERRUPT(int_alignment) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_ALI) b trap_common INTERRUPT(int_program) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_PGM) b trap_common INTERRUPT(int_fpu) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_FPU) b trap_common /***************************************************************************** * System call ****************************************************************************/ INTERRUPT(int_syscall) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_SC) b trap_common /***************************************************************************** * Decrementer interrupt ****************************************************************************/ INTERRUPT(int_decrementer) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_DECR) b trap_common /***************************************************************************** * Fixed interval timer 
****************************************************************************/ INTERRUPT(int_fixed_interval_timer) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_FIT) b trap_common /***************************************************************************** * Watchdog interrupt ****************************************************************************/ INTERRUPT(int_watchdog) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_WDOG) b trap_common /***************************************************************************** * Altivec Unavailable interrupt ****************************************************************************/ INTERRUPT(int_vec) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_VEC) b trap_common /***************************************************************************** * Altivec Assist interrupt ****************************************************************************/ INTERRUPT(int_vecast) STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_VECAST_E) b trap_common +#ifdef __SPE__ +/***************************************************************************** + * Floating point Assist interrupt + ****************************************************************************/ +INTERRUPT(int_spe_fpdata) + STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) + FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_SPFPD) + addi %r3, %r1, CALLSIZE + bl spe_handle_fpdata + FRAME_LEAVE(SPR_SRR0, SPR_SRR1) + rfi + +INTERRUPT(int_spe_fpround) + STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) + FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_SPFPR) + addi %r3, %r1, CALLSIZE + bl spe_handle_fpround + FRAME_LEAVE(SPR_SRR0, SPR_SRR1) + rfi +#endif + + #ifdef HWPMC_HOOKS /***************************************************************************** * PMC Interrupt ****************************************************************************/ INTERRUPT(int_performance_counter) STANDARD_PROLOG(SPR_SPRG3, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1) FRAME_SETUP(SPR_SPRG3, PC_TEMPSAVE, EXC_PERF) b trap_common #endif /***************************************************************************** * Data TLB miss interrupt * * There can be nested TLB misses - while handling a TLB miss we reference * data structures that may be not covered by translations. We support up to * TLB_NESTED_MAX-1 nested misses. * * Registers use: * r31 - dear * r30 - unused * r29 - saved mas0 * r28 - saved mas1 * r27 - saved mas2 * r26 - pmap address * r25 - pte address * * r20:r23 - scratch registers ****************************************************************************/ INTERRUPT(int_data_tlb_error) TLB_PROLOG TLB_LOCK mfdear %r31 /* * Save MAS0-MAS2 registers. There might be another tlb miss during * pte lookup overwriting current contents (which was hw filled). */ mfspr %r29, SPR_MAS0 mfspr %r28, SPR_MAS1 mfspr %r27, SPR_MAS2 /* Check faulting address. */ LOAD_ADDR(%r21, VM_MAXUSER_ADDRESS) CMPL cr0, %r31, %r21 blt search_user_pmap /* If it's kernel address, allow only supervisor mode misses. */ mfsrr1 %r21 mtcr %r21 bt 17, search_failed /* check MSR[PR] */ search_kernel_pmap: /* Load r26 with kernel_pmap address */ bl 1f #ifdef __powerpc64__ .llong kernel_pmap_store-. #else .long kernel_pmap_store-. 
#endif 1: mflr %r21 LOAD %r26, 0(%r21) add %r26, %r21, %r26 /* kernel_pmap_store in r26 */ /* Force kernel tid, set TID to 0 in MAS1. */ li %r21, 0 rlwimi %r28, %r21, 0, 8, 15 /* clear TID bits */ tlb_miss_handle: /* This may result in nested tlb miss. */ bl pte_lookup /* returns PTE address in R25 */ CMPI %r25, 0 /* pte found? */ beq search_failed /* Finish up, write TLB entry. */ bl tlb_fill_entry tlb_miss_return: TLB_UNLOCK TLB_RESTORE rfi search_user_pmap: /* Load r26 with current user space process pmap */ GET_CPUINFO(%r26) LOAD %r26, PC_CURPMAP(%r26) b tlb_miss_handle search_failed: /* * Whenever we don't find a TLB mapping in PT, set a TLB0 entry with * the faulting virtual address anyway, but put a fake RPN and no * access rights. This should cause a following {D,I}SI exception. */ lis %r23, 0xffff0000@h /* revoke all permissions */ /* Load MAS registers. */ mtspr SPR_MAS0, %r29 isync mtspr SPR_MAS1, %r28 isync mtspr SPR_MAS2, %r27 isync mtspr SPR_MAS3, %r23 isync bl zero_mas7 bl zero_mas8 tlbwe msync isync b tlb_miss_return /***************************************************************************** * * Return pte address that corresponds to given pmap/va. If there is no valid * entry return 0. * * input: r26 - pmap * input: r31 - dear * output: r25 - pte address * * scratch regs used: r21 * ****************************************************************************/ pte_lookup: CMPI %r26, 0 beq 1f /* fail quickly if pmap is invalid */ #ifdef __powerpc64__ rldicl %r21, %r31, (64 - PP2D_L_L), (64 - PP2D_L_NUM) /* pp2d offset */ rldicl %r25, %r31, (64 - PP2D_H_L), (64 - PP2D_H_NUM) rldimi %r21, %r25, PP2D_L_NUM, (64 - (PP2D_L_NUM + PP2D_H_NUM)) slwi %r21, %r21, PP2D_ENTRY_SHIFT /* multiply by pp2d entry size */ addi %r25, %r26, PM_PP2D /* pmap pm_pp2d[] address */ add %r25, %r25, %r21 /* offset within pm_pp2d[] table */ ld %r25, 0(%r25) /* get pdir address, i.e. pmap->pm_pp2d[pp2d_idx] * */ cmpdi %r25, 0 beq 1f #if PAGE_SIZE < 65536 rldicl %r21, %r31, (64 - PDIR_L), (64 - PDIR_NUM) /* pdir offset */ slwi %r21, %r21, PDIR_ENTRY_SHIFT /* multiply by pdir entry size */ add %r25, %r25, %r21 /* offset within pdir table */ ld %r25, 0(%r25) /* get ptbl address, i.e. pmap->pm_pp2d[pp2d_idx][pdir_idx] */ cmpdi %r25, 0 beq 1f #endif rldicl %r21, %r31, (64 - PTBL_L), (64 - PTBL_NUM) /* ptbl offset */ slwi %r21, %r21, PTBL_ENTRY_SHIFT /* multiply by pte entry size */ #else srwi %r21, %r31, PDIR_SHIFT /* pdir offset */ slwi %r21, %r21, PDIR_ENTRY_SHIFT /* multiply by pdir entry size */ addi %r25, %r26, PM_PDIR /* pmap pm_dir[] address */ add %r25, %r25, %r21 /* offset within pm_pdir[] table */ /* * Get ptbl address, i.e. pmap->pm_pdir[pdir_idx] * This load may cause a Data TLB miss for non-kernel pmap! */ LOAD %r25, 0(%r25) CMPI %r25, 0 beq 2f lis %r21, PTBL_MASK@h ori %r21, %r21, PTBL_MASK@l and %r21, %r21, %r31 /* ptbl offset, multiply by ptbl entry size */ srwi %r21, %r21, (PTBL_SHIFT - PTBL_ENTRY_SHIFT) #endif add %r25, %r25, %r21 /* address of pte entry */ /* * Get pte->flags * This load may cause a Data TLB miss for non-kernel pmap! */ lwz %r21, PTE_FLAGS(%r25) andi. 
%r21, %r21, PTE_VALID@l bne 2f 1: li %r25, 0 2: blr /***************************************************************************** * * Load MAS1-MAS3 registers with data, write TLB entry * * input: * r29 - mas0 * r28 - mas1 * r27 - mas2 * r25 - pte * * output: none * * scratch regs: r21-r23 * ****************************************************************************/ tlb_fill_entry: /* * Update PTE flags: we have to do it atomically, as pmap_protect() * running on other CPUs could attempt to update the flags at the same * time. */ li %r23, PTE_FLAGS 1: lwarx %r21, %r23, %r25 /* get pte->flags */ oris %r21, %r21, PTE_REFERENCED@h /* set referenced bit */ andi. %r22, %r21, (PTE_SW | PTE_UW)@l /* check if writable */ beq 2f ori %r21, %r21, PTE_MODIFIED@l /* set modified bit */ 2: stwcx. %r21, %r23, %r25 /* write it back */ bne- 1b /* Update MAS2. */ rlwimi %r27, %r21, 13, 27, 30 /* insert WIMG bits from pte */ /* Setup MAS3 value in r23. */ LOAD %r23, PTE_RPN(%r25) /* get pte->rpn */ #ifdef __powerpc64__ rldicr %r22, %r23, 52, 51 /* extract MAS3 portion of RPN */ rldicl %r23, %r23, 20, 54 /* extract MAS7 portion of RPN */ rlwimi %r22, %r21, 30, 26, 31 /* insert protection bits from pte */ #else rlwinm %r22, %r23, 20, 0, 11 /* extract MAS3 portion of RPN */ rlwimi %r22, %r21, 30, 26, 31 /* insert protection bits from pte */ rlwimi %r22, %r21, 20, 12, 19 /* insert lower 8 RPN bits to MAS3 */ rlwinm %r23, %r23, 20, 24, 31 /* MAS7 portion of RPN */ #endif /* Load MAS registers. */ mtspr SPR_MAS0, %r29 isync mtspr SPR_MAS1, %r28 isync mtspr SPR_MAS2, %r27 isync mtspr SPR_MAS3, %r22 isync mtspr SPR_MAS7, %r23 isync mflr %r21 bl zero_mas8 mtlr %r21 tlbwe isync msync blr /***************************************************************************** * Instruction TLB miss interrupt * * Same notes as for the Data TLB miss ****************************************************************************/ INTERRUPT(int_inst_tlb_error) TLB_PROLOG TLB_LOCK mfsrr0 %r31 /* faulting address */ /* * Save MAS0-MAS2 registers. There might be another tlb miss during pte * lookup overwriting current contents (which was hw filled). */ mfspr %r29, SPR_MAS0 mfspr %r28, SPR_MAS1 mfspr %r27, SPR_MAS2 mfsrr1 %r21 mtcr %r21 /* check MSR[PR] */ bt 17, search_user_pmap b search_kernel_pmap .globl interrupt_vector_top interrupt_vector_top: /***************************************************************************** * Debug interrupt ****************************************************************************/ INTERRUPT(int_debug) STANDARD_CRIT_PROLOG(SPR_SPRG2, PC_BOOKE_CRITSAVE, SPR_CSRR0, SPR_CSRR1) FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_DEBUG) bl int_debug_int FRAME_LEAVE(SPR_CSRR0, SPR_CSRR1) rfci INTERRUPT(int_debug_ed) STANDARD_CRIT_PROLOG(SPR_SPRG2, PC_BOOKE_CRITSAVE, SPR_DSRR0, SPR_DSRR1) FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_DEBUG) bl int_debug_int FRAME_LEAVE(SPR_DSRR0, SPR_DSRR1) rfdi /* .long 0x4c00004e */ /* Internal helper for debug interrupt handling. */ /* Common code between e500v1/v2 and e500mc-based cores. */ int_debug_int: mflr %r14 GET_CPUINFO(%r3) LOAD %r3, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR0)(%r3) bl 0f ADDR(interrupt_vector_base-.) ADDR(interrupt_vector_top-.) 0: mflr %r5 LOAD %r4,0(%r5) /* interrupt_vector_base in r4 */ add %r4,%r4,%r5 CMPL cr0, %r3, %r4 blt trap_common LOAD %r4,WORD_SIZE(%r5) /* interrupt_vector_top in r4 */ add %r4,%r4,%r5 addi %r4,%r4,4 CMPL cr0, %r3, %r4 bge trap_common /* Disable single-stepping for the interrupt handlers. 
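 * (The rlwinm below clears PSL_DE in the trapframe copy of SRR1, so the
 * debug exception does not keep re-triggering while the low-level
 * interrupt handlers themselves are executing.)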
*/ LOAD %r3, FRAME_SRR1+CALLSIZE(%r1); rlwinm %r3, %r3, 0, 23, 21 STORE %r3, FRAME_SRR1+CALLSIZE(%r1); /* Restore srr0 and srr1 as they could have been clobbered. */ GET_CPUINFO(%r4) LOAD %r3, (PC_BOOKE_CRITSAVE+BOOKE_CRITSAVE_SRR0)(%r4); mtspr SPR_SRR0, %r3 LOAD %r4, (PC_BOOKE_CRITSAVE+BOOKE_CRITSAVE_SRR1)(%r4); mtspr SPR_SRR1, %r4 mtlr %r14 blr /***************************************************************************** * Common trap code ****************************************************************************/ trap_common: /* Call C trap dispatcher */ GET_TOCBASE(%r2) addi %r3, %r1, CALLSIZE bl CNAME(powerpc_interrupt) TOC_RESTORE .globl CNAME(trapexit) /* exported for db_backtrace use */ CNAME(trapexit): /* disable interrupts */ wrteei 0 /* Test AST pending - makes sense for user process only */ LOAD %r5, FRAME_SRR1+CALLSIZE(%r1) mtcr %r5 bf 17, 1f GET_CPUINFO(%r3) LOAD %r4, PC_CURTHREAD(%r3) lwz %r4, TD_FLAGS(%r4) lis %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@h ori %r5, %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@l and. %r4, %r4, %r5 beq 1f /* re-enable interrupts before calling ast() */ wrteei 1 addi %r3, %r1, CALLSIZE bl CNAME(ast) TOC_RESTORE .globl CNAME(asttrapexit) /* db_backtrace code sentinel #2 */ CNAME(asttrapexit): b trapexit /* test ast ret value ? */ 1: FRAME_LEAVE(SPR_SRR0, SPR_SRR1) rfi #if defined(KDB) /* * Deliberate entry to dbtrap */ /* .globl CNAME(breakpoint)*/ ASENTRY_NOPROF(breakpoint) mtsprg1 %r1 mfmsr %r3 mtsrr1 %r3 li %r4, ~(PSL_EE | PSL_ME)@l oris %r4, %r4, ~(PSL_EE | PSL_ME)@h and %r3, %r3, %r4 mtmsr %r3 /* disable interrupts */ isync GET_CPUINFO(%r3) STORE %r30, (PC_DBSAVE+CPUSAVE_R30)(%r3) STORE %r31, (PC_DBSAVE+CPUSAVE_R31)(%r3) mflr %r31 mtsrr0 %r31 mfdear %r30 mfesr %r31 STORE %r30, (PC_DBSAVE+CPUSAVE_BOOKE_DEAR)(%r3) STORE %r31, (PC_DBSAVE+CPUSAVE_BOOKE_ESR)(%r3) mfsrr0 %r30 mfsrr1 %r31 STORE %r30, (PC_DBSAVE+CPUSAVE_SRR0)(%r3) STORE %r31, (PC_DBSAVE+CPUSAVE_SRR1)(%r3) isync mfcr %r30 /* * Now the kdb trap catching code. */ dbtrap: FRAME_SETUP(SPR_SPRG1, PC_DBSAVE, EXC_DEBUG) /* Call C trap code: */ GET_TOCBASE(%r2) addi %r3, %r1, CALLSIZE bl CNAME(db_trap_glue) TOC_RESTORE or. %r3, %r3, %r3 bne dbleave /* This wasn't for KDB, so switch to real trap: */ b trap_common dbleave: FRAME_LEAVE(SPR_SRR0, SPR_SRR1) rfi #endif /* KDB */ #ifdef SMP ENTRY(tlb_lock) GET_CPUINFO(%r5) LOAD %r5, PC_CURTHREAD(%r5) 1: LOADX %r4, 0, %r3 CMPI %r4, TLB_UNLOCKED bne 1b STOREX %r5, 0, %r3 bne- 1b isync msync blr ENTRY(tlb_unlock) isync msync li %r4, TLB_UNLOCKED STORE %r4, 0(%r3) isync msync blr /* * TLB miss spin locks. For each CPU we have a reservation granule (32 bytes); * only a single word from this granule will actually be used as a spin lock * for mutual exclusion between TLB miss handler and pmap layer that * manipulates page table contents. */ .data .align 5 GLOBAL(tlb0_miss_locks) .space RES_GRANULE * MAXCPU #endif Index: head/sys/powerpc/include/spr.h =================================================================== --- head/sys/powerpc/include/spr.h (revision 339513) +++ head/sys/powerpc/include/spr.h (revision 339514) @@ -1,849 +1,876 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: spr.h,v 1.25 2002/08/14 15:38:40 matt Exp $ * $FreeBSD$ */ #ifndef _POWERPC_SPR_H_ #define _POWERPC_SPR_H_ #ifndef _LOCORE #define mtspr(reg, val) \ __asm __volatile("mtspr %0,%1" : : "K"(reg), "r"(val)) #define mfspr(reg) \ ( { register_t val; \ __asm __volatile("mfspr %0,%1" : "=r"(val) : "K"(reg)); \ val; } ) #ifndef __powerpc64__ /* The following routines allow manipulation of the full 64-bit width * of SPRs on 64 bit CPUs in bridge mode */ #define mtspr64(reg,valhi,vallo,scratch) \ __asm __volatile(" \ mfmsr %0; \ insrdi %0,%5,1,0; \ mtmsrd %0; \ isync; \ \ sld %1,%1,%4; \ or %1,%1,%2; \ mtspr %3,%1; \ srd %1,%1,%4; \ \ clrldi %0,%0,1; \ mtmsrd %0; \ isync;" \ : "=r"(scratch), "=r"(valhi) : "r"(vallo), "K"(reg), "r"(32), "r"(1)) #define mfspr64upper(reg,scratch) \ ( { register_t val; \ __asm __volatile(" \ mfmsr %0; \ insrdi %0,%4,1,0; \ mtmsrd %0; \ isync; \ \ mfspr %1,%2; \ srd %1,%1,%3; \ \ clrldi %0,%0,1; \ mtmsrd %0; \ isync;" \ : "=r"(scratch), "=r"(val) : "K"(reg), "r"(32), "r"(1)); \ val; } ) #endif #endif /* _LOCORE */ /* * Special Purpose Register declarations. * * The first column in the comments indicates which PowerPC * architectures the SPR is valid on - 4 for 4xx series, * 6 for 6xx/7xx series and 8 for 8xx and 8xxx series. */ #define SPR_MQ 0x000 /* .6. 601 MQ register */ #define SPR_XER 0x001 /* 468 Fixed Point Exception Register */ #define SPR_RTCU_R 0x004 /* .6. 601 RTC Upper - Read */ #define SPR_RTCL_R 0x005 /* .6. 601 RTC Lower - Read */ #define SPR_LR 0x008 /* 468 Link Register */ #define SPR_CTR 0x009 /* 468 Count Register */ #define SPR_DSCR 0x011 /* Data Stream Control Register */ #define SPR_DSISR 0x012 /* .68 DSI exception source */ #define DSISR_DIRECT 0x80000000 /* Direct-store error exception */ #define DSISR_NOTFOUND 0x40000000 /* Translation not found */ #define DSISR_PROTECT 0x08000000 /* Memory access not permitted */ #define DSISR_INVRX 0x04000000 /* Reserve-indexed insn direct-store access */ #define DSISR_STORE 0x02000000 /* Store operation */ #define DSISR_DABR 0x00400000 /* DABR match */ #define DSISR_SEGMENT 0x00200000 /* XXX; not in 6xx PEM */ #define DSISR_EAR 0x00100000 /* eciwx/ecowx && EAR[E] == 0 */ #define SPR_DAR 0x013 /* .68 Data Address Register */ #define SPR_RTCU_W 0x014 /* .6. 601 RTC Upper - Write */ #define SPR_RTCL_W 0x015 /* .6. 
601 RTC Lower - Write */ #define SPR_DEC 0x016 /* .68 DECrementer register */ #define SPR_SDR1 0x019 /* .68 Page table base address register */ #define SPR_SRR0 0x01a /* 468 Save/Restore Register 0 */ #define SPR_SRR1 0x01b /* 468 Save/Restore Register 1 */ #define SRR1_ISI_PFAULT 0x40000000 /* ISI page not found */ #define SRR1_ISI_NOEXECUTE 0x10000000 /* Memory marked no-execute */ #define SRR1_ISI_PP 0x08000000 /* PP bits forbid access */ #define SPR_DECAR 0x036 /* ..8 Decrementer auto reload */ #define SPR_EIE 0x050 /* ..8 Exception Interrupt ??? */ #define SPR_EID 0x051 /* ..8 Exception Interrupt ??? */ #define SPR_NRI 0x052 /* ..8 Exception Interrupt ??? */ #define SPR_FSCR 0x099 /* Facility Status and Control Register */ #define FSCR_IC_MASK 0xFF00000000000000ULL /* FSCR[0:7] is Interrupt Cause */ #define FSCR_IC_FP 0x0000000000000000ULL /* FP unavailable */ #define FSCR_IC_VSX 0x0100000000000000ULL /* VSX unavailable */ #define FSCR_IC_DSCR 0x0200000000000000ULL /* Access to the DSCR at SPRs 3 or 17 */ #define FSCR_IC_PM 0x0300000000000000ULL /* Read or write access of a Performance Monitor SPR in group A */ #define FSCR_IC_BHRB 0x0400000000000000ULL /* Execution of a BHRB Instruction */ #define FSCR_IC_HTM 0x0500000000000000ULL /* Access to a Transactional Memory */ /* Reserved 0x0600000000000000ULL */ #define FSCR_IC_EBB 0x0700000000000000ULL /* Access to Event-Based Branch */ #define FSCR_IC_TAR 0x0800000000000000ULL /* Access to Target Address Register */ #define FSCR_IC_STOP 0x0900000000000000ULL /* Access to the 'stop' instruction in privileged non-hypervisor state */ #define FSCR_IC_MSG 0x0A00000000000000ULL /* Access to 'msgsndp' or 'msgclrp' instructions */ #define FSCR_IC_SCV 0x0C00000000000000ULL /* Execution of a 'scv' instruction */ #define SPR_USPRG0 0x100 /* 4.. User SPR General 0 */ #define SPR_VRSAVE 0x100 /* .6. AltiVec VRSAVE */ #define SPR_SPRG0 0x110 /* 468 SPR General 0 */ #define SPR_SPRG1 0x111 /* 468 SPR General 1 */ #define SPR_SPRG2 0x112 /* 468 SPR General 2 */ #define SPR_SPRG3 0x113 /* 468 SPR General 3 */ #define SPR_SPRG4 0x114 /* 4.. SPR General 4 */ #define SPR_SPRG5 0x115 /* 4.. SPR General 5 */ #define SPR_SPRG6 0x116 /* 4.. SPR General 6 */ #define SPR_SPRG7 0x117 /* 4.. SPR General 7 */ #define SPR_SCOMC 0x114 /* ... SCOM Address Register (970) */ #define SPR_SCOMD 0x115 /* ... SCOM Data Register (970) */ #define SPR_ASR 0x118 /* ... 
Address Space Register (PPC64) */ #define SPR_EAR 0x11a /* .68 External Access Register */ #define SPR_PVR 0x11f /* 468 Processor Version Register */ #define MPC601 0x0001 #define MPC603 0x0003 #define MPC604 0x0004 #define MPC602 0x0005 #define MPC603e 0x0006 #define MPC603ev 0x0007 #define MPC750 0x0008 #define MPC750CL 0x7000 /* Nintendo Wii's Broadway */ #define MPC604ev 0x0009 #define MPC7400 0x000c #define MPC620 0x0014 #define IBM403 0x0020 #define IBM401A1 0x0021 #define IBM401B2 0x0022 #define IBM401C2 0x0023 #define IBM401D2 0x0024 #define IBM401E2 0x0025 #define IBM401F2 0x0026 #define IBM401G2 0x0027 #define IBMRS64II 0x0033 #define IBMRS64III 0x0034 #define IBMPOWER4 0x0035 #define IBMRS64III_2 0x0036 #define IBMRS64IV 0x0037 #define IBMPOWER4PLUS 0x0038 #define IBM970 0x0039 #define IBMPOWER5 0x003a #define IBMPOWER5PLUS 0x003b #define IBM970FX 0x003c #define IBMPOWER6 0x003e #define IBMPOWER7 0x003f #define IBMPOWER3 0x0040 #define IBMPOWER3PLUS 0x0041 #define IBM970MP 0x0044 #define IBM970GX 0x0045 #define IBMPOWERPCA2 0x0049 #define IBMPOWER7PLUS 0x004a #define IBMPOWER8E 0x004b #define IBMPOWER8 0x004d #define IBMPOWER9 0x004e #define MPC860 0x0050 #define IBMCELLBE 0x0070 #define MPC8240 0x0081 #define PA6T 0x0090 #define IBM405GP 0x4011 #define IBM405L 0x4161 #define IBM750FX 0x7000 #define MPC745X_P(v) ((v & 0xFFF8) == 0x8000) #define MPC7450 0x8000 #define MPC7455 0x8001 #define MPC7457 0x8002 #define MPC7447A 0x8003 #define MPC7448 0x8004 #define MPC7410 0x800c #define MPC8245 0x8081 #define FSL_E500v1 0x8020 #define FSL_E500v2 0x8021 #define FSL_E500mc 0x8023 #define FSL_E5500 0x8024 #define FSL_E6500 0x8040 #define FSL_E300C1 0x8083 #define FSL_E300C2 0x8084 #define FSL_E300C3 0x8085 #define FSL_E300C4 0x8086 #define LPCR_PECE_WAKESET (LPCR_PECE_EXT | LPCR_PECE_DECR | LPCR_PECE_ME) #define SPR_EPCR 0x133 #define EPCR_EXTGS 0x80000000 #define EPCR_DTLBGS 0x40000000 #define EPCR_ITLBGS 0x20000000 #define EPCR_DSIGS 0x10000000 #define EPCR_ISIGS 0x08000000 #define EPCR_DUVGS 0x04000000 #define EPCR_ICM 0x02000000 #define EPCR_GICMGS 0x01000000 #define EPCR_DGTMI 0x00800000 #define EPCR_DMIUH 0x00400000 #define EPCR_PMGS 0x00200000 #define SPR_HSRR0 0x13a #define SPR_HSRR1 0x13b #define SPR_LPCR 0x13e /* Logical Partitioning Control */ #define LPCR_LPES 0x008 /* Bit 60 */ #define LPCR_HVICE 0x002 /* Hypervisor Virtualization Interrupt (Arch 3.0) */ #define LPCR_PECE_DRBL (1ULL << 16) /* Directed Privileged Doorbell */ #define LPCR_PECE_HDRBL (1ULL << 15) /* Directed Hypervisor Doorbell */ #define LPCR_PECE_EXT (1ULL << 14) /* External exceptions */ #define LPCR_PECE_DECR (1ULL << 13) /* Decrementer exceptions */ #define LPCR_PECE_ME (1ULL << 12) /* Machine Check and Hypervisor */ /* Maintenance exceptions */ #define SPR_LPID 0x13f /* Logical Partitioning Control */ #define SPR_PTCR 0x1d0 /* Partition Table Control Register */ #define SPR_SPEFSCR 0x200 /* ..8 Signal Processing Engine FSCR. 
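 * The bit definitions added below group into: per-element "high word"
 * status (SPEFSCR_*H), sticky summary status (SPEFSCR_F*S), current
 * status/overflow bits, exception enables (SPEFSCR_F*E), and the two-bit
 * rounding-mode field (SPEFSCR_FRMC_M).  As a rough usage sketch (not
 * code from this change), a handler could acknowledge the sticky bits
 * with the mfspr()/mtspr() helpers defined earlier in this file:
 *
 *	register_t reg = mfspr(SPR_SPEFSCR);
 *	mtspr(SPR_SPEFSCR, reg & ~(SPEFSCR_FINXS | SPEFSCR_FINVS |
 *	    SPEFSCR_FDBZS | SPEFSCR_FUNFS | SPEFSCR_FOVFS));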
*/ +#define SPEFSCR_SOVH 0x80000000 +#define SPEFSCR_OVH 0x40000000 +#define SPEFSCR_FGH 0x20000000 +#define SPEFSCR_FXH 0x10000000 +#define SPEFSCR_FINVH 0x08000000 +#define SPEFSCR_FDBZH 0x04000000 +#define SPEFSCR_FUNFH 0x02000000 +#define SPEFSCR_FOVFH 0x01000000 +#define SPEFSCR_FINXS 0x00200000 +#define SPEFSCR_FINVS 0x00100000 +#define SPEFSCR_FDBZS 0x00080000 +#define SPEFSCR_FUNFS 0x00040000 +#define SPEFSCR_FOVFS 0x00020000 +#define SPEFSCR_SOV 0x00008000 +#define SPEFSCR_OV 0x00004000 +#define SPEFSCR_FG 0x00002000 +#define SPEFSCR_FX 0x00001000 +#define SPEFSCR_FINV 0x00000800 +#define SPEFSCR_FDBZ 0x00000400 +#define SPEFSCR_FUNF 0x00000200 +#define SPEFSCR_FOVF 0x00000100 +#define SPEFSCR_FINXE 0x00000040 +#define SPEFSCR_FINVE 0x00000020 +#define SPEFSCR_FDBZE 0x00000010 +#define SPEFSCR_FUNFE 0x00000008 +#define SPEFSCR_FOVFE 0x00000004 +#define SPEFSCR_FRMC_M 0x00000003 #define SPR_IBAT0U 0x210 /* .6. Instruction BAT Reg 0 Upper */ #define SPR_IBAT0L 0x211 /* .6. Instruction BAT Reg 0 Lower */ #define SPR_IBAT1U 0x212 /* .6. Instruction BAT Reg 1 Upper */ #define SPR_IBAT1L 0x213 /* .6. Instruction BAT Reg 1 Lower */ #define SPR_IBAT2U 0x214 /* .6. Instruction BAT Reg 2 Upper */ #define SPR_IBAT2L 0x215 /* .6. Instruction BAT Reg 2 Lower */ #define SPR_IBAT3U 0x216 /* .6. Instruction BAT Reg 3 Upper */ #define SPR_IBAT3L 0x217 /* .6. Instruction BAT Reg 3 Lower */ #define SPR_DBAT0U 0x218 /* .6. Data BAT Reg 0 Upper */ #define SPR_DBAT0L 0x219 /* .6. Data BAT Reg 0 Lower */ #define SPR_DBAT1U 0x21a /* .6. Data BAT Reg 1 Upper */ #define SPR_DBAT1L 0x21b /* .6. Data BAT Reg 1 Lower */ #define SPR_DBAT2U 0x21c /* .6. Data BAT Reg 2 Upper */ #define SPR_DBAT2L 0x21d /* .6. Data BAT Reg 2 Lower */ #define SPR_DBAT3U 0x21e /* .6. Data BAT Reg 3 Upper */ #define SPR_DBAT3L 0x21f /* .6. Data BAT Reg 3 Lower */ #define SPR_IC_CST 0x230 /* ..8 Instruction Cache CSR */ #define IC_CST_IEN 0x80000000 /* I cache is ENabled (RO) */ #define IC_CST_CMD_INVALL 0x0c000000 /* I cache invalidate all */ #define IC_CST_CMD_UNLOCKALL 0x0a000000 /* I cache unlock all */ #define IC_CST_CMD_UNLOCK 0x08000000 /* I cache unlock block */ #define IC_CST_CMD_LOADLOCK 0x06000000 /* I cache load & lock block */ #define IC_CST_CMD_DISABLE 0x04000000 /* I cache disable */ #define IC_CST_CMD_ENABLE 0x02000000 /* I cache enable */ #define IC_CST_CCER1 0x00200000 /* I cache error type 1 (RO) */ #define IC_CST_CCER2 0x00100000 /* I cache error type 2 (RO) */ #define IC_CST_CCER3 0x00080000 /* I cache error type 3 (RO) */ #define SPR_IBAT4U 0x230 /* .6. Instruction BAT Reg 4 Upper */ #define SPR_IC_ADR 0x231 /* ..8 Instruction Cache Address */ #define SPR_IBAT4L 0x231 /* .6. Instruction BAT Reg 4 Lower */ #define SPR_IC_DAT 0x232 /* ..8 Instruction Cache Data */ #define SPR_IBAT5U 0x232 /* .6. Instruction BAT Reg 5 Upper */ #define SPR_IBAT5L 0x233 /* .6. Instruction BAT Reg 5 Lower */ #define SPR_IBAT6U 0x234 /* .6. Instruction BAT Reg 6 Upper */ #define SPR_IBAT6L 0x235 /* .6. Instruction BAT Reg 6 Lower */ #define SPR_IBAT7U 0x236 /* .6. Instruction BAT Reg 7 Upper */ #define SPR_IBAT7L 0x237 /* .6. 
Instruction BAT Reg 7 Lower */ #define SPR_DC_CST 0x230 /* ..8 Data Cache CSR */ #define DC_CST_DEN 0x80000000 /* D cache ENabled (RO) */ #define DC_CST_DFWT 0x40000000 /* D cache Force Write-Thru (RO) */ #define DC_CST_LES 0x20000000 /* D cache Little Endian Swap (RO) */ #define DC_CST_CMD_FLUSH 0x0e000000 /* D cache invalidate all */ #define DC_CST_CMD_INVALL 0x0c000000 /* D cache invalidate all */ #define DC_CST_CMD_UNLOCKALL 0x0a000000 /* D cache unlock all */ #define DC_CST_CMD_UNLOCK 0x08000000 /* D cache unlock block */ #define DC_CST_CMD_CLRLESWAP 0x07000000 /* D cache clr little-endian swap */ #define DC_CST_CMD_LOADLOCK 0x06000000 /* D cache load & lock block */ #define DC_CST_CMD_SETLESWAP 0x05000000 /* D cache set little-endian swap */ #define DC_CST_CMD_DISABLE 0x04000000 /* D cache disable */ #define DC_CST_CMD_CLRFWT 0x03000000 /* D cache clear forced write-thru */ #define DC_CST_CMD_ENABLE 0x02000000 /* D cache enable */ #define DC_CST_CMD_SETFWT 0x01000000 /* D cache set forced write-thru */ #define DC_CST_CCER1 0x00200000 /* D cache error type 1 (RO) */ #define DC_CST_CCER2 0x00100000 /* D cache error type 2 (RO) */ #define DC_CST_CCER3 0x00080000 /* D cache error type 3 (RO) */ #define SPR_DBAT4U 0x238 /* .6. Data BAT Reg 4 Upper */ #define SPR_DC_ADR 0x231 /* ..8 Data Cache Address */ #define SPR_DBAT4L 0x239 /* .6. Data BAT Reg 4 Lower */ #define SPR_DC_DAT 0x232 /* ..8 Data Cache Data */ #define SPR_DBAT5U 0x23a /* .6. Data BAT Reg 5 Upper */ #define SPR_DBAT5L 0x23b /* .6. Data BAT Reg 5 Lower */ #define SPR_DBAT6U 0x23c /* .6. Data BAT Reg 6 Upper */ #define SPR_DBAT6L 0x23d /* .6. Data BAT Reg 6 Lower */ #define SPR_DBAT7U 0x23e /* .6. Data BAT Reg 7 Upper */ #define SPR_DBAT7L 0x23f /* .6. Data BAT Reg 7 Lower */ #define SPR_SPRG8 0x25c /* ..8 SPR General 8 */ #define SPR_MI_CTR 0x310 /* ..8 IMMU control */ #define Mx_CTR_GPM 0x80000000 /* Group Protection Mode */ #define Mx_CTR_PPM 0x40000000 /* Page Protection Mode */ #define Mx_CTR_CIDEF 0x20000000 /* Cache-Inhibit DEFault */ #define MD_CTR_WTDEF 0x20000000 /* Write-Through DEFault */ #define Mx_CTR_RSV4 0x08000000 /* Reserve 4 TLB entries */ #define MD_CTR_TWAM 0x04000000 /* TableWalk Assist Mode */ #define Mx_CTR_PPCS 0x02000000 /* Priv/user state compare mode */ #define Mx_CTR_TLB_INDX 0x000001f0 /* TLB index mask */ #define Mx_CTR_TLB_INDX_BITPOS 8 /* TLB index shift */ #define SPR_MI_AP 0x312 /* ..8 IMMU access protection */ #define Mx_GP_SUPER(n) (0 << (2*(15-(n)))) /* access is supervisor */ #define Mx_GP_PAGE (1 << (2*(15-(n)))) /* access is page protect */ #define Mx_GP_SWAPPED (2 << (2*(15-(n)))) /* access is swapped */ #define Mx_GP_USER (3 << (2*(15-(n)))) /* access is user */ #define SPR_MI_EPN 0x313 /* ..8 IMMU effective number */ #define Mx_EPN_EPN 0xfffff000 /* Effective Page Number mask */ #define Mx_EPN_EV 0x00000020 /* Entry Valid */ #define Mx_EPN_ASID 0x0000000f /* Address Space ID */ #define SPR_MI_TWC 0x315 /* ..8 IMMU tablewalk control */ #define MD_TWC_L2TB 0xfffff000 /* Level-2 Tablewalk Base */ #define Mx_TWC_APG 0x000001e0 /* Access Protection Group */ #define Mx_TWC_G 0x00000010 /* Guarded memory */ #define Mx_TWC_PS 0x0000000c /* Page Size (L1) */ #define MD_TWC_WT 0x00000002 /* Write-Through */ #define Mx_TWC_V 0x00000001 /* Entry Valid */ #define SPR_MI_RPN 0x316 /* ..8 IMMU real (phys) page number */ #define Mx_RPN_RPN 0xfffff000 /* Real Page Number */ #define Mx_RPN_PP 0x00000ff0 /* Page Protection */ #define Mx_RPN_SPS 0x00000008 /* Small Page Size */ #define Mx_RPN_SH 
0x00000004 /* SHared page */ #define Mx_RPN_CI 0x00000002 /* Cache Inhibit */ #define Mx_RPN_V 0x00000001 /* Valid */ #define SPR_MD_CTR 0x318 /* ..8 DMMU control */ #define SPR_M_CASID 0x319 /* ..8 CASID */ #define M_CASID 0x0000000f /* Current AS Id */ #define SPR_MD_AP 0x31a /* ..8 DMMU access protection */ #define SPR_MD_EPN 0x31b /* ..8 DMMU effective number */ #define SPR_970MMCR0 0x31b /* ... Monitor Mode Control Register 0 (PPC 970) */ #define SPR_970MMCR0_PMC1SEL(x) ((x) << 8) /* PMC1 selector (970) */ #define SPR_970MMCR0_PMC2SEL(x) ((x) << 1) /* PMC2 selector (970) */ #define SPR_970MMCR1 0x31e /* ... Monitor Mode Control Register 1 (PPC 970) */ #define SPR_970MMCR1_PMC3SEL(x) (((x) & 0x1f) << 27) /* PMC 3 selector */ #define SPR_970MMCR1_PMC4SEL(x) (((x) & 0x1f) << 22) /* PMC 4 selector */ #define SPR_970MMCR1_PMC5SEL(x) (((x) & 0x1f) << 17) /* PMC 5 selector */ #define SPR_970MMCR1_PMC6SEL(x) (((x) & 0x1f) << 12) /* PMC 6 selector */ #define SPR_970MMCR1_PMC7SEL(x) (((x) & 0x1f) << 7) /* PMC 7 selector */ #define SPR_970MMCR1_PMC8SEL(x) (((x) & 0x1f) << 2) /* PMC 8 selector */ #define SPR_970MMCRA 0x312 /* ... Monitor Mode Control Register 2 (PPC 970) */ #define SPR_970PMC1 0x313 /* ... PMC 1 */ #define SPR_970PMC2 0x314 /* ... PMC 2 */ #define SPR_970PMC3 0x315 /* ... PMC 3 */ #define SPR_970PMC4 0x316 /* ... PMC 4 */ #define SPR_970PMC5 0x317 /* ... PMC 5 */ #define SPR_970PMC6 0x318 /* ... PMC 6 */ #define SPR_970PMC7 0x319 /* ... PMC 7 */ #define SPR_970PMC8 0x31a /* ... PMC 8 */ #define SPR_M_TWB 0x31c /* ..8 MMU tablewalk base */ #define M_TWB_L1TB 0xfffff000 /* level-1 translation base */ #define M_TWB_L1INDX 0x00000ffc /* level-1 index */ #define SPR_MD_TWC 0x31d /* ..8 DMMU tablewalk control */ #define SPR_MD_RPN 0x31e /* ..8 DMMU real (phys) page number */ #define SPR_MD_TW 0x31f /* ..8 MMU tablewalk scratch */ #define SPR_MI_CAM 0x330 /* ..8 IMMU CAM entry read */ #define SPR_MI_RAM0 0x331 /* ..8 IMMU RAM entry read reg 0 */ #define SPR_MI_RAM1 0x332 /* ..8 IMMU RAM entry read reg 1 */ #define SPR_MD_CAM 0x338 /* ..8 IMMU CAM entry read */ #define SPR_MD_RAM0 0x339 /* ..8 IMMU RAM entry read reg 0 */ #define SPR_MD_RAM1 0x33a /* ..8 IMMU RAM entry read reg 1 */ #define SPR_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */ #define SPR_PMCR 0x374 /* Processor Management Control Register */ #define SPR_UMMCR2 0x3a0 /* .6. User Monitor Mode Control Register 2 */ #define SPR_UMMCR0 0x3a8 /* .6. User Monitor Mode Control Register 0 */ #define SPR_USIA 0x3ab /* .6. User Sampled Instruction Address */ #define SPR_UMMCR1 0x3ac /* .6. User Monitor Mode Control Register 1 */ #define SPR_ZPR 0x3b0 /* 4.. Zone Protection Register */ #define SPR_MMCR2 0x3b0 /* .6. Monitor Mode Control Register 2 */ #define SPR_MMCR2_THRESHMULT_32 0x80000000 /* Multiply MMCR0 threshold by 32 */ #define SPR_MMCR2_THRESHMULT_2 0x00000000 /* Multiply MMCR0 threshold by 2 */ #define SPR_PID 0x3b1 /* 4.. Process ID */ #define SPR_PMC5 0x3b1 /* .6. Performance Counter Register 5 */ #define SPR_PMC6 0x3b2 /* .6. Performance Counter Register 6 */ #define SPR_CCR0 0x3b3 /* 4.. Core Configuration Register 0 */ #define SPR_IAC3 0x3b4 /* 4.. Instruction Address Compare 3 */ #define SPR_IAC4 0x3b5 /* 4.. Instruction Address Compare 4 */ #define SPR_DVC1 0x3b6 /* 4.. Data Value Compare 1 */ #define SPR_DVC2 0x3b7 /* 4.. Data Value Compare 2 */ #define SPR_MMCR0 0x3b8 /* .6. 
Monitor Mode Control Register 0 */ #define SPR_MMCR0_FC 0x80000000 /* Freeze counters */ #define SPR_MMCR0_FCS 0x40000000 /* Freeze counters in supervisor mode */ #define SPR_MMCR0_FCP 0x20000000 /* Freeze counters in user mode */ #define SPR_MMCR0_FCM1 0x10000000 /* Freeze counters when mark=1 */ #define SPR_MMCR0_FCM0 0x08000000 /* Freeze counters when mark=0 */ #define SPR_MMCR0_PMXE 0x04000000 /* Enable PM interrupt */ #define SPR_MMCR0_FCECE 0x02000000 /* Freeze counters after event */ #define SPR_MMCR0_TBSEL_15 0x01800000 /* Count bit 15 of TBL */ #define SPR_MMCR0_TBSEL_19 0x01000000 /* Count bit 19 of TBL */ #define SPR_MMCR0_TBSEL_23 0x00800000 /* Count bit 23 of TBL */ #define SPR_MMCR0_TBSEL_31 0x00000000 /* Count bit 31 of TBL */ #define SPR_MMCR0_TBEE 0x00400000 /* Time-base event enable */ #define SPR_MMCRO_THRESHOLD(x) ((x) << 16) /* Threshold value */ #define SPR_MMCR0_PMC1CE 0x00008000 /* PMC1 condition enable */ #define SPR_MMCR0_PMCNCE 0x00004000 /* PMCn condition enable */ #define SPR_MMCR0_TRIGGER 0x00002000 /* Trigger */ #define SPR_MMCR0_PMC1SEL(x) (((x) & 0x3f) << 6) /* PMC1 selector */ #define SPR_MMCR0_PMC2SEL(x) (((x) & 0x3f) << 0) /* PMC2 selector */ #define SPR_SGR 0x3b9 /* 4.. Storage Guarded Register */ #define SPR_PMC1 0x3b9 /* .6. Performance Counter Register 1 */ #define SPR_DCWR 0x3ba /* 4.. Data Cache Write-through Register */ #define SPR_PMC2 0x3ba /* .6. Performance Counter Register 2 */ #define SPR_SLER 0x3bb /* 4.. Storage Little Endian Register */ #define SPR_SIA 0x3bb /* .6. Sampled Instruction Address */ #define SPR_MMCR1 0x3bc /* .6. Monitor Mode Control Register 2 */ #define SPR_MMCR1_PMC3SEL(x) (((x) & 0x1f) << 27) /* PMC 3 selector */ #define SPR_MMCR1_PMC4SEL(x) (((x) & 0x1f) << 22) /* PMC 4 selector */ #define SPR_MMCR1_PMC5SEL(x) (((x) & 0x1f) << 17) /* PMC 5 selector */ #define SPR_MMCR1_PMC6SEL(x) (((x) & 0x3f) << 11) /* PMC 6 selector */ #define SPR_SU0R 0x3bc /* 4.. Storage User-defined 0 Register */ #define SPR_PMC3 0x3bd /* .6. Performance Counter Register 3 */ #define SPR_PMC4 0x3be /* .6. Performance Counter Register 4 */ #define SPR_DMISS 0x3d0 /* .68 Data TLB Miss Address Register */ #define SPR_DCMP 0x3d1 /* .68 Data TLB Compare Register */ #define SPR_HASH1 0x3d2 /* .68 Primary Hash Address Register */ #define SPR_ICDBDR 0x3d3 /* 4.. Instruction Cache Debug Data Register */ #define SPR_HASH2 0x3d3 /* .68 Secondary Hash Address Register */ #define SPR_IMISS 0x3d4 /* .68 Instruction TLB Miss Address Register */ #define SPR_TLBMISS 0x3d4 /* .6. TLB Miss Address Register */ #define SPR_DEAR 0x3d5 /* 4.. Data Error Address Register */ #define SPR_ICMP 0x3d5 /* .68 Instruction TLB Compare Register */ #define SPR_PTEHI 0x3d5 /* .6. Instruction TLB Compare Register */ #define SPR_EVPR 0x3d6 /* 4.. Exception Vector Prefix Register */ #define SPR_RPA 0x3d6 /* .68 Required Physical Address Register */ #define SPR_PTELO 0x3d6 /* .6. 
Required Physical Address Register */ #define SPR_TSR 0x150 /* ..8 Timer Status Register */ #define SPR_TCR 0x154 /* ..8 Timer Control Register */ #define TSR_ENW 0x80000000 /* Enable Next Watchdog */ #define TSR_WIS 0x40000000 /* Watchdog Interrupt Status */ #define TSR_WRS_MASK 0x30000000 /* Watchdog Reset Status */ #define TSR_WRS_NONE 0x00000000 /* No watchdog reset has occurred */ #define TSR_WRS_CORE 0x10000000 /* Core reset was forced by the watchdog */ #define TSR_WRS_CHIP 0x20000000 /* Chip reset was forced by the watchdog */ #define TSR_WRS_SYSTEM 0x30000000 /* System reset was forced by the watchdog */ #define TSR_PIS 0x08000000 /* PIT Interrupt Status */ #define TSR_DIS 0x08000000 /* Decrementer Interrupt Status */ #define TSR_FIS 0x04000000 /* FIT Interrupt Status */ #define TCR_WP_MASK 0xc0000000 /* Watchdog Period mask */ #define TCR_WP_2_17 0x00000000 /* 2**17 clocks */ #define TCR_WP_2_21 0x40000000 /* 2**21 clocks */ #define TCR_WP_2_25 0x80000000 /* 2**25 clocks */ #define TCR_WP_2_29 0xc0000000 /* 2**29 clocks */ #define TCR_WRC_MASK 0x30000000 /* Watchdog Reset Control mask */ #define TCR_WRC_NONE 0x00000000 /* No watchdog reset */ #define TCR_WRC_CORE 0x10000000 /* Core reset */ #define TCR_WRC_CHIP 0x20000000 /* Chip reset */ #define TCR_WRC_SYSTEM 0x30000000 /* System reset */ #define TCR_WIE 0x08000000 /* Watchdog Interrupt Enable */ #define TCR_PIE 0x04000000 /* PIT Interrupt Enable */ #define TCR_DIE 0x04000000 /* Pecrementer Interrupt Enable */ #define TCR_FP_MASK 0x03000000 /* FIT Period */ #define TCR_FP_2_9 0x00000000 /* 2**9 clocks */ #define TCR_FP_2_13 0x01000000 /* 2**13 clocks */ #define TCR_FP_2_17 0x02000000 /* 2**17 clocks */ #define TCR_FP_2_21 0x03000000 /* 2**21 clocks */ #define TCR_FIE 0x00800000 /* FIT Interrupt Enable */ #define TCR_ARE 0x00400000 /* Auto Reload Enable */ #define SPR_PIT 0x3db /* 4.. Programmable Interval Timer */ #define SPR_SRR2 0x3de /* 4.. Save/Restore Register 2 */ #define SPR_SRR3 0x3df /* 4.. Save/Restore Register 3 */ #define SPR_HID0 0x3f0 /* ..8 Hardware Implementation Register 0 */ #define SPR_HID1 0x3f1 /* ..8 Hardware Implementation Register 1 */ #define SPR_HID2 0x3f3 /* ..8 Hardware Implementation Register 2 */ #define SPR_HID4 0x3f4 /* ..8 Hardware Implementation Register 4 */ #define SPR_HID5 0x3f6 /* ..8 Hardware Implementation Register 5 */ #define SPR_HID6 0x3f9 /* ..8 Hardware Implementation Register 6 */ #define SPR_CELL_TSRL 0x380 /* ... Cell BE Thread Status Register */ #define SPR_CELL_TSCR 0x399 /* ... Cell BE Thread Switch Register */ #if defined(AIM) #define SPR_DBSR 0x3f0 /* 4.. Debug Status Register */ #define DBSR_IC 0x80000000 /* Instruction completion debug event */ #define DBSR_BT 0x40000000 /* Branch Taken debug event */ #define DBSR_EDE 0x20000000 /* Exception debug event */ #define DBSR_TIE 0x10000000 /* Trap Instruction debug event */ #define DBSR_UDE 0x08000000 /* Unconditional debug event */ #define DBSR_IA1 0x04000000 /* IAC1 debug event */ #define DBSR_IA2 0x02000000 /* IAC2 debug event */ #define DBSR_DR1 0x01000000 /* DAC1 Read debug event */ #define DBSR_DW1 0x00800000 /* DAC1 Write debug event */ #define DBSR_DR2 0x00400000 /* DAC2 Read debug event */ #define DBSR_DW2 0x00200000 /* DAC2 Write debug event */ #define DBSR_IDE 0x00100000 /* Imprecise debug event */ #define DBSR_IA3 0x00080000 /* IAC3 debug event */ #define DBSR_IA4 0x00040000 /* IAC4 debug event */ #define DBSR_MRR 0x00000300 /* Most recent reset */ #define SPR_DBCR0 0x3f2 /* 4.. 
Debug Control Register 0 */ #define SPR_DBCR1 0x3bd /* 4.. Debug Control Register 1 */ #define SPR_IAC1 0x3f4 /* 4.. Instruction Address Compare 1 */ #define SPR_IAC2 0x3f5 /* 4.. Instruction Address Compare 2 */ #define SPR_DAC1 0x3f6 /* 4.. Data Address Compare 1 */ #define SPR_DAC2 0x3f7 /* 4.. Data Address Compare 2 */ #define SPR_PIR 0x3ff /* .6. Processor Identification Register */ #elif defined(BOOKE) #define SPR_PIR 0x11e /* ..8 Processor Identification Register */ #define SPR_DBSR 0x130 /* ..8 Debug Status Register */ #define DBSR_IDE 0x80000000 /* Imprecise debug event. */ #define DBSR_UDE 0x40000000 /* Unconditional debug event. */ #define DBSR_MRR 0x30000000 /* Most recent Reset (mask). */ #define DBSR_ICMP 0x08000000 /* Instr. complete debug event. */ #define DBSR_BRT 0x04000000 /* Branch taken debug event. */ #define DBSR_IRPT 0x02000000 /* Interrupt taken debug event. */ #define DBSR_TRAP 0x01000000 /* Trap instr. debug event. */ #define DBSR_IAC1 0x00800000 /* Instr. address compare #1. */ #define DBSR_IAC2 0x00400000 /* Instr. address compare #2. */ #define DBSR_IAC3 0x00200000 /* Instr. address compare #3. */ #define DBSR_IAC4 0x00100000 /* Instr. address compare #4. */ #define DBSR_DAC1R 0x00080000 /* Data addr. read compare #1. */ #define DBSR_DAC1W 0x00040000 /* Data addr. write compare #1. */ #define DBSR_DAC2R 0x00020000 /* Data addr. read compare #2. */ #define DBSR_DAC2W 0x00010000 /* Data addr. write compare #2. */ #define DBSR_RET 0x00008000 /* Return debug event. */ #define SPR_DBCR0 0x134 /* ..8 Debug Control Register 0 */ #define SPR_DBCR1 0x135 /* ..8 Debug Control Register 1 */ #define SPR_IAC1 0x138 /* ..8 Instruction Address Compare 1 */ #define SPR_IAC2 0x139 /* ..8 Instruction Address Compare 2 */ #define SPR_DAC1 0x13c /* ..8 Data Address Compare 1 */ #define SPR_DAC2 0x13d /* ..8 Data Address Compare 2 */ #endif #define DBCR0_EDM 0x80000000 /* External Debug Mode */ #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ #define DBCR0_RST_MASK 0x30000000 /* ReSeT */ #define DBCR0_RST_NONE 0x00000000 /* No action */ #define DBCR0_RST_CORE 0x10000000 /* Core reset */ #define DBCR0_RST_CHIP 0x20000000 /* Chip reset */ #define DBCR0_RST_SYSTEM 0x30000000 /* System reset */ #define DBCR0_IC 0x08000000 /* Instruction Completion debug event */ #define DBCR0_BT 0x04000000 /* Branch Taken debug event */ #define DBCR0_EDE 0x02000000 /* Exception Debug Event */ #define DBCR0_TDE 0x01000000 /* Trap Debug Event */ #define DBCR0_IA1 0x00800000 /* IAC (Instruction Address Compare) 1 debug event */ #define DBCR0_IA2 0x00400000 /* IAC 2 debug event */ #define DBCR0_IA12 0x00200000 /* Instruction Address Range Compare 1-2 */ #define DBCR0_IA12X 0x00100000 /* IA12 eXclusive */ #define DBCR0_IA3 0x00080000 /* IAC 3 debug event */ #define DBCR0_IA4 0x00040000 /* IAC 4 debug event */ #define DBCR0_IA34 0x00020000 /* Instruction Address Range Compare 3-4 */ #define DBCR0_IA34X 0x00010000 /* IA34 eXclusive */ #define DBCR0_IA12T 0x00008000 /* Instruction Address Range Compare 1-2 range Toggle */ #define DBCR0_IA34T 0x00004000 /* Instruction Address Range Compare 3-4 range Toggle */ #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ #define SPR_IABR 0x3f2 /* ..8 Instruction Address Breakpoint Register 0 */ #define SPR_DABR 0x3f5 /* .6. Data Address Breakpoint Register */ #define SPR_MSSCR0 0x3f6 /* .6. 
Memory SubSystem Control Register */ #define MSSCR0_SHDEN 0x80000000 /* 0: Shared-state enable */ #define MSSCR0_SHDPEN3 0x40000000 /* 1: ~SHD[01] signal enable in MEI mode */ #define MSSCR0_L1INTVEN 0x38000000 /* 2-4: L1 data cache ~HIT intervention enable */ #define MSSCR0_L2INTVEN 0x07000000 /* 5-7: L2 data cache ~HIT intervention enable*/ #define MSSCR0_DL1HWF 0x00800000 /* 8: L1 data cache hardware flush */ #define MSSCR0_MBO 0x00400000 /* 9: must be one */ #define MSSCR0_EMODE 0x00200000 /* 10: MPX bus mode (read-only) */ #define MSSCR0_ABD 0x00100000 /* 11: address bus driven (read-only) */ #define MSSCR0_MBZ 0x000fffff /* 12-31: must be zero */ #define MSSCR0_L2PFE 0x00000003 /* 30-31: L2 prefetch enable */ #define SPR_MSSSR0 0x3f7 /* .6. Memory Subsystem Status Register (MPC745x) */ #define MSSSR0_L2TAG 0x00040000 /* 13: L2 tag parity error */ #define MSSSR0_L2DAT 0x00020000 /* 14: L2 data parity error */ #define MSSSR0_L3TAG 0x00010000 /* 15: L3 tag parity error */ #define MSSSR0_L3DAT 0x00008000 /* 16: L3 data parity error */ #define MSSSR0_APE 0x00004000 /* 17: Address parity error */ #define MSSSR0_DPE 0x00002000 /* 18: Data parity error */ #define MSSSR0_TEA 0x00001000 /* 19: Bus transfer error acknowledge */ #define SPR_LDSTCR 0x3f8 /* .6. Load/Store Control Register */ #define SPR_L2PM 0x3f8 /* .6. L2 Private Memory Control Register */ #define SPR_L2CR 0x3f9 /* .6. L2 Control Register */ #define L2CR_L2E 0x80000000 /* 0: L2 enable */ #define L2CR_L2PE 0x40000000 /* 1: L2 data parity enable */ #define L2CR_L2SIZ 0x30000000 /* 2-3: L2 size */ #define L2SIZ_2M 0x00000000 #define L2SIZ_256K 0x10000000 #define L2SIZ_512K 0x20000000 #define L2SIZ_1M 0x30000000 #define L2CR_L2CLK 0x0e000000 /* 4-6: L2 clock ratio */ #define L2CLK_DIS 0x00000000 /* disable L2 clock */ #define L2CLK_10 0x02000000 /* core clock / 1 */ #define L2CLK_15 0x04000000 /* / 1.5 */ #define L2CLK_20 0x08000000 /* / 2 */ #define L2CLK_25 0x0a000000 /* / 2.5 */ #define L2CLK_30 0x0c000000 /* / 3 */ #define L2CR_L2RAM 0x01800000 /* 7-8: L2 RAM type */ #define L2RAM_FLOWTHRU_BURST 0x00000000 #define L2RAM_PIPELINE_BURST 0x01000000 #define L2RAM_PIPELINE_LATE 0x01800000 #define L2CR_L2DO 0x00400000 /* 9: L2 data-only. Setting this bit disables instruction caching. */ #define L2CR_L2I 0x00200000 /* 10: L2 global invalidate. */ #define L2CR_L2IO_7450 0x00010000 /* 11: L2 instruction-only (MPC745x). */ #define L2CR_L2CTL 0x00100000 /* 11: L2 RAM control (ZZ enable). Enables automatic operation of the L2ZZ (low-power mode) signal. */ #define L2CR_L2WT 0x00080000 /* 12: L2 write-through. */ #define L2CR_L2TS 0x00040000 /* 13: L2 test support. */ #define L2CR_L2OH 0x00030000 /* 14-15: L2 output hold. */ #define L2CR_L2DO_7450 0x00010000 /* 15: L2 data-only (MPC745x). */ #define L2CR_L2SL 0x00008000 /* 16: L2 DLL slow. */ #define L2CR_L2DF 0x00004000 /* 17: L2 differential clock. */ #define L2CR_L2BYP 0x00002000 /* 18: L2 DLL bypass. */ #define L2CR_L2FA 0x00001000 /* 19: L2 flush assist (for software flush). */ #define L2CR_L2HWF 0x00000800 /* 20: L2 hardware flush. */ #define L2CR_L2IO 0x00000400 /* 21: L2 instruction-only. */ #define L2CR_L2CLKSTP 0x00000200 /* 22: L2 clock stop. */ #define L2CR_L2DRO 0x00000100 /* 23: L2DLL rollover checkstop enable. */ #define L2CR_L2IP 0x00000001 /* 31: L2 global invalidate in */ /* progress (read only). */ #define SPR_L3CR 0x3fa /* .6. 
L3 Control Register */ #define L3CR_L3E 0x80000000 /* 0: L3 enable */ #define L3CR_L3PE 0x40000000 /* 1: L3 data parity enable */ #define L3CR_L3APE 0x20000000 #define L3CR_L3SIZ 0x10000000 /* 3: L3 size (0=1MB, 1=2MB) */ #define L3CR_L3CLKEN 0x08000000 /* 4: Enables L3_CLK[0:1] */ #define L3CR_L3CLK 0x03800000 #define L3CR_L3IO 0x00400000 #define L3CR_L3CLKEXT 0x00200000 #define L3CR_L3CKSPEXT 0x00100000 #define L3CR_L3OH1 0x00080000 #define L3CR_L3SPO 0x00040000 #define L3CR_L3CKSP 0x00030000 #define L3CR_L3PSP 0x0000e000 #define L3CR_L3REP 0x00001000 #define L3CR_L3HWF 0x00000800 #define L3CR_L3I 0x00000400 /* 21: L3 global invalidate */ #define L3CR_L3RT 0x00000300 #define L3CR_L3NIRCA 0x00000080 #define L3CR_L3DO 0x00000040 #define L3CR_PMEN 0x00000004 #define L3CR_PMSIZ 0x00000003 #define SPR_DCCR 0x3fa /* 4.. Data Cache Cachability Register */ #define SPR_ICCR 0x3fb /* 4.. Instruction Cache Cachability Register */ #define SPR_THRM1 0x3fc /* .6. Thermal Management Register */ #define SPR_THRM2 0x3fd /* .6. Thermal Management Register */ #define SPR_THRM_TIN 0x80000000 /* Thermal interrupt bit (RO) */ #define SPR_THRM_TIV 0x40000000 /* Thermal interrupt valid (RO) */ #define SPR_THRM_THRESHOLD(x) ((x) << 23) /* Thermal sensor threshold */ #define SPR_THRM_TID 0x00000004 /* Thermal interrupt direction */ #define SPR_THRM_TIE 0x00000002 /* Thermal interrupt enable */ #define SPR_THRM_VALID 0x00000001 /* Valid bit */ #define SPR_THRM3 0x3fe /* .6. Thermal Management Register */ #define SPR_THRM_TIMER(x) ((x) << 1) /* Sampling interval timer */ #define SPR_THRM_ENABLE 0x00000001 /* TAU Enable */ #define SPR_FPECR 0x3fe /* .6. Floating-Point Exception Cause Register */ /* Time Base Register declarations */ #define TBR_TBL 0x10c /* 468 Time Base Lower - read */ #define TBR_TBU 0x10d /* 468 Time Base Upper - read */ #define TBR_TBWL 0x11c /* 468 Time Base Lower - supervisor, write */ #define TBR_TBWU 0x11d /* 468 Time Base Upper - supervisor, write */ /* Performance counter declarations */ #define PMC_OVERFLOW 0x80000000 /* Counter has overflowed */ /* The first five countable [non-]events are common to many PMC's */ #define PMCN_NONE 0 /* Count nothing */ #define PMCN_CYCLES 1 /* Processor cycles */ #define PMCN_ICOMP 2 /* Instructions completed */ #define PMCN_TBLTRANS 3 /* TBL bit transitions */ #define PCMN_IDISPATCH 4 /* Instructions dispatched */ /* Similar things for the 970 PMC direct counters */ #define PMC970N_NONE 0x8 /* Count nothing */ #define PMC970N_CYCLES 0xf /* Processor cycles */ #define PMC970N_ICOMP 0x9 /* Instructions completed */ #if defined(BOOKE) #define SPR_MCARU 0x239 /* ..8 Machine Check Address register upper bits */ #define SPR_MCSR 0x23c /* ..8 Machine Check Syndrome register */ #define SPR_MCAR 0x23d /* ..8 Machine Check Address register */ #define SPR_ESR 0x003e /* ..8 Exception Syndrome Register */ #define ESR_PIL 0x08000000 /* Program interrupt - illegal */ #define ESR_PPR 0x04000000 /* Program interrupt - privileged */ #define ESR_PTR 0x02000000 /* Program interrupt - trap */ #define ESR_ST 0x00800000 /* Store operation */ #define ESR_DLK 0x00200000 /* Data storage, D cache locking */ #define ESR_ILK 0x00100000 /* Data storage, I cache locking */ #define ESR_BO 0x00020000 /* Data/instruction storage, byte ordering */ #define ESR_SPE 0x00000080 /* SPE exception bit */ #define SPR_CSRR0 0x03a /* ..8 58 Critical SRR0 */ #define SPR_CSRR1 0x03b /* ..8 59 Critical SRR1 */ #define SPR_MCSRR0 0x23a /* ..8 570 Machine check SRR0 */ #define SPR_MCSRR1 0x23b /* ..8 
571 Machine check SRR1 */ #define SPR_DSRR0 0x23e /* ..8 574 Debug SRR0 */ #define SPR_DSRR1 0x23f /* ..8 575 Debug SRR1 */ #define SPR_MMUCR 0x3b2 /* 4.. MMU Control Register */ #define MMUCR_SWOA (0x80000000 >> 7) #define MMUCR_U1TE (0x80000000 >> 9) #define MMUCR_U2SWOAE (0x80000000 >> 10) #define MMUCR_DULXE (0x80000000 >> 12) #define MMUCR_IULXE (0x80000000 >> 13) #define MMUCR_STS (0x80000000 >> 15) #define MMUCR_STID_MASK (0xFF000000 >> 24) #define SPR_MMUCSR0 0x3f4 /* ..8 1012 MMU Control and Status Register 0 */ #define MMUCSR0_L2TLB0_FI 0x04 /* TLB0 flash invalidate */ #define MMUCSR0_L2TLB1_FI 0x02 /* TLB1 flash invalidate */ #define SPR_SVR 0x3ff /* ..8 1023 System Version Register */ #define SVR_MPC8533 0x8034 #define SVR_MPC8533E 0x803c #define SVR_MPC8541 0x8072 #define SVR_MPC8541E 0x807a #define SVR_MPC8548 0x8031 #define SVR_MPC8548E 0x8039 #define SVR_MPC8555 0x8071 #define SVR_MPC8555E 0x8079 #define SVR_MPC8572 0x80e0 #define SVR_MPC8572E 0x80e8 #define SVR_P1011 0x80e5 #define SVR_P1011E 0x80ed #define SVR_P1013 0x80e7 #define SVR_P1013E 0x80ef #define SVR_P1020 0x80e4 #define SVR_P1020E 0x80ec #define SVR_P1022 0x80e6 #define SVR_P1022E 0x80ee #define SVR_P2010 0x80e3 #define SVR_P2010E 0x80eb #define SVR_P2020 0x80e2 #define SVR_P2020E 0x80ea #define SVR_P2041 0x8210 #define SVR_P2041E 0x8218 #define SVR_P3041 0x8211 #define SVR_P3041E 0x8219 #define SVR_P4040 0x8200 #define SVR_P4040E 0x8208 #define SVR_P4080 0x8201 #define SVR_P4080E 0x8209 #define SVR_P5010 0x8221 #define SVR_P5010E 0x8229 #define SVR_P5020 0x8220 #define SVR_P5020E 0x8228 #define SVR_P5021 0x8205 #define SVR_P5021E 0x820d #define SVR_P5040 0x8204 #define SVR_P5040E 0x820c #define SVR_VER(svr) (((svr) >> 16) & 0xffff) #define SPR_PID0 0x030 /* ..8 Process ID Register 0 */ #define SPR_PID1 0x279 /* ..8 Process ID Register 1 */ #define SPR_PID2 0x27a /* ..8 Process ID Register 2 */ #define SPR_TLB0CFG 0x2B0 /* ..8 TLB 0 Config Register */ #define SPR_TLB1CFG 0x2B1 /* ..8 TLB 1 Config Register */ #define TLBCFG_ASSOC_MASK 0xff000000 /* Associativity of TLB */ #define TLBCFG_ASSOC_SHIFT 24 #define TLBCFG_NENTRY_MASK 0x00000fff /* Number of entries in TLB */ #define SPR_IVPR 0x03f /* ..8 Interrupt Vector Prefix Register */ #define SPR_IVOR0 0x190 /* ..8 Critical input */ #define SPR_IVOR1 0x191 /* ..8 Machine check */ #define SPR_IVOR2 0x192 #define SPR_IVOR3 0x193 #define SPR_IVOR4 0x194 #define SPR_IVOR5 0x195 #define SPR_IVOR6 0x196 #define SPR_IVOR7 0x197 #define SPR_IVOR8 0x198 #define SPR_IVOR9 0x199 #define SPR_IVOR10 0x19a #define SPR_IVOR11 0x19b #define SPR_IVOR12 0x19c #define SPR_IVOR13 0x19d #define SPR_IVOR14 0x19e #define SPR_IVOR15 0x19f #define SPR_IVOR32 0x210 #define SPR_IVOR33 0x211 #define SPR_IVOR34 0x212 #define SPR_IVOR35 0x213 #define SPR_MAS0 0x270 /* ..8 MMU Assist Register 0 Book-E/e500 */ #define SPR_MAS1 0x271 /* ..8 MMU Assist Register 1 Book-E/e500 */ #define SPR_MAS2 0x272 /* ..8 MMU Assist Register 2 Book-E/e500 */ #define SPR_MAS3 0x273 /* ..8 MMU Assist Register 3 Book-E/e500 */ #define SPR_MAS4 0x274 /* ..8 MMU Assist Register 4 Book-E/e500 */ #define SPR_MAS5 0x275 /* ..8 MMU Assist Register 5 Book-E */ #define SPR_MAS6 0x276 /* ..8 MMU Assist Register 6 Book-E/e500 */ #define SPR_MAS7 0x3B0 /* ..8 MMU Assist Register 7 Book-E/e500 */ #define SPR_MAS8 0x155 /* ..8 MMU Assist Register 8 Book-E/e500 */ #define SPR_L1CFG0 0x203 /* ..8 L1 cache configuration register 0 */ #define SPR_L1CFG1 0x204 /* ..8 L1 cache configuration register 1 */ #define SPR_CCR1 0x378 
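/*
 * The TLB configuration masks above are meant to be applied to values
 * read back with the mfspr() macro defined near the top of this file.
 * A minimal sketch (illustrative only, not part of this header):
 *
 *	uint32_t cfg = mfspr(SPR_TLB1CFG);
 *	u_int assoc = (cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
 *	u_int nentries = cfg & TLBCFG_NENTRY_MASK;
 *
 * Similarly, SVR_VER(mfspr(SPR_SVR)) yields the 16-bit system version
 * that is compared against the SVR_* values listed above.
 */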
#define CCR1_L2COBE 0x00000040 #define DCR_L2DCDCRAI 0x0000 /* L2 D-Cache DCR Address Pointer */ #define DCR_L2DCDCRDI 0x0001 /* L2 D-Cache DCR Data Indirect */ #define DCR_L2CR0 0x00 /* L2 Cache Configuration Register 0 */ #define L2CR0_AS 0x30000000 #define SPR_L1CSR0 0x3F2 /* ..8 L1 Cache Control and Status Register 0 */ #define L1CSR0_DCPE 0x00010000 /* Data Cache Parity Enable */ #define L1CSR0_DCLFR 0x00000100 /* Data Cache Lock Bits Flash Reset */ #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ #define L1CSR0_DCE 0x00000001 /* Data Cache Enable */ #define SPR_L1CSR1 0x3F3 /* ..8 L1 Cache Control and Status Register 1 */ #define L1CSR1_ICPE 0x00010000 /* Instruction Cache Parity Enable */ #define L1CSR1_ICUL 0x00000400 /* Instr Cache Unable to Lock */ #define L1CSR1_ICLFR 0x00000100 /* Instruction Cache Lock Bits Flash Reset */ #define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */ #define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */ #define SPR_L2CSR0 0x3F9 /* ..8 L2 Cache Control and Status Register 0 */ #define L2CSR0_L2E 0x80000000 /* L2 Cache Enable */ #define L2CSR0_L2PE 0x40000000 /* L2 Cache Parity Enable */ #define L2CSR0_L2FI 0x00200000 /* L2 Cache Flash Invalidate */ #define L2CSR0_L2LFC 0x00000400 /* L2 Cache Lock Flags Clear */ #define SPR_BUCSR 0x3F5 /* ..8 Branch Unit Control and Status Register */ #define BUCSR_BPEN 0x00000001 /* Branch Prediction Enable */ #define BUCSR_BBFI 0x00000200 /* Branch Buffer Flash Invalidate */ #endif /* BOOKE */ #endif /* !_POWERPC_SPR_H_ */ Index: head/sys/powerpc/include/trap.h =================================================================== --- head/sys/powerpc/include/trap.h (revision 339513) +++ head/sys/powerpc/include/trap.h (revision 339514) @@ -1,155 +1,157 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $NetBSD: trap.h,v 1.7 2002/02/22 13:51:40 kleink Exp $ * $FreeBSD$ */ #ifndef _POWERPC_TRAP_H_ #define _POWERPC_TRAP_H_ #define EXC_RSVD 0x0000 /* Reserved */ #define EXC_RST 0x0100 /* Reset; all but IBM4xx */ #define EXC_MCHK 0x0200 /* Machine Check */ #define EXC_DSI 0x0300 /* Data Storage Interrupt */ #define EXC_DSE 0x0380 /* Data Segment Interrupt */ #define EXC_ISI 0x0400 /* Instruction Storage Interrupt */ #define EXC_ISE 0x0480 /* Instruction Segment Interrupt */ #define EXC_EXI 0x0500 /* External Interrupt */ #define EXC_ALI 0x0600 /* Alignment Interrupt */ #define EXC_PGM 0x0700 /* Program Interrupt */ #define EXC_FPU 0x0800 /* Floating-point Unavailable */ #define EXC_DECR 0x0900 /* Decrementer Interrupt */ #define EXC_SC 0x0c00 /* System Call */ #define EXC_TRC 0x0d00 /* Trace */ #define EXC_FPA 0x0e00 /* Floating-point Assist */ /* The following is only available on the 601: */ #define EXC_RUNMODETRC 0x2000 /* Run Mode/Trace Exception */ /* The following are only available on 970(G5): */ #define EXC_VECAST_G5 0x1700 /* AltiVec Assist */ /* The following are only available on 7400(G4): */ #define EXC_VEC 0x0f20 /* AltiVec Unavailable */ #define EXC_VECAST_G4 0x1600 /* AltiVec Assist */ /* The following are only available on 604/750/7400: */ #define EXC_PERF 0x0f00 /* Performance Monitoring */ #define EXC_BPT 0x1300 /* Instruction Breakpoint */ #define EXC_SMI 0x1400 /* System Managment Interrupt */ /* The following are only available on 750/7400: */ #define EXC_THRM 0x1700 /* Thermal Management Interrupt */ /* And these are only on the 603: */ #define EXC_IMISS 0x1000 /* Instruction translation miss */ #define EXC_DLMISS 0x1100 /* Data load translation miss */ #define EXC_DSMISS 0x1200 /* Data store translation miss */ /* Power ISA 2.06+: */ #define EXC_HDSI 0x0e00 /* Hypervisor Data Storage */ #define EXC_HISI 0x0e20 /* Hypervisor Instruction Storage */ #define EXC_HEA 0x0e40 /* Hypervisor Emulation Assistance */ #define EXC_HMI 0x0e60 /* Hypervisor Maintenance */ #define EXC_VSX 0x0f40 /* VSX Unavailable */ /* Power ISA 2.07+: */ #define EXC_FAC 0x0f60 /* Facility Unavailable */ #define EXC_HFAC 0x0f80 /* Hypervisor Facility Unavailable */ /* Power ISA 3.0+: */ #define EXC_HVI 0x0ea0 /* Hypervisor Virtualization */ /* The following are available on 4xx and 85xx */ #define EXC_CRIT 0x0100 /* Critical Input Interrupt */ #define EXC_PIT 0x1000 /* Programmable Interval Timer */ #define EXC_FIT 0x1010 /* Fixed Interval Timer */ #define EXC_WDOG 0x1020 /* Watchdog Timer */ #define EXC_DTMISS 0x1100 /* Data TLB Miss */ #define EXC_ITMISS 0x1200 /* Instruction TLB Miss */ #define EXC_APU 0x1300 /* Auxiliary Processing Unit */ #define EXC_DEBUG 0x2f10 /* Debug trap */ #define EXC_VECAST_E 0x2f20 /* Altivec Assist (Book-E) */ +#define EXC_SPFPD 0x2f30 /* SPE Floating-point Data */ +#define EXC_SPFPR 0x2f40 /* SPE Floating-point Round */ #define EXC_LAST 0x2f00 /* Last possible exception vector */ #define EXC_AST 0x3000 /* Fake AST vector */ /* Trap was in user mode */ #define EXC_USER 0x10000 /* * EXC_ALI sets bits in the DSISR and DAR to provide enough * information to recover from the unaligned access without needing to * parse the offending instruction. This includes certain bits of the * opcode, and information about what registers are used. The opcode * indicator values below come from Appendix F of Book III of "The * PowerPC Architecture". 
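 * As a rough sketch of how these macros fit together (this is not the
 * in-tree fixup code itself), an alignment handler holding the DSISR
 * value can classify the access and then recover the source/target FPR
 * and the base GPR of the effective address:
 *
 *	if (EXC_ALI_OPCODE_INDICATOR(dsisr) == EXC_ALI_LFD ||
 *	    EXC_ALI_OPCODE_INDICATOR(dsisr) == EXC_ALI_STFD) {
 *		rst = EXC_ALI_RST(dsisr);
 *		ra = EXC_ALI_RA(dsisr);
 *	}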
*/ #define EXC_ALI_OPCODE_INDICATOR(dsisr) ((dsisr >> 10) & 0x7f) #define EXC_ALI_LFD 0x09 #define EXC_ALI_STFD 0x0b /* Macros to extract register information */ #define EXC_ALI_RST(dsisr) ((dsisr >> 5) & 0x1f) /* source or target */ #define EXC_ALI_RA(dsisr) (dsisr & 0x1f) #define EXC_ALI_SPE_REG(instr) ((instr >> 21) & 0x1f) /* * SRR1 bits for program exception traps. These identify what caused * the program exception. See section 6.5.9 of the Power ISA Version * 2.05. */ #define EXC_PGM_FPENABLED (1UL << 20) #define EXC_PGM_ILLEGAL (1UL << 19) #define EXC_PGM_PRIV (1UL << 18) #define EXC_PGM_TRAP (1UL << 17) /* DTrace trap opcode. */ #define EXC_DTRACE 0x7ffff808 /* Magic pointer to store TOC base and other info for trap handlers on ppc64 */ #define TRAP_GENTRAP 0x1f0 #define TRAP_TOCBASE 0x1f8 #ifndef LOCORE struct trapframe; struct pcb; void trap(struct trapframe *); int ppc_instr_emulate(struct trapframe *, struct pcb *); #endif #endif /* _POWERPC_TRAP_H_ */ Index: head/sys/powerpc/powerpc/exec_machdep.c =================================================================== --- head/sys/powerpc/powerpc/exec_machdep.c (revision 339513) +++ head/sys/powerpc/powerpc/exec_machdep.c (revision 339514) @@ -1,1097 +1,1105 @@ /*- * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause-FreeBSD * * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_fpu_emu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FPU_EMU #include #endif #ifdef COMPAT_FREEBSD32 #include #include #include typedef struct __ucontext32 { sigset_t uc_sigmask; mcontext32_t uc_mcontext; uint32_t uc_link; struct sigaltstack32 uc_stack; uint32_t uc_flags; uint32_t __spare__[4]; } ucontext32_t; struct sigframe32 { ucontext32_t sf_uc; struct siginfo32 sf_si; }; static int grab_mcontext32(struct thread *td, mcontext32_t *, int flags); #endif static int grab_mcontext(struct thread *, mcontext_t *, int); void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct trapframe *tf; struct sigacts *psp; struct sigframe sf; struct thread *td; struct proc *p; #ifdef COMPAT_FREEBSD32 struct siginfo32 siginfo32; struct sigframe32 sf32; #endif size_t sfpsize; caddr_t sfp, usfp; int oonstack, rndfsize; int sig; int code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; oonstack = sigonstack(tf->fixreg[1]); /* * Fill siginfo structure. */ ksi->ksi_info.si_signo = ksi->ksi_signo; ksi->ksi_info.si_addr = (void *)((tf->exc == EXC_DSI || tf->exc == EXC_DSE) ? tf->dar : tf->srr0); #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32)) { siginfo_to_siginfo32(&ksi->ksi_info, &siginfo32); sig = siginfo32.si_signo; code = siginfo32.si_code; sfp = (caddr_t)&sf32; sfpsize = sizeof(sf32); rndfsize = roundup(sizeof(sf32), 16); /* * Save user context */ memset(&sf32, 0, sizeof(sf32)); grab_mcontext32(td, &sf32.sf_uc.uc_mcontext, 0); sf32.sf_uc.uc_sigmask = *mask; sf32.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp; sf32.sf_uc.uc_stack.ss_size = (uint32_t)td->td_sigstk.ss_size; sf32.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf32.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; } else { #endif sig = ksi->ksi_signo; code = ksi->ksi_code; sfp = (caddr_t)&sf; sfpsize = sizeof(sf); #ifdef __powerpc64__ /* * 64-bit PPC defines a 288 byte scratch region * below the stack. */ rndfsize = 288 + roundup(sizeof(sf), 48); #else rndfsize = roundup(sizeof(sf), 16); #endif /* * Save user context */ memset(&sf, 0, sizeof(sf)); grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 
1 : 0; #ifdef COMPAT_FREEBSD32 } #endif CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* * Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { usfp = (void *)(((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size - rndfsize) & ~0xFul); } else { usfp = (void *)((tf->fixreg[1] - rndfsize) & ~0xFul); } /* * Save the floating-point state, if necessary, then copy it. */ /* XXX */ /* * Set up the registers to return to sigcode. * * r1/sp - sigframe ptr * lr - sig function, dispatched to by blrl in trampoline * r3 - sig number * r4 - SIGINFO ? &siginfo : exception code * r5 - user context * srr0 - trampoline function addr */ tf->lr = (register_t)catcher; tf->fixreg[1] = (register_t)usfp; tf->fixreg[FIRSTARG] = sig; #ifdef COMPAT_FREEBSD32 tf->fixreg[FIRSTARG+2] = (register_t)usfp + ((SV_PROC_FLAG(p, SV_ILP32)) ? offsetof(struct sigframe32, sf_uc) : offsetof(struct sigframe, sf_uc)); #else tf->fixreg[FIRSTARG+2] = (register_t)usfp + offsetof(struct sigframe, sf_uc); #endif if (SIGISMEMBER(psp->ps_siginfo, sig)) { /* * Signal handler installed with SA_SIGINFO. */ #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32)) { sf32.sf_si = siginfo32; tf->fixreg[FIRSTARG+1] = (register_t)usfp + offsetof(struct sigframe32, sf_si); sf32.sf_si = siginfo32; } else { #endif tf->fixreg[FIRSTARG+1] = (register_t)usfp + offsetof(struct sigframe, sf_si); sf.sf_si = ksi->ksi_info; #ifdef COMPAT_FREEBSD32 } #endif } else { /* Old FreeBSD-style arguments. */ tf->fixreg[FIRSTARG+1] = code; tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ? tf->dar : tf->srr0; } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); tf->srr0 = (register_t)p->p_sysent->sv_sigcode_base; /* * copy the frame out to userland. */ if (copyout(sfp, usfp, sfpsize) != 0) { /* * Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp); PROC_LOCK(p); sigexit(td, SIGILL); } CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->srr0, tf->fixreg[1]); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { ucontext_t uc; int error; CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp); if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) { CTR1(KTR_SIG, "sigreturn: efault td=%p", td); return (EFAULT); } error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x", td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]); return (EJUSTRETURN); } #ifdef COMPAT_FREEBSD4 int freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) { return sys_sigreturn(td, (struct sigreturn_args *)uap); } #endif /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
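 * In practice only pcb_lr (taken from the trap's srr0) and pcb_sp (the
 * saved r1) are filled in below; that is all the stack unwinder needs
 * to walk the frames.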
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_lr = tf->srr0;
	pcb->pcb_sp = tf->fixreg[1];
}

/*
 * get_mcontext/sendsig helper routine that doesn't touch the
 * proc lock
 */
static int
grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct pcb *pcb;
	int i;

	pcb = td->td_pcb;

	memset(mcp, 0, sizeof(mcontext_t));

	mcp->mc_vers = _MC_VERSION;
	mcp->mc_flags = 0;
	memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_gpr[3] = 0;
		mcp->mc_gpr[4] = 0;
	}

	/*
	 * This assumes that floating-point context is *not* lazy,
	 * so if the thread has used FP there would have been a
	 * FP-unavailable exception that would have set things up
	 * correctly.
	 */
	if (pcb->pcb_flags & PCB_FPREGS) {
		if (pcb->pcb_flags & PCB_FPU) {
			KASSERT(td == curthread,
			    ("get_mcontext: fp save not curthread"));
			critical_enter();
			save_fpu(td);
			critical_exit();
		}
		mcp->mc_flags |= _MC_FP_VALID;
		memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
		for (i = 0; i < 32; i++)
			memcpy(&mcp->mc_fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
			    sizeof(double));
	}

	if (pcb->pcb_flags & PCB_VSX) {
		for (i = 0; i < 32; i++)
			memcpy(&mcp->mc_vsxfpreg[i],
			    &pcb->pcb_fpu.fpr[i].vsr[2], sizeof(double));
	}

	/*
	 * Repeat for Altivec context
	 */
	if (pcb->pcb_flags & PCB_VEC) {
		KASSERT(td == curthread,
		    ("get_mcontext: fp save not curthread"));
		critical_enter();
		save_vec(td);
		critical_exit();
		mcp->mc_flags |= _MC_AV_VALID;
		mcp->mc_vscr = pcb->pcb_vec.vscr;
		mcp->mc_vrsave = pcb->pcb_vec.vrsave;
		memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));
	}

	mcp->mc_len = sizeof(*mcp);

	return (0);
}

int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	int error;

	error = grab_mcontext(td, mcp, flags);
	if (error == 0) {
		PROC_LOCK(curthread->td_proc);
		mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
		PROC_UNLOCK(curthread->td_proc);
	}

	return (error);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tf;
	register_t tls;
	int i;

	pcb = td->td_pcb;
	tf = td->td_frame;

	if (mcp->mc_vers != _MC_VERSION || mcp->mc_len != sizeof(*mcp))
		return (EINVAL);

	/*
	 * Don't let the user set privileged MSR bits
	 */
	if ((mcp->mc_srr1 & psl_userstatic) != (tf->srr1 & psl_userstatic)) {
		return (EINVAL);
	}

	/* Copy trapframe, preserving TLS pointer across context change */
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		tls = tf->fixreg[13];
	else
		tls = tf->fixreg[2];
	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		tf->fixreg[13] = tls;
	else
		tf->fixreg[2] = tls;

	if (mcp->mc_flags & _MC_FP_VALID) {
		/* enable_fpu() will happen lazily on a fault */
		pcb->pcb_flags |= PCB_FPREGS;
		memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
		bzero(pcb->pcb_fpu.fpr, sizeof(pcb->pcb_fpu.fpr));
		for (i = 0; i < 32; i++) {
			memcpy(&pcb->pcb_fpu.fpr[i].fpr, &mcp->mc_fpreg[i],
			    sizeof(double));
			memcpy(&pcb->pcb_fpu.fpr[i].vsr[2],
			    &mcp->mc_vsxfpreg[i], sizeof(double));
		}
	}

	if (mcp->mc_flags & _MC_AV_VALID) {
		if ((pcb->pcb_flags & PCB_VEC) != PCB_VEC) {
			critical_enter();
			enable_vec(td);
			critical_exit();
		}
		pcb->pcb_vec.vscr = mcp->mc_vscr;
		pcb->pcb_vec.vrsave = mcp->mc_vrsave;
		memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
	}

	return (0);
}

/*
 * Set up registers on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf;
	register_t argc;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
#ifdef __powerpc64__
	tf->fixreg[1] = -roundup(-stack + 48, 16);
#else
	tf->fixreg[1] = -roundup(-stack + 8, 16);
#endif

	/*
	 * Set up arguments for _start():
	 *	_start(argc, argv, envp, obj, cleanup, ps_strings);
	 *
	 * Notes:
	 *	- obj and cleanup are the auxiliary and termination
	 *	  vectors.  They are fixed up by ld.elf_so.
	 *	- ps_strings is a NetBSD extension, and will be
	 *	  ignored by executables which are strictly
	 *	  compliant with the SVR4 ABI.
	 */

	/* Collect argc from the user stack */
	argc = fuword((void *)stack);

	tf->fixreg[3] = argc;
	tf->fixreg[4] = stack + sizeof(register_t);
	tf->fixreg[5] = stack + (2 + argc)*sizeof(register_t);
	tf->fixreg[6] = 0;				/* auxiliary vector */
	tf->fixreg[7] = 0;				/* termination vector */
	tf->fixreg[8] = (register_t)imgp->ps_strings;	/* NetBSD extension */

	tf->srr0 = imgp->entry_addr;
#ifdef __powerpc64__
	tf->fixreg[12] = imgp->entry_addr;
#endif
	tf->srr1 = psl_userset | PSL_FE_DFLT;
	td->td_pcb->pcb_flags = 0;
}

#ifdef COMPAT_FREEBSD32
void
ppc32_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf;
	uint32_t argc;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	argc = fuword32((void *)stack);

	tf->fixreg[3] = argc;
	tf->fixreg[4] = stack + sizeof(uint32_t);
	tf->fixreg[5] = stack + (2 + argc)*sizeof(uint32_t);
	tf->fixreg[6] = 0;				/* auxiliary vector */
	tf->fixreg[7] = 0;				/* termination vector */
	tf->fixreg[8] = (register_t)imgp->ps_strings;	/* NetBSD extension */

	tf->srr0 = imgp->entry_addr;
	tf->srr1 = psl_userset32 | PSL_FE_DFLT;
	td->td_pcb->pcb_flags = 0;
}
#endif

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	tf = td->td_frame;
	memcpy(regs, tf, sizeof(struct reg));

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	/* No debug registers on PowerPC */
	return (ENOSYS);
}

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct pcb *pcb;
	int i;

	pcb = td->td_pcb;

	if ((pcb->pcb_flags & PCB_FPREGS) == 0)
		memset(fpregs, 0, sizeof(struct fpreg));
	else {
		memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
		for (i = 0; i < 32; i++)
			memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
			    sizeof(double));
	}

	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	tf = td->td_frame;
	memcpy(tf, regs, sizeof(struct reg));

	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	/* No debug registers on PowerPC */
	return (ENOSYS);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct pcb *pcb;
	int i;

	pcb = td->td_pcb;
	pcb->pcb_flags |= PCB_FPREGS;
	memcpy(&pcb->pcb_fpu.fpscr, &fpregs->fpscr, sizeof(double));
	for (i = 0; i < 32; i++) {
		memcpy(&pcb->pcb_fpu.fpr[i].fpr, &fpregs->fpreg[i],
		    sizeof(double));
	}

	return (0);
}

#ifdef COMPAT_FREEBSD32
int
set_regs32(struct thread *td, struct reg32 *regs)
{
	struct trapframe *tf;
	int i;

	tf = td->td_frame;
	for (i = 0; i < 32; i++)
		tf->fixreg[i] = regs->fixreg[i];
	tf->lr = regs->lr;
	tf->cr = regs->cr;
	tf->xer = regs->xer;
	tf->ctr = regs->ctr;
	tf->srr0 = regs->pc;

	return (0);
}

int
fill_regs32(struct thread *td, struct reg32 *regs)
{
	struct trapframe *tf;
	int i;

	tf = td->td_frame;
	for (i = 0; i < 32; i++)
		regs->fixreg[i] = tf->fixreg[i];
	regs->lr = tf->lr;
	regs->cr = tf->cr;
	regs->xer = tf->xer;
	regs->ctr = tf->ctr;
	regs->pc = tf->srr0;

	return (0);
}

static int
grab_mcontext32(struct thread
*td, mcontext32_t *mcp, int flags) { mcontext_t mcp64; int i, error; error = grab_mcontext(td, &mcp64, flags); if (error != 0) return (error); mcp->mc_vers = mcp64.mc_vers; mcp->mc_flags = mcp64.mc_flags; mcp->mc_onstack = mcp64.mc_onstack; mcp->mc_len = mcp64.mc_len; memcpy(mcp->mc_avec,mcp64.mc_avec,sizeof(mcp64.mc_avec)); memcpy(mcp->mc_av,mcp64.mc_av,sizeof(mcp64.mc_av)); for (i = 0; i < 42; i++) mcp->mc_frame[i] = mcp64.mc_frame[i]; memcpy(mcp->mc_fpreg,mcp64.mc_fpreg,sizeof(mcp64.mc_fpreg)); memcpy(mcp->mc_vsxfpreg,mcp64.mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg)); return (0); } static int get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags) { int error; error = grab_mcontext32(td, mcp, flags); if (error == 0) { PROC_LOCK(curthread->td_proc); mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]); PROC_UNLOCK(curthread->td_proc); } return (error); } static int set_mcontext32(struct thread *td, mcontext32_t *mcp) { mcontext_t mcp64; int i, error; mcp64.mc_vers = mcp->mc_vers; mcp64.mc_flags = mcp->mc_flags; mcp64.mc_onstack = mcp->mc_onstack; mcp64.mc_len = mcp->mc_len; memcpy(mcp64.mc_avec,mcp->mc_avec,sizeof(mcp64.mc_avec)); memcpy(mcp64.mc_av,mcp->mc_av,sizeof(mcp64.mc_av)); for (i = 0; i < 42; i++) mcp64.mc_frame[i] = mcp->mc_frame[i]; mcp64.mc_srr1 |= (td->td_frame->srr1 & 0xFFFFFFFF00000000ULL); memcpy(mcp64.mc_fpreg,mcp->mc_fpreg,sizeof(mcp64.mc_fpreg)); memcpy(mcp64.mc_vsxfpreg,mcp->mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg)); error = set_mcontext(td, &mcp64); return (error); } #endif #ifdef COMPAT_FREEBSD32 int freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap) { ucontext32_t uc; int error; CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp); if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) { CTR1(KTR_SIG, "sigreturn: efault td=%p", td); return (EFAULT); } error = set_mcontext32(td, &uc.uc_mcontext); if (error != 0) return (error); kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x", td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]); return (EJUSTRETURN); } /* * The first two fields of a ucontext_t are the signal mask and the machine * context. The next field is uc_link; we want to avoid destroying the link * when copying out contexts. */ #define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link) int freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE); } return (ret); } int freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); if (ret == 0) { kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } } return (ret == 0 ? 
EJUSTRETURN : ret); } int freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap) { ucontext32_t uc; int ret; if (uap->oucp == NULL || uap->ucp == NULL) ret = EINVAL; else { get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE); if (ret == 0) { ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); if (ret == 0) { kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } } } return (ret == 0 ? EJUSTRETURN : ret); } #endif void cpu_set_syscall_retval(struct thread *td, int error) { struct proc *p; struct trapframe *tf; int fixup; if (error == EJUSTRETURN) return; p = td->td_proc; tf = td->td_frame; if (tf->fixreg[0] == SYS___syscall && (SV_PROC_FLAG(p, SV_ILP32))) { int code = tf->fixreg[FIRSTARG + 1]; if (p->p_sysent->sv_mask) code &= p->p_sysent->sv_mask; fixup = ( #if defined(COMPAT_FREEBSD6) && defined(SYS_freebsd6_lseek) code != SYS_freebsd6_lseek && #endif code != SYS_lseek) ? 1 : 0; } else fixup = 0; switch (error) { case 0: if (fixup) { /* * 64-bit return, 32-bit syscall. Fixup byte order */ tf->fixreg[FIRSTARG] = 0; tf->fixreg[FIRSTARG + 1] = td->td_retval[0]; } else { tf->fixreg[FIRSTARG] = td->td_retval[0]; tf->fixreg[FIRSTARG + 1] = td->td_retval[1]; } tf->cr &= ~0x10000000; /* Unset summary overflow */ break; case ERESTART: /* * Set user's pc back to redo the system call. */ tf->srr0 -= 4; break; default: tf->fixreg[FIRSTARG] = SV_ABI_ERRNO(p, error); tf->cr |= 0x10000000; /* Set summary overflow */ break; } } /* * Threading functions */ void cpu_thread_exit(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { struct pcb *pcb; pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fUL); td->td_pcb = pcb; td->td_frame = (struct trapframe *)pcb - 1; } void cpu_thread_free(struct thread *td) { } int cpu_set_user_tls(struct thread *td, void *tls_base) { if (SV_PROC_FLAG(td->td_proc, SV_LP64)) td->td_frame->fixreg[13] = (register_t)tls_base + 0x7010; else td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008; return (0); } void cpu_copy_thread(struct thread *td, struct thread *td0) { struct pcb *pcb2; struct trapframe *tf; struct callframe *cf; pcb2 = td->td_pcb; /* Copy the upcall pcb */ bcopy(td0->td_pcb, pcb2, sizeof(*pcb2)); /* Create a stack for the new thread */ tf = td->td_frame; bcopy(td0->td_frame, tf, sizeof(struct trapframe)); tf->fixreg[FIRSTARG] = 0; tf->fixreg[FIRSTARG + 1] = 0; tf->cr &= ~0x10000000; /* Set registers for trampoline to user mode. */ cf = (struct callframe *)tf - 1; memset(cf, 0, sizeof(struct callframe)); cf->cf_func = (register_t)fork_return; cf->cf_arg0 = (register_t)td; cf->cf_arg1 = (register_t)tf; pcb2->pcb_sp = (register_t)cf; #if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1) pcb2->pcb_lr = ((register_t *)fork_trampoline)[0]; pcb2->pcb_toc = ((register_t *)fork_trampoline)[1]; #else pcb2->pcb_lr = (register_t)fork_trampoline; pcb2->pcb_context[0] = pcb2->pcb_lr; #endif pcb2->pcb_cpu.aim.usr_vsid = 0; +#ifdef __SPE__ + pcb2->pcb_vec.vscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | + SPEFSCR_FUNFE | SPEFSCR_FOVFE; +#endif /* Setup to release spin count in fork_exit(). 
*/ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_msr = psl_kernset; } void cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf; uintptr_t sp; tf = td->td_frame; /* align stack and alloc space for frame ptr and saved LR */ #ifdef __powerpc64__ sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 48) & ~0x1f; #else sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 8) & ~0x1f; #endif bzero(tf, sizeof(struct trapframe)); tf->fixreg[1] = (register_t)sp; tf->fixreg[3] = (register_t)arg; if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { tf->srr0 = (register_t)entry; #ifdef __powerpc64__ tf->srr1 = psl_userset32 | PSL_FE_DFLT; #else tf->srr1 = psl_userset | PSL_FE_DFLT; #endif } else { #ifdef __powerpc64__ register_t entry_desc[3]; (void)copyin((void *)entry, entry_desc, sizeof(entry_desc)); tf->srr0 = entry_desc[0]; tf->fixreg[2] = entry_desc[1]; tf->fixreg[11] = entry_desc[2]; tf->srr1 = psl_userset | PSL_FE_DFLT; #endif } td->td_pcb->pcb_flags = 0; +#ifdef __SPE__ + td->td_pcb->pcb_vec.vscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | + SPEFSCR_FUNFE | SPEFSCR_FOVFE; +#endif td->td_retval[0] = (register_t)entry; td->td_retval[1] = 0; } static int emulate_mfspr(int spr, int reg, struct trapframe *frame){ struct thread *td; td = curthread; if (spr == SPR_DSCR) { // If DSCR was never set, get the default DSCR if ((td->td_pcb->pcb_flags & PCB_CDSCR) == 0) td->td_pcb->pcb_dscr = mfspr(SPR_DSCR); frame->fixreg[reg] = td->td_pcb->pcb_dscr; frame->srr0 += 4; return 0; } else return SIGILL; } static int emulate_mtspr(int spr, int reg, struct trapframe *frame){ struct thread *td; td = curthread; if (spr == SPR_DSCR) { td->td_pcb->pcb_flags |= PCB_CDSCR; td->td_pcb->pcb_dscr = frame->fixreg[reg]; frame->srr0 += 4; return 0; } else return SIGILL; } #define XFX 0xFC0007FF int ppc_instr_emulate(struct trapframe *frame, struct pcb *pcb) { uint32_t instr; int reg, sig; int rs, spr; instr = fuword32((void *)frame->srr0); sig = SIGILL; if ((instr & 0xfc1fffff) == 0x7c1f42a6) { /* mfpvr */ reg = (instr & ~0xfc1fffff) >> 21; frame->fixreg[reg] = mfpvr(); frame->srr0 += 4; return (0); } else if ((instr & XFX) == 0x7c0002a6) { /* mfspr */ rs = (instr & 0x3e00000) >> 21; spr = (instr & 0x1ff800) >> 16; return emulate_mfspr(spr, rs, frame); } else if ((instr & XFX) == 0x7c0003a6) { /* mtspr */ rs = (instr & 0x3e00000) >> 21; spr = (instr & 0x1ff800) >> 16; return emulate_mtspr(spr, rs, frame); } else if ((instr & 0xfc000ffe) == 0x7c0004ac) { /* various sync */ powerpc_sync(); /* Do a heavy-weight sync */ frame->srr0 += 4; return (0); } #ifdef FPU_EMU if (!(pcb->pcb_flags & PCB_FPREGS)) { bzero(&pcb->pcb_fpu, sizeof(pcb->pcb_fpu)); pcb->pcb_flags |= PCB_FPREGS; } sig = fpu_emulate(frame, &pcb->pcb_fpu); #endif return (sig); } Index: head/sys/powerpc/powerpc/swtch32.S =================================================================== --- head/sys/powerpc/powerpc/swtch32.S (revision 339513) +++ head/sys/powerpc/powerpc/swtch32.S (revision 339514) @@ -1,215 +1,219 @@ /* $FreeBSD$ */ /* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "assym.inc" #include "opt_sched.h" #include #include #include #include #include /* * void cpu_throw(struct thread *old, struct thread *new) */ ENTRY(cpu_throw) mr %r2, %r4 li %r14,0 /* Tell cpu_switchin not to release a thread */ b cpu_switchin /* * void cpu_switch(struct thread *old, * struct thread *new, * struct mutex *mtx); * * Switch to a new thread saving the current state in the old thread. */ ENTRY(cpu_switch) lwz %r6,TD_PCB(%r3) /* Get the old thread's PCB ptr */ stmw %r12,PCB_CONTEXT(%r6) /* Save the non-volatile GP regs. These can now be used for scratch */ mfcr %r16 /* Save the condition register */ stw %r16,PCB_CR(%r6) mflr %r16 /* Save the link register */ stw %r16,PCB_LR(%r6) stw %r1,PCB_SP(%r6) /* Save the stack pointer */ mr %r14,%r3 /* Copy the old thread ptr... 
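	   into a non-volatile register: r3 is clobbered by the
	   save_fpu/save_vec/pmap_deactivate calls below, and
	   cpu_switchin later tests r14 to decide whether to
	   release the old thread's lock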
*/ mr %r2,%r4 /* and the new thread ptr in curthread */ mr %r16,%r5 /* and the new lock */ mr %r17,%r6 /* and the PCB */ lwz %r7,PCB_FLAGS(%r17) /* Save FPU context if needed */ andi. %r7, %r7, PCB_FPU beq .L1 bl save_fpu .L1: mr %r3,%r14 /* restore old thread ptr */ lwz %r7,PCB_FLAGS(%r17) /* Save Altivec context if needed */ andi. %r7, %r7, PCB_VEC beq .L2 bl save_vec .L2: #if defined(__SPE__) mfspr %r3,SPR_SPEFSCR stw %r3,PCB_VSCR(%r17) #endif mr %r3,%r14 /* restore old thread ptr */ bl pmap_deactivate /* Deactivate the current pmap */ sync /* Make sure all of that finished */ cpu_switchin: #if defined(SMP) && defined(SCHED_ULE) /* Wait for the new thread to become unblocked */ bl _GLOBAL_OFFSET_TABLE_@local-4 mflr %r6 lwz %r6,blocked_lock@got(%r6) blocked_loop: lwz %r7,TD_LOCK(%r2) cmpw %r6,%r7 beq- blocked_loop isync #endif lwz %r17,TD_PCB(%r2) /* Get new current PCB */ lwz %r1,PCB_SP(%r17) /* Load new stack pointer */ /* Release old thread now that we have a stack pointer set up */ cmpwi %r14,0 beq- 1f stw %r16,TD_LOCK(%r14) /* ULE: update old thread's lock */ 1: mfsprg %r7,0 /* Get the pcpu pointer */ stw %r2,PC_CURTHREAD(%r7) /* Store new current thread */ lwz %r17,TD_PCB(%r2) /* Store new current PCB */ stw %r17,PC_CURPCB(%r7) mr %r3,%r2 /* Get new thread ptr */ bl pmap_activate /* Activate the new address space */ lwz %r6, PCB_FLAGS(%r17) /* Restore FPU context if needed */ andi. %r6, %r6, PCB_FPU beq .L3 mr %r3,%r2 /* Pass curthread to enable_fpu */ bl enable_fpu .L3: lwz %r6, PCB_FLAGS(%r17) /* Restore Altivec context if needed */ andi. %r6, %r6, PCB_VEC beq .L4 mr %r3,%r2 /* Pass curthread to enable_vec */ bl enable_vec .L4: #if defined(__SPE__) lwz %r3,PCB_VSCR(%r17) mtspr SPR_SPEFSCR,%r3 #endif /* thread to restore is in r3 */ mr %r3,%r17 /* Recover PCB ptr */ lmw %r12,PCB_CONTEXT(%r3) /* Load the non-volatile GP regs */ lwz %r5,PCB_CR(%r3) /* Load the condition register */ mtcr %r5 lwz %r5,PCB_LR(%r3) /* Load the link register */ mtlr %r5 lwz %r1,PCB_SP(%r3) /* Load the stack pointer */ /* * Perform a dummy stwcx. to clear any reservations we may have * inherited from the previous thread. It doesn't matter if the * stwcx succeeds or not. pcb_context[0] can be clobbered. */ stwcx. %r1, 0, %r3 blr /* * savectx(pcb) * Update pcb, saving current processor state */ ENTRY(savectx) stmw %r12,PCB_CONTEXT(%r3) /* Save the non-volatile GP regs */ mfcr %r4 /* Save the condition register */ stw %r4,PCB_CR(%r3) mflr %r4 /* Save the link register */ stw %r4,PCB_LR(%r3) blr /* * fork_trampoline() * Set up the return from cpu_fork() */ ENTRY(fork_trampoline) lwz %r3,CF_FUNC(%r1) lwz %r4,CF_ARG0(%r1) lwz %r5,CF_ARG1(%r1) bl fork_exit addi %r1,%r1,CF_SIZE-FSP /* Allow 8 bytes in front of trapframe to simulate FRAME_SETUP does when allocating space for a frame pointer/saved LR */ +#ifdef __SPE__ + li %r3,SPEFSCR_FINVE|SPEFSCR_FDBZE|SPEFSCR_FUNFE|SPEFSCR_FOVFE + mtspr SPR_SPEFSCR, %r3 +#endif b trapexit
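
The change itself is small: the same SPEFSCR exception-enable mask
(SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE) is seeded
into pcb_vec.vscr in cpu_copy_thread() and cpu_set_upcall(), loaded into
SPR_SPEFSCR in fork_trampoline(), and saved/restored around cpu_switch().
The sketch below shows how the C side of that default could be expressed
once; it is illustrative only and not part of the commit.  SPEFSCR_DFLT_EXC
and spe_seed_spefscr() are invented names, and the sketch assumes the
SPEFSCR_* bit macros and struct pcb come from <machine/spr.h> and
<machine/pcb.h>.

#ifdef __SPE__
#include <machine/pcb.h>
#include <machine/spr.h>

/* Trap on invalid operands, divide-by-zero, underflow and overflow. */
#define	SPEFSCR_DFLT_EXC	(SPEFSCR_FINVE | SPEFSCR_FDBZE | \
				 SPEFSCR_FUNFE | SPEFSCR_FOVFE)

static __inline void
spe_seed_spefscr(struct pcb *pcb)
{

	/* pcb_vec.vscr holds the saved SPEFSCR on SPE-capable cores. */
	pcb->pcb_vec.vscr = SPEFSCR_DFLT_EXC;
}
#endif

With such a helper, cpu_copy_thread() and cpu_set_upcall() would call
spe_seed_spefscr() on the new pcb where the commit open-codes the mask;
fork_trampoline() still writes SPR_SPEFSCR directly since it runs in
assembly.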