Index: head/sys/powerpc/include/pcb.h =================================================================== --- head/sys/powerpc/include/pcb.h (revision 346789) +++ head/sys/powerpc/include/pcb.h (revision 346790) @@ -1,111 +1,113 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: pcb.h,v 1.4 2000/06/04 11:57:17 tsubai Exp $ * $FreeBSD$ */ #ifndef _MACHINE_PCB_H_ #define _MACHINE_PCB_H_ #include #ifndef _STANDALONE struct pcb { register_t pcb_context[20]; /* non-volatile r14-r31 */ register_t pcb_cr; /* Condition register */ register_t pcb_sp; /* stack pointer */ register_t pcb_toc; /* toc pointer */ register_t pcb_lr; /* link register */ register_t pcb_dscr; /* dscr value */ + register_t pcb_fscr; struct pmap *pcb_pm; /* pmap of our vmspace */ jmp_buf *pcb_onfault; /* For use during copyin/copyout */ int pcb_flags; #define PCB_FPU 0x1 /* Process uses FPU */ #define PCB_FPREGS 0x2 /* Process had FPU registers initialized */ #define PCB_VEC 0x4 /* Process had Altivec initialized */ #define PCB_VSX 0x8 /* Process had VSX initialized */ #define PCB_CDSCR 0x10 /* Process had Custom DSCR initialized */ #define PCB_HTM 0x20 /* Process had HTM initialized */ +#define PCB_CFSCR 0x40 /* Process had FSCR updated */ struct fpu { union { double fpr; uint32_t vsr[4]; } fpr[32]; double fpscr; /* FPSCR stored as double for easier access */ } pcb_fpu; /* Floating point processor */ unsigned int pcb_fpcpu; /* which CPU had our FPU stuff. */ struct vec { uint32_t vr[32][4]; uint32_t spare[2]; uint32_t vrsave; uint32_t vscr; /* aligned at vector element 3 */ } pcb_vec __aligned(16); /* Vector processor */ unsigned int pcb_veccpu; /* which CPU had our vector stuff. 
*/ struct htm { uint64_t tfhar; uint64_t texasr; uint64_t tfiar; } pcb_htm; union { struct { vm_offset_t usr_segm; /* Base address */ register_t usr_vsid; /* USER_SR segment */ } aim; struct { register_t dbcr0; } booke; } pcb_cpu; vm_offset_t pcb_lastill; /* Last illegal instruction */ }; #endif #ifdef _KERNEL struct trapframe; #ifndef curpcb extern struct pcb *curpcb; #endif extern struct pmap *curpm; extern struct proc *fpuproc; void makectx(struct trapframe *, struct pcb *); void savectx(struct pcb *) __returns_twice; #endif #endif /* _MACHINE_PCB_H_ */ Index: head/sys/powerpc/include/spr.h =================================================================== --- head/sys/powerpc/include/spr.h (revision 346789) +++ head/sys/powerpc/include/spr.h (revision 346790) @@ -1,892 +1,894 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: spr.h,v 1.25 2002/08/14 15:38:40 matt Exp $ * $FreeBSD$ */ #ifndef _POWERPC_SPR_H_ #define _POWERPC_SPR_H_ #ifndef _LOCORE #define mtspr(reg, val) \ __asm __volatile("mtspr %0,%1" : : "K"(reg), "r"(val)) #define mfspr(reg) \ ( { register_t val; \ __asm __volatile("mfspr %0,%1" : "=r"(val) : "K"(reg)); \ val; } ) #ifndef __powerpc64__ /* The following routines allow manipulation of the full 64-bit width * of SPRs on 64 bit CPUs in bridge mode */ #define mtspr64(reg,valhi,vallo,scratch) \ __asm __volatile(" \ mfmsr %0; \ insrdi %0,%5,1,0; \ mtmsrd %0; \ isync; \ \ sld %1,%1,%4; \ or %1,%1,%2; \ mtspr %3,%1; \ srd %1,%1,%4; \ \ clrldi %0,%0,1; \ mtmsrd %0; \ isync;" \ : "=r"(scratch), "=r"(valhi) : "r"(vallo), "K"(reg), "r"(32), "r"(1)) #define mfspr64upper(reg,scratch) \ ( { register_t val; \ __asm __volatile(" \ mfmsr %0; \ insrdi %0,%4,1,0; \ mtmsrd %0; \ isync; \ \ mfspr %1,%2; \ srd %1,%1,%3; \ \ clrldi %0,%0,1; \ mtmsrd %0; \ isync;" \ : "=r"(scratch), "=r"(val) : "K"(reg), "r"(32), "r"(1)); \ val; } ) #endif #endif /* _LOCORE */ /* * Special Purpose Register declarations. * * The first column in the comments indicates which PowerPC * architectures the SPR is valid on - 4 for 4xx series, * 6 for 6xx/7xx series and 8 for 8xx and 8xxx series. */ #define SPR_MQ 0x000 /* .6. 
601 MQ register */ #define SPR_XER 0x001 /* 468 Fixed Point Exception Register */ +#define SPR_DSCR 0x003 /* .6. Data Stream Control Register (Unprivileged) */ #define SPR_RTCU_R 0x004 /* .6. 601 RTC Upper - Read */ #define SPR_RTCL_R 0x005 /* .6. 601 RTC Lower - Read */ #define SPR_LR 0x008 /* 468 Link Register */ #define SPR_CTR 0x009 /* 468 Count Register */ -#define SPR_DSCR 0x011 /* Data Stream Control Register */ +#define SPR_DSCRP 0x011 /* Data Stream Control Register (Privileged) */ #define SPR_DSISR 0x012 /* .68 DSI exception source */ #define DSISR_DIRECT 0x80000000 /* Direct-store error exception */ #define DSISR_NOTFOUND 0x40000000 /* Translation not found */ #define DSISR_PROTECT 0x08000000 /* Memory access not permitted */ #define DSISR_INVRX 0x04000000 /* Reserve-indexed insn direct-store access */ #define DSISR_STORE 0x02000000 /* Store operation */ #define DSISR_DABR 0x00400000 /* DABR match */ #define DSISR_SEGMENT 0x00200000 /* XXX; not in 6xx PEM */ #define DSISR_EAR 0x00100000 /* eciwx/ecowx && EAR[E] == 0 */ #define SPR_DAR 0x013 /* .68 Data Address Register */ #define SPR_RTCU_W 0x014 /* .6. 601 RTC Upper - Write */ #define SPR_RTCL_W 0x015 /* .6. 601 RTC Lower - Write */ #define SPR_DEC 0x016 /* .68 DECrementer register */ #define SPR_SDR1 0x019 /* .68 Page table base address register */ #define SPR_SRR0 0x01a /* 468 Save/Restore Register 0 */ #define SPR_SRR1 0x01b /* 468 Save/Restore Register 1 */ #define SRR1_ISI_PFAULT 0x40000000 /* ISI page not found */ #define SRR1_ISI_NOEXECUTE 0x10000000 /* Memory marked no-execute */ #define SRR1_ISI_PP 0x08000000 /* PP bits forbid access */ #define SPR_DECAR 0x036 /* ..8 Decrementer auto reload */ #define SPR_EIE 0x050 /* ..8 Exception Interrupt ??? */ #define SPR_EID 0x051 /* ..8 Exception Interrupt ??? */ #define SPR_NRI 0x052 /* ..8 Exception Interrupt ??? */ #define SPR_FSCR 0x099 /* Facility Status and Control Register */ #define FSCR_IC_MASK 0xFF00000000000000ULL /* FSCR[0:7] is Interrupt Cause */ #define FSCR_IC_FP 0x0000000000000000ULL /* FP unavailable */ #define FSCR_IC_VSX 0x0100000000000000ULL /* VSX unavailable */ #define FSCR_IC_DSCR 0x0200000000000000ULL /* Access to the DSCR at SPRs 3 or 17 */ #define FSCR_IC_PM 0x0300000000000000ULL /* Read or write access of a Performance Monitor SPR in group A */ #define FSCR_IC_BHRB 0x0400000000000000ULL /* Execution of a BHRB Instruction */ #define FSCR_IC_HTM 0x0500000000000000ULL /* Access to a Transactional Memory */ /* Reserved 0x0600000000000000ULL */ #define FSCR_IC_EBB 0x0700000000000000ULL /* Access to Event-Based Branch */ #define FSCR_IC_TAR 0x0800000000000000ULL /* Access to Target Address Register */ #define FSCR_IC_STOP 0x0900000000000000ULL /* Access to the 'stop' instruction in privileged non-hypervisor state */ #define FSCR_IC_MSG 0x0A00000000000000ULL /* Access to 'msgsndp' or 'msgclrp' instructions */ #define FSCR_IC_SCV 0x0C00000000000000ULL /* Execution of a 'scv' instruction */ +#define FSCR_DSCR 0x0000000000000004ULL /* DSCR available in PR state */ #define SPR_USPRG0 0x100 /* 4.. User SPR General 0 */ #define SPR_VRSAVE 0x100 /* .6. AltiVec VRSAVE */ #define SPR_SPRG0 0x110 /* 468 SPR General 0 */ #define SPR_SPRG1 0x111 /* 468 SPR General 1 */ #define SPR_SPRG2 0x112 /* 468 SPR General 2 */ #define SPR_SPRG3 0x113 /* 468 SPR General 3 */ #define SPR_SPRG4 0x114 /* 4.. SPR General 4 */ #define SPR_SPRG5 0x115 /* 4.. SPR General 5 */ #define SPR_SPRG6 0x116 /* 4.. SPR General 6 */ #define SPR_SPRG7 0x117 /* 4.. 
SPR General 7 */ #define SPR_SCOMC 0x114 /* ... SCOM Address Register (970) */ #define SPR_SCOMD 0x115 /* ... SCOM Data Register (970) */ #define SPR_ASR 0x118 /* ... Address Space Register (PPC64) */ #define SPR_EAR 0x11a /* .68 External Access Register */ #define SPR_PVR 0x11f /* 468 Processor Version Register */ #define MPC601 0x0001 #define MPC603 0x0003 #define MPC604 0x0004 #define MPC602 0x0005 #define MPC603e 0x0006 #define MPC603ev 0x0007 #define MPC750 0x0008 #define MPC750CL 0x7000 /* Nintendo Wii's Broadway */ #define MPC604ev 0x0009 #define MPC7400 0x000c #define MPC620 0x0014 #define IBM403 0x0020 #define IBM401A1 0x0021 #define IBM401B2 0x0022 #define IBM401C2 0x0023 #define IBM401D2 0x0024 #define IBM401E2 0x0025 #define IBM401F2 0x0026 #define IBM401G2 0x0027 #define IBMRS64II 0x0033 #define IBMRS64III 0x0034 #define IBMPOWER4 0x0035 #define IBMRS64III_2 0x0036 #define IBMRS64IV 0x0037 #define IBMPOWER4PLUS 0x0038 #define IBM970 0x0039 #define IBMPOWER5 0x003a #define IBMPOWER5PLUS 0x003b #define IBM970FX 0x003c #define IBMPOWER6 0x003e #define IBMPOWER7 0x003f #define IBMPOWER3 0x0040 #define IBMPOWER3PLUS 0x0041 #define IBM970MP 0x0044 #define IBM970GX 0x0045 #define IBMPOWERPCA2 0x0049 #define IBMPOWER7PLUS 0x004a #define IBMPOWER8E 0x004b #define IBMPOWER8NVL 0x004c #define IBMPOWER8 0x004d #define IBMPOWER9 0x004e #define MPC860 0x0050 #define IBMCELLBE 0x0070 #define MPC8240 0x0081 #define PA6T 0x0090 #define IBM405GP 0x4011 #define IBM405L 0x4161 #define IBM750FX 0x7000 #define MPC745X_P(v) ((v & 0xFFF8) == 0x8000) #define MPC7450 0x8000 #define MPC7455 0x8001 #define MPC7457 0x8002 #define MPC7447A 0x8003 #define MPC7448 0x8004 #define MPC7410 0x800c #define MPC8245 0x8081 #define FSL_E500v1 0x8020 #define FSL_E500v2 0x8021 #define FSL_E500mc 0x8023 #define FSL_E5500 0x8024 #define FSL_E6500 0x8040 #define FSL_E300C1 0x8083 #define FSL_E300C2 0x8084 #define FSL_E300C3 0x8085 #define FSL_E300C4 0x8086 #define LPCR_PECE_WAKESET (LPCR_PECE_EXT | LPCR_PECE_DECR | LPCR_PECE_ME) #define SPR_EPCR 0x133 #define EPCR_EXTGS 0x80000000 #define EPCR_DTLBGS 0x40000000 #define EPCR_ITLBGS 0x20000000 #define EPCR_DSIGS 0x10000000 #define EPCR_ISIGS 0x08000000 #define EPCR_DUVGS 0x04000000 #define EPCR_ICM 0x02000000 #define EPCR_GICMGS 0x01000000 #define EPCR_DGTMI 0x00800000 #define EPCR_DMIUH 0x00400000 #define EPCR_PMGS 0x00200000 #define SPR_HSRR0 0x13a #define SPR_HSRR1 0x13b #define SPR_LPCR 0x13e /* Logical Partitioning Control */ #define LPCR_LPES 0x008 /* Bit 60 */ #define LPCR_HVICE 0x002 /* Hypervisor Virtualization Interrupt (Arch 3.0) */ #define LPCR_PECE_DRBL (1ULL << 16) /* Directed Privileged Doorbell */ #define LPCR_PECE_HDRBL (1ULL << 15) /* Directed Hypervisor Doorbell */ #define LPCR_PECE_EXT (1ULL << 14) /* External exceptions */ #define LPCR_PECE_DECR (1ULL << 13) /* Decrementer exceptions */ #define LPCR_PECE_ME (1ULL << 12) /* Machine Check and Hypervisor */ /* Maintenance exceptions */ #define SPR_LPID 0x13f /* Logical Partitioning Control */ #define SPR_HMER 0x150 /* Hypervisor Maintenance Exception Register */ #define SPR_HMEER 0x151 /* Hypervisor Maintenance Exception Enable Register */ #define SPR_PTCR 0x1d0 /* Partition Table Control Register */ #define SPR_SPEFSCR 0x200 /* ..8 Signal Processing Engine FSCR. 
*/ #define SPEFSCR_SOVH 0x80000000 #define SPEFSCR_OVH 0x40000000 #define SPEFSCR_FGH 0x20000000 #define SPEFSCR_FXH 0x10000000 #define SPEFSCR_FINVH 0x08000000 #define SPEFSCR_FDBZH 0x04000000 #define SPEFSCR_FUNFH 0x02000000 #define SPEFSCR_FOVFH 0x01000000 #define SPEFSCR_FINXS 0x00200000 #define SPEFSCR_FINVS 0x00100000 #define SPEFSCR_FDBZS 0x00080000 #define SPEFSCR_FUNFS 0x00040000 #define SPEFSCR_FOVFS 0x00020000 #define SPEFSCR_SOV 0x00008000 #define SPEFSCR_OV 0x00004000 #define SPEFSCR_FG 0x00002000 #define SPEFSCR_FX 0x00001000 #define SPEFSCR_FINV 0x00000800 #define SPEFSCR_FDBZ 0x00000400 #define SPEFSCR_FUNF 0x00000200 #define SPEFSCR_FOVF 0x00000100 #define SPEFSCR_FINXE 0x00000040 #define SPEFSCR_FINVE 0x00000020 #define SPEFSCR_FDBZE 0x00000010 #define SPEFSCR_FUNFE 0x00000008 #define SPEFSCR_FOVFE 0x00000004 #define SPEFSCR_FRMC_M 0x00000003 #define SPR_IBAT0U 0x210 /* .6. Instruction BAT Reg 0 Upper */ #define SPR_IBAT0L 0x211 /* .6. Instruction BAT Reg 0 Lower */ #define SPR_IBAT1U 0x212 /* .6. Instruction BAT Reg 1 Upper */ #define SPR_IBAT1L 0x213 /* .6. Instruction BAT Reg 1 Lower */ #define SPR_IBAT2U 0x214 /* .6. Instruction BAT Reg 2 Upper */ #define SPR_IBAT2L 0x215 /* .6. Instruction BAT Reg 2 Lower */ #define SPR_IBAT3U 0x216 /* .6. Instruction BAT Reg 3 Upper */ #define SPR_IBAT3L 0x217 /* .6. Instruction BAT Reg 3 Lower */ #define SPR_DBAT0U 0x218 /* .6. Data BAT Reg 0 Upper */ #define SPR_DBAT0L 0x219 /* .6. Data BAT Reg 0 Lower */ #define SPR_DBAT1U 0x21a /* .6. Data BAT Reg 1 Upper */ #define SPR_DBAT1L 0x21b /* .6. Data BAT Reg 1 Lower */ #define SPR_DBAT2U 0x21c /* .6. Data BAT Reg 2 Upper */ #define SPR_DBAT2L 0x21d /* .6. Data BAT Reg 2 Lower */ #define SPR_DBAT3U 0x21e /* .6. Data BAT Reg 3 Upper */ #define SPR_DBAT3L 0x21f /* .6. Data BAT Reg 3 Lower */ #define SPR_IC_CST 0x230 /* ..8 Instruction Cache CSR */ #define IC_CST_IEN 0x80000000 /* I cache is ENabled (RO) */ #define IC_CST_CMD_INVALL 0x0c000000 /* I cache invalidate all */ #define IC_CST_CMD_UNLOCKALL 0x0a000000 /* I cache unlock all */ #define IC_CST_CMD_UNLOCK 0x08000000 /* I cache unlock block */ #define IC_CST_CMD_LOADLOCK 0x06000000 /* I cache load & lock block */ #define IC_CST_CMD_DISABLE 0x04000000 /* I cache disable */ #define IC_CST_CMD_ENABLE 0x02000000 /* I cache enable */ #define IC_CST_CCER1 0x00200000 /* I cache error type 1 (RO) */ #define IC_CST_CCER2 0x00100000 /* I cache error type 2 (RO) */ #define IC_CST_CCER3 0x00080000 /* I cache error type 3 (RO) */ #define SPR_IBAT4U 0x230 /* .6. Instruction BAT Reg 4 Upper */ #define SPR_IC_ADR 0x231 /* ..8 Instruction Cache Address */ #define SPR_IBAT4L 0x231 /* .6. Instruction BAT Reg 4 Lower */ #define SPR_IC_DAT 0x232 /* ..8 Instruction Cache Data */ #define SPR_IBAT5U 0x232 /* .6. Instruction BAT Reg 5 Upper */ #define SPR_IBAT5L 0x233 /* .6. Instruction BAT Reg 5 Lower */ #define SPR_IBAT6U 0x234 /* .6. Instruction BAT Reg 6 Upper */ #define SPR_IBAT6L 0x235 /* .6. Instruction BAT Reg 6 Lower */ #define SPR_IBAT7U 0x236 /* .6. Instruction BAT Reg 7 Upper */ #define SPR_IBAT7L 0x237 /* .6. 
Instruction BAT Reg 7 Lower */ #define SPR_DC_CST 0x230 /* ..8 Data Cache CSR */ #define DC_CST_DEN 0x80000000 /* D cache ENabled (RO) */ #define DC_CST_DFWT 0x40000000 /* D cache Force Write-Thru (RO) */ #define DC_CST_LES 0x20000000 /* D cache Little Endian Swap (RO) */ #define DC_CST_CMD_FLUSH 0x0e000000 /* D cache invalidate all */ #define DC_CST_CMD_INVALL 0x0c000000 /* D cache invalidate all */ #define DC_CST_CMD_UNLOCKALL 0x0a000000 /* D cache unlock all */ #define DC_CST_CMD_UNLOCK 0x08000000 /* D cache unlock block */ #define DC_CST_CMD_CLRLESWAP 0x07000000 /* D cache clr little-endian swap */ #define DC_CST_CMD_LOADLOCK 0x06000000 /* D cache load & lock block */ #define DC_CST_CMD_SETLESWAP 0x05000000 /* D cache set little-endian swap */ #define DC_CST_CMD_DISABLE 0x04000000 /* D cache disable */ #define DC_CST_CMD_CLRFWT 0x03000000 /* D cache clear forced write-thru */ #define DC_CST_CMD_ENABLE 0x02000000 /* D cache enable */ #define DC_CST_CMD_SETFWT 0x01000000 /* D cache set forced write-thru */ #define DC_CST_CCER1 0x00200000 /* D cache error type 1 (RO) */ #define DC_CST_CCER2 0x00100000 /* D cache error type 2 (RO) */ #define DC_CST_CCER3 0x00080000 /* D cache error type 3 (RO) */ #define SPR_DBAT4U 0x238 /* .6. Data BAT Reg 4 Upper */ #define SPR_DC_ADR 0x231 /* ..8 Data Cache Address */ #define SPR_DBAT4L 0x239 /* .6. Data BAT Reg 4 Lower */ #define SPR_DC_DAT 0x232 /* ..8 Data Cache Data */ #define SPR_DBAT5U 0x23a /* .6. Data BAT Reg 5 Upper */ #define SPR_DBAT5L 0x23b /* .6. Data BAT Reg 5 Lower */ #define SPR_DBAT6U 0x23c /* .6. Data BAT Reg 6 Upper */ #define SPR_DBAT6L 0x23d /* .6. Data BAT Reg 6 Lower */ #define SPR_DBAT7U 0x23e /* .6. Data BAT Reg 7 Upper */ #define SPR_DBAT7L 0x23f /* .6. Data BAT Reg 7 Lower */ #define SPR_SPRG8 0x25c /* ..8 SPR General 8 */ #define SPR_MI_CTR 0x310 /* ..8 IMMU control */ #define Mx_CTR_GPM 0x80000000 /* Group Protection Mode */ #define Mx_CTR_PPM 0x40000000 /* Page Protection Mode */ #define Mx_CTR_CIDEF 0x20000000 /* Cache-Inhibit DEFault */ #define MD_CTR_WTDEF 0x20000000 /* Write-Through DEFault */ #define Mx_CTR_RSV4 0x08000000 /* Reserve 4 TLB entries */ #define MD_CTR_TWAM 0x04000000 /* TableWalk Assist Mode */ #define Mx_CTR_PPCS 0x02000000 /* Priv/user state compare mode */ #define Mx_CTR_TLB_INDX 0x000001f0 /* TLB index mask */ #define Mx_CTR_TLB_INDX_BITPOS 8 /* TLB index shift */ #define SPR_MI_AP 0x312 /* ..8 IMMU access protection */ #define Mx_GP_SUPER(n) (0 << (2*(15-(n)))) /* access is supervisor */ #define Mx_GP_PAGE (1 << (2*(15-(n)))) /* access is page protect */ #define Mx_GP_SWAPPED (2 << (2*(15-(n)))) /* access is swapped */ #define Mx_GP_USER (3 << (2*(15-(n)))) /* access is user */ #define SPR_MI_EPN 0x313 /* ..8 IMMU effective number */ #define Mx_EPN_EPN 0xfffff000 /* Effective Page Number mask */ #define Mx_EPN_EV 0x00000020 /* Entry Valid */ #define Mx_EPN_ASID 0x0000000f /* Address Space ID */ #define SPR_MI_TWC 0x315 /* ..8 IMMU tablewalk control */ #define MD_TWC_L2TB 0xfffff000 /* Level-2 Tablewalk Base */ #define Mx_TWC_APG 0x000001e0 /* Access Protection Group */ #define Mx_TWC_G 0x00000010 /* Guarded memory */ #define Mx_TWC_PS 0x0000000c /* Page Size (L1) */ #define MD_TWC_WT 0x00000002 /* Write-Through */ #define Mx_TWC_V 0x00000001 /* Entry Valid */ #define SPR_MI_RPN 0x316 /* ..8 IMMU real (phys) page number */ #define Mx_RPN_RPN 0xfffff000 /* Real Page Number */ #define Mx_RPN_PP 0x00000ff0 /* Page Protection */ #define Mx_RPN_SPS 0x00000008 /* Small Page Size */ #define Mx_RPN_SH 
0x00000004 /* SHared page */ #define Mx_RPN_CI 0x00000002 /* Cache Inhibit */ #define Mx_RPN_V 0x00000001 /* Valid */ #define SPR_MD_CTR 0x318 /* ..8 DMMU control */ #define SPR_M_CASID 0x319 /* ..8 CASID */ #define M_CASID 0x0000000f /* Current AS Id */ #define SPR_MD_AP 0x31a /* ..8 DMMU access protection */ #define SPR_MD_EPN 0x31b /* ..8 DMMU effective number */ #define SPR_970MMCR0 0x31b /* ... Monitor Mode Control Register 0 (PPC 970) */ #define SPR_970MMCR0_PMC1SEL(x) ((x) << 8) /* PMC1 selector (970) */ #define SPR_970MMCR0_PMC2SEL(x) ((x) << 1) /* PMC2 selector (970) */ #define SPR_970MMCR1 0x31e /* ... Monitor Mode Control Register 1 (PPC 970) */ #define SPR_970MMCR1_PMC3SEL(x) (((x) & 0x1f) << 27) /* PMC 3 selector */ #define SPR_970MMCR1_PMC4SEL(x) (((x) & 0x1f) << 22) /* PMC 4 selector */ #define SPR_970MMCR1_PMC5SEL(x) (((x) & 0x1f) << 17) /* PMC 5 selector */ #define SPR_970MMCR1_PMC6SEL(x) (((x) & 0x1f) << 12) /* PMC 6 selector */ #define SPR_970MMCR1_PMC7SEL(x) (((x) & 0x1f) << 7) /* PMC 7 selector */ #define SPR_970MMCR1_PMC8SEL(x) (((x) & 0x1f) << 2) /* PMC 8 selector */ #define SPR_970MMCRA 0x312 /* ... Monitor Mode Control Register 2 (PPC 970) */ #define SPR_970PMC1 0x313 /* ... PMC 1 */ #define SPR_970PMC2 0x314 /* ... PMC 2 */ #define SPR_970PMC3 0x315 /* ... PMC 3 */ #define SPR_970PMC4 0x316 /* ... PMC 4 */ #define SPR_970PMC5 0x317 /* ... PMC 5 */ #define SPR_970PMC6 0x318 /* ... PMC 6 */ #define SPR_970PMC7 0x319 /* ... PMC 7 */ #define SPR_970PMC8 0x31a /* ... PMC 8 */ #define SPR_M_TWB 0x31c /* ..8 MMU tablewalk base */ #define M_TWB_L1TB 0xfffff000 /* level-1 translation base */ #define M_TWB_L1INDX 0x00000ffc /* level-1 index */ #define SPR_MD_TWC 0x31d /* ..8 DMMU tablewalk control */ #define SPR_MD_RPN 0x31e /* ..8 DMMU real (phys) page number */ #define SPR_MD_TW 0x31f /* ..8 MMU tablewalk scratch */ #define SPR_MI_CAM 0x330 /* ..8 IMMU CAM entry read */ #define SPR_MI_RAM0 0x331 /* ..8 IMMU RAM entry read reg 0 */ #define SPR_MI_RAM1 0x332 /* ..8 IMMU RAM entry read reg 1 */ #define SPR_MD_CAM 0x338 /* ..8 IMMU CAM entry read */ #define SPR_MD_RAM0 0x339 /* ..8 IMMU RAM entry read reg 0 */ #define SPR_MD_RAM1 0x33a /* ..8 IMMU RAM entry read reg 1 */ #define SPR_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */ #define PSSCR_PLS_S 60 #define PSSCR_PLS_M (0xf << PSSCR_PLS_S) #define PSSCR_SD (1 << 22) #define PSSCR_ESL (1 << 21) #define PSSCR_EC (1 << 20) #define PSSCR_PSLL_S 16 #define PSSCR_PSLL_M (0xf << PSSCR_PSLL_S) #define PSSCR_TR_S 8 #define PSSCR_TR_M (0x3 << PSSCR_TR_S) #define PSSCR_MTL_S 4 #define PSSCR_MTL_M (0xf << PSSCR_MTL_S) #define PSSCR_RL_S 0 #define PSSCR_RL_M (0xf << PSSCR_RL_S) #define SPR_PMCR 0x374 /* Processor Management Control Register */ #define SPR_UMMCR2 0x3a0 /* .6. User Monitor Mode Control Register 2 */ #define SPR_UMMCR0 0x3a8 /* .6. User Monitor Mode Control Register 0 */ #define SPR_USIA 0x3ab /* .6. User Sampled Instruction Address */ #define SPR_UMMCR1 0x3ac /* .6. User Monitor Mode Control Register 1 */ #define SPR_ZPR 0x3b0 /* 4.. Zone Protection Register */ #define SPR_MMCR2 0x3b0 /* .6. Monitor Mode Control Register 2 */ #define SPR_MMCR2_THRESHMULT_32 0x80000000 /* Multiply MMCR0 threshold by 32 */ #define SPR_MMCR2_THRESHMULT_2 0x00000000 /* Multiply MMCR0 threshold by 2 */ #define SPR_PID 0x3b1 /* 4.. Process ID */ #define SPR_PMC5 0x3b1 /* .6. Performance Counter Register 5 */ #define SPR_PMC6 0x3b2 /* .6. Performance Counter Register 6 */ #define SPR_CCR0 0x3b3 /* 4.. 
Core Configuration Register 0 */ #define SPR_IAC3 0x3b4 /* 4.. Instruction Address Compare 3 */ #define SPR_IAC4 0x3b5 /* 4.. Instruction Address Compare 4 */ #define SPR_DVC1 0x3b6 /* 4.. Data Value Compare 1 */ #define SPR_DVC2 0x3b7 /* 4.. Data Value Compare 2 */ #define SPR_MMCR0 0x3b8 /* .6. Monitor Mode Control Register 0 */ #define SPR_MMCR0_FC 0x80000000 /* Freeze counters */ #define SPR_MMCR0_FCS 0x40000000 /* Freeze counters in supervisor mode */ #define SPR_MMCR0_FCP 0x20000000 /* Freeze counters in user mode */ #define SPR_MMCR0_FCM1 0x10000000 /* Freeze counters when mark=1 */ #define SPR_MMCR0_FCM0 0x08000000 /* Freeze counters when mark=0 */ #define SPR_MMCR0_PMXE 0x04000000 /* Enable PM interrupt */ #define SPR_MMCR0_FCECE 0x02000000 /* Freeze counters after event */ #define SPR_MMCR0_TBSEL_15 0x01800000 /* Count bit 15 of TBL */ #define SPR_MMCR0_TBSEL_19 0x01000000 /* Count bit 19 of TBL */ #define SPR_MMCR0_TBSEL_23 0x00800000 /* Count bit 23 of TBL */ #define SPR_MMCR0_TBSEL_31 0x00000000 /* Count bit 31 of TBL */ #define SPR_MMCR0_TBEE 0x00400000 /* Time-base event enable */ #define SPR_MMCRO_THRESHOLD(x) ((x) << 16) /* Threshold value */ #define SPR_MMCR0_PMC1CE 0x00008000 /* PMC1 condition enable */ #define SPR_MMCR0_PMCNCE 0x00004000 /* PMCn condition enable */ #define SPR_MMCR0_TRIGGER 0x00002000 /* Trigger */ #define SPR_MMCR0_PMC1SEL(x) (((x) & 0x3f) << 6) /* PMC1 selector */ #define SPR_MMCR0_PMC2SEL(x) (((x) & 0x3f) << 0) /* PMC2 selector */ #define SPR_SGR 0x3b9 /* 4.. Storage Guarded Register */ #define SPR_PMC1 0x3b9 /* .6. Performance Counter Register 1 */ #define SPR_DCWR 0x3ba /* 4.. Data Cache Write-through Register */ #define SPR_PMC2 0x3ba /* .6. Performance Counter Register 2 */ #define SPR_SLER 0x3bb /* 4.. Storage Little Endian Register */ #define SPR_SIA 0x3bb /* .6. Sampled Instruction Address */ #define SPR_MMCR1 0x3bc /* .6. Monitor Mode Control Register 2 */ #define SPR_MMCR1_PMC3SEL(x) (((x) & 0x1f) << 27) /* PMC 3 selector */ #define SPR_MMCR1_PMC4SEL(x) (((x) & 0x1f) << 22) /* PMC 4 selector */ #define SPR_MMCR1_PMC5SEL(x) (((x) & 0x1f) << 17) /* PMC 5 selector */ #define SPR_MMCR1_PMC6SEL(x) (((x) & 0x3f) << 11) /* PMC 6 selector */ #define SPR_SU0R 0x3bc /* 4.. Storage User-defined 0 Register */ #define SPR_PMC3 0x3bd /* .6. Performance Counter Register 3 */ #define SPR_PMC4 0x3be /* .6. Performance Counter Register 4 */ #define SPR_DMISS 0x3d0 /* .68 Data TLB Miss Address Register */ #define SPR_DCMP 0x3d1 /* .68 Data TLB Compare Register */ #define SPR_HASH1 0x3d2 /* .68 Primary Hash Address Register */ #define SPR_ICDBDR 0x3d3 /* 4.. Instruction Cache Debug Data Register */ #define SPR_HASH2 0x3d3 /* .68 Secondary Hash Address Register */ #define SPR_IMISS 0x3d4 /* .68 Instruction TLB Miss Address Register */ #define SPR_TLBMISS 0x3d4 /* .6. TLB Miss Address Register */ #define SPR_DEAR 0x3d5 /* 4.. Data Error Address Register */ #define SPR_ICMP 0x3d5 /* .68 Instruction TLB Compare Register */ #define SPR_PTEHI 0x3d5 /* .6. Instruction TLB Compare Register */ #define SPR_EVPR 0x3d6 /* 4.. Exception Vector Prefix Register */ #define SPR_RPA 0x3d6 /* .68 Required Physical Address Register */ #define SPR_PTELO 0x3d6 /* .6. 
Required Physical Address Register */ #define SPR_TSR 0x150 /* ..8 Timer Status Register */ #define SPR_TCR 0x154 /* ..8 Timer Control Register */ #define TSR_ENW 0x80000000 /* Enable Next Watchdog */ #define TSR_WIS 0x40000000 /* Watchdog Interrupt Status */ #define TSR_WRS_MASK 0x30000000 /* Watchdog Reset Status */ #define TSR_WRS_NONE 0x00000000 /* No watchdog reset has occurred */ #define TSR_WRS_CORE 0x10000000 /* Core reset was forced by the watchdog */ #define TSR_WRS_CHIP 0x20000000 /* Chip reset was forced by the watchdog */ #define TSR_WRS_SYSTEM 0x30000000 /* System reset was forced by the watchdog */ #define TSR_PIS 0x08000000 /* PIT Interrupt Status */ #define TSR_DIS 0x08000000 /* Decrementer Interrupt Status */ #define TSR_FIS 0x04000000 /* FIT Interrupt Status */ #define TCR_WP_MASK 0xc0000000 /* Watchdog Period mask */ #define TCR_WP_2_17 0x00000000 /* 2**17 clocks */ #define TCR_WP_2_21 0x40000000 /* 2**21 clocks */ #define TCR_WP_2_25 0x80000000 /* 2**25 clocks */ #define TCR_WP_2_29 0xc0000000 /* 2**29 clocks */ #define TCR_WRC_MASK 0x30000000 /* Watchdog Reset Control mask */ #define TCR_WRC_NONE 0x00000000 /* No watchdog reset */ #define TCR_WRC_CORE 0x10000000 /* Core reset */ #define TCR_WRC_CHIP 0x20000000 /* Chip reset */ #define TCR_WRC_SYSTEM 0x30000000 /* System reset */ #define TCR_WIE 0x08000000 /* Watchdog Interrupt Enable */ #define TCR_PIE 0x04000000 /* PIT Interrupt Enable */ #define TCR_DIE 0x04000000 /* Pecrementer Interrupt Enable */ #define TCR_FP_MASK 0x03000000 /* FIT Period */ #define TCR_FP_2_9 0x00000000 /* 2**9 clocks */ #define TCR_FP_2_13 0x01000000 /* 2**13 clocks */ #define TCR_FP_2_17 0x02000000 /* 2**17 clocks */ #define TCR_FP_2_21 0x03000000 /* 2**21 clocks */ #define TCR_FIE 0x00800000 /* FIT Interrupt Enable */ #define TCR_ARE 0x00400000 /* Auto Reload Enable */ #define SPR_PIT 0x3db /* 4.. Programmable Interval Timer */ #define SPR_SRR2 0x3de /* 4.. Save/Restore Register 2 */ #define SPR_SRR3 0x3df /* 4.. Save/Restore Register 3 */ #define SPR_HID0 0x3f0 /* ..8 Hardware Implementation Register 0 */ #define SPR_HID1 0x3f1 /* ..8 Hardware Implementation Register 1 */ #define SPR_HID2 0x3f3 /* ..8 Hardware Implementation Register 2 */ #define SPR_HID4 0x3f4 /* ..8 Hardware Implementation Register 4 */ #define SPR_HID5 0x3f6 /* ..8 Hardware Implementation Register 5 */ #define SPR_HID6 0x3f9 /* ..8 Hardware Implementation Register 6 */ #define SPR_CELL_TSRL 0x380 /* ... Cell BE Thread Status Register */ #define SPR_CELL_TSCR 0x399 /* ... Cell BE Thread Switch Register */ #if defined(AIM) #define SPR_DBSR 0x3f0 /* 4.. Debug Status Register */ #define DBSR_IC 0x80000000 /* Instruction completion debug event */ #define DBSR_BT 0x40000000 /* Branch Taken debug event */ #define DBSR_EDE 0x20000000 /* Exception debug event */ #define DBSR_TIE 0x10000000 /* Trap Instruction debug event */ #define DBSR_UDE 0x08000000 /* Unconditional debug event */ #define DBSR_IA1 0x04000000 /* IAC1 debug event */ #define DBSR_IA2 0x02000000 /* IAC2 debug event */ #define DBSR_DR1 0x01000000 /* DAC1 Read debug event */ #define DBSR_DW1 0x00800000 /* DAC1 Write debug event */ #define DBSR_DR2 0x00400000 /* DAC2 Read debug event */ #define DBSR_DW2 0x00200000 /* DAC2 Write debug event */ #define DBSR_IDE 0x00100000 /* Imprecise debug event */ #define DBSR_IA3 0x00080000 /* IAC3 debug event */ #define DBSR_IA4 0x00040000 /* IAC4 debug event */ #define DBSR_MRR 0x00000300 /* Most recent reset */ #define SPR_DBCR0 0x3f2 /* 4.. 
Debug Control Register 0 */ #define SPR_DBCR1 0x3bd /* 4.. Debug Control Register 1 */ #define SPR_IAC1 0x3f4 /* 4.. Instruction Address Compare 1 */ #define SPR_IAC2 0x3f5 /* 4.. Instruction Address Compare 2 */ #define SPR_DAC1 0x3f6 /* 4.. Data Address Compare 1 */ #define SPR_DAC2 0x3f7 /* 4.. Data Address Compare 2 */ #define SPR_PIR 0x3ff /* .6. Processor Identification Register */ #elif defined(BOOKE) #define SPR_PIR 0x11e /* ..8 Processor Identification Register */ #define SPR_DBSR 0x130 /* ..8 Debug Status Register */ #define DBSR_IDE 0x80000000 /* Imprecise debug event. */ #define DBSR_UDE 0x40000000 /* Unconditional debug event. */ #define DBSR_MRR 0x30000000 /* Most recent Reset (mask). */ #define DBSR_ICMP 0x08000000 /* Instr. complete debug event. */ #define DBSR_BRT 0x04000000 /* Branch taken debug event. */ #define DBSR_IRPT 0x02000000 /* Interrupt taken debug event. */ #define DBSR_TRAP 0x01000000 /* Trap instr. debug event. */ #define DBSR_IAC1 0x00800000 /* Instr. address compare #1. */ #define DBSR_IAC2 0x00400000 /* Instr. address compare #2. */ #define DBSR_IAC3 0x00200000 /* Instr. address compare #3. */ #define DBSR_IAC4 0x00100000 /* Instr. address compare #4. */ #define DBSR_DAC1R 0x00080000 /* Data addr. read compare #1. */ #define DBSR_DAC1W 0x00040000 /* Data addr. write compare #1. */ #define DBSR_DAC2R 0x00020000 /* Data addr. read compare #2. */ #define DBSR_DAC2W 0x00010000 /* Data addr. write compare #2. */ #define DBSR_RET 0x00008000 /* Return debug event. */ #define SPR_DBCR0 0x134 /* ..8 Debug Control Register 0 */ #define SPR_DBCR1 0x135 /* ..8 Debug Control Register 1 */ #define SPR_IAC1 0x138 /* ..8 Instruction Address Compare 1 */ #define SPR_IAC2 0x139 /* ..8 Instruction Address Compare 2 */ #define SPR_DAC1 0x13c /* ..8 Data Address Compare 1 */ #define SPR_DAC2 0x13d /* ..8 Data Address Compare 2 */ #endif #define DBCR0_EDM 0x80000000 /* External Debug Mode */ #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ #define DBCR0_RST_MASK 0x30000000 /* ReSeT */ #define DBCR0_RST_NONE 0x00000000 /* No action */ #define DBCR0_RST_CORE 0x10000000 /* Core reset */ #define DBCR0_RST_CHIP 0x20000000 /* Chip reset */ #define DBCR0_RST_SYSTEM 0x30000000 /* System reset */ #define DBCR0_IC 0x08000000 /* Instruction Completion debug event */ #define DBCR0_BT 0x04000000 /* Branch Taken debug event */ #define DBCR0_EDE 0x02000000 /* Exception Debug Event */ #define DBCR0_TDE 0x01000000 /* Trap Debug Event */ #define DBCR0_IA1 0x00800000 /* IAC (Instruction Address Compare) 1 debug event */ #define DBCR0_IA2 0x00400000 /* IAC 2 debug event */ #define DBCR0_IA12 0x00200000 /* Instruction Address Range Compare 1-2 */ #define DBCR0_IA12X 0x00100000 /* IA12 eXclusive */ #define DBCR0_IA3 0x00080000 /* IAC 3 debug event */ #define DBCR0_IA4 0x00040000 /* IAC 4 debug event */ #define DBCR0_IA34 0x00020000 /* Instruction Address Range Compare 3-4 */ #define DBCR0_IA34X 0x00010000 /* IA34 eXclusive */ #define DBCR0_IA12T 0x00008000 /* Instruction Address Range Compare 1-2 range Toggle */ #define DBCR0_IA34T 0x00004000 /* Instruction Address Range Compare 3-4 range Toggle */ #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ #define SPR_IABR 0x3f2 /* ..8 Instruction Address Breakpoint Register 0 */ #define SPR_DABR 0x3f5 /* .6. Data Address Breakpoint Register */ #define SPR_MSSCR0 0x3f6 /* .6. 
Memory SubSystem Control Register */ #define MSSCR0_SHDEN 0x80000000 /* 0: Shared-state enable */ #define MSSCR0_SHDPEN3 0x40000000 /* 1: ~SHD[01] signal enable in MEI mode */ #define MSSCR0_L1INTVEN 0x38000000 /* 2-4: L1 data cache ~HIT intervention enable */ #define MSSCR0_L2INTVEN 0x07000000 /* 5-7: L2 data cache ~HIT intervention enable*/ #define MSSCR0_DL1HWF 0x00800000 /* 8: L1 data cache hardware flush */ #define MSSCR0_MBO 0x00400000 /* 9: must be one */ #define MSSCR0_EMODE 0x00200000 /* 10: MPX bus mode (read-only) */ #define MSSCR0_ABD 0x00100000 /* 11: address bus driven (read-only) */ #define MSSCR0_MBZ 0x000fffff /* 12-31: must be zero */ #define MSSCR0_L2PFE 0x00000003 /* 30-31: L2 prefetch enable */ #define SPR_MSSSR0 0x3f7 /* .6. Memory Subsystem Status Register (MPC745x) */ #define MSSSR0_L2TAG 0x00040000 /* 13: L2 tag parity error */ #define MSSSR0_L2DAT 0x00020000 /* 14: L2 data parity error */ #define MSSSR0_L3TAG 0x00010000 /* 15: L3 tag parity error */ #define MSSSR0_L3DAT 0x00008000 /* 16: L3 data parity error */ #define MSSSR0_APE 0x00004000 /* 17: Address parity error */ #define MSSSR0_DPE 0x00002000 /* 18: Data parity error */ #define MSSSR0_TEA 0x00001000 /* 19: Bus transfer error acknowledge */ #define SPR_LDSTCR 0x3f8 /* .6. Load/Store Control Register */ #define SPR_L2PM 0x3f8 /* .6. L2 Private Memory Control Register */ #define SPR_L2CR 0x3f9 /* .6. L2 Control Register */ #define L2CR_L2E 0x80000000 /* 0: L2 enable */ #define L2CR_L2PE 0x40000000 /* 1: L2 data parity enable */ #define L2CR_L2SIZ 0x30000000 /* 2-3: L2 size */ #define L2SIZ_2M 0x00000000 #define L2SIZ_256K 0x10000000 #define L2SIZ_512K 0x20000000 #define L2SIZ_1M 0x30000000 #define L2CR_L2CLK 0x0e000000 /* 4-6: L2 clock ratio */ #define L2CLK_DIS 0x00000000 /* disable L2 clock */ #define L2CLK_10 0x02000000 /* core clock / 1 */ #define L2CLK_15 0x04000000 /* / 1.5 */ #define L2CLK_20 0x08000000 /* / 2 */ #define L2CLK_25 0x0a000000 /* / 2.5 */ #define L2CLK_30 0x0c000000 /* / 3 */ #define L2CR_L2RAM 0x01800000 /* 7-8: L2 RAM type */ #define L2RAM_FLOWTHRU_BURST 0x00000000 #define L2RAM_PIPELINE_BURST 0x01000000 #define L2RAM_PIPELINE_LATE 0x01800000 #define L2CR_L2DO 0x00400000 /* 9: L2 data-only. Setting this bit disables instruction caching. */ #define L2CR_L2I 0x00200000 /* 10: L2 global invalidate. */ #define L2CR_L2IO_7450 0x00010000 /* 11: L2 instruction-only (MPC745x). */ #define L2CR_L2CTL 0x00100000 /* 11: L2 RAM control (ZZ enable). Enables automatic operation of the L2ZZ (low-power mode) signal. */ #define L2CR_L2WT 0x00080000 /* 12: L2 write-through. */ #define L2CR_L2TS 0x00040000 /* 13: L2 test support. */ #define L2CR_L2OH 0x00030000 /* 14-15: L2 output hold. */ #define L2CR_L2DO_7450 0x00010000 /* 15: L2 data-only (MPC745x). */ #define L2CR_L2SL 0x00008000 /* 16: L2 DLL slow. */ #define L2CR_L2DF 0x00004000 /* 17: L2 differential clock. */ #define L2CR_L2BYP 0x00002000 /* 18: L2 DLL bypass. */ #define L2CR_L2FA 0x00001000 /* 19: L2 flush assist (for software flush). */ #define L2CR_L2HWF 0x00000800 /* 20: L2 hardware flush. */ #define L2CR_L2IO 0x00000400 /* 21: L2 instruction-only. */ #define L2CR_L2CLKSTP 0x00000200 /* 22: L2 clock stop. */ #define L2CR_L2DRO 0x00000100 /* 23: L2DLL rollover checkstop enable. */ #define L2CR_L2IP 0x00000001 /* 31: L2 global invalidate in */ /* progress (read only). */ #define SPR_L3CR 0x3fa /* .6. 
L3 Control Register */ #define L3CR_L3E 0x80000000 /* 0: L3 enable */ #define L3CR_L3PE 0x40000000 /* 1: L3 data parity enable */ #define L3CR_L3APE 0x20000000 #define L3CR_L3SIZ 0x10000000 /* 3: L3 size (0=1MB, 1=2MB) */ #define L3CR_L3CLKEN 0x08000000 /* 4: Enables L3_CLK[0:1] */ #define L3CR_L3CLK 0x03800000 #define L3CR_L3IO 0x00400000 #define L3CR_L3CLKEXT 0x00200000 #define L3CR_L3CKSPEXT 0x00100000 #define L3CR_L3OH1 0x00080000 #define L3CR_L3SPO 0x00040000 #define L3CR_L3CKSP 0x00030000 #define L3CR_L3PSP 0x0000e000 #define L3CR_L3REP 0x00001000 #define L3CR_L3HWF 0x00000800 #define L3CR_L3I 0x00000400 /* 21: L3 global invalidate */ #define L3CR_L3RT 0x00000300 #define L3CR_L3NIRCA 0x00000080 #define L3CR_L3DO 0x00000040 #define L3CR_PMEN 0x00000004 #define L3CR_PMSIZ 0x00000003 #define SPR_DCCR 0x3fa /* 4.. Data Cache Cachability Register */ #define SPR_ICCR 0x3fb /* 4.. Instruction Cache Cachability Register */ #define SPR_THRM1 0x3fc /* .6. Thermal Management Register */ #define SPR_THRM2 0x3fd /* .6. Thermal Management Register */ #define SPR_THRM_TIN 0x80000000 /* Thermal interrupt bit (RO) */ #define SPR_THRM_TIV 0x40000000 /* Thermal interrupt valid (RO) */ #define SPR_THRM_THRESHOLD(x) ((x) << 23) /* Thermal sensor threshold */ #define SPR_THRM_TID 0x00000004 /* Thermal interrupt direction */ #define SPR_THRM_TIE 0x00000002 /* Thermal interrupt enable */ #define SPR_THRM_VALID 0x00000001 /* Valid bit */ #define SPR_THRM3 0x3fe /* .6. Thermal Management Register */ #define SPR_THRM_TIMER(x) ((x) << 1) /* Sampling interval timer */ #define SPR_THRM_ENABLE 0x00000001 /* TAU Enable */ #define SPR_FPECR 0x3fe /* .6. Floating-Point Exception Cause Register */ /* Time Base Register declarations */ #define TBR_TBL 0x10c /* 468 Time Base Lower - read */ #define TBR_TBU 0x10d /* 468 Time Base Upper - read */ #define TBR_TBWL 0x11c /* 468 Time Base Lower - supervisor, write */ #define TBR_TBWU 0x11d /* 468 Time Base Upper - supervisor, write */ /* Performance counter declarations */ #define PMC_OVERFLOW 0x80000000 /* Counter has overflowed */ /* The first five countable [non-]events are common to many PMC's */ #define PMCN_NONE 0 /* Count nothing */ #define PMCN_CYCLES 1 /* Processor cycles */ #define PMCN_ICOMP 2 /* Instructions completed */ #define PMCN_TBLTRANS 3 /* TBL bit transitions */ #define PCMN_IDISPATCH 4 /* Instructions dispatched */ /* Similar things for the 970 PMC direct counters */ #define PMC970N_NONE 0x8 /* Count nothing */ #define PMC970N_CYCLES 0xf /* Processor cycles */ #define PMC970N_ICOMP 0x9 /* Instructions completed */ #if defined(BOOKE) #define SPR_MCARU 0x239 /* ..8 Machine Check Address register upper bits */ #define SPR_MCSR 0x23c /* ..8 Machine Check Syndrome register */ #define SPR_MCAR 0x23d /* ..8 Machine Check Address register */ #define SPR_ESR 0x003e /* ..8 Exception Syndrome Register */ #define ESR_PIL 0x08000000 /* Program interrupt - illegal */ #define ESR_PPR 0x04000000 /* Program interrupt - privileged */ #define ESR_PTR 0x02000000 /* Program interrupt - trap */ #define ESR_ST 0x00800000 /* Store operation */ #define ESR_DLK 0x00200000 /* Data storage, D cache locking */ #define ESR_ILK 0x00100000 /* Data storage, I cache locking */ #define ESR_BO 0x00020000 /* Data/instruction storage, byte ordering */ #define ESR_SPE 0x00000080 /* SPE exception bit */ #define SPR_CSRR0 0x03a /* ..8 58 Critical SRR0 */ #define SPR_CSRR1 0x03b /* ..8 59 Critical SRR1 */ #define SPR_MCSRR0 0x23a /* ..8 570 Machine check SRR0 */ #define SPR_MCSRR1 0x23b /* ..8 
571 Machine check SRR1 */ #define SPR_DSRR0 0x23e /* ..8 574 Debug SRR0 */ #define SPR_DSRR1 0x23f /* ..8 575 Debug SRR1 */ #define SPR_MMUCR 0x3b2 /* 4.. MMU Control Register */ #define MMUCR_SWOA (0x80000000 >> 7) #define MMUCR_U1TE (0x80000000 >> 9) #define MMUCR_U2SWOAE (0x80000000 >> 10) #define MMUCR_DULXE (0x80000000 >> 12) #define MMUCR_IULXE (0x80000000 >> 13) #define MMUCR_STS (0x80000000 >> 15) #define MMUCR_STID_MASK (0xFF000000 >> 24) #define SPR_MMUCSR0 0x3f4 /* ..8 1012 MMU Control and Status Register 0 */ #define MMUCSR0_L2TLB0_FI 0x04 /* TLB0 flash invalidate */ #define MMUCSR0_L2TLB1_FI 0x02 /* TLB1 flash invalidate */ #define SPR_SVR 0x3ff /* ..8 1023 System Version Register */ #define SVR_MPC8533 0x8034 #define SVR_MPC8533E 0x803c #define SVR_MPC8541 0x8072 #define SVR_MPC8541E 0x807a #define SVR_MPC8548 0x8031 #define SVR_MPC8548E 0x8039 #define SVR_MPC8555 0x8071 #define SVR_MPC8555E 0x8079 #define SVR_MPC8572 0x80e0 #define SVR_MPC8572E 0x80e8 #define SVR_P1011 0x80e5 #define SVR_P1011E 0x80ed #define SVR_P1013 0x80e7 #define SVR_P1013E 0x80ef #define SVR_P1020 0x80e4 #define SVR_P1020E 0x80ec #define SVR_P1022 0x80e6 #define SVR_P1022E 0x80ee #define SVR_P2010 0x80e3 #define SVR_P2010E 0x80eb #define SVR_P2020 0x80e2 #define SVR_P2020E 0x80ea #define SVR_P2041 0x8210 #define SVR_P2041E 0x8218 #define SVR_P3041 0x8211 #define SVR_P3041E 0x8219 #define SVR_P4040 0x8200 #define SVR_P4040E 0x8208 #define SVR_P4080 0x8201 #define SVR_P4080E 0x8209 #define SVR_P5010 0x8221 #define SVR_P5010E 0x8229 #define SVR_P5020 0x8220 #define SVR_P5020E 0x8228 #define SVR_P5021 0x8205 #define SVR_P5021E 0x820d #define SVR_P5040 0x8204 #define SVR_P5040E 0x820c #define SVR_VER(svr) (((svr) >> 16) & 0xffff) #define SPR_PID0 0x030 /* ..8 Process ID Register 0 */ #define SPR_PID1 0x279 /* ..8 Process ID Register 1 */ #define SPR_PID2 0x27a /* ..8 Process ID Register 2 */ #define SPR_TLB0CFG 0x2B0 /* ..8 TLB 0 Config Register */ #define SPR_TLB1CFG 0x2B1 /* ..8 TLB 1 Config Register */ #define TLBCFG_ASSOC_MASK 0xff000000 /* Associativity of TLB */ #define TLBCFG_ASSOC_SHIFT 24 #define TLBCFG_NENTRY_MASK 0x00000fff /* Number of entries in TLB */ #define SPR_IVPR 0x03f /* ..8 Interrupt Vector Prefix Register */ #define SPR_IVOR0 0x190 /* ..8 Critical input */ #define SPR_IVOR1 0x191 /* ..8 Machine check */ #define SPR_IVOR2 0x192 #define SPR_IVOR3 0x193 #define SPR_IVOR4 0x194 #define SPR_IVOR5 0x195 #define SPR_IVOR6 0x196 #define SPR_IVOR7 0x197 #define SPR_IVOR8 0x198 #define SPR_IVOR9 0x199 #define SPR_IVOR10 0x19a #define SPR_IVOR11 0x19b #define SPR_IVOR12 0x19c #define SPR_IVOR13 0x19d #define SPR_IVOR14 0x19e #define SPR_IVOR15 0x19f #define SPR_IVOR32 0x210 #define SPR_IVOR33 0x211 #define SPR_IVOR34 0x212 #define SPR_IVOR35 0x213 #define SPR_MAS0 0x270 /* ..8 MMU Assist Register 0 Book-E/e500 */ #define SPR_MAS1 0x271 /* ..8 MMU Assist Register 1 Book-E/e500 */ #define SPR_MAS2 0x272 /* ..8 MMU Assist Register 2 Book-E/e500 */ #define SPR_MAS3 0x273 /* ..8 MMU Assist Register 3 Book-E/e500 */ #define SPR_MAS4 0x274 /* ..8 MMU Assist Register 4 Book-E/e500 */ #define SPR_MAS5 0x275 /* ..8 MMU Assist Register 5 Book-E */ #define SPR_MAS6 0x276 /* ..8 MMU Assist Register 6 Book-E/e500 */ #define SPR_MAS7 0x3B0 /* ..8 MMU Assist Register 7 Book-E/e500 */ #define SPR_MAS8 0x155 /* ..8 MMU Assist Register 8 Book-E/e500 */ #define SPR_L1CFG0 0x203 /* ..8 L1 cache configuration register 0 */ #define SPR_L1CFG1 0x204 /* ..8 L1 cache configuration register 1 */ #define SPR_CCR1 0x378 
#define CCR1_L2COBE 0x00000040 #define DCR_L2DCDCRAI 0x0000 /* L2 D-Cache DCR Address Pointer */ #define DCR_L2DCDCRDI 0x0001 /* L2 D-Cache DCR Data Indirect */ #define DCR_L2CR0 0x00 /* L2 Cache Configuration Register 0 */ #define L2CR0_AS 0x30000000 #define SPR_L1CSR0 0x3F2 /* ..8 L1 Cache Control and Status Register 0 */ #define L1CSR0_DCPE 0x00010000 /* Data Cache Parity Enable */ #define L1CSR0_DCLFR 0x00000100 /* Data Cache Lock Bits Flash Reset */ #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ #define L1CSR0_DCE 0x00000001 /* Data Cache Enable */ #define SPR_L1CSR1 0x3F3 /* ..8 L1 Cache Control and Status Register 1 */ #define L1CSR1_ICPE 0x00010000 /* Instruction Cache Parity Enable */ #define L1CSR1_ICUL 0x00000400 /* Instr Cache Unable to Lock */ #define L1CSR1_ICLFR 0x00000100 /* Instruction Cache Lock Bits Flash Reset */ #define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */ #define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */ #define SPR_L2CSR0 0x3F9 /* ..8 L2 Cache Control and Status Register 0 */ #define L2CSR0_L2E 0x80000000 /* L2 Cache Enable */ #define L2CSR0_L2PE 0x40000000 /* L2 Cache Parity Enable */ #define L2CSR0_L2FI 0x00200000 /* L2 Cache Flash Invalidate */ #define L2CSR0_L2LFC 0x00000400 /* L2 Cache Lock Flags Clear */ #define SPR_BUCSR 0x3F5 /* ..8 Branch Unit Control and Status Register */ #define BUCSR_BPEN 0x00000001 /* Branch Prediction Enable */ #define BUCSR_BBFI 0x00000200 /* Branch Buffer Flash Invalidate */ #endif /* BOOKE */ #endif /* !_POWERPC_SPR_H_ */ Index: head/sys/powerpc/powerpc/exec_machdep.c =================================================================== --- head/sys/powerpc/powerpc/exec_machdep.c (revision 346789) +++ head/sys/powerpc/powerpc/exec_machdep.c (revision 346790) @@ -1,1135 +1,1165 @@ /*- * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause-FreeBSD * * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_fpu_emu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FPU_EMU #include #endif #ifdef COMPAT_FREEBSD32 #include #include #include typedef struct __ucontext32 { sigset_t uc_sigmask; mcontext32_t uc_mcontext; uint32_t uc_link; struct sigaltstack32 uc_stack; uint32_t uc_flags; uint32_t __spare__[4]; } ucontext32_t; struct sigframe32 { ucontext32_t sf_uc; struct siginfo32 sf_si; }; static int grab_mcontext32(struct thread *td, mcontext32_t *, int flags); #endif static int grab_mcontext(struct thread *, mcontext_t *, int); +static void cleanup_power_extras(struct thread *); + #ifdef __powerpc64__ extern struct sysentvec elf64_freebsd_sysvec_v2; #endif void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct trapframe *tf; struct sigacts *psp; struct sigframe sf; struct thread *td; struct proc *p; #ifdef COMPAT_FREEBSD32 struct siginfo32 siginfo32; struct sigframe32 sf32; #endif size_t sfpsize; caddr_t sfp, usfp; int oonstack, rndfsize; int sig; int code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; oonstack = sigonstack(tf->fixreg[1]); /* * Fill siginfo structure. */ ksi->ksi_info.si_signo = ksi->ksi_signo; ksi->ksi_info.si_addr = (void *)((tf->exc == EXC_DSI || tf->exc == EXC_DSE) ? tf->dar : tf->srr0); #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32)) { siginfo_to_siginfo32(&ksi->ksi_info, &siginfo32); sig = siginfo32.si_signo; code = siginfo32.si_code; sfp = (caddr_t)&sf32; sfpsize = sizeof(sf32); rndfsize = roundup(sizeof(sf32), 16); /* * Save user context */ memset(&sf32, 0, sizeof(sf32)); grab_mcontext32(td, &sf32.sf_uc.uc_mcontext, 0); sf32.sf_uc.uc_sigmask = *mask; sf32.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp; sf32.sf_uc.uc_stack.ss_size = (uint32_t)td->td_sigstk.ss_size; sf32.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? 
SS_ONSTACK : 0) : SS_DISABLE; sf32.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; } else { #endif sig = ksi->ksi_signo; code = ksi->ksi_code; sfp = (caddr_t)&sf; sfpsize = sizeof(sf); #ifdef __powerpc64__ /* * 64-bit PPC defines a 288 byte scratch region * below the stack. */ rndfsize = 288 + roundup(sizeof(sf), 48); #else rndfsize = roundup(sizeof(sf), 16); #endif /* * Save user context */ memset(&sf, 0, sizeof(sf)); grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; #ifdef COMPAT_FREEBSD32 } #endif CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* * Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { usfp = (void *)(((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size - rndfsize) & ~0xFul); } else { usfp = (void *)((tf->fixreg[1] - rndfsize) & ~0xFul); } /* * Save the floating-point state, if necessary, then copy it. */ /* XXX */ /* * Set up the registers to return to sigcode. * * r1/sp - sigframe ptr * lr - sig function, dispatched to by blrl in trampoline * r3 - sig number * r4 - SIGINFO ? &siginfo : exception code * r5 - user context * srr0 - trampoline function addr */ tf->lr = (register_t)catcher; tf->fixreg[1] = (register_t)usfp; tf->fixreg[FIRSTARG] = sig; #ifdef COMPAT_FREEBSD32 tf->fixreg[FIRSTARG+2] = (register_t)usfp + ((SV_PROC_FLAG(p, SV_ILP32)) ? offsetof(struct sigframe32, sf_uc) : offsetof(struct sigframe, sf_uc)); #else tf->fixreg[FIRSTARG+2] = (register_t)usfp + offsetof(struct sigframe, sf_uc); #endif if (SIGISMEMBER(psp->ps_siginfo, sig)) { /* * Signal handler installed with SA_SIGINFO. */ #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32)) { sf32.sf_si = siginfo32; tf->fixreg[FIRSTARG+1] = (register_t)usfp + offsetof(struct sigframe32, sf_si); sf32.sf_si = siginfo32; } else { #endif tf->fixreg[FIRSTARG+1] = (register_t)usfp + offsetof(struct sigframe, sf_si); sf.sf_si = ksi->ksi_info; #ifdef COMPAT_FREEBSD32 } #endif } else { /* Old FreeBSD-style arguments. */ tf->fixreg[FIRSTARG+1] = code; tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ? tf->dar : tf->srr0; } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); tf->srr0 = (register_t)p->p_sysent->sv_sigcode_base; /* * copy the frame out to userland. */ if (copyout(sfp, usfp, sfpsize) != 0) { /* * Process has trashed its stack. Kill it. 
*/ CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp); PROC_LOCK(p); sigexit(td, SIGILL); } CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->srr0, tf->fixreg[1]); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { ucontext_t uc; int error; CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp); if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) { CTR1(KTR_SIG, "sigreturn: efault td=%p", td); return (EFAULT); } error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x", td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]); return (EJUSTRETURN); } #ifdef COMPAT_FREEBSD4 int freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) { return sys_sigreturn(td, (struct sigreturn_args *)uap); } #endif /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. */ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_lr = tf->srr0; pcb->pcb_sp = tf->fixreg[1]; } /* * get_mcontext/sendsig helper routine that doesn't touch the * proc lock */ static int grab_mcontext(struct thread *td, mcontext_t *mcp, int flags) { struct pcb *pcb; int i; pcb = td->td_pcb; memset(mcp, 0, sizeof(mcontext_t)); mcp->mc_vers = _MC_VERSION; mcp->mc_flags = 0; memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe)); if (flags & GET_MC_CLEAR_RET) { mcp->mc_gpr[3] = 0; mcp->mc_gpr[4] = 0; } /* * This assumes that floating-point context is *not* lazy, * so if the thread has used FP there would have been a * FP-unavailable exception that would have set things up * correctly. 
*/ if (pcb->pcb_flags & PCB_FPREGS) { if (pcb->pcb_flags & PCB_FPU) { KASSERT(td == curthread, ("get_mcontext: fp save not curthread")); critical_enter(); save_fpu(td); critical_exit(); } mcp->mc_flags |= _MC_FP_VALID; memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double)); for (i = 0; i < 32; i++) memcpy(&mcp->mc_fpreg[i], &pcb->pcb_fpu.fpr[i].fpr, sizeof(double)); } if (pcb->pcb_flags & PCB_VSX) { for (i = 0; i < 32; i++) memcpy(&mcp->mc_vsxfpreg[i], &pcb->pcb_fpu.fpr[i].vsr[2], sizeof(double)); } /* * Repeat for Altivec context */ if (pcb->pcb_flags & PCB_VEC) { KASSERT(td == curthread, ("get_mcontext: fp save not curthread")); critical_enter(); save_vec(td); critical_exit(); mcp->mc_flags |= _MC_AV_VALID; mcp->mc_vscr = pcb->pcb_vec.vscr; mcp->mc_vrsave = pcb->pcb_vec.vrsave; memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec)); } mcp->mc_len = sizeof(*mcp); return (0); } int get_mcontext(struct thread *td, mcontext_t *mcp, int flags) { int error; error = grab_mcontext(td, mcp, flags); if (error == 0) { PROC_LOCK(curthread->td_proc); mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]); PROC_UNLOCK(curthread->td_proc); } return (error); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct pcb *pcb; struct trapframe *tf; register_t tls; int i; pcb = td->td_pcb; tf = td->td_frame; if (mcp->mc_vers != _MC_VERSION || mcp->mc_len != sizeof(*mcp)) return (EINVAL); /* * Don't let the user set privileged MSR bits */ if ((mcp->mc_srr1 & psl_userstatic) != (tf->srr1 & psl_userstatic)) { return (EINVAL); } /* Copy trapframe, preserving TLS pointer across context change */ if (SV_PROC_FLAG(td->td_proc, SV_LP64)) tls = tf->fixreg[13]; else tls = tf->fixreg[2]; memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame)); if (SV_PROC_FLAG(td->td_proc, SV_LP64)) tf->fixreg[13] = tls; else tf->fixreg[2] = tls; /* Disable FPU */ tf->srr1 &= ~PSL_FP; pcb->pcb_flags &= ~PCB_FPU; if (mcp->mc_flags & _MC_FP_VALID) { /* enable_fpu() will happen lazily on a fault */ pcb->pcb_flags |= PCB_FPREGS; memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double)); bzero(pcb->pcb_fpu.fpr, sizeof(pcb->pcb_fpu.fpr)); for (i = 0; i < 32; i++) { memcpy(&pcb->pcb_fpu.fpr[i].fpr, &mcp->mc_fpreg[i], sizeof(double)); memcpy(&pcb->pcb_fpu.fpr[i].vsr[2], &mcp->mc_vsxfpreg[i], sizeof(double)); } } if (mcp->mc_flags & _MC_AV_VALID) { if ((pcb->pcb_flags & PCB_VEC) != PCB_VEC) { critical_enter(); enable_vec(td); critical_exit(); } pcb->pcb_vec.vscr = mcp->mc_vscr; pcb->pcb_vec.vrsave = mcp->mc_vrsave; memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec)); } return (0); } /* + * Clean up extra POWER state. Some per-process registers and states are not + * managed by the MSR, so must be cleaned up explicitly on thread exit. + * + * Currently this includes: + * DSCR -- Data stream control register (PowerISA 2.06+) + * FSCR -- Facility Status and Control Register (PowerISA 2.07+) + */ +static void +cleanup_power_extras(struct thread *td) +{ + uint32_t pcb_flags; + + if (td != curthread) + return; + + pcb_flags = td->td_pcb->pcb_flags; + /* Clean up registers not managed by MSR. */ + if (pcb_flags & PCB_CFSCR) + mtspr(SPR_FSCR, 0); + if (pcb_flags & PCB_CDSCR) + mtspr(SPR_DSCRP, 0); +} + +/* * Set set up registers on exec. 
*/ void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf; register_t argc; tf = trapframe(td); bzero(tf, sizeof *tf); #ifdef __powerpc64__ tf->fixreg[1] = -roundup(-stack + 48, 16); #else tf->fixreg[1] = -roundup(-stack + 8, 16); #endif /* * Set up arguments for _start(): * _start(argc, argv, envp, obj, cleanup, ps_strings); * * Notes: * - obj and cleanup are the auxilliary and termination * vectors. They are fixed up by ld.elf_so. * - ps_strings is a NetBSD extention, and will be * ignored by executables which are strictly * compliant with the SVR4 ABI. */ /* Collect argc from the user stack */ argc = fuword((void *)stack); tf->fixreg[3] = argc; tf->fixreg[4] = stack + sizeof(register_t); tf->fixreg[5] = stack + (2 + argc)*sizeof(register_t); tf->fixreg[6] = 0; /* auxillary vector */ tf->fixreg[7] = 0; /* termination vector */ tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */ tf->srr0 = imgp->entry_addr; #ifdef __powerpc64__ tf->fixreg[12] = imgp->entry_addr; #endif tf->srr1 = psl_userset | PSL_FE_DFLT; + cleanup_power_extras(td); td->td_pcb->pcb_flags = 0; } #ifdef COMPAT_FREEBSD32 void ppc32_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf; uint32_t argc; tf = trapframe(td); bzero(tf, sizeof *tf); tf->fixreg[1] = -roundup(-stack + 8, 16); argc = fuword32((void *)stack); tf->fixreg[3] = argc; tf->fixreg[4] = stack + sizeof(uint32_t); tf->fixreg[5] = stack + (2 + argc)*sizeof(uint32_t); tf->fixreg[6] = 0; /* auxillary vector */ tf->fixreg[7] = 0; /* termination vector */ tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */ tf->srr0 = imgp->entry_addr; tf->srr1 = psl_userset32 | PSL_FE_DFLT; + cleanup_power_extras(td); td->td_pcb->pcb_flags = 0; } #endif int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tf; tf = td->td_frame; memcpy(regs, tf, sizeof(struct reg)); return (0); } int fill_dbregs(struct thread *td, struct dbreg *dbregs) { /* No debug registers on PowerPC */ return (ENOSYS); } int fill_fpregs(struct thread *td, struct fpreg *fpregs) { struct pcb *pcb; int i; pcb = td->td_pcb; if ((pcb->pcb_flags & PCB_FPREGS) == 0) memset(fpregs, 0, sizeof(struct fpreg)); else { memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double)); for (i = 0; i < 32; i++) memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr, sizeof(double)); } return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tf; tf = td->td_frame; memcpy(tf, regs, sizeof(struct reg)); return (0); } int set_dbregs(struct thread *td, struct dbreg *dbregs) { /* No debug registers on PowerPC */ return (ENOSYS); } int set_fpregs(struct thread *td, struct fpreg *fpregs) { struct pcb *pcb; int i; pcb = td->td_pcb; pcb->pcb_flags |= PCB_FPREGS; memcpy(&pcb->pcb_fpu.fpscr, &fpregs->fpscr, sizeof(double)); for (i = 0; i < 32; i++) { memcpy(&pcb->pcb_fpu.fpr[i].fpr, &fpregs->fpreg[i], sizeof(double)); } return (0); } #ifdef COMPAT_FREEBSD32 int set_regs32(struct thread *td, struct reg32 *regs) { struct trapframe *tf; int i; tf = td->td_frame; for (i = 0; i < 32; i++) tf->fixreg[i] = regs->fixreg[i]; tf->lr = regs->lr; tf->cr = regs->cr; tf->xer = regs->xer; tf->ctr = regs->ctr; tf->srr0 = regs->pc; return (0); } int fill_regs32(struct thread *td, struct reg32 *regs) { struct trapframe *tf; int i; tf = td->td_frame; for (i = 0; i < 32; i++) regs->fixreg[i] = tf->fixreg[i]; regs->lr = tf->lr; regs->cr = tf->cr; regs->xer = tf->xer; regs->ctr = tf->ctr; regs->pc = tf->srr0; 
return (0); } static int grab_mcontext32(struct thread *td, mcontext32_t *mcp, int flags) { mcontext_t mcp64; int i, error; error = grab_mcontext(td, &mcp64, flags); if (error != 0) return (error); mcp->mc_vers = mcp64.mc_vers; mcp->mc_flags = mcp64.mc_flags; mcp->mc_onstack = mcp64.mc_onstack; mcp->mc_len = mcp64.mc_len; memcpy(mcp->mc_avec,mcp64.mc_avec,sizeof(mcp64.mc_avec)); memcpy(mcp->mc_av,mcp64.mc_av,sizeof(mcp64.mc_av)); for (i = 0; i < 42; i++) mcp->mc_frame[i] = mcp64.mc_frame[i]; memcpy(mcp->mc_fpreg,mcp64.mc_fpreg,sizeof(mcp64.mc_fpreg)); memcpy(mcp->mc_vsxfpreg,mcp64.mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg)); return (0); } static int get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags) { int error; error = grab_mcontext32(td, mcp, flags); if (error == 0) { PROC_LOCK(curthread->td_proc); mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]); PROC_UNLOCK(curthread->td_proc); } return (error); } static int set_mcontext32(struct thread *td, mcontext32_t *mcp) { mcontext_t mcp64; int i, error; mcp64.mc_vers = mcp->mc_vers; mcp64.mc_flags = mcp->mc_flags; mcp64.mc_onstack = mcp->mc_onstack; mcp64.mc_len = mcp->mc_len; memcpy(mcp64.mc_avec,mcp->mc_avec,sizeof(mcp64.mc_avec)); memcpy(mcp64.mc_av,mcp->mc_av,sizeof(mcp64.mc_av)); for (i = 0; i < 42; i++) mcp64.mc_frame[i] = mcp->mc_frame[i]; mcp64.mc_srr1 |= (td->td_frame->srr1 & 0xFFFFFFFF00000000ULL); memcpy(mcp64.mc_fpreg,mcp->mc_fpreg,sizeof(mcp64.mc_fpreg)); memcpy(mcp64.mc_vsxfpreg,mcp->mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg)); error = set_mcontext(td, &mcp64); return (error); } #endif #ifdef COMPAT_FREEBSD32 int freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap) { ucontext32_t uc; int error; CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp); if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) { CTR1(KTR_SIG, "sigreturn: efault td=%p", td); return (EFAULT); } error = set_mcontext32(td, &uc.uc_mcontext); if (error != 0) return (error); kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x", td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]); return (EJUSTRETURN); } /* * The first two fields of a ucontext_t are the signal mask and the machine * context. The next field is uc_link; we want to avoid destroying the link * when copying out contexts. */ #define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link) int freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { bzero(&uc, sizeof(uc)); get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE); } return (ret); } int freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); if (ret == 0) { kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } } return (ret == 0 ? 
EJUSTRETURN : ret); } int freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap) { ucontext32_t uc; int ret; if (uap->oucp == NULL || uap->ucp == NULL) ret = EINVAL; else { bzero(&uc, sizeof(uc)); get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE); if (ret == 0) { ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); if (ret == 0) { kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } } } return (ret == 0 ? EJUSTRETURN : ret); } #endif void cpu_set_syscall_retval(struct thread *td, int error) { struct proc *p; struct trapframe *tf; int fixup; if (error == EJUSTRETURN) return; p = td->td_proc; tf = td->td_frame; if (tf->fixreg[0] == SYS___syscall && (SV_PROC_FLAG(p, SV_ILP32))) { int code = tf->fixreg[FIRSTARG + 1]; fixup = ( #if defined(COMPAT_FREEBSD6) && defined(SYS_freebsd6_lseek) code != SYS_freebsd6_lseek && #endif code != SYS_lseek) ? 1 : 0; } else fixup = 0; switch (error) { case 0: if (fixup) { /* * 64-bit return, 32-bit syscall. Fixup byte order */ tf->fixreg[FIRSTARG] = 0; tf->fixreg[FIRSTARG + 1] = td->td_retval[0]; } else { tf->fixreg[FIRSTARG] = td->td_retval[0]; tf->fixreg[FIRSTARG + 1] = td->td_retval[1]; } tf->cr &= ~0x10000000; /* Unset summary overflow */ break; case ERESTART: /* * Set user's pc back to redo the system call. */ tf->srr0 -= 4; break; default: tf->fixreg[FIRSTARG] = SV_ABI_ERRNO(p, error); tf->cr |= 0x10000000; /* Set summary overflow */ break; } } /* * Threading functions */ void cpu_thread_exit(struct thread *td) { + cleanup_power_extras(td); } void cpu_thread_clean(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { struct pcb *pcb; pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fUL); td->td_pcb = pcb; td->td_frame = (struct trapframe *)pcb - 1; } void cpu_thread_free(struct thread *td) { } int cpu_set_user_tls(struct thread *td, void *tls_base) { if (SV_PROC_FLAG(td->td_proc, SV_LP64)) td->td_frame->fixreg[13] = (register_t)tls_base + 0x7010; else td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008; return (0); } void cpu_copy_thread(struct thread *td, struct thread *td0) { struct pcb *pcb2; struct trapframe *tf; struct callframe *cf; pcb2 = td->td_pcb; /* Copy the upcall pcb */ bcopy(td0->td_pcb, pcb2, sizeof(*pcb2)); /* Create a stack for the new thread */ tf = td->td_frame; bcopy(td0->td_frame, tf, sizeof(struct trapframe)); tf->fixreg[FIRSTARG] = 0; tf->fixreg[FIRSTARG + 1] = 0; tf->cr &= ~0x10000000; /* Set registers for trampoline to user mode. */ cf = (struct callframe *)tf - 1; memset(cf, 0, sizeof(struct callframe)); cf->cf_func = (register_t)fork_return; cf->cf_arg0 = (register_t)td; cf->cf_arg1 = (register_t)tf; pcb2->pcb_sp = (register_t)cf; #if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1) pcb2->pcb_lr = ((register_t *)fork_trampoline)[0]; pcb2->pcb_toc = ((register_t *)fork_trampoline)[1]; #else pcb2->pcb_lr = (register_t)fork_trampoline; pcb2->pcb_context[0] = pcb2->pcb_lr; #endif pcb2->pcb_cpu.aim.usr_vsid = 0; #ifdef __SPE__ pcb2->pcb_vec.vscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE; #endif /* Setup to release spin count in fork_exit(). 
*/ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_msr = psl_kernset; } void cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf; uintptr_t sp; tf = td->td_frame; /* align stack and alloc space for frame ptr and saved LR */ #ifdef __powerpc64__ sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 48) & ~0x1f; #else sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 8) & ~0x1f; #endif bzero(tf, sizeof(struct trapframe)); tf->fixreg[1] = (register_t)sp; tf->fixreg[3] = (register_t)arg; if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { tf->srr0 = (register_t)entry; #ifdef __powerpc64__ tf->srr1 = psl_userset32 | PSL_FE_DFLT; #else tf->srr1 = psl_userset | PSL_FE_DFLT; #endif } else { #ifdef __powerpc64__ if (td->td_proc->p_sysent == &elf64_freebsd_sysvec_v2) { tf->srr0 = (register_t)entry; /* ELFv2 ABI requires that the global entry point be in r12. */ tf->fixreg[12] = (register_t)entry; } else { register_t entry_desc[3]; (void)copyin((void *)entry, entry_desc, sizeof(entry_desc)); tf->srr0 = entry_desc[0]; tf->fixreg[2] = entry_desc[1]; tf->fixreg[11] = entry_desc[2]; } tf->srr1 = psl_userset | PSL_FE_DFLT; #endif } td->td_pcb->pcb_flags = 0; #ifdef __SPE__ td->td_pcb->pcb_vec.vscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE; #endif td->td_retval[0] = (register_t)entry; td->td_retval[1] = 0; } static int emulate_mfspr(int spr, int reg, struct trapframe *frame){ struct thread *td; td = curthread; - if (spr == SPR_DSCR) { + if (spr == SPR_DSCR || spr == SPR_DSCRP) { // If DSCR was never set, get the default DSCR if ((td->td_pcb->pcb_flags & PCB_CDSCR) == 0) - td->td_pcb->pcb_dscr = mfspr(SPR_DSCR); + td->td_pcb->pcb_dscr = mfspr(SPR_DSCRP); frame->fixreg[reg] = td->td_pcb->pcb_dscr; frame->srr0 += 4; return 0; } else return SIGILL; } static int emulate_mtspr(int spr, int reg, struct trapframe *frame){ struct thread *td; td = curthread; - if (spr == SPR_DSCR) { + if (spr == SPR_DSCR || spr == SPR_DSCRP) { td->td_pcb->pcb_flags |= PCB_CDSCR; td->td_pcb->pcb_dscr = frame->fixreg[reg]; + mtspr(SPR_DSCRP, frame->fixreg[reg]); frame->srr0 += 4; return 0; } else return SIGILL; } #define XFX 0xFC0007FF int ppc_instr_emulate(struct trapframe *frame, struct thread *td) { struct pcb *pcb; uint32_t instr; int reg, sig; int rs, spr; instr = fuword32((void *)frame->srr0); sig = SIGILL; if ((instr & 0xfc1fffff) == 0x7c1f42a6) { /* mfpvr */ reg = (instr & ~0xfc1fffff) >> 21; frame->fixreg[reg] = mfpvr(); frame->srr0 += 4; return (0); } else if ((instr & XFX) == 0x7c0002a6) { /* mfspr */ rs = (instr & 0x3e00000) >> 21; spr = (instr & 0x1ff800) >> 16; return emulate_mfspr(spr, rs, frame); } else if ((instr & XFX) == 0x7c0003a6) { /* mtspr */ rs = (instr & 0x3e00000) >> 21; spr = (instr & 0x1ff800) >> 16; return emulate_mtspr(spr, rs, frame); } else if ((instr & 0xfc000ffe) == 0x7c0004ac) { /* various sync */ powerpc_sync(); /* Do a heavy-weight sync */ frame->srr0 += 4; return (0); } pcb = td->td_pcb; #ifdef FPU_EMU if (!(pcb->pcb_flags & PCB_FPREGS)) { bzero(&pcb->pcb_fpu, sizeof(pcb->pcb_fpu)); pcb->pcb_flags |= PCB_FPREGS; } else if (pcb->pcb_flags & PCB_FPU) save_fpu(td); sig = fpu_emulate(frame, &pcb->pcb_fpu); if ((sig == 0 || sig == SIGFPE) && pcb->pcb_flags & PCB_FPU) enable_fpu(td); #endif if (sig == SIGILL) { if (pcb->pcb_lastill != frame->srr0) { /* Allow a second chance, in case of cache sync issues. 
*/ sig = 0; pmap_sync_icache(PCPU_GET(curpmap), frame->srr0, 4); pcb->pcb_lastill = frame->srr0; } } return (sig); } Index: head/sys/powerpc/powerpc/genassym.c =================================================================== --- head/sys/powerpc/powerpc/genassym.c (revision 346789) +++ head/sys/powerpc/powerpc/genassym.c (revision 346790) @@ -1,271 +1,273 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap)); ASSYM(PC_TEMPSAVE, offsetof(struct pcpu, pc_tempsave)); ASSYM(PC_DISISAVE, offsetof(struct pcpu, pc_disisave)); ASSYM(PC_DBSAVE, offsetof(struct pcpu, pc_dbsave)); ASSYM(PC_RESTORE, offsetof(struct pcpu, pc_restore)); #if defined(BOOKE) ASSYM(PC_BOOKE_CRITSAVE, offsetof(struct pcpu, pc_booke.critsave)); ASSYM(PC_BOOKE_MCHKSAVE, offsetof(struct pcpu, pc_booke.mchksave)); ASSYM(PC_BOOKE_TLBSAVE, offsetof(struct pcpu, pc_booke.tlbsave)); ASSYM(PC_BOOKE_TLB_LEVEL, offsetof(struct pcpu, pc_booke.tlb_level)); ASSYM(PC_BOOKE_TLB_LOCK, offsetof(struct pcpu, pc_booke.tlb_lock)); #endif ASSYM(CPUSAVE_R27, CPUSAVE_R27*sizeof(register_t)); ASSYM(CPUSAVE_R28, CPUSAVE_R28*sizeof(register_t)); ASSYM(CPUSAVE_R29, CPUSAVE_R29*sizeof(register_t)); ASSYM(CPUSAVE_R30, CPUSAVE_R30*sizeof(register_t)); ASSYM(CPUSAVE_R31, CPUSAVE_R31*sizeof(register_t)); ASSYM(CPUSAVE_SRR0, CPUSAVE_SRR0*sizeof(register_t)); ASSYM(CPUSAVE_SRR1, CPUSAVE_SRR1*sizeof(register_t)); ASSYM(CPUSAVE_AIM_DAR, CPUSAVE_AIM_DAR*sizeof(register_t)); ASSYM(CPUSAVE_AIM_DSISR, CPUSAVE_AIM_DSISR*sizeof(register_t)); ASSYM(CPUSAVE_BOOKE_DEAR, CPUSAVE_BOOKE_DEAR*sizeof(register_t)); ASSYM(CPUSAVE_BOOKE_ESR, CPUSAVE_BOOKE_ESR*sizeof(register_t)); ASSYM(BOOKE_CRITSAVE_SRR0, BOOKE_CRITSAVE_SRR0*sizeof(register_t)); ASSYM(BOOKE_CRITSAVE_SRR1, BOOKE_CRITSAVE_SRR1*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_LR, TLBSAVE_BOOKE_LR*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_CR, TLBSAVE_BOOKE_CR*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_SRR0, TLBSAVE_BOOKE_SRR0*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_SRR1, TLBSAVE_BOOKE_SRR1*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R20, TLBSAVE_BOOKE_R20*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R21, TLBSAVE_BOOKE_R21*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R22, TLBSAVE_BOOKE_R22*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R23, TLBSAVE_BOOKE_R23*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R24, TLBSAVE_BOOKE_R24*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R25, TLBSAVE_BOOKE_R25*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R26, TLBSAVE_BOOKE_R26*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R27, TLBSAVE_BOOKE_R27*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R28, TLBSAVE_BOOKE_R28*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R29, TLBSAVE_BOOKE_R29*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R30, TLBSAVE_BOOKE_R30*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R31, TLBSAVE_BOOKE_R31*sizeof(register_t)); ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock)); #if defined(AIM) ASSYM(USER_ADDR, USER_ADDR); #ifdef __powerpc64__ ASSYM(PC_KERNSLB, offsetof(struct pcpu, pc_aim.slb)); ASSYM(PC_USERSLB, offsetof(struct pcpu, pc_aim.userslb)); ASSYM(PC_SLBSAVE, offsetof(struct pcpu, pc_aim.slbsave)); ASSYM(PC_SLBSTACK, offsetof(struct pcpu, pc_aim.slbstack)); ASSYM(USER_SLB_SLOT, USER_SLB_SLOT); ASSYM(USER_SLB_SLBE, USER_SLB_SLBE); ASSYM(SEGMENT_MASK, SEGMENT_MASK); #else ASSYM(PM_SR, offsetof(struct pmap, pm_sr)); ASSYM(USER_SR, USER_SR); #endif #elif defined(BOOKE) #ifdef __powerpc64__ ASSYM(PM_PP2D, offsetof(struct pmap, pm_pp2d)); #else ASSYM(PM_PDIR, offsetof(struct pmap, pm_pdir)); #endif /* * With pte_t being a bitfield struct, these fields cannot be addressed via * offsetof(). 
*/ ASSYM(PTE_RPN, 0); ASSYM(PTE_FLAGS, sizeof(uint32_t)); #if defined(BOOKE_E500) ASSYM(TLB_ENTRY_SIZE, sizeof(struct tlb_entry)); #endif #endif #ifdef __powerpc64__ ASSYM(FSP, 48); #else ASSYM(FSP, 8); #endif ASSYM(FRAMELEN, FRAMELEN); ASSYM(FRAME_0, offsetof(struct trapframe, fixreg[0])); ASSYM(FRAME_1, offsetof(struct trapframe, fixreg[1])); ASSYM(FRAME_2, offsetof(struct trapframe, fixreg[2])); ASSYM(FRAME_3, offsetof(struct trapframe, fixreg[3])); ASSYM(FRAME_4, offsetof(struct trapframe, fixreg[4])); ASSYM(FRAME_5, offsetof(struct trapframe, fixreg[5])); ASSYM(FRAME_6, offsetof(struct trapframe, fixreg[6])); ASSYM(FRAME_7, offsetof(struct trapframe, fixreg[7])); ASSYM(FRAME_8, offsetof(struct trapframe, fixreg[8])); ASSYM(FRAME_9, offsetof(struct trapframe, fixreg[9])); ASSYM(FRAME_10, offsetof(struct trapframe, fixreg[10])); ASSYM(FRAME_11, offsetof(struct trapframe, fixreg[11])); ASSYM(FRAME_12, offsetof(struct trapframe, fixreg[12])); ASSYM(FRAME_13, offsetof(struct trapframe, fixreg[13])); ASSYM(FRAME_14, offsetof(struct trapframe, fixreg[14])); ASSYM(FRAME_15, offsetof(struct trapframe, fixreg[15])); ASSYM(FRAME_16, offsetof(struct trapframe, fixreg[16])); ASSYM(FRAME_17, offsetof(struct trapframe, fixreg[17])); ASSYM(FRAME_18, offsetof(struct trapframe, fixreg[18])); ASSYM(FRAME_19, offsetof(struct trapframe, fixreg[19])); ASSYM(FRAME_20, offsetof(struct trapframe, fixreg[20])); ASSYM(FRAME_21, offsetof(struct trapframe, fixreg[21])); ASSYM(FRAME_22, offsetof(struct trapframe, fixreg[22])); ASSYM(FRAME_23, offsetof(struct trapframe, fixreg[23])); ASSYM(FRAME_24, offsetof(struct trapframe, fixreg[24])); ASSYM(FRAME_25, offsetof(struct trapframe, fixreg[25])); ASSYM(FRAME_26, offsetof(struct trapframe, fixreg[26])); ASSYM(FRAME_27, offsetof(struct trapframe, fixreg[27])); ASSYM(FRAME_28, offsetof(struct trapframe, fixreg[28])); ASSYM(FRAME_29, offsetof(struct trapframe, fixreg[29])); ASSYM(FRAME_30, offsetof(struct trapframe, fixreg[30])); ASSYM(FRAME_31, offsetof(struct trapframe, fixreg[31])); ASSYM(FRAME_LR, offsetof(struct trapframe, lr)); ASSYM(FRAME_CR, offsetof(struct trapframe, cr)); ASSYM(FRAME_CTR, offsetof(struct trapframe, ctr)); ASSYM(FRAME_XER, offsetof(struct trapframe, xer)); ASSYM(FRAME_SRR0, offsetof(struct trapframe, srr0)); ASSYM(FRAME_SRR1, offsetof(struct trapframe, srr1)); ASSYM(FRAME_EXC, offsetof(struct trapframe, exc)); ASSYM(FRAME_AIM_DAR, offsetof(struct trapframe, dar)); ASSYM(FRAME_AIM_DSISR, offsetof(struct trapframe, cpu.aim.dsisr)); ASSYM(FRAME_BOOKE_DEAR, offsetof(struct trapframe, dar)); ASSYM(FRAME_BOOKE_ESR, offsetof(struct trapframe, cpu.booke.esr)); ASSYM(FRAME_BOOKE_DBCR0, offsetof(struct trapframe, cpu.booke.dbcr0)); ASSYM(CF_FUNC, offsetof(struct callframe, cf_func)); ASSYM(CF_ARG0, offsetof(struct callframe, cf_arg0)); ASSYM(CF_ARG1, offsetof(struct callframe, cf_arg1)); ASSYM(CF_SIZE, sizeof(struct callframe)); ASSYM(PCB_CONTEXT, offsetof(struct pcb, pcb_context)); ASSYM(PCB_CR, offsetof(struct pcb, pcb_cr)); ASSYM(PCB_DSCR, offsetof(struct pcb, pcb_dscr)); +ASSYM(PCB_FSCR, offsetof(struct pcb, pcb_fscr)); ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp)); ASSYM(PCB_TOC, offsetof(struct pcb, pcb_toc)); ASSYM(PCB_LR, offsetof(struct pcb, pcb_lr)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(PCB_FPU, PCB_FPU); ASSYM(PCB_VEC, PCB_VEC); ASSYM(PCB_CDSCR, PCB_CDSCR); +ASSYM(PCB_CFSCR, PCB_CFSCR); ASSYM(PCB_AIM_USR_VSID, offsetof(struct pcb, pcb_cpu.aim.usr_vsid)); 
ASSYM(PCB_BOOKE_DBCR0, offsetof(struct pcb, pcb_cpu.booke.dbcr0)); ASSYM(PCB_VSCR, offsetof(struct pcb, pcb_vec.vscr)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace)); ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(SF_UC, offsetof(struct sigframe, sf_uc)); ASSYM(DMAP_BASE_ADDRESS, DMAP_BASE_ADDRESS); ASSYM(MAXCOMLEN, MAXCOMLEN); #ifdef __powerpc64__ ASSYM(PSL_CM, PSL_CM); #endif ASSYM(PSL_GS, PSL_GS); ASSYM(PSL_DE, PSL_DE); ASSYM(PSL_DS, PSL_DS); ASSYM(PSL_IS, PSL_IS); ASSYM(PSL_CE, PSL_CE); ASSYM(PSL_UCLE, PSL_UCLE); ASSYM(PSL_WE, PSL_WE); ASSYM(PSL_UBLE, PSL_UBLE); #if defined(AIM) && defined(__powerpc64__) ASSYM(PSL_SF, PSL_SF); ASSYM(PSL_HV, PSL_HV); #endif ASSYM(PSL_POW, PSL_POW); ASSYM(PSL_ILE, PSL_ILE); ASSYM(PSL_LE, PSL_LE); ASSYM(PSL_SE, PSL_SE); ASSYM(PSL_RI, PSL_RI); ASSYM(PSL_DR, PSL_DR); ASSYM(PSL_IP, PSL_IP); ASSYM(PSL_IR, PSL_IR); ASSYM(PSL_FE_DIS, PSL_FE_DIS); ASSYM(PSL_FE_NONREC, PSL_FE_NONREC); ASSYM(PSL_FE_PREC, PSL_FE_PREC); ASSYM(PSL_FE_REC, PSL_FE_REC); ASSYM(PSL_VEC, PSL_VEC); ASSYM(PSL_BE, PSL_BE); ASSYM(PSL_EE, PSL_EE); ASSYM(PSL_FE0, PSL_FE0); ASSYM(PSL_FE1, PSL_FE1); ASSYM(PSL_FP, PSL_FP); ASSYM(PSL_ME, PSL_ME); ASSYM(PSL_PR, PSL_PR); ASSYM(PSL_PMM, PSL_PMM); Index: head/sys/powerpc/powerpc/swtch64.S =================================================================== --- head/sys/powerpc/powerpc/swtch64.S (revision 346789) +++ head/sys/powerpc/powerpc/swtch64.S (revision 346790) @@ -1,300 +1,317 @@ /* $FreeBSD$ */ /* $NetBSD: locore.S,v 1.24 2000/05/31 05:09:17 thorpej Exp $ */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "assym.inc" #include "opt_sched.h" #include #include #include #include #include #ifdef _CALL_ELF .abiversion _CALL_ELF #endif TOC_ENTRY(blocked_lock) /* * void cpu_throw(struct thread *old, struct thread *new) */ ENTRY(cpu_throw) mr %r13, %r4 li %r14,0 /* Tell cpu_switchin not to release a thread */ b cpu_switchin /* * void cpu_switch(struct thread *old, * struct thread *new, * struct mutex *mtx); * * Switch to a new thread saving the current state in the old thread. + * + * Internally clobbers (not visible outside of this file): + * r18 - old thread pcb_flags + * r19 - new thread pcb_flags */ ENTRY(cpu_switch) ld %r6,TD_PCB(%r3) /* Get the old thread's PCB ptr */ std %r12,PCB_CONTEXT(%r6) /* Save the non-volatile GP regs. These can now be used for scratch */ std %r14,PCB_CONTEXT+2*8(%r6) std %r15,PCB_CONTEXT+3*8(%r6) std %r16,PCB_CONTEXT+4*8(%r6) std %r17,PCB_CONTEXT+5*8(%r6) std %r18,PCB_CONTEXT+6*8(%r6) std %r19,PCB_CONTEXT+7*8(%r6) std %r20,PCB_CONTEXT+8*8(%r6) std %r21,PCB_CONTEXT+9*8(%r6) std %r22,PCB_CONTEXT+10*8(%r6) std %r23,PCB_CONTEXT+11*8(%r6) std %r24,PCB_CONTEXT+12*8(%r6) std %r25,PCB_CONTEXT+13*8(%r6) std %r26,PCB_CONTEXT+14*8(%r6) std %r27,PCB_CONTEXT+15*8(%r6) std %r28,PCB_CONTEXT+16*8(%r6) std %r29,PCB_CONTEXT+17*8(%r6) std %r30,PCB_CONTEXT+18*8(%r6) std %r31,PCB_CONTEXT+19*8(%r6) mfcr %r16 /* Save the condition register */ std %r16,PCB_CR(%r6) mflr %r16 /* Save the link register */ std %r16,PCB_LR(%r6) std %r1,PCB_SP(%r6) /* Save the stack pointer */ std %r2,PCB_TOC(%r6) /* Save the TOC pointer */ mr %r14,%r3 /* Copy the old thread ptr... */ mr %r13,%r4 /* and the new thread ptr in curthread*/ mr %r16,%r5 /* and the new lock */ mr %r17,%r6 /* and the PCB */ stdu %r1,-48(%r1) lwz %r18, PCB_FLAGS(%r17) + andi. %r7, %r18, PCB_CFSCR + beq 1f + mfspr %r6, SPR_FSCR + std %r6, PCB_FSCR(%r17) +1: andi. %r7, %r18, PCB_CDSCR beq .L0 - /* Custom DSCR was set. Reseting it to enter kernel */ - li %r6, 0x0 - mtspr SPR_DSCR, %r6 + mfspr %r6, SPR_DSCRP + std %r6, PCB_DSCR(%r17) .L0: /* Save FPU context if needed */ andi. %r7, %r18, PCB_FPU beq .L1 bl save_fpu nop .L1: mr %r3,%r14 /* restore old thread ptr */ /* Save Altivec context if needed */ andi. 
%r7, %r18, PCB_VEC beq .L2 bl save_vec nop .L2: mr %r3,%r14 /* restore old thread ptr */ bl pmap_deactivate /* Deactivate the current pmap */ nop sync /* Make sure all of that finished */ cpu_switchin: #if defined(SMP) && defined(SCHED_ULE) /* Wait for the new thread to become unblocked */ addis %r6,%r2,TOC_REF(blocked_lock)@ha ld %r6,TOC_REF(blocked_lock)@l(%r6) blocked_loop: ld %r7,TD_LOCK(%r13) cmpd %r6,%r7 beq- blocked_loop isync #endif ld %r17,TD_PCB(%r13) /* Get new PCB */ ld %r1,PCB_SP(%r17) /* Load the stack pointer */ addi %r1,%r1,-48 /* Remember about cpu_switch stack frame */ /* Release old thread now that we have a stack pointer set up */ cmpdi %r14,0 beq- 1f std %r16,TD_LOCK(%r14) /* ULE: update old thread's lock */ 1: mfsprg %r7,0 /* Get the pcpu pointer */ std %r13,PC_CURTHREAD(%r7) /* Store new current thread */ ld %r17,TD_PCB(%r13) /* Store new current PCB */ std %r17,PC_CURPCB(%r7) mr %r3,%r13 /* Get new thread ptr */ bl pmap_activate /* Activate the new address space */ nop lwz %r19, PCB_FLAGS(%r17) /* Restore FPU context if needed */ andi. %r6, %r19, PCB_FPU beq .L3 mr %r3,%r13 /* Pass curthread to enable_fpu */ bl enable_fpu nop .L3: /* Restore Altivec context if needed */ andi. %r6, %r19, PCB_VEC beq .L31 mr %r3,%r13 /* Pass curthread to enable_vec */ bl enable_vec nop .L31: - /* Restore Custom DSCR if needed */ - andi. %r6, %r19, PCB_CDSCR - beq .L4 + /* Load custom DSCR on PowerISA 2.06+ CPUs. */ + /* Load changed FSCR on PowerISA 2.07+ CPUs. */ + or %r18,%r18,%r19 + /* Restore Custom DSCR if needed (zeroes if in old but not new) */ + andi. %r6, %r18, PCB_CDSCR + beq .L32 ld %r7, PCB_DSCR(%r17) /* Load the DSCR register*/ - mtspr SPR_DSCR, %r7 + mtspr SPR_DSCRP, %r7 +.L32: + /* Restore FSCR if needed (zeroes if in old but not new) */ + andi. %r6, %r18, PCB_CFSCR + beq .L4 + ld %r7, PCB_FSCR(%r17) /* Load the FSCR register*/ + mtspr SPR_FSCR, %r7 /* thread to restore is in r3 */ .L4: addi %r1,%r1,48 mr %r3,%r17 /* Recover PCB ptr */ ld %r12,PCB_CONTEXT(%r3) /* Load the non-volatile GP regs. */ ld %r14,PCB_CONTEXT+2*8(%r3) ld %r15,PCB_CONTEXT+3*8(%r3) ld %r16,PCB_CONTEXT+4*8(%r3) ld %r17,PCB_CONTEXT+5*8(%r3) ld %r18,PCB_CONTEXT+6*8(%r3) ld %r19,PCB_CONTEXT+7*8(%r3) ld %r20,PCB_CONTEXT+8*8(%r3) ld %r21,PCB_CONTEXT+9*8(%r3) ld %r22,PCB_CONTEXT+10*8(%r3) ld %r23,PCB_CONTEXT+11*8(%r3) ld %r24,PCB_CONTEXT+12*8(%r3) ld %r25,PCB_CONTEXT+13*8(%r3) ld %r26,PCB_CONTEXT+14*8(%r3) ld %r27,PCB_CONTEXT+15*8(%r3) ld %r28,PCB_CONTEXT+16*8(%r3) ld %r29,PCB_CONTEXT+17*8(%r3) ld %r30,PCB_CONTEXT+18*8(%r3) ld %r31,PCB_CONTEXT+19*8(%r3) ld %r5,PCB_CR(%r3) /* Load the condition register */ mtcr %r5 ld %r5,PCB_LR(%r3) /* Load the link register */ mtlr %r5 ld %r1,PCB_SP(%r3) /* Load the stack pointer */ ld %r2,PCB_TOC(%r3) /* Load the TOC pointer */ /* * Perform a dummy stdcx. to clear any reservations we may have * inherited from the previous thread. It doesn't matter if the * stdcx succeeds or not. pcb_context[0] can be clobbered. */ stdcx. %r1, 0, %r3 blr /* * savectx(pcb) * Update pcb, saving current processor state */ ENTRY(savectx) std %r12,PCB_CONTEXT(%r3) /* Save the non-volatile GP regs. 
*/ std %r13,PCB_CONTEXT+1*8(%r3) std %r14,PCB_CONTEXT+2*8(%r3) std %r15,PCB_CONTEXT+3*8(%r3) std %r16,PCB_CONTEXT+4*8(%r3) std %r17,PCB_CONTEXT+5*8(%r3) std %r18,PCB_CONTEXT+6*8(%r3) std %r19,PCB_CONTEXT+7*8(%r3) std %r20,PCB_CONTEXT+8*8(%r3) std %r21,PCB_CONTEXT+9*8(%r3) std %r22,PCB_CONTEXT+10*8(%r3) std %r23,PCB_CONTEXT+11*8(%r3) std %r24,PCB_CONTEXT+12*8(%r3) std %r25,PCB_CONTEXT+13*8(%r3) std %r26,PCB_CONTEXT+14*8(%r3) std %r27,PCB_CONTEXT+15*8(%r3) std %r28,PCB_CONTEXT+16*8(%r3) std %r29,PCB_CONTEXT+17*8(%r3) std %r30,PCB_CONTEXT+18*8(%r3) std %r31,PCB_CONTEXT+19*8(%r3) mfcr %r4 /* Save the condition register */ std %r4,PCB_CR(%r3) std %r1,PCB_SP(%r3) /* Save the stack pointer */ std %r2,PCB_TOC(%r3) /* Save the TOC pointer */ mflr %r4 /* Save the link register */ std %r4,PCB_LR(%r3) blr /* * fork_trampoline() * Set up the return from cpu_fork() */ ENTRY_NOPROF(fork_trampoline) ld %r3,CF_FUNC(%r1) ld %r4,CF_ARG0(%r1) ld %r5,CF_ARG1(%r1) stdu %r1,-48(%r1) bl fork_exit nop addi %r1,%r1,48+CF_SIZE-FSP /* Allow 8 bytes in front of trapframe to simulate FRAME_SETUP does when allocating space for a frame pointer/saved LR */ bl trapexit nop Index: head/sys/powerpc/powerpc/trap.c =================================================================== --- head/sys/powerpc/powerpc/trap.c (revision 346789) +++ head/sys/powerpc/powerpc/trap.c (revision 346790) @@ -1,992 +1,998 @@ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Below matches setjmp.S */ #define FAULTBUF_LR 21 #define FAULTBUF_R1 1 #define FAULTBUF_R2 2 #define FAULTBUF_CR 22 #define FAULTBUF_R14 3 #define MOREARGS(sp) ((caddr_t)((uintptr_t)(sp) + \ sizeof(struct callframe) - 3*sizeof(register_t))) /* more args go here */ static void trap_fatal(struct trapframe *frame); static void printtrap(u_int vector, struct trapframe *frame, int isfatal, int user); static int trap_pfault(struct trapframe *frame, int user); static int fix_unaligned(struct thread *td, struct trapframe *frame); static int handle_onfault(struct trapframe *frame); static void syscall(struct trapframe *frame); #if defined(__powerpc64__) && defined(AIM) void handle_kernel_slb_spill(int, register_t, register_t); static int handle_user_slb_spill(pmap_t pm, vm_offset_t addr); extern int n_slbs; static void normalize_inputs(void); #endif extern vm_offset_t __startkernel; #ifdef KDB int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */ #endif struct powerpc_exception { u_int vector; char *name; }; #ifdef KDTRACE_HOOKS #include int (*dtrace_invop_jump_addr)(struct trapframe *); #endif static struct powerpc_exception powerpc_exceptions[] = { { EXC_CRIT, "critical input" }, { EXC_RST, "system reset" }, { EXC_MCHK, "machine check" }, { EXC_DSI, "data storage interrupt" }, { EXC_DSE, "data segment exception" }, { EXC_ISI, "instruction storage interrupt" }, { EXC_ISE, "instruction segment exception" }, { EXC_EXI, "external interrupt" }, { EXC_ALI, "alignment" }, { EXC_PGM, "program" }, { EXC_HEA, "hypervisor emulation assistance" }, { EXC_FPU, "floating-point unavailable" }, { EXC_APU, "auxiliary proc unavailable" }, { EXC_DECR, "decrementer" }, { EXC_FIT, "fixed-interval timer" }, { EXC_WDOG, "watchdog timer" }, { EXC_SC, "system call" }, { EXC_TRC, "trace" }, { EXC_FPA, "floating-point assist" }, { EXC_DEBUG, "debug" }, { EXC_PERF, "performance monitoring" }, { EXC_VEC, "altivec unavailable" }, { EXC_VSX, "vsx unavailable" }, { EXC_FAC, "facility unavailable" }, { EXC_ITMISS, "instruction tlb miss" }, { EXC_DLMISS, "data load tlb miss" }, { EXC_DSMISS, "data store tlb miss" }, { EXC_BPT, "instruction breakpoint" }, { EXC_SMI, "system management" }, { EXC_VECAST_G4, "altivec assist" }, { EXC_THRM, "thermal management" }, { EXC_RUNMODETRC, "run mode/trace" }, { EXC_SOFT_PATCH, "soft patch exception" }, { EXC_LAST, NULL } }; #define ESR_BITMASK \ "\20" \ "\040b0\037b1\036b2\035b3\034PIL\033PRR\032PTR\031FP" \ "\030ST\027b9\026DLK\025ILK\024b12\023b13\022BO\021PIE" \ "\020b16\017b17\016b18\015b19\014b20\013b21\012b22\011b23" \ "\010SPE\007EPID\006b26\005b27\004b28\003b29\002b30\001b31" #define MCSR_BITMASK \ "\20" \ "\040MCP\037ICERR\036DCERR\035TLBPERR\034L2MMU_MHIT\033b5\032b6\031b7" \ "\030b8\027b9\026b10\025NMI\024MAV\023MEA\022b14\021IF" \ "\020LD\017ST\016LDG\015b19\014b20\013b21\012b22\011b23" \ "\010b24\007b25\006b26\005b27\004b28\003b29\002TLBSYNC\001BSL2_ERR" #define MSSSR_BITMASK \ "\20" \ "\040b0\037b1\036b2\035b3\034b4\033b5\032b6\031b7" \ "\030b8\027b9\026b10\025b11\024b12\023L2TAG\022L2DAT\021L3TAG" \ "\020L3DAT\017APE\016DPE\015TEA\014b20\013b21\012b22\011b23" \ 
"\010b24\007b25\006b26\005b27\004b28\003b29\002b30\001b31" static const char * trapname(u_int vector) { struct powerpc_exception *pe; for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) { if (pe->vector == vector) return (pe->name); } return ("unknown"); } static inline bool frame_is_trap_inst(struct trapframe *frame) { #ifdef AIM return (frame->exc == EXC_PGM && frame->srr1 & EXC_PGM_TRAP); #else return ((frame->cpu.booke.esr & ESR_PTR) != 0); #endif } void trap(struct trapframe *frame) { struct thread *td; struct proc *p; #ifdef KDTRACE_HOOKS uint32_t inst; #endif int sig, type, user; u_int ucode; ksiginfo_t ksi; register_t fscr; VM_CNT_INC(v_trap); #ifdef KDB if (kdb_active) { kdb_reenter(); return; } #endif td = curthread; p = td->td_proc; type = ucode = frame->exc; sig = 0; user = frame->srr1 & PSL_PR; CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name, trapname(type), user ? "user" : "kernel"); #ifdef KDTRACE_HOOKS /* * A trap can occur while DTrace executes a probe. Before * executing the probe, DTrace blocks re-scheduling and sets * a flag in its per-cpu flags to indicate that it doesn't * want to fault. On returning from the probe, the no-fault * flag is cleared and finally re-scheduling is enabled. * * If the DTrace kernel module has registered a trap handler, * call it and if it returns non-zero, assume that it has * handled the trap and modified the trap frame so that this * function can return normally. */ if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0) return; #endif if (user) { td->td_pticks = 0; td->td_frame = frame; if (td->td_cowgen != p->p_cowgen) thread_cow_update(td); /* User Mode Traps */ switch (type) { case EXC_RUNMODETRC: case EXC_TRC: frame->srr1 &= ~PSL_SE; sig = SIGTRAP; ucode = TRAP_TRACE; break; #if defined(__powerpc64__) && defined(AIM) case EXC_ISE: case EXC_DSE: if (handle_user_slb_spill(&p->p_vmspace->vm_pmap, (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){ sig = SIGSEGV; ucode = SEGV_MAPERR; } break; #endif case EXC_DSI: case EXC_ISI: sig = trap_pfault(frame, 1); if (sig == SIGSEGV) ucode = SEGV_MAPERR; break; case EXC_SC: syscall(frame); break; case EXC_FPU: KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU, ("FPU already enabled for thread")); enable_fpu(td); break; case EXC_VEC: KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC, ("Altivec already enabled for thread")); enable_vec(td); break; case EXC_VSX: KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX, ("VSX already enabled for thread")); if (!(td->td_pcb->pcb_flags & PCB_VEC)) enable_vec(td); if (!(td->td_pcb->pcb_flags & PCB_FPU)) save_fpu(td); td->td_pcb->pcb_flags |= PCB_VSX; enable_fpu(td); break; case EXC_FAC: fscr = mfspr(SPR_FSCR); if ((fscr & FSCR_IC_MASK) == FSCR_IC_HTM) { CTR0(KTR_TRAP, "Hardware Transactional Memory subsystem disabled"); + } else if ((fscr & FSCR_IC_MASK) == FSCR_IC_DSCR) { + td->td_pcb->pcb_flags |= PCB_CFSCR | PCB_CDSCR; + fscr &= ~FSCR_IC_MASK; + mtspr(SPR_FSCR, fscr | FSCR_DSCR); + mtspr(SPR_DSCR, 0); + break; } sig = SIGILL; ucode = ILL_ILLOPC; break; case EXC_HEA: sig = SIGILL; ucode = ILL_ILLOPC; break; case EXC_VECAST_E: case EXC_VECAST_G4: case EXC_VECAST_G5: /* * We get a VPU assist exception for IEEE mode * vector operations on denormalized floats. * Emulating this is a giant pain, so for now, * just switch off IEEE mode and treat them as * zero. 
*/ save_vec(td); td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ; enable_vec(td); break; case EXC_ALI: if (fix_unaligned(td, frame) != 0) { sig = SIGBUS; ucode = BUS_ADRALN; } else frame->srr0 += 4; break; case EXC_DEBUG: /* Single stepping */ mtspr(SPR_DBSR, mfspr(SPR_DBSR)); frame->srr1 &= ~PSL_DE; frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC); sig = SIGTRAP; ucode = TRAP_TRACE; break; case EXC_PGM: /* Identify the trap reason */ if (frame_is_trap_inst(frame)) { #ifdef KDTRACE_HOOKS inst = fuword32((const void *)frame->srr0); if (inst == 0x0FFFDDDD && dtrace_pid_probe_ptr != NULL) { (*dtrace_pid_probe_ptr)(frame); break; } #endif sig = SIGTRAP; ucode = TRAP_BRKPT; } else { sig = ppc_instr_emulate(frame, td); if (sig == SIGILL) { if (frame->srr1 & EXC_PGM_PRIV) ucode = ILL_PRVOPC; else if (frame->srr1 & EXC_PGM_ILLEGAL) ucode = ILL_ILLOPC; } else if (sig == SIGFPE) ucode = FPE_FLTINV; /* Punt for now, invalid operation. */ } break; case EXC_MCHK: /* * Note that this may not be recoverable for the user * process, depending on the type of machine check, * but it at least prevents the kernel from dying. */ sig = SIGBUS; ucode = BUS_OBJERR; break; #if defined(__powerpc64__) && defined(AIM) case EXC_SOFT_PATCH: /* * Point to the instruction that generated the exception to execute it again, * and normalize the register values. */ frame->srr0 -= 4; normalize_inputs(); break; #endif default: trap_fatal(frame); } } else { /* Kernel Mode Traps */ KASSERT(cold || td->td_ucred != NULL, ("kernel trap doesn't have ucred")); switch (type) { case EXC_PGM: #ifdef KDTRACE_HOOKS if (frame_is_trap_inst(frame)) { if (*(uint32_t *)frame->srr0 == EXC_DTRACE) { if (dtrace_invop_jump_addr != NULL) { dtrace_invop_jump_addr(frame); return; } } } #endif #ifdef KDB if (db_trap_glue(frame)) return; #endif break; #if defined(__powerpc64__) && defined(AIM) case EXC_DSE: if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 && (frame->dar & SEGMENT_MASK) == USER_ADDR) { __asm __volatile ("slbmte %0, %1" :: "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); return; } break; #endif case EXC_DSI: if (trap_pfault(frame, 0) == 0) return; break; case EXC_MCHK: if (handle_onfault(frame)) return; break; default: break; } trap_fatal(frame); } if (sig != 0) { if (p->p_sysent->sv_transtrap != NULL) sig = (p->p_sysent->sv_transtrap)(sig, type); ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = (int) ucode; /* XXX, not POSIX */ ksi.ksi_addr = (void *)frame->srr0; ksi.ksi_trapno = type; trapsignal(td, &ksi); } userret(td, frame); } static void trap_fatal(struct trapframe *frame) { #ifdef KDB bool handled; #endif printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR)); #ifdef KDB if (debugger_on_trap) { kdb_why = KDB_WHY_TRAP; handled = kdb_trap(frame->exc, 0, frame); kdb_why = KDB_WHY_UNSET; if (handled) return; } #endif panic("%s trap", trapname(frame->exc)); } static void cpu_printtrap(u_int vector, struct trapframe *frame, int isfatal, int user) { #ifdef AIM uint16_t ver; switch (vector) { case EXC_DSE: case EXC_DSI: case EXC_DTMISS: printf(" dsisr = 0x%lx\n", (u_long)frame->cpu.aim.dsisr); break; case EXC_MCHK: ver = mfpvr() >> 16; if (MPC745X_P(ver)) printf(" msssr0 = 0x%b\n", (int)mfspr(SPR_MSSSR0), MSSSR_BITMASK); break; } #elif defined(BOOKE) vm_paddr_t pa; switch (vector) { case EXC_MCHK: pa = mfspr(SPR_MCARU); pa = (pa << 32) | (u_register_t)mfspr(SPR_MCAR); printf(" mcsr = 0x%b\n", (int)mfspr(SPR_MCSR), MCSR_BITMASK); printf(" mcar = 0x%jx\n", (uintmax_t)pa); } printf(" esr = 0x%b\n", (int)frame->cpu.booke.esr, 
ESR_BITMASK); #endif } static void printtrap(u_int vector, struct trapframe *frame, int isfatal, int user) { printf("\n"); printf("%s %s trap:\n", isfatal ? "fatal" : "handled", user ? "user" : "kernel"); printf("\n"); printf(" exception = 0x%x (%s)\n", vector, trapname(vector)); switch (vector) { case EXC_DSE: case EXC_DSI: case EXC_DTMISS: case EXC_ALI: printf(" virtual address = 0x%" PRIxPTR "\n", frame->dar); break; case EXC_ISE: case EXC_ISI: case EXC_ITMISS: printf(" virtual address = 0x%" PRIxPTR "\n", frame->srr0); break; case EXC_MCHK: break; } cpu_printtrap(vector, frame, isfatal, user); printf(" srr0 = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n", frame->srr0, frame->srr0 - (register_t)(__startkernel - KERNBASE)); printf(" srr1 = 0x%lx\n", (u_long)frame->srr1); printf(" current msr = 0x%" PRIxPTR "\n", mfmsr()); printf(" lr = 0x%" PRIxPTR " (0x%" PRIxPTR ")\n", frame->lr, frame->lr - (register_t)(__startkernel - KERNBASE)); printf(" frame = %p\n", frame); printf(" curthread = %p\n", curthread); if (curthread != NULL) printf(" pid = %d, comm = %s\n", curthread->td_proc->p_pid, curthread->td_name); printf("\n"); } /* * Handles a fatal fault when we have onfault state to recover. Returns * non-zero if there was onfault recovery state available. */ static int handle_onfault(struct trapframe *frame) { struct thread *td; jmp_buf *fb; td = curthread; fb = td->td_pcb->pcb_onfault; if (fb != NULL) { frame->srr0 = (*fb)->_jb[FAULTBUF_LR]; frame->fixreg[1] = (*fb)->_jb[FAULTBUF_R1]; frame->fixreg[2] = (*fb)->_jb[FAULTBUF_R2]; frame->fixreg[3] = 1; frame->cr = (*fb)->_jb[FAULTBUF_CR]; bcopy(&(*fb)->_jb[FAULTBUF_R14], &frame->fixreg[14], 18 * sizeof(register_t)); td->td_pcb->pcb_onfault = NULL; /* Returns twice, not thrice */ return (1); } return (0); } int cpu_fetch_syscall_args(struct thread *td) { struct proc *p; struct trapframe *frame; struct syscall_args *sa; caddr_t params; size_t argsz; int error, n, i; p = td->td_proc; frame = td->td_frame; sa = &td->td_sa; sa->code = frame->fixreg[0]; params = (caddr_t)(frame->fixreg + FIRSTARG); n = NARGREG; if (sa->code == SYS_syscall) { /* * code is first argument, * followed by actual args. */ sa->code = *(register_t *) params; params += sizeof(register_t); n -= 1; } else if (sa->code == SYS___syscall) { /* * Like syscall, but code is a quad, * so as to maintain quad alignment * for the rest of the args. 
*/ if (SV_PROC_FLAG(p, SV_ILP32)) { params += sizeof(register_t); sa->code = *(register_t *) params; params += sizeof(register_t); n -= 2; } else { sa->code = *(register_t *) params; params += sizeof(register_t); n -= 1; } } if (sa->code >= p->p_sysent->sv_size) sa->callp = &p->p_sysent->sv_table[0]; else sa->callp = &p->p_sysent->sv_table[sa->code]; sa->narg = sa->callp->sy_narg; if (SV_PROC_FLAG(p, SV_ILP32)) { argsz = sizeof(uint32_t); for (i = 0; i < n; i++) sa->args[i] = ((u_register_t *)(params))[i] & 0xffffffff; } else { argsz = sizeof(uint64_t); for (i = 0; i < n; i++) sa->args[i] = ((u_register_t *)(params))[i]; } if (sa->narg > n) error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n, (sa->narg - n) * argsz); else error = 0; #ifdef __powerpc64__ if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) { /* Expand the size of arguments copied from the stack */ for (i = sa->narg; i >= n; i--) sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n]; } #endif if (error == 0) { td->td_retval[0] = 0; td->td_retval[1] = frame->fixreg[FIRSTARG + 1]; } return (error); } #include "../../kern/subr_syscall.c" void syscall(struct trapframe *frame) { struct thread *td; int error; td = curthread; td->td_frame = frame; #if defined(__powerpc64__) && defined(AIM) /* * Speculatively restore last user SLB segment, which we know is * invalid already, since we are likely to do copyin()/copyout(). */ if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0) __asm __volatile ("slbmte %0, %1; isync" :: "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); #endif error = syscallenter(td); syscallret(td, error); } #if defined(__powerpc64__) && defined(AIM) /* Handle kernel SLB faults -- runs in real mode, all seat belts off */ void handle_kernel_slb_spill(int type, register_t dar, register_t srr0) { struct slb *slbcache; uint64_t slbe, slbv; uint64_t esid, addr; int i; addr = (type == EXC_ISE) ? srr0 : dar; slbcache = PCPU_GET(aim.slb); esid = (uintptr_t)addr >> ADDR_SR_SHFT; slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; /* See if the hardware flushed this somehow (can happen in LPARs) */ for (i = 0; i < n_slbs; i++) if (slbcache[i].slbe == (slbe | (uint64_t)i)) return; /* Not in the map, needs to actually be added */ slbv = kernel_va_to_slbv(addr); if (slbcache[USER_SLB_SLOT].slbe == 0) { for (i = 0; i < n_slbs; i++) { if (i == USER_SLB_SLOT) continue; if (!(slbcache[i].slbe & SLBE_VALID)) goto fillkernslb; } if (i == n_slbs) slbcache[USER_SLB_SLOT].slbe = 1; } /* Sacrifice a random SLB entry that is not the user entry */ i = mftb() % n_slbs; if (i == USER_SLB_SLOT) i = (i+1) % n_slbs; fillkernslb: /* Write new entry */ slbcache[i].slbv = slbv; slbcache[i].slbe = slbe | (uint64_t)i; /* Trap handler will restore from cache on exit */ } static int handle_user_slb_spill(pmap_t pm, vm_offset_t addr) { struct slb *user_entry; uint64_t esid; int i; if (pm->pm_slb == NULL) return (-1); esid = (uintptr_t)addr >> ADDR_SR_SHFT; PMAP_LOCK(pm); user_entry = user_va_to_slb_entry(pm, addr); if (user_entry == NULL) { /* allocate_vsid auto-spills it */ (void)allocate_user_vsid(pm, esid, 0); } else { /* * Check that another CPU has not already mapped this. * XXX: Per-thread SLB caches would be better. 
*/ for (i = 0; i < pm->pm_slb_len; i++) if (pm->pm_slb[i] == user_entry) break; if (i == pm->pm_slb_len) slb_insert_user(pm, user_entry); } PMAP_UNLOCK(pm); return (0); } #endif static int trap_pfault(struct trapframe *frame, int user) { vm_offset_t eva, va; struct thread *td; struct proc *p; vm_map_t map; vm_prot_t ftype; int rv, is_user; td = curthread; p = td->td_proc; if (frame->exc == EXC_ISI) { eva = frame->srr0; ftype = VM_PROT_EXECUTE; if (frame->srr1 & SRR1_ISI_PFAULT) ftype |= VM_PROT_READ; } else { eva = frame->dar; #ifdef BOOKE if (frame->cpu.booke.esr & ESR_ST) #else if (frame->cpu.aim.dsisr & DSISR_STORE) #endif ftype = VM_PROT_WRITE; else ftype = VM_PROT_READ; } if (user) { KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL")); map = &p->p_vmspace->vm_map; } else { rv = pmap_decode_kernel_ptr(eva, &is_user, &eva); if (rv != 0) return (SIGSEGV); if (is_user) map = &p->p_vmspace->vm_map; else map = kernel_map; } va = trunc_page(eva); /* Fault in the page. */ rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); /* * XXXDTRACE: add dtrace_doubletrap_func here? */ if (rv == KERN_SUCCESS) return (0); if (!user && handle_onfault(frame)) return (0); return (SIGSEGV); } /* * For now, this only deals with the particular unaligned access case * that gcc tends to generate. Eventually it should handle all of the * possibilities that can happen on a 32-bit PowerPC in big-endian mode. */ static int fix_unaligned(struct thread *td, struct trapframe *frame) { struct thread *fputhread; #ifdef __SPE__ uint32_t inst; #endif int indicator, reg; double *fpr; #ifdef __SPE__ indicator = (frame->cpu.booke.esr & (ESR_ST|ESR_SPE)); if (indicator & ESR_SPE) { if (copyin((void *)frame->srr0, &inst, sizeof(inst)) != 0) return (-1); reg = EXC_ALI_SPE_REG(inst); fpr = (double *)td->td_pcb->pcb_vec.vr[reg]; fputhread = PCPU_GET(vecthread); /* Juggle the SPE to ensure that we've initialized * the registers, and that their current state is in * the PCB. */ if (fputhread != td) { if (fputhread) save_vec(fputhread); enable_vec(td); } save_vec(td); if (!(indicator & ESR_ST)) { if (copyin((void *)frame->dar, fpr, sizeof(double)) != 0) return (-1); frame->fixreg[reg] = td->td_pcb->pcb_vec.vr[reg][1]; enable_vec(td); } else { td->td_pcb->pcb_vec.vr[reg][1] = frame->fixreg[reg]; if (copyout(fpr, (void *)frame->dar, sizeof(double)) != 0) return (-1); } return (0); } #else indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr); switch (indicator) { case EXC_ALI_LFD: case EXC_ALI_STFD: reg = EXC_ALI_RST(frame->cpu.aim.dsisr); fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr; fputhread = PCPU_GET(fputhread); /* Juggle the FPU to ensure that we've initialized * the FPRs, and that their current state is in * the PCB. */ if (fputhread != td) { if (fputhread) save_fpu(fputhread); enable_fpu(td); } save_fpu(td); if (indicator == EXC_ALI_LFD) { if (copyin((void *)frame->dar, fpr, sizeof(double)) != 0) return (-1); enable_fpu(td); } else { if (copyout(fpr, (void *)frame->dar, sizeof(double)) != 0) return (-1); } return (0); break; } #endif return (-1); } #if defined(__powerpc64__) && defined(AIM) #define MSKNSHL(x, m, n) "(((" #x ") & " #m ") << " #n ")" #define MSKNSHR(x, m, n) "(((" #x ") & " #m ") >> " #n ")" /* xvcpsgndp instruction, built in opcode format. * This can be changed to use mnemonic after a toolchain update. 
 */
#define XVCPSGNDP(xt, xa, xb)	\
	__asm __volatile(".long (" \
		MSKNSHL(60, 0x3f, 26) " | " \
		MSKNSHL(xt, 0x1f, 21) " | " \
		MSKNSHL(xa, 0x1f, 16) " | " \
		MSKNSHL(xb, 0x1f, 11) " | " \
		MSKNSHL(240, 0xff, 3) " | " \
		MSKNSHR(xa, 0x20, 3) " | " \
		MSKNSHR(xa, 0x20, 4) " | " \
		MSKNSHR(xa, 0x20, 5) ")")

/* Macros to normalize 1 or 10 VSX registers */
#define NORM(x)	XVCPSGNDP(x, x, x)
#define NORM10(x) \
	NORM(x ## 0); NORM(x ## 1); NORM(x ## 2); NORM(x ## 3); NORM(x ## 4); \
	NORM(x ## 5); NORM(x ## 6); NORM(x ## 7); NORM(x ## 8); NORM(x ## 9)

static void
normalize_inputs(void)
{
	unsigned long msr;

	/* enable VSX */
	msr = mfmsr();
	mtmsr(msr | PSL_VSX);

	NORM(0);  NORM(1);  NORM(2);  NORM(3);  NORM(4);
	NORM(5);  NORM(6);  NORM(7);  NORM(8);  NORM(9);
	NORM10(1); NORM10(2); NORM10(3); NORM10(4); NORM10(5);
	NORM(60); NORM(61); NORM(62); NORM(63);

	/* restore MSR */
	mtmsr(msr);
}
#endif

#ifdef KDB
int
db_trap_glue(struct trapframe *frame)
{

	if (!(frame->srr1 & PSL_PR)
	    && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
		|| frame_is_trap_inst(frame)
		|| frame->exc == EXC_BPT
		|| frame->exc == EXC_DEBUG
		|| frame->exc == EXC_DSI)) {
		int type = frame->exc;

		/* Ignore DTrace traps. */
		if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
			return (0);
		if (frame_is_trap_inst(frame)) {
			type = T_BREAKPOINT;
		}
		return (kdb_trap(type, 0, frame));
	}

	return (0);
}
#endif
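
For reference, a minimal userland sketch (not part of this change set) that exercises the DSCR paths added above: the first problem-state access to SPR 3 may trap, after which the kernel either enables the facility through the FSCR for this thread (PowerISA 2.07+ CPUs, the EXC_FAC case in trap.c) or emulates the SPR access (emulate_mfspr()/emulate_mtspr()). This assumes a powerpc64 target; the DPFD value written is arbitrary and purely illustrative.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t dscr;

	/*
	 * The first problem-state read of SPR 3 (DSCR) may raise a
	 * facility-unavailable or illegal-instruction interrupt; the
	 * kernel then either sets FSCR[DSCR] for this thread or
	 * emulates the access, so the program continues normally.
	 */
	__asm __volatile("mfspr %0, 3" : "=r"(dscr));
	printf("initial DSCR = %#jx\n", (uintmax_t)dscr);

	/* Request a default prefetch depth of 4 (DPFD bits); arbitrary. */
	__asm __volatile("mtspr 3, %0" : : "r"((uint64_t)4));

	__asm __volatile("mfspr %0, 3" : "=r"(dscr));
	printf("updated DSCR = %#jx\n", (uintmax_t)dscr);
	return (0);
}

The per-thread value survives context switches (see the PCB_CFSCR/PCB_CDSCR handling in swtch64.S above) and is reset on exec and thread exit by cleanup_power_extras().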