Index: head/sys/arm/include/armreg.h
===================================================================
--- head/sys/arm/include/armreg.h	(revision 306499)
+++ head/sys/arm/include/armreg.h	(revision 306500)
@@ -1,473 +1,473 @@
/*	$NetBSD: armreg.h,v 1.37 2007/01/06 00:50:54 christos Exp $	*/

/*-
 * Copyright (c) 1998, 2001 Ben Harris
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MACHINE_ARMREG_H
#define MACHINE_ARMREG_H

#define INSN_SIZE	4
#define INSN_COND_MASK	0xf0000000	/* Condition mask */
#define PSR_MODE	0x0000001f	/* mode mask */
#define PSR_USR32_MODE	0x00000010
#define PSR_FIQ32_MODE	0x00000011
#define PSR_IRQ32_MODE	0x00000012
#define PSR_SVC32_MODE	0x00000013
#define PSR_MON32_MODE	0x00000016
#define PSR_ABT32_MODE	0x00000017
#define PSR_HYP32_MODE	0x0000001a
#define PSR_UND32_MODE	0x0000001b
#define PSR_SYS32_MODE	0x0000001f
#define PSR_32_MODE	0x00000010
#define PSR_T		0x00000020	/* Instruction set bit */
#define PSR_F		0x00000040	/* FIQ disable bit */
#define PSR_I		0x00000080	/* IRQ disable bit */
#define PSR_A		0x00000100	/* Imprecise abort bit */
#define PSR_E		0x00000200	/* Data endianness bit */
#define PSR_GE		0x000f0000	/* Greater than or equal to bits */
#define PSR_J		0x01000000	/* Java bit */
#define PSR_Q		0x08000000	/* Sticky overflow bit */
#define PSR_V		0x10000000	/* Overflow bit */
#define PSR_C		0x20000000	/* Carry bit */
#define PSR_Z		0x40000000	/* Zero bit */
#define PSR_N		0x80000000	/* Negative bit */
#define PSR_FLAGS	0xf0000000	/* Flags mask. */
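/*
 * Example (illustrative sketch, not part of the header): how a trap
 * handler might use the PSR_MODE field; `psr` is a hypothetical saved
 * program-status word, e.g. taken from a trapframe.
 */
static __inline int
psr_came_from_user(uint32_t psr)
{

	/* The low five bits encode the mode the exception was taken from. */
	return ((psr & PSR_MODE) == PSR_USR32_MODE);
}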
/* The high-order byte is always the implementor */
#define CPU_ID_IMPLEMENTOR_MASK	0xff000000
#define CPU_ID_ARM_LTD		0x41000000 /* 'A' */
#define CPU_ID_DEC		0x44000000 /* 'D' */
#define CPU_ID_MOTOROLA		0x4D000000 /* 'M' */
#define CPU_ID_QUALCOM		0x51000000 /* 'Q' */
#define CPU_ID_TI		0x54000000 /* 'T' */
#define CPU_ID_MARVELL		0x56000000 /* 'V' */
#define CPU_ID_INTEL		0x69000000 /* 'i' */
#define CPU_ID_FARADAY		0x66000000 /* 'f' */

#define CPU_ID_VARIANT_SHIFT	20
#define CPU_ID_VARIANT_MASK	0x00f00000

/* How to decide what format the CPUID is in. */
#define CPU_ID_ISOLD(x)		(((x) & 0x0000f000) == 0x00000000)
#define CPU_ID_IS7(x)		(((x) & 0x0000f000) == 0x00007000)
#define CPU_ID_ISNEW(x)		(!CPU_ID_ISOLD(x) && !CPU_ID_IS7(x))

/* On recent ARMs this byte holds the architecture and variant (sub-model) */
#define CPU_ID_ARCH_MASK	0x000f0000
#define CPU_ID_ARCH_V3		0x00000000
#define CPU_ID_ARCH_V4		0x00010000
#define CPU_ID_ARCH_V4T		0x00020000
#define CPU_ID_ARCH_V5		0x00030000
#define CPU_ID_ARCH_V5T		0x00040000
#define CPU_ID_ARCH_V5TE	0x00050000
#define CPU_ID_ARCH_V5TEJ	0x00060000
#define CPU_ID_ARCH_V6		0x00070000
#define CPU_ID_CPUID_SCHEME	0x000f0000

/* Next three nybbles are part number */
#define CPU_ID_PARTNO_MASK	0x0000fff0

/* Intel XScale has subfields in the part number */
#define CPU_ID_XSCALE_COREGEN_MASK	0x0000e000 /* core generation */
#define CPU_ID_XSCALE_COREREV_MASK	0x00001c00 /* core revision */
#define CPU_ID_XSCALE_PRODUCT_MASK	0x000003f0 /* product number */

/* And finally, the revision number. */
#define CPU_ID_REVISION_MASK	0x0000000f

/* Individual CPUs are probably best IDed by everything but the revision. */
#define CPU_ID_CPU_MASK		0xfffffff0

/* ARM9 and later CPUs */
#define CPU_ID_ARM920T		0x41129200
#define CPU_ID_ARM920T_ALT	0x41009200
#define CPU_ID_ARM922T		0x41029220
#define CPU_ID_ARM926EJS	0x41069260
#define CPU_ID_ARM940T		0x41029400 /* XXX no MMU */
#define CPU_ID_ARM946ES		0x41049460 /* XXX no MMU */
#define CPU_ID_ARM966ES		0x41049660 /* XXX no MMU */
#define CPU_ID_ARM966ESR1	0x41059660 /* XXX no MMU */
#define CPU_ID_ARM1020E		0x4115a200 /* (AKA arm10 rev 1) */
#define CPU_ID_ARM1022ES	0x4105a220
#define CPU_ID_ARM1026EJS	0x4106a260
#define CPU_ID_ARM1136JS	0x4107b360
#define CPU_ID_ARM1136JSR1	0x4117b360
#define CPU_ID_ARM1176JZS	0x410fb760

/* CPUs that follow the CPUID scheme */
#define	CPU_ID_SCHEME_MASK \
    (CPU_ID_IMPLEMENTOR_MASK | CPU_ID_ARCH_MASK | CPU_ID_PARTNO_MASK)

#define	CPU_ID_CORTEXA5		(CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc050)
#define	CPU_ID_CORTEXA7		(CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc070)
#define	CPU_ID_CORTEXA8		(CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc080)
#define	CPU_ID_CORTEXA8R1	(CPU_ID_CORTEXA8 | (1 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_CORTEXA8R2	(CPU_ID_CORTEXA8 | (2 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_CORTEXA8R3	(CPU_ID_CORTEXA8 | (3 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_CORTEXA9		(CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc090)
#define	CPU_ID_CORTEXA9R1	(CPU_ID_CORTEXA9 | (1 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_CORTEXA9R2	(CPU_ID_CORTEXA9 | (2 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_CORTEXA9R3	(CPU_ID_CORTEXA9 | (3 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_CORTEXA9R4	(CPU_ID_CORTEXA9 | (4 << CPU_ID_VARIANT_SHIFT))
-/* XXX: Cortx-A12 is the old name for this part, it has been renamed the A17 */
+/* XXX: Cortex-A12 is the old name for this part, it has been renamed the A17 */
#define	CPU_ID_CORTEXA12	(CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc0d0)
#define	CPU_ID_CORTEXA12R0	(CPU_ID_CORTEXA12 | (0 << CPU_ID_VARIANT_SHIFT))
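/*
 * Example (illustrative sketch): matching a CPUID-scheme part regardless
 * of its variant and revision; `midr` would typically come from
 * cp15_midr_get(), provided by machine/cpu-v6.h.
 */
static __inline int
cpu_is_cortexa9(uint32_t midr)
{

	/* Mask off variant/revision, keep implementor, arch, and part no. */
	return ((midr & CPU_ID_SCHEME_MASK) == CPU_ID_CORTEXA9);
}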
#define	CPU_ID_CORTEXA15	(CPU_ID_ARM_LTD | CPU_ID_CPUID_SCHEME | 0xc0f0)
#define	CPU_ID_CORTEXA15R0	(CPU_ID_CORTEXA15 | (0 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_CORTEXA15R1	(CPU_ID_CORTEXA15 | (1 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_CORTEXA15R2	(CPU_ID_CORTEXA15 | (2 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_CORTEXA15R3	(CPU_ID_CORTEXA15 | (3 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_KRAIT300		(CPU_ID_QUALCOM | CPU_ID_CPUID_SCHEME | 0x06f0)
/* Snapdragon S4 Pro/APQ8064 */
#define	CPU_ID_KRAIT300R0	(CPU_ID_KRAIT300 | (0 << CPU_ID_VARIANT_SHIFT))
#define	CPU_ID_KRAIT300R1	(CPU_ID_KRAIT300 | (1 << CPU_ID_VARIANT_SHIFT))

#define CPU_ID_TI925T		0x54029250
#define CPU_ID_MV88FR131	0x56251310 /* Marvell Feroceon 88FR131 Core */
#define CPU_ID_MV88FR331	0x56153310 /* Marvell Feroceon 88FR331 Core */
#define CPU_ID_MV88FR571_VD	0x56155710 /* Marvell Feroceon 88FR571-VD Core (ID from datasheet) */

/*
 * The LokiPlus core also has its ID set to 0x41159260, and this define would
 * cause execution of unsupported L2-cache instructions there, so it needs to
 * be disabled.  0x41159260 is a generic ARM926E-S ID.
 */
#ifdef SOC_MV_LOKIPLUS
#define CPU_ID_MV88FR571_41	0x00000000
#else
#define CPU_ID_MV88FR571_41	0x41159260 /* Marvell Feroceon 88FR571-VD Core (actual ID from CPU reg) */
#endif

#define	CPU_ID_MV88SV581X_V7	0x561F5810 /* Marvell Sheeva 88SV581x v7 Core */
#define	CPU_ID_MV88SV584X_V7	0x562F5840 /* Marvell Sheeva 88SV584x v7 Core */
/* Marvell's CPUIDs with ARM ID in implementor field */
#define	CPU_ID_ARM_88SV581X_V7	0x413FC080 /* Marvell Sheeva 88SV581x v7 Core */

#define CPU_ID_FA526		0x66015260
#define CPU_ID_FA626TE		0x66056260
#define CPU_ID_80200		0x69052000
#define CPU_ID_PXA250		0x69052100 /* sans core revision */
#define CPU_ID_PXA210		0x69052120
#define CPU_ID_PXA250A		0x69052100 /* 1st version Core */
#define CPU_ID_PXA210A		0x69052120 /* 1st version Core */
#define CPU_ID_PXA250B		0x69052900 /* 3rd version Core */
#define CPU_ID_PXA210B		0x69052920 /* 3rd version Core */
#define CPU_ID_PXA250C		0x69052d00 /* 4th version Core */
#define CPU_ID_PXA210C		0x69052d20 /* 4th version Core */
#define	CPU_ID_PXA27X		0x69054110
#define	CPU_ID_80321_400	0x69052420
#define	CPU_ID_80321_600	0x69052430
#define	CPU_ID_80321_400_B0	0x69052c20
#define	CPU_ID_80321_600_B0	0x69052c30
#define	CPU_ID_80219_400	0x69052e20 /* A0 stepping/revision. */
#define	CPU_ID_80219_600	0x69052e30 /* A0 stepping/revision. */
#define CPU_ID_81342		0x69056810
#define CPU_ID_IXP425		0x690541c0
#define CPU_ID_IXP425_533	0x690541c0
#define CPU_ID_IXP425_400	0x690541d0
#define CPU_ID_IXP425_266	0x690541f0
#define CPU_ID_IXP435		0x69054040
#define CPU_ID_IXP465		0x69054200
/* CPUID registers */
#define ARM_PFR0_ARM_ISA_MASK	0x0000000f

#define ARM_PFR0_THUMB_MASK	0x000000f0
#define ARM_PFR0_THUMB		0x10
#define ARM_PFR0_THUMB2		0x30

#define ARM_PFR0_JAZELLE_MASK	0x00000f00
#define ARM_PFR0_THUMBEE_MASK	0x0000f000

#define ARM_PFR1_ARMV4_MASK	0x0000000f
#define ARM_PFR1_SEC_EXT_MASK	0x000000f0
#define ARM_PFR1_MICROCTRL_MASK	0x00000f00

/*
 * Post-ARM3 CP15 registers:
 *
 *	1	Control register
 *
 *	2	Translation Table Base
 *
 *	3	Domain Access Control
 *
 *	4	Reserved
 *
 *	5	Fault Status
 *
 *	6	Fault Address
 *
 *	7	Cache/write-buffer Control
 *
 *	8	TLB Control
 *
 *	9	Cache Lockdown
 *
 *	10	TLB Lockdown
 *
 *	11	Reserved
 *
 *	12	Reserved
 *
 *	13	Process ID (for FCSE)
 *
 *	14	Reserved
 *
 *	15	Implementation Dependent
 */

/* Some of the definitions below need cleaning up for V3/V4 architectures */

/* CPU control register (CP15 register 1) */
#define CPU_CONTROL_MMU_ENABLE	0x00000001 /* M: MMU/Protection unit enable */
#define CPU_CONTROL_AFLT_ENABLE	0x00000002 /* A: Alignment fault enable */
#define CPU_CONTROL_DC_ENABLE	0x00000004 /* C: IDC/DC enable */
#define CPU_CONTROL_WBUF_ENABLE	0x00000008 /* W: Write buffer enable */
#define CPU_CONTROL_32BP_ENABLE	0x00000010 /* P: 32-bit exception handlers */
#define CPU_CONTROL_32BD_ENABLE	0x00000020 /* D: 32-bit addressing */
#define CPU_CONTROL_LABT_ENABLE	0x00000040 /* L: Late abort enable */
#define CPU_CONTROL_BEND_ENABLE	0x00000080 /* B: Big-endian mode */
#define CPU_CONTROL_SYST_ENABLE	0x00000100 /* S: System protection bit */
#define CPU_CONTROL_ROM_ENABLE	0x00000200 /* R: ROM protection bit */
#define CPU_CONTROL_CPCLK	0x00000400 /* F: Implementation defined */
#define CPU_CONTROL_SW_ENABLE	0x00000400 /* SW: SWP instruction enable */
#define CPU_CONTROL_BPRD_ENABLE	0x00000800 /* Z: Branch prediction enable */
#define CPU_CONTROL_IC_ENABLE	0x00001000 /* I: IC enable */
#define CPU_CONTROL_VECRELOC	0x00002000 /* V: Vector relocation */
#define CPU_CONTROL_ROUNDROBIN	0x00004000 /* RR: Predictable replacement */
#define CPU_CONTROL_V4COMPAT	0x00008000 /* L4: ARMv4 compat LDR R15 etc */
#define CPU_CONTROL_HAF_ENABLE	0x00020000 /* HA: Hardware Access Flag Enable */
#define CPU_CONTROL_FI_ENABLE	0x00200000 /* FI: Low interrupt latency */
#define CPU_CONTROL_UNAL_ENABLE	0x00400000 /* U: unaligned data access */
#define CPU_CONTROL_V6_EXTPAGE	0x00800000 /* XP: ARMv6 extended page tables */
#define CPU_CONTROL_V_ENABLE	0x01000000 /* VE: Interrupt vectors enable */
#define CPU_CONTROL_EX_BEND	0x02000000 /* EE: exception endianness */
#define CPU_CONTROL_L2_ENABLE	0x04000000 /* L2 Cache enabled */
#define CPU_CONTROL_NMFI	0x08000000 /* NMFI: Non-maskable FIQ */
#define CPU_CONTROL_TR_ENABLE	0x10000000 /* TRE: TEX Remap */
#define CPU_CONTROL_AF_ENABLE	0x20000000 /* AFE: Access Flag enable */
#define CPU_CONTROL_TE_ENABLE	0x40000000 /* TE: Thumb Exception enable */

#define CPU_CONTROL_IDC_ENABLE	CPU_CONTROL_DC_ENABLE
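/*
 * Example (illustrative sketch): testing SCTLR bits with the
 * CPU_CONTROL_* masks above; cp15_sctlr_get() is provided by
 * machine/cpu-v6.h.
 */
static __inline int
cpu_alignment_faults_enabled(void)
{

	/* A-bit set means unaligned accesses raise alignment faults. */
	return ((cp15_sctlr_get() & CPU_CONTROL_AFLT_ENABLE) != 0);
}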
/* ARM11x6 Auxiliary Control Register (CP15 register 1, opcode2 1) */
#define	ARM11X6_AUXCTL_RS	0x00000001 /* return stack */
#define	ARM11X6_AUXCTL_DB	0x00000002 /* dynamic branch prediction */
#define	ARM11X6_AUXCTL_SB	0x00000004 /* static branch prediction */
#define	ARM11X6_AUXCTL_TR	0x00000008 /* MicroTLB replacement strat. */
#define	ARM11X6_AUXCTL_EX	0x00000010 /* exclusive L1/L2 cache */
#define	ARM11X6_AUXCTL_RA	0x00000020 /* clean entire cache disable */
#define	ARM11X6_AUXCTL_RV	0x00000040 /* block transfer cache disable */
#define	ARM11X6_AUXCTL_CZ	0x00000080 /* restrict cache size */

/* ARM1136 Auxiliary Control Register (CP15 register 1, opcode2 1) */
#define ARM1136_AUXCTL_PFI	0x80000000 /* PFI: partial FI mode. */
			/*
			 * This is an undocumented flag used to work around a
			 * cache bug in r0 steppings.  See errata 364296.
			 */

/* ARM1176 Auxiliary Control Register (CP15 register 1, opcode2 1) */
#define	ARM1176_AUXCTL_PHD	0x10000000 /* inst. prefetch halting disable */
#define	ARM1176_AUXCTL_BFD	0x20000000 /* branch folding disable */
#define	ARM1176_AUXCTL_FSD	0x40000000 /* force speculative ops disable */
#define	ARM1176_AUXCTL_FIO	0x80000000 /* low intr latency override */

/* XScale Auxiliary Control Register (CP15 register 1, opcode2 1) */
#define	XSCALE_AUXCTL_K		0x00000001 /* dis. write buffer coalescing */
#define	XSCALE_AUXCTL_P		0x00000002 /* ECC protect page table access */
/* Note: XScale core 3 uses these for LLR DCache attributes */
#define	XSCALE_AUXCTL_MD_WB_RA	0x00000000 /* mini-D$ wb, read-allocate */
#define	XSCALE_AUXCTL_MD_WB_RWA	0x00000010 /* mini-D$ wb, read/write-allocate */
#define	XSCALE_AUXCTL_MD_WT	0x00000020 /* mini-D$ wt, read-allocate */
#define	XSCALE_AUXCTL_MD_MASK	0x00000030

/* XScale Core 3 only */
#define	XSCALE_AUXCTL_LLR	0x00000400 /* Enable L2 for LLR Cache */

/* Marvell Extra Features Register (CP15 register 1, opcode2 0) */
#define	MV_DC_REPLACE_LOCK	0x80000000 /* Replace DCache Lock */
#define	MV_DC_STREAM_ENABLE	0x20000000 /* DCache Streaming Switch */
#define	MV_WA_ENABLE		0x10000000 /* Enable Write Allocate */
#define	MV_L2_PREFETCH_DISABLE	0x01000000 /* L2 Cache Prefetch Disable */
#define	MV_L2_INV_EVICT_ERR	0x00800000 /* L2 Invalidates Uncorrectable Error Line Eviction */
#define	MV_L2_ENABLE		0x00400000 /* L2 Cache enable */
#define	MV_IC_REPLACE_LOCK	0x00080000 /* Replace ICache Lock */
#define	MV_BGH_ENABLE		0x00040000 /* Branch Global History Register Enable */
#define	MV_BTB_DISABLE		0x00020000 /* Branch Target Buffer Disable */
#define	MV_L1_PARERR_ENABLE	0x00010000 /* L1 Parity Error Enable */

/* Cache type register definitions */
#define	CPU_CT_ISIZE(x)		((x) & 0xfff)		/* I$ info */
#define	CPU_CT_DSIZE(x)		(((x) >> 12) & 0xfff)	/* D$ info */
#define	CPU_CT_S		(1U << 24)		/* split cache */
#define	CPU_CT_CTYPE(x)		(((x) >> 25) & 0xf)	/* cache type */
#define	CPU_CT_FORMAT(x)	((x) >> 29)
/* Cache type register definitions for ARM v7 */
#define	CPU_CT_IMINLINE(x)	((x) & 0xf)		/* I$ min line size */
#define	CPU_CT_DMINLINE(x)	(((x) >> 16) & 0xf)	/* D$ min line size */

#define	CPU_CT_CTYPE_WT		0	/* write-through */
#define	CPU_CT_CTYPE_WB1	1	/* write-back, clean w/ read */
#define	CPU_CT_CTYPE_WB2	2	/* w/b, clean w/ cp15,7 */
#define	CPU_CT_CTYPE_WB6	6	/* w/b, cp15,7, lockdown fmt A */
#define	CPU_CT_CTYPE_WB7	7	/* w/b, cp15,7, lockdown fmt B */

#define	CPU_CT_xSIZE_LEN(x)	((x) & 0x3)		/* line size */
#define	CPU_CT_xSIZE_M		(1U << 2)		/* multiplier */
#define	CPU_CT_xSIZE_ASSOC(x)	(((x) >> 3) & 0x7)	/* associativity */
#define	CPU_CT_xSIZE_SIZE(x)	(((x) >> 6) & 0x7)	/* size */

#define	CPU_CT_ARMV7		0x4
/* ARM v7 Cache type definitions */
#define	CPUV7_CT_CTYPE_WT	(1U << 31)
#define	CPUV7_CT_CTYPE_WB	(1 << 30)
#define	CPUV7_CT_CTYPE_RA	(1 << 29)
#define	CPUV7_CT_CTYPE_WA	(1 << 28)

#define	CPUV7_CT_xSIZE_LEN(x)	((x) & 0x7)		/* line size */
#define	CPUV7_CT_xSIZE_ASSOC(x)	(((x) >> 3) & 0x3ff)	/* associativity */
#define	CPUV7_CT_xSIZE_SET(x)	(((x) >> 13) & 0x7fff)	/* num sets */

#define	CPUV7_L2CTLR_NPROC_SHIFT	24
#define	CPUV7_L2CTLR_NPROC(r)	((((r) >> CPUV7_L2CTLR_NPROC_SHIFT) & 3) + 1)

#define	CPU_CLIDR_CTYPE(reg,x)	(((reg) >> ((x) * 3)) & 0x7)
#define	CPU_CLIDR_LOUIS(reg)	(((reg) >> 21) & 0x7)
#define	CPU_CLIDR_LOC(reg)	(((reg) >> 24) & 0x7)
#define	CPU_CLIDR_LOUU(reg)	(((reg) >> 27) & 0x7)

#define	CACHE_ICACHE		1
#define	CACHE_DCACHE		2
#define	CACHE_SEP_CACHE		3
#define	CACHE_UNI_CACHE		4
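/*
 * Example (illustrative sketch): decoding the geometry of one cache
 * level from CCSIDR via the CPUV7_CT_xSIZE_* macros above.  It assumes
 * the usual ARMv7 CSSELR encoding (bit 0 selects I- vs. D-side, bits
 * [3:1] hold the zero-based level); cp15_csselr_set(),
 * cp15_ccsidr_get(), and isb() are provided by machine/cpu-v6.h.
 */
static __inline void
cache_level_geometry(int level, int icache, u_int *linesize, u_int *assoc,
    u_int *nsets)
{
	uint32_t ccsidr;

	/* Select the cache level and side, then read its size ID register. */
	cp15_csselr_set((level << 1) | (icache ? 1 : 0));
	isb();
	ccsidr = cp15_ccsidr_get();

	*linesize = 1 << (CPUV7_CT_xSIZE_LEN(ccsidr) + 4);	/* in bytes */
	*assoc = CPUV7_CT_xSIZE_ASSOC(ccsidr) + 1;
	*nsets = CPUV7_CT_xSIZE_SET(ccsidr) + 1;
}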
/* Fault status register definitions */
#define FAULT_USER	0x10

#if __ARM_ARCH < 6
#define FAULT_TYPE_MASK	0x0f
#define FAULT_WRTBUF_0	0x00 /* Vector Exception */
#define FAULT_WRTBUF_1	0x02 /* Terminal Exception */
#define FAULT_BUSERR_0	0x04 /* External Abort on Linefetch -- Section */
#define FAULT_BUSERR_1	0x06 /* External Abort on Linefetch -- Page */
#define FAULT_BUSERR_2	0x08 /* External Abort on Non-linefetch -- Section */
#define FAULT_BUSERR_3	0x0a /* External Abort on Non-linefetch -- Page */
#define FAULT_BUSTRNL1	0x0c /* External abort on Translation -- Level 1 */
#define FAULT_BUSTRNL2	0x0e /* External abort on Translation -- Level 2 */
#define FAULT_ALIGN_0	0x01 /* Alignment */
#define FAULT_ALIGN_1	0x03 /* Alignment */
#define FAULT_TRANS_S	0x05 /* Translation -- Section */
#define FAULT_TRANS_F	0x06 /* Translation -- Flag */
#define FAULT_TRANS_P	0x07 /* Translation -- Page */
#define FAULT_DOMAIN_S	0x09 /* Domain -- Section */
#define FAULT_DOMAIN_P	0x0b /* Domain -- Page */
#define FAULT_PERM_S	0x0d /* Permission -- Section */
#define FAULT_PERM_P	0x0f /* Permission -- Page */

#define FAULT_IMPRECISE	0x400 /* Imprecise exception (XSCALE) */
#define FAULT_EXTERNAL	0x400 /* External abort (armv6+) */
#define FAULT_WNR	0x800 /* Write-not-Read access (armv6+) */
#else /* __ARM_ARCH < 6 */
#define FAULT_ALIGN		0x001 /* Alignment Fault */
#define FAULT_DEBUG		0x002 /* Debug Event */
#define FAULT_ACCESS_L1		0x003 /* Access Bit (L1) */
#define FAULT_ICACHE		0x004 /* Instruction cache maintenance */
#define FAULT_TRAN_L1		0x005 /* Translation Fault (L1) */
#define FAULT_ACCESS_L2		0x006 /* Access Bit (L2) */
#define FAULT_TRAN_L2		0x007 /* Translation Fault (L2) */
#define FAULT_EA_PREC		0x008 /* External Abort */
#define FAULT_DOMAIN_L1		0x009 /* Domain Fault (L1) */
#define FAULT_DOMAIN_L2		0x00B /* Domain Fault (L2) */
#define FAULT_EA_TRAN_L1	0x00C /* External Translation Abort (L1) */
#define FAULT_PERM_L1		0x00D /* Permission Fault (L1) */
#define FAULT_EA_TRAN_L2	0x00E /* External Translation Abort (L2) */
#define FAULT_PERM_L2		0x00F /* Permission Fault (L2) */
#define FAULT_TLB_CONFLICT	0x010 /* TLB Conflict Abort */
#define FAULT_EA_IMPREC		0x016 /* Asynchronous External Abort */
#define FAULT_PE_IMPREC		0x018 /* Asynchronous Parity Error */
#define FAULT_PARITY		0x019 /* Parity Error */
#define FAULT_PE_TRAN_L1	0x01C /* Parity Error on Translation (L1) */
#define FAULT_PE_TRAN_L2	0x01E /* Parity Error on Translation (L2) */

#define FSR_TO_FAULT(fsr)	(((fsr) & 0xF) | \
				 ((((fsr) & (1 << 10)) >> (10 - 4))))
#define FSR_LPAE		(1 << 9)  /* LPAE indicator */
#define FSR_WNR			(1 << 11) /* Write-not-Read access */
#define FSR_EXT			(1 << 12) /* DECERR/SLVERR for external */
#define FSR_CM			(1 << 13) /* Cache maintenance fault */
#endif /* !__ARM_ARCH < 6 */
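/*
 * Example (illustrative sketch): classifying a data abort on ARMv6+.
 * FSR_TO_FAULT() folds FS[4] (DFSR bit 10) down onto the low FS[3:0]
 * bits, so the result compares directly against the FAULT_* codes
 * above; cp15_dfsr_get() is provided by machine/cpu-v6.h.
 */
static __inline int
abort_is_permission_fault(uint32_t dfsr)
{
	uint32_t fault = FSR_TO_FAULT(dfsr);

	return (fault == FAULT_PERM_L1 || fault == FAULT_PERM_L2);
}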
/*
 * Address of the vector page, low and high versions.
 */
#ifndef __ASSEMBLER__
#define	ARM_VECTORS_LOW		0x00000000U
#define	ARM_VECTORS_HIGH	0xffff0000U
#else
#define	ARM_VECTORS_LOW		0
#define	ARM_VECTORS_HIGH	0xffff0000
#endif

/*
 * ARM Instructions
 *
 *       3 3 2 2 2
 *       1 0 9 8 7                                                     0
 *      +-------+-------------------------------------------------------+
 *      | cond  |                  instruction dependent                |
 *      |c c c c|                                                       |
 *      +-------+-------------------------------------------------------+
 */

#define INSN_SIZE	4		/* Always 4 bytes */
#define INSN_COND_MASK	0xf0000000	/* Condition mask */
#define INSN_COND_AL	0xe0000000	/* Always condition */

/* ARM register defines */
#define	ARM_REG_SIZE	4
#define	ARM_REG_NUM_PC	15
#define	ARM_REG_NUM_LR	14
#define	ARM_REG_NUM_SP	13

#define	THUMB_INSN_SIZE	2		/* Some are 4 bytes. */

#endif /* !MACHINE_ARMREG_H */
Index: head/sys/arm/include/cpu-v6.h
===================================================================
--- head/sys/arm/include/cpu-v6.h	(revision 306499)
+++ head/sys/arm/include/cpu-v6.h	(revision 306500)
@@ -1,633 +1,655 @@
/*-
 * Copyright 2014 Svatopluk Kraus
 * Copyright 2014 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MACHINE_CPU_V6_H
#define MACHINE_CPU_V6_H

/* There are no user serviceable parts here, they may change without notice */

#ifndef _KERNEL
#error Only include this file in the kernel
#endif

#include
#include
#include
#include

#if __ARM_ARCH < 6
#error Only include this file for ARMv6
#else

#define CPU_ASID_KERNEL 0

void dcache_wbinv_poc_all(void); /* !!! NOT SMP coherent function !!! */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

#ifdef DEV_PMU
#include
#define	PMU_OVSR_C		0x80000000	/* Cycle Counter */
extern uint32_t	ccnt_hi[MAXCPU];
extern int pmu_attched;
#endif /* DEV_PMU */

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s
#define	_RF0(fname, aname...)						\
static __inline register_t						\
fname(void)								\
{									\
	register_t reg;							\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));		\
	return(reg);							\
}

#define	_R64F0(fname, aname)						\
static __inline uint64_t						\
fname(void)								\
{									\
	uint64_t reg;							\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));		\
	return(reg);							\
}

#define	_WF0(fname, aname...)						\
static __inline void							\
fname(void)								\
{									\
	__asm __volatile("mcr\t" _FX(aname));				\
}

#define	_WF1(fname, aname...)						\
static __inline void							\
fname(register_t reg)							\
{									\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));		\
}

#define	_W64F1(fname, aname...)						\
static __inline void							\
fname(uint64_t reg)							\
{									\
	__asm __volatile("mcrr\t" _FX(aname):: "r" (reg));		\
}
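/*
 * Example (illustrative sketch): given the accessor definitions that
 * follow, _RF0(cp15_midr_get, CP15_MIDR(%0)) generates roughly the
 * function below, assuming CP15_MIDR() expands (via machine/sysreg.h)
 * to the coprocessor operand list "p15, 0, %0, c0, c0, 0" which _FX()
 * then stringizes:
 *
 *	static __inline register_t
 *	cp15_midr_get(void)
 *	{
 *		register_t reg;
 *		__asm __volatile("mrc\tp15, 0, %0, c0, c0, 0" : "=r" (reg));
 *		return (reg);
 *	}
 */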
/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)	/* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)	/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))	/* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))	/* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))	/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)		/* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)	/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))	/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))	/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))	/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))	/* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))	/* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))	/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))	/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))	/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)	/* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)	/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))	/* Instruction cache invalidate */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0))
_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))

/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
#elif __ARM_ARCH > 6
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))

#undef	_FX
#undef	_RF0
#undef	_WF0
#undef	_WF1
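/*
 * Example (illustrative sketch): measuring an interval with the generic
 * timer accessors above.  It assumes the generic timer is present and
 * that firmware programmed CNTFRQ; isb() keeps the counter read from
 * being speculated ahead of program order.
 */
static __inline uint64_t
elapsed_usecs(uint64_t start_ticks)
{
	uint64_t now;

	isb();
	now = cp15_cntpct_get();
	return ((now - start_ticks) * 1000000 / cp15_cntfrq_get());
}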
/*
 * TLB maintenance operations.
 */

/* Local (i.e., not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

	dsb();
	_CP15_TLBIALL();
	dsb();
}

/* Flush all non-global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

	dsb();
	_CP15_TLBIASID(CPU_ASID_KERNEL);
	dsb();
}

/* Flush single TLB entry (even global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Flush range of TLB entries (even global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined SMP

+/* Used to detect SMP */
+extern int mp_ncpus;
+
static __inline void
tlb_flush_all(void)
{

	dsb();
-	_CP15_TLBIALLIS();
+	if (mp_ncpus == 1)
+		_CP15_TLBIALL();
+	else
+		_CP15_TLBIALLIS();
	dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

	dsb();
-	_CP15_TLBIASIDIS(CPU_ASID_KERNEL);
+	if (mp_ncpus == 1)
+		_CP15_TLBIASID(CPU_ASID_KERNEL);
+	else
+		_CP15_TLBIASIDIS(CPU_ASID_KERNEL);
	dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
-	_CP15_TLBIMVAAIS(va);
+	if (mp_ncpus == 1)
+		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
+	else
+		_CP15_TLBIMVAAIS(va);
	dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
-	for (; va < eva; va += PAGE_SIZE)
-		_CP15_TLBIMVAAIS(va);
+	if (mp_ncpus == 1) {
+		for (; va < eva; va += PAGE_SIZE)
+			_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
+	} else {
+		for (; va < eva; va += PAGE_SIZE)
+			_CP15_TLBIMVAAIS(va);
+	}
	dsb();
}
#else /* SMP */

#define tlb_flush_all()			tlb_flush_all_local()
#define tlb_flush_all_ng()		tlb_flush_all_ng_local()
#define tlb_flush(va)			tlb_flush_local(va)
#define tlb_flush_range(va, size)	tlb_flush_range_local(va, size)

#endif /* SMP */
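/*
 * Example (illustrative sketch): the usual calling pattern, with a raw
 * hypothetical PTE store standing in for the pmap's real update logic.
 * On a kernel built with SMP but booted on a single core
 * (mp_ncpus == 1), the broadcasting functions above quietly fall back
 * to the cheaper local CP15 operations.
 */
static __inline void
remap_page_example(uint32_t *ptep, uint32_t npte, vm_offset_t va)
{

	*ptep = npte;		/* publish the new PTE... */
	dsb();			/* ...make it visible to the table walker... */
	tlb_flush(va);		/* ...then discard any stale TLB entry */
}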
/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
-		_CP15_DCCMVAU(va);
-#else
-		_CP15_DCCMVAC(va);
+		if (mp_ncpus > 1)
+			_CP15_DCCMVAU(va);
+		else
#endif
+			_CP15_DCCMVAC(va);
	}
	dsb();
#if __ARM_ARCH >= 7 && defined SMP
-	_CP15_ICIALLUIS();
-#else
-	_CP15_ICIALLU();
+	if (mp_ncpus > 1)
+		_CP15_ICIALLUIS();
+	else
#endif
+		_CP15_ICIALLU();
	dsb();
	isb();
}

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{

#if __ARM_ARCH >= 7 && defined SMP
-	_CP15_ICIALLUIS();
-#else
-	_CP15_ICIALLU();
+	if (mp_ncpus > 1)
+		_CP15_ICIALLUIS();
+	else
#endif
+		_CP15_ICIALLU();
	dsb();
	isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{

#if __ARM_ARCH >= 7 && defined SMP
-	_CP15_BPIALLIS();
-#else
-	_CP15_BPIALL();
+	if (mp_ncpus > 1)
+		_CP15_BPIALLIS();
+	else
#endif
+		_CP15_BPIALL();
	dsb();
	isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
-		_CP15_DCCMVAU(va);
-#else
-		_CP15_DCCMVAC(va);
+		if (mp_ncpus > 1)
+			_CP15_DCCMVAU(va);
+		else
#endif
+			_CP15_DCCMVAC(va);
	}
	dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction.  In the given range, if there was no dirty
 * cacheline in any cache before, no stale cacheline should remain in them
 * after this operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* invalidate L2 first */
	cpu_l2cache_inv_range(pa, size);

	/* then L1 */
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2
 * doesn't flow into L1 while invalidating.  This routine is intended to be
 * used only when invalidating a buffer before a DMA operation loads new
 * data into memory.  The concern in this case is that dirty lines are not
 * evicted to main memory, overwriting the DMA data.  For that reason, the
 * L1 is done first to ensure that an evicted L1 line doesn't flow to L2
 * after the L2 has been cleaned.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	/* invalidate L1 first */
	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();

	/* then L2 */
	cpu_l2cache_inv_range(pa, size);
}

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction.  In the given range, no dirty cacheline should
 * remain in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();
	cpu_l2cache_wb_range(pa, size);
}
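/*
 * Example (illustrative sketch): typical DMA buffer maintenance with the
 * routines above; `buf`, `buf_pa`, and `len` are hypothetical values that
 * would come from a driver's bus_dma bookkeeping.
 */
static __inline void
dma_maintenance_example(vm_offset_t buf, vm_paddr_t buf_pa, vm_size_t len)
{

	/* CPU -> device: push dirty lines to memory before the device reads. */
	dcache_wb_poc(buf, buf_pa, len);

	/*
	 * Device -> CPU: discard (possibly stale) lines before the device
	 * writes, so the CPU later reads the fresh DMA data from memory.
	 */
	dcache_inv_poc_dma(buf, buf_pa, len);
}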
/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	vm_offset_t eva = sva + size;

	dsb();
	/* write back L1 first */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back and invalidate L2 */
	cpu_l2cache_wbinv_range(pa, size);

	/* then invalidate L1 */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{

	dsb();
	_CP15_TTB_SET(reg);
	dsb();
	_CP15_BPIALL();
	dsb();
	isb();
	tlb_flush_all_ng_local();
}

/*
 * Functions for address checking:
 *
 *  cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
 *  cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
 *  cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
 *  cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
 *
 * They must be called while interrupts are disabled to get a consistent
 * result.
 */
static __inline int
cp15_ats1cpr_check(vm_offset_t addr)
{

	cp15_ats1cpr_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cpw_check(vm_offset_t addr)
{

	cp15_ats1cpw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cur_check(vm_offset_t addr)
{

	cp15_ats1cur_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cuw_check(vm_offset_t addr)
{

	cp15_ats1cuw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

#endif /* !__ARM_ARCH < 6 */
#endif /* !MACHINE_CPU_V6_H */