Index: head/sys/arm/include/cpu-v6.h =================================================================== --- head/sys/arm/include/cpu-v6.h (revision 290655) +++ head/sys/arm/include/cpu-v6.h (revision 290656) @@ -1,611 +1,612 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef MACHINE_CPU_V6_H #define MACHINE_CPU_V6_H /* There are no user serviceable parts here, they may change without notice */ #ifndef _KERNEL #error Only include this file in the kernel #else #include #include "machine/atomic.h" #include "machine/cpufunc.h" #include "machine/cpuinfo.h" #include "machine/sysreg.h" #define CPU_ASID_KERNEL 0 vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t); vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t); /* * Macros to generate CP15 (system control processor) read/write functions. */ #define _FX(s...) #s #define _RF0(fname, aname...) \ static __inline register_t \ fname(void) \ { \ register_t reg; \ __asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _R64F0(fname, aname) \ static __inline uint64_t \ fname(void) \ { \ uint64_t reg; \ __asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \ return(reg); \ } #define _WF0(fname, aname...) \ static __inline void \ fname(void) \ { \ __asm __volatile("mcr\t" _FX(aname)); \ } #define _WF1(fname, aname...) \ static __inline void \ fname(register_t reg) \ { \ __asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \ } #define _W64F1(fname, aname...) \ static __inline void \ fname(uint64_t reg) \ { \ __asm __volatile("mcrr\t" _FX(aname):: "r" (reg)); \ } /* * Raw CP15 maintenance operations * !!! not for external use !!! 
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)		/* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)		/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))		/* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))		/* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))		/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)			/* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)		/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))		/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))		/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))		/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))		/* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))		/* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))		/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))		/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))		/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)		/* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)		/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))		/* Instruction cache invalidate */

/*
 * Publicly accessible functions
 */

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif

/* ARMv6+ and XScale */
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
#if __ARM_ARCH >= 6
-_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0));
-_RF0(cp15_par_get, CP15_PAR);
+_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
+_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
+_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))
#endif

/*CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
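/*
 * Illustrative note (not part of this change): the _RF0/_WF0/_WF1 generators
 * above stringify a CP15_xxx accessor macro into the operand list of an
 * mrc/mcr instruction.  For instance, _RF0(cp15_par_get, CP15_PAR(%0))
 * expands to roughly the inline function below; this is a sketch of the
 * expansion, assuming the CP15_PAR(rr) encoding "p15, 0, rr, c7, c4, 0"
 * introduced in sysreg.h later in this commit.
 */
#if 0	/* example expansion only */
static __inline register_t
cp15_par_get(void)
{
	register_t reg;

	/* "mrc\t" and the stringified CP15_PAR(%0) are pasted together. */
	__asm __volatile("mrc\tp15, 0, %0, c7, c4, 0" : "=r" (reg));
	return(reg);
}
#endif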
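/*
 * Usage sketch (not part of this change): cp15_ats1cpr_set() and the newly
 * added cp15_ats1cpw_set() start a stage 1, current-state PL1 read/write
 * address translation, and cp15_par_get() then returns the result from PAR.
 * A hypothetical helper (example_kva_to_pa is not from this diff) might look
 * like the following; in the short-descriptor PAR format, bit 0 set means
 * the translation aborted and bits [31:12] hold the physical frame.
 */
#if 0	/* usage sketch only */
static __inline vm_paddr_t
example_kva_to_pa(vm_offset_t va)
{
	uint32_t par;

	cp15_ats1cpr_set(va);	/* translate va for a privileged read */
	isb();			/* make sure PAR has been updated */
	par = cp15_par_get();
	if (par & 0x1)		/* F bit: translation aborted */
		return (0);
	return ((par & ~PAGE_MASK) | (va & PAGE_MASK));
}
#endif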
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0)) _RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0)) _RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0)) _RF0(cp15_cbar_get, CP15_CBAR(%0)) /* Performance Monitor registers */ #if __ARM_ARCH == 6 && defined(CPU_ARM1176) _RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0)) _WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0)) _RF0(cp15_pmcr_get, CP15_PMCR(%0)) _WF1(cp15_pmcr_set, CP15_PMCR(%0)) _RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0)) _WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0)) #elif __ARM_ARCH > 6 _RF0(cp15_pmcr_get, CP15_PMCR(%0)) _WF1(cp15_pmcr_set, CP15_PMCR(%0)) _RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0)) _WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0)) _WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0)) _RF0(cp15_pmovsr_get, CP15_PMOVSR(%0)) _WF1(cp15_pmovsr_set, CP15_PMOVSR(%0)) _WF1(cp15_pmswinc_set, CP15_PMSWINC(%0)) _RF0(cp15_pmselr_get, CP15_PMSELR(%0)) _WF1(cp15_pmselr_set, CP15_PMSELR(%0)) _RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0)) _WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0)) _RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0)) _WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0)) _RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0)) _WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0)) _RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0)) _WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0)) _RF0(cp15_pminten_get, CP15_PMINTENSET(%0)) _WF1(cp15_pminten_set, CP15_PMINTENSET(%0)) _WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0)) #endif _RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0)) _WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0)) _RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0)) _WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0)) _RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0)) _WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0)) /* Generic Timer registers - only use when you know the hardware is available */ _RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0)) _WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0)) _RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0)) _WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0)) _RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0)) _WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0)) _RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0)) _WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0)) _RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0)) _WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0)) _RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0)) _WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0)) _RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0)) _WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0)) _RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0)) _WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0)) _RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0)) _WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0)) _R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0)) _R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0)) _R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0)) _W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0)) _R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0)) _W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0)) _R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0)) _W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0)) _R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0)) _W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0)) #undef _FX #undef _RF0 #undef _WF0 #undef _WF1 #if __ARM_ARCH >= 6 /* * Cache and TLB maintenance operations for armv6+ code. The #else block * provides armv4/v5 implementations for a few of these used in common code. */ /* * TLB maintenance operations. */ /* Local (i.e. not broadcasting ) operations. */ /* Flush all TLB entries (even global). */ static __inline void tlb_flush_all_local(void) { dsb(); _CP15_TLBIALL(); dsb(); } /* Flush all not global TLB entries. 
*/ static __inline void tlb_flush_all_ng_local(void) { dsb(); _CP15_TLBIASID(CPU_ASID_KERNEL); dsb(); } /* Flush single TLB entry (even global). */ static __inline void tlb_flush_local(vm_offset_t va) { KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); dsb(); _CP15_TLBIMVA(va | CPU_ASID_KERNEL); dsb(); } /* Flush range of TLB entries (even global). */ static __inline void tlb_flush_range_local(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, size)); dsb(); for (; va < eva; va += PAGE_SIZE) _CP15_TLBIMVA(va | CPU_ASID_KERNEL); dsb(); } /* Broadcasting operations. */ #if __ARM_ARCH >= 7 && defined SMP static __inline void tlb_flush_all(void) { dsb(); _CP15_TLBIALLIS(); dsb(); } static __inline void tlb_flush_all_ng(void) { dsb(); _CP15_TLBIASIDIS(CPU_ASID_KERNEL); dsb(); } static __inline void tlb_flush(vm_offset_t va) { KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); dsb(); _CP15_TLBIMVAAIS(va); dsb(); } static __inline void tlb_flush_range(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, size)); dsb(); for (; va < eva; va += PAGE_SIZE) _CP15_TLBIMVAAIS(va); dsb(); } #else /* SMP */ #define tlb_flush_all() tlb_flush_all_local() #define tlb_flush_all_ng() tlb_flush_all_ng_local() #define tlb_flush(va) tlb_flush_local(va) #define tlb_flush_range(va, size) tlb_flush_range_local(va, size) #endif /* SMP */ /* * Cache maintenance operations. */ /* Sync I and D caches to PoU */ static __inline void icache_sync(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 && defined SMP _CP15_DCCMVAU(va); #else _CP15_DCCMVAC(va); #endif } dsb(); #if __ARM_ARCH >= 7 && defined SMP _CP15_ICIALLUIS(); #else _CP15_ICIALLU(); #endif dsb(); isb(); } /* Invalidate I cache */ static __inline void icache_inv_all(void) { #if __ARM_ARCH >= 7 && defined SMP _CP15_ICIALLUIS(); #else _CP15_ICIALLU(); #endif dsb(); isb(); } /* Invalidate branch predictor buffer */ static __inline void bpb_inv_all(void) { #if __ARM_ARCH >= 7 && defined SMP _CP15_BPIALLIS(); #else _CP15_BPIALL(); #endif dsb(); isb(); } /* Write back D-cache to PoU */ static __inline void dcache_wb_pou(vm_offset_t va, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 && defined SMP _CP15_DCCMVAU(va); #else _CP15_DCCMVAC(va); #endif } dsb(); } /* * Invalidate D-cache to PoC * * Caches are invalidated from outermost to innermost as fresh cachelines * flow in this direction. In given range, if there was no dirty cacheline * in any cache before, no stale cacheline should remain in them after this * operation finishes. */ static __inline void dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; dsb(); /* invalidate L2 first */ cpu_l2cache_inv_range(pa, size); /* then L1 */ va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); } /* * Discard D-cache lines to PoC, prior to overwrite by DMA engine. 
* * Normal invalidation does L2 then L1 to ensure that stale data from L2 doesn't * flow into L1 while invalidating. This routine is intended to be used only * when invalidating a buffer before a DMA operation loads new data into memory. * The concern in this case is that dirty lines are not evicted to main memory, * overwriting the DMA data. For that reason, the L1 is done first to ensure * that an evicted L1 line doesn't flow to L2 after the L2 has been cleaned. */ static __inline void dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; /* invalidate L1 first */ dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); /* then L2 */ cpu_l2cache_inv_range(pa, size); } /* * Write back D-cache to PoC * * Caches are written back from innermost to outermost as dirty cachelines * flow in this direction. In given range, no dirty cacheline should remain * in any cache after this operation finishes. */ static __inline void dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_offset_t eva = va + size; dsb(); va &= ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); cpu_l2cache_wb_range(pa, size); } /* Write back and invalidate D-cache to PoC */ static __inline void dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size) { vm_offset_t va; vm_offset_t eva = sva + size; dsb(); /* write back L1 first */ va = sva & ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); /* then write back and invalidate L2 */ cpu_l2cache_wbinv_range(pa, size); /* then invalidate L1 */ va = sva & ~cpuinfo.dcache_line_mask; for ( ; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); } /* Set TTB0 register */ static __inline void cp15_ttbr_set(uint32_t reg) { dsb(); _CP15_TTB_SET(reg); dsb(); _CP15_BPIALL(); dsb(); isb(); tlb_flush_all_ng_local(); } #else /* ! __ARM_ARCH >= 6 */ /* * armv4/5 compatibility shims. * * These functions provide armv4 cache maintenance using the new armv6 names. * Included here are just the functions actually used now in common code; it may * be necessary to add things here over time. * * The callers of the dcache functions expect these routines to handle address * and size values which are not aligned to cacheline boundaries; the armv4 and * armv5 asm code handles that. */ static __inline void dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { cpu_dcache_inv_range(va, size); cpu_l2cache_inv_range(va, size); } static __inline void dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { /* See armv6 code, above, for why we do L2 before L1 in this case. */ cpu_l2cache_inv_range(va, size); cpu_dcache_inv_range(va, size); } static __inline void dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { cpu_dcache_wb_range(va, size); cpu_l2cache_wb_range(va, size); } #endif /* __ARM_ARCH >= 6 */ #endif /* _KERNEL */ #endif /* !MACHINE_CPU_V6_H */ Index: head/sys/arm/include/sysreg.h =================================================================== --- head/sys/arm/include/sysreg.h (revision 290655) +++ head/sys/arm/include/sysreg.h (revision 290656) @@ -1,289 +1,289 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Macros to make working with the System Control Registers simpler. * * Note that when register r0 is hard-coded in these definitions it means the * cp15 operation neither reads nor writes the register, and r0 is used only * because some syntatically-valid register name has to appear at that point to * keep the asm parser happy. */ #ifndef MACHINE_SYSREG_H #define MACHINE_SYSREG_H #include /* * CP15 C0 registers */ #define CP15_MIDR(rr) p15, 0, rr, c0, c0, 0 /* Main ID Register */ #define CP15_CTR(rr) p15, 0, rr, c0, c0, 1 /* Cache Type Register */ #define CP15_TCMTR(rr) p15, 0, rr, c0, c0, 2 /* TCM Type Register */ #define CP15_TLBTR(rr) p15, 0, rr, c0, c0, 3 /* TLB Type Register */ #define CP15_MPIDR(rr) p15, 0, rr, c0, c0, 5 /* Multiprocessor Affinity Register */ #define CP15_REVIDR(rr) p15, 0, rr, c0, c0, 6 /* Revision ID Register */ #define CP15_ID_PFR0(rr) p15, 0, rr, c0, c1, 0 /* Processor Feature Register 0 */ #define CP15_ID_PFR1(rr) p15, 0, rr, c0, c1, 1 /* Processor Feature Register 1 */ #define CP15_ID_DFR0(rr) p15, 0, rr, c0, c1, 2 /* Debug Feature Register 0 */ #define CP15_ID_AFR0(rr) p15, 0, rr, c0, c1, 3 /* Auxiliary Feature Register 0 */ #define CP15_ID_MMFR0(rr) p15, 0, rr, c0, c1, 4 /* Memory Model Feature Register 0 */ #define CP15_ID_MMFR1(rr) p15, 0, rr, c0, c1, 5 /* Memory Model Feature Register 1 */ #define CP15_ID_MMFR2(rr) p15, 0, rr, c0, c1, 6 /* Memory Model Feature Register 2 */ #define CP15_ID_MMFR3(rr) p15, 0, rr, c0, c1, 7 /* Memory Model Feature Register 3 */ #define CP15_ID_ISAR0(rr) p15, 0, rr, c0, c2, 0 /* Instruction Set Attribute Register 0 */ #define CP15_ID_ISAR1(rr) p15, 0, rr, c0, c2, 1 /* Instruction Set Attribute Register 1 */ #define CP15_ID_ISAR2(rr) p15, 0, rr, c0, c2, 2 /* Instruction Set Attribute Register 2 */ #define CP15_ID_ISAR3(rr) p15, 0, rr, c0, c2, 3 /* Instruction Set Attribute Register 3 */ #define CP15_ID_ISAR4(rr) p15, 0, rr, c0, c2, 4 /* Instruction Set Attribute Register 4 */ #define CP15_ID_ISAR5(rr) p15, 0, rr, c0, c2, 5 /* Instruction Set Attribute Register 5 */ #define CP15_CCSIDR(rr) p15, 1, rr, c0, c0, 0 /* Cache Size ID Registers */ #define CP15_CLIDR(rr) p15, 1, rr, c0, c0, 1 /* Cache Level ID Register */ #define CP15_AIDR(rr) p15, 1, rr, c0, c0, 7 /* Auxiliary ID Register */ #define CP15_CSSELR(rr) p15, 2, rr, c0, c0, 0 /* Cache Size Selection Register */ /* * CP15 C1 registers */ #define CP15_SCTLR(rr) p15, 0, rr, c1, 
c0, 0 /* System Control Register */
#define CP15_ACTLR(rr)		p15, 0, rr, c1, c0, 1 /* IMPLEMENTATION DEFINED Auxiliary Control Register */
#define CP15_CPACR(rr)		p15, 0, rr, c1, c0, 2 /* Coprocessor Access Control Register */
#define CP15_SCR(rr)		p15, 0, rr, c1, c1, 0 /* Secure Configuration Register */
#define CP15_SDER(rr)		p15, 0, rr, c1, c1, 1 /* Secure Debug Enable Register */
#define CP15_NSACR(rr)		p15, 0, rr, c1, c1, 2 /* Non-Secure Access Control Register */

/*
 * CP15 C2 registers
 */
#define CP15_TTBR0(rr)		p15, 0, rr, c2, c0, 0 /* Translation Table Base Register 0 */
#define CP15_TTBR1(rr)		p15, 0, rr, c2, c0, 1 /* Translation Table Base Register 1 */
#define CP15_TTBCR(rr)		p15, 0, rr, c2, c0, 2 /* Translation Table Base Control Register */

/*
 * CP15 C3 registers
 */
#define CP15_DACR(rr)		p15, 0, rr, c3, c0, 0 /* Domain Access Control Register */

/*
 * CP15 C5 registers
 */
#define CP15_DFSR(rr)		p15, 0, rr, c5, c0, 0 /* Data Fault Status Register */
#if __ARM_ARCH >= 6
/* From ARMv6: */
#define CP15_IFSR(rr)		p15, 0, rr, c5, c0, 1 /* Instruction Fault Status Register */
#endif
#if __ARM_ARCH >= 7
/* From ARMv7: */
#define CP15_ADFSR(rr)		p15, 0, rr, c5, c1, 0 /* Auxiliary Data Fault Status Register */
#define CP15_AIFSR(rr)		p15, 0, rr, c5, c1, 1 /* Auxiliary Instruction Fault Status Register */
#endif

/*
 * CP15 C6 registers
 */
#define CP15_DFAR(rr)		p15, 0, rr, c6, c0, 0 /* Data Fault Address Register */
#if __ARM_ARCH >= 6
/* From ARMv6k: */
#define CP15_IFAR(rr)		p15, 0, rr, c6, c0, 2 /* Instruction Fault Address Register */
#endif

/*
 * CP15 C7 registers
 */
#if __ARM_ARCH >= 7 && defined(SMP)
/* From ARMv7: */
#define CP15_ICIALLUIS		p15, 0, r0, c7, c1, 0 /* Instruction cache invalidate all PoU, IS */
#define CP15_BPIALLIS		p15, 0, r0, c7, c1, 6 /* Branch predictor invalidate all IS */
#endif
-#define CP15_PAR		p15, 0, r0, c7, c4, 0 /* Physical Address Register */
+#define CP15_PAR(rr)		p15, 0, rr, c7, c4, 0 /* Physical Address Register */
#define CP15_ICIALLU		p15, 0, r0, c7, c5, 0 /* Instruction cache invalidate all PoU */
#define CP15_ICIMVAU(rr)	p15, 0, rr, c7, c5, 1 /* Instruction cache invalidate */
#if __ARM_ARCH == 6
/* Deprecated in ARMv7 */
#define CP15_CP15ISB		p15, 0, r0, c7, c5, 4 /* ISB */
#endif
#define CP15_BPIALL		p15, 0, r0, c7, c5, 6 /* Branch predictor invalidate all */
#define CP15_BPIMVA		p15, 0, rr, c7, c5, 7 /* Branch predictor invalidate by MVA */
#if __ARM_ARCH == 6
/* Only ARMv6: */
#define CP15_DCIALL		p15, 0, r0, c7, c6, 0 /* Data cache invalidate all */
#endif
#define CP15_DCIMVAC(rr)	p15, 0, rr, c7, c6, 1 /* Data cache invalidate by MVA PoC */
#define CP15_DCISW(rr)		p15, 0, rr, c7, c6, 2 /* Data cache invalidate by set/way */
#define CP15_ATS1CPR(rr)	p15, 0, rr, c7, c8, 0 /* Stage 1 Current state PL1 read */
#define CP15_ATS1CPW(rr)	p15, 0, rr, c7, c8, 1 /* Stage 1 Current state PL1 write */
#define CP15_ATS1CUR(rr)	p15, 0, rr, c7, c8, 2 /* Stage 1 Current state unprivileged read */
#define CP15_ATS1CUW(rr)	p15, 0, rr, c7, c8, 3 /* Stage 1 Current state unprivileged write */
#if __ARM_ARCH >= 7
/* From ARMv7: */
#define CP15_ATS12NSOPR(rr)	p15, 0, rr, c7, c8, 4 /* Stages 1 and 2 Non-secure only PL1 read */
#define CP15_ATS12NSOPW(rr)	p15, 0, rr, c7, c8, 5 /* Stages 1 and 2 Non-secure only PL1 write */
#define CP15_ATS12NSOUR(rr)	p15, 0, rr, c7, c8, 6 /* Stages 1 and 2 Non-secure only unprivileged read */
#define CP15_ATS12NSOUW(rr)	p15, 0, rr, c7, c8, 7 /* Stages 1 and 2 Non-secure only unprivileged write */
#endif
#if __ARM_ARCH == 6
/* Only ARMv6: */
#define
CP15_DCCALL p15, 0, r0, c7, c10, 0 /* Data cache clean all */ #endif #define CP15_DCCMVAC(rr) p15, 0, rr, c7, c10, 1 /* Data cache clean by MVA PoC */ #define CP15_DCCSW(rr) p15, 0, rr, c7, c10, 2 /* Data cache clean by set/way */ #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_CP15DSB p15, 0, r0, c7, c10, 4 /* DSB */ #define CP15_CP15DMB p15, 0, r0, c7, c10, 5 /* DMB */ #define CP15_CP15WFI p15, 0, r0, c7, c0, 4 /* WFI */ #endif #if __ARM_ARCH >= 7 /* From ARMv7: */ #define CP15_DCCMVAU(rr) p15, 0, rr, c7, c11, 1 /* Data cache clean by MVA PoU */ #endif #if __ARM_ARCH == 6 /* Only ARMv6: */ #define CP15_DCCIALL p15, 0, r0, c7, c14, 0 /* Data cache clean and invalidate all */ #endif #define CP15_DCCIMVAC(rr) p15, 0, rr, c7, c14, 1 /* Data cache clean and invalidate by MVA PoC */ #define CP15_DCCISW(rr) p15, 0, rr, c7, c14, 2 /* Data cache clean and invalidate by set/way */ /* * CP15 C8 registers */ #if __ARM_ARCH >= 7 && defined(SMP) /* From ARMv7: */ #define CP15_TLBIALLIS p15, 0, r0, c8, c3, 0 /* Invalidate entire unified TLB IS */ #define CP15_TLBIMVAIS(rr) p15, 0, rr, c8, c3, 1 /* Invalidate unified TLB by MVA IS */ #define CP15_TLBIASIDIS(rr) p15, 0, rr, c8, c3, 2 /* Invalidate unified TLB by ASID IS */ #define CP15_TLBIMVAAIS(rr) p15, 0, rr, c8, c3, 3 /* Invalidate unified TLB by MVA, all ASID IS */ #endif #define CP15_TLBIALL p15, 0, r0, c8, c7, 0 /* Invalidate entire unified TLB */ #define CP15_TLBIMVA(rr) p15, 0, rr, c8, c7, 1 /* Invalidate unified TLB by MVA */ #define CP15_TLBIASID(rr) p15, 0, rr, c8, c7, 2 /* Invalidate unified TLB by ASID */ #if __ARM_ARCH >= 6 /* From ARMv6: */ #define CP15_TLBIMVAA(rr) p15, 0, rr, c8, c7, 3 /* Invalidate unified TLB by MVA, all ASID */ #endif /* * CP15 C9 registers */ #if __ARM_ARCH == 6 && defined(CPU_ARM1176) #define CP15_PMUSERENR(rr) p15, 0, rr, c15, c9, 0 /* Access Validation Control Register */ #define CP15_PMCR(rr) p15, 0, rr, c15, c12, 0 /* Performance Monitor Control Register */ #define CP15_PMCCNTR(rr) p15, 0, rr, c15, c12, 1 /* PM Cycle Count Register */ #elif __ARM_ARCH > 6 #define CP15_L2CTLR(rr) p15, 1, rr, c9, c0, 2 /* L2 Control Register */ #define CP15_PMCR(rr) p15, 0, rr, c9, c12, 0 /* Performance Monitor Control Register */ #define CP15_PMCNTENSET(rr) p15, 0, rr, c9, c12, 1 /* PM Count Enable Set Register */ #define CP15_PMCNTENCLR(rr) p15, 0, rr, c9, c12, 2 /* PM Count Enable Clear Register */ #define CP15_PMOVSR(rr) p15, 0, rr, c9, c12, 3 /* PM Overflow Flag Status Register */ #define CP15_PMSWINC(rr) p15, 0, rr, c9, c12, 4 /* PM Software Increment Register */ #define CP15_PMSELR(rr) p15, 0, rr, c9, c12, 5 /* PM Event Counter Selection Register */ #define CP15_PMCCNTR(rr) p15, 0, rr, c9, c13, 0 /* PM Cycle Count Register */ #define CP15_PMXEVTYPER(rr) p15, 0, rr, c9, c13, 1 /* PM Event Type Select Register */ #define CP15_PMXEVCNTRR(rr) p15, 0, rr, c9, c13, 2 /* PM Event Count Register */ #define CP15_PMUSERENR(rr) p15, 0, rr, c9, c14, 0 /* PM User Enable Register */ #define CP15_PMINTENSET(rr) p15, 0, rr, c9, c14, 1 /* PM Interrupt Enable Set Register */ #define CP15_PMINTENCLR(rr) p15, 0, rr, c9, c14, 2 /* PM Interrupt Enable Clear Register */ #endif /* * CP15 C10 registers */ /* Without LPAE this is PRRR, with LPAE it's MAIR0 */ #define CP15_PRRR(rr) p15, 0, rr, c10, c2, 0 /* Primary Region Remap Register */ #define CP15_MAIR0(rr) p15, 0, rr, c10, c2, 0 /* Memory Attribute Indirection Register 0 */ /* Without LPAE this is NMRR, with LPAE it's MAIR1 */ #define CP15_NMRR(rr) p15, 0, rr, c10, c2, 1 /* Normal Memory 
Remap Register */ #define CP15_MAIR1(rr) p15, 0, rr, c10, c2, 1 /* Memory Attribute Indirection Register 1 */ #define CP15_AMAIR0(rr) p15, 0, rr, c10, c3, 0 /* Auxiliary Memory Attribute Indirection Register 0 */ #define CP15_AMAIR1(rr) p15, 0, rr, c10, c3, 1 /* Auxiliary Memory Attribute Indirection Register 1 */ /* * CP15 C12 registers */ #define CP15_VBAR(rr) p15, 0, rr, c12, c0, 0 /* Vector Base Address Register */ #define CP15_MVBAR(rr) p15, 0, rr, c12, c0, 1 /* Monitor Vector Base Address Register */ #define CP15_ISR(rr) p15, 0, rr, c12, c1, 0 /* Interrupt Status Register */ /* * CP15 C13 registers */ #define CP15_FCSEIDR(rr) p15, 0, rr, c13, c0, 0 /* FCSE Process ID Register */ #define CP15_CONTEXTIDR(rr) p15, 0, rr, c13, c0, 1 /* Context ID Register */ #define CP15_TPIDRURW(rr) p15, 0, rr, c13, c0, 2 /* User Read/Write Thread ID Register */ #define CP15_TPIDRURO(rr) p15, 0, rr, c13, c0, 3 /* User Read-Only Thread ID Register */ #define CP15_TPIDRPRW(rr) p15, 0, rr, c13, c0, 4 /* PL1 only Thread ID Register */ /* * CP15 C14 registers * These are the Generic Timer registers and may be unallocated on some SoCs. * Only use these when you know the Generic Timer is available. */ #define CP15_CNTFRQ(rr) p15, 0, rr, c14, c0, 0 /* Counter Frequency Register */ #define CP15_CNTKCTL(rr) p15, 0, rr, c14, c1, 0 /* Timer PL1 Control Register */ #define CP15_CNTP_TVAL(rr) p15, 0, rr, c14, c2, 0 /* PL1 Physical Timer Value Register */ #define CP15_CNTP_CTL(rr) p15, 0, rr, c14, c2, 1 /* PL1 Physical Timer Control Register */ #define CP15_CNTV_TVAL(rr) p15, 0, rr, c14, c3, 0 /* Virtual Timer Value Register */ #define CP15_CNTV_CTL(rr) p15, 0, rr, c14, c3, 1 /* Virtual Timer Control Register */ #define CP15_CNTHCTL(rr) p15, 4, rr, c14, c1, 0 /* Timer PL2 Control Register */ #define CP15_CNTHP_TVAL(rr) p15, 4, rr, c14, c2, 0 /* PL2 Physical Timer Value Register */ #define CP15_CNTHP_CTL(rr) p15, 4, rr, c14, c2, 1 /* PL2 Physical Timer Control Register */ /* 64-bit registers for use with mcrr/mrrc */ #define CP15_CNTPCT(rq, rr) p15, 0, rq, rr, c14 /* Physical Count Register */ #define CP15_CNTVCT(rq, rr) p15, 1, rq, rr, c14 /* Virtual Count Register */ #define CP15_CNTP_CVAL(rq, rr) p15, 2, rq, rr, c14 /* PL1 Physical Timer Compare Value Register */ #define CP15_CNTV_CVAL(rq, rr) p15, 3, rq, rr, c14 /* Virtual Timer Compare Value Register */ #define CP15_CNTVOFF(rq, rr) p15, 4, rq, rr, c14 /* Virtual Offset Register */ #define CP15_CNTHP_CVAL(rq, rr) p15, 6, rq, rr, c14 /* PL2 Physical Timer Compare Value Register */ /* * CP15 C15 registers */ #define CP15_CBAR(rr) p15, 4, rr, c15, c0, 0 /* Configuration Base Address Register */ #endif /* !MACHINE_SYSREG_H */
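/*
 * Usage sketch (not part of this change): the PoC maintenance routines in
 * cpu-v6.h above are meant to bracket DMA transfers.  The hypothetical
 * example_dma_sync() below (buf_va/buf_pa/len are illustrative names, not
 * taken from the diff) shows the ordering the file's comments call for.
 */
static void
example_dma_sync(vm_offset_t buf_va, vm_paddr_t buf_pa, vm_size_t len)
{
	/* CPU filled the buffer and the device will read it: clean to PoC. */
	dcache_wb_poc(buf_va, buf_pa, len);

	/*
	 * The device will write the buffer: discard our cachelines before
	 * starting the transfer, L1 first and then L2, per the comment on
	 * dcache_inv_poc_dma(), so a later L1 eviction cannot land in L2
	 * behind the invalidate and overwrite the incoming DMA data.
	 */
	dcache_inv_poc_dma(buf_va, buf_pa, len);
}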
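/*
 * Usage sketch (not part of this change): tlb_flush_range() in cpu-v6.h
 * KASSERTs that both the address and the size are page aligned, so callers
 * with an arbitrary range round it first.  example_unmap_flush() is a
 * hypothetical illustration only.
 */
static void
example_unmap_flush(vm_offset_t sva, vm_offset_t eva)
{
	sva = trunc_page(sva);
	eva = round_page(eva);

	/* ... clear the page table entries covering [sva, eva) here ... */

	/* Broadcast on ARMv7 SMP kernels, local-CPU only otherwise. */
	tlb_flush_range(sva, eva - sva);
}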