Index: stable/10/sys/arm/arm/cpufunc.c =================================================================== --- stable/10/sys/arm/arm/cpufunc.c (revision 283334) +++ stable/10/sys/arm/arm/cpufunc.c (revision 283335) @@ -1,1784 +1,1795 @@ /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */ /*- * arm9 support code Copyright (C) 2001 ARM Ltd * Copyright (c) 1997 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * cpufuncs.c * * C functions for supporting CPU / MMU / TLB specific operations. * * Created : 30/01/97 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CPU_XSCALE_80200 #include #include #endif #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219) #include #include #endif /* * Some definitions in i81342reg.h clash with i80321reg.h. * This only happens for the LINT kernel. As it happens, * we don't need anything from i81342reg.h that we already * got from somewhere else during a LINT compile. 
*/ #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT) #include #endif #ifdef CPU_XSCALE_IXP425 #include #include #endif /* PRIMARY CACHE VARIABLES */ int arm_picache_size; int arm_picache_line_size; int arm_picache_ways; int arm_pdcache_size; /* and unified */ int arm_pdcache_line_size; int arm_pdcache_ways; int arm_pcache_type; int arm_pcache_unified; int arm_dcache_align; int arm_dcache_align_mask; u_int arm_cache_level; u_int arm_cache_type[14]; u_int arm_cache_loc; /* 1 == use cpu_sleep(), 0 == don't */ int cpu_do_powersave; int ctrl; #ifdef CPU_ARM9 struct cpu_functions arm9_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ cpufunc_domains, /* Domain */ arm9_setttb, /* Setttb */ cpufunc_faultstatus, /* Faultstatus */ cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm9_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushI, /* tlb_flushI */ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ arm9_icache_sync_all, /* icache_sync_all */ arm9_icache_sync_range, /* icache_sync_range */ arm9_dcache_wbinv_all, /* dcache_wbinv_all */ arm9_dcache_wbinv_range, /* dcache_wbinv_range */ arm9_dcache_inv_range, /* dcache_inv_range */ arm9_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ arm9_idcache_wbinv_all, /* idcache_wbinv_all */ arm9_idcache_wbinv_range, /* idcache_wbinv_range */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ (void *)cpufunc_nullop, /* flush_brnchtgt_E */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ arm9_context_switch, /* context_switch */ arm9_setup /* cpu setup */ }; #endif /* CPU_ARM9 */ #if defined(CPU_ARM9E) || defined(CPU_ARM10) struct cpu_functions armv5_ec_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ cpufunc_domains, /* Domain */ armv5_ec_setttb, /* Setttb */ cpufunc_faultstatus, /* Faultstatus */ cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm10_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushI, /* tlb_flushI */ arm10_tlb_flushI_SE, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ armv5_ec_icache_sync_all, /* icache_sync_all */ armv5_ec_icache_sync_range, /* icache_sync_range */ armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */ armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */ armv5_ec_dcache_inv_range, /* dcache_inv_range */ armv5_ec_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */ armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* 
Other functions */ cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ (void *)cpufunc_nullop, /* flush_brnchtgt_E */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ arm10_context_switch, /* context_switch */ arm10_setup /* cpu setup */ }; struct cpu_functions sheeva_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ cpufunc_domains, /* Domain */ sheeva_setttb, /* Setttb */ cpufunc_faultstatus, /* Faultstatus */ cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm10_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushI, /* tlb_flushI */ arm10_tlb_flushI_SE, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ armv5_ec_icache_sync_all, /* icache_sync_all */ armv5_ec_icache_sync_range, /* icache_sync_range */ armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */ sheeva_dcache_wbinv_range, /* dcache_wbinv_range */ sheeva_dcache_inv_range, /* dcache_inv_range */ sheeva_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */ sheeva_idcache_wbinv_range, /* idcache_wbinv_all */ sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */ sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */ sheeva_l2cache_inv_range, /* l2cache_inv_range */ sheeva_l2cache_wb_range, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ (void *)cpufunc_nullop, /* flush_brnchtgt_E */ sheeva_cpu_sleep, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ arm10_context_switch, /* context_switch */ arm10_setup /* cpu setup */ }; #endif /* CPU_ARM9E || CPU_ARM10 */ #ifdef CPU_ARM10 struct cpu_functions arm10_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ cpufunc_domains, /* Domain */ arm10_setttb, /* Setttb */ cpufunc_faultstatus, /* Faultstatus */ cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm10_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushI, /* tlb_flushI */ arm10_tlb_flushI_SE, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ arm10_icache_sync_all, /* icache_sync_all */ arm10_icache_sync_range, /* icache_sync_range */ arm10_dcache_wbinv_all, /* dcache_wbinv_all */ arm10_dcache_wbinv_range, /* dcache_wbinv_range */ arm10_dcache_inv_range, /* dcache_inv_range */ arm10_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ arm10_idcache_wbinv_all, /* idcache_wbinv_all */ arm10_idcache_wbinv_range, /* idcache_wbinv_range */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ (void *)cpufunc_nullop, /* 
flush_brnchtgt_E */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ arm10_context_switch, /* context_switch */ arm10_setup /* cpu setup */ }; #endif /* CPU_ARM10 */ #ifdef CPU_MV_PJ4B struct cpu_functions pj4bv7_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ arm11_drain_writebuf, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ cpufunc_domains, /* Domain */ pj4b_setttb, /* Setttb */ cpufunc_faultstatus, /* Faultstatus */ cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ armv7_tlb_flushID, /* tlb_flushID */ armv7_tlb_flushID_SE, /* tlb_flushID_SE */ armv7_tlb_flushID, /* tlb_flushI */ armv7_tlb_flushID_SE, /* tlb_flushI_SE */ armv7_tlb_flushID, /* tlb_flushD */ armv7_tlb_flushID_SE, /* tlb_flushD_SE */ /* Cache operations */ armv7_idcache_wbinv_all, /* icache_sync_all */ armv7_icache_sync_range, /* icache_sync_range */ armv7_dcache_wbinv_all, /* dcache_wbinv_all */ armv7_dcache_wbinv_range, /* dcache_wbinv_range */ armv7_dcache_inv_range, /* dcache_inv_range */ armv7_dcache_wb_range, /* dcache_wb_range */ armv7_idcache_inv_all, /* idcache_inv_all */ armv7_idcache_wbinv_all, /* idcache_wbinv_all */ armv7_idcache_wbinv_range, /* idcache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ pj4b_drain_readbuf, /* flush_prefetchbuf */ arm11_drain_writebuf, /* drain_writebuf */ pj4b_flush_brnchtgt_all, /* flush_brnchtgt_C */ pj4b_flush_brnchtgt_va, /* flush_brnchtgt_E */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ arm11_context_switch, /* context_switch */ pj4bv7_setup /* cpu setup */ }; #endif /* CPU_MV_PJ4B */ #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \ defined(CPU_XSCALE_80219) struct cpu_functions xscale_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ xscale_cpwait, /* cpwait */ /* MMU functions */ xscale_control, /* control */ cpufunc_domains, /* domain */ xscale_setttb, /* setttb */ cpufunc_faultstatus, /* faultstatus */ cpufunc_faultaddress, /* faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ xscale_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushI, /* tlb_flushI */ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ xscale_cache_syncI, /* icache_sync_all */ xscale_cache_syncI_rng, /* icache_sync_range */ xscale_cache_purgeD, /* dcache_wbinv_all */ xscale_cache_purgeD_rng, /* dcache_wbinv_range */ xscale_cache_flushD_rng, /* dcache_inv_range */ xscale_cache_cleanD_rng, /* dcache_wb_range */ xscale_cache_flushID, /* idcache_inv_all */ xscale_cache_purgeID, /* idcache_wbinv_all */ xscale_cache_purgeID_rng, /* idcache_wbinv_range */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ (void *)cpufunc_nullop, /* 
flush_brnchtgt_E */ xscale_cpu_sleep, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ xscale_context_switch, /* context_switch */ xscale_setup /* cpu setup */ }; #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 CPU_XSCALE_80219 */ #ifdef CPU_XSCALE_81342 struct cpu_functions xscalec3_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ xscale_cpwait, /* cpwait */ /* MMU functions */ xscale_control, /* control */ cpufunc_domains, /* domain */ xscalec3_setttb, /* setttb */ cpufunc_faultstatus, /* faultstatus */ cpufunc_faultaddress, /* faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ xscale_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushI, /* tlb_flushI */ (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ xscalec3_cache_syncI, /* icache_sync_all */ xscalec3_cache_syncI_rng, /* icache_sync_range */ xscalec3_cache_purgeD, /* dcache_wbinv_all */ xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */ xscale_cache_flushD_rng, /* dcache_inv_range */ xscalec3_cache_cleanD_rng, /* dcache_wb_range */ xscale_cache_flushID, /* idcache_inv_all */ xscalec3_cache_purgeID, /* idcache_wbinv_all */ xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */ xscalec3_l2cache_purge, /* l2cache_wbinv_all */ xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */ xscalec3_l2cache_flush_rng, /* l2cache_inv_range */ xscalec3_l2cache_clean_rng, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ (void *)cpufunc_nullop, /* flush_brnchtgt_E */ xscale_cpu_sleep, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ xscalec3_context_switch, /* context_switch */ xscale_setup /* cpu setup */ }; #endif /* CPU_XSCALE_81342 */ #if defined(CPU_FA526) || defined(CPU_FA626TE) struct cpu_functions fa526_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ cpufunc_domains, /* domain */ fa526_setttb, /* setttb */ cpufunc_faultstatus, /* faultstatus */ cpufunc_faultaddress, /* faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ fa526_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushI, /* tlb_flushI */ fa526_tlb_flushI_SE, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ fa526_icache_sync_all, /* icache_sync_all */ fa526_icache_sync_range, /* icache_sync_range */ fa526_dcache_wbinv_all, /* dcache_wbinv_all */ fa526_dcache_wbinv_range, /* dcache_wbinv_range */ fa526_dcache_inv_range, /* dcache_inv_range */ fa526_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ fa526_idcache_wbinv_all, /* idcache_wbinv_all */ fa526_idcache_wbinv_range, /* idcache_wbinv_range */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ fa526_flush_prefetchbuf, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ fa526_flush_brnchtgt_E, /* 
flush_brnchtgt_E */ fa526_cpu_sleep, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ fa526_context_switch, /* context_switch */ fa526_setup /* cpu setup */ }; #endif /* CPU_FA526 || CPU_FA626TE */ #if defined(CPU_ARM1136) struct cpu_functions arm1136_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ cpufunc_domains, /* Domain */ arm11x6_setttb, /* Setttb */ cpufunc_faultstatus, /* Faultstatus */ cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ arm11_tlb_flushID, /* tlb_flushID */ arm11_tlb_flushID_SE, /* tlb_flushID_SE */ arm11_tlb_flushI, /* tlb_flushI */ arm11_tlb_flushI_SE, /* tlb_flushI_SE */ arm11_tlb_flushD, /* tlb_flushD */ arm11_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ arm11x6_icache_sync_all, /* icache_sync_all */ arm11x6_icache_sync_range, /* icache_sync_range */ arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */ armv6_dcache_wbinv_range, /* dcache_wbinv_range */ armv6_dcache_inv_range, /* dcache_inv_range */ armv6_dcache_wb_range, /* dcache_wb_range */ armv6_idcache_inv_all, /* idcache_inv_all */ arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */ arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */ arm11_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ (void *)cpufunc_nullop, /* flush_brnchtgt_E */ arm11_sleep, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ arm11_context_switch, /* context_switch */ arm11x6_setup /* cpu setup */ }; #endif /* CPU_ARM1136 */ #if defined(CPU_ARM1176) struct cpu_functions arm1176_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ cpufunc_domains, /* Domain */ arm11x6_setttb, /* Setttb */ cpufunc_faultstatus, /* Faultstatus */ cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ arm11_tlb_flushID, /* tlb_flushID */ arm11_tlb_flushID_SE, /* tlb_flushID_SE */ arm11_tlb_flushI, /* tlb_flushI */ arm11_tlb_flushI_SE, /* tlb_flushI_SE */ arm11_tlb_flushD, /* tlb_flushD */ arm11_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ arm11x6_icache_sync_all, /* icache_sync_all */ arm11x6_icache_sync_range, /* icache_sync_range */ arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */ armv6_dcache_wbinv_range, /* dcache_wbinv_range */ armv6_dcache_inv_range, /* dcache_inv_range */ armv6_dcache_wb_range, /* dcache_wb_range */ armv6_idcache_inv_all, /* idcache_inv_all */ arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */ arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */ arm11_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ (void *)cpufunc_nullop, /* flush_brnchtgt_E */ arm11x6_sleep, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* 
dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ arm11_context_switch, /* context_switch */ arm11x6_setup /* cpu setup */ }; #endif /*CPU_ARM1176 */ #if defined(CPU_CORTEXA) || defined(CPU_KRAIT) struct cpu_functions cortexa_cpufuncs = { /* CPU functions */ cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ cpufunc_domains, /* Domain */ armv7_setttb, /* Setttb */ cpufunc_faultstatus, /* Faultstatus */ cpufunc_faultaddress, /* Faultaddress */ /* * TLB functions. ARMv7 does all TLB ops based on a unified TLB model * whether the hardware implements separate I+D or not, so we use the * same 'ID' functions for all 3 variations. */ armv7_tlb_flushID, /* tlb_flushID */ armv7_tlb_flushID_SE, /* tlb_flushID_SE */ armv7_tlb_flushID, /* tlb_flushI */ armv7_tlb_flushID_SE, /* tlb_flushI_SE */ armv7_tlb_flushID, /* tlb_flushD */ armv7_tlb_flushID_SE, /* tlb_flushD_SE */ /* Cache operations */ armv7_icache_sync_all, /* icache_sync_all */ armv7_icache_sync_range, /* icache_sync_range */ armv7_dcache_wbinv_all, /* dcache_wbinv_all */ armv7_dcache_wbinv_range, /* dcache_wbinv_range */ armv7_dcache_inv_range, /* dcache_inv_range */ armv7_dcache_wb_range, /* dcache_wb_range */ armv7_idcache_inv_all, /* idcache_inv_all */ armv7_idcache_wbinv_all, /* idcache_wbinv_all */ armv7_idcache_wbinv_range, /* idcache_wbinv_range */ /* * Note: For CPUs using the PL310 the L2 ops are filled in when the * L2 cache controller is actually enabled. */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ cpufunc_nullop, /* flush_prefetchbuf */ armv7_drain_writebuf, /* drain_writebuf */ cpufunc_nullop, /* flush_brnchtgt_C */ (void *)cpufunc_nullop, /* flush_brnchtgt_E */ armv7_sleep, /* sleep */ /* Soft functions */ cpufunc_null_fixup, /* dataabt_fixup */ cpufunc_null_fixup, /* prefetchabt_fixup */ armv7_context_switch, /* context_switch */ cortexa_setup /* cpu setup */ }; #endif /* CPU_CORTEXA */ /* * Global constants also used by locore.s */ struct cpu_functions cpufuncs; u_int cputype; u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */ #if defined(CPU_ARM9) || \ defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) || \ defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \ defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) || \ defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \ defined(CPU_CORTEXA) || defined(CPU_KRAIT) +/* Global cache line sizes, use 32 as default */ +int arm_dcache_min_line_size = 32; +int arm_icache_min_line_size = 32; +int arm_idcache_min_line_size = 32; + static void get_cachetype_cp15(void); /* Additional cache information local to this file. Log2 of some of the above numbers. */ static int arm_dcache_l2_nsets; static int arm_dcache_l2_assoc; static int arm_dcache_l2_linesize; static void get_cachetype_cp15() { u_int ctype, isize, dsize, cpuid; u_int clevel, csize, i, sel; u_int multiplier; u_char type; __asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctype)); cpuid = cpufunc_id(); /* * ...and thus spake the ARM ARM: * * If an value corresponding to an unimplemented or * reserved ID register is encountered, the System Control * processor returns the value of the main ID register. 
*/ if (ctype == cpuid) goto out; if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) { + /* Resolve minimal cache line sizes */ + arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2); + arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2); + arm_idcache_min_line_size = + min(arm_icache_min_line_size, arm_dcache_min_line_size); + __asm __volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (clevel)); arm_cache_level = clevel; arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level); i = 0; while ((type = (clevel & 0x7)) && i < 7) { if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE || type == CACHE_SEP_CACHE) { sel = i << 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; arm_dcache_align = 1 << (CPUV7_CT_xSIZE_LEN(csize) + 4); arm_dcache_align_mask = arm_dcache_align - 1; } if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) { sel = (i << 1) | 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } i++; clevel >>= 3; } } else { if ((ctype & CPU_CT_S) == 0) arm_pcache_unified = 1; /* * If you want to know how this code works, go read the ARM ARM. */ arm_pcache_type = CPU_CT_CTYPE(ctype); if (arm_pcache_unified == 0) { isize = CPU_CT_ISIZE(ctype); multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2; arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3); if (CPU_CT_xSIZE_ASSOC(isize) == 0) { if (isize & CPU_CT_xSIZE_M) arm_picache_line_size = 0; /* not present */ else arm_picache_ways = 1; } else { arm_picache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(isize) - 1); } arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8); } dsize = CPU_CT_DSIZE(ctype); multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2; arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3); if (CPU_CT_xSIZE_ASSOC(dsize) == 0) { if (dsize & CPU_CT_xSIZE_M) arm_pdcache_line_size = 0; /* not present */ else arm_pdcache_ways = 1; } else { arm_pdcache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(dsize) - 1); } arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8); arm_dcache_align = arm_pdcache_line_size; arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2; arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3; arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize); out: arm_dcache_align_mask = arm_dcache_align - 1; } } #endif /* ARM9 || XSCALE */ /* * Cannot panic here as we may not have a console yet ... */ int set_cpufuncs() { cputype = cpufunc_id(); cputype &= CPU_ID_CPU_MASK; /* * NOTE: cpu_do_powersave defaults to off. If we encounter a * CPU type where we want to use it by default, then we set it. 
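
The new lines in get_cachetype_cp15() above derive the minimal cache line sizes from the ARMv7 Cache Type Register: the DminLine and IminLine fields hold log2 of the line length in words, so shifting 1 by (field + 2) yields the size in bytes. Below is a small standalone sketch of that decoding; the CT_DMINLINE()/CT_IMINLINE() extractors are defined locally for the example and merely stand in for the kernel's CPU_CT_DMINLINE()/CPU_CT_IMINLINE() macros.

#include <stdio.h>

#define	CT_IMINLINE(x)	((x) & 0xf)		/* log2(words) of smallest I line */
#define	CT_DMINLINE(x)	(((x) >> 16) & 0xf)	/* log2(words) of smallest D line */

int
main(void)
{
	/* Example CTR value: DminLine = 4 (16 words), IminLine = 3 (8 words). */
	unsigned int ctype = (4 << 16) | 3;
	int dline, iline;

	/* A word is 4 bytes, so bytes = 1 << (field + 2). */
	dline = 1 << (CT_DMINLINE(ctype) + 2);
	iline = 1 << (CT_IMINLINE(ctype) + 2);

	printf("dcache min line %d bytes, icache min line %d bytes\n",
	    dline, iline);
	return (0);
}

With DminLine = 4 and IminLine = 3 this prints 64 and 32 bytes; the pre-discovery default of 32 matches the value the assembly used to hard-code.
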
*/ #ifdef CPU_ARM9 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD || (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) && (cputype & 0x0000f000) == 0x00009000) { cpufuncs = arm9_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ get_cachetype_cp15(); arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize; arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) - arm9_dcache_sets_inc; arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc); arm9_dcache_index_max = 0U - arm9_dcache_index_inc; #ifdef ARM9_CACHE_WRITE_THROUGH pmap_pte_init_arm9(); #else pmap_pte_init_generic(); #endif goto out; } #endif /* CPU_ARM9 */ #if defined(CPU_ARM9E) || defined(CPU_ARM10) if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD || cputype == CPU_ID_MV88FR571_41) { uint32_t sheeva_ctrl; sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE | MV_L2_ENABLE); /* * Workaround for Marvell MV78100 CPU: Cache prefetch * mechanism may affect the cache coherency validity, * so it needs to be disabled. * * Refer to errata document MV-S501058-00C.pdf (p. 3.1 * L2 Prefetching Mechanism) for details. */ if (cputype == CPU_ID_MV88FR571_VD || cputype == CPU_ID_MV88FR571_41) sheeva_ctrl |= MV_L2_PREFETCH_DISABLE; sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl); cpufuncs = sheeva_cpufuncs; get_cachetype_cp15(); pmap_pte_init_generic(); goto out; } else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) { cpufuncs = armv5_ec_cpufuncs; get_cachetype_cp15(); pmap_pte_init_generic(); goto out; } #endif /* CPU_ARM9E || CPU_ARM10 */ #ifdef CPU_ARM10 if (/* cputype == CPU_ID_ARM1020T || */ cputype == CPU_ID_ARM1020E) { /* * Select write-through cacheing (this isn't really an * option on ARM1020T). */ cpufuncs = arm10_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ get_cachetype_cp15(); arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize; arm10_dcache_sets_max = (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) - arm10_dcache_sets_inc; arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc); arm10_dcache_index_max = 0U - arm10_dcache_index_inc; pmap_pte_init_generic(); goto out; } #endif /* CPU_ARM10 */ #if defined(CPU_ARM1136) || defined(CPU_ARM1176) if (cputype == CPU_ID_ARM1136JS || cputype == CPU_ID_ARM1136JSR1 || cputype == CPU_ID_ARM1176JZS) { #ifdef CPU_ARM1136 if (cputype == CPU_ID_ARM1136JS || cputype == CPU_ID_ARM1136JSR1) cpufuncs = arm1136_cpufuncs; #endif #ifdef CPU_ARM1176 if (cputype == CPU_ID_ARM1176JZS) cpufuncs = arm1176_cpufuncs; #endif cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ get_cachetype_cp15(); pmap_pte_init_mmu_v6(); goto out; } #endif /* CPU_ARM1136 || CPU_ARM1176 */ #if defined(CPU_CORTEXA) || defined(CPU_KRAIT) if (cputype == CPU_ID_CORTEXA7 || cputype == CPU_ID_CORTEXA8R1 || cputype == CPU_ID_CORTEXA8R2 || cputype == CPU_ID_CORTEXA8R3 || cputype == CPU_ID_CORTEXA9R1 || cputype == CPU_ID_CORTEXA9R2 || cputype == CPU_ID_CORTEXA9R3 || cputype == CPU_ID_CORTEXA15R0 || cputype == CPU_ID_CORTEXA15R1 || cputype == CPU_ID_CORTEXA15R2 || cputype == CPU_ID_CORTEXA15R3 || cputype == CPU_ID_KRAIT ) { cpufuncs = cortexa_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ get_cachetype_cp15(); pmap_pte_init_mmu_v6(); /* Use powersave on this CPU. 
*/ cpu_do_powersave = 1; goto out; } #endif /* CPU_CORTEXA */ #if defined(CPU_MV_PJ4B) if (cputype == CPU_ID_MV88SV581X_V7 || cputype == CPU_ID_MV88SV584X_V7 || cputype == CPU_ID_ARM_88SV581X_V7) { cpufuncs = pj4bv7_cpufuncs; get_cachetype_cp15(); pmap_pte_init_mmu_v6(); goto out; } #endif /* CPU_MV_PJ4B */ #if defined(CPU_FA526) || defined(CPU_FA626TE) if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) { cpufuncs = fa526_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */ get_cachetype_cp15(); pmap_pte_init_generic(); /* Use powersave on this CPU. */ cpu_do_powersave = 1; goto out; } #endif /* CPU_FA526 || CPU_FA626TE */ #ifdef CPU_XSCALE_80200 if (cputype == CPU_ID_80200) { int rev = cpufunc_id() & CPU_ID_REVISION_MASK; i80200_icu_init(); #if defined(XSCALE_CCLKCFG) /* * Crank CCLKCFG to maximum legal value. */ __asm __volatile ("mcr p14, 0, %0, c6, c0, 0" : : "r" (XSCALE_CCLKCFG)); #endif /* * XXX Disable ECC in the Bus Controller Unit; we * don't really support it, yet. Clear any pending * error indications. */ __asm __volatile("mcr p13, 0, %0, c0, c1, 0" : : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV)); cpufuncs = xscale_cpufuncs; /* * i80200 errata: Step-A0 and A1 have a bug where * D$ dirty bits are not cleared on "invalidate by * address". * * Workaround: Clean cache line before invalidating. */ if (rev == 0 || rev == 1) cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng; cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ get_cachetype_cp15(); pmap_pte_init_xscale(); goto out; } #endif /* CPU_XSCALE_80200 */ #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219) if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 || cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 || cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) { cpufuncs = xscale_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ get_cachetype_cp15(); pmap_pte_init_xscale(); goto out; } #endif /* CPU_XSCALE_80321 */ #if defined(CPU_XSCALE_81342) if (cputype == CPU_ID_81342) { cpufuncs = xscalec3_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ get_cachetype_cp15(); pmap_pte_init_xscale(); goto out; } #endif /* CPU_XSCALE_81342 */ #ifdef CPU_XSCALE_PXA2X0 /* ignore core revision to test PXA2xx CPUs */ if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 || (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X || (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) { cpufuncs = xscale_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ get_cachetype_cp15(); pmap_pte_init_xscale(); /* Use powersave on this CPU. */ cpu_do_powersave = 1; goto out; } #endif /* CPU_XSCALE_PXA2X0 */ #ifdef CPU_XSCALE_IXP425 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 || cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) { cpufuncs = xscale_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ get_cachetype_cp15(); pmap_pte_init_xscale(); goto out; } #endif /* CPU_XSCALE_IXP425 */ /* * Bzzzz. And the answer was ... */ panic("No support for this CPU type (%08x) in kernel", cputype); return(ARCHITECTURE_NOT_PRESENT); out: uma_set_align(arm_dcache_align_mask); return (0); } /* * Fixup routines for data and prefetch aborts. * * Several compile time symbols are used * * DEBUG_FAULT_CORRECTION - Print debugging information during the * correction of registers after a fault. */ /* * Null abort fixup routine. * For use when no fixup is required. 
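
set_cpufuncs() above installs exactly one of the per-core tables into the global cpufuncs, and the rest of the kernel reaches cache, TLB and MMU operations through that table. The sketch below shows the dispatch pattern with only two members and stand-in typedefs for the kernel's vm_offset_t/vm_size_t; it mirrors how cpufunc.h wraps the members but is not a copy of that header.

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t vm_offset_t;	/* stand-ins for the kernel's types */
typedef size_t    vm_size_t;

struct cpu_functions {
	void	(*cf_dcache_wb_range)(vm_offset_t, vm_size_t);
	void	(*cf_idcache_wbinv_all)(void);
	/* ... one member per MMU/TLB/cache operation ... */
};

struct cpu_functions cpufuncs;		/* chosen once, at boot */

/* Thin wrappers so callers never test the CPU type themselves. */
#define	cpu_dcache_wb_range(va, len)	cpufuncs.cf_dcache_wb_range((va), (len))
#define	cpu_idcache_wbinv_all()		cpufuncs.cf_idcache_wbinv_all()

Selecting the table once keeps the hot cache-maintenance paths free of per-call CPU checks, at the cost of one indirect call.
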
*/ int cpufunc_null_fixup(arg) void *arg; { return(ABORT_FIXUP_OK); } /* * CPU Setup code */ #if defined (CPU_ARM9) || \ defined(CPU_ARM9E) || \ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \ defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \ defined(CPU_ARM10) || defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\ defined(CPU_FA526) || defined(CPU_FA626TE) #define IGN 0 #define OR 1 #define BIC 2 struct cpu_option { char *co_name; int co_falseop; int co_trueop; int co_value; }; static u_int parse_cpu_options(char *, struct cpu_option *, u_int); static u_int parse_cpu_options(args, optlist, cpuctrl) char *args; struct cpu_option *optlist; u_int cpuctrl; { int integer; if (args == NULL) return(cpuctrl); while (optlist->co_name) { if (get_bootconf_option(args, optlist->co_name, BOOTOPT_TYPE_BOOLEAN, &integer)) { if (integer) { if (optlist->co_trueop == OR) cpuctrl |= optlist->co_value; else if (optlist->co_trueop == BIC) cpuctrl &= ~optlist->co_value; } else { if (optlist->co_falseop == OR) cpuctrl |= optlist->co_value; else if (optlist->co_falseop == BIC) cpuctrl &= ~optlist->co_value; } } ++optlist; } return(cpuctrl); } #endif /* CPU_ARM9 || XSCALE*/ #ifdef CPU_ARM9 struct cpu_option arm9_options[] = { { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, { NULL, IGN, IGN, 0 } }; void arm9_setup(args) char *args; { int cpuctrl, cpuctrlmask; cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_ROUNDROBIN; cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC | CPU_CONTROL_ROUNDROBIN; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; #endif cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl); #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Set the control register */ cpu_control(cpuctrlmask, cpuctrl); ctrl = cpuctrl; } #endif /* CPU_ARM9 */ #if defined(CPU_ARM9E) || defined(CPU_ARM10) struct cpu_option arm10_options[] = { { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, { NULL, IGN, IGN, 0 } }; void arm10_setup(args) char *args; { int cpuctrl, 
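
The cpu_option tables above encode, for each boot option, what to do when the option is false (co_falseop) and when it is true (co_trueop): OR sets the bits in co_value, BIC clears them, IGN leaves them untouched. The compact sketch below reproduces that decision with a hypothetical apply_option() helper; the 0x1004 and 0x1005 constants use the architectural SCTLR bit positions (MMU = bit 0, DC = bit 2, IC = bit 12) purely as example values.

#include <stdio.h>

#define	IGN	0
#define	OR	1
#define	BIC	2

struct cpu_option {
	char	*co_name;
	int	co_falseop;
	int	co_trueop;
	int	co_value;
};

/* Example entry mirroring the tables above. */
static struct cpu_option opt = { "cpu.nocache", OR, BIC, 0x1004 /* IC|DC */ };

/* Apply one boolean option the way parse_cpu_options() does. */
static unsigned int
apply_option(struct cpu_option *o, int value, unsigned int cpuctrl)
{
	int op = value ? o->co_trueop : o->co_falseop;

	if (op == OR)
		cpuctrl |= o->co_value;
	else if (op == BIC)
		cpuctrl &= ~o->co_value;
	return (cpuctrl);
}

int
main(void)
{
	/* "cpu.nocache=1" clears the I/D cache enable bits: prints 0x1. */
	printf("%#x\n", apply_option(&opt, 1, 0x1005));
	return (0);
}

Running it shows "cpu.nocache=1" stripping the cache-enable bits from the control word before cpu_control() writes it back.
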
cpuctrlmask; cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; #endif cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl); #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Now really make sure they are clean. */ __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; /* Set the control register */ ctrl = cpuctrl; cpu_control(0xffffffff, cpuctrl); /* And again. */ cpu_idcache_wbinv_all(); } #endif /* CPU_ARM9E || CPU_ARM10 */ #if defined(CPU_ARM1136) || defined(CPU_ARM1176) \ || defined(CPU_MV_PJ4B) \ || defined(CPU_CORTEXA) || defined(CPU_KRAIT) static __inline void cpu_scc_setup_ccnt(void) { /* This is how you give userland access to the CCNT and PMCn * registers. * BEWARE! This gives write access also, which may not be what * you want! */ #ifdef _PMC_USER_READ_WRITE_ #if defined(CPU_ARM1136) || defined(CPU_ARM1176) /* Use the Secure User and Non-secure Access Validation Control Register * to allow userland access */ __asm volatile ("mcr p15, 0, %0, c15, c9, 0\n\t" : : "r"(0x00000001)); #else /* Set PMUSERENR[0] to allow userland access */ __asm volatile ("mcr p15, 0, %0, c9, c14, 0\n\t" : : "r"(0x00000001)); #endif #endif #if defined(CPU_ARM1136) || defined(CPU_ARM1176) /* Set PMCR[2,0] to enable counters and reset CCNT */ __asm volatile ("mcr p15, 0, %0, c15, c12, 0\n\t" : : "r"(0x00000005)); #else /* Set up the PMCCNTR register as a cyclecounter: * Set PMINTENCLR to 0xFFFFFFFF to block interrupts * Set PMCR[2,0] to enable counters and reset CCNT * Set PMCNTENSET to 0x80000000 to enable CCNT */ __asm volatile ("mcr p15, 0, %0, c9, c14, 2\n\t" "mcr p15, 0, %1, c9, c12, 0\n\t" "mcr p15, 0, %2, c9, c12, 1\n\t" : : "r"(0xFFFFFFFF), "r"(0x00000005), "r"(0x80000000)); #endif } #endif #if defined(CPU_ARM1136) || defined(CPU_ARM1176) struct cpu_option arm11_options[] = { { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, { NULL, IGN, IGN, 0 } }; void arm11x6_setup(char *args) { int cpuctrl, cpuctrl_wax; uint32_t auxctrl, auxctrl_wax; uint32_t tmp, tmp2; uint32_t sbz=0; uint32_t cpuid; cpuid = cpufunc_id(); cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE; /* * "write as existing" bits * inverse of this is mask */ cpuctrl_wax = (3 << 30) | /* SBZ */ (1 << 29) | /* FA */ (1 << 28) | /* TR */ (3 << 26) | /* SBZ */ (3 << 19) | /* SBZ */ (1 << 17); /* SBZ */ cpuctrl |= CPU_CONTROL_BPRD_ENABLE; cpuctrl |= CPU_CONTROL_V6_EXTPAGE; cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif if (vector_page == 
ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; auxctrl = 0; auxctrl_wax = ~0; /* * This options enables the workaround for the 364296 ARM1136 * r0pX errata (possible cache data corruption with * hit-under-miss enabled). It sets the undocumented bit 31 in * the auxiliary control register and the FI bit in the control * register, thus disabling hit-under-miss without putting the * processor into full low interrupt latency mode. ARM11MPCore * is not affected. */ if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */ cpuctrl |= CPU_CONTROL_FI_ENABLE; auxctrl = ARM1136_AUXCTL_PFI; auxctrl_wax = ~ARM1136_AUXCTL_PFI; } /* * Enable an errata workaround */ if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */ auxctrl = ARM1176_AUXCTL_PHD; auxctrl_wax = ~ARM1176_AUXCTL_PHD; } /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Now really make sure they are clean. */ __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz)); /* Allow detection code to find the VFP if it's fitted. */ __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); /* Set the control register */ ctrl = cpuctrl; cpu_control(~cpuctrl_wax, cpuctrl); __asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t" "and %1, %0, %2\n\t" "orr %1, %1, %3\n\t" "teq %0, %1\n\t" "mcrne p15, 0, %1, c1, c0, 1\n\t" : "=r"(tmp), "=r"(tmp2) : "r"(auxctrl_wax), "r"(auxctrl)); /* And again. */ cpu_idcache_wbinv_all(); cpu_scc_setup_ccnt(); } #endif /* CPU_ARM1136 || CPU_ARM1176 */ #ifdef CPU_MV_PJ4B void pj4bv7_setup(args) char *args; { int cpuctrl; pj4b_config(); cpuctrl = CPU_CONTROL_MMU_ENABLE; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; #endif cpuctrl |= CPU_CONTROL_DC_ENABLE; cpuctrl |= (0xf << 3); cpuctrl |= CPU_CONTROL_BPRD_ENABLE; cpuctrl |= CPU_CONTROL_IC_ENABLE; if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; cpuctrl |= (0x5 << 16) | (1 < 22); cpuctrl |= CPU_CONTROL_V6_EXTPAGE; /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Set the control register */ ctrl = cpuctrl; cpu_control(0xFFFFFFFF, cpuctrl); /* And again. */ cpu_idcache_wbinv_all(); cpu_scc_setup_ccnt(); } #endif /* CPU_MV_PJ4B */ #if defined(CPU_CORTEXA) || defined(CPU_KRAIT) void cortexa_setup(char *args) { int cpuctrl, cpuctrlmask; cpuctrlmask = CPU_CONTROL_MMU_ENABLE | /* MMU enable [0] */ CPU_CONTROL_AFLT_ENABLE | /* Alignment fault [1] */ CPU_CONTROL_DC_ENABLE | /* DCache enable [2] */ CPU_CONTROL_BPRD_ENABLE | /* Branch prediction [11] */ CPU_CONTROL_IC_ENABLE | /* ICache enable [12] */ CPU_CONTROL_VECRELOC; /* Vector relocation [13] */ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; #endif /* Switch to big endian */ #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif /* Check if the vector page is at the high address (0xffff0000) */ if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Set the control register */ ctrl = cpuctrl; cpu_control(cpuctrlmask, cpuctrl); /* And again. 
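
cpu_scc_setup_ccnt(), called at the end of the v6/v7 setup routines above, resets and enables the cycle counter (and, under _PMC_USER_READ_WRITE_, opens it to userland via PMUSERENR). Once that has run the counter can be sampled with a single CP15 read; a hedged sketch follows, assuming an ARMv7 PMU where PMCCNTR is accessed as p15, 0, c9, c13, 0.

#include <stdint.h>

/*
 * Read the ARMv7 cycle counter (PMCCNTR) once cpu_scc_setup_ccnt() has
 * enabled it.  Reading from user mode additionally needs the PMUSERENR
 * bit set by the _PMC_USER_READ_WRITE_ block above.
 */
static inline uint32_t
read_ccnt(void)
{
	uint32_t ccnt;

	__asm __volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
	return (ccnt);
}

Two samples around a code region give an elapsed-cycle count, modulo 32-bit wrap (the PMCR divide-by-64 bit is left clear by the setup code).
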
*/ cpu_idcache_wbinv_all(); #ifdef SMP armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting */ #endif cpu_scc_setup_ccnt(); } #endif /* CPU_CORTEXA */ #if defined(CPU_FA526) || defined(CPU_FA626TE) struct cpu_option fa526_options[] = { #ifdef COMPAT_12 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, #endif /* COMPAT_12 */ { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, { NULL, IGN, IGN, 0 } }; void fa526_setup(char *args) { int cpuctrl, cpuctrlmask; cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE; cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; #endif cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl); #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Set the control register */ ctrl = cpuctrl; cpu_control(0xffffffff, cpuctrl); } #endif /* CPU_FA526 || CPU_FA626TE */ #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \ defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) struct cpu_option xscale_options[] = { #ifdef COMPAT_12 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, #endif /* COMPAT_12 */ { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, { NULL, IGN, IGN, 0 } }; void xscale_setup(args) char *args; { uint32_t auxctl; int cpuctrl, cpuctrlmask; /* * The XScale Write Buffer is always enabled. Our option * is to enable/disable coalescing. Note that bits 6:3 * must always be enabled. 
*/ cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE; cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \ CPU_CONTROL_L2_ENABLE; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; #endif cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl); #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; #ifdef CPU_XSCALE_CORE3 cpuctrl |= CPU_CONTROL_L2_ENABLE; #endif /* Clear out the cache */ cpu_idcache_wbinv_all(); /* * Set the control register. Note that bits 6:3 must always * be set to 1. */ ctrl = cpuctrl; /* cpu_control(cpuctrlmask, cpuctrl);*/ cpu_control(0xffffffff, cpuctrl); /* Make sure write coalescing is turned on */ __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); #ifdef XSCALE_NO_COALESCE_WRITES auxctl |= XSCALE_AUXCTL_K; #else auxctl &= ~XSCALE_AUXCTL_K; #endif #ifdef CPU_XSCALE_CORE3 auxctl |= XSCALE_AUXCTL_LLR; auxctl |= XSCALE_AUXCTL_MD_MASK; #endif __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); } #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 CPU_XSCALE_80219 */ Index: stable/10/sys/arm/arm/cpufunc_asm_armv7.S =================================================================== --- stable/10/sys/arm/arm/cpufunc_asm_armv7.S (revision 283334) +++ stable/10/sys/arm/arm/cpufunc_asm_armv7.S (revision 283335) @@ -1,366 +1,371 @@ /*- * Copyright (c) 2010 Per Odlund * Copyright (C) 2011 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include .cpu cortex-a8 .Lcoherency_level: .word _C_LABEL(arm_cache_loc) .Lcache_type: .word _C_LABEL(arm_cache_type) +.Larmv7_dcache_line_size: + .word _C_LABEL(arm_dcache_min_line_size) +.Larmv7_icache_line_size: + .word _C_LABEL(arm_icache_min_line_size) +.Larmv7_idcache_line_size: + .word _C_LABEL(arm_idcache_min_line_size) .Lway_mask: .word 0x3ff .Lmax_index: .word 0x7fff .Lpage_mask: .word 0xfff #define PT_NOS (1 << 5) #define PT_S (1 << 1) #define PT_INNER_NC 0 #define PT_INNER_WT (1 << 0) #define PT_INNER_WB ((1 << 0) | (1 << 6)) #define PT_INNER_WBWA (1 << 6) #define PT_OUTER_NC 0 #define PT_OUTER_WT (2 << 3) #define PT_OUTER_WB (3 << 3) #define PT_OUTER_WBWA (1 << 3) #ifdef SMP #define PT_ATTR (PT_S|PT_INNER_WBWA|PT_OUTER_WBWA|PT_NOS) #else #define PT_ATTR (PT_INNER_WBWA|PT_OUTER_WBWA) #endif ENTRY(armv7_setttb) dsb orr r0, r0, #PT_ATTR mcr CP15_TTBR0(r0) isb #ifdef SMP mcr CP15_TLBIALLIS #else mcr CP15_TLBIALL #endif dsb isb RET END(armv7_setttb) ENTRY(armv7_tlb_flushID) dsb #ifdef SMP mcr CP15_TLBIALLIS mcr CP15_BPIALLIS #else mcr CP15_TLBIALL mcr CP15_BPIALL #endif dsb isb mov pc, lr END(armv7_tlb_flushID) ENTRY(armv7_tlb_flushID_SE) ldr r1, .Lpage_mask bic r0, r0, r1 #ifdef SMP mcr CP15_TLBIMVAAIS(r0) mcr CP15_BPIALLIS #else mcr CP15_TLBIMVA(r0) mcr CP15_BPIALL #endif dsb isb mov pc, lr END(armv7_tlb_flushID_SE) /* Based on algorithm from ARM Architecture Reference Manual */ ENTRY(armv7_dcache_wbinv_all) stmdb sp!, {r4, r5, r6, r7, r8, r9} /* Get cache level */ ldr r0, .Lcoherency_level ldr r3, [r0] cmp r3, #0 beq Finished /* For each cache level */ mov r8, #0 Loop1: /* Get cache type for given level */ mov r2, r8, lsl #2 add r2, r2, r2 ldr r0, .Lcache_type ldr r1, [r0, r2] /* Get line size */ and r2, r1, #7 add r2, r2, #4 /* Get number of ways */ ldr r4, .Lway_mask ands r4, r4, r1, lsr #3 clz r5, r4 /* Get max index */ ldr r7, .Lmax_index ands r7, r7, r1, lsr #13 Loop2: mov r9, r4 Loop3: mov r6, r8, lsl #1 orr r6, r6, r9, lsl r5 orr r6, r6, r7, lsl r2 /* Clean and invalidate data cache by way/index */ mcr CP15_DCCISW(r6) subs r9, r9, #1 bge Loop3 subs r7, r7, #1 bge Loop2 Skip: add r8, r8, #1 cmp r3, r8 bne Loop1 Finished: dsb ldmia sp!, {r4, r5, r6, r7, r8, r9} RET END(armv7_dcache_wbinv_all) ENTRY(armv7_idcache_wbinv_all) stmdb sp!, {lr} bl armv7_dcache_wbinv_all #ifdef SMP mcr CP15_ICIALLUIS #else mcr CP15_ICIALLU #endif dsb isb ldmia sp!, {lr} RET END(armv7_idcache_wbinv_all) -/* XXX Temporary set it to 32 for MV cores, however this value should be - * get from Cache Type register - */ -.Larmv7_line_size: - .word 32 - ENTRY(armv7_dcache_wb_range) - ldr ip, .Larmv7_line_size + ldr ip, .Larmv7_dcache_line_size + ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larmv7_wb_next: mcr CP15_DCCMVAC(r0) add r0, r0, ip subs r1, r1, ip bhi .Larmv7_wb_next dsb /* data synchronization barrier */ RET END(armv7_dcache_wb_range) ENTRY(armv7_dcache_wbinv_range) - ldr ip, .Larmv7_line_size + ldr ip, .Larmv7_dcache_line_size + ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larmv7_wbinv_next: mcr CP15_DCCIMVAC(r0) add r0, r0, ip subs r1, r1, ip bhi .Larmv7_wbinv_next dsb /* data synchronization barrier */ RET END(armv7_dcache_wbinv_range) /* * Note, we must not invalidate everything. If the range is too big we * must use wb-inv of the entire cache. 
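
With this change each range primitive no longer uses a hard-coded 32-byte literal: the first ldr fetches the address of the line-size variable from the literal pool and the second ldr fetches its value, discovered earlier by get_cachetype_cp15(). The loop itself aligns the start down to a line boundary, extends the length by the bytes dropped from the front, and issues one maintenance operation per line. Below is a C model of armv7_dcache_wb_range() under those assumptions, with clean_dcache_line() standing in for the DCCMVAC coprocessor write.

#include <stddef.h>
#include <stdint.h>

extern int arm_dcache_min_line_size;		/* set by get_cachetype_cp15() */
extern void clean_dcache_line(uintptr_t va);	/* stand-in for DCCMVAC */

/* C model of armv7_dcache_wb_range(): clean [va, va + len) by cache line. */
static void
dcache_wb_range_model(uintptr_t va, size_t len)
{
	size_t line = arm_dcache_min_line_size;
	size_t mask = line - 1;

	len += va & mask;		/* cover the partial line at the start */
	va &= ~(uintptr_t)mask;		/* align down to a line boundary */
	do {
		clean_dcache_line(va);
		va += line;
		len = (len > line) ? len - line : 0;
	} while (len > 0);
}

The wbinv and inv variants follow the same pattern with a different maintenance instruction in the loop body, each reading the appropriate one of the three new line-size variables.
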
*/ ENTRY(armv7_dcache_inv_range) - ldr ip, .Larmv7_line_size + ldr ip, .Larmv7_dcache_line_size + ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larmv7_inv_next: mcr CP15_DCIMVAC(r0) add r0, r0, ip subs r1, r1, ip bhi .Larmv7_inv_next dsb /* data synchronization barrier */ RET END(armv7_dcache_inv_range) ENTRY(armv7_idcache_wbinv_range) - ldr ip, .Larmv7_line_size + ldr ip, .Larmv7_idcache_line_size + ldr ip, [ip] sub r3, ip, #1 and r2, r0, r3 add r1, r1, r2 bic r0, r0, r3 .Larmv7_id_wbinv_next: mcr CP15_ICIMVAU(r0) mcr CP15_DCCIMVAC(r0) add r0, r0, ip subs r1, r1, ip bhi .Larmv7_id_wbinv_next isb /* instruction synchronization barrier */ dsb /* data synchronization barrier */ RET END(armv7_idcache_wbinv_range) ENTRY_NP(armv7_icache_sync_all) #ifdef SMP mcr CP15_ICIALLUIS #else mcr CP15_ICIALLU #endif isb /* instruction synchronization barrier */ dsb /* data synchronization barrier */ RET END(armv7_icache_sync_all) ENTRY_NP(armv7_icache_sync_range) - ldr ip, .Larmv7_line_size + ldr ip, .Larmv7_icache_line_size + ldr ip, [ip] .Larmv7_sync_next: mcr CP15_ICIMVAU(r0) mcr CP15_DCCMVAC(r0) add r0, r0, ip subs r1, r1, ip bhi .Larmv7_sync_next isb /* instruction synchronization barrier */ dsb /* data synchronization barrier */ RET END(armv7_icache_sync_range) ENTRY(armv7_cpu_sleep) dsb /* data synchronization barrier */ wfi /* wait for interrupt */ RET END(armv7_cpu_sleep) ENTRY(armv7_context_switch) dsb orr r0, r0, #PT_ATTR mcr CP15_TTBR0(r0) isb #ifdef SMP mcr CP15_TLBIALLIS #else mcr CP15_TLBIALL #endif dsb isb RET END(armv7_context_switch) ENTRY(armv7_drain_writebuf) dsb RET END(armv7_drain_writebuf) ENTRY(armv7_sev) dsb sev nop RET END(armv7_sev) ENTRY(armv7_auxctrl) mrc CP15_ACTLR(r2) bic r3, r2, r0 /* Clear bits */ eor r3, r3, r1 /* XOR bits */ teq r2, r3 mcrne CP15_ACTLR(r3) mov r0, r2 RET END(armv7_auxctrl) /* * Invalidate all I+D+branch cache. Used by startup code, which counts * on the fact that only r0-r3,ip are modified and no stack space is used. */ ENTRY(armv7_idcache_inv_all) mov r0, #0 mcr CP15_CSSELR(r0) @ set cache level to L1 mrc CP15_CCSIDR(r0) ubfx r2, r0, #13, #15 @ get num sets - 1 from CCSIDR ubfx r3, r0, #3, #10 @ get numways - 1 from CCSIDR clz r1, r3 @ number of bits to MSB of way lsl r3, r3, r1 @ shift into position mov ip, #1 @ lsl ip, ip, r1 @ ip now contains the way decr ubfx r0, r0, #0, #3 @ get linesize from CCSIDR add r0, r0, #4 @ apply bias lsl r2, r2, r0 @ shift sets by log2(linesize) add r3, r3, r2 @ merge numsets - 1 with numways - 1 sub ip, ip, r2 @ subtract numsets - 1 from way decr mov r1, #1 lsl r1, r1, r0 @ r1 now contains the set decr mov r2, ip @ r2 now contains set way decr /* r3 = ways/sets, r2 = way decr, r1 = set decr, r0 and ip are free */ 1: mcr CP15_DCISW(r3) @ invalidate line movs r0, r3 @ get current way/set beq 2f @ at 0 means we are done. movs r0, r0, lsl #10 @ clear way bits leaving only set bits subne r3, r3, r1 @ non-zero?, decrement set # subeq r3, r3, r2 @ zero?, decrement way # and restore set count b 1b 2: dsb @ wait for stores to finish mov r0, #0 @ and ... 
mcr CP15_ICIALLU @ invalidate instruction+branch cache isb @ instruction sync barrier bx lr @ return END(armv7_idcache_inv_all) ENTRY_NP(armv7_sleep) dsb wfi bx lr END(armv7_sleep) Index: stable/10/sys/arm/arm/elf_trampoline.c =================================================================== --- stable/10/sys/arm/arm/elf_trampoline.c (revision 283334) +++ stable/10/sys/arm/arm/elf_trampoline.c (revision 283335) @@ -1,733 +1,744 @@ /*- * Copyright (c) 2005 Olivier Houchard. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Since we are compiled outside of the normal kernel build process, we * need to include opt_global.h manually. 
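
Stepping back to the set/way loops in cpufunc_asm_armv7.S above (Loop3 in armv7_dcache_wbinv_all and the tail of armv7_idcache_inv_all): they pack level, way and set into one register before the DCCISW/DCISW write. The way lands in the top bits (shifted by clz(ways - 1)), the set is shifted by log2 of the line size in bytes, and the 0-based level occupies bits [3:1]. The helper below reproduces that packing; set_way_operand() is illustrative only and assumes at least two ways so the clz is well defined.

#include <stdint.h>

/*
 * Build a DCCISW/DCISW operand.  level is 0-based, nways >= 2,
 * linesize_log2 = log2(line size in bytes) = CCSIDR LineSize field + 4.
 */
static uint32_t
set_way_operand(uint32_t level, uint32_t way, uint32_t set,
    uint32_t nways, uint32_t linesize_log2)
{
	uint32_t way_shift = __builtin_clz(nways - 1);	/* same clz as the asm */

	return ((way << way_shift) | (set << linesize_log2) | (level << 1));
}

Iterating way from nways - 1 down to 0 and set from nsets - 1 down to 0, issuing one maintenance write per operand, covers the whole cache level, which is what the assembly does with its decrementing registers.
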
*/ #include "opt_global.h" #include "opt_kernname.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include extern char kernel_start[]; extern char kernel_end[]; extern void *_end; void _start(void); void __start(void); void __startC(void); extern unsigned int cpufunc_id(void); extern void armv6_idcache_wbinv_all(void); extern void armv7_idcache_wbinv_all(void); extern void do_call(void *, void *, void *, int); #define GZ_HEAD 0xa #if defined(CPU_ARM9) #define cpu_idcache_wbinv_all arm9_idcache_wbinv_all extern void arm9_idcache_wbinv_all(void); #elif defined(CPU_FA526) || defined(CPU_FA626TE) #define cpu_idcache_wbinv_all fa526_idcache_wbinv_all extern void fa526_idcache_wbinv_all(void); #elif defined(CPU_ARM9E) #define cpu_idcache_wbinv_all armv5_ec_idcache_wbinv_all extern void armv5_ec_idcache_wbinv_all(void); #elif defined(CPU_ARM10) #define cpu_idcache_wbinv_all arm10_idcache_wbinv_all extern void arm10_idcache_wbinv_all(void); #elif defined(CPU_ARM1136) || defined(CPU_ARM1176) #define cpu_idcache_wbinv_all armv6_idcache_wbinv_all #elif defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \ defined(CPU_XSCALE_80219) #define cpu_idcache_wbinv_all xscale_cache_purgeID extern void xscale_cache_purgeID(void); #elif defined(CPU_XSCALE_81342) #define cpu_idcache_wbinv_all xscalec3_cache_purgeID extern void xscalec3_cache_purgeID(void); #elif defined(CPU_MV_PJ4B) #if !defined(SOC_MV_ARMADAXP) #define cpu_idcache_wbinv_all armv6_idcache_wbinv_all extern void armv6_idcache_wbinv_all(void); #else #define cpu_idcache_wbinv_all() armadaxp_idcache_wbinv_all #endif #endif /* CPU_MV_PJ4B */ #ifdef CPU_XSCALE_81342 #define cpu_l2cache_wbinv_all xscalec3_l2cache_purge extern void xscalec3_l2cache_purge(void); #elif defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) #define cpu_l2cache_wbinv_all sheeva_l2cache_wbinv_all extern void sheeva_l2cache_wbinv_all(void); #elif defined(CPU_CORTEXA) || defined(CPU_KRAIT) #define cpu_idcache_wbinv_all armv7_idcache_wbinv_all #define cpu_l2cache_wbinv_all() #else #define cpu_l2cache_wbinv_all() #endif static void armadaxp_idcache_wbinv_all(void); int arm_picache_size; int arm_picache_line_size; int arm_picache_ways; int arm_pdcache_size; /* and unified */ int arm_pdcache_line_size = 32; int arm_pdcache_ways; int arm_pcache_type; int arm_pcache_unified; int arm_dcache_align; int arm_dcache_align_mask; +int arm_dcache_min_line_size = 32; +int arm_icache_min_line_size = 32; +int arm_idcache_min_line_size = 32; + u_int arm_cache_level; u_int arm_cache_type[14]; u_int arm_cache_loc; /* Additional cache information local to this file. Log2 of some of the above numbers. 
*/ static int arm_dcache_l2_nsets; static int arm_dcache_l2_assoc; static int arm_dcache_l2_linesize; int block_userspace_access = 0; extern int arm9_dcache_sets_inc; extern int arm9_dcache_sets_max; extern int arm9_dcache_index_max; extern int arm9_dcache_index_inc; static __inline void * memcpy(void *dst, const void *src, int len) { const char *s = src; char *d = dst; while (len) { if (0 && len >= 4 && !((vm_offset_t)d & 3) && !((vm_offset_t)s & 3)) { *(uint32_t *)d = *(uint32_t *)s; s += 4; d += 4; len -= 4; } else { *d++ = *s++; len--; } } return (dst); } static __inline void bzero(void *addr, int count) { char *tmp = (char *)addr; while (count > 0) { if (count >= 4 && !((vm_offset_t)tmp & 3)) { *(uint32_t *)tmp = 0; tmp += 4; count -= 4; } else { *tmp = 0; tmp++; count--; } } } static void arm9_setup(void); void _startC(void) { int tmp1; unsigned int sp = ((unsigned int)&_end & ~3) + 4; unsigned int pc, kernphysaddr; /* * Figure out the physical address the kernel was loaded at. This * assumes the entry point (this code right here) is in the first page, * which will always be the case for this trampoline code. */ __asm __volatile("mov %0, pc\n" : "=r" (pc)); kernphysaddr = pc & ~PAGE_MASK; #if defined(FLASHADDR) && defined(PHYSADDR) && defined(LOADERRAMADDR) if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) || (FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) { /* * We're running from flash, so just copy the whole thing * from flash to memory. * This is far from optimal, we could do the relocation or * the unzipping directly from flash to memory to avoid this * needless copy, but it would require to know the flash * physical address. */ unsigned int target_addr; unsigned int tmp_sp; uint32_t src_addr = (uint32_t)&_start - PHYSADDR + FLASHADDR + (pc - FLASHADDR - ((uint32_t)&_startC - PHYSADDR)) & 0xfffff000; target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR; tmp_sp = target_addr + 0x100000 + (unsigned int)&_end - (unsigned int)&_start; memcpy((char *)target_addr, (char *)src_addr, (unsigned int)&_end - (unsigned int)&_start); /* Temporary set the sp and jump to the new location. */ __asm __volatile( "mov sp, %1\n" "mov pc, %0\n" : : "r" (target_addr), "r" (tmp_sp)); } #endif #ifdef KZIP sp += KERNSIZE + 0x100; sp &= ~(L1_TABLE_SIZE - 1); sp += 2 * L1_TABLE_SIZE; #endif sp += 1024 * 1024; /* Should be enough for a stack */ __asm __volatile("adr %0, 2f\n" "bic %0, %0, #0xff000000\n" "and %1, %1, #0xff000000\n" "orr %0, %0, %1\n" "mrc p15, 0, %1, c1, c0, 0\n" "bic %1, %1, #1\n" /* Disable MMU */ "orr %1, %1, #(4 | 8)\n" /* Add DC enable, WBUF enable */ "orr %1, %1, #0x1000\n" /* Add IC enable */ "orr %1, %1, #(0x800)\n" /* BPRD enable */ "mcr p15, 0, %1, c1, c0, 0\n" "nop\n" "nop\n" "nop\n" "mov pc, %0\n" "2: nop\n" "mov sp, %2\n" : "=r" (tmp1), "+r" (kernphysaddr), "+r" (sp)); #ifndef KZIP #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ if ((cpufunc_id() & 0x0000f000) == 0x00009000) arm9_setup(); #endif #endif __start(); } static void get_cachetype_cp15() { u_int ctype, isize, dsize, cpuid; u_int clevel, csize, i, sel; u_int multiplier; u_char type; __asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctype)); cpuid = cpufunc_id(); /* * ...and thus spake the ARM ARM: * * If an value corresponding to an unimplemented or * reserved ID register is encountered, the System Control * processor returns the value of the main ID register. 
*/ if (ctype == cpuid) goto out; if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) { + /* Resolve minimal cache line sizes */ + arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2); + arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2); + arm_idcache_min_line_size = + (arm_dcache_min_line_size > arm_icache_min_line_size ? + arm_icache_min_line_size : arm_dcache_min_line_size); + __asm __volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (clevel)); arm_cache_level = clevel; arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level) + 1; i = 0; while ((type = (clevel & 0x7)) && i < 7) { if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE || type == CACHE_SEP_CACHE) { sel = i << 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) { sel = (i << 1) | 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } i++; clevel >>= 3; } } else { if ((ctype & CPU_CT_S) == 0) arm_pcache_unified = 1; /* * If you want to know how this code works, go read the ARM ARM. */ arm_pcache_type = CPU_CT_CTYPE(ctype); if (arm_pcache_unified == 0) { isize = CPU_CT_ISIZE(ctype); multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2; arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3); if (CPU_CT_xSIZE_ASSOC(isize) == 0) { if (isize & CPU_CT_xSIZE_M) arm_picache_line_size = 0; /* not present */ else arm_picache_ways = 1; } else { arm_picache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(isize) - 1); } arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8); } dsize = CPU_CT_DSIZE(ctype); multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2; arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3); if (CPU_CT_xSIZE_ASSOC(dsize) == 0) { if (dsize & CPU_CT_xSIZE_M) arm_pdcache_line_size = 0; /* not present */ else arm_pdcache_ways = 1; } else { arm_pdcache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(dsize) - 1); } arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8); arm_dcache_align = arm_pdcache_line_size; arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2; arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3; arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize); out: arm_dcache_align_mask = arm_dcache_align - 1; } } static void arm9_setup(void) { get_cachetype_cp15(); arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize; arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) - arm9_dcache_sets_inc; arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc); arm9_dcache_index_max = 0U - arm9_dcache_index_inc; } static void armadaxp_idcache_wbinv_all(void) { uint32_t feat; __asm __volatile("mrc p15, 0, %0, c0, c1, 0" : "=r" (feat)); if (feat & ARM_PFR0_THUMBEE_MASK) armv7_idcache_wbinv_all(); else armv6_idcache_wbinv_all(); } #ifdef KZIP static unsigned char *orig_input, *i_input, *i_output; static u_int memcnt; /* Memory allocated: blocks */ static size_t memtot; /* Memory allocated: bytes */ /* * Library functions required by inflate(). */ #define MEMSIZ 0x8000 /* * Allocate memory block. */ unsigned char * kzipmalloc(int size) { void *ptr; static u_char mem[MEMSIZ]; if (memtot + size > MEMSIZ) return NULL; ptr = mem + memtot; memtot += size; memcnt++; return ptr; } /* * Free allocated memory block. 
*/ void kzipfree(void *ptr) { memcnt--; if (!memcnt) memtot = 0; } void putstr(char *dummy) { } static int input(void *dummy) { if ((size_t)(i_input - orig_input) >= KERNCOMPSIZE) { return (GZ_EOF); } return *i_input++; } static int output(void *dummy, unsigned char *ptr, unsigned long len) { memcpy(i_output, ptr, len); i_output += len; return (0); } static void * inflate_kernel(void *kernel, void *startaddr) { struct inflate infl; unsigned char slide[GZ_WSIZE]; orig_input = kernel; memcnt = memtot = 0; i_input = (unsigned char *)kernel + GZ_HEAD; if (((char *)kernel)[3] & 0x18) { while (*i_input) i_input++; i_input++; } i_output = startaddr; bzero(&infl, sizeof(infl)); infl.gz_input = input; infl.gz_output = output; infl.gz_slide = slide; inflate(&infl); return ((char *)(((vm_offset_t)i_output & ~3) + 4)); } #endif void * load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end, int d) { Elf32_Ehdr *eh; Elf32_Phdr phdr[64] /* XXX */, *php; Elf32_Shdr shdr[64] /* XXX */; int i,j; void *entry_point; int symtabindex = -1; int symstrindex = -1; vm_offset_t lastaddr = 0; Elf_Addr ssym = 0; Elf_Dyn *dp; eh = (Elf32_Ehdr *)kstart; ssym = 0; entry_point = (void*)eh->e_entry; memcpy(phdr, (void *)(kstart + eh->e_phoff ), eh->e_phnum * sizeof(phdr[0])); /* Determine lastaddr. */ for (i = 0; i < eh->e_phnum; i++) { if (lastaddr < (phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_memsz)) lastaddr = phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_memsz; } /* Save the symbol tables, as there're about to be scratched. */ memcpy(shdr, (void *)(kstart + eh->e_shoff), sizeof(*shdr) * eh->e_shnum); if (eh->e_shnum * eh->e_shentsize != 0 && eh->e_shoff != 0) { for (i = 0; i < eh->e_shnum; i++) { if (shdr[i].sh_type == SHT_SYMTAB) { for (j = 0; j < eh->e_phnum; j++) { if (phdr[j].p_type == PT_LOAD && shdr[i].sh_offset >= phdr[j].p_offset && (shdr[i].sh_offset + shdr[i].sh_size <= phdr[j].p_offset + phdr[j].p_filesz)) { shdr[i].sh_offset = 0; shdr[i].sh_size = 0; j = eh->e_phnum; } } if (shdr[i].sh_offset != 0 && shdr[i].sh_size != 0) { symtabindex = i; symstrindex = shdr[i].sh_link; } } } func_end = roundup(func_end, sizeof(long)); if (symtabindex >= 0 && symstrindex >= 0) { ssym = lastaddr; if (d) { memcpy((void *)func_end, (void *)( shdr[symtabindex].sh_offset + kstart), shdr[symtabindex].sh_size); memcpy((void *)(func_end + shdr[symtabindex].sh_size), (void *)(shdr[symstrindex].sh_offset + kstart), shdr[symstrindex].sh_size); } else { lastaddr += shdr[symtabindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symtabindex].sh_size)); lastaddr += sizeof(shdr[symstrindex].sh_size); lastaddr += shdr[symstrindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symstrindex].sh_size)); } } } if (!d) return ((void *)lastaddr); j = eh->e_phnum; for (i = 0; i < j; i++) { volatile char c; if (phdr[i].p_type != PT_LOAD) continue; memcpy((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr), (void*)(kstart + phdr[i].p_offset), phdr[i].p_filesz); /* Clean space from oversized segments, eg: bss. */ if (phdr[i].p_filesz < phdr[i].p_memsz) bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_filesz), phdr[i].p_memsz - phdr[i].p_filesz); } /* Now grab the symbol tables. 
*/ if (symtabindex >= 0 && symstrindex >= 0) { *(Elf_Size *)lastaddr = shdr[symtabindex].sh_size; lastaddr += sizeof(shdr[symtabindex].sh_size); memcpy((void*)lastaddr, (void *)func_end, shdr[symtabindex].sh_size); lastaddr += shdr[symtabindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symtabindex].sh_size)); *(Elf_Size *)lastaddr = shdr[symstrindex].sh_size; lastaddr += sizeof(shdr[symstrindex].sh_size); memcpy((void*)lastaddr, (void*)(func_end + shdr[symtabindex].sh_size), shdr[symstrindex].sh_size); lastaddr += shdr[symstrindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symstrindex].sh_size)); *(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER; *((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR; *((Elf_Addr *)curaddr + 2) = lastaddr - curaddr + KERNVIRTADDR; } else *(Elf_Addr *)curaddr = 0; /* Invalidate the instruction cache. */ __asm __volatile("mcr p15, 0, %0, c7, c5, 0\n" "mcr p15, 0, %0, c7, c10, 4\n" : : "r" (curaddr)); __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" "bic %0, %0, #1\n" /* MMU_ENABLE */ "mcr p15, 0, %0, c1, c0, 0\n" : "=r" (ssym)); /* Jump to the entry point. */ ((void(*)(void))(entry_point - KERNVIRTADDR + curaddr))(); __asm __volatile(".globl func_end\n" "func_end:"); /* NOTREACHED */ return NULL; } extern char func_end[]; #define PMAP_DOMAIN_KERNEL 0 /* * Just define it instead of including the * whole VM headers set. */ int __hack; static __inline void setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend, int write_back) { unsigned int *pd = (unsigned int *)pt_addr; vm_paddr_t addr; int domain = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT; int tmp; bzero(pd, L1_TABLE_SIZE); for (addr = physstart; addr < physend; addr += L1_S_SIZE) { pd[addr >> L1_S_SHIFT] = L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)| L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr; if (write_back && 0) pd[addr >> L1_S_SHIFT] |= L1_S_B; } /* XXX: See below */ if (0xfff00000 < physstart || 0xfff00000 > physend) pd[0xfff00000 >> L1_S_SHIFT] = L1_TYPE_S|L1_S_AP(AP_KRW)| L1_S_DOM(PMAP_DOMAIN_KERNEL)|physstart; __asm __volatile("mcr p15, 0, %1, c2, c0, 0\n" /* set TTB */ "mcr p15, 0, %1, c8, c7, 0\n" /* Flush TTB */ "mcr p15, 0, %2, c3, c0, 0\n" /* Set DAR */ "mrc p15, 0, %0, c1, c0, 0\n" "orr %0, %0, #1\n" /* MMU_ENABLE */ "mcr p15, 0, %0, c1, c0, 0\n" "mrc p15, 0, %0, c2, c0, 0\n" /* CPWAIT */ "mov r0, r0\n" "sub pc, pc, #4\n" : "=r" (tmp) : "r" (pd), "r" (domain)); /* * XXX: This is the most stupid workaround I've ever wrote. * For some reason, the KB9202 won't boot the kernel unless * we access an address which is not in the * 0x20000000 - 0x20ffffff range. I hope I'll understand * what's going on later. */ __hack = *(volatile int *)0xfffff21c; } void __start(void) { void *curaddr; void *dst, *altdst; char *kernel = (char *)&kernel_start; int sp; int pt_addr; __asm __volatile("mov %0, pc" : "=r" (curaddr)); curaddr = (void*)((unsigned int)curaddr & 0xfff00000); #ifdef KZIP if (*kernel == 0x1f && kernel[1] == 0x8b) { pt_addr = (((int)&_end + KERNSIZE + 0x100) & ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE; #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ if ((cpufunc_id() & 0x0000f000) == 0x00009000) arm9_setup(); #endif setup_pagetables(pt_addr, (vm_paddr_t)curaddr, (vm_paddr_t)curaddr + 0x10000000, 1); /* Gzipped kernel */ dst = inflate_kernel(kernel, &_end); kernel = (char *)&_end; altdst = 4 + load_kernel((unsigned int)kernel, (unsigned int)curaddr, (unsigned int)&func_end + 800 , 0); if (altdst > dst) dst = altdst; /* * Disable MMU. 
Otherwise, setup_pagetables call below * might overwrite the L1 table we are currently using. */ cpu_idcache_wbinv_all(); cpu_l2cache_wbinv_all(); __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" "bic %0, %0, #1\n" /* MMU_DISABLE */ "mcr p15, 0, %0, c1, c0, 0\n" :"=r" (pt_addr)); } else #endif dst = 4 + load_kernel((unsigned int)&kernel_start, (unsigned int)curaddr, (unsigned int)&func_end, 0); dst = (void *)(((vm_offset_t)dst & ~3)); pt_addr = ((unsigned int)dst &~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE; setup_pagetables(pt_addr, (vm_paddr_t)curaddr, (vm_paddr_t)curaddr + 0x10000000, 0); sp = pt_addr + L1_TABLE_SIZE + 8192; sp = sp &~3; dst = (void *)(sp + 4); memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end - (unsigned int)&load_kernel + 800); do_call(dst, kernel, dst + (unsigned int)(&func_end) - (unsigned int)(&load_kernel) + 800, sp); } #ifdef __ARM_EABI__ /* We need to provide these functions but never call them */ void __aeabi_unwind_cpp_pr0(void); void __aeabi_unwind_cpp_pr1(void); void __aeabi_unwind_cpp_pr2(void); __strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr1); __strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr2); void __aeabi_unwind_cpp_pr0(void) { } #endif Index: stable/10/sys/arm/include/armreg.h =================================================================== --- stable/10/sys/arm/include/armreg.h (revision 283334) +++ stable/10/sys/arm/include/armreg.h (revision 283335) @@ -1,440 +1,443 @@ /* $NetBSD: armreg.h,v 1.37 2007/01/06 00:50:54 christos Exp $ */ /*- * Copyright (c) 1998, 2001 Ben Harris * Copyright (c) 1994-1996 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef MACHINE_ARMREG_H #define MACHINE_ARMREG_H #include #define INSN_SIZE 4 #define INSN_COND_MASK 0xf0000000 /* Condition mask */ #define PSR_MODE 0x0000001f /* mode mask */ #define PSR_USR32_MODE 0x00000010 #define PSR_FIQ32_MODE 0x00000011 #define PSR_IRQ32_MODE 0x00000012 #define PSR_SVC32_MODE 0x00000013 #define PSR_MON32_MODE 0x00000016 #define PSR_ABT32_MODE 0x00000017 #define PSR_HYP32_MODE 0x0000001a #define PSR_UND32_MODE 0x0000001b #define PSR_SYS32_MODE 0x0000001f #define PSR_32_MODE 0x00000010 #define PSR_T 0x00000020 /* Instruction set bit */ #define PSR_F 0x00000040 /* FIQ disable bit */ #define PSR_I 0x00000080 /* IRQ disable bit */ #define PSR_A 0x00000100 /* Imprecise abort bit */ #define PSR_E 0x00000200 /* Data endianess bit */ #define PSR_GE 0x000f0000 /* Greater than or equal to bits */ #define PSR_J 0x01000000 /* Java bit */ #define PSR_Q 0x08000000 /* Sticky overflow bit */ #define PSR_V 0x10000000 /* Overflow bit */ #define PSR_C 0x20000000 /* Carry bit */ #define PSR_Z 0x40000000 /* Zero bit */ #define PSR_N 0x80000000 /* Negative bit */ #define PSR_FLAGS 0xf0000000 /* Flags mask. */ /* The high-order byte is always the implementor */ #define CPU_ID_IMPLEMENTOR_MASK 0xff000000 #define CPU_ID_ARM_LTD 0x41000000 /* 'A' */ #define CPU_ID_DEC 0x44000000 /* 'D' */ #define CPU_ID_INTEL 0x69000000 /* 'i' */ #define CPU_ID_TI 0x54000000 /* 'T' */ #define CPU_ID_FARADAY 0x66000000 /* 'f' */ /* How to decide what format the CPUID is in. */ #define CPU_ID_ISOLD(x) (((x) & 0x0000f000) == 0x00000000) #define CPU_ID_IS7(x) (((x) & 0x0000f000) == 0x00007000) #define CPU_ID_ISNEW(x) (!CPU_ID_ISOLD(x) && !CPU_ID_IS7(x)) /* On recent ARMs this byte holds the architecture and variant (sub-model) */ #define CPU_ID_ARCH_MASK 0x000f0000 #define CPU_ID_ARCH_V3 0x00000000 #define CPU_ID_ARCH_V4 0x00010000 #define CPU_ID_ARCH_V4T 0x00020000 #define CPU_ID_ARCH_V5 0x00030000 #define CPU_ID_ARCH_V5T 0x00040000 #define CPU_ID_ARCH_V5TE 0x00050000 #define CPU_ID_ARCH_V5TEJ 0x00060000 #define CPU_ID_ARCH_V6 0x00070000 #define CPU_ID_CPUID_SCHEME 0x000f0000 #define CPU_ID_VARIANT_MASK 0x00f00000 /* Next three nybbles are part number */ #define CPU_ID_PARTNO_MASK 0x0000fff0 /* Intel XScale has sub fields in part number */ #define CPU_ID_XSCALE_COREGEN_MASK 0x0000e000 /* core generation */ #define CPU_ID_XSCALE_COREREV_MASK 0x00001c00 /* core revision */ #define CPU_ID_XSCALE_PRODUCT_MASK 0x000003f0 /* product number */ /* And finally, the revision number. */ #define CPU_ID_REVISION_MASK 0x0000000f /* Individual CPUs are probably best IDed by everything but the revision. 
*/ #define CPU_ID_CPU_MASK 0xfffffff0 /* ARM9 and later CPUs */ #define CPU_ID_ARM920T 0x41129200 #define CPU_ID_ARM920T_ALT 0x41009200 #define CPU_ID_ARM922T 0x41029220 #define CPU_ID_ARM926EJS 0x41069260 #define CPU_ID_ARM940T 0x41029400 /* XXX no MMU */ #define CPU_ID_ARM946ES 0x41049460 /* XXX no MMU */ #define CPU_ID_ARM966ES 0x41049660 /* XXX no MMU */ #define CPU_ID_ARM966ESR1 0x41059660 /* XXX no MMU */ #define CPU_ID_ARM1020E 0x4115a200 /* (AKA arm10 rev 1) */ #define CPU_ID_ARM1022ES 0x4105a220 #define CPU_ID_ARM1026EJS 0x4106a260 #define CPU_ID_ARM1136JS 0x4107b360 #define CPU_ID_ARM1136JSR1 0x4117b360 #define CPU_ID_ARM1176JZS 0x410fb760 #define CPU_ID_CORTEXA7 0x410fc070 #define CPU_ID_CORTEXA8R1 0x411fc080 #define CPU_ID_CORTEXA8R2 0x412fc080 #define CPU_ID_CORTEXA8R3 0x413fc080 #define CPU_ID_CORTEXA9R1 0x411fc090 #define CPU_ID_CORTEXA9R2 0x412fc090 #define CPU_ID_CORTEXA9R3 0x413fc090 #define CPU_ID_CORTEXA15R0 0x410fc0f0 #define CPU_ID_CORTEXA15R1 0x411fc0f0 #define CPU_ID_CORTEXA15R2 0x412fc0f0 #define CPU_ID_CORTEXA15R3 0x413fc0f0 #define CPU_ID_KRAIT 0x510f06f0 /* Snapdragon S4 Pro/APQ8064 */ #define CPU_ID_TI925T 0x54029250 #define CPU_ID_MV88FR131 0x56251310 /* Marvell Feroceon 88FR131 Core */ #define CPU_ID_MV88FR331 0x56153310 /* Marvell Feroceon 88FR331 Core */ #define CPU_ID_MV88FR571_VD 0x56155710 /* Marvell Feroceon 88FR571-VD Core (ID from datasheet) */ /* * LokiPlus core has also ID set to 0x41159260 and this define cause execution of unsupported * L2-cache instructions so need to disable it. 0x41159260 is a generic ARM926E-S ID. */ #ifdef SOC_MV_LOKIPLUS #define CPU_ID_MV88FR571_41 0x00000000 #else #define CPU_ID_MV88FR571_41 0x41159260 /* Marvell Feroceon 88FR571-VD Core (actual ID from CPU reg) */ #endif #define CPU_ID_MV88SV581X_V7 0x561F5810 /* Marvell Sheeva 88SV581x v7 Core */ #define CPU_ID_MV88SV584X_V7 0x562F5840 /* Marvell Sheeva 88SV584x v7 Core */ /* Marvell's CPUIDs with ARM ID in implementor field */ #define CPU_ID_ARM_88SV581X_V7 0x413FC080 /* Marvell Sheeva 88SV581x v7 Core */ #define CPU_ID_FA526 0x66015260 #define CPU_ID_FA626TE 0x66056260 #define CPU_ID_80200 0x69052000 #define CPU_ID_PXA250 0x69052100 /* sans core revision */ #define CPU_ID_PXA210 0x69052120 #define CPU_ID_PXA250A 0x69052100 /* 1st version Core */ #define CPU_ID_PXA210A 0x69052120 /* 1st version Core */ #define CPU_ID_PXA250B 0x69052900 /* 3rd version Core */ #define CPU_ID_PXA210B 0x69052920 /* 3rd version Core */ #define CPU_ID_PXA250C 0x69052d00 /* 4th version Core */ #define CPU_ID_PXA210C 0x69052d20 /* 4th version Core */ #define CPU_ID_PXA27X 0x69054110 #define CPU_ID_80321_400 0x69052420 #define CPU_ID_80321_600 0x69052430 #define CPU_ID_80321_400_B0 0x69052c20 #define CPU_ID_80321_600_B0 0x69052c30 #define CPU_ID_80219_400 0x69052e20 /* A0 stepping/revision. */ #define CPU_ID_80219_600 0x69052e30 /* A0 stepping/revision. 
*/ #define CPU_ID_81342 0x69056810 #define CPU_ID_IXP425 0x690541c0 #define CPU_ID_IXP425_533 0x690541c0 #define CPU_ID_IXP425_400 0x690541d0 #define CPU_ID_IXP425_266 0x690541f0 #define CPU_ID_IXP435 0x69054040 #define CPU_ID_IXP465 0x69054200 /* CPUID registers */ #define ARM_PFR0_ARM_ISA_MASK 0x0000000f #define ARM_PFR0_THUMB_MASK 0x000000f0 #define ARM_PFR0_THUMB 0x10 #define ARM_PFR0_THUMB2 0x30 #define ARM_PFR0_JAZELLE_MASK 0x00000f00 #define ARM_PFR0_THUMBEE_MASK 0x0000f000 #define ARM_PFR1_ARMV4_MASK 0x0000000f #define ARM_PFR1_SEC_EXT_MASK 0x000000f0 #define ARM_PFR1_MICROCTRL_MASK 0x00000f00 /* * Post-ARM3 CP15 registers: * * 1 Control register * * 2 Translation Table Base * * 3 Domain Access Control * * 4 Reserved * * 5 Fault Status * * 6 Fault Address * * 7 Cache/write-buffer Control * * 8 TLB Control * * 9 Cache Lockdown * * 10 TLB Lockdown * * 11 Reserved * * 12 Reserved * * 13 Process ID (for FCSE) * * 14 Reserved * * 15 Implementation Dependent */ /* Some of the definitions below need cleaning up for V3/V4 architectures */ /* CPU control register (CP15 register 1) */ #define CPU_CONTROL_MMU_ENABLE 0x00000001 /* M: MMU/Protection unit enable */ #define CPU_CONTROL_AFLT_ENABLE 0x00000002 /* A: Alignment fault enable */ #define CPU_CONTROL_DC_ENABLE 0x00000004 /* C: IDC/DC enable */ #define CPU_CONTROL_WBUF_ENABLE 0x00000008 /* W: Write buffer enable */ #define CPU_CONTROL_32BP_ENABLE 0x00000010 /* P: 32-bit exception handlers */ #define CPU_CONTROL_32BD_ENABLE 0x00000020 /* D: 32-bit addressing */ #define CPU_CONTROL_LABT_ENABLE 0x00000040 /* L: Late abort enable */ #define CPU_CONTROL_BEND_ENABLE 0x00000080 /* B: Big-endian mode */ #define CPU_CONTROL_SYST_ENABLE 0x00000100 /* S: System protection bit */ #define CPU_CONTROL_ROM_ENABLE 0x00000200 /* R: ROM protection bit */ #define CPU_CONTROL_CPCLK 0x00000400 /* F: Implementation defined */ #define CPU_CONTROL_SW_ENABLE 0x00000400 /* SW: SWP instruction enable */ #define CPU_CONTROL_BPRD_ENABLE 0x00000800 /* Z: Branch prediction enable */ #define CPU_CONTROL_IC_ENABLE 0x00001000 /* I: IC enable */ #define CPU_CONTROL_VECRELOC 0x00002000 /* V: Vector relocation */ #define CPU_CONTROL_ROUNDROBIN 0x00004000 /* RR: Predictable replacement */ #define CPU_CONTROL_V4COMPAT 0x00008000 /* L4: ARMv4 compat LDR R15 etc */ #define CPU_CONTROL_HAF_ENABLE 0x00020000 /* HA: Hardware Access Flag Enable */ #define CPU_CONTROL_FI_ENABLE 0x00200000 /* FI: Low interrupt latency */ #define CPU_CONTROL_UNAL_ENABLE 0x00400000 /* U: unaligned data access */ #define CPU_CONTROL_V6_EXTPAGE 0x00800000 /* XP: ARMv6 extended page tables */ #define CPU_CONTROL_V_ENABLE 0x01000000 /* VE: Interrupt vectors enable */ #define CPU_CONTROL_EX_BEND 0x02000000 /* EE: exception endianness */ #define CPU_CONTROL_L2_ENABLE 0x04000000 /* L2 Cache enabled */ #define CPU_CONTROL_NMFI 0x08000000 /* NMFI: Non maskable FIQ */ #define CPU_CONTROL_TR_ENABLE 0x10000000 /* TRE: TEX Remap*/ #define CPU_CONTROL_AF_ENABLE 0x20000000 /* AFE: Access Flag enable */ #define CPU_CONTROL_TE_ENABLE 0x40000000 /* TE: Thumb Exception enable */ #define CPU_CONTROL_IDC_ENABLE CPU_CONTROL_DC_ENABLE /* ARM11x6 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM11X6_AUXCTL_RS 0x00000001 /* return stack */ #define ARM11X6_AUXCTL_DB 0x00000002 /* dynamic branch prediction */ #define ARM11X6_AUXCTL_SB 0x00000004 /* static branch prediction */ #define ARM11X6_AUXCTL_TR 0x00000008 /* MicroTLB replacement strat. 
*/ #define ARM11X6_AUXCTL_EX 0x00000010 /* exclusive L1/L2 cache */ #define ARM11X6_AUXCTL_RA 0x00000020 /* clean entire cache disable */ #define ARM11X6_AUXCTL_RV 0x00000040 /* block transfer cache disable */ #define ARM11X6_AUXCTL_CZ 0x00000080 /* restrict cache size */ /* ARM1136 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM1136_AUXCTL_PFI 0x80000000 /* PFI: partial FI mode. */ /* This is an undocumented flag * used to work around a cache bug * in r0 steppings. See errata * 364296. */ /* ARM1176 Auxiliary Control Register (CP15 register 1, opcode2 1) */ #define ARM1176_AUXCTL_PHD 0x10000000 /* inst. prefetch halting disable */ #define ARM1176_AUXCTL_BFD 0x20000000 /* branch folding disable */ #define ARM1176_AUXCTL_FSD 0x40000000 /* force speculative ops disable */ #define ARM1176_AUXCTL_FIO 0x80000000 /* low intr latency override */ /* XScale Auxillary Control Register (CP15 register 1, opcode2 1) */ #define XSCALE_AUXCTL_K 0x00000001 /* dis. write buffer coalescing */ #define XSCALE_AUXCTL_P 0x00000002 /* ECC protect page table access */ /* Note: XSCale core 3 uses those for LLR DCcahce attributes */ #define XSCALE_AUXCTL_MD_WB_RA 0x00000000 /* mini-D$ wb, read-allocate */ #define XSCALE_AUXCTL_MD_WB_RWA 0x00000010 /* mini-D$ wb, read/write-allocate */ #define XSCALE_AUXCTL_MD_WT 0x00000020 /* mini-D$ wt, read-allocate */ #define XSCALE_AUXCTL_MD_MASK 0x00000030 /* Xscale Core 3 only */ #define XSCALE_AUXCTL_LLR 0x00000400 /* Enable L2 for LLR Cache */ /* Marvell Extra Features Register (CP15 register 1, opcode2 0) */ #define MV_DC_REPLACE_LOCK 0x80000000 /* Replace DCache Lock */ #define MV_DC_STREAM_ENABLE 0x20000000 /* DCache Streaming Switch */ #define MV_WA_ENABLE 0x10000000 /* Enable Write Allocate */ #define MV_L2_PREFETCH_DISABLE 0x01000000 /* L2 Cache Prefetch Disable */ #define MV_L2_INV_EVICT_ERR 0x00800000 /* L2 Invalidates Uncorrectable Error Line Eviction */ #define MV_L2_ENABLE 0x00400000 /* L2 Cache enable */ #define MV_IC_REPLACE_LOCK 0x00080000 /* Replace ICache Lock */ #define MV_BGH_ENABLE 0x00040000 /* Branch Global History Register Enable */ #define MV_BTB_DISABLE 0x00020000 /* Branch Target Buffer Disable */ #define MV_L1_PARERR_ENABLE 0x00010000 /* L1 Parity Error Enable */ /* Cache type register definitions */ #define CPU_CT_ISIZE(x) ((x) & 0xfff) /* I$ info */ #define CPU_CT_DSIZE(x) (((x) >> 12) & 0xfff) /* D$ info */ #define CPU_CT_S (1U << 24) /* split cache */ #define CPU_CT_CTYPE(x) (((x) >> 25) & 0xf) /* cache type */ #define CPU_CT_FORMAT(x) ((x) >> 29) +/* Cache type register definitions for ARM v7 */ +#define CPU_CT_IMINLINE(x) ((x) & 0xf) /* I$ min line size */ +#define CPU_CT_DMINLINE(x) (((x) >> 16) & 0xf) /* D$ min line size */ #define CPU_CT_CTYPE_WT 0 /* write-through */ #define CPU_CT_CTYPE_WB1 1 /* write-back, clean w/ read */ #define CPU_CT_CTYPE_WB2 2 /* w/b, clean w/ cp15,7 */ #define CPU_CT_CTYPE_WB6 6 /* w/b, cp15,7, lockdown fmt A */ #define CPU_CT_CTYPE_WB7 7 /* w/b, cp15,7, lockdown fmt B */ #define CPU_CT_xSIZE_LEN(x) ((x) & 0x3) /* line size */ #define CPU_CT_xSIZE_M (1U << 2) /* multiplier */ #define CPU_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x7) /* associativity */ #define CPU_CT_xSIZE_SIZE(x) (((x) >> 6) & 0x7) /* size */ #define CPU_CT_ARMV7 0x4 /* ARM v7 Cache type definitions */ #define CPUV7_CT_CTYPE_WT (1U << 31) #define CPUV7_CT_CTYPE_WB (1 << 30) #define CPUV7_CT_CTYPE_RA (1 << 29) #define CPUV7_CT_CTYPE_WA (1 << 28) #define CPUV7_CT_xSIZE_LEN(x) ((x) & 0x7) /* line size */ #define 
CPUV7_CT_xSIZE_ASSOC(x) (((x) >> 3) & 0x3ff) /* associativity */ #define CPUV7_CT_xSIZE_SET(x) (((x) >> 13) & 0x7fff) /* num sets */ #define CPU_CLIDR_CTYPE(reg,x) (((reg) >> ((x) * 3)) & 0x7) #define CPU_CLIDR_LOUIS(reg) (((reg) >> 21) & 0x7) #define CPU_CLIDR_LOC(reg) (((reg) >> 24) & 0x7) #define CPU_CLIDR_LOUU(reg) (((reg) >> 27) & 0x7) #define CACHE_ICACHE 1 #define CACHE_DCACHE 2 #define CACHE_SEP_CACHE 3 #define CACHE_UNI_CACHE 4 /* Fault status register definitions */ #define FAULT_USER 0x10 #if __ARM_ARCH < 6 #define FAULT_TYPE_MASK 0x0f #define FAULT_WRTBUF_0 0x00 /* Vector Exception */ #define FAULT_WRTBUF_1 0x02 /* Terminal Exception */ #define FAULT_BUSERR_0 0x04 /* External Abort on Linefetch -- Section */ #define FAULT_BUSERR_1 0x06 /* External Abort on Linefetch -- Page */ #define FAULT_BUSERR_2 0x08 /* External Abort on Non-linefetch -- Section */ #define FAULT_BUSERR_3 0x0a /* External Abort on Non-linefetch -- Page */ #define FAULT_BUSTRNL1 0x0c /* External abort on Translation -- Level 1 */ #define FAULT_BUSTRNL2 0x0e /* External abort on Translation -- Level 2 */ #define FAULT_ALIGN_0 0x01 /* Alignment */ #define FAULT_ALIGN_1 0x03 /* Alignment */ #define FAULT_TRANS_S 0x05 /* Translation -- Section */ #define FAULT_TRANS_F 0x06 /* Translation -- Flag */ #define FAULT_TRANS_P 0x07 /* Translation -- Page */ #define FAULT_DOMAIN_S 0x09 /* Domain -- Section */ #define FAULT_DOMAIN_P 0x0b /* Domain -- Page */ #define FAULT_PERM_S 0x0d /* Permission -- Section */ #define FAULT_PERM_P 0x0f /* Permission -- Page */ #define FAULT_IMPRECISE 0x400 /* Imprecise exception (XSCALE) */ #define FAULT_EXTERNAL 0x400 /* External abort (armv6+) */ #define FAULT_WNR 0x800 /* Write-not-Read access (armv6+) */ #else /* __ARM_ARCH < 6 */ #define FAULT_ALIGN 0x001 /* Alignment Fault */ #define FAULT_DEBUG 0x002 /* Debug Event */ #define FAULT_ACCESS_L1 0x003 /* Access Bit (L1) */ #define FAULT_ICACHE 0x004 /* Instruction cache maintenance */ #define FAULT_TRAN_L1 0x005 /* Translation Fault (L1) */ #define FAULT_ACCESS_L2 0x006 /* Access Bit (L2) */ #define FAULT_TRAN_L2 0x007 /* Translation Fault (L2) */ #define FAULT_EA_PREC 0x008 /* External Abort */ #define FAULT_DOMAIN_L1 0x009 /* Domain Fault (L1) */ #define FAULT_DOMAIN_L2 0x00B /* Domain Fault (L2) */ #define FAULT_EA_TRAN_L1 0x00C /* External Translation Abort (L1) */ #define FAULT_PERM_L1 0x00D /* Permission Fault (L1) */ #define FAULT_EA_TRAN_L2 0x00E /* External Translation Abort (L2) */ #define FAULT_PERM_L2 0x00F /* Permission Fault (L2) */ #define FAULT_TLB_CONFLICT 0x010 /* Permission Fault (L2) */ #define FAULT_EA_IMPREC 0x016 /* Asynchronous External Abort */ #define FAULT_PE_IMPREC 0x018 /* Asynchronous Parity Error */ #define FAULT_PARITY 0x019 /* Parity Error */ #define FAULT_PE_TRAN_L1 0x01C /* Parity Error on Translation (L1) */ #define FAULT_PE_TRAN_L2 0x01E /* Parity Error on Translation (L2) */ #define FSR_TO_FAULT(fsr) (((fsr) & 0xF) | \ ((((fsr) & (1 << 10)) >> (10 - 4)))) #define FSR_LPAE (1 << 9) /* LPAE indicator */ #define FSR_WNR (1 << 11) /* Write-not-Read access */ #define FSR_EXT (1 << 12) /* DECERR/SLVERR for external*/ #define FSR_CM (1 << 13) /* Cache maintenance fault */ #endif /* !__ARM_ARCH < 6 */ /* * Address of the vector page, low and high versions. 
*/ #ifndef __ASSEMBLER__ #define ARM_VECTORS_LOW 0x00000000U #define ARM_VECTORS_HIGH 0xffff0000U #else #define ARM_VECTORS_LOW 0 #define ARM_VECTORS_HIGH 0xffff0000 #endif /* * ARM Instructions * * 3 3 2 2 2 * 1 0 9 8 7 0 * +-------+-------------------------------------------------------+ * | cond | instruction dependant | * |c c c c| | * +-------+-------------------------------------------------------+ */ #define INSN_SIZE 4 /* Always 4 bytes */ #define INSN_COND_MASK 0xf0000000 /* Condition mask */ #define INSN_COND_AL 0xe0000000 /* Always condition */ #define THUMB_INSN_SIZE 2 /* Some are 4 bytes. */ #endif /* !MACHINE_ARMREG_H */ Index: stable/10 =================================================================== --- stable/10 (revision 283334) +++ stable/10 (revision 283335) Property changes on: stable/10 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r278518
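Note on the merged change, with an illustrative sketch: the substantive part of this merge is that the ARMv7 cache range operations now use the minimum I/D cache line sizes discovered from the Cache Type Register (decoded with the new CPU_CT_IMINLINE/CPU_CT_DMINLINE macros and stored in arm_{d,i,id}cache_min_line_size) instead of a single hard-coded line size. The standalone C sketch below only illustrates that decoding; the helper names, the sample CTR value, and the use of a plain function argument in place of the real "mrc p15, 0, <Rt>, c0, c0, 1" read in get_cachetype_cp15() are assumptions for the example, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Field extractors mirroring the CPU_CT_IMINLINE/CPU_CT_DMINLINE macros
 * added to armreg.h: IminLine is CTR[3:0], DminLine is CTR[19:16]. */
#define CT_IMINLINE(x)	((x) & 0xf)		/* log2(words) per I-cache line */
#define CT_DMINLINE(x)	(((x) >> 16) & 0xf)	/* log2(words) per D-cache line */

/*
 * Each field encodes log2 of the number of 4-byte words in the smallest
 * line, so the line size in bytes is 1 << (field + 2), matching the
 * arm_*_min_line_size computation in the merged get_cachetype_cp15().
 */
static void
decode_min_line_sizes(uint32_t ctr, int *dmin, int *imin, int *idmin)
{

	*dmin = 1 << (CT_DMINLINE(ctr) + 2);
	*imin = 1 << (CT_IMINLINE(ctr) + 2);
	/* Combined I+D range operations must step by the smaller size. */
	*idmin = (*dmin < *imin) ? *dmin : *imin;
}

int
main(void)
{
	/* Hypothetical CTR: DminLine = 4 (64 bytes), IminLine = 3 (32 bytes). */
	uint32_t ctr = (4u << 16) | 3u;
	int dmin, imin, idmin;

	decode_min_line_sizes(ctr, &dmin, &imin, &idmin);
	printf("dcache %d, icache %d, idcache %d\n", dmin, imin, idmin);
	return (0);
}

Running the sketch with the sample value prints "dcache 64, icache 32, idcache 32", which is the same relationship the patched assembly relies on when armv7_idcache_wbinv_range and armv7_icache_sync_range load their per-cache line sizes through the new .Larmv7_*_line_size literals.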