diff --git a/sys/arm/arm/cpufunc.c b/sys/arm/arm/cpufunc.c index a1bdad8037c1..a679f04f8f5e 100644 --- a/sys/arm/arm/cpufunc.c +++ b/sys/arm/arm/cpufunc.c @@ -1,592 +1,589 @@ /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * arm9 support code Copyright (C) 2001 ARM Ltd * Copyright (c) 1997 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * cpufuncs.c * * C functions for supporting CPU / MMU / TLB specific operations. 
* * Created : 30/01/97 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include /* PRIMARY CACHE VARIABLES */ int arm_picache_size; int arm_picache_line_size; int arm_picache_ways; int arm_pdcache_size; /* and unified */ int arm_pdcache_line_size; int arm_pdcache_ways; int arm_pcache_type; int arm_pcache_unified; int arm_dcache_align; int arm_dcache_align_mask; u_int arm_cache_level; u_int arm_cache_type[14]; u_int arm_cache_loc; #if defined(CPU_ARM9E) static void arm10_setup(void); #endif #ifdef CPU_MV_PJ4B static void pj4bv7_setup(void); #endif #if defined(CPU_ARM1176) static void arm11x6_setup(void); #endif #if defined(CPU_CORTEXA) || defined(CPU_KRAIT) static void cortexa_setup(void); #endif #if defined(CPU_ARM9E) struct cpu_functions armv5_ec_cpufuncs = { /* CPU functions */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ armv5_ec_setttb, /* Setttb */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm9_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ armv5_ec_icache_sync_range, /* icache_sync_range */ armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */ armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */ armv5_ec_dcache_inv_range, /* dcache_inv_range */ armv5_ec_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */ armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */ cpufunc_nullop, /* l2cache_wbinv_all */ (void *)cpufunc_nullop, /* l2cache_wbinv_range */ (void *)cpufunc_nullop, /* l2cache_inv_range */ (void *)cpufunc_nullop, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ armv4_drain_writebuf, /* drain_writebuf */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ arm9_context_switch, /* context_switch */ arm10_setup /* cpu setup */ }; struct cpu_functions sheeva_cpufuncs = { /* CPU functions */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ sheeva_setttb, /* Setttb */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm9_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ /* Cache operations */ armv5_ec_icache_sync_range, /* icache_sync_range */ armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */ sheeva_dcache_wbinv_range, /* dcache_wbinv_range */ sheeva_dcache_inv_range, /* dcache_inv_range */ sheeva_dcache_wb_range, /* dcache_wb_range */ armv4_idcache_inv_all, /* idcache_inv_all */ armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */ sheeva_idcache_wbinv_range, /* idcache_wbinv_all */ sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */ sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */ sheeva_l2cache_inv_range, /* l2cache_inv_range */ sheeva_l2cache_wb_range, /* l2cache_wb_range */ (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ /* Other functions */ armv4_drain_writebuf, /* drain_writebuf */ sheeva_cpu_sleep, /* sleep */ /* Soft functions */ arm9_context_switch, /* context_switch */ arm10_setup /* cpu setup */ }; #endif /* CPU_ARM9E */ #ifdef CPU_MV_PJ4B struct cpu_functions pj4bv7_cpufuncs = { /* Cache operations */ .cf_l2cache_wbinv_all = (void *)cpufunc_nullop, .cf_l2cache_wbinv_range = (void *)cpufunc_nullop, .cf_l2cache_inv_range = (void *)cpufunc_nullop, .cf_l2cache_wb_range = (void *)cpufunc_nullop, 
.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop, /* Other functions */ .cf_sleep = (void *)cpufunc_nullop, /* Soft functions */ .cf_setup = pj4bv7_setup }; #endif /* CPU_MV_PJ4B */ #if defined(CPU_ARM1176) struct cpu_functions arm1176_cpufuncs = { /* Cache operations */ .cf_l2cache_wbinv_all = (void *)cpufunc_nullop, .cf_l2cache_wbinv_range = (void *)cpufunc_nullop, .cf_l2cache_inv_range = (void *)cpufunc_nullop, .cf_l2cache_wb_range = (void *)cpufunc_nullop, .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop, /* Other functions */ .cf_sleep = arm11x6_sleep, /* Soft functions */ .cf_setup = arm11x6_setup }; #endif /*CPU_ARM1176 */ #if defined(CPU_CORTEXA) || defined(CPU_KRAIT) struct cpu_functions cortexa_cpufuncs = { /* Cache operations */ /* * Note: For CPUs using the PL310 the L2 ops are filled in when the * L2 cache controller is actually enabled. */ .cf_l2cache_wbinv_all = cpufunc_nullop, .cf_l2cache_wbinv_range = (void *)cpufunc_nullop, .cf_l2cache_inv_range = (void *)cpufunc_nullop, .cf_l2cache_wb_range = (void *)cpufunc_nullop, .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop, /* Other functions */ .cf_sleep = armv7_cpu_sleep, /* Soft functions */ .cf_setup = cortexa_setup }; #endif /* CPU_CORTEXA || CPU_KRAIT */ /* * Global constants also used by locore.s */ struct cpu_functions cpufuncs; u_int cputype; -#if __ARM_ARCH <= 5 -u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */ -#endif #if defined (CPU_ARM9E) || \ defined(CPU_ARM1176) || \ defined(CPU_MV_PJ4B) || \ defined(CPU_CORTEXA) || defined(CPU_KRAIT) static void get_cachetype_cp15(void); /* Additional cache information local to this file. Log2 of some of the above numbers. */ static int arm_dcache_l2_nsets; static int arm_dcache_l2_assoc; static int arm_dcache_l2_linesize; static void get_cachetype_cp15(void) { u_int ctype, isize, dsize, cpuid; u_int clevel, csize, i, sel; u_int multiplier; u_char type; ctype = cp15_ctr_get(); cpuid = cp15_midr_get(); /* * ...and thus spake the ARM ARM: * * If an value corresponding to an unimplemented or * reserved ID register is encountered, the System Control * processor returns the value of the main ID register. */ if (ctype == cpuid) goto out; if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) { __asm __volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (clevel)); arm_cache_level = clevel; arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level); i = 0; while ((type = (clevel & 0x7)) && i < 7) { if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE || type == CACHE_SEP_CACHE) { sel = i << 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; arm_dcache_align = 1 << (CPUV7_CT_xSIZE_LEN(csize) + 4); arm_dcache_align_mask = arm_dcache_align - 1; } if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) { sel = (i << 1) | 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } i++; clevel >>= 3; } } else { if ((ctype & CPU_CT_S) == 0) arm_pcache_unified = 1; /* * If you want to know how this code works, go read the ARM ARM. */ arm_pcache_type = CPU_CT_CTYPE(ctype); if (arm_pcache_unified == 0) { isize = CPU_CT_ISIZE(ctype); multiplier = (isize & CPU_CT_xSIZE_M) ? 
3 : 2; arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3); if (CPU_CT_xSIZE_ASSOC(isize) == 0) { if (isize & CPU_CT_xSIZE_M) arm_picache_line_size = 0; /* not present */ else arm_picache_ways = 1; } else { arm_picache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(isize) - 1); } arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8); } dsize = CPU_CT_DSIZE(ctype); multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2; arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3); if (CPU_CT_xSIZE_ASSOC(dsize) == 0) { if (dsize & CPU_CT_xSIZE_M) arm_pdcache_line_size = 0; /* not present */ else arm_pdcache_ways = 1; } else { arm_pdcache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(dsize) - 1); } arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8); arm_dcache_align = arm_pdcache_line_size; arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2; arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3; arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize); out: arm_dcache_align_mask = arm_dcache_align - 1; } } #endif /* ARM9 || XSCALE */ /* * Cannot panic here as we may not have a console yet ... */ int set_cpufuncs(void) { cputype = cp15_midr_get(); cputype &= CPU_ID_CPU_MASK; #if defined(CPU_ARM9E) if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD || cputype == CPU_ID_MV88FR571_41) { uint32_t sheeva_ctrl; sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE | MV_L2_ENABLE); /* * Workaround for Marvell MV78100 CPU: Cache prefetch * mechanism may affect the cache coherency validity, * so it needs to be disabled. * * Refer to errata document MV-S501058-00C.pdf (p. 3.1 * L2 Prefetching Mechanism) for details. */ if (cputype == CPU_ID_MV88FR571_VD || cputype == CPU_ID_MV88FR571_41) sheeva_ctrl |= MV_L2_PREFETCH_DISABLE; sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl); cpufuncs = sheeva_cpufuncs; get_cachetype_cp15(); pmap_pte_init_generic(); goto out; } else if (cputype == CPU_ID_ARM926EJS) { cpufuncs = armv5_ec_cpufuncs; get_cachetype_cp15(); pmap_pte_init_generic(); goto out; } #endif /* CPU_ARM9E */ #if defined(CPU_ARM1176) if (cputype == CPU_ID_ARM1176JZS) { cpufuncs = arm1176_cpufuncs; get_cachetype_cp15(); goto out; } #endif /* CPU_ARM1176 */ #if defined(CPU_CORTEXA) || defined(CPU_KRAIT) switch(cputype & CPU_ID_SCHEME_MASK) { case CPU_ID_CORTEXA5: case CPU_ID_CORTEXA7: case CPU_ID_CORTEXA8: case CPU_ID_CORTEXA9: case CPU_ID_CORTEXA12: case CPU_ID_CORTEXA15: case CPU_ID_CORTEXA53: case CPU_ID_CORTEXA57: case CPU_ID_CORTEXA72: case CPU_ID_KRAIT300: cpufuncs = cortexa_cpufuncs; get_cachetype_cp15(); goto out; default: break; } #endif /* CPU_CORTEXA || CPU_KRAIT */ #if defined(CPU_MV_PJ4B) if (cputype == CPU_ID_MV88SV581X_V7 || cputype == CPU_ID_MV88SV584X_V7 || cputype == CPU_ID_ARM_88SV581X_V7) { cpufuncs = pj4bv7_cpufuncs; get_cachetype_cp15(); goto out; } #endif /* CPU_MV_PJ4B */ /* * Bzzzz. And the answer was ... 
*/ panic("No support for this CPU type (%08x) in kernel", cputype); return(ARCHITECTURE_NOT_PRESENT); out: uma_set_align(arm_dcache_align_mask); return (0); } /* * CPU Setup code */ #if defined(CPU_ARM9E) static void arm10_setup(void) { int cpuctrl, cpuctrlmask; cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS cpuctrl |= CPU_CONTROL_AFLT_ENABLE; #endif #ifdef __ARMEB__ cpuctrl |= CPU_CONTROL_BEND_ENABLE; #endif /* Clear out the cache */ cpu_idcache_wbinv_all(); /* Now really make sure they are clean. */ __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); if (vector_page == ARM_VECTORS_HIGH) cpuctrl |= CPU_CONTROL_VECRELOC; /* Set the control register */ cpu_control(0xffffffff, cpuctrl); /* And again. */ cpu_idcache_wbinv_all(); } #endif /* CPU_ARM9E || CPU_ARM10 */ #if defined(CPU_ARM1176) \ || defined(CPU_MV_PJ4B) \ || defined(CPU_CORTEXA) || defined(CPU_KRAIT) static __inline void cpu_scc_setup_ccnt(void) { /* This is how you give userland access to the CCNT and PMCn * registers. * BEWARE! This gives write access also, which may not be what * you want! */ #ifdef _PMC_USER_READ_WRITE_ /* Set PMUSERENR[0] to allow userland access */ cp15_pmuserenr_set(1); #endif #if defined(CPU_ARM1176) /* Set PMCR[2,0] to enable counters and reset CCNT */ cp15_pmcr_set(5); #else /* Set up the PMCCNTR register as a cyclecounter: * Set PMINTENCLR to 0xFFFFFFFF to block interrupts * Set PMCR[2,0] to enable counters and reset CCNT * Set PMCNTENSET to 0x80000000 to enable CCNT */ cp15_pminten_clr(0xFFFFFFFF); cp15_pmcr_set(5); cp15_pmcnten_set(0x80000000); #endif } #endif #if defined(CPU_ARM1176) static void arm11x6_setup(void) { uint32_t auxctrl, auxctrl_wax; uint32_t tmp, tmp2; uint32_t cpuid; cpuid = cp15_midr_get(); auxctrl = 0; auxctrl_wax = ~0; /* * Enable an errata workaround */ if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */ auxctrl = ARM1176_AUXCTL_PHD; auxctrl_wax = ~ARM1176_AUXCTL_PHD; } tmp = cp15_actlr_get(); tmp2 = tmp; tmp &= auxctrl_wax; tmp |= auxctrl; if (tmp != tmp2) cp15_actlr_set(tmp); cpu_scc_setup_ccnt(); } #endif /* CPU_ARM1176 */ #ifdef CPU_MV_PJ4B static void pj4bv7_setup(void) { pj4b_config(); cpu_scc_setup_ccnt(); } #endif /* CPU_MV_PJ4B */ #if defined(CPU_CORTEXA) || defined(CPU_KRAIT) static void cortexa_setup(void) { cpu_scc_setup_ccnt(); } #endif /* CPU_CORTEXA || CPU_KRAIT */ diff --git a/sys/arm/arm/identcpu-v4.c b/sys/arm/arm/identcpu-v4.c deleted file mode 100644 index 243880485bc6..000000000000 --- a/sys/arm/arm/identcpu-v4.c +++ /dev/null @@ -1,341 +0,0 @@ -/* $NetBSD: cpu.c,v 1.55 2004/02/13 11:36:10 wiz Exp $ */ - -/*- - * Copyright (c) 1995 Mark Brinicombe. - * Copyright (c) 1995 Brini. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Brini. - * 4. The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * RiscBSD kernel project - * - * cpu.c - * - * Probing and configuration for the master CPU - * - * Created : 10/10/95 - */ - -#include -__FBSDID("$FreeBSD$"); -#include -#include -#include -#include -#include -#include -#include - -#include - -char machine[] = "arm"; - -SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, - machine, 0, "Machine class"); - -static const char * const generic_steppings[16] = { - "rev 0", "rev 1", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const xscale_steppings[16] = { - "step A-0", "step A-1", "step B-0", "step C-0", - "step D-0", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const i80219_steppings[16] = { - "step A-0", "rev 1", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const i80321_steppings[16] = { - "step A-0", "step B-0", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const i81342_steppings[16] = { - "step A-0", "rev 1", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -/* Steppings for PXA2[15]0 */ -static const char * const pxa2x0_steppings[16] = { - "step A-0", "step A-1", "step B-0", "step B-1", - "step B-2", "step C-0", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -/* Steppings for PXA255/26x. 
- * rev 5: PXA26x B0, rev 6: PXA255 A0 - */ -static const char * const pxa255_steppings[16] = { - "rev 0", "rev 1", "rev 2", "step A-0", - "rev 4", "step B-0", "step A-0", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -/* Stepping for PXA27x */ -static const char * const pxa27x_steppings[16] = { - "step A-0", "step A-1", "step B-0", "step B-1", - "step C-0", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -struct cpuidtab { - u_int32_t cpuid; - enum cpu_class cpu_class; - const char *cpu_name; - const char * const *cpu_steppings; -}; - -const struct cpuidtab cpuids[] = { - { CPU_ID_ARM920T, CPU_CLASS_ARM9TDMI, "ARM920T", - generic_steppings }, - { CPU_ID_ARM920T_ALT, CPU_CLASS_ARM9TDMI, "ARM920T", - generic_steppings }, - { CPU_ID_ARM922T, CPU_CLASS_ARM9TDMI, "ARM922T", - generic_steppings }, - { CPU_ID_ARM926EJS, CPU_CLASS_ARM9EJS, "ARM926EJ-S", - generic_steppings }, - { CPU_ID_ARM940T, CPU_CLASS_ARM9TDMI, "ARM940T", - generic_steppings }, - { CPU_ID_ARM946ES, CPU_CLASS_ARM9ES, "ARM946E-S", - generic_steppings }, - { CPU_ID_ARM966ES, CPU_CLASS_ARM9ES, "ARM966E-S", - generic_steppings }, - { CPU_ID_ARM966ESR1, CPU_CLASS_ARM9ES, "ARM966E-S", - generic_steppings }, - { CPU_ID_FA526, CPU_CLASS_ARM9TDMI, "FA526", - generic_steppings }, - { CPU_ID_FA626TE, CPU_CLASS_ARM9ES, "FA626TE", - generic_steppings }, - - { CPU_ID_TI925T, CPU_CLASS_ARM9TDMI, "TI ARM925T", - generic_steppings }, - - { CPU_ID_ARM1020E, CPU_CLASS_ARM10E, "ARM1020E", - generic_steppings }, - { CPU_ID_ARM1022ES, CPU_CLASS_ARM10E, "ARM1022E-S", - generic_steppings }, - { CPU_ID_ARM1026EJS, CPU_CLASS_ARM10EJ, "ARM1026EJ-S", - generic_steppings }, - - { CPU_ID_80200, CPU_CLASS_XSCALE, "i80200", - xscale_steppings }, - - { CPU_ID_80321_400, CPU_CLASS_XSCALE, "i80321 400MHz", - i80321_steppings }, - { CPU_ID_80321_600, CPU_CLASS_XSCALE, "i80321 600MHz", - i80321_steppings }, - { CPU_ID_80321_400_B0, CPU_CLASS_XSCALE, "i80321 400MHz", - i80321_steppings }, - { CPU_ID_80321_600_B0, CPU_CLASS_XSCALE, "i80321 600MHz", - i80321_steppings }, - - { CPU_ID_81342, CPU_CLASS_XSCALE, "i81342", - i81342_steppings }, - - { CPU_ID_80219_400, CPU_CLASS_XSCALE, "i80219 400MHz", - i80219_steppings }, - { CPU_ID_80219_600, CPU_CLASS_XSCALE, "i80219 600MHz", - i80219_steppings }, - - { CPU_ID_PXA27X, CPU_CLASS_XSCALE, "PXA27x", - pxa27x_steppings }, - { CPU_ID_PXA250A, CPU_CLASS_XSCALE, "PXA250", - pxa2x0_steppings }, - { CPU_ID_PXA210A, CPU_CLASS_XSCALE, "PXA210", - pxa2x0_steppings }, - { CPU_ID_PXA250B, CPU_CLASS_XSCALE, "PXA250", - pxa2x0_steppings }, - { CPU_ID_PXA210B, CPU_CLASS_XSCALE, "PXA210", - pxa2x0_steppings }, - { CPU_ID_PXA250C, CPU_CLASS_XSCALE, "PXA255", - pxa255_steppings }, - { CPU_ID_PXA210C, CPU_CLASS_XSCALE, "PXA210", - pxa2x0_steppings }, - - { CPU_ID_MV88FR131, CPU_CLASS_MARVELL, "Feroceon 88FR131", - generic_steppings }, - - { CPU_ID_MV88FR571_VD, CPU_CLASS_MARVELL, "Feroceon 88FR571-VD", - generic_steppings }, - { 0, CPU_CLASS_NONE, NULL, NULL } -}; - -struct cpu_classtab { - const char *class_name; - const char *class_option; -}; - -const struct cpu_classtab cpu_classes[] = { - { "unknown", NULL }, /* CPU_CLASS_NONE */ - { "ARM9TDMI", "CPU_ARM9TDMI" }, /* CPU_CLASS_ARM9TDMI */ - { "ARM9E-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9ES */ - { "ARM9EJ-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9EJS */ - { "ARM10E", "CPU_ARM10" }, /* CPU_CLASS_ARM10E */ - { "ARM10EJ", "CPU_ARM10" }, /* CPU_CLASS_ARM10EJ */ - { "XScale", 
"CPU_XSCALE_..." }, /* CPU_CLASS_XSCALE */ - { "Marvell", "CPU_MARVELL" }, /* CPU_CLASS_MARVELL */ -}; - -/* - * Report the type of the specified arm processor. This uses the generic and - * arm specific information in the cpu structure to identify the processor. - * The remaining fields in the cpu structure are filled in appropriately. - */ - -static const char * const wtnames[] = { - "write-through", - "write-back", - "write-back", - "**unknown 3**", - "**unknown 4**", - "write-back-locking", /* XXX XScale-specific? */ - "write-back-locking-A", - "write-back-locking-B", - "**unknown 8**", - "**unknown 9**", - "**unknown 10**", - "**unknown 11**", - "**unknown 12**", - "**unknown 13**", - "write-back-locking-C", - "**unknown 15**", -}; - -static void -print_enadis(int enadis, char *s) -{ - - printf(" %s %sabled", s, (enadis == 0) ? "dis" : "en"); -} - -enum cpu_class cpu_class = CPU_CLASS_NONE; - -void -identify_arm_cpu(void) -{ - u_int cpuid, ctrl; - int i; - - ctrl = cp15_sctlr_get(); - cpuid = cp15_midr_get(); - - if (cpuid == 0) { - printf("Processor failed probe - no CPU ID\n"); - return; - } - - for (i = 0; cpuids[i].cpuid != 0; i++) - if (cpuids[i].cpuid == (cpuid & CPU_ID_CPU_MASK)) { - cpu_class = cpuids[i].cpu_class; - printf("CPU: %s %s (%s core)\n", - cpuids[i].cpu_name, - cpuids[i].cpu_steppings[cpuid & - CPU_ID_REVISION_MASK], - cpu_classes[cpu_class].class_name); - break; - } - if (cpuids[i].cpuid == 0) - printf("unknown CPU (ID = 0x%x)\n", cpuid); - - printf(" "); - - if (ctrl & CPU_CONTROL_BEND_ENABLE) - printf(" Big-endian"); - else - printf(" Little-endian"); - - switch (cpu_class) { - case CPU_CLASS_ARM9TDMI: - case CPU_CLASS_ARM9ES: - case CPU_CLASS_ARM9EJS: - case CPU_CLASS_ARM10E: - case CPU_CLASS_ARM10EJ: - case CPU_CLASS_XSCALE: - case CPU_CLASS_MARVELL: - print_enadis(ctrl & CPU_CONTROL_DC_ENABLE, "DC"); - print_enadis(ctrl & CPU_CONTROL_IC_ENABLE, "IC"); -#if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) - i = sheeva_control_ext(0, 0); - print_enadis(i & MV_WA_ENABLE, "WA"); - print_enadis(i & MV_DC_STREAM_ENABLE, "DC streaming"); - printf("\n "); - print_enadis((i & MV_BTB_DISABLE) == 0, "BTB"); - print_enadis(i & MV_L2_ENABLE, "L2"); - print_enadis((i & MV_L2_PREFETCH_DISABLE) == 0, - "L2 prefetch"); - printf("\n "); -#endif - break; - default: - break; - } - - print_enadis(ctrl & CPU_CONTROL_WBUF_ENABLE, "WB"); - if (ctrl & CPU_CONTROL_LABT_ENABLE) - printf(" LABT"); - else - printf(" EABT"); - - print_enadis(ctrl & CPU_CONTROL_BPRD_ENABLE, "branch prediction"); - printf("\n"); - - /* Print cache info. */ - if (arm_picache_line_size == 0 && arm_pdcache_line_size == 0) - return; - - if (arm_pcache_unified) { - printf(" %dKB/%dB %d-way %s unified cache\n", - arm_pdcache_size / 1024, - arm_pdcache_line_size, arm_pdcache_ways, - wtnames[arm_pcache_type]); - } else { - printf(" %dKB/%dB %d-way instruction cache\n", - arm_picache_size / 1024, - arm_picache_line_size, arm_picache_ways); - printf(" %dKB/%dB %d-way %s data cache\n", - arm_pdcache_size / 1024, - arm_pdcache_line_size, arm_pdcache_ways, - wtnames[arm_pcache_type]); - } -} diff --git a/sys/arm/arm/intr.c b/sys/arm/arm/intr.c deleted file mode 100644 index b4c5b8d0e3ea..000000000000 --- a/sys/arm/arm/intr.c +++ /dev/null @@ -1,204 +0,0 @@ -/* $NetBSD: intr.c,v 1.12 2003/07/15 00:24:41 lukem Exp $ */ - -/*- - * SPDX-License-Identifier: BSD-4-Clause - * - * Copyright (c) 2004 Olivier Houchard. - * Copyright (c) 1994-1998 Mark Brinicombe. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Mark Brinicombe - * for the NetBSD Project. - * 4. The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * Soft interrupt and other generic interrupt functions. - */ - -#include "opt_platform.h" -#include "opt_hwpmc_hooks.h" - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#ifdef FDT -#include -#include -#endif - -#define INTRNAME_LEN (MAXCOMLEN + 1) - -typedef void (*mask_fn)(void *); - -static struct intr_event *intr_events[NIRQ]; - -void intr_irq_handler(struct trapframe *); - -void (*arm_post_filter)(void *) = NULL; -int (*arm_config_irq)(int irq, enum intr_trigger trig, - enum intr_polarity pol) = NULL; - -/* Data for statistics reporting. */ -u_long intrcnt[NIRQ]; -char intrnames[(NIRQ * INTRNAME_LEN) + 1]; -size_t sintrcnt = sizeof(intrcnt); -size_t sintrnames = sizeof(intrnames); - -/* - * Pre-format intrnames into an array of fixed-size strings containing spaces. - * This allows us to avoid the need for an intermediate table of indices into - * the names and counts arrays, while still meeting the requirements and - * assumptions of vmstat(8) and the kdb "show intrcnt" command, the two - * consumers of this data. 
- */ -static void -intr_init(void *unused) -{ - int i; - - for (i = 0; i < NIRQ; ++i) { - snprintf(&intrnames[i * INTRNAME_LEN], INTRNAME_LEN, "%-*s", - INTRNAME_LEN - 1, ""); - } -} - -SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); - -#ifdef FDT -int -intr_fdt_map_irq(phandle_t iparent, pcell_t *intr, int icells) -{ - fdt_pic_decode_t intr_decode; - phandle_t intr_parent; - int i, rv, interrupt, trig, pol; - - intr_parent = OF_node_from_xref(iparent); - for (i = 0; i < icells; i++) - intr[i] = cpu_to_fdt32(intr[i]); - - for (i = 0; fdt_pic_table[i] != NULL; i++) { - intr_decode = fdt_pic_table[i]; - rv = intr_decode(intr_parent, intr, &interrupt, &trig, &pol); - - if (rv == 0) { - /* This was recognized as our PIC and decoded. */ - interrupt = FDT_MAP_IRQ(intr_parent, interrupt); - return (interrupt); - } - } - - /* Not in table, so guess */ - interrupt = FDT_MAP_IRQ(intr_parent, fdt32_to_cpu(intr[0])); - - return (interrupt); -} -#endif - -void -arm_setup_irqhandler(const char *name, driver_filter_t *filt, - void (*hand)(void*), void *arg, int irq, int flags, void **cookiep) -{ - struct intr_event *event; - int error; - - if (irq < 0 || irq >= NIRQ) - return; - event = intr_events[irq]; - if (event == NULL) { - error = intr_event_create(&event, (void *)irq, 0, irq, - (mask_fn)arm_mask_irq, (mask_fn)arm_unmask_irq, - arm_post_filter, NULL, "intr%d:", irq); - if (error) - return; - intr_events[irq] = event; - snprintf(&intrnames[irq * INTRNAME_LEN], INTRNAME_LEN, - "irq%d: %-*s", irq, INTRNAME_LEN - 1, name); - } - intr_event_add_handler(event, name, filt, hand, arg, - intr_priority(flags), flags, cookiep); -} - -int -arm_remove_irqhandler(int irq, void *cookie) -{ - struct intr_event *event; - int error; - - event = intr_events[irq]; - arm_mask_irq(irq); - - error = intr_event_remove_handler(cookie); - - if (!CK_SLIST_EMPTY(&event->ie_handlers)) - arm_unmask_irq(irq); - return (error); -} - -void dosoftints(void); -void -dosoftints(void) -{ -} - -void -intr_irq_handler(struct trapframe *frame) -{ - struct intr_event *event; - int i; - - VM_CNT_INC(v_intr); - i = -1; - while ((i = arm_get_next_irq(i)) != -1) { - intrcnt[i]++; - event = intr_events[i]; - if (intr_event_handle(event, frame) != 0) { - /* XXX: Log stray IRQs */ - arm_mask_irq(i); - } - } -#ifdef HWPMC_HOOKS - if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN)) - pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, frame); -#endif -} diff --git a/sys/arm/arm/locore-v4.S b/sys/arm/arm/locore-v4.S deleted file mode 100644 index 8add9aee98e2..000000000000 --- a/sys/arm/arm/locore-v4.S +++ /dev/null @@ -1,494 +0,0 @@ -/* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */ - -/*- - * Copyright 2011 Semihalf - * Copyright (C) 1994-1997 Mark Brinicombe - * Copyright (C) 1994 Brini - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Brini. - * 4. 
The name of Brini may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include "assym.inc" -#include -#include -#include -#include - -__FBSDID("$FreeBSD$"); - -/* 2K initial stack is plenty, it is only used by initarm() */ -#define INIT_ARM_STACK_SIZE 2048 - -#define CPWAIT_BRANCH \ - sub pc, pc, #4 - -#define CPWAIT(tmp) \ - mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ - mov tmp, tmp /* wait for it to complete */ ;\ - CPWAIT_BRANCH /* branch to next insn */ - -/* - * This is for libkvm, and should be the address of the beginning - * of the kernel text segment (not necessarily the same as kernbase). - * - * These are being phased out. Newer copies of libkvm don't need these - * values as the information is added to the core file by inspecting - * the running kernel. - */ - .text - .align 2 - - .globl kernbase - .set kernbase,KERNVIRTADDR - -#ifdef PHYSADDR -.globl physaddr -.set physaddr,PHYSADDR -#endif - -/* - * On entry for FreeBSD boot ABI: - * r0 - metadata pointer or 0 (boothowto on AT91's boot2) - * r1 - if (r0 == 0) then metadata pointer - * On entry for Linux boot ABI: - * r0 - 0 - * r1 - machine type (passed as arg2 to initarm) - * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm) - * - * For both types of boot we gather up the args, put them in a struct arm_boot_params - * structure and pass that to initarm. - */ - .globl btext -btext: -ASENTRY_NP(_start) - STOP_UNWINDING /* Can't unwind into the bootloader! */ - - mov r9, r0 /* 0 or boot mode from boot2 */ - mov r8, r1 /* Save Machine type */ - mov ip, r2 /* Save meta data */ - mov fp, r3 /* Future expansion */ - - /* Make sure interrupts are disabled. */ - mrs r7, cpsr - orr r7, r7, #(PSR_I | PSR_F) - msr cpsr_c, r7 - -#if defined (FLASHADDR) && defined(LOADERRAMADDR) -/* - * Sanity check the configuration. - * FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases. - * ARMv4 and ARMv5 make assumptions on where they are loaded. - * TODO: Fix the ARMv4/v5 case. - */ -#ifndef PHYSADDR -#error PHYSADDR must be defined for this configuration -#endif - - /* Check if we're running from flash. */ - ldr r7, =FLASHADDR - /* - * If we're running with MMU disabled, test against the - * physical address instead. 
- */ - mrc CP15_SCTLR(r2) - ands r2, r2, #CPU_CONTROL_MMU_ENABLE - ldreq r6, =PHYSADDR - ldrne r6, =LOADERRAMADDR - cmp r7, r6 - bls flash_lower - cmp r7, pc - bhi from_ram - b do_copy - -flash_lower: - cmp r6, pc - bls from_ram -do_copy: - ldr r7, =KERNBASE - adr r1, _start - ldr r0, Lreal_start - ldr r2, Lend - sub r2, r2, r0 - sub r0, r0, r7 - add r0, r0, r6 - mov r4, r0 - bl memcpy - ldr r0, Lram_offset - add pc, r4, r0 -Lram_offset: .word from_ram-_C_LABEL(_start) -from_ram: - nop -#endif - -disable_mmu: - /* Disable MMU for a while */ - mrc CP15_SCTLR(r2) - bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\ - CPU_CONTROL_WBUF_ENABLE) - bic r2, r2, #(CPU_CONTROL_IC_ENABLE) - bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE) - mcr CP15_SCTLR(r2) - - nop - nop - nop - CPWAIT(r0) - -Lunmapped: - /* - * Build page table from scratch. - */ - - /* - * Figure out the physical address we're loaded at by assuming this - * entry point code is in the first L1 section and so if we clear the - * offset bits of the pc that will give us the section-aligned load - * address, which remains in r5 throughout all the following code. - */ - ldr r2, =(L1_S_OFFSET) - bic r5, pc, r2 - - /* Find the delta between VA and PA, result stays in r0 throughout. */ - adr r0, Lpagetable - bl translate_va_to_pa - - /* - * First map the entire 4GB address space as VA=PA. It's mapped as - * normal (cached) memory because it's for things like accessing the - * parameters passed in from the bootloader, which might be at any - * physical address, different for every platform. - */ - mov r1, #0 - mov r2, #0 - mov r3, #4096 - bl build_pagetables - - /* - * Next we do 64MiB starting at the physical load address, mapped to - * the VA the kernel is linked for. - */ - mov r1, r5 - ldr r2, =(KERNVIRTADDR) - mov r3, #64 - bl build_pagetables -#if defined(PHYSADDR) && (KERNVIRTADDR != KERNBASE) -/* - * If the kernel wasn't loaded at the beginning of the ram, map the memory - * before the kernel too, as some ports use that for pagetables, stack, etc... - */ - ldr r1, =PHYSADDR - ldr r2, =KERNBASE - ldr r3, =((KERNVIRTADDR - KERNBASE) / L1_S_SIZE) - bl build_pagetables -#endif - - /* Create a device mapping for early_printf if specified. */ -#if defined(SOCDEV_PA) && defined(SOCDEV_VA) - ldr r1, =SOCDEV_PA - ldr r2, =SOCDEV_VA - mov r3, #1 - bl build_device_pagetables -#endif - - mcr p15, 0, r0, c2, c0, 0 /* Set TTB */ - mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */ - - /* Set the Domain Access register. Very important! */ - mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT) - mcr p15, 0, r0, c3, c0, 0 - /* - * Enable MMU. - */ - mrc CP15_SCTLR(r0) - orr r0, r0, #(CPU_CONTROL_MMU_ENABLE) - mcr CP15_SCTLR(r0) - nop - nop - nop - CPWAIT(r0) - - /* Transition the PC from physical to virtual addressing. 
*/ - ldr pc,=mmu_done - -mmu_done: - nop - adr r1, .Lstart - ldmia r1, {r1, r2, sp} /* Set initial stack and */ - sub r2, r2, r1 /* get zero init data */ - mov r3, #0 -.L1: - str r3, [r1], #0x0004 /* get zero init data */ - subs r2, r2, #4 - bgt .L1 - -virt_done: - mov r1, #28 /* loader info size is 28 bytes also second arg */ - subs sp, sp, r1 /* allocate arm_boot_params struct on stack */ - mov r0, sp /* loader info pointer is first arg */ - bic sp, sp, #7 /* align stack to 8 bytes */ - str r1, [r0] /* Store length of loader info */ - str r9, [r0, #4] /* Store r0 from boot loader */ - str r8, [r0, #8] /* Store r1 from boot loader */ - str ip, [r0, #12] /* store r2 from boot loader */ - str fp, [r0, #16] /* store r3 from boot loader */ - str r5, [r0, #20] /* store the physical address */ - adr r4, Lpagetable /* load the pagetable address */ - ldr r5, [r4, #4] - str r5, [r0, #24] /* store the pagetable address */ - mov fp, #0 /* trace back starts here */ - bl _C_LABEL(initarm) /* Off we go */ - - /* init arm will return the new stack pointer. */ - mov sp, r0 - - bl _C_LABEL(mi_startup) /* call mi_startup()! */ - - adr r0, .Lmainreturned - b _C_LABEL(panic) - /* NOTREACHED */ -END(_start) - -#define VA_TO_PA_POINTER(name, table) \ -name: ;\ - .word . ;\ - .word table - -/* - * Returns the physical address of a magic va to pa pointer. - * r0 - The pagetable data pointer. This must be built using the - * VA_TO_PA_POINTER macro. - * e.g. - * VA_TO_PA_POINTER(Lpagetable, pagetable) - * ... - * adr r0, Lpagetable - * bl translate_va_to_pa - * r0 will now contain the physical address of pagetable - * r1, r2 - Trashed - */ -translate_va_to_pa: - ldr r1, [r0] - sub r2, r1, r0 - /* At this point: r2 = VA - PA */ - - /* - * Find the physical address of the table. After these two - * instructions: - * r1 = va(pagetable) - * - * r0 = va(pagetable) - (VA - PA) - * = va(pagetable) - VA + PA - * = pa(pagetable) - */ - ldr r1, [r0, #4] - sub r0, r1, r2 - RET - -/* - * Builds the page table - * r0 - The table base address - * r1 - The physical address (trashed) - * r2 - The virtual address (trashed) - * r3 - The number of 1MiB sections - * r4 - Trashed - * - * Addresses must be 1MiB aligned - */ -build_device_pagetables: - ldr r4, =(L1_TYPE_S|L1_S_AP(AP_KRW)) - b 1f -build_pagetables: - /* Set the required page attributed */ - ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)) -1: - orr r1, r4 - - /* Move the virtual address to the correct bit location */ - lsr r2, #(L1_S_SHIFT - 2) - - mov r4, r3 -2: - str r1, [r0, r2] - add r2, r2, #4 - add r1, r1, #(L1_S_SIZE) - adds r4, r4, #-1 - bhi 2b - - RET - -VA_TO_PA_POINTER(Lpagetable, pagetable) - -Lreal_start: - .word _start -Lend: - .word _edata - -.Lstart: - .word _edata - .word _ebss - .word svcstk + INIT_ARM_STACK_SIZE - -.Lvirt_done: - .word virt_done - -.Lmainreturned: - .asciz "main() returned" - .align 2 - - .bss -svcstk: - .space INIT_ARM_STACK_SIZE - -/* - * Memory for the initial pagetable. We are unable to place this in - * the bss as this will be cleared after the table is loaded. 
- */ - .section ".init_pagetable", "aw", %nobits - .align 14 /* 16KiB aligned */ -pagetable: - .space L1_TABLE_SIZE - - .text - .align 2 - -.Lcpufuncs: - .word _C_LABEL(cpufuncs) - -ENTRY_NP(cpu_halt) - mrs r2, cpsr - bic r2, r2, #(PSR_MODE) - orr r2, r2, #(PSR_SVC32_MODE) - orr r2, r2, #(PSR_I | PSR_F) - msr cpsr_fsxc, r2 - - ldr r4, .Lcpu_reset_address - ldr r4, [r4] - - ldr r0, .Lcpufuncs - mov lr, pc - ldr pc, [r0, #CF_IDCACHE_WBINV_ALL] - mov lr, pc - ldr pc, [r0, #CF_L2CACHE_WBINV_ALL] - - /* - * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's - * necessary. - */ - - ldr r1, .Lcpu_reset_needs_v4_MMU_disable - ldr r1, [r1] - cmp r1, #0 - mov r2, #0 - - /* - * MMU & IDC off, 32 bit program & data space - * Hurl ourselves into the ROM - */ - mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE) - mcr CP15_SCTLR(r0) - mcrne p15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */ - mov pc, r4 - - /* - * _cpu_reset_address contains the address to branch to, to complete - * the cpu reset after turning the MMU off - * This variable is provided by the hardware specific code - */ -.Lcpu_reset_address: - .word _C_LABEL(cpu_reset_address) - - /* - * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the - * v4 MMU disable instruction needs executing... it is an illegal instruction - * on f.e. ARM6/7 that locks up the computer in an endless illegal - * instruction / data-abort / reset loop. - */ -.Lcpu_reset_needs_v4_MMU_disable: - .word _C_LABEL(cpu_reset_needs_v4_MMU_disable) -END(cpu_halt) - - -/* - * setjump + longjmp - */ -ENTRY(setjmp) - stmia r0, {r4-r14} - mov r0, #0x00000000 - RET -END(setjmp) - -ENTRY(longjmp) - ldmia r0, {r4-r14} - mov r0, #0x00000001 - RET -END(longjmp) - - .data - .global _C_LABEL(esym) -_C_LABEL(esym): .word _C_LABEL(end) - -ENTRY_NP(abort) - b _C_LABEL(abort) -END(abort) - -ENTRY_NP(sigcode) - mov r0, sp - add r0, r0, #SIGF_UC - - /* - * Call the sigreturn system call. - * - * We have to load r7 manually rather than using - * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is - * correct. Using the alternative places esigcode at the address - * of the data rather than the address one past the data. - */ - - ldr r7, [pc, #12] /* Load SYS_sigreturn */ - swi SYS_sigreturn - - /* Well if that failed we better exit quick ! */ - - ldr r7, [pc, #8] /* Load SYS_exit */ - swi SYS_exit - - /* Branch back to retry SYS_sigreturn */ - b . - 16 -END(sigcode) - .word SYS_sigreturn - .word SYS_exit - - .align 2 - .global _C_LABEL(esigcode) - _C_LABEL(esigcode): - - .data - .global szsigcode -szsigcode: - .long esigcode-sigcode - -/* End of locore.S */ diff --git a/sys/arm/arm/locore.S b/sys/arm/arm/locore.S index 0a6a2d305501..eba95f62e310 100644 --- a/sys/arm/arm/locore.S +++ b/sys/arm/arm/locore.S @@ -1,41 +1,37 @@ /*- * Copyright (c) 2014 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * The kernel build machinery wants the file containing the entry point to be
 * named locore.S, but we want separate files for v4 and v6 builds, so just
 * include the arch-appropriate file from this properly-named file.
 */
#include
-#if __ARM_ARCH >= 6
#include "locore-v6.S"
-#else
-#include "locore-v4.S"
-#endif
diff --git a/sys/arm/arm/pmap-v4.c b/sys/arm/arm/pmap-v4.c
deleted file mode 100644
index 5ef8a8e99dd0..000000000000
--- a/sys/arm/arm/pmap-v4.c
+++ /dev/null
@@ -1,4420 +0,0 @@
-/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
-/*-
- * Copyright 2004 Olivier Houchard.
- * Copyright 2003 Wasabi Systems, Inc.
- * All rights reserved.
- *
- * Written by Steve C. Woodford for Wasabi Systems, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed for the NetBSD Project by
- * Wasabi Systems, Inc.
- * 4. The name of Wasabi Systems, Inc. may not be used to endorse
- * or promote products derived from this software without specific prior
- * written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*-
- * Copyright (c) 2002-2003 Wasabi Systems, Inc.
- * Copyright (c) 2001 Richard Earnshaw
- * Copyright (c) 2001-2002 Christopher Gilbert
- * All rights reserved.
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2.
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -/*- - * Copyright (c) 1999 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Charles M. Hannum. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/*- - * Copyright (c) 1994-1998 Mark Brinicombe. - * Copyright (c) 1994 Brini. - * All rights reserved. - * - * This code is derived from software written for Brini by Mark Brinicombe - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Mark Brinicombe. - * 4. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * - * RiscBSD kernel project - * - * pmap.c - * - * Machine dependent vm stuff - * - * Created : 20/09/94 - */ - -/* - * Special compilation symbols - * PMAP_DEBUG - Build in pmap_debug_level code - * - * Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c - */ -/* Include header files */ - -#include "opt_vm.h" - -#include -__FBSDID("$FreeBSD$"); -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#ifdef PMAP_DEBUG -#define PDEBUG(_lev_,_stat_) \ - if (pmap_debug_level >= (_lev_)) \ - ((_stat_)) -#define dprintf printf - -int pmap_debug_level = 0; -#define PMAP_INLINE -#else /* PMAP_DEBUG */ -#define PDEBUG(_lev_,_stat_) /* Nothing */ -#define dprintf(x, arg...) 
-#define PMAP_INLINE __inline -#endif /* PMAP_DEBUG */ - -extern struct pv_addr systempage; - -extern int last_fault_code; - -#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) -#define l2pte_index(v) (((v) & L1_S_OFFSET) >> L2_S_SHIFT) -#define l2pte_valid(pte) ((pte) != 0) -#define l2pte_pa(pte) ((pte) & L2_S_FRAME) - -/* - * Internal function prototypes - */ -static void pmap_free_pv_entry (pv_entry_t); -static pv_entry_t pmap_get_pv_entry(void); - -static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t, - vm_prot_t, u_int); -static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va); -static void pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t); -static void pmap_alloc_l1(pmap_t); -static void pmap_free_l1(pmap_t); - -static int pmap_clearbit(struct vm_page *, u_int); - -static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t); -static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t); -static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); -static vm_offset_t kernel_pt_lookup(vm_paddr_t); - -static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1"); - -vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ -vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ -vm_offset_t pmap_curmaxkvaddr; -vm_paddr_t kernel_l1pa; - -vm_offset_t kernel_vm_end = 0; - -vm_offset_t vm_max_kernel_address; - -struct pmap kernel_pmap_store; - -static pt_entry_t *csrc_pte, *cdst_pte; -static vm_offset_t csrcp, cdstp, qmap_addr; -static struct mtx cmtx, qmap_mtx; - -static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); -/* - * These routines are called when the CPU type is identified to set up - * the PTE prototypes, cache modes, etc. - * - * The variables are always here, just in case LKMs need to reference - * them (though, they shouldn't). - */ - -static pt_entry_t pte_l1_s_cache_mode; -static pt_entry_t pte_l1_s_cache_mode_pt; -static pt_entry_t pte_l1_s_cache_mask; - -static pt_entry_t pte_l2_l_cache_mode; -static pt_entry_t pte_l2_l_cache_mode_pt; -static pt_entry_t pte_l2_l_cache_mask; - -static pt_entry_t pte_l2_s_cache_mode; -static pt_entry_t pte_l2_s_cache_mode_pt; -static pt_entry_t pte_l2_s_cache_mask; - -/* - * Crashdump maps. - */ -static caddr_t crashdumpmap; - -extern void bcopy_page(vm_offset_t, vm_offset_t); -extern void bzero_page(vm_offset_t); - -extern vm_offset_t alloc_firstaddr; - -char *_tmppt; - -/* - * Metadata for L1 translation tables. - */ -struct l1_ttable { - /* Entry on the L1 Table list */ - SLIST_ENTRY(l1_ttable) l1_link; - - /* Entry on the L1 Least Recently Used list */ - TAILQ_ENTRY(l1_ttable) l1_lru; - - /* Track how many domains are allocated from this L1 */ - volatile u_int l1_domain_use_count; - - /* - * A free-list of domain numbers for this L1. - * We avoid using ffs() and a bitmap to track domains since ffs() - * is slow on ARM. - */ - u_int8_t l1_domain_first; - u_int8_t l1_domain_free[PMAP_DOMAINS]; - - /* Physical address of this L1 page table */ - vm_paddr_t l1_physaddr; - - /* KVA of this L1 page table */ - pd_entry_t *l1_kva; -}; - -/* - * Convert a virtual address into its L1 table index. That is, the - * index used to locate the L2 descriptor table pointer in an L1 table. - * This is basically used to index l1->l1_kva[]. - * - * Each L2 descriptor table represents 1MB of VA space. - */ -#define L1_IDX(va) (((vm_offset_t)(va)) >> L1_S_SHIFT) - -/* - * L1 Page Tables are tracked using a Least Recently Used list. - * - New L1s are allocated from the HEAD. 
- * - Freed L1s are added to the TAIl. - * - Recently accessed L1s (where an 'access' is some change to one of - * the userland pmaps which owns this L1) are moved to the TAIL. - */ -static TAILQ_HEAD(, l1_ttable) l1_lru_list; -/* - * A list of all L1 tables - */ -static SLIST_HEAD(, l1_ttable) l1_list; -static struct mtx l1_lru_lock; - -/* - * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. - * - * This is normally 16MB worth L2 page descriptors for any given pmap. - * Reference counts are maintained for L2 descriptors so they can be - * freed when empty. - */ -struct l2_dtable { - /* The number of L2 page descriptors allocated to this l2_dtable */ - u_int l2_occupancy; - - /* List of L2 page descriptors */ - struct l2_bucket { - pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ - vm_paddr_t l2b_phys; /* Physical address of same */ - u_short l2b_l1idx; /* This L2 table's L1 index */ - u_short l2b_occupancy; /* How many active descriptors */ - } l2_bucket[L2_BUCKET_SIZE]; -}; - -/* pmap_kenter_internal flags */ -#define KENTER_CACHE 0x1 -#define KENTER_USER 0x2 - -/* - * Given an L1 table index, calculate the corresponding l2_dtable index - * and bucket index within the l2_dtable. - */ -#define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ - (L2_SIZE - 1)) -#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) - -/* - * Given a virtual address, this macro returns the - * virtual address required to drop into the next L2 bucket. - */ -#define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) - -/* - * We try to map the page tables write-through, if possible. However, not - * all CPUs have a write-through cache mode, so on those we have to sync - * the cache when we frob page tables. - * - * We try to evaluate this at compile time, if possible. However, it's - * not always possible to do that, hence this run-time var. - */ -int pmap_needs_pte_sync; - -/* - * Macro to determine if a mapping might be resident in the - * instruction cache and/or TLB - */ -#define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) - -/* - * Macro to determine if a mapping might be resident in the - * data cache and/or TLB - */ -#define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) - -#ifndef PMAP_SHPGPERPROC -#define PMAP_SHPGPERPROC 200 -#endif - -#define pmap_is_current(pm) ((pm) == kernel_pmap || \ - curproc->p_vmspace->vm_map.pmap == (pm)) -static uma_zone_t pvzone = NULL; -uma_zone_t l2zone; -static uma_zone_t l2table_zone; -static vm_offset_t pmap_kernel_l2dtable_kva; -static vm_offset_t pmap_kernel_l2ptp_kva; -static vm_paddr_t pmap_kernel_l2ptp_phys; -static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0; -static struct rwlock pvh_global_lock; - -void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); - -/* - * This list exists for the benefit of pmap_map_chunk(). It keeps track - * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can - * find them as necessary. - * - * Note that the data on this list MUST remain valid after initarm() returns, - * as pmap_bootstrap() uses it to contruct L2 table metadata. - */ -SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list); - -static void -pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) -{ - int i; - - l1->l1_kva = l1pt; - l1->l1_domain_use_count = 0; - l1->l1_domain_first = 0; - - for (i = 0; i < PMAP_DOMAINS; i++) - l1->l1_domain_free[i] = i + 1; - - /* - * Copy the kernel's L1 entries to each new L1. 
- */ - if (l1pt != kernel_pmap->pm_l1->l1_kva) - memcpy(l1pt, kernel_pmap->pm_l1->l1_kva, L1_TABLE_SIZE); - - if ((l1->l1_physaddr = pmap_extract(kernel_pmap, (vm_offset_t)l1pt)) == 0) - panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); - SLIST_INSERT_HEAD(&l1_list, l1, l1_link); - TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); -} - -static vm_offset_t -kernel_pt_lookup(vm_paddr_t pa) -{ - struct pv_addr *pv; - - SLIST_FOREACH(pv, &kernel_pt_list, pv_list) { - if (pv->pv_pa == pa) - return (pv->pv_va); - } - return (0); -} - -void -pmap_pte_init_generic(void) -{ - - pte_l1_s_cache_mode = L1_S_B|L1_S_C; - pte_l1_s_cache_mask = L1_S_CACHE_MASK; - - pte_l2_l_cache_mode = L2_B|L2_C; - pte_l2_l_cache_mask = L2_L_CACHE_MASK; - - pte_l2_s_cache_mode = L2_B|L2_C; - pte_l2_s_cache_mask = L2_S_CACHE_MASK; - - /* - * If we have a write-through cache, set B and C. If - * we have a write-back cache, then we assume setting - * only C will make those pages write-through. - */ - if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) { - pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; - pte_l2_l_cache_mode_pt = L2_B|L2_C; - pte_l2_s_cache_mode_pt = L2_B|L2_C; - } else { - pte_l1_s_cache_mode_pt = L1_S_C; - pte_l2_l_cache_mode_pt = L2_C; - pte_l2_s_cache_mode_pt = L2_C; - } -} - -/* - * Allocate an L1 translation table for the specified pmap. - * This is called at pmap creation time. - */ -static void -pmap_alloc_l1(pmap_t pm) -{ - struct l1_ttable *l1; - u_int8_t domain; - - /* - * Remove the L1 at the head of the LRU list - */ - mtx_lock(&l1_lru_lock); - l1 = TAILQ_FIRST(&l1_lru_list); - TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); - - /* - * Pick the first available domain number, and update - * the link to the next number. - */ - domain = l1->l1_domain_first; - l1->l1_domain_first = l1->l1_domain_free[domain]; - - /* - * If there are still free domain numbers in this L1, - * put it back on the TAIL of the LRU list. - */ - if (++l1->l1_domain_use_count < PMAP_DOMAINS) - TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); - - mtx_unlock(&l1_lru_lock); - - /* - * Fix up the relevant bits in the pmap structure - */ - pm->pm_l1 = l1; - pm->pm_domain = domain + 1; -} - -/* - * Free an L1 translation table. - * This is called at pmap destruction time. - */ -static void -pmap_free_l1(pmap_t pm) -{ - struct l1_ttable *l1 = pm->pm_l1; - - mtx_lock(&l1_lru_lock); - - /* - * If this L1 is currently on the LRU list, remove it. - */ - if (l1->l1_domain_use_count < PMAP_DOMAINS) - TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); - - /* - * Free up the domain number which was allocated to the pmap - */ - l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first; - l1->l1_domain_first = pm->pm_domain - 1; - l1->l1_domain_use_count--; - - /* - * The L1 now must have at least 1 free domain, so add - * it back to the LRU list. If the use count is zero, - * put it at the head of the list, otherwise it goes - * to the tail. - */ - if (l1->l1_domain_use_count == 0) { - TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); - } else - TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); - - mtx_unlock(&l1_lru_lock); -} - -/* - * Returns a pointer to the L2 bucket associated with the specified pmap - * and VA, or NULL if no L2 bucket exists for the address. 
- */ -static PMAP_INLINE struct l2_bucket * -pmap_get_l2_bucket(pmap_t pm, vm_offset_t va) -{ - struct l2_dtable *l2; - struct l2_bucket *l2b; - u_short l1idx; - - l1idx = L1_IDX(va); - - if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL || - (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL) - return (NULL); - - return (l2b); -} - -/* - * Returns a pointer to the L2 bucket associated with the specified pmap - * and VA. - * - * If no L2 bucket exists, perform the necessary allocations to put an L2 - * bucket/page table in place. - * - * Note that if a new L2 bucket/page was allocated, the caller *must* - * increment the bucket occupancy counter appropriately *before* - * releasing the pmap's lock to ensure no other thread or cpu deallocates - * the bucket/page in the meantime. - */ -static struct l2_bucket * -pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va) -{ - struct l2_dtable *l2; - struct l2_bucket *l2b; - u_short l1idx; - - l1idx = L1_IDX(va); - - PMAP_ASSERT_LOCKED(pm); - rw_assert(&pvh_global_lock, RA_WLOCKED); - if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { - /* - * No mapping at this address, as there is - * no entry in the L1 table. - * Need to allocate a new l2_dtable. - */ - PMAP_UNLOCK(pm); - rw_wunlock(&pvh_global_lock); - if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) { - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - return (NULL); - } - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - if (pm->pm_l2[L2_IDX(l1idx)] != NULL) { - /* - * Someone already allocated the l2_dtable while - * we were doing the same. - */ - uma_zfree(l2table_zone, l2); - l2 = pm->pm_l2[L2_IDX(l1idx)]; - } else { - bzero(l2, sizeof(*l2)); - /* - * Link it into the parent pmap - */ - pm->pm_l2[L2_IDX(l1idx)] = l2; - } - } - - l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; - - /* - * Fetch pointer to the L2 page table associated with the address. - */ - if (l2b->l2b_kva == NULL) { - pt_entry_t *ptep; - - /* - * No L2 page table has been allocated. Chances are, this - * is because we just allocated the l2_dtable, above. - */ - l2->l2_occupancy++; - PMAP_UNLOCK(pm); - rw_wunlock(&pvh_global_lock); - ptep = uma_zalloc(l2zone, M_NOWAIT); - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - if (l2b->l2b_kva != NULL) { - /* We lost the race. */ - l2->l2_occupancy--; - uma_zfree(l2zone, ptep); - return (l2b); - } - l2b->l2b_phys = vtophys(ptep); - if (ptep == NULL) { - /* - * Oops, no more L2 page tables available at this - * time. We may need to deallocate the l2_dtable - * if we allocated a new one above. - */ - l2->l2_occupancy--; - if (l2->l2_occupancy == 0) { - pm->pm_l2[L2_IDX(l1idx)] = NULL; - uma_zfree(l2table_zone, l2); - } - return (NULL); - } - - l2b->l2b_kva = ptep; - l2b->l2b_l1idx = l1idx; - } - - return (l2b); -} - -static PMAP_INLINE void -#ifndef PMAP_INCLUDE_PTE_SYNC -pmap_free_l2_ptp(pt_entry_t *l2) -#else -pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2) -#endif -{ -#ifdef PMAP_INCLUDE_PTE_SYNC - /* - * Note: With a write-back cache, we may need to sync this - * L2 table before re-using it. - * This is because it may have belonged to a non-current - * pmap, in which case the cache syncs would have been - * skipped when the pages were being unmapped. If the - * L2 table were then to be immediately re-allocated to - * the *current* pmap, it may well contain stale mappings - * which have not yet been cleared by a cache write-back - * and so would still be visible to the mmu. 
- */ - if (need_sync) - PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); -#endif - uma_zfree(l2zone, l2); -} -/* - * One or more mappings in the specified L2 descriptor table have just been - * invalidated. - * - * Garbage collect the metadata and descriptor table itself if necessary. - * - * The pmap lock must be acquired when this is called (not necessary - * for the kernel pmap). - */ -static void -pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) -{ - struct l2_dtable *l2; - pd_entry_t *pl1pd, l1pd; - pt_entry_t *ptep; - u_short l1idx; - - /* - * Update the bucket's reference count according to how many - * PTEs the caller has just invalidated. - */ - l2b->l2b_occupancy -= count; - - /* - * Note: - * - * Level 2 page tables allocated to the kernel pmap are never freed - * as that would require checking all Level 1 page tables and - * removing any references to the Level 2 page table. See also the - * comment elsewhere about never freeing bootstrap L2 descriptors. - * - * We make do with just invalidating the mapping in the L2 table. - * - * This isn't really a big deal in practice and, in fact, leads - * to a performance win over time as we don't need to continually - * alloc/free. - */ - if (l2b->l2b_occupancy > 0 || pm == kernel_pmap) - return; - - /* - * There are no more valid mappings in this level 2 page table. - * Go ahead and NULL-out the pointer in the bucket, then - * free the page table. - */ - l1idx = l2b->l2b_l1idx; - ptep = l2b->l2b_kva; - l2b->l2b_kva = NULL; - - pl1pd = &pm->pm_l1->l1_kva[l1idx]; - - /* - * If the L1 slot matches the pmap's domain - * number, then invalidate it. - */ - l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK); - if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) { - *pl1pd = 0; - PTE_SYNC(pl1pd); - } - - /* - * Release the L2 descriptor table back to the pool cache. - */ -#ifndef PMAP_INCLUDE_PTE_SYNC - pmap_free_l2_ptp(ptep); -#else - pmap_free_l2_ptp(!pmap_is_current(pm), ptep); -#endif - - /* - * Update the reference count in the associated l2_dtable - */ - l2 = pm->pm_l2[L2_IDX(l1idx)]; - if (--l2->l2_occupancy > 0) - return; - - /* - * There are no more valid mappings in any of the Level 1 - * slots managed by this l2_dtable. Go ahead and NULL-out - * the pointer in the parent pmap and free the l2_dtable. - */ - pm->pm_l2[L2_IDX(l1idx)] = NULL; - uma_zfree(l2table_zone, l2); -} - -/* - * Pool cache constructors for L2 descriptor tables, metadata and pmap - * structures. - */ -static int -pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags) -{ -#ifndef PMAP_INCLUDE_PTE_SYNC - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - - vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK; - - /* - * The mappings for these page tables were initially made using - * pmap_kenter() by the pool subsystem. Therefore, the cache- - * mode will not be right for page table mappings. To avoid - * polluting the pmap_kenter() code with a special case for - * page tables, we simply fix up the cache-mode here if it's not - * correct. - */ - l2b = pmap_get_l2_bucket(kernel_pmap, va); - ptep = &l2b->l2b_kva[l2pte_index(va)]; - pte = *ptep; - - if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { - /* - * Page tables must have the cache-mode set to - * Write-Thru. 
- */ - *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; - PTE_SYNC(ptep); - cpu_tlb_flushD_SE(va); - cpu_cpwait(); - } -#endif - memset(mem, 0, L2_TABLE_SIZE_REAL); - PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); - return (0); -} - -/* - * A bunch of routines to conditionally flush the caches/TLB depending - * on whether the specified pmap actually needs to be flushed at any - * given time. - */ -static PMAP_INLINE void -pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va) -{ - - if (pmap_is_current(pm)) - cpu_tlb_flushID_SE(va); -} - -static PMAP_INLINE void -pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va) -{ - - if (pmap_is_current(pm)) - cpu_tlb_flushD_SE(va); -} - -static PMAP_INLINE void -pmap_tlb_flushID(pmap_t pm) -{ - - if (pmap_is_current(pm)) - cpu_tlb_flushID(); -} -static PMAP_INLINE void -pmap_tlb_flushD(pmap_t pm) -{ - - if (pmap_is_current(pm)) - cpu_tlb_flushD(); -} - -static int -pmap_has_valid_mapping(pmap_t pm, vm_offset_t va) -{ - pd_entry_t *pde; - pt_entry_t *ptep; - - if (pmap_get_pde_pte(pm, va, &pde, &ptep) && - ptep && ((*ptep & L2_TYPE_MASK) != L2_TYPE_INV)) - return (1); - - return (0); -} - -static PMAP_INLINE void -pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len) -{ - vm_size_t rest; - - CTR4(KTR_PMAP, "pmap_dcache_wbinv_range: pmap %p is_kernel %d va 0x%08x" - " len 0x%x ", pm, pm == kernel_pmap, va, len); - - if (pmap_is_current(pm) || pm == kernel_pmap) { - rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len); - while (len > 0) { - if (pmap_has_valid_mapping(pm, va)) { - cpu_idcache_wbinv_range(va, rest); - cpu_l2cache_wbinv_range(va, rest); - } - len -= rest; - va += rest; - rest = MIN(PAGE_SIZE, len); - } - } -} - -static PMAP_INLINE void -pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv, - boolean_t rd_only) -{ - vm_size_t rest; - - CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x " - "len 0x%x ", pm, pm == kernel_pmap, va, len); - CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only); - - if (pmap_is_current(pm)) { - rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len); - while (len > 0) { - if (pmap_has_valid_mapping(pm, va)) { - if (do_inv && rd_only) { - cpu_dcache_inv_range(va, rest); - cpu_l2cache_inv_range(va, rest); - } else if (do_inv) { - cpu_dcache_wbinv_range(va, rest); - cpu_l2cache_wbinv_range(va, rest); - } else if (!rd_only) { - cpu_dcache_wb_range(va, rest); - cpu_l2cache_wb_range(va, rest); - } - } - len -= rest; - va += rest; - - rest = MIN(PAGE_SIZE, len); - } - } -} - -static PMAP_INLINE void -pmap_idcache_wbinv_all(pmap_t pm) -{ - - if (pmap_is_current(pm)) { - cpu_idcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - } -} - -#ifdef notyet -static PMAP_INLINE void -pmap_dcache_wbinv_all(pmap_t pm) -{ - - if (pmap_is_current(pm)) { - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - } -} -#endif - -/* - * PTE_SYNC_CURRENT: - * - * Make sure the pte is written out to RAM. - * We need to do this for one of two cases: - * - We're dealing with the kernel pmap - * - There is no pmap active in the cache/tlb. - * - The specified pmap is 'active' in the cache/tlb. 
- */ -#ifdef PMAP_INCLUDE_PTE_SYNC -#define PTE_SYNC_CURRENT(pm, ptep) \ -do { \ - if (PMAP_NEEDS_PTE_SYNC && \ - pmap_is_current(pm)) \ - PTE_SYNC(ptep); \ -} while (/*CONSTCOND*/0) -#else -#define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ -#endif - -/* - * cacheable == -1 means we must make the entry uncacheable, 1 means - * cacheable; - */ -static __inline void -pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - - l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); - ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; - - if (cacheable == 1) { - pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; - if (l2pte_valid(pte)) { - if (PV_BEEN_EXECD(pv->pv_flags)) { - pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); - } else if (PV_BEEN_REFD(pv->pv_flags)) { - pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); - } - } - } else { - pte = *ptep &~ L2_S_CACHE_MASK; - if ((va != pv->pv_va || pm != pv->pv_pmap) && - l2pte_valid(pte)) { - if (PV_BEEN_EXECD(pv->pv_flags)) { - pmap_idcache_wbinv_range(pv->pv_pmap, - pv->pv_va, PAGE_SIZE); - pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); - } else if (PV_BEEN_REFD(pv->pv_flags)) { - pmap_dcache_wb_range(pv->pv_pmap, - pv->pv_va, PAGE_SIZE, TRUE, - (pv->pv_flags & PVF_WRITE) == 0); - pmap_tlb_flushD_SE(pv->pv_pmap, - pv->pv_va); - } - } - } - *ptep = pte; - PTE_SYNC_CURRENT(pv->pv_pmap, ptep); -} - -static void -pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va) -{ - int pmwc = 0; - int writable = 0, kwritable = 0, uwritable = 0; - int entries = 0, kentries = 0, uentries = 0; - struct pv_entry *pv; - - rw_assert(&pvh_global_lock, RA_WLOCKED); - - /* the cache gets written back/invalidated on context switch. - * therefore, if a user page shares an entry in the same page or - * with the kernel map and at least one is writable, then the - * cache entry must be set write-through. - */ - - TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { - /* generate a count of the pv_entry uses */ - if (pv->pv_flags & PVF_WRITE) { - if (pv->pv_pmap == kernel_pmap) - kwritable++; - else if (pv->pv_pmap == pm) - uwritable++; - writable++; - } - if (pv->pv_pmap == kernel_pmap) - kentries++; - else { - if (pv->pv_pmap == pm) - uentries++; - entries++; - } - } - /* - * check if the user duplicate mapping has - * been removed. 
- */ - if ((pm != kernel_pmap) && (((uentries > 1) && uwritable) || - (uwritable > 1))) - pmwc = 1; - - TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { - /* check for user uncachable conditions - order is important */ - if (pm != kernel_pmap && - (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap)) { - if ((uentries > 1 && uwritable) || uwritable > 1) { - /* user duplicate mapping */ - if (pv->pv_pmap != kernel_pmap) - pv->pv_flags |= PVF_MWC; - - if (!(pv->pv_flags & PVF_NC)) { - pv->pv_flags |= PVF_NC; - pmap_set_cache_entry(pv, pm, va, -1); - } - continue; - } else /* no longer a duplicate user */ - pv->pv_flags &= ~PVF_MWC; - } - - /* - * check for kernel uncachable conditions - * kernel writable or kernel readable with writable user entry - */ - if ((kwritable && (entries || kentries > 1)) || - (kwritable > 1) || - ((kwritable != writable) && kentries && - (pv->pv_pmap == kernel_pmap || - (pv->pv_flags & PVF_WRITE) || - (pv->pv_flags & PVF_MWC)))) { - if (!(pv->pv_flags & PVF_NC)) { - pv->pv_flags |= PVF_NC; - pmap_set_cache_entry(pv, pm, va, -1); - } - continue; - } - - /* kernel and user are cachable */ - if ((pm == kernel_pmap) && !(pv->pv_flags & PVF_MWC) && - (pv->pv_flags & PVF_NC)) { - pv->pv_flags &= ~PVF_NC; - if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) - pmap_set_cache_entry(pv, pm, va, 1); - continue; - } - /* user is no longer sharable and writable */ - if (pm != kernel_pmap && - (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap) && - !pmwc && (pv->pv_flags & PVF_NC)) { - pv->pv_flags &= ~(PVF_NC | PVF_MWC); - if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) - pmap_set_cache_entry(pv, pm, va, 1); - } - } - - if ((kwritable == 0) && (writable == 0)) { - pg->md.pvh_attrs &= ~PVF_MOD; - vm_page_aflag_clear(pg, PGA_WRITEABLE); - return; - } -} - -/* - * Modify pte bits for all ptes corresponding to the given physical address. - * We use `maskbits' rather than `clearbits' because we're always passing - * constants and the latter would require an extra inversion at run-time. - */ -static int -pmap_clearbit(struct vm_page *pg, u_int maskbits) -{ - struct l2_bucket *l2b; - struct pv_entry *pv; - pt_entry_t *ptep, npte, opte; - pmap_t pm; - vm_offset_t va; - u_int oflags; - int count = 0; - - rw_wlock(&pvh_global_lock); - - if (maskbits & PVF_WRITE) - maskbits |= PVF_MOD; - /* - * Clear saved attributes (modify, reference) - */ - pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); - - if (TAILQ_EMPTY(&pg->md.pv_list)) { - rw_wunlock(&pvh_global_lock); - return (0); - } - - /* - * Loop over all current mappings setting/clearing as appropos - */ - TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { - va = pv->pv_va; - pm = pv->pv_pmap; - oflags = pv->pv_flags; - - if (!(oflags & maskbits)) { - if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) { - if (pg->md.pv_memattr != - VM_MEMATTR_UNCACHEABLE) { - PMAP_LOCK(pm); - l2b = pmap_get_l2_bucket(pm, va); - ptep = &l2b->l2b_kva[l2pte_index(va)]; - *ptep |= pte_l2_s_cache_mode; - PTE_SYNC(ptep); - PMAP_UNLOCK(pm); - } - pv->pv_flags &= ~(PVF_NC | PVF_MWC); - } - continue; - } - pv->pv_flags &= ~maskbits; - - PMAP_LOCK(pm); - - l2b = pmap_get_l2_bucket(pm, va); - - ptep = &l2b->l2b_kva[l2pte_index(va)]; - npte = opte = *ptep; - - if (maskbits & (PVF_WRITE|PVF_MOD)) { - if ((pv->pv_flags & PVF_NC)) { - /* - * Entry is not cacheable: - * - * Don't turn caching on again if this is a - * modified emulation. This would be - * inconsistent with the settings created by - * pmap_fix_cache(). Otherwise, it's safe - * to re-enable caching. 
- * - * There's no need to call pmap_fix_cache() - * here: all pages are losing their write - * permission. - */ - if (maskbits & PVF_WRITE) { - if (pg->md.pv_memattr != - VM_MEMATTR_UNCACHEABLE) - npte |= pte_l2_s_cache_mode; - pv->pv_flags &= ~(PVF_NC | PVF_MWC); - } - } else - if (opte & L2_S_PROT_W) { - vm_page_dirty(pg); - /* - * Entry is writable/cacheable: check if pmap - * is current if it is flush it, otherwise it - * won't be in the cache - */ - if (PV_BEEN_EXECD(oflags)) - pmap_idcache_wbinv_range(pm, pv->pv_va, - PAGE_SIZE); - else - if (PV_BEEN_REFD(oflags)) - pmap_dcache_wb_range(pm, pv->pv_va, - PAGE_SIZE, - (maskbits & PVF_REF) ? TRUE : FALSE, - FALSE); - } - - /* make the pte read only */ - npte &= ~L2_S_PROT_W; - } - - if (maskbits & PVF_REF) { - if ((pv->pv_flags & PVF_NC) == 0 && - (maskbits & (PVF_WRITE|PVF_MOD)) == 0) { - /* - * Check npte here; we may have already - * done the wbinv above, and the validity - * of the PTE is the same for opte and - * npte. - */ - if (npte & L2_S_PROT_W) { - if (PV_BEEN_EXECD(oflags)) - pmap_idcache_wbinv_range(pm, - pv->pv_va, PAGE_SIZE); - else - if (PV_BEEN_REFD(oflags)) - pmap_dcache_wb_range(pm, - pv->pv_va, PAGE_SIZE, - TRUE, FALSE); - } else - if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) { - /* XXXJRT need idcache_inv_range */ - if (PV_BEEN_EXECD(oflags)) - pmap_idcache_wbinv_range(pm, - pv->pv_va, PAGE_SIZE); - else - if (PV_BEEN_REFD(oflags)) - pmap_dcache_wb_range(pm, - pv->pv_va, PAGE_SIZE, - TRUE, TRUE); - } - } - - /* - * Make the PTE invalid so that we will take a - * page fault the next time the mapping is - * referenced. - */ - npte &= ~L2_TYPE_MASK; - npte |= L2_TYPE_INV; - } - - if (npte != opte) { - count++; - *ptep = npte; - PTE_SYNC(ptep); - /* Flush the TLB entry if a current pmap. 
*/ - if (PV_BEEN_EXECD(oflags)) - pmap_tlb_flushID_SE(pm, pv->pv_va); - else - if (PV_BEEN_REFD(oflags)) - pmap_tlb_flushD_SE(pm, pv->pv_va); - } - - PMAP_UNLOCK(pm); - } - - if (maskbits & PVF_WRITE) - vm_page_aflag_clear(pg, PGA_WRITEABLE); - rw_wunlock(&pvh_global_lock); - return (count); -} - -/* - * main pv_entry manipulation functions: - * pmap_enter_pv: enter a mapping onto a vm_page list - * pmap_remove_pv: remove a mappiing from a vm_page list - * - * NOTE: pmap_enter_pv expects to lock the pvh itself - * pmap_remove_pv expects the caller to lock the pvh before calling - */ - -/* - * pmap_enter_pv: enter a mapping onto a vm_page's PV list - * - * => caller should hold the proper lock on pvh_global_lock - * => caller should have pmap locked - * => we will (someday) gain the lock on the vm_page's PV list - * => caller should adjust ptp's wire_count before calling - * => caller should not adjust pmap's wire_count - */ -static void -pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, - vm_offset_t va, u_int flags) -{ - - rw_assert(&pvh_global_lock, RA_WLOCKED); - PMAP_ASSERT_LOCKED(pm); - if (pg->md.pv_kva != 0) { - pve->pv_pmap = kernel_pmap; - pve->pv_va = pg->md.pv_kva; - pve->pv_flags = PVF_WRITE | PVF_UNMAN; - if (pm != kernel_pmap) - PMAP_LOCK(kernel_pmap); - TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list); - TAILQ_INSERT_HEAD(&kernel_pmap->pm_pvlist, pve, pv_plist); - if (pm != kernel_pmap) - PMAP_UNLOCK(kernel_pmap); - pg->md.pv_kva = 0; - if ((pve = pmap_get_pv_entry()) == NULL) - panic("pmap_kenter_pv: no pv entries"); - } - pve->pv_pmap = pm; - pve->pv_va = va; - pve->pv_flags = flags; - TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list); - TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist); - pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD); - if (pve->pv_flags & PVF_WIRED) - ++pm->pm_stats.wired_count; - vm_page_aflag_set(pg, PGA_REFERENCED); -} - -/* - * - * pmap_find_pv: Find a pv entry - * - * => caller should hold lock on vm_page - */ -static PMAP_INLINE struct pv_entry * -pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) -{ - struct pv_entry *pv; - - rw_assert(&pvh_global_lock, RA_WLOCKED); - TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) - if (pm == pv->pv_pmap && va == pv->pv_va) - break; - return (pv); -} - -/* - * vector_page_setprot: - * - * Manipulate the protection of the vector page. 
- */ -void -vector_page_setprot(int prot) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep; - - l2b = pmap_get_l2_bucket(kernel_pmap, vector_page); - - ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; - - *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); - PTE_SYNC(ptep); - cpu_tlb_flushD_SE(vector_page); - cpu_cpwait(); -} - -/* - * pmap_remove_pv: try to remove a mapping from a pv_list - * - * => caller should hold proper lock on pmap_main_lock - * => pmap should be locked - * => caller should hold lock on vm_page [so that attrs can be adjusted] - * => caller should adjust ptp's wire_count and free PTP if needed - * => caller should NOT adjust pmap's wire_count - * => we return the removed pve - */ - -static void -pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve) -{ - - struct pv_entry *pv; - rw_assert(&pvh_global_lock, RA_WLOCKED); - PMAP_ASSERT_LOCKED(pm); - TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list); - TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist); - if (pve->pv_flags & PVF_WIRED) - --pm->pm_stats.wired_count; - if (pg->md.pvh_attrs & PVF_MOD) - vm_page_dirty(pg); - if (TAILQ_FIRST(&pg->md.pv_list) == NULL) - pg->md.pvh_attrs &= ~PVF_REF; - else - vm_page_aflag_set(pg, PGA_REFERENCED); - if ((pve->pv_flags & PVF_NC) && ((pm == kernel_pmap) || - (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC))) - pmap_fix_cache(pg, pm, 0); - else if (pve->pv_flags & PVF_WRITE) { - TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list) - if (pve->pv_flags & PVF_WRITE) - break; - if (!pve) { - pg->md.pvh_attrs &= ~PVF_MOD; - vm_page_aflag_clear(pg, PGA_WRITEABLE); - } - } - pv = TAILQ_FIRST(&pg->md.pv_list); - if (pv != NULL && (pv->pv_flags & PVF_UNMAN) && - TAILQ_NEXT(pv, pv_list) == NULL) { - pm = kernel_pmap; - pg->md.pv_kva = pv->pv_va; - /* a recursive pmap_nuke_pv */ - TAILQ_REMOVE(&pg->md.pv_list, pv, pv_list); - TAILQ_REMOVE(&pm->pm_pvlist, pv, pv_plist); - if (pv->pv_flags & PVF_WIRED) - --pm->pm_stats.wired_count; - pg->md.pvh_attrs &= ~PVF_REF; - pg->md.pvh_attrs &= ~PVF_MOD; - vm_page_aflag_clear(pg, PGA_WRITEABLE); - pmap_free_pv_entry(pv); - } -} - -static struct pv_entry * -pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) -{ - struct pv_entry *pve; - - rw_assert(&pvh_global_lock, RA_WLOCKED); - pve = TAILQ_FIRST(&pg->md.pv_list); - - while (pve) { - if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */ - pmap_nuke_pv(pg, pm, pve); - break; - } - pve = TAILQ_NEXT(pve, pv_list); - } - - if (pve == NULL && pg->md.pv_kva == va) - pg->md.pv_kva = 0; - - return(pve); /* return removed pve */ -} -/* - * - * pmap_modify_pv: Update pv flags - * - * => caller should hold lock on vm_page [so that attrs can be adjusted] - * => caller should NOT adjust pmap's wire_count - * => we return the old flags - * - * Modify a physical-virtual mapping in the pv table - */ -static u_int -pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va, - u_int clr_mask, u_int set_mask) -{ - struct pv_entry *npv; - u_int flags, oflags; - - PMAP_ASSERT_LOCKED(pm); - rw_assert(&pvh_global_lock, RA_WLOCKED); - if ((npv = pmap_find_pv(pg, pm, va)) == NULL) - return (0); - - /* - * There is at least one VA mapping this page. 
- */ - - if (clr_mask & (PVF_REF | PVF_MOD)) - pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); - - oflags = npv->pv_flags; - npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; - - if ((flags ^ oflags) & PVF_WIRED) { - if (flags & PVF_WIRED) - ++pm->pm_stats.wired_count; - else - --pm->pm_stats.wired_count; - } - - if ((flags ^ oflags) & PVF_WRITE) - pmap_fix_cache(pg, pm, 0); - - return (oflags); -} - -/* Function to set the debug level of the pmap code */ -#ifdef PMAP_DEBUG -void -pmap_debug(int level) -{ - pmap_debug_level = level; - dprintf("pmap_debug: level=%d\n", pmap_debug_level); -} -#endif /* PMAP_DEBUG */ - -void -pmap_pinit0(struct pmap *pmap) -{ - PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap)); - - bcopy(kernel_pmap, pmap, sizeof(*pmap)); - bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx)); - PMAP_LOCK_INIT(pmap); -} - -/* - * Initialize a vm_page's machine-dependent fields. - */ -void -pmap_page_init(vm_page_t m) -{ - - TAILQ_INIT(&m->md.pv_list); - m->md.pv_memattr = VM_MEMATTR_DEFAULT; - m->md.pvh_attrs = 0; - m->md.pv_kva = 0; -} - -/* - * Initialize the pmap module. - * Called by vm_init, to initialize any structures that the pmap - * system needs to map virtual memory. - */ -void -pmap_init(void) -{ - int shpgperproc = PMAP_SHPGPERPROC; - - l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor, - NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); - l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL, - NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); - - /* - * Initialize the PV entry allocator. - */ - pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, - NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); - TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); - pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; - uma_zone_reserve_kva(pvzone, pv_entry_max); - pv_entry_high_water = 9 * (pv_entry_max / 10); - - /* - * Now it is safe to enable pv_table recording. - */ - PDEBUG(1, printf("pmap_init: done!\n")); -} - -int -pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user) -{ - struct l2_dtable *l2; - struct l2_bucket *l2b; - pd_entry_t *pl1pd, l1pd; - pt_entry_t *ptep, pte; - vm_paddr_t pa; - u_int l1idx; - int rv = 0; - - l1idx = L1_IDX(va); - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - - /* - * If there is no l2_dtable for this address, then the process - * has no business accessing it. - * - * Note: This will catch userland processes trying to access - * kernel addresses. - */ - l2 = pm->pm_l2[L2_IDX(l1idx)]; - if (l2 == NULL) - goto out; - - /* - * Likewise if there is no L2 descriptor table - */ - l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; - if (l2b->l2b_kva == NULL) - goto out; - - /* - * Check the PTE itself. - */ - ptep = &l2b->l2b_kva[l2pte_index(va)]; - pte = *ptep; - if (pte == 0) - goto out; - - /* - * Catch a userland access to the vector page mapped at 0x0 - */ - if (user && (pte & L2_S_PROT_U) == 0) - goto out; - if (va == vector_page) - goto out; - - pa = l2pte_pa(pte); - - if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) { - /* - * This looks like a good candidate for "page modified" - * emulation... - */ - struct pv_entry *pv; - struct vm_page *pg; - - /* Extract the physical address of the page */ - if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { - goto out; - } - /* Get the current flags for this page. 
*/ - - pv = pmap_find_pv(pg, pm, va); - if (pv == NULL) { - goto out; - } - - /* - * Do the flags say this page is writable? If not then it - * is a genuine write fault. If yes then the write fault is - * our fault as we did not reflect the write access in the - * PTE. Now we know a write has occurred we can correct this - * and also set the modified bit - */ - if ((pv->pv_flags & PVF_WRITE) == 0) { - goto out; - } - - pg->md.pvh_attrs |= PVF_REF | PVF_MOD; - vm_page_dirty(pg); - pv->pv_flags |= PVF_REF | PVF_MOD; - - /* - * Re-enable write permissions for the page. No need to call - * pmap_fix_cache(), since this is just a - * modified-emulation fault, and the PVF_WRITE bit isn't - * changing. We've already set the cacheable bits based on - * the assumption that we can write to this page. - */ - *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W; - PTE_SYNC(ptep); - rv = 1; - } else - if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { - /* - * This looks like a good candidate for "page referenced" - * emulation. - */ - struct pv_entry *pv; - struct vm_page *pg; - - /* Extract the physical address of the page */ - if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) - goto out; - /* Get the current flags for this page. */ - - pv = pmap_find_pv(pg, pm, va); - if (pv == NULL) - goto out; - - pg->md.pvh_attrs |= PVF_REF; - pv->pv_flags |= PVF_REF; - - *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO; - PTE_SYNC(ptep); - rv = 1; - } - - /* - * We know there is a valid mapping here, so simply - * fix up the L1 if necessary. - */ - pl1pd = &pm->pm_l1->l1_kva[l1idx]; - l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; - if (*pl1pd != l1pd) { - *pl1pd = l1pd; - PTE_SYNC(pl1pd); - rv = 1; - } - -#ifdef DEBUG - /* - * If 'rv == 0' at this point, it generally indicates that there is a - * stale TLB entry for the faulting address. This happens when two or - * more processes are sharing an L1. Since we don't flush the TLB on - * a context switch between such processes, we can take domain faults - * for mappings which exist at the same VA in both processes. EVEN IF - * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for - * example. - * - * This is extremely likely to happen if pmap_enter() updated the L1 - * entry for a recently entered mapping. In this case, the TLB is - * flushed for the new mapping, but there may still be TLB entries for - * other mappings belonging to other processes in the 1MB range - * covered by the L1 entry. - * - * Since 'rv == 0', we know that the L1 already contains the correct - * value, so the fault must be due to a stale TLB entry. - * - * Since we always need to flush the TLB anyway in the case where we - * fixed up the L1, or frobbed the L2 PTE, we effectively deal with - * stale TLB entries dynamically. - * - * However, the above condition can ONLY happen if the current L1 is - * being shared. If it happens when the L1 is unshared, it indicates - * that other parts of the pmap are not doing their job WRT managing - * the TLB. 
- */ - if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { - printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", - pm, (u_long)va, ftype); - printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", - l2, l2b, ptep, pl1pd); - printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", - pte, l1pd, last_fault_code); -#ifdef DDB - Debugger(); -#endif - } -#endif - - cpu_tlb_flushID_SE(va); - cpu_cpwait(); - - rv = 1; - -out: - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pm); - return (rv); -} - -void -pmap_postinit(void) -{ - struct l2_bucket *l2b; - struct l1_ttable *l1; - pd_entry_t *pl1pt; - pt_entry_t *ptep, pte; - vm_offset_t va, eva; - u_int loop, needed; - - needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0); - needed -= 1; - l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK); - - for (loop = 0; loop < needed; loop++, l1++) { - /* Allocate a L1 page table */ - va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0, - 0xffffffff, L1_TABLE_SIZE, 0); - - if (va == 0) - panic("Cannot allocate L1 KVM"); - - eva = va + L1_TABLE_SIZE; - pl1pt = (pd_entry_t *)va; - - while (va < eva) { - l2b = pmap_get_l2_bucket(kernel_pmap, va); - ptep = &l2b->l2b_kva[l2pte_index(va)]; - pte = *ptep; - pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; - *ptep = pte; - PTE_SYNC(ptep); - cpu_tlb_flushD_SE(va); - - va += PAGE_SIZE; - } - pmap_init_l1(l1, pl1pt); - } - -#ifdef DEBUG - printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", - needed); -#endif -} - -/* - * This is used to stuff certain critical values into the PCB where they - * can be accessed quickly from cpu_switch() et al. - */ -void -pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb) -{ - struct l2_bucket *l2b; - - pcb->pcb_pagedir = pm->pm_l1->l1_physaddr; - pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | - (DOMAIN_CLIENT << (pm->pm_domain * 2)); - - if (vector_page < KERNBASE) { - pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; - l2b = pmap_get_l2_bucket(pm, vector_page); - pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO | - L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL); - } else - pcb->pcb_pl1vec = NULL; -} - -void -pmap_activate(struct thread *td) -{ - pmap_t pm; - struct pcb *pcb; - - pm = vmspace_pmap(td->td_proc->p_vmspace); - pcb = td->td_pcb; - - critical_enter(); - pmap_set_pcb_pagedir(pm, pcb); - - if (td == curthread) { - u_int cur_dacr, cur_ttb; - - __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb)); - __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr)); - - cur_ttb &= ~(L1_TABLE_SIZE - 1); - - if (cur_ttb == (u_int)pcb->pcb_pagedir && - cur_dacr == pcb->pcb_dacr) { - /* - * No need to switch address spaces. - */ - critical_exit(); - return; - } - - /* - * We MUST, I repeat, MUST fix up the L1 entry corresponding - * to 'vector_page' in the incoming L1 table before switching - * to it otherwise subsequent interrupts/exceptions (including - * domain faults!) will jump into hyperspace. - */ - if (pcb->pcb_pl1vec) { - *pcb->pcb_pl1vec = pcb->pcb_l1vec; - /* - * Don't need to PTE_SYNC() at this point since - * cpu_setttb() is about to flush both the cache - * and the TLB. 
- */ - } - - cpu_domains(pcb->pcb_dacr); - cpu_setttb(pcb->pcb_pagedir); - } - critical_exit(); -} - -static int -pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va) -{ - pd_entry_t *pdep, pde; - pt_entry_t *ptep, pte; - vm_offset_t pa; - int rv = 0; - - /* - * Make sure the descriptor itself has the correct cache mode - */ - pdep = &kl1[L1_IDX(va)]; - pde = *pdep; - - if (l1pte_section_p(pde)) { - if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { - *pdep = (pde & ~L1_S_CACHE_MASK) | - pte_l1_s_cache_mode_pt; - PTE_SYNC(pdep); - cpu_dcache_wbinv_range((vm_offset_t)pdep, - sizeof(*pdep)); - cpu_l2cache_wbinv_range((vm_offset_t)pdep, - sizeof(*pdep)); - rv = 1; - } - } else { - pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); - ptep = (pt_entry_t *)kernel_pt_lookup(pa); - if (ptep == NULL) - panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep); - - ptep = &ptep[l2pte_index(va)]; - pte = *ptep; - if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { - *ptep = (pte & ~L2_S_CACHE_MASK) | - pte_l2_s_cache_mode_pt; - PTE_SYNC(ptep); - cpu_dcache_wbinv_range((vm_offset_t)ptep, - sizeof(*ptep)); - cpu_l2cache_wbinv_range((vm_offset_t)ptep, - sizeof(*ptep)); - rv = 1; - } - } - - return (rv); -} - -static void -pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap, - pt_entry_t **ptep) -{ - vm_offset_t va = *availp; - struct l2_bucket *l2b; - - if (ptep) { - l2b = pmap_get_l2_bucket(kernel_pmap, va); - if (l2b == NULL) - panic("pmap_alloc_specials: no l2b for 0x%x", va); - - *ptep = &l2b->l2b_kva[l2pte_index(va)]; - } - - *vap = va; - *availp = va + (PAGE_SIZE * pages); -} - -/* - * Bootstrap the system enough to run with virtual memory. - * - * On the arm this is called after mapping has already been enabled - * and just syncs the pmap module with what has already been done. - * [We can't call it easily with mapping off since the kernel is not - * mapped with PA == VA, hence we would have to relocate every address - * from the linked base (virtual) address "KERNBASE" to the actual - * (physical) address starting relative to 0] - */ -#define PMAP_STATIC_L2_SIZE 16 -void -pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt) -{ - static struct l1_ttable static_l1; - static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; - struct l1_ttable *l1 = &static_l1; - struct l2_dtable *l2; - struct l2_bucket *l2b; - pd_entry_t pde; - pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va; - pt_entry_t *ptep; - pt_entry_t *qmap_pte; - vm_paddr_t pa; - vm_offset_t va; - vm_size_t size; - int l1idx, l2idx, l2next = 0; - - PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n", - firstaddr, vm_max_kernel_address)); - - virtual_avail = firstaddr; - kernel_pmap->pm_l1 = l1; - kernel_l1pa = l1pt->pv_pa; - - /* - * Scan the L1 translation table created by initarm() and create - * the required metadata for all valid mappings found in it. - */ - for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { - pde = kernel_l1pt[l1idx]; - - /* - * We're only interested in Coarse mappings. - * pmap_extract() can deal with section mappings without - * recourse to checking L2 metadata. - */ - if ((pde & L1_TYPE_MASK) != L1_TYPE_C) - continue; - - /* - * Lookup the KVA of this L2 descriptor table - */ - pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); - ptep = (pt_entry_t *)kernel_pt_lookup(pa); - - if (ptep == NULL) { - panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", - (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa); - } - - /* - * Fetch the associated L2 metadata structure. 
- * Allocate a new one if necessary. - */ - if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) { - if (l2next == PMAP_STATIC_L2_SIZE) - panic("pmap_bootstrap: out of static L2s"); - kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 = - &static_l2[l2next++]; - } - - /* - * One more L1 slot tracked... - */ - l2->l2_occupancy++; - - /* - * Fill in the details of the L2 descriptor in the - * appropriate bucket. - */ - l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; - l2b->l2b_kva = ptep; - l2b->l2b_phys = pa; - l2b->l2b_l1idx = l1idx; - - /* - * Establish an initial occupancy count for this descriptor - */ - for (l2idx = 0; - l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); - l2idx++) { - if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { - l2b->l2b_occupancy++; - } - } - - /* - * Make sure the descriptor itself has the correct cache mode. - * If not, fix it, but whine about the problem. Port-meisters - * should consider this a clue to fix up their initarm() - * function. :) - */ - if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) { - printf("pmap_bootstrap: WARNING! wrong cache mode for " - "L2 pte @ %p\n", ptep); - } - } - - /* - * Ensure the primary (kernel) L1 has the correct cache mode for - * a page table. Bitch if it is not correctly set. - */ - for (va = (vm_offset_t)kernel_l1pt; - va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) { - if (pmap_set_pt_cache_mode(kernel_l1pt, va)) - printf("pmap_bootstrap: WARNING! wrong cache mode for " - "primary L1 @ 0x%x\n", va); - } - - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - cpu_tlb_flushID(); - cpu_cpwait(); - - PMAP_LOCK_INIT(kernel_pmap); - CPU_FILL(&kernel_pmap->pm_active); - kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL; - TAILQ_INIT(&kernel_pmap->pm_pvlist); - - /* - * Initialize the global pv list lock. - */ - rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE); - - /* - * Reserve some special page table entries/VA space for temporary - * mapping of pages. - */ - pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte); - pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte); - pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte); - pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte); - pmap_alloc_specials(&virtual_avail, 1, &qmap_addr, &qmap_pte); - pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)qmap_pte); - size = ((vm_max_kernel_address - pmap_curmaxkvaddr) + L1_S_OFFSET) / - L1_S_SIZE; - pmap_alloc_specials(&virtual_avail, - round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, - &pmap_kernel_l2ptp_kva, NULL); - - size = howmany(size, L2_BUCKET_SIZE); - pmap_alloc_specials(&virtual_avail, - round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, - &pmap_kernel_l2dtable_kva, NULL); - - pmap_alloc_specials(&virtual_avail, - 1, (vm_offset_t*)&_tmppt, NULL); - pmap_alloc_specials(&virtual_avail, - MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL); - SLIST_INIT(&l1_list); - TAILQ_INIT(&l1_lru_list); - mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF); - pmap_init_l1(l1, kernel_l1pt); - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - - virtual_avail = round_page(virtual_avail); - virtual_end = vm_max_kernel_address; - kernel_vm_end = pmap_curmaxkvaddr; - mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF); - mtx_init(&qmap_mtx, "quick mapping mtx", NULL, MTX_DEF); - - pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb); -} - -/*************************************************** - * Pmap allocation/deallocation routines. 
- ***************************************************/ - -/* - * Release any resources held by the given physical map. - * Called when a pmap initialized by pmap_pinit is being released. - * Should only be called if the map contains no valid mappings. - */ -void -pmap_release(pmap_t pmap) -{ - struct pcb *pcb; - - pmap_idcache_wbinv_all(pmap); - cpu_l2cache_wbinv_all(); - pmap_tlb_flushID(pmap); - cpu_cpwait(); - if (vector_page < KERNBASE) { - struct pcb *curpcb = PCPU_GET(curpcb); - pcb = thread0.td_pcb; - if (pmap_is_current(pmap)) { - /* - * Frob the L1 entry corresponding to the vector - * page so that it contains the kernel pmap's domain - * number. This will ensure pmap_remove() does not - * pull the current vector page out from under us. - */ - critical_enter(); - *pcb->pcb_pl1vec = pcb->pcb_l1vec; - cpu_domains(pcb->pcb_dacr); - cpu_setttb(pcb->pcb_pagedir); - critical_exit(); - } - pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE); - /* - * Make sure cpu_switch(), et al, DTRT. This is safe to do - * since this process has no remaining mappings of its own. - */ - curpcb->pcb_pl1vec = pcb->pcb_pl1vec; - curpcb->pcb_l1vec = pcb->pcb_l1vec; - curpcb->pcb_dacr = pcb->pcb_dacr; - curpcb->pcb_pagedir = pcb->pcb_pagedir; - } - pmap_free_l1(pmap); - - dprintf("pmap_release()\n"); -} - -/* - * Helper function for pmap_grow_l2_bucket() - */ -static __inline int -pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep; - vm_paddr_t pa; - struct vm_page *pg; - - pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); - if (pg == NULL) - return (1); - pa = VM_PAGE_TO_PHYS(pg); - - if (pap) - *pap = pa; - - l2b = pmap_get_l2_bucket(kernel_pmap, va); - - ptep = &l2b->l2b_kva[l2pte_index(va)]; - *ptep = L2_S_PROTO | pa | cache_mode | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); - PTE_SYNC(ptep); - return (0); -} - -/* - * This is the same as pmap_alloc_l2_bucket(), except that it is only - * used by pmap_growkernel(). - */ -static __inline struct l2_bucket * -pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va) -{ - struct l2_dtable *l2; - struct l2_bucket *l2b; - struct l1_ttable *l1; - pd_entry_t *pl1pd; - u_short l1idx; - vm_offset_t nva; - - l1idx = L1_IDX(va); - - if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { - /* - * No mapping at this address, as there is - * no entry in the L1 table. - * Need to allocate a new l2_dtable. - */ - nva = pmap_kernel_l2dtable_kva; - if ((nva & PAGE_MASK) == 0) { - /* - * Need to allocate a backing page - */ - if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) - return (NULL); - } - - l2 = (struct l2_dtable *)nva; - nva += sizeof(struct l2_dtable); - - if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva & - PAGE_MASK)) { - /* - * The new l2_dtable straddles a page boundary. - * Map in another page to cover it. - */ - if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) - return (NULL); - } - - pmap_kernel_l2dtable_kva = nva; - - /* - * Link it into the parent pmap - */ - pm->pm_l2[L2_IDX(l1idx)] = l2; - memset(l2, 0, sizeof(*l2)); - } - - l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; - - /* - * Fetch pointer to the L2 page table associated with the address. - */ - if (l2b->l2b_kva == NULL) { - pt_entry_t *ptep; - - /* - * No L2 page table has been allocated. Chances are, this - * is because we just allocated the l2_dtable, above. 
- */ - nva = pmap_kernel_l2ptp_kva; - ptep = (pt_entry_t *)nva; - if ((nva & PAGE_MASK) == 0) { - /* - * Need to allocate a backing page - */ - if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, - &pmap_kernel_l2ptp_phys)) - return (NULL); - PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); - } - memset(ptep, 0, L2_TABLE_SIZE_REAL); - l2->l2_occupancy++; - l2b->l2b_kva = ptep; - l2b->l2b_l1idx = l1idx; - l2b->l2b_phys = pmap_kernel_l2ptp_phys; - - pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; - pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; - } - - /* Distribute new L1 entry to all other L1s */ - SLIST_FOREACH(l1, &l1_list, l1_link) { - pl1pd = &l1->l1_kva[L1_IDX(va)]; - *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | - L1_C_PROTO; - PTE_SYNC(pl1pd); - } - - return (l2b); -} - -/* - * grow the number of kernel page table entries, if needed - */ -void -pmap_growkernel(vm_offset_t addr) -{ - pmap_t kpm = kernel_pmap; - - if (addr <= pmap_curmaxkvaddr) - return; /* we are OK */ - - /* - * whoops! we need to add kernel PTPs - */ - - /* Map 1MB at a time */ - for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE) - pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); - - /* - * flush out the cache, expensive but growkernel will happen so - * rarely - */ - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - cpu_tlb_flushD(); - cpu_cpwait(); - kernel_vm_end = pmap_curmaxkvaddr; -} - -/* - * Remove all pages from specified address space - * this aids process exit speeds. Also, this code - * is special cased for current process only, but - * can have the more generic (and slightly slower) - * mode enabled. This is much faster than pmap_remove - * in the case of running down an entire address space. - */ -void -pmap_remove_pages(pmap_t pmap) -{ - struct pv_entry *pv, *npv; - struct l2_bucket *l2b = NULL; - vm_page_t m; - pt_entry_t *pt; - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - cpu_idcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { - if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) { - /* Cannot remove wired or unmanaged pages now. */ - npv = TAILQ_NEXT(pv, pv_plist); - continue; - } - pmap->pm_stats.resident_count--; - l2b = pmap_get_l2_bucket(pmap, pv->pv_va); - KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages")); - pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; - m = PHYS_TO_VM_PAGE(*pt & L2_S_FRAME); - KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt)); - *pt = 0; - PTE_SYNC(pt); - npv = TAILQ_NEXT(pv, pv_plist); - pmap_nuke_pv(m, pmap, pv); - if (TAILQ_EMPTY(&m->md.pv_list)) - vm_page_aflag_clear(m, PGA_WRITEABLE); - pmap_free_pv_entry(pv); - pmap_free_l2_bucket(pmap, l2b, 1); - } - rw_wunlock(&pvh_global_lock); - cpu_tlb_flushID(); - cpu_cpwait(); - PMAP_UNLOCK(pmap); -} - -/*************************************************** - * Low level mapping routines..... - ***************************************************/ - -/* Map a section into the KVA. */ - -/* - * Make a temporary mapping for a physical address. This is only intended - * to be used for panic dumps. - */ -void * -pmap_kenter_temporary(vm_paddr_t pa, int i) -{ - vm_offset_t va; - - va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); - pmap_kenter(va, pa); - return ((void *)crashdumpmap); -} - -/* - * add a wired page to the kva - * note that in order for the mapping to take effect -- you - * should do a invltlb after doing the pmap_kenter... 
- */ -static PMAP_INLINE void -pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) -{ - struct l2_bucket *l2b; - pt_entry_t *pte; - pt_entry_t opte; - struct pv_entry *pve; - vm_page_t m; - - PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n", - (uint32_t) va, (uint32_t) pa)); - - l2b = pmap_get_l2_bucket(kernel_pmap, va); - if (l2b == NULL) - l2b = pmap_grow_l2_bucket(kernel_pmap, va); - KASSERT(l2b != NULL, ("No L2 Bucket")); - pte = &l2b->l2b_kva[l2pte_index(va)]; - opte = *pte; - PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n", - (uint32_t) pte, opte, *pte)); - if (l2pte_valid(opte)) { - pmap_kremove(va); - } else { - if (opte == 0) - l2b->l2b_occupancy++; - } - *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, - VM_PROT_READ | VM_PROT_WRITE); - if (flags & KENTER_CACHE) - *pte |= pte_l2_s_cache_mode; - if (flags & KENTER_USER) - *pte |= L2_S_PROT_U; - PTE_SYNC(pte); - - /* - * A kernel mapping may not be the page's only mapping, so create a PV - * entry to ensure proper caching. - * - * The existence test for the pvzone is used to delay the recording of - * kernel mappings until the VM system is fully initialized. - * - * This expects the physical memory to have a vm_page_array entry. - */ - if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) != NULL) { - rw_wlock(&pvh_global_lock); - if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva != 0) { - if ((pve = pmap_get_pv_entry()) == NULL) - panic("pmap_kenter_internal: no pv entries"); - PMAP_LOCK(kernel_pmap); - pmap_enter_pv(m, pve, kernel_pmap, va, - PVF_WRITE | PVF_UNMAN); - pmap_fix_cache(m, kernel_pmap, va); - PMAP_UNLOCK(kernel_pmap); - } else { - m->md.pv_kva = va; - } - rw_wunlock(&pvh_global_lock); - } -} - -void -pmap_kenter(vm_offset_t va, vm_paddr_t pa) -{ - pmap_kenter_internal(va, pa, KENTER_CACHE); -} - -void -pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa) -{ - - pmap_kenter_internal(va, pa, 0); -} - -void -pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) -{ - vm_offset_t sva; - - KASSERT((size & PAGE_MASK) == 0, - ("%s: device mapping not page-sized", __func__)); - - sva = va; - while (size != 0) { - pmap_kenter_internal(va, pa, 0); - va += PAGE_SIZE; - pa += PAGE_SIZE; - size -= PAGE_SIZE; - } -} - -void -pmap_kremove_device(vm_offset_t va, vm_size_t size) -{ - vm_offset_t sva; - - KASSERT((size & PAGE_MASK) == 0, - ("%s: device mapping not page-sized", __func__)); - - sva = va; - while (size != 0) { - pmap_kremove(va); - va += PAGE_SIZE; - size -= PAGE_SIZE; - } -} - -void -pmap_kenter_user(vm_offset_t va, vm_paddr_t pa) -{ - - pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER); - /* - * Call pmap_fault_fixup now, to make sure we'll have no exception - * at the first use of the new address, or bad things will happen, - * as we use one of these addresses in the exception handlers. 
- */ - pmap_fault_fixup(kernel_pmap, va, VM_PROT_READ|VM_PROT_WRITE, 1); -} - -vm_paddr_t -pmap_kextract(vm_offset_t va) -{ - - return (pmap_extract_locked(kernel_pmap, va)); -} - -/* - * remove a page from the kernel pagetables - */ -void -pmap_kremove(vm_offset_t va) -{ - struct l2_bucket *l2b; - pt_entry_t *pte, opte; - struct pv_entry *pve; - vm_page_t m; - vm_offset_t pa; - - l2b = pmap_get_l2_bucket(kernel_pmap, va); - if (!l2b) - return; - KASSERT(l2b != NULL, ("No L2 Bucket")); - pte = &l2b->l2b_kva[l2pte_index(va)]; - opte = *pte; - if (l2pte_valid(opte)) { - /* pa = vtophs(va) taken from pmap_extract() */ - if ((opte & L2_TYPE_MASK) == L2_TYPE_L) - pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET); - else - pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET); - /* note: should never have to remove an allocation - * before the pvzone is initialized. - */ - rw_wlock(&pvh_global_lock); - PMAP_LOCK(kernel_pmap); - if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) && - (pve = pmap_remove_pv(m, kernel_pmap, va))) - pmap_free_pv_entry(pve); - PMAP_UNLOCK(kernel_pmap); - rw_wunlock(&pvh_global_lock); - va = va & ~PAGE_MASK; - cpu_dcache_wbinv_range(va, PAGE_SIZE); - cpu_l2cache_wbinv_range(va, PAGE_SIZE); - cpu_tlb_flushD_SE(va); - cpu_cpwait(); - *pte = 0; - } -} - -/* - * Used to map a range of physical addresses into kernel - * virtual address space. - * - * The value passed in '*virt' is a suggested virtual address for - * the mapping. Architectures which can support a direct-mapped - * physical to virtual region can return the appropriate address - * within that region, leaving '*virt' unchanged. Other - * architectures should map the pages starting at '*virt' and - * update '*virt' with the first usable address after the mapped - * region. - */ -vm_offset_t -pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) -{ - vm_offset_t sva = *virt; - vm_offset_t va = sva; - - PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, " - "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end, - prot)); - - while (start < end) { - pmap_kenter(va, start); - va += PAGE_SIZE; - start += PAGE_SIZE; - } - *virt = va; - return (sva); -} - -static void -pmap_wb_page(vm_page_t m) -{ - struct pv_entry *pv; - - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) - pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE, - (pv->pv_flags & PVF_WRITE) == 0); -} - -static void -pmap_inv_page(vm_page_t m) -{ - struct pv_entry *pv; - - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) - pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, TRUE); -} -/* - * Add a list of wired pages to the kva - * this routine is only used for temporary - * kernel mappings that do not need to have - * page modification or references recorded. - * Note that old mappings are simply written - * over. The page *must* be wired. - */ -void -pmap_qenter(vm_offset_t va, vm_page_t *m, int count) -{ - int i; - - for (i = 0; i < count; i++) { - pmap_wb_page(m[i]); - pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]), - KENTER_CACHE); - va += PAGE_SIZE; - } -} - -/* - * this routine jerks page mappings from the - * kernel -- it is meant only for temporary mappings. - */ -void -pmap_qremove(vm_offset_t va, int count) -{ - vm_paddr_t pa; - int i; - - for (i = 0; i < count; i++) { - pa = vtophys(va); - if (pa) { - pmap_inv_page(PHYS_TO_VM_PAGE(pa)); - pmap_kremove(va); - } - va += PAGE_SIZE; - } -} - -/* - * pmap_object_init_pt preloads the ptes for a given object - * into the specified pmap. 
This eliminates the blast of soft - * faults on process startup and immediately after an mmap. - */ -void -pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, - vm_pindex_t pindex, vm_size_t size) -{ - - VM_OBJECT_ASSERT_WLOCKED(object); - KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, - ("pmap_object_init_pt: non-device object")); -} - -/* - * pmap_is_prefaultable: - * - * Return whether or not the specified virtual address is elgible - * for prefault. - */ -boolean_t -pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) -{ - pd_entry_t *pde; - pt_entry_t *pte; - - if (!pmap_get_pde_pte(pmap, addr, &pde, &pte)) - return (FALSE); - KASSERT(pte != NULL, ("Valid mapping but no pte ?")); - if (*pte == 0) - return (TRUE); - return (FALSE); -} - -/* - * Fetch pointers to the PDE/PTE for the given pmap/VA pair. - * Returns TRUE if the mapping exists, else FALSE. - * - * NOTE: This function is only used by a couple of arm-specific modules. - * It is not safe to take any pmap locks here, since we could be right - * in the middle of debugging the pmap anyway... - * - * It is possible for this routine to return FALSE even though a valid - * mapping does exist. This is because we don't lock, so the metadata - * state may be inconsistent. - * - * NOTE: We can return a NULL *ptp in the case where the L1 pde is - * a "section" mapping. - */ -boolean_t -pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp) -{ - struct l2_dtable *l2; - pd_entry_t *pl1pd, l1pd; - pt_entry_t *ptep; - u_short l1idx; - - if (pm->pm_l1 == NULL) - return (FALSE); - - l1idx = L1_IDX(va); - *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; - l1pd = *pl1pd; - - if (l1pte_section_p(l1pd)) { - *ptp = NULL; - return (TRUE); - } - - if (pm->pm_l2 == NULL) - return (FALSE); - - l2 = pm->pm_l2[L2_IDX(l1idx)]; - - if (l2 == NULL || - (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { - return (FALSE); - } - - *ptp = &ptep[l2pte_index(va)]; - return (TRUE); -} - -/* - * Routine: pmap_remove_all - * Function: - * Removes this physical page from - * all physical maps in which it resides. - * Reflects back modify bits to the pager. - * - * Notes: - * Original versions of this routine were very - * inefficient because they iteratively called - * pmap_remove (slow...) - */ -void -pmap_remove_all(vm_page_t m) -{ - pv_entry_t pv; - pt_entry_t *ptep; - struct l2_bucket *l2b; - boolean_t flush = FALSE; - pmap_t curpm; - int flags = 0; - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_remove_all: page %p is not managed", m)); - if (TAILQ_EMPTY(&m->md.pv_list)) - return; - rw_wlock(&pvh_global_lock); - - /* - * XXX This call shouldn't exist. Iterating over the PV list twice, - * once in pmap_clearbit() and again below, is both unnecessary and - * inefficient. The below code should itself write back the cache - * entry before it destroys the mapping. - */ - pmap_clearbit(m, PVF_WRITE); - curpm = vmspace_pmap(curproc->p_vmspace); - while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { - if (flush == FALSE && (pv->pv_pmap == curpm || - pv->pv_pmap == kernel_pmap)) - flush = TRUE; - - PMAP_LOCK(pv->pv_pmap); - /* - * Cached contents were written-back in pmap_clearbit(), - * but we still have to invalidate the cache entry to make - * sure stale data are not retrieved when another page will be - * mapped under this virtual address. 
- */ - if (pmap_is_current(pv->pv_pmap)) { - cpu_dcache_inv_range(pv->pv_va, PAGE_SIZE); - if (pmap_has_valid_mapping(pv->pv_pmap, pv->pv_va)) - cpu_l2cache_inv_range(pv->pv_va, PAGE_SIZE); - } - - if (pv->pv_flags & PVF_UNMAN) { - /* remove the pv entry, but do not remove the mapping - * and remember this is a kernel mapped page - */ - m->md.pv_kva = pv->pv_va; - } else { - /* remove the mapping and pv entry */ - l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); - KASSERT(l2b != NULL, ("No l2 bucket")); - ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; - *ptep = 0; - PTE_SYNC_CURRENT(pv->pv_pmap, ptep); - pmap_free_l2_bucket(pv->pv_pmap, l2b, 1); - pv->pv_pmap->pm_stats.resident_count--; - flags |= pv->pv_flags; - } - pmap_nuke_pv(m, pv->pv_pmap, pv); - PMAP_UNLOCK(pv->pv_pmap); - pmap_free_pv_entry(pv); - } - - if (flush) { - if (PV_BEEN_EXECD(flags)) - pmap_tlb_flushID(curpm); - else - pmap_tlb_flushD(curpm); - } - vm_page_aflag_clear(m, PGA_WRITEABLE); - rw_wunlock(&pvh_global_lock); -} - -/* - * Set the physical protection on the - * specified range of this map as requested. - */ -void -pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - vm_offset_t next_bucket; - u_int flags; - int flush; - - CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x", - pm, sva, eva, prot); - - if ((prot & VM_PROT_READ) == 0) { - pmap_remove(pm, sva, eva); - return; - } - - if (prot & VM_PROT_WRITE) { - /* - * If this is a read->write transition, just ignore it and let - * vm_fault() take care of it later. - */ - return; - } - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - - /* - * OK, at this point, we know we're doing write-protect operation. - * If the pmap is active, write-back the range. - */ - pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE); - - flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; - flags = 0; - - while (sva < eva) { - next_bucket = L2_NEXT_BUCKET(sva); - if (next_bucket > eva) - next_bucket = eva; - - l2b = pmap_get_l2_bucket(pm, sva); - if (l2b == NULL) { - sva = next_bucket; - continue; - } - - ptep = &l2b->l2b_kva[l2pte_index(sva)]; - - while (sva < next_bucket) { - if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) { - struct vm_page *pg; - u_int f; - - pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); - pte &= ~L2_S_PROT_W; - *ptep = pte; - PTE_SYNC(ptep); - - if (!(pg->oflags & VPO_UNMANAGED)) { - f = pmap_modify_pv(pg, pm, sva, - PVF_WRITE, 0); - if (f & PVF_WRITE) - vm_page_dirty(pg); - } else - f = 0; - - if (flush >= 0) { - flush++; - flags |= f; - } else - if (PV_BEEN_EXECD(f)) - pmap_tlb_flushID_SE(pm, sva); - else - if (PV_BEEN_REFD(f)) - pmap_tlb_flushD_SE(pm, sva); - } - - sva += PAGE_SIZE; - ptep++; - } - } - - if (flush) { - if (PV_BEEN_EXECD(flags)) - pmap_tlb_flushID(pm); - else - if (PV_BEEN_REFD(flags)) - pmap_tlb_flushD(pm); - } - rw_wunlock(&pvh_global_lock); - - PMAP_UNLOCK(pm); -} - -/* - * Insert the given physical page (p) at - * the specified virtual address (v) in the - * target physical map with the protection requested. - * - * If specified, the page will be wired down, meaning - * that the related pte can not be reclaimed. - * - * NB: This is the only routine which MAY NOT lazy-evaluate - * or lose information. That is, this routine must actually - * insert this page into the given map NOW. 
- */ - -int -pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - u_int flags, int8_t psind __unused) -{ - int rv; - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - rv = pmap_enter_locked(pmap, va, m, prot, flags); - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pmap); - return (rv); -} - -/* - * The pvh global and pmap locks must be held. - */ -static int -pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - u_int flags) -{ - struct l2_bucket *l2b = NULL; - struct vm_page *opg; - struct pv_entry *pve = NULL; - pt_entry_t *ptep, npte, opte; - u_int nflags; - u_int oflags; - vm_paddr_t pa; - - PMAP_ASSERT_LOCKED(pmap); - rw_assert(&pvh_global_lock, RA_WLOCKED); - if (va == vector_page) { - pa = systempage.pv_pa; - m = NULL; - } else { - if ((m->oflags & VPO_UNMANAGED) == 0) { - if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0) - VM_PAGE_OBJECT_BUSY_ASSERT(m); - else - VM_OBJECT_ASSERT_LOCKED(m->object); - } - pa = VM_PAGE_TO_PHYS(m); - } - nflags = 0; - if (prot & VM_PROT_WRITE) - nflags |= PVF_WRITE; - if (prot & VM_PROT_EXECUTE) - nflags |= PVF_EXEC; - if ((flags & PMAP_ENTER_WIRED) != 0) - nflags |= PVF_WIRED; - PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " - "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags)); - - if (pmap == kernel_pmap) { - l2b = pmap_get_l2_bucket(pmap, va); - if (l2b == NULL) - l2b = pmap_grow_l2_bucket(pmap, va); - } else { -do_l2b_alloc: - l2b = pmap_alloc_l2_bucket(pmap, va); - if (l2b == NULL) { - if ((flags & PMAP_ENTER_NOSLEEP) == 0) { - PMAP_UNLOCK(pmap); - rw_wunlock(&pvh_global_lock); - vm_wait(NULL); - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - goto do_l2b_alloc; - } - return (KERN_RESOURCE_SHORTAGE); - } - } - - ptep = &l2b->l2b_kva[l2pte_index(va)]; - - opte = *ptep; - npte = pa; - oflags = 0; - if (opte) { - /* - * There is already a mapping at this address. - * If the physical address is different, lookup the - * vm_page. - */ - if (l2pte_pa(opte) != pa) - opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); - else - opg = m; - } else - opg = NULL; - - if ((prot & (VM_PROT_ALL)) || - (!m || m->md.pvh_attrs & PVF_REF)) { - /* - * - The access type indicates that we don't need - * to do referenced emulation. - * OR - * - The physical page has already been referenced - * so no need to re-do referenced emulation here. - */ - npte |= L2_S_PROTO; - - nflags |= PVF_REF; - - if (m && ((prot & VM_PROT_WRITE) != 0 || - (m->md.pvh_attrs & PVF_MOD))) { - /* - * This is a writable mapping, and the - * page's mod state indicates it has - * already been modified. Make it - * writable from the outset. - */ - nflags |= PVF_MOD; - if (!(m->md.pvh_attrs & PVF_MOD)) - vm_page_dirty(m); - } - if (m && opte) - vm_page_aflag_set(m, PGA_REFERENCED); - } else { - /* - * Need to do page referenced emulation. - */ - npte |= L2_TYPE_INV; - } - - if (prot & VM_PROT_WRITE) { - npte |= L2_S_PROT_W; - if (m != NULL && - (m->oflags & VPO_UNMANAGED) == 0) - vm_page_aflag_set(m, PGA_WRITEABLE); - } - if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) - npte |= pte_l2_s_cache_mode; - if (m && m == opg) { - /* - * We're changing the attrs of an existing mapping. - */ - oflags = pmap_modify_pv(m, pmap, va, - PVF_WRITE | PVF_EXEC | PVF_WIRED | - PVF_MOD | PVF_REF, nflags); - - /* - * We may need to flush the cache if we're - * doing rw-ro... 
- */ - if (pmap_is_current(pmap) && - (oflags & PVF_NC) == 0 && - (opte & L2_S_PROT_W) != 0 && - (prot & VM_PROT_WRITE) == 0 && - (opte & L2_TYPE_MASK) != L2_TYPE_INV) { - cpu_dcache_wb_range(va, PAGE_SIZE); - cpu_l2cache_wb_range(va, PAGE_SIZE); - } - } else { - /* - * New mapping, or changing the backing page - * of an existing mapping. - */ - if (opg) { - /* - * Replacing an existing mapping with a new one. - * It is part of our managed memory so we - * must remove it from the PV list - */ - if ((pve = pmap_remove_pv(opg, pmap, va))) { - /* note for patch: the oflags/invalidation was moved - * because PG_FICTITIOUS pages could free the pve - */ - oflags = pve->pv_flags; - /* - * If the old mapping was valid (ref/mod - * emulation creates 'invalid' mappings - * initially) then make sure to frob - * the cache. - */ - if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { - if (PV_BEEN_EXECD(oflags)) { - pmap_idcache_wbinv_range(pmap, va, - PAGE_SIZE); - } else - if (PV_BEEN_REFD(oflags)) { - pmap_dcache_wb_range(pmap, va, - PAGE_SIZE, TRUE, - (oflags & PVF_WRITE) == 0); - } - } - - /* free/allocate a pv_entry for UNMANAGED pages if - * this physical page is not/is already mapped. - */ - - if (m && (m->oflags & VPO_UNMANAGED) && - !m->md.pv_kva && - TAILQ_EMPTY(&m->md.pv_list)) { - pmap_free_pv_entry(pve); - pve = NULL; - } - } else if (m && - (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || - !TAILQ_EMPTY(&m->md.pv_list))) - pve = pmap_get_pv_entry(); - } else if (m && - (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || - !TAILQ_EMPTY(&m->md.pv_list))) - pve = pmap_get_pv_entry(); - - if (m) { - if ((m->oflags & VPO_UNMANAGED)) { - if (!TAILQ_EMPTY(&m->md.pv_list) || - m->md.pv_kva) { - KASSERT(pve != NULL, ("No pv")); - nflags |= PVF_UNMAN; - pmap_enter_pv(m, pve, pmap, va, nflags); - } else - m->md.pv_kva = va; - } else { - KASSERT(va < kmi.clean_sva || - va >= kmi.clean_eva, - ("pmap_enter: managed mapping within the clean submap")); - KASSERT(pve != NULL, ("No pv")); - pmap_enter_pv(m, pve, pmap, va, nflags); - } - } - } - /* - * Make sure userland mappings get the right permissions - */ - if (pmap != kernel_pmap && va != vector_page) { - npte |= L2_S_PROT_U; - } - - /* - * Keep the stats up to date - */ - if (opte == 0) { - l2b->l2b_occupancy++; - pmap->pm_stats.resident_count++; - } - - /* - * If this is just a wiring change, the two PTEs will be - * identical, so there's no need to update the page table. - */ - if (npte != opte) { - boolean_t is_cached = pmap_is_current(pmap); - - *ptep = npte; - if (is_cached) { - /* - * We only need to frob the cache/tlb if this pmap - * is current - */ - PTE_SYNC(ptep); - if (L1_IDX(va) != L1_IDX(vector_page) && - l2pte_valid(npte)) { - /* - * This mapping is likely to be accessed as - * soon as we return to userland. Fix up the - * L1 entry to avoid taking another - * page/domain fault. - */ - pd_entry_t *pl1pd, l1pd; - - pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)]; - l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | - L1_C_PROTO; - if (*pl1pd != l1pd) { - *pl1pd = l1pd; - PTE_SYNC(pl1pd); - } - } - } - - if (PV_BEEN_EXECD(oflags)) - pmap_tlb_flushID_SE(pmap, va); - else if (PV_BEEN_REFD(oflags)) - pmap_tlb_flushD_SE(pmap, va); - - if (m) - pmap_fix_cache(m, pmap, va); - } - return (KERN_SUCCESS); -} - -/* - * Maps a sequence of resident pages belonging to the same object. - * The sequence begins with the given page m_start. This page is - * mapped at the given virtual address start. 
Each subsequent page is - * mapped at a virtual address that is offset from start by the same - * amount as the page is offset from m_start within the object. The - * last page in the sequence is the page with the largest offset from - * m_start that can be mapped at a virtual address less than the given - * virtual address end. Not every virtual page between start and end - * is mapped; only those for which a resident page exists with the - * corresponding offset from m_start are mapped. - */ -void -pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, - vm_page_t m_start, vm_prot_t prot) -{ - vm_page_t m; - vm_pindex_t diff, psize; - - VM_OBJECT_ASSERT_LOCKED(m_start->object); - - psize = atop(end - start); - m = m_start; - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { - pmap_enter_locked(pmap, start + ptoa(diff), m, prot & - (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP | - PMAP_ENTER_QUICK_LOCKED); - m = TAILQ_NEXT(m, listq); - } - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pmap); -} - -/* - * this code makes some *MAJOR* assumptions: - * 1. Current pmap & pmap exists. - * 2. Not wired. - * 3. Read access. - * 4. No page table pages. - * but is *MUCH* faster than pmap_enter... - */ - -void -pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) -{ - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), - PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED); - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pmap); -} - -/* - * Clear the wired attribute from the mappings for the specified range of - * addresses in the given pmap. Every valid mapping within that range - * must have the wired attribute set. In contrast, invalid mappings - * cannot have the wired attribute set, so they are ignored. - * - * XXX Wired mappings of unmanaged pages cannot be counted by this pmap - * implementation. - */ -void -pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - pv_entry_t pv; - vm_offset_t next_bucket; - vm_page_t m; - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - while (sva < eva) { - next_bucket = L2_NEXT_BUCKET(sva); - if (next_bucket > eva) - next_bucket = eva; - l2b = pmap_get_l2_bucket(pmap, sva); - if (l2b == NULL) { - sva = next_bucket; - continue; - } - for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket; - sva += PAGE_SIZE, ptep++) { - if ((pte = *ptep) == 0 || - (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL || - (m->oflags & VPO_UNMANAGED) != 0) - continue; - pv = pmap_find_pv(m, pmap, sva); - if ((pv->pv_flags & PVF_WIRED) == 0) - panic("pmap_unwire: pv %p isn't wired", pv); - pv->pv_flags &= ~PVF_WIRED; - pmap->pm_stats.wired_count--; - } - } - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pmap); -} - -/* - * Copy the range specified by src_addr/len - * from the source map to the range dst_addr/len - * in the destination map. - * - * This routine is only advisory and need not do anything. - */ -void -pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, - vm_size_t len, vm_offset_t src_addr) -{ -} - -/* - * Routine: pmap_extract - * Function: - * Extract the physical page address associated - * with the given map/virtual_address pair. 
- */ -vm_paddr_t -pmap_extract(pmap_t pmap, vm_offset_t va) -{ - vm_paddr_t pa; - - PMAP_LOCK(pmap); - pa = pmap_extract_locked(pmap, va); - PMAP_UNLOCK(pmap); - return (pa); -} - -static vm_paddr_t -pmap_extract_locked(pmap_t pmap, vm_offset_t va) -{ - struct l2_dtable *l2; - pd_entry_t l1pd; - pt_entry_t *ptep, pte; - vm_paddr_t pa; - u_int l1idx; - - if (pmap != kernel_pmap) - PMAP_ASSERT_LOCKED(pmap); - l1idx = L1_IDX(va); - l1pd = pmap->pm_l1->l1_kva[l1idx]; - if (l1pte_section_p(l1pd)) { - /* - * These should only happen for the kernel pmap. - */ - KASSERT(pmap == kernel_pmap, ("unexpected section")); - /* XXX: what to do about the bits > 32 ? */ - if (l1pd & L1_S_SUPERSEC) - pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); - else - pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); - } else { - /* - * Note that we can't rely on the validity of the L1 - * descriptor as an indication that a mapping exists. - * We have to look it up in the L2 dtable. - */ - l2 = pmap->pm_l2[L2_IDX(l1idx)]; - if (l2 == NULL || - (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) - return (0); - pte = ptep[l2pte_index(va)]; - if (pte == 0) - return (0); - if ((pte & L2_TYPE_MASK) == L2_TYPE_L) - pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); - else - pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); - } - return (pa); -} - -/* - * Atomically extract and hold the physical page with the given - * pmap and virtual address pair if that mapping permits the given - * protection. - * - */ -vm_page_t -pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) -{ - struct l2_dtable *l2; - pd_entry_t l1pd; - pt_entry_t *ptep, pte; - vm_paddr_t pa; - vm_page_t m; - u_int l1idx; - - l1idx = L1_IDX(va); - m = NULL; - - PMAP_LOCK(pmap); - l1pd = pmap->pm_l1->l1_kva[l1idx]; - if (l1pte_section_p(l1pd)) { - /* - * These should only happen for kernel_pmap - */ - KASSERT(pmap == kernel_pmap, ("huh")); - /* XXX: what to do about the bits > 32 ? */ - if (l1pd & L1_S_SUPERSEC) - pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); - else - pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); - if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { - m = PHYS_TO_VM_PAGE(pa); - if (!vm_page_wire_mapped(m)) - m = NULL; - } - } else { - /* - * Note that we can't rely on the validity of the L1 - * descriptor as an indication that a mapping exists. - * We have to look it up in the L2 dtable. 
- */ - l2 = pmap->pm_l2[L2_IDX(l1idx)]; - - if (l2 == NULL || - (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { - PMAP_UNLOCK(pmap); - return (NULL); - } - - ptep = &ptep[l2pte_index(va)]; - pte = *ptep; - - if (pte == 0) { - PMAP_UNLOCK(pmap); - return (NULL); - } - if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { - if ((pte & L2_TYPE_MASK) == L2_TYPE_L) - pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); - else - pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); - m = PHYS_TO_VM_PAGE(pa); - if (!vm_page_wire_mapped(m)) - m = NULL; - } - } - PMAP_UNLOCK(pmap); - return (m); -} - -vm_paddr_t -pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p) -{ - struct l2_dtable *l2; - pd_entry_t l1pd; - pt_entry_t *ptep, pte; - vm_paddr_t pa; - u_int l1idx; - - l1idx = L1_IDX(va); - l1pd = kernel_pmap->pm_l1->l1_kva[l1idx]; - if (l1pte_section_p(l1pd)) { - if (l1pd & L1_S_SUPERSEC) - pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); - else - pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); - pte = L2_S_PROTO | pa | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); - } else { - l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]; - if (l2 == NULL || - (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { - pte = 0; - pa = 0; - goto out; - } - pte = ptep[l2pte_index(va)]; - if (pte == 0) { - pa = 0; - goto out; - } - if ((pte & L2_TYPE_MASK) == L2_TYPE_L) - pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); - else - pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); - } -out: - if (pte2p != NULL) - *pte2p = pte; - return (pa); -} - -/* - * Initialize a preallocated and zeroed pmap structure, - * such as one in a vmspace structure. - */ - -int -pmap_pinit(pmap_t pmap) -{ - PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap)); - - pmap_alloc_l1(pmap); - bzero(pmap->pm_l2, sizeof(pmap->pm_l2)); - - CPU_ZERO(&pmap->pm_active); - - TAILQ_INIT(&pmap->pm_pvlist); - bzero(&pmap->pm_stats, sizeof pmap->pm_stats); - pmap->pm_stats.resident_count = 1; - if (vector_page < KERNBASE) { - pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa), - VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0); - } - return (1); -} - -/*************************************************** - * page management routines. - ***************************************************/ - -static void -pmap_free_pv_entry(pv_entry_t pv) -{ - pv_entry_count--; - uma_zfree(pvzone, pv); -} - -/* - * get a new pv_entry, allocating a block from the system - * when needed. - * the memory allocation is performed bypassing the malloc code - * because of the possibility of allocations at interrupt time. - */ -static pv_entry_t -pmap_get_pv_entry(void) -{ - pv_entry_t ret_value; - - pv_entry_count++; - if (pv_entry_count > pv_entry_high_water) - pagedaemon_wakeup(0); /* XXX ARM NUMA */ - ret_value = uma_zalloc(pvzone, M_NOWAIT); - return ret_value; -} - -/* - * Remove the given range of addresses from the specified map. - * - * It is assumed that the start and end are properly - * rounded to the page size. - */ -#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 -void -pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) -{ - struct l2_bucket *l2b; - vm_offset_t next_bucket; - pt_entry_t *ptep; - u_int total; - u_int mappings, is_exec, is_refd; - int flushall = 0; - - /* - * we lock in the pmap => pv_head direction - */ - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - total = 0; - while (sva < eva) { - /* - * Do one L2 bucket's worth at a time. 
- */ - next_bucket = L2_NEXT_BUCKET(sva); - if (next_bucket > eva) - next_bucket = eva; - - l2b = pmap_get_l2_bucket(pm, sva); - if (l2b == NULL) { - sva = next_bucket; - continue; - } - - ptep = &l2b->l2b_kva[l2pte_index(sva)]; - mappings = 0; - - while (sva < next_bucket) { - struct vm_page *pg; - pt_entry_t pte; - vm_paddr_t pa; - - pte = *ptep; - - if (pte == 0) { - /* - * Nothing here, move along - */ - sva += PAGE_SIZE; - ptep++; - continue; - } - - pm->pm_stats.resident_count--; - pa = l2pte_pa(pte); - is_exec = 0; - is_refd = 1; - - /* - * Update flags. In a number of circumstances, - * we could cluster a lot of these and do a - * number of sequential pages in one go. - */ - if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { - struct pv_entry *pve; - - pve = pmap_remove_pv(pg, pm, sva); - if (pve) { - is_exec = PV_BEEN_EXECD(pve->pv_flags); - is_refd = PV_BEEN_REFD(pve->pv_flags); - pmap_free_pv_entry(pve); - } - } - - if (l2pte_valid(pte) && pmap_is_current(pm)) { - if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) { - total++; - if (is_exec) { - cpu_idcache_wbinv_range(sva, - PAGE_SIZE); - cpu_l2cache_wbinv_range(sva, - PAGE_SIZE); - cpu_tlb_flushID_SE(sva); - } else if (is_refd) { - cpu_dcache_wbinv_range(sva, - PAGE_SIZE); - cpu_l2cache_wbinv_range(sva, - PAGE_SIZE); - cpu_tlb_flushD_SE(sva); - } - } else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) { - /* flushall will also only get set for - * for a current pmap - */ - cpu_idcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - flushall = 1; - total++; - } - } - *ptep = 0; - PTE_SYNC(ptep); - - sva += PAGE_SIZE; - ptep++; - mappings++; - } - - pmap_free_l2_bucket(pm, l2b, mappings); - } - - rw_wunlock(&pvh_global_lock); - if (flushall) - cpu_tlb_flushID(); - PMAP_UNLOCK(pm); -} - -/* - * pmap_zero_page() - * - * Zero a given physical page by mapping it at a page hook point. - * In doing the zero page op, the page we zero is mapped cachable, as with - * StrongARM accesses to non-cached pages are non-burst making writing - * _any_ bulk data very slow. - */ -static void -pmap_zero_page_generic(vm_paddr_t phys, int off, int size) -{ - - if (_arm_bzero && size >= _min_bzero_size && - _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) - return; - - mtx_lock(&cmtx); - /* - * Hook in the page, zero it, invalidate the TLB as needed. - * - * Note the temporary zero-page mapping must be a non-cached page in - * order to work without corruption when write-allocate is enabled. - */ - *cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE); - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - if (off || size != PAGE_SIZE) - bzero((void *)(cdstp + off), size); - else - bzero_page(cdstp); - - mtx_unlock(&cmtx); -} - -/* - * pmap_zero_page zeros the specified hardware page by mapping - * the page into KVM and using bzero to clear its contents. - */ -void -pmap_zero_page(vm_page_t m) -{ - pmap_zero_page_generic(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE); -} - -/* - * pmap_zero_page_area zeros the specified hardware page by mapping - * the page into KVM and using bzero to clear its contents. - * - * off and size may not cover an area beyond a single hardware page. - */ -void -pmap_zero_page_area(vm_page_t m, int off, int size) -{ - - pmap_zero_page_generic(VM_PAGE_TO_PHYS(m), off, size); -} - -#if 0 -/* - * pmap_clean_page() - * - * This is a local function used to work out the best strategy to clean - * a single page referenced by its entry in the PV table. It should be used by - * pmap_copy_page, pmap_zero page and maybe some others later on. 
- * - * Its policy is effectively: - * o If there are no mappings, we don't bother doing anything with the cache. - * o If there is one mapping, we clean just that page. - * o If there are multiple mappings, we clean the entire cache. - * - * So that some functions can be further optimised, it returns 0 if it didn't - * clean the entire cache, or 1 if it did. - * - * XXX One bug in this routine is that if the pv_entry has a single page - * mapped at 0x00000000 a whole cache clean will be performed rather than - * just the 1 page. Since this should not occur in everyday use and if it does - * it will just result in not the most efficient clean for the page. - * - * We don't yet use this function but may want to. - */ -static int -pmap_clean_page(struct pv_entry *pv, boolean_t is_src) -{ - pmap_t pm, pm_to_clean = NULL; - struct pv_entry *npv; - u_int cache_needs_cleaning = 0; - u_int flags = 0; - vm_offset_t page_to_clean = 0; - - if (pv == NULL) { - /* nothing mapped in so nothing to flush */ - return (0); - } - - /* - * Since we flush the cache each time we change to a different - * user vmspace, we only need to flush the page if it is in the - * current pmap. - */ - if (curthread) - pm = vmspace_pmap(curproc->p_vmspace); - else - pm = kernel_pmap; - - for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) { - if (npv->pv_pmap == kernel_pmap || npv->pv_pmap == pm) { - flags |= npv->pv_flags; - /* - * The page is mapped non-cacheable in - * this map. No need to flush the cache. - */ - if (npv->pv_flags & PVF_NC) { -#ifdef DIAGNOSTIC - if (cache_needs_cleaning) - panic("pmap_clean_page: " - "cache inconsistency"); -#endif - break; - } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) - continue; - if (cache_needs_cleaning) { - page_to_clean = 0; - break; - } else { - page_to_clean = npv->pv_va; - pm_to_clean = npv->pv_pmap; - } - cache_needs_cleaning = 1; - } - } - if (page_to_clean) { - if (PV_BEEN_EXECD(flags)) - pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, - PAGE_SIZE); - else - pmap_dcache_wb_range(pm_to_clean, page_to_clean, - PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); - } else if (cache_needs_cleaning) { - if (PV_BEEN_EXECD(flags)) - pmap_idcache_wbinv_all(pm); - else - pmap_dcache_wbinv_all(pm); - return (1); - } - return (0); -} -#endif - -/* - * pmap_copy_page copies the specified (machine independent) - * page by mapping the page into virtual memory and using - * bcopy to copy the page, one machine dependent page at a - * time. - */ - -/* - * pmap_copy_page() - * - * Copy one physical page into another, by mapping the pages into - * hook points. The same comment regarding cachability as in - * pmap_zero_page also applies here. - */ -static void -pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst) -{ -#if 0 - struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); -#endif - - /* - * Clean the source page. Hold the source page's lock for - * the duration of the copy so that no other mappings can - * be created while we have a potentially aliased mapping. - */ -#if 0 - /* - * XXX: Not needed while we call cpu_dcache_wbinv_all() in - * pmap_copy_page(). - */ - (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); -#endif - /* - * Map the pages into the page hook points, copy them, and purge - * the cache for the appropriate page. Invalidate the TLB - * as required. 
- */ - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | src | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | dst | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy_page(csrcp, cdstp); - mtx_unlock(&cmtx); - cpu_dcache_inv_range(csrcp, PAGE_SIZE); - cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); - cpu_l2cache_inv_range(csrcp, PAGE_SIZE); - cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE); -} - -void -pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt) -{ - - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | a_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | b_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt); - mtx_unlock(&cmtx); - cpu_dcache_inv_range(csrcp + a_offs, cnt); - cpu_dcache_wbinv_range(cdstp + b_offs, cnt); - cpu_l2cache_inv_range(csrcp + a_offs, cnt); - cpu_l2cache_wbinv_range(cdstp + b_offs, cnt); -} - -void -pmap_copy_page(vm_page_t src, vm_page_t dst) -{ - - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size && - _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst), - (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0) - return; - pmap_copy_page_generic(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst)); -} - -/* - * We have code to do unmapped I/O. However, it isn't quite right and - * causes un-page-aligned I/O to devices to fail (most notably newfs - * or fsck). We give up a little performance to not allow unmapped I/O - * to gain stability. - */ -int unmapped_buf_allowed = 0; - -void -pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], - vm_offset_t b_offset, int xfersize) -{ - vm_page_t a_pg, b_pg; - vm_offset_t a_pg_offset, b_pg_offset; - int cnt; - - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - while (xfersize > 0) { - a_pg = ma[a_offset >> PAGE_SHIFT]; - a_pg_offset = a_offset & PAGE_MASK; - cnt = min(xfersize, PAGE_SIZE - a_pg_offset); - b_pg = mb[b_offset >> PAGE_SHIFT]; - b_pg_offset = b_offset & PAGE_MASK; - cnt = min(cnt, PAGE_SIZE - b_pg_offset); - pmap_copy_page_offs_generic(VM_PAGE_TO_PHYS(a_pg), a_pg_offset, - VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt); - xfersize -= cnt; - a_offset += cnt; - b_offset += cnt; - } -} - -vm_offset_t -pmap_quick_enter_page(vm_page_t m) -{ - /* - * Don't bother with a PCPU pageframe, since we don't support - * SMP for anything pre-armv7. Use pmap_kenter() to ensure - * caching is handled correctly for multiple mappings of the - * same physical page. - */ - - mtx_assert(&qmap_mtx, MA_NOTOWNED); - mtx_lock(&qmap_mtx); - - pmap_kenter(qmap_addr, VM_PAGE_TO_PHYS(m)); - - return (qmap_addr); -} - -void -pmap_quick_remove_page(vm_offset_t addr) -{ - KASSERT(addr == qmap_addr, - ("pmap_quick_remove_page: invalid address")); - mtx_assert(&qmap_mtx, MA_OWNED); - pmap_kremove(addr); - mtx_unlock(&qmap_mtx); -} - -/* - * this routine returns true if a physical page resides - * in the given pmap. 
- */ -boolean_t -pmap_page_exists_quick(pmap_t pmap, vm_page_t m) -{ - pv_entry_t pv; - int loops = 0; - boolean_t rv; - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_page_exists_quick: page %p is not managed", m)); - rv = FALSE; - rw_wlock(&pvh_global_lock); - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { - if (pv->pv_pmap == pmap) { - rv = TRUE; - break; - } - loops++; - if (loops >= 16) - break; - } - rw_wunlock(&pvh_global_lock); - return (rv); -} - -/* - * pmap_page_wired_mappings: - * - * Return the number of managed mappings to the given physical page - * that are wired. - */ -int -pmap_page_wired_mappings(vm_page_t m) -{ - pv_entry_t pv; - int count; - - count = 0; - if ((m->oflags & VPO_UNMANAGED) != 0) - return (count); - rw_wlock(&pvh_global_lock); - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) - if ((pv->pv_flags & PVF_WIRED) != 0) - count++; - rw_wunlock(&pvh_global_lock); - return (count); -} - -/* - * This function is advisory. - */ -void -pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) -{ -} - -/* - * pmap_ts_referenced: - * - * Return the count of reference bits for a page, clearing all of them. - */ -int -pmap_ts_referenced(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_ts_referenced: page %p is not managed", m)); - return (pmap_clearbit(m, PVF_REF)); -} - -boolean_t -pmap_is_modified(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_is_modified: page %p is not managed", m)); - if (m->md.pvh_attrs & PVF_MOD) - return (TRUE); - - return(FALSE); -} - -/* - * Clear the modify bits on the specified physical page. - */ -void -pmap_clear_modify(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_clear_modify: page %p is not managed", m)); - vm_page_assert_busied(m); - - if (!pmap_page_is_write_mapped(m)) - return; - if (m->md.pvh_attrs & PVF_MOD) - pmap_clearbit(m, PVF_MOD); -} - -/* - * pmap_is_referenced: - * - * Return whether or not the specified physical page was referenced - * in any physical maps. - */ -boolean_t -pmap_is_referenced(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_is_referenced: page %p is not managed", m)); - return ((m->md.pvh_attrs & PVF_REF) != 0); -} - -/* - * Clear the write and modified bits in each of the given page's mappings. - */ -void -pmap_remove_write(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_remove_write: page %p is not managed", m)); - vm_page_assert_busied(m); - - if (pmap_page_is_write_mapped(m)) - pmap_clearbit(m, PVF_WRITE); -} - -/* - * Perform the pmap work for mincore(2). If the page is not both referenced and - * modified by this pmap, returns its physical address so that the caller can - * find other mappings. - */ -int -pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - vm_paddr_t pa; - vm_page_t m; - int val; - boolean_t managed; - - PMAP_LOCK(pmap); - l2b = pmap_get_l2_bucket(pmap, addr); - if (l2b == NULL) { - PMAP_UNLOCK(pmap); - return (0); - } - ptep = &l2b->l2b_kva[l2pte_index(addr)]; - pte = *ptep; - if (!l2pte_valid(pte)) { - PMAP_UNLOCK(pmap); - return (0); - } - val = MINCORE_INCORE; - if (pte & L2_S_PROT_W) - val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; - managed = false; - pa = l2pte_pa(pte); - m = PHYS_TO_VM_PAGE(pa); - if (m != NULL && !(m->oflags & VPO_UNMANAGED)) - managed = true; - if (managed) { - /* - * The ARM pmap tries to maintain a per-mapping - * reference bit. 
The trouble is that it's kept in - * the PV entry, not the PTE, so it's costly to access - * here. You would need to acquire the pvh global - * lock, call pmap_find_pv(), and introduce a custom - * version of vm_page_pa_tryrelock() that releases and - * reacquires the pvh global lock. In the end, I - * doubt it's worthwhile. This may falsely report - * the given address as referenced. - */ - if ((m->md.pvh_attrs & PVF_REF) != 0) - val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; - } - if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != - (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { - *pap = pa; - } - PMAP_UNLOCK(pmap); - return (val); -} - -void -pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) -{ -} - -/* - * Increase the starting virtual address of the given mapping if a - * different alignment might result in more superpage mappings. - */ -void -pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, - vm_offset_t *addr, vm_size_t size) -{ -} - -#define BOOTSTRAP_DEBUG - -/* - * pmap_map_section: - * - * Create a single section mapping. - */ -void -pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, - int prot, int cache) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt; - pd_entry_t fl; - - KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("ouin2")); - - switch (cache) { - case PTE_NOCACHE: - default: - fl = 0; - break; - - case PTE_CACHE: - fl = pte_l1_s_cache_mode; - break; - - case PTE_PAGETABLE: - fl = pte_l1_s_cache_mode_pt; - break; - } - - pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | - L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL); - PTE_SYNC(&pde[va >> L1_S_SHIFT]); - -} - -/* - * pmap_link_l2pt: - * - * Link the L2 page table specified by l2pv.pv_pa into the L1 - * page table at the slot for "va". - */ -void -pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt, proto; - u_int slot = va >> L1_S_SHIFT; - - proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO; - -#ifdef VERBOSE_INIT_ARM - printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va); -#endif - - pde[slot + 0] = proto | (l2pv->pv_pa + 0x000); - - PTE_SYNC(&pde[slot]); - - SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list); - -} - -/* - * pmap_map_entry - * - * Create a single page mapping. - */ -void -pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, - int cache) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt; - pt_entry_t fl; - pt_entry_t *pte; - - KASSERT(((va | pa) & PAGE_MASK) == 0, ("ouin")); - - switch (cache) { - case PTE_NOCACHE: - default: - fl = 0; - break; - - case PTE_CACHE: - fl = pte_l2_s_cache_mode; - break; - - case PTE_PAGETABLE: - fl = pte_l2_s_cache_mode_pt; - break; - } - - if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) - panic("pmap_map_entry: no L2 table for VA 0x%08x", va); - - pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK); - - if (pte == NULL) - panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va); - - pte[l2pte_index(va)] = - L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl; - PTE_SYNC(&pte[l2pte_index(va)]); -} - -/* - * pmap_map_chunk: - * - * Map a chunk of memory using the most efficient mappings - * possible (section. large page, small page) into the - * provided L1 and L2 tables at the specified virtual address. 
- */ -vm_size_t -pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, - vm_size_t size, int prot, int cache) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt; - pt_entry_t *pte, f1, f2s, f2l; - vm_size_t resid; - int i; - - resid = roundup2(size, PAGE_SIZE); - - if (l1pt == 0) - panic("pmap_map_chunk: no L1 table provided"); - -#ifdef VERBOSE_INIT_ARM - printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x " - "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache); -#endif - - switch (cache) { - case PTE_NOCACHE: - default: - f1 = 0; - f2l = 0; - f2s = 0; - break; - - case PTE_CACHE: - f1 = pte_l1_s_cache_mode; - f2l = pte_l2_l_cache_mode; - f2s = pte_l2_s_cache_mode; - break; - - case PTE_PAGETABLE: - f1 = pte_l1_s_cache_mode_pt; - f2l = pte_l2_l_cache_mode_pt; - f2s = pte_l2_s_cache_mode_pt; - break; - } - - size = resid; - - while (resid > 0) { - /* See if we can use a section mapping. */ - if (L1_S_MAPPABLE_P(va, pa, resid)) { -#ifdef VERBOSE_INIT_ARM - printf("S"); -#endif - pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | - L1_S_PROT(PTE_KERNEL, prot) | f1 | - L1_S_DOM(PMAP_DOMAIN_KERNEL); - PTE_SYNC(&pde[va >> L1_S_SHIFT]); - va += L1_S_SIZE; - pa += L1_S_SIZE; - resid -= L1_S_SIZE; - continue; - } - - /* - * Ok, we're going to use an L2 table. Make sure - * one is actually in the corresponding L1 slot - * for the current VA. - */ - if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) - panic("pmap_map_chunk: no L2 table for VA 0x%08x", va); - - pte = (pt_entry_t *) kernel_pt_lookup( - pde[L1_IDX(va)] & L1_C_ADDR_MASK); - if (pte == NULL) - panic("pmap_map_chunk: can't find L2 table for VA" - "0x%08x", va); - /* See if we can use a L2 large page mapping. */ - if (L2_L_MAPPABLE_P(va, pa, resid)) { -#ifdef VERBOSE_INIT_ARM - printf("L"); -#endif - for (i = 0; i < 16; i++) { - pte[l2pte_index(va) + i] = - L2_L_PROTO | pa | - L2_L_PROT(PTE_KERNEL, prot) | f2l; - PTE_SYNC(&pte[l2pte_index(va) + i]); - } - va += L2_L_SIZE; - pa += L2_L_SIZE; - resid -= L2_L_SIZE; - continue; - } - - /* Use a small page mapping. */ -#ifdef VERBOSE_INIT_ARM - printf("P"); -#endif - pte[l2pte_index(va)] = - L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; - PTE_SYNC(&pte[l2pte_index(va)]); - va += PAGE_SIZE; - pa += PAGE_SIZE; - resid -= PAGE_SIZE; - } -#ifdef VERBOSE_INIT_ARM - printf("\n"); -#endif - return (size); - -} - -void -pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) -{ - /* - * Remember the memattr in a field that gets used to set the appropriate - * bits in the PTEs as mappings are established. - */ - m->md.pv_memattr = ma; - - /* - * It appears that this function can only be called before any mappings - * for the page are established on ARM. If this ever changes, this code - * will need to walk the pv_list and make each of the existing mappings - * uncacheable, being careful to sync caches and PTEs (and maybe - * invalidate TLB?) for any current mapping it modifies. - */ - if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL) - panic("Can't change memattr on page with existing mappings"); -} - -boolean_t -pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) -{ - - return (mode == VM_MEMATTR_DEFAULT || mode == VM_MEMATTR_UNCACHEABLE); -} diff --git a/sys/arm/arm/swtch-v4.S b/sys/arm/arm/swtch-v4.S deleted file mode 100644 index 17c8f8009664..000000000000 --- a/sys/arm/arm/swtch-v4.S +++ /dev/null @@ -1,376 +0,0 @@ -/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */ - -/*- - * Copyright 2003 Wasabi Systems, Inc. - * All rights reserved. 
- * - * Written by Steve C. Woodford for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -/*- - * Copyright (c) 1994-1998 Mark Brinicombe. - * Copyright (c) 1994 Brini. - * All rights reserved. - * - * This code is derived from software written for Brini by Mark Brinicombe - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Brini. - * 4. The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * RiscBSD kernel project - * - * cpuswitch.S - * - * cpu switching functions - * - * Created : 15/10/94 - * - */ - -#include "assym.inc" -#include "opt_sched.h" - -#include -#include -#include -#include - -__FBSDID("$FreeBSD$"); - - -#define GET_PCPU(tmp, tmp2) \ - ldr tmp, .Lcurpcpu - -#ifdef VFP - .fpu vfp /* allow VFP instructions */ -#endif - -.Lcurpcpu: - .word _C_LABEL(__pcpu) -.Lblocked_lock: - .word _C_LABEL(blocked_lock) - - -#define DOMAIN_CLIENT 0x01 - -.Lcpufuncs: - .word _C_LABEL(cpufuncs) - -/* - * cpu_throw(oldtd, newtd) - * - * Remove current thread state, then select the next thread to run - * and load its state. - * r0 = oldtd - * r1 = newtd - */ -ENTRY(cpu_throw) - mov r5, r1 - - /* - * r0 = oldtd - * r5 = newtd - */ - -#ifdef VFP /* This thread is dying, disable */ - bl _C_LABEL(vfp_discard) /* VFP without preserving state. */ -#endif - - GET_PCPU(r7, r9) - ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */ - - /* Switch to lwp0 context */ - - ldr r9, .Lcpufuncs - mov lr, pc - ldr pc, [r9, #CF_IDCACHE_WBINV_ALL] - ldr r0, [r7, #(PCB_PL1VEC)] - ldr r1, [r7, #(PCB_DACR)] - /* - * r0 = Pointer to L1 slot for vector_page (or NULL) - * r1 = lwp0's DACR - * r5 = lwp0 - * r7 = lwp0's PCB - * r9 = cpufuncs - */ - - /* - * Ensure the vector table is accessible by fixing up lwp0's L1 - */ - cmp r0, #0 /* No need to fixup vector table? */ - ldrne r3, [r0] /* But if yes, fetch current value */ - ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */ - mcr p15, 0, r1, c3, c0, 0 /* Update DACR for lwp0's context */ - cmpne r3, r2 /* Stuffing the same value? */ - strne r2, [r0] /* Store if not. */ - -#ifdef PMAP_INCLUDE_PTE_SYNC - /* - * Need to sync the cache to make sure that last store is - * visible to the MMU. - */ - movne r1, #4 - movne lr, pc - ldrne pc, [r9, #CF_DCACHE_WB_RANGE] -#endif /* PMAP_INCLUDE_PTE_SYNC */ - - /* - * Note: We don't do the same optimisation as cpu_switch() with - * respect to avoiding flushing the TLB if we're switching to - * the same L1 since this process' VM space may be about to go - * away, so we don't want *any* turds left in the TLB. - */ - - /* Switch the memory to the new process */ - ldr r0, [r7, #(PCB_PAGEDIR)] - mov lr, pc - ldr pc, [r9, #CF_CONTEXT_SWITCH] - - GET_PCPU(r6, r4) - /* Hook in a new pcb */ - str r7, [r6, #PC_CURPCB] - /* We have a new curthread now so make a note it */ - str r5, [r6, #PC_CURTHREAD] - - /* Set the new tp */ - ldr r6, [r5, #(TD_MD + MD_TP)] - ldr r4, =ARM_TP_ADDRESS - str r6, [r4] - ldr r6, [r5, #(TD_MD + MD_RAS_START)] - str r6, [r4, #4] /* ARM_RAS_START */ - ldr r6, [r5, #(TD_MD + MD_RAS_END)] - str r6, [r4, #8] /* ARM_RAS_END */ - - /* Restore all the saved registers and exit */ - add r3, r7, #PCB_R4 - ldmia r3, {r4-r12, sp, pc} -END(cpu_throw) - -/* - * cpu_switch(oldtd, newtd, lock) - * - * Save the current thread state, then select the next thread to run - * and load its state. - * r0 = oldtd - * r1 = newtd - * r2 = lock (new lock for old thread) - */ -ENTRY(cpu_switch) - /* Interrupts are disabled. */ - /* Save all the registers in the old thread's pcb. */ - ldr r3, [r0, #(TD_PCB)] - - /* Restore all the saved registers and exit */ - add r3, #(PCB_R4) - stmia r3, {r4-r12, sp, lr, pc} - - mov r6, r2 /* Save the mutex */ - - /* rem: r0 = old lwp */ - /* rem: interrupts are disabled */ - - /* Process is now on a processor. 
*/ - /* We have a new curthread now so make a note it */ - GET_PCPU(r7, r2) - str r1, [r7, #PC_CURTHREAD] - - /* Hook in a new pcb */ - ldr r2, [r1, #TD_PCB] - str r2, [r7, #PC_CURPCB] - - /* Stage two : Save old context */ - - /* Get the user structure for the old thread. */ - ldr r2, [r0, #(TD_PCB)] - mov r4, r0 /* Save the old thread. */ - - /* Store the old tp; userland can change it on armv4. */ - ldr r3, =ARM_TP_ADDRESS - ldr r9, [r3] - str r9, [r0, #(TD_MD + MD_TP)] - ldr r9, [r3, #4] - str r9, [r0, #(TD_MD + MD_RAS_START)] - ldr r9, [r3, #8] - str r9, [r0, #(TD_MD + MD_RAS_END)] - - /* Set the new tp */ - ldr r9, [r1, #(TD_MD + MD_TP)] - str r9, [r3] - ldr r9, [r1, #(TD_MD + MD_RAS_START)] - str r9, [r3, #4] - ldr r9, [r1, #(TD_MD + MD_RAS_END)] - str r9, [r3, #8] - - /* Get the user structure for the new process in r9 */ - ldr r9, [r1, #(TD_PCB)] - - /* rem: r2 = old PCB */ - /* rem: r9 = new PCB */ - /* rem: interrupts are enabled */ - -#ifdef VFP - fmrx r0, fpexc /* If the VFP is enabled */ - tst r0, #(VFPEXC_EN) /* the current thread has */ - movne r1, #1 /* used it, so go save */ - addne r0, r2, #(PCB_VFPSTATE) /* the state into the PCB */ - blne _C_LABEL(vfp_store) /* and disable the VFP. */ -#endif - - /* r0-r3 now free! */ - - /* Third phase : restore saved context */ - - /* rem: r2 = old PCB */ - /* rem: r9 = new PCB */ - - ldr r5, [r9, #(PCB_DACR)] /* r5 = new DACR */ - mov r2, #DOMAIN_CLIENT - cmp r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */ - beq .Lcs_context_switched /* Yup. Don't flush cache */ - mrc p15, 0, r0, c3, c0, 0 /* r0 = old DACR */ - /* - * Get the new L1 table pointer into r11. If we're switching to - * an LWP with the same address space as the outgoing one, we can - * skip the cache purge and the TTB load. - * - * To avoid data dep stalls that would happen anyway, we try - * and get some useful work done in the mean time. - */ - mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */ - ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */ - - teq r10, r11 /* Same L1? */ - cmpeq r0, r5 /* Same DACR? */ - beq .Lcs_context_switched /* yes! */ - - /* - * Definitely need to flush the cache. - */ - - ldr r1, .Lcpufuncs - mov lr, pc - ldr pc, [r1, #CF_IDCACHE_WBINV_ALL] - -.Lcs_cache_purge_skipped: - /* rem: r6 = lock */ - /* rem: r9 = new PCB */ - /* rem: r10 = old L1 */ - /* rem: r11 = new L1 */ - - mov r2, #0x00000000 - ldr r7, [r9, #(PCB_PL1VEC)] - - /* - * Ensure the vector table is accessible by fixing up the L1 - */ - cmp r7, #0 /* No need to fixup vector table? */ - ldrne r2, [r7] /* But if yes, fetch current value */ - ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */ - mcr p15, 0, r5, c3, c0, 0 /* Update DACR for new context */ - cmpne r2, r0 /* Stuffing the same value? */ -#ifndef PMAP_INCLUDE_PTE_SYNC - strne r0, [r7] /* Nope, update it */ -#else - beq .Lcs_same_vector - str r0, [r7] /* Otherwise, update it */ - - /* - * Need to sync the cache to make sure that last store is - * visible to the MMU. - */ - ldr r2, .Lcpufuncs - mov r0, r7 - mov r1, #4 - mov lr, pc - ldr pc, [r2, #CF_DCACHE_WB_RANGE] - -.Lcs_same_vector: -#endif /* PMAP_INCLUDE_PTE_SYNC */ - - cmp r10, r11 /* Switching to the same L1? */ - ldr r10, .Lcpufuncs - beq .Lcs_same_l1 /* Yup. */ - /* - * Do a full context switch, including full TLB flush. - */ - mov r0, r11 - mov lr, pc - ldr pc, [r10, #CF_CONTEXT_SWITCH] - - b .Lcs_context_switched - - /* - * We're switching to a different process in the same L1. 
- * In this situation, we only need to flush the TLB for the - * vector_page mapping, and even then only if r7 is non-NULL. - */ -.Lcs_same_l1: - cmp r7, #0 - movne r0, #0 /* We *know* vector_page's VA is 0x0 */ - movne lr, pc - ldrne pc, [r10, #CF_TLB_FLUSHID_SE] - -.Lcs_context_switched: - - /* Release the old thread */ - str r6, [r4, #TD_LOCK] - - /* XXXSCW: Safe to re-enable FIQs here */ - - /* rem: r9 = new PCB */ - - /* Restore all the saved registers and exit */ - add r3, r9, #PCB_R4 - ldmia r3, {r4-r12, sp, pc} -END(cpu_switch) diff --git a/sys/arm/arm/trap-v4.c b/sys/arm/arm/trap-v4.c deleted file mode 100644 index 8841631a89ad..000000000000 --- a/sys/arm/arm/trap-v4.c +++ /dev/null @@ -1,717 +0,0 @@ -/* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */ - -/*- - * Copyright 2004 Olivier Houchard - * Copyright 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Steve C. Woodford for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -/*- - * Copyright (c) 1994-1997 Mark Brinicombe. - * Copyright (c) 1994 Brini. - * All rights reserved. - * - * This code is derived from software written for Brini by Mark Brinicombe - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Brini. - * 4. 
The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * RiscBSD kernel project - * - * fault.c - * - * Fault handlers - * - * Created : 28/11/94 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#ifdef KDB -#include -#endif - -#ifdef KDTRACE_HOOKS -#include -#endif - -#define ReadWord(a) (*((volatile unsigned int *)(a))) - -#ifdef DEBUG -int last_fault_code; /* For the benefit of pmap_fault_fixup() */ -#endif - -struct ksig { - int signb; - u_long code; -}; -struct data_abort { - int (*func)(struct trapframe *, u_int, u_int, struct thread *, - struct ksig *); - const char *desc; -}; - -static int dab_fatal(struct trapframe *, u_int, u_int, struct thread *, - struct ksig *); -static int dab_align(struct trapframe *, u_int, u_int, struct thread *, - struct ksig *); -static int dab_buserr(struct trapframe *, u_int, u_int, struct thread *, - struct ksig *); -static void prefetch_abort_handler(struct trapframe *); - -static const struct data_abort data_aborts[] = { - {dab_fatal, "Vector Exception"}, - {dab_align, "Alignment Fault 1"}, - {dab_fatal, "Terminal Exception"}, - {dab_align, "Alignment Fault 3"}, - {dab_buserr, "External Linefetch Abort (S)"}, - {NULL, "Translation Fault (S)"}, - {dab_buserr, "External Linefetch Abort (P)"}, - {NULL, "Translation Fault (P)"}, - {dab_buserr, "External Non-Linefetch Abort (S)"}, - {NULL, "Domain Fault (S)"}, - {dab_buserr, "External Non-Linefetch Abort (P)"}, - {NULL, "Domain Fault (P)"}, - {dab_buserr, "External Translation Abort (L1)"}, - {NULL, "Permission Fault (S)"}, - {dab_buserr, "External Translation Abort (L2)"}, - {NULL, "Permission Fault (P)"} -}; - -/* Determine if a fault came from user mode */ -#define TRAP_USERMODE(tf) ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE) - -/* Determine if 'x' is a permission fault */ -#define IS_PERMISSION_FAULT(x) \ - (((1 << ((x) & FAULT_TYPE_MASK)) & \ - ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0) - -static __inline void -call_trapsignal(struct thread *td, int sig, u_long code) -{ - ksiginfo_t ksi; - - ksiginfo_init_trap(&ksi); - ksi.ksi_signo = sig; - ksi.ksi_code = (int)code; - trapsignal(td, &ksi); -} - -void -abort_handler(struct trapframe *tf, int type) -{ - struct vm_map *map; - struct pcb *pcb; - struct thread *td; - u_int user, far, fsr; - vm_prot_t ftype; - void *onfault; - vm_offset_t va; - int error = 0, signo, ucode; - struct ksig ksig; - struct proc *p; - - if (type == 1) - return (prefetch_abort_handler(tf)); - - /* Grab FAR/FSR before enabling interrupts 
*/ - far = cp15_dfar_get(); - fsr = cp15_dfsr_get(); -#if 0 - printf("data abort: fault address=%p (from pc=%p lr=%p)\n", - (void*)far, (void*)tf->tf_pc, (void*)tf->tf_svc_lr); -#endif - - /* Update vmmeter statistics */ -#if 0 - vmexp.traps++; -#endif - - td = curthread; - p = td->td_proc; - - VM_CNT_INC(v_trap); - /* Data abort came from user mode? */ - user = TRAP_USERMODE(tf); - - if (user) { - td->td_pticks = 0; - td->td_frame = tf; - if (td->td_cowgen != td->td_proc->p_cowgen) - thread_cow_update(td); - } - /* Grab the current pcb */ - pcb = td->td_pcb; - /* Re-enable interrupts if they were enabled previously */ - if (td->td_md.md_spinlock_count == 0) { - if (__predict_true(tf->tf_spsr & PSR_I) == 0) - enable_interrupts(PSR_I); - if (__predict_true(tf->tf_spsr & PSR_F) == 0) - enable_interrupts(PSR_F); - } - - /* Invoke the appropriate handler, if necessary */ - if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) { - if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far, - td, &ksig)) { - signo = ksig.signb; - ucode = ksig.code; - goto do_trapsignal; - } - goto out; - } - - /* - * At this point, we're dealing with one of the following data aborts: - * - * FAULT_TRANS_S - Translation -- Section - * FAULT_TRANS_P - Translation -- Page - * FAULT_DOMAIN_S - Domain -- Section - * FAULT_DOMAIN_P - Domain -- Page - * FAULT_PERM_S - Permission -- Section - * FAULT_PERM_P - Permission -- Page - * - * These are the main virtual memory-related faults signalled by - * the MMU. - */ - - /* - * Make sure the Program Counter is sane. We could fall foul of - * someone executing Thumb code, in which case the PC might not - * be word-aligned. This would cause a kernel alignment fault - * further down if we have to decode the current instruction. - * XXX: It would be nice to be able to support Thumb at some point. - */ - if (__predict_false((tf->tf_pc & 3) != 0)) { - if (user) { - /* - * Give the user an illegal instruction signal. - */ - /* Deliver a SIGILL to the process */ - signo = SIGILL; - ucode = 0; - goto do_trapsignal; - } - - /* - * The kernel never executes Thumb code. - */ - printf("\ndata_abort_fault: Misaligned Kernel-mode " - "Program Counter\n"); - dab_fatal(tf, fsr, far, td, &ksig); - } - - va = trunc_page((vm_offset_t)far); - - /* - * It is only a kernel address space fault iff: - * 1. user == 0 and - * 2. pcb_onfault not set or - * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction. - */ - if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS || - (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) && - __predict_true((pcb->pcb_onfault == NULL || - (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) { - map = kernel_map; - - /* Was the fault due to the FPE/IPKDB ? */ - if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) { - /* - * Force exit via userret() - * This is necessary as the FPE is an extension to - * userland that actually runs in a priveledged mode - * but uses USR mode permissions for its accesses. - */ - user = 1; - signo = SIGSEGV; - ucode = 0; - goto do_trapsignal; - } - } else { - map = &td->td_proc->p_vmspace->vm_map; - } - - /* - * We need to know whether the page should be mapped as R or R/W. - * On armv4, the fault status register does not indicate whether - * the access was a read or write. We know that a permission fault - * can only be the result of a write to a read-only location, so we - * can deal with those quickly. Otherwise we need to disassemble - * the faulting instruction to determine if it was a write. 
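 * Worked example, added for illustration only (not in the original file):
 *
 *     "str r1, [r0]" encodes as 0xe5801000; 0xe5801000 & 0x0c100000
 *     == 0x04000000, so it is classified as a write below, while
 *     "ldr r1, [r0]" (0xe5901000) has the L bit (bit 20) set, matches
 *     none of the store patterns, and is treated as a read.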
- */ - if (IS_PERMISSION_FAULT(fsr)) - ftype = VM_PROT_WRITE; - else { - u_int insn = ReadWord(tf->tf_pc); - - if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */ - ((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */ - ((insn & 0x0a100000) == 0x08000000)) { /* STM/CDT */ - ftype = VM_PROT_WRITE; - } else { - if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */ - ftype = VM_PROT_READ | VM_PROT_WRITE; - else - ftype = VM_PROT_READ; - } - } - - /* - * See if the fault is as a result of ref/mod emulation, - * or domain mismatch. - */ -#ifdef DEBUG - last_fault_code = fsr; -#endif - if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, - NULL, "Kernel page fault") != 0) - goto fatal_pagefault; - - if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype, - user)) { - goto out; - } - - onfault = pcb->pcb_onfault; - pcb->pcb_onfault = NULL; - error = vm_fault_trap(map, va, ftype, VM_FAULT_NORMAL, &signo, &ucode); - pcb->pcb_onfault = onfault; - if (__predict_true(error == KERN_SUCCESS)) - goto out; -fatal_pagefault: - if (user == 0) { - if (pcb->pcb_onfault) { - tf->tf_r0 = error; - tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; - return; - } - - printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype, - error); - dab_fatal(tf, fsr, far, td, &ksig); - } - -do_trapsignal: - call_trapsignal(td, signo, ucode); -out: - /* If returning to user mode, make sure to invoke userret() */ - if (user) - userret(td, tf); -} - -/* - * dab_fatal() handles the following data aborts: - * - * FAULT_WRTBUF_0 - Vector Exception - * FAULT_WRTBUF_1 - Terminal Exception - * - * We should never see these on a properly functioning system. - * - * This function is also called by the other handlers if they - * detect a fatal problem. - * - * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort. - */ -static int -dab_fatal(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, - struct ksig *ksig) -{ - const char *mode; -#ifdef KDB - bool handled; -#endif - -#ifdef KDB - if (kdb_active) { - kdb_reenter(); - return (0); - } -#endif -#ifdef KDTRACE_HOOKS - if (!TRAP_USERMODE(tf)) { - if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far & FAULT_TYPE_MASK)) - return (0); - } -#endif - - mode = TRAP_USERMODE(tf) ? 
"user" : "kernel"; - - disable_interrupts(PSR_I|PSR_F); - if (td != NULL) { - printf("Fatal %s mode data abort: '%s'\n", mode, - data_aborts[fsr & FAULT_TYPE_MASK].desc); - printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr); - if ((fsr & FAULT_IMPRECISE) == 0) - printf("%08x, ", far); - else - printf("Invalid, "); - printf("spsr=%08x\n", tf->tf_spsr); - } else { - printf("Fatal %s mode prefetch abort at 0x%08x\n", - mode, tf->tf_pc); - printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr); - } - - printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", - tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); - printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", - tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); - printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", - tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); - printf("r12=%08x, ", tf->tf_r12); - - if (TRAP_USERMODE(tf)) - printf("usp=%08x, ulr=%08x", - tf->tf_usr_sp, tf->tf_usr_lr); - else - printf("ssp=%08x, slr=%08x", - tf->tf_svc_sp, tf->tf_svc_lr); - printf(", pc =%08x\n\n", tf->tf_pc); - -#ifdef KDB - if (debugger_on_trap) { - kdb_why = KDB_WHY_TRAP; - handled = kdb_trap(fsr, 0, tf); - kdb_why = KDB_WHY_UNSET; - if (handled) - return (0); - } -#endif - panic("Fatal abort"); - /*NOTREACHED*/ -} - -/* - * dab_align() handles the following data aborts: - * - * FAULT_ALIGN_0 - Alignment fault - * FAULT_ALIGN_1 - Alignment fault - * - * These faults are fatal if they happen in kernel mode. Otherwise, we - * deliver a bus error to the process. - */ -static int -dab_align(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, - struct ksig *ksig) -{ - - /* Alignment faults are always fatal if they occur in kernel mode */ - if (!TRAP_USERMODE(tf)) { - if (!td || !td->td_pcb->pcb_onfault) - dab_fatal(tf, fsr, far, td, ksig); - tf->tf_r0 = EFAULT; - tf->tf_pc = (int)td->td_pcb->pcb_onfault; - return (0); - } - - /* pcb_onfault *must* be NULL at this point */ - - /* Deliver a bus error signal to the process */ - ksig->code = 0; - ksig->signb = SIGBUS; - td->td_frame = tf; - - return (1); -} - -/* - * dab_buserr() handles the following data aborts: - * - * FAULT_BUSERR_0 - External Abort on Linefetch -- Section - * FAULT_BUSERR_1 - External Abort on Linefetch -- Page - * FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section - * FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page - * FAULT_BUSTRNL1 - External abort on Translation -- Level 1 - * FAULT_BUSTRNL2 - External abort on Translation -- Level 2 - * - * If pcb_onfault is set, flag the fault and return to the handler. - * If the fault occurred in user mode, give the process a SIGBUS. - * - * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2 - * can be flagged as imprecise in the FSR. This causes a real headache - * since some of the machine state is lost. In this case, tf->tf_pc - * may not actually point to the offending instruction. In fact, if - * we've taken a double abort fault, it generally points somewhere near - * the top of "data_abort_entry" in exception.S. - * - * In all other cases, these data aborts are considered fatal. - */ -static int -dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, - struct ksig *ksig) -{ - struct pcb *pcb = td->td_pcb; - -#ifdef __XSCALE__ - if ((fsr & FAULT_IMPRECISE) != 0 && - (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) { - /* - * Oops, an imprecise, double abort fault. We've lost the - * r14_abt/spsr_abt values corresponding to the original - * abort, and the spsr saved in the trapframe indicates - * ABT mode. 
- */ - tf->tf_spsr &= ~PSR_MODE; - - /* - * We use a simple heuristic to determine if the double abort - * happened as a result of a kernel or user mode access. - * If the current trapframe is at the top of the kernel stack, - * the fault _must_ have come from user mode. - */ - if (tf != ((struct trapframe *)pcb->pcb_regs.sf_sp) - 1) { - /* - * Kernel mode. We're either about to die a - * spectacular death, or pcb_onfault will come - * to our rescue. Either way, the current value - * of tf->tf_pc is irrelevant. - */ - tf->tf_spsr |= PSR_SVC32_MODE; - if (pcb->pcb_onfault == NULL) - printf("\nKernel mode double abort!\n"); - } else { - /* - * User mode. We've lost the program counter at the - * time of the fault (not that it was accurate anyway; - * it's not called an imprecise fault for nothing). - * About all we can do is copy r14_usr to tf_pc and - * hope for the best. The process is about to get a - * SIGBUS, so it's probably history anyway. - */ - tf->tf_spsr |= PSR_USR32_MODE; - tf->tf_pc = tf->tf_usr_lr; - } - } - - /* FAR is invalid for imprecise exceptions */ - if ((fsr & FAULT_IMPRECISE) != 0) - far = 0; -#endif /* __XSCALE__ */ - - if (pcb->pcb_onfault) { - tf->tf_r0 = EFAULT; - tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; - return (0); - } - - /* - * At this point, if the fault happened in kernel mode, we're toast - */ - if (!TRAP_USERMODE(tf)) - dab_fatal(tf, fsr, far, td, ksig); - - /* Deliver a bus error signal to the process */ - ksig->signb = SIGBUS; - ksig->code = 0; - td->td_frame = tf; - - return (1); -} - -/* - * void prefetch_abort_handler(struct trapframe *tf) - * - * Abort handler called when instruction execution occurs at - * a non existent or restricted (access permissions) memory page. - * If the address is invalid and we were in SVC mode then panic as - * the kernel should never prefetch abort. - * If the address is invalid and the page is mapped then the user process - * does no have read permission so send it a signal. - * Otherwise fault the page in and try again. - */ -static void -prefetch_abort_handler(struct trapframe *tf) -{ - struct thread *td; - struct proc * p; - struct vm_map *map; - vm_offset_t fault_pc, va; - int error = 0, signo, ucode; - struct ksig ksig; - -#if 0 - /* Update vmmeter statistics */ - uvmexp.traps++; -#endif -#if 0 - printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc, - (void*)tf->tf_usr_lr); -#endif - - td = curthread; - p = td->td_proc; - VM_CNT_INC(v_trap); - - if (TRAP_USERMODE(tf)) { - td->td_frame = tf; - if (td->td_cowgen != td->td_proc->p_cowgen) - thread_cow_update(td); - } - fault_pc = tf->tf_pc; - if (td->td_md.md_spinlock_count == 0) { - if (__predict_true(tf->tf_spsr & PSR_I) == 0) - enable_interrupts(PSR_I); - if (__predict_true(tf->tf_spsr & PSR_F) == 0) - enable_interrupts(PSR_F); - } - - /* Prefetch aborts cannot happen in kernel mode */ - if (__predict_false(!TRAP_USERMODE(tf))) - dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig); - td->td_pticks = 0; - - /* Ok validate the address, can only execute in USER space */ - if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS || - (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) { - signo = SIGSEGV; - ucode = 0; - goto do_trapsignal; - } - - map = &td->td_proc->p_vmspace->vm_map; - va = trunc_page(fault_pc); - - /* - * See if the pmap can handle this fault on its own... 
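 * (illustrative note, not in the original file: the armv4 MMU has no hardware
 * referenced/modified bits, so pmap_fault_fixup() may be able to resolve this
 * abort by ref/mod emulation, simply upgrading the existing PTE; only when it
 * cannot do we fall back to vm_fault_trap() below)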
- */ -#ifdef DEBUG - last_fault_code = -1; -#endif - if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1)) - goto out; - - error = vm_fault_trap(map, va, VM_PROT_READ | VM_PROT_EXECUTE, - VM_FAULT_NORMAL, &signo, &ucode); - if (__predict_true(error == KERN_SUCCESS)) - goto out; - -do_trapsignal: - call_trapsignal(td, signo, ucode); - -out: - userret(td, tf); - -} - -extern int badaddr_read_1(const uint8_t *, uint8_t *); -extern int badaddr_read_2(const uint16_t *, uint16_t *); -extern int badaddr_read_4(const uint32_t *, uint32_t *); -/* - * Tentatively read an 8, 16, or 32-bit value from 'addr'. - * If the read succeeds, the value is written to 'rptr' and zero is returned. - * Else, return EFAULT. - */ -int -badaddr_read(void *addr, size_t size, void *rptr) -{ - union { - uint8_t v1; - uint16_t v2; - uint32_t v4; - } u; - int rv; - - cpu_drain_writebuf(); - - /* Read from the test address. */ - switch (size) { - case sizeof(uint8_t): - rv = badaddr_read_1(addr, &u.v1); - if (rv == 0 && rptr) - *(uint8_t *) rptr = u.v1; - break; - - case sizeof(uint16_t): - rv = badaddr_read_2(addr, &u.v2); - if (rv == 0 && rptr) - *(uint16_t *) rptr = u.v2; - break; - - case sizeof(uint32_t): - rv = badaddr_read_4(addr, &u.v4); - if (rv == 0 && rptr) - *(uint32_t *) rptr = u.v4; - break; - - default: - panic("badaddr: invalid size (%lu)", (u_long) size); - } - - /* Return EFAULT if the address was invalid, else zero */ - return (rv); -} diff --git a/sys/arm/include/atomic-v4.h b/sys/arm/include/atomic-v4.h deleted file mode 100644 index 07d1042dd0d0..000000000000 --- a/sys/arm/include/atomic-v4.h +++ /dev/null @@ -1,660 +0,0 @@ -/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */ - -/*- - * Copyright (C) 2003-2004 Olivier Houchard - * Copyright (C) 1994-1997 Mark Brinicombe - * Copyright (C) 1994 Brini - * All rights reserved. - * - * This code is derived from software written for Brini by Mark Brinicombe - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Brini. - * 4. The name of Brini may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * $FreeBSD$ - */ - -#ifndef _MACHINE_ATOMIC_V4_H_ -#define _MACHINE_ATOMIC_V4_H_ - -#ifndef _MACHINE_ATOMIC_H_ -#error Do not include this file directly, use -#endif - -#if __ARM_ARCH <= 5 -#define isb() __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory") -#define dsb() __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory") -#define dmb() dsb() -#else -#error Only use this file with ARMv5 and earlier -#endif - -#define mb() dmb() -#define wmb() dmb() -#define rmb() dmb() - -#define __with_interrupts_disabled(expr) \ - do { \ - u_int cpsr_save, tmp; \ - \ - __asm __volatile( \ - "mrs %0, cpsr;" \ - "orr %1, %0, %2;" \ - "msr cpsr_fsxc, %1;" \ - : "=r" (cpsr_save), "=r" (tmp) \ - : "I" (PSR_I | PSR_F) \ - : "cc" ); \ - (expr); \ - __asm __volatile( \ - "msr cpsr_fsxc, %0" \ - : /* no output */ \ - : "r" (cpsr_save) \ - : "cc" ); \ - } while(0) - -static __inline uint32_t -__swp(uint32_t val, volatile uint32_t *ptr) -{ - __asm __volatile("swp %0, %2, [%3]" - : "=&r" (val), "=m" (*ptr) - : "r" (val), "r" (ptr), "m" (*ptr) - : "memory"); - return (val); -} - -#ifdef _KERNEL -#define ARM_HAVE_ATOMIC64 - -static __inline void -atomic_add_32(volatile u_int32_t *p, u_int32_t val) -{ - __with_interrupts_disabled(*p += val); -} - -static __inline void -atomic_add_64(volatile u_int64_t *p, u_int64_t val) -{ - __with_interrupts_disabled(*p += val); -} - -static __inline void -atomic_clear_32(volatile uint32_t *address, uint32_t clearmask) -{ - __with_interrupts_disabled(*address &= ~clearmask); -} - -static __inline void -atomic_clear_64(volatile uint64_t *address, uint64_t clearmask) -{ - __with_interrupts_disabled(*address &= ~clearmask); -} - -static __inline int -atomic_fcmpset_8(volatile uint8_t *p, volatile uint8_t *cmpval, volatile uint8_t newval) -{ - int ret; - - __with_interrupts_disabled( - { - ret = *p; - if (*p == *cmpval) { - *p = newval; - ret = 1; - } else { - *cmpval = *p; - ret = 0; - } - }); - return (ret); -} -static __inline int -atomic_fcmpset_16(volatile uint16_t *p, volatile uint16_t *cmpval, volatile uint16_t newval) -{ - int ret; - - __with_interrupts_disabled( - { - ret = *p; - if (*p == *cmpval) { - *p = newval; - ret = 1; - } else { - *cmpval = *p; - ret = 0; - } - }); - return (ret); -} - -static __inline int -atomic_fcmpset_32(volatile u_int32_t *p, volatile u_int32_t *cmpval, volatile u_int32_t newval) -{ - int ret; - - __with_interrupts_disabled( - { - ret = *p; - if (*p == *cmpval) { - *p = newval; - ret = 1; - } else { - *cmpval = *p; - ret = 0; - } - }); - return (ret); -} - -static __inline int -atomic_fcmpset_64(volatile u_int64_t *p, volatile u_int64_t *cmpval, volatile u_int64_t newval) -{ - int ret; - - __with_interrupts_disabled( - { - if (*p == *cmpval) { - *p = newval; - ret = 1; - } else { - *cmpval = *p; - ret = 0; - } - }); - return (ret); -} - -static __inline int -atomic_cmpset_8(volatile uint8_t *p, volatile uint8_t cmpval, volatile uint8_t newval) -{ - int ret; - - __with_interrupts_disabled( - { - if (*p == cmpval) { - *p = newval; - ret = 1; - } else { - ret = 0; - } - }); - return (ret); -} - -static __inline int -atomic_cmpset_16(volatile uint16_t *p, volatile uint16_t cmpval, volatile uint16_t newval) -{ - int ret; - - __with_interrupts_disabled( - { - if (*p == cmpval) { - *p = newval; - ret = 1; - } else { - ret = 0; - } - }); - return (ret); -} - -static __inline int -atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval) -{ - int ret; - - __with_interrupts_disabled( - 
{ - if (*p == cmpval) { - *p = newval; - ret = 1; - } else { - ret = 0; - } - }); - return (ret); -} - -static __inline int -atomic_cmpset_64(volatile u_int64_t *p, volatile u_int64_t cmpval, volatile u_int64_t newval) -{ - int ret; - - __with_interrupts_disabled( - { - if (*p == cmpval) { - *p = newval; - ret = 1; - } else { - ret = 0; - } - }); - return (ret); -} - -static __inline uint32_t -atomic_fetchadd_32(volatile uint32_t *p, uint32_t v) -{ - uint32_t value; - - __with_interrupts_disabled( - { - value = *p; - *p += v; - }); - return (value); -} - -static __inline uint64_t -atomic_fetchadd_64(volatile uint64_t *p, uint64_t v) -{ - uint64_t value; - - __with_interrupts_disabled( - { - value = *p; - *p += v; - }); - return (value); -} - -static __inline uint64_t -atomic_load_64(volatile uint64_t *p) -{ - uint64_t value; - - __with_interrupts_disabled(value = *p); - return (value); -} - -static __inline void -atomic_set_32(volatile uint32_t *address, uint32_t setmask) -{ - __with_interrupts_disabled(*address |= setmask); -} - -static __inline void -atomic_set_64(volatile uint64_t *address, uint64_t setmask) -{ - __with_interrupts_disabled(*address |= setmask); -} - -static __inline void -atomic_store_64(volatile uint64_t *p, uint64_t value) -{ - __with_interrupts_disabled(*p = value); -} - -static __inline void -atomic_subtract_32(volatile u_int32_t *p, u_int32_t val) -{ - __with_interrupts_disabled(*p -= val); -} - -static __inline void -atomic_subtract_64(volatile u_int64_t *p, u_int64_t val) -{ - __with_interrupts_disabled(*p -= val); -} - -static __inline uint64_t -atomic_swap_64(volatile uint64_t *p, uint64_t v) -{ - uint64_t value; - - __with_interrupts_disabled( - { - value = *p; - *p = v; - }); - return (value); -} - -#else /* !_KERNEL */ - -static __inline void -atomic_add_32(volatile u_int32_t *p, u_int32_t val) -{ - int start, ras_start = ARM_RAS_START; - - __asm __volatile("1:\n" - "adr %1, 1b\n" - "str %1, [%0]\n" - "adr %1, 2f\n" - "str %1, [%0, #4]\n" - "ldr %1, [%2]\n" - "add %1, %1, %3\n" - "str %1, [%2]\n" - "2:\n" - "mov %1, #0\n" - "str %1, [%0]\n" - "mov %1, #0xffffffff\n" - "str %1, [%0, #4]\n" - : "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val) - : : "memory"); -} - -static __inline void -atomic_clear_32(volatile uint32_t *address, uint32_t clearmask) -{ - int start, ras_start = ARM_RAS_START; - - __asm __volatile("1:\n" - "adr %1, 1b\n" - "str %1, [%0]\n" - "adr %1, 2f\n" - "str %1, [%0, #4]\n" - "ldr %1, [%2]\n" - "bic %1, %1, %3\n" - "str %1, [%2]\n" - "2:\n" - "mov %1, #0\n" - "str %1, [%0]\n" - "mov %1, #0xffffffff\n" - "str %1, [%0, #4]\n" - : "+r" (ras_start), "=r" (start), "+r" (address), "+r" (clearmask) - : : "memory"); - -} - -static __inline int -atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval) -{ - int done, ras_start = ARM_RAS_START; - - __asm __volatile("1:\n" - "adr %1, 1b\n" - "str %1, [%0]\n" - "adr %1, 2f\n" - "str %1, [%0, #4]\n" - "ldr %1, [%2]\n" - "cmp %1, %3\n" - "streq %4, [%2]\n" - "2:\n" - "mov %1, #0\n" - "str %1, [%0]\n" - "mov %1, #0xffffffff\n" - "str %1, [%0, #4]\n" - "moveq %1, #1\n" - "movne %1, #0\n" - : "+r" (ras_start), "=r" (done) - ,"+r" (p), "+r" (cmpval), "+r" (newval) : : "cc", "memory"); - return (done); -} - -static __inline int -atomic_fcmpset_32(volatile u_int32_t *p, volatile u_int32_t *cmpval, volatile u_int32_t newval) -{ - int done, oldval, ras_start = ARM_RAS_START; - - __asm __volatile("1:\n" - "adr %1, 1b\n" - "str %1, [%0]\n" - "adr %1, 2f\n" - "str %1, [%0, 
#4]\n" - "ldr %1, [%2]\n" - "ldr %5, [%3]\n" - "cmp %1, %5\n" - "streq %4, [%2]\n" - "2:\n" - "mov %5, #0\n" - "str %5, [%0]\n" - "mov %5, #0xffffffff\n" - "str %5, [%0, #4]\n" - "strne %1, [%3]\n" - "moveq %1, #1\n" - "movne %1, #0\n" - : "+r" (ras_start), "=r" (done) ,"+r" (p) - , "+r" (cmpval), "+r" (newval), "+r" (oldval) : : "cc", "memory"); - return (done); -} - -static __inline uint32_t -atomic_fetchadd_32(volatile uint32_t *p, uint32_t v) -{ - uint32_t start, tmp, ras_start = ARM_RAS_START; - - __asm __volatile("1:\n" - "adr %1, 1b\n" - "str %1, [%0]\n" - "adr %1, 2f\n" - "str %1, [%0, #4]\n" - "ldr %1, [%3]\n" - "mov %2, %1\n" - "add %2, %2, %4\n" - "str %2, [%3]\n" - "2:\n" - "mov %2, #0\n" - "str %2, [%0]\n" - "mov %2, #0xffffffff\n" - "str %2, [%0, #4]\n" - : "+r" (ras_start), "=r" (start), "=r" (tmp), "+r" (p), "+r" (v) - : : "memory"); - return (start); -} - -static __inline void -atomic_set_32(volatile uint32_t *address, uint32_t setmask) -{ - int start, ras_start = ARM_RAS_START; - - __asm __volatile("1:\n" - "adr %1, 1b\n" - "str %1, [%0]\n" - "adr %1, 2f\n" - "str %1, [%0, #4]\n" - "ldr %1, [%2]\n" - "orr %1, %1, %3\n" - "str %1, [%2]\n" - "2:\n" - "mov %1, #0\n" - "str %1, [%0]\n" - "mov %1, #0xffffffff\n" - "str %1, [%0, #4]\n" - - : "+r" (ras_start), "=r" (start), "+r" (address), "+r" (setmask) - : : "memory"); -} - -static __inline void -atomic_subtract_32(volatile u_int32_t *p, u_int32_t val) -{ - int start, ras_start = ARM_RAS_START; - - __asm __volatile("1:\n" - "adr %1, 1b\n" - "str %1, [%0]\n" - "adr %1, 2f\n" - "str %1, [%0, #4]\n" - "ldr %1, [%2]\n" - "sub %1, %1, %3\n" - "str %1, [%2]\n" - "2:\n" - "mov %1, #0\n" - "str %1, [%0]\n" - "mov %1, #0xffffffff\n" - "str %1, [%0, #4]\n" - - : "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val) - : : "memory"); -} - -#endif /* _KERNEL */ - -static __inline uint32_t -atomic_readandclear_32(volatile u_int32_t *p) -{ - - return (__swp(0, p)); -} - -static __inline uint32_t -atomic_swap_32(volatile u_int32_t *p, u_int32_t v) -{ - - return (__swp(v, p)); -} - -#define atomic_fcmpset_rel_32 atomic_fcmpset_32 -#define atomic_fcmpset_acq_32 atomic_fcmpset_32 -#ifdef _KERNEL -#define atomic_fcmpset_8 atomic_fcmpset_8 -#define atomic_fcmpset_rel_8 atomic_fcmpset_8 -#define atomic_fcmpset_acq_8 atomic_fcmpset_8 -#define atomic_fcmpset_16 atomic_fcmpset_16 -#define atomic_fcmpset_rel_16 atomic_fcmpset_16 -#define atomic_fcmpset_acq_16 atomic_fcmpset_16 -#define atomic_fcmpset_rel_64 atomic_fcmpset_64 -#define atomic_fcmpset_acq_64 atomic_fcmpset_64 -#endif -#define atomic_fcmpset_acq_long atomic_fcmpset_long -#define atomic_fcmpset_rel_long atomic_fcmpset_long -#define atomic_cmpset_rel_32 atomic_cmpset_32 -#define atomic_cmpset_acq_32 atomic_cmpset_32 -#ifdef _KERNEL -#define atomic_cmpset_8 atomic_cmpset_8 -#define atomic_cmpset_rel_8 atomic_cmpset_8 -#define atomic_cmpset_acq_8 atomic_cmpset_8 -#define atomic_cmpset_16 atomic_cmpset_16 -#define atomic_cmpset_rel_16 atomic_cmpset_16 -#define atomic_cmpset_acq_16 atomic_cmpset_16 -#define atomic_cmpset_rel_64 atomic_cmpset_64 -#define atomic_cmpset_acq_64 atomic_cmpset_64 -#endif -#define atomic_set_rel_32 atomic_set_32 -#define atomic_set_acq_32 atomic_set_32 -#define atomic_clear_rel_32 atomic_clear_32 -#define atomic_clear_acq_32 atomic_clear_32 -#define atomic_add_rel_32 atomic_add_32 -#define atomic_add_acq_32 atomic_add_32 -#define atomic_subtract_rel_32 atomic_subtract_32 -#define atomic_subtract_acq_32 atomic_subtract_32 -#define atomic_store_rel_32 atomic_store_32 
-#define atomic_store_rel_long atomic_store_long -#define atomic_load_acq_32 atomic_load_32 -#define atomic_load_acq_long atomic_load_long -#define atomic_add_acq_long atomic_add_long -#define atomic_add_rel_long atomic_add_long -#define atomic_subtract_acq_long atomic_subtract_long -#define atomic_subtract_rel_long atomic_subtract_long -#define atomic_clear_acq_long atomic_clear_long -#define atomic_clear_rel_long atomic_clear_long -#define atomic_set_acq_long atomic_set_long -#define atomic_set_rel_long atomic_set_long -#define atomic_cmpset_acq_long atomic_cmpset_long -#define atomic_cmpset_rel_long atomic_cmpset_long -#define atomic_load_acq_long atomic_load_long -#undef __with_interrupts_disabled - -static __inline void -atomic_add_long(volatile u_long *p, u_long v) -{ - - atomic_add_32((volatile uint32_t *)p, v); -} - -static __inline void -atomic_clear_long(volatile u_long *p, u_long v) -{ - - atomic_clear_32((volatile uint32_t *)p, v); -} - -static __inline int -atomic_cmpset_long(volatile u_long *dst, u_long old, u_long newe) -{ - - return (atomic_cmpset_32((volatile uint32_t *)dst, old, newe)); -} - -static __inline u_long -atomic_fcmpset_long(volatile u_long *dst, u_long *old, u_long newe) -{ - - return (atomic_fcmpset_32((volatile uint32_t *)dst, - (uint32_t *)old, newe)); -} - -static __inline u_long -atomic_fetchadd_long(volatile u_long *p, u_long v) -{ - - return (atomic_fetchadd_32((volatile uint32_t *)p, v)); -} - -static __inline void -atomic_readandclear_long(volatile u_long *p) -{ - - atomic_readandclear_32((volatile uint32_t *)p); -} - -static __inline void -atomic_set_long(volatile u_long *p, u_long v) -{ - - atomic_set_32((volatile uint32_t *)p, v); -} - -static __inline void -atomic_subtract_long(volatile u_long *p, u_long v) -{ - - atomic_subtract_32((volatile uint32_t *)p, v); -} - -/* - * ARMv5 does not support SMP. For both kernel and user modes, only a - * compiler barrier is needed for fences, since CPU is always - * self-consistent. - */ -static __inline void -atomic_thread_fence_acq(void) -{ - - __compiler_membar(); -} - -static __inline void -atomic_thread_fence_rel(void) -{ - - __compiler_membar(); -} - -static __inline void -atomic_thread_fence_acq_rel(void) -{ - - __compiler_membar(); -} - -static __inline void -atomic_thread_fence_seq_cst(void) -{ - - __compiler_membar(); -} - -#endif /* _MACHINE_ATOMIC_H_ */ diff --git a/sys/arm/include/atomic.h b/sys/arm/include/atomic.h index 9281096782ef..f5d98d920ebd 100644 --- a/sys/arm/include/atomic.h +++ b/sys/arm/include/atomic.h @@ -1,108 +1,104 @@ /* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (C) 2003-2004 Olivier Houchard * Copyright (C) 1994-1997 Mark Brinicombe * Copyright (C) 1994 Brini * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of Brini may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_ATOMIC_H_ #define _MACHINE_ATOMIC_H_ #include #include #ifndef _KERNEL #include #endif -#if __ARM_ARCH >= 6 #include -#else /* < armv6 */ -#include -#endif /* Arch >= v6 */ static __inline u_long atomic_swap_long(volatile u_long *p, u_long v) { return (atomic_swap_32((volatile uint32_t *)p, v)); } #define atomic_clear_ptr atomic_clear_32 #define atomic_clear_acq_ptr atomic_clear_acq_32 #define atomic_clear_rel_ptr atomic_clear_rel_32 #define atomic_set_ptr atomic_set_32 #define atomic_set_acq_ptr atomic_set_acq_32 #define atomic_set_rel_ptr atomic_set_rel_32 #define atomic_fcmpset_ptr atomic_fcmpset_32 #define atomic_fcmpset_rel_ptr atomic_fcmpset_rel_32 #define atomic_fcmpset_acq_ptr atomic_fcmpset_acq_32 #define atomic_cmpset_ptr atomic_cmpset_32 #define atomic_cmpset_acq_ptr atomic_cmpset_acq_32 #define atomic_cmpset_rel_ptr atomic_cmpset_rel_32 #define atomic_load_acq_ptr atomic_load_acq_32 #define atomic_store_rel_ptr atomic_store_rel_32 #define atomic_swap_ptr atomic_swap_32 #define atomic_readandclear_ptr atomic_readandclear_32 #define atomic_add_int atomic_add_32 #define atomic_add_acq_int atomic_add_acq_32 #define atomic_add_rel_int atomic_add_rel_32 #define atomic_subtract_int atomic_subtract_32 #define atomic_subtract_acq_int atomic_subtract_acq_32 #define atomic_subtract_rel_int atomic_subtract_rel_32 #define atomic_clear_int atomic_clear_32 #define atomic_clear_acq_int atomic_clear_acq_32 #define atomic_clear_rel_int atomic_clear_rel_32 #define atomic_set_int atomic_set_32 #define atomic_set_acq_int atomic_set_acq_32 #define atomic_set_rel_int atomic_set_rel_32 #define atomic_fcmpset_int atomic_fcmpset_32 #define atomic_fcmpset_acq_int atomic_fcmpset_acq_32 #define atomic_fcmpset_rel_int atomic_fcmpset_rel_32 #define atomic_cmpset_int atomic_cmpset_32 #define atomic_cmpset_acq_int atomic_cmpset_acq_32 #define atomic_cmpset_rel_int atomic_cmpset_rel_32 #define atomic_fetchadd_int atomic_fetchadd_32 #define atomic_readandclear_int atomic_readandclear_32 #define atomic_load_acq_int atomic_load_acq_32 #define atomic_store_rel_int atomic_store_rel_32 #define atomic_swap_int atomic_swap_32 #include #endif /* _MACHINE_ATOMIC_H_ */ diff --git a/sys/arm/include/cpu-v4.h b/sys/arm/include/cpu-v4.h deleted file mode 100644 index 441234635572..000000000000 --- a/sys/arm/include/cpu-v4.h +++ /dev/null @@ -1,183 +0,0 @@ -/*- - * Copyright 2016 Svatopluk Kraus - * Copyright 2016 Michal Meloun - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ -#ifndef MACHINE_CPU_V4_H -#define MACHINE_CPU_V4_H - -/* There are no user serviceable parts here, they may change without notice */ -#ifndef _KERNEL -#error Only include this file in the kernel -#endif - -#include -#include -#include -#include - -#if __ARM_ARCH >= 6 -#error Never include this file for ARMv6 -#else - -#define CPU_ASID_KERNEL 0 - -/* - * Macros to generate CP15 (system control processor) read/write functions. - */ -#define _FX(s...) #s - -#define _RF0(fname, aname...) \ -static __inline uint32_t \ -fname(void) \ -{ \ - uint32_t reg; \ - __asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \ - return(reg); \ -} - -#define _R64F0(fname, aname) \ -static __inline uint64_t \ -fname(void) \ -{ \ - uint64_t reg; \ - __asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \ - return(reg); \ -} - -#define _WF0(fname, aname...) \ -static __inline void \ -fname(void) \ -{ \ - __asm __volatile("mcr\t" _FX(aname)); \ -} - -#define _WF1(fname, aname...) \ -static __inline void \ -fname(uint32_t reg) \ -{ \ - __asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \ -} - -/* - * Publicly accessible functions - */ - -/* Various control registers */ - -_RF0(cp15_cpacr_get, CP15_CPACR(%0)) -_WF1(cp15_cpacr_set, CP15_CPACR(%0)) -_RF0(cp15_dfsr_get, CP15_DFSR(%0)) -_RF0(cp15_ttbr_get, CP15_TTBR0(%0)) -_RF0(cp15_dfar_get, CP15_DFAR(%0)) -/* XScale */ -_RF0(cp15_actlr_get, CP15_ACTLR(%0)) -_WF1(cp15_actlr_set, CP15_ACTLR(%0)) - -/*CPU id registers */ -_RF0(cp15_midr_get, CP15_MIDR(%0)) -_RF0(cp15_ctr_get, CP15_CTR(%0)) -_RF0(cp15_tcmtr_get, CP15_TCMTR(%0)) -_RF0(cp15_tlbtr_get, CP15_TLBTR(%0)) -_RF0(cp15_sctlr_get, CP15_SCTLR(%0)) - -#undef _FX -#undef _RF0 -#undef _WF0 -#undef _WF1 - -/* - * armv4/5 compatibility shims. - * - * These functions provide armv4 cache maintenance using the new armv6 names. - * Included here are just the functions actually used now in common code; it may - * be necessary to add things here over time. - * - * The callers of the dcache functions expect these routines to handle address - * and size values which are not aligned to cacheline boundaries; the armv4 and - * armv5 asm code handles that. 
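 * A hypothetical caller, shown for illustration only (buf_va, buf_pa and len
 * are made-up names), may therefore pass an arbitrarily aligned buffer:
 *
 *     dcache_wb_poc(buf_va, buf_pa, len);
 *
 * without first rounding buf_va or len to arm_dcache_align boundaries.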
- */ - -static __inline void -tlb_flush_all(void) -{ - cpu_tlb_flushID(); - cpu_cpwait(); -} - -static __inline void -icache_sync(vm_offset_t va, vm_size_t size) -{ - cpu_icache_sync_range(va, size); -} - -static __inline void -dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) -{ - - cpu_dcache_inv_range(va, size); -#ifdef ARM_L2_PIPT - cpu_l2cache_inv_range(pa, size); -#else - cpu_l2cache_inv_range(va, size); -#endif -} - -static __inline void -dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size) -{ - - /* See armv6 code, above, for why we do L2 before L1 in this case. */ -#ifdef ARM_L2_PIPT - cpu_l2cache_inv_range(pa, size); -#else - cpu_l2cache_inv_range(va, size); -#endif - cpu_dcache_inv_range(va, size); -} - -static __inline void -dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size) -{ - - cpu_dcache_wb_range(va, size); -#ifdef ARM_L2_PIPT - cpu_l2cache_wb_range(pa, size); -#else - cpu_l2cache_wb_range(va, size); -#endif -} - -static __inline void -dcache_wbinv_poc_all(void) -{ - cpu_idcache_wbinv_all(); - cpu_l2cache_wbinv_all(); -} - -#endif /* _KERNEL */ - -#endif /* MACHINE_CPU_V4_H */ diff --git a/sys/arm/include/cpu.h b/sys/arm/include/cpu.h index 55c20cec3ba9..8937a87aebea 100644 --- a/sys/arm/include/cpu.h +++ b/sys/arm/include/cpu.h @@ -1,94 +1,90 @@ /* $NetBSD: cpu.h,v 1.2 2001/02/23 21:23:52 reinoud Exp $ */ /* $FreeBSD$ */ #ifndef MACHINE_CPU_H #define MACHINE_CPU_H #include #include void cpu_halt(void); void swi_vm(void *); #ifdef _KERNEL -#if __ARM_ARCH >= 6 #include -#else -#include -#endif /* __ARM_ARCH >= 6 */ static __inline uint64_t get_cyclecount(void) { #if __ARM_ARCH > 6 || (__ARM_ARCH == 6 && defined(CPU_ARM1176)) #if (__ARM_ARCH > 6) && defined(DEV_PMU) if (pmu_attched) { u_int cpu; uint64_t h, h2; uint32_t l, r; cpu = PCPU_GET(cpuid); h = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]); l = cp15_pmccntr_get(); /* In case interrupts are disabled we need to check for overflow. */ r = cp15_pmovsr_get(); if (r & PMU_OVSR_C) { atomic_add_32(&ccnt_hi[cpu], 1); /* Clear the event. */ cp15_pmovsr_set(PMU_OVSR_C); } /* Make sure there was no wrap-around while we read the lo half. */ h2 = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]); if (h != h2) l = cp15_pmccntr_get(); return (h2 << 32 | l); } else #endif return cp15_pmccntr_get(); #else /* No performance counters, so use binuptime(9). This is slooooow */ struct bintime bt; binuptime(&bt); return ((uint64_t)bt.sec << 56 | bt.frac >> 8); #endif } #endif #define TRAPF_USERMODE(frame) ((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE) #define TRAPF_PC(tfp) ((tfp)->tf_pc) #define cpu_getstack(td) ((td)->td_frame->tf_usr_sp) #define cpu_setstack(td, sp) ((td)->td_frame->tf_usr_sp = (sp)) #define cpu_spinwait() /* nothing */ #define cpu_lock_delay() DELAY(1) #define ARM_NVEC 8 #define ARM_VEC_ALL 0xffffffff extern vm_offset_t vector_page; /* * Params passed into initarm. If you change the size of this you will * need to update locore.S to allocate more memory on the stack before * it calls initarm. 
*/ struct arm_boot_params { register_t abp_size; /* Size of this structure */ register_t abp_r0; /* r0 from the boot loader */ register_t abp_r1; /* r1 from the boot loader */ register_t abp_r2; /* r2 from the boot loader */ register_t abp_r3; /* r3 from the boot loader */ vm_offset_t abp_physaddr; /* The kernel physical address */ vm_offset_t abp_pagetable; /* The early page table */ }; void arm_vector_init(vm_offset_t, int); void fork_trampoline(void); void identify_arm_cpu(void); void *initarm(struct arm_boot_params *); extern char btext[]; extern char etext[]; int badaddr_read(void *, size_t, void *); #endif /* !MACHINE_CPU_H */ diff --git a/sys/arm/include/pmap-v4.h b/sys/arm/include/pmap-v4.h deleted file mode 100644 index e4102ffe50c8..000000000000 --- a/sys/arm/include/pmap-v4.h +++ /dev/null @@ -1,381 +0,0 @@ -/*- - * Copyright (c) 1991 Regents of the University of California. - * All rights reserved. - * - * This code is derived from software contributed to Berkeley by - * the Systems Programming Group of the University of Utah Computer - * Science Department and William Jolitz of UUNET Technologies Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * Derived from hp300 version by Mike Hibler, this version by William - * Jolitz uses a recursive map [a pde points to the page directory] to - * map the page tables using the pagetables themselves. This is done to - * reduce the impact on kernel virtual memory for lots of sparse address - * space, and to reduce the cost of memory to each process. 
- * - * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 - * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 - * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 - * - * $FreeBSD$ - */ - -#ifndef _MACHINE_PMAP_V4_H_ -#define _MACHINE_PMAP_V4_H_ - -#include - -/* - * Pte related macros - */ -#define PTE_NOCACHE 1 -#define PTE_CACHE 2 -#define PTE_DEVICE PTE_NOCACHE -#define PTE_PAGETABLE 3 - -enum mem_type { - STRONG_ORD = 0, - DEVICE_NOSHARE, - DEVICE_SHARE, - NRML_NOCACHE, - NRML_IWT_OWT, - NRML_IWB_OWB, - NRML_IWBA_OWBA -}; - -#ifndef LOCORE - -#include -#include -#include -#include - -#define PDESIZE sizeof(pd_entry_t) /* for assembly files */ -#define PTESIZE sizeof(pt_entry_t) /* for assembly files */ - -#define pmap_page_get_memattr(m) ((m)->md.pv_memattr) -#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) - -/* - * Pmap stuff - */ - -/* - * This structure is used to hold a virtual<->physical address - * association and is used mostly by bootstrap code - */ -struct pv_addr { - SLIST_ENTRY(pv_addr) pv_list; - vm_offset_t pv_va; - vm_paddr_t pv_pa; -}; - -struct pv_entry; -struct pv_chunk; - -struct md_page { - int pvh_attrs; - vm_memattr_t pv_memattr; - vm_offset_t pv_kva; /* first kernel VA mapping */ - TAILQ_HEAD(,pv_entry) pv_list; -}; - -struct l1_ttable; -struct l2_dtable; - -/* - * The number of L2 descriptor tables which can be tracked by an l2_dtable. - * A bucket size of 16 provides for 16MB of contiguous virtual address - * space per l2_dtable. Most processes will, therefore, require only two or - * three of these to map their whole working set. - */ -#define L2_BUCKET_LOG2 4 -#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) -/* - * Given the above "L2-descriptors-per-l2_dtable" constant, the number - * of l2_dtable structures required to track all possible page descriptors - * mappable by an L1 translation table is given by the following constants: - */ -#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) -#define L2_SIZE (1 << L2_LOG2) - -struct pmap { - struct mtx pm_mtx; - u_int8_t pm_domain; - struct l1_ttable *pm_l1; - struct l2_dtable *pm_l2[L2_SIZE]; - cpuset_t pm_active; /* active on cpus */ - struct pmap_statistics pm_stats; /* pmap statictics */ - TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ -}; - -typedef struct pmap *pmap_t; - -#ifdef _KERNEL -extern struct pmap kernel_pmap_store; -#define kernel_pmap (&kernel_pmap_store) - -#define PMAP_ASSERT_LOCKED(pmap) \ - mtx_assert(&(pmap)->pm_mtx, MA_OWNED) -#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) -#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) -#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ - NULL, MTX_DEF | MTX_DUPOK) -#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx) -#define PMAP_MTX(pmap) (&(pmap)->pm_mtx) -#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) -#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) -#endif - -/* - * For each vm_page_t, there is a list of all currently valid virtual - * mappings of that page. An entry is a pv_entry_t, the list is pv_list. - */ -typedef struct pv_entry { - vm_offset_t pv_va; /* virtual address for mapping */ - TAILQ_ENTRY(pv_entry) pv_list; - int pv_flags; /* flags (wired, etc...) */ - pmap_t pv_pmap; /* pmap where mapping lies */ - TAILQ_ENTRY(pv_entry) pv_plist; -} *pv_entry_t; - -/* - * pv_entries are allocated in chunks per-process. This avoids the - * need to track per-pmap assignments. 
- */ -#define _NPCM 8 -#define _NPCPV 252 - -struct pv_chunk { - pmap_t pc_pmap; - TAILQ_ENTRY(pv_chunk) pc_list; - uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ - uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */ - TAILQ_ENTRY(pv_chunk) pc_lru; - struct pv_entry pc_pventry[_NPCPV]; -}; - -#ifdef _KERNEL - -boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); - -/* - * virtual address to page table entry and - * to physical address. Likewise for alternate address space. - * Note: these work recursively, thus vtopte of a pte will give - * the corresponding pde that in turn maps it. - */ - -/* - * The current top of kernel VM. - */ -extern vm_offset_t pmap_curmaxkvaddr; - -/* Virtual address to page table entry */ -static __inline pt_entry_t * -vtopte(vm_offset_t va) -{ - pd_entry_t *pdep; - pt_entry_t *ptep; - - if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE) - return (NULL); - return (ptep); -} - -void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); -int pmap_change_attr(vm_offset_t, vm_size_t, int); -void pmap_kenter(vm_offset_t va, vm_paddr_t pa); -void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); -void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); -vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); -void pmap_kremove(vm_offset_t); -vm_page_t pmap_use_pt(pmap_t, vm_offset_t); -void pmap_debug(int); -void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); -void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); -vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); -void -pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, - int cache); -int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int); - -/* - * Definitions for MMU domains - */ -#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ -#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ - -/* - * The new pmap ensures that page-tables are always mapping Write-Thru. - * Thus, on some platforms we can run fast and loose and avoid syncing PTEs - * on every change. - * - * Unfortunately, not all CPUs have a write-through cache mode. So we - * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, - * and if there is the chance for PTE syncs to be needed, we define - * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) - * the code. - */ -extern int pmap_needs_pte_sync; - -/* - * These macros define the various bit masks in the PTE. - */ - -#define L1_S_CACHE_MASK (L1_S_B|L1_S_C) -#define L2_L_CACHE_MASK (L2_B|L2_C) -#define L2_S_PROT_U (L2_AP(AP_U)) -#define L2_S_PROT_W (L2_AP(AP_W)) -#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_W) -#define L2_S_CACHE_MASK (L2_B|L2_C) -#define L1_S_PROTO (L1_TYPE_S | L1_S_IMP) -#define L1_C_PROTO (L1_TYPE_C | L1_C_IMP2) -#define L2_L_PROTO (L2_TYPE_L) -#define L2_S_PROTO (L2_TYPE_S) - -/* - * User-visible names for the ones that vary with MMU class. - */ -#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) - -#if defined(CPU_XSCALE_81342) -#define CPU_XSCALE_CORE3 -#define PMAP_NEEDS_PTE_SYNC 1 -#define PMAP_INCLUDE_PTE_SYNC -#else -#define PMAP_NEEDS_PTE_SYNC 0 -#endif - -/* - * These macros return various bits based on kernel/user and protection. - * Note that the compiler will usually fold these at compile time. 
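 * Worked example, added for illustration only (not in the original file):
 * for a writable user mapping,
 *
 *     L2_S_PROT(PTE_USER, VM_PROT_READ | VM_PROT_WRITE)
 *
 * folds to L2_S_PROT_U | L2_S_PROT_W, i.e. L2_AP(AP_U) | L2_AP(AP_W), while
 * a read-only kernel mapping yields 0 (no AP bits set).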
- */ -#define L1_S_PROT_U (L1_S_AP(AP_U)) -#define L1_S_PROT_W (L1_S_AP(AP_W)) -#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) -#define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W) - -#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ - (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) - -#define L2_L_PROT_U (L2_AP(AP_U)) -#define L2_L_PROT_W (L2_AP(AP_W)) -#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) - -#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \ - (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)) - -#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ - (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) - -/* - * Macros to test if a mapping is mappable with an L1 Section mapping - * or an L2 Large Page mapping. - */ -#define L1_S_MAPPABLE_P(va, pa, size) \ - ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) - -#define L2_L_MAPPABLE_P(va, pa, size) \ - ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) - -/* - * Provide a fallback in case we were not able to determine it at - * compile-time. - */ -#ifndef PMAP_NEEDS_PTE_SYNC -#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync -#define PMAP_INCLUDE_PTE_SYNC -#endif - -#ifdef ARM_L2_PIPT -#define _sync_l2(pte, size) cpu_l2cache_wb_range(vtophys(pte), size) -#else -#define _sync_l2(pte, size) cpu_l2cache_wb_range(pte, size) -#endif - -#define PTE_SYNC(pte) \ -do { \ - if (PMAP_NEEDS_PTE_SYNC) { \ - cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ - cpu_drain_writebuf(); \ - _sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\ - } else \ - cpu_drain_writebuf(); \ -} while (/*CONSTCOND*/0) - -#define PTE_SYNC_RANGE(pte, cnt) \ -do { \ - if (PMAP_NEEDS_PTE_SYNC) { \ - cpu_dcache_wb_range((vm_offset_t)(pte), \ - (cnt) << 2); /* * sizeof(pt_entry_t) */ \ - cpu_drain_writebuf(); \ - _sync_l2((vm_offset_t)(pte), \ - (cnt) << 2); /* * sizeof(pt_entry_t) */ \ - } else \ - cpu_drain_writebuf(); \ -} while (/*CONSTCOND*/0) - -void pmap_pte_init_generic(void); - -#define PTE_KERNEL 0 -#define PTE_USER 1 - -/* - * Flags that indicate attributes of pages or mappings of pages. - * - * The PVF_MOD and PVF_REF flags are stored in the mdpage for each - * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual - * pv_entry's for each page. They live in the same "namespace" so - * that we can clear multiple attributes at a time. - * - * Note the "non-cacheable" flag generally means the page has - * multiple mappings in a given address space. - */ -#define PVF_MOD 0x01 /* page is modified */ -#define PVF_REF 0x02 /* page is referenced */ -#define PVF_WIRED 0x04 /* mapping is wired */ -#define PVF_WRITE 0x08 /* mapping is writable */ -#define PVF_EXEC 0x10 /* mapping is executable */ -#define PVF_NC 0x20 /* mapping is non-cacheable */ -#define PVF_MWC 0x40 /* mapping is used multiple times in userland */ -#define PVF_UNMAN 0x80 /* mapping is unmanaged */ - -void vector_page_setprot(int); - -#define SECTION_CACHE 0x1 -#define SECTION_PT 0x2 -void pmap_postinit(void); - -#endif /* _KERNEL */ - -#endif /* !LOCORE */ - -#endif /* !_MACHINE_PMAP_V4_H_ */ diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h index 3983dd9988c6..2f407a9c8760 100644 --- a/sys/arm/include/pmap.h +++ b/sys/arm/include/pmap.h @@ -1,81 +1,77 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2016 Svatopluk Kraus * Copyright (c) 2016 Michal Meloun * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_PMAP_H_ #define _MACHINE_PMAP_H_ -#if __ARM_ARCH >= 6 #include -#else -#include -#endif #ifdef _KERNEL #include extern char *_tmppt; /* poor name! */ extern vm_offset_t virtual_avail; extern vm_offset_t virtual_end; void *pmap_kenter_temporary(vm_paddr_t, int); #define pmap_page_is_write_mapped(m) (((m)->a.flags & PGA_WRITEABLE) != 0) void pmap_page_set_memattr(vm_page_t, vm_memattr_t); void *pmap_mapdev(vm_paddr_t, vm_size_t); void pmap_unmapdev(vm_offset_t, vm_size_t); static inline void * pmap_mapdev_attr(vm_paddr_t addr, vm_size_t size, int attr) { panic("%s is not implemented yet!\n", __func__); } struct pcb; void pmap_set_pcb_pagedir(pmap_t, struct pcb *); void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t); void pmap_kremove_device(vm_offset_t, vm_size_t); vm_paddr_t pmap_kextract(vm_offset_t); #define vtophys(va) pmap_kextract((vm_offset_t)(va)) static inline int pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused) { return (0); } #define PMAP_ENTER_QUICK_LOCKED 0x10000000 #endif /* _KERNEL */ #endif /* !_MACHINE_PMAP_H_ */ diff --git a/sys/arm/include/pte-v4.h b/sys/arm/include/pte-v4.h deleted file mode 100644 index 7102902c18f7..000000000000 --- a/sys/arm/include/pte-v4.h +++ /dev/null @@ -1,350 +0,0 @@ -/* $NetBSD: pte.h,v 1.1 2001/11/23 17:39:04 thorpej Exp $ */ - -/*- - * Copyright (c) 1994 Mark Brinicombe. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the RiscBSD team. - * 4. 
The name "RiscBSD" nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ - -#ifndef _MACHINE_PTE_V4_H_ -#define _MACHINE_PTE_V4_H_ - -#ifndef LOCORE -typedef uint32_t pd_entry_t; /* page directory entry */ -typedef uint32_t pt_entry_t; /* page table entry */ -typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ -#endif - -#define PG_FRAME 0xfffff000 - -/* The PT_SIZE definition is misleading... A page table is only 0x400 - * bytes long. But since VM mapping can only be done to 0x1000 a single - * 1KB blocks cannot be steered to a va by itself. Therefore the - * pages tables are allocated in blocks of 4. i.e. if a 1 KB block - * was allocated for a PT then the other 3KB would also get mapped - * whenever the 1KB was mapped. - */ - -#define PT_RSIZE 0x0400 /* Real page table size */ -#define PT_SIZE 0x1000 -#define PD_SIZE 0x4000 - -/* Page table types and masks */ -#define L1_PAGE 0x01 /* L1 page table mapping */ -#define L1_SECTION 0x02 /* L1 section mapping */ -#define L1_FPAGE 0x03 /* L1 fine page mapping */ -#define L1_MASK 0x03 /* Mask for L1 entry type */ -#define L2_LPAGE 0x01 /* L2 large page (64KB) */ -#define L2_SPAGE 0x02 /* L2 small page (4KB) */ -#define L2_MASK 0x03 /* Mask for L2 entry type */ -#define L2_INVAL 0x00 /* L2 invalid type */ - -/* - * The ARM MMU architecture was introduced with ARM v3 (previous ARM - * architecture versions used an optional off-CPU memory controller - * to perform address translation). - * - * The ARM MMU consists of a TLB and translation table walking logic. - * There is typically one TLB per memory interface (or, put another - * way, one TLB per software-visible cache). - * - * The ARM MMU is capable of mapping memory in the following chunks: - * - * 1M Sections (L1 table) - * - * 64K Large Pages (L2 table) - * - * 4K Small Pages (L2 table) - * - * 1K Tiny Pages (L2 table) - * - * There are two types of L2 tables: Coarse Tables and Fine Tables. - * Coarse Tables can map Large and Small Pages. Fine Tables can - * map Tiny Pages. - * - * Coarse Tables can define 4 Subpages within Large and Small pages. - * Subpages define different permissions for each Subpage within - * a Page. - * - * Coarse Tables are 1K in length. Fine tables are 4K in length. - * - * The Translation Table Base register holds the pointer to the - * L1 Table. The L1 Table is a 16K contiguous chunk of memory - * aligned to a 16K boundary. Each entry in the L1 Table maps - * 1M of virtual address space, either via a Section mapping or - * via an L2 Table. - * - * In addition, the Fast Context Switching Extension (FCSE) is available - * on some ARM v4 and ARM v5 processors. 
FCSE is a way of eliminating - * TLB/cache flushes on context switch by use of a smaller address space - * and a "process ID" that modifies the virtual address before being - * presented to the translation logic. - */ - -/* ARMv6 super-sections. */ -#define L1_SUP_SIZE 0x01000000 /* 16M */ -#define L1_SUP_OFFSET (L1_SUP_SIZE - 1) -#define L1_SUP_FRAME (~L1_SUP_OFFSET) -#define L1_SUP_SHIFT 24 - -#define L1_S_SIZE 0x00100000 /* 1M */ -#define L1_S_OFFSET (L1_S_SIZE - 1) -#define L1_S_FRAME (~L1_S_OFFSET) -#define L1_S_SHIFT 20 - -#define L2_L_SIZE 0x00010000 /* 64K */ -#define L2_L_OFFSET (L2_L_SIZE - 1) -#define L2_L_FRAME (~L2_L_OFFSET) -#define L2_L_SHIFT 16 - -#define L2_S_SIZE 0x00001000 /* 4K */ -#define L2_S_OFFSET (L2_S_SIZE - 1) -#define L2_S_FRAME (~L2_S_OFFSET) -#define L2_S_SHIFT 12 - -#define L2_T_SIZE 0x00000400 /* 1K */ -#define L2_T_OFFSET (L2_T_SIZE - 1) -#define L2_T_FRAME (~L2_T_OFFSET) -#define L2_T_SHIFT 10 - -/* - * The NetBSD VM implementation only works on whole pages (4K), - * whereas the ARM MMU's Coarse tables are sized in terms of 1K - * (16K L1 table, 1K L2 table). - * - * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2 - * table. - */ -#define L1_TABLE_SIZE 0x4000 /* 16K */ -#define L2_TABLE_SIZE 0x1000 /* 4K */ -/* - * The new pmap deals with the 1KB coarse L2 tables by - * allocating them from a pool. Until every port has been converted, - * keep the old L2_TABLE_SIZE define lying around. Converted ports - * should use L2_TABLE_SIZE_REAL until then. - */ -#define L2_TABLE_SIZE_REAL 0x400 /* 1K */ - -/* Total number of page table entries in L2 table */ -#define L2_PTE_NUM_TOTAL (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)) - -/* - * ARM L1 Descriptors - */ - -#define L1_TYPE_INV 0x00 /* Invalid (fault) */ -#define L1_TYPE_C 0x01 /* Coarse L2 */ -#define L1_TYPE_S 0x02 /* Section */ -#define L1_TYPE_F 0x03 /* Fine L2 */ -#define L1_TYPE_MASK 0x03 /* mask of type bits */ - -/* L1 Section Descriptor */ -#define L1_S_B 0x00000004 /* bufferable Section */ -#define L1_S_C 0x00000008 /* cacheable Section */ -#define L1_S_IMP 0x00000010 /* implementation defined */ -#define L1_S_XN (1 << 4) /* execute not */ -#define L1_S_DOM(x) ((x) << 5) /* domain */ -#define L1_S_DOM_MASK L1_S_DOM(0xf) -#define L1_S_AP(x) ((x) << 10) /* access permissions */ -#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ -#define L1_S_TEX(x) (((x) & 0x7) << 12) /* Type Extension */ -#define L1_S_TEX_MASK (0x7 << 12) /* Type Extension */ -#define L1_S_APX (1 << 15) -#define L1_SHARED (1 << 16) - -#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */ -#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */ - -#define L1_S_SUPERSEC ((1) << 18) /* Section is a super-section. 
*/ - -/* L1 Coarse Descriptor */ -#define L1_C_IMP0 0x00000004 /* implementation defined */ -#define L1_C_IMP1 0x00000008 /* implementation defined */ -#define L1_C_IMP2 0x00000010 /* implementation defined */ -#define L1_C_DOM(x) ((x) << 5) /* domain */ -#define L1_C_DOM_MASK L1_C_DOM(0xf) -#define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ - -#define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */ - -/* L1 Fine Descriptor */ -#define L1_F_IMP0 0x00000004 /* implementation defined */ -#define L1_F_IMP1 0x00000008 /* implementation defined */ -#define L1_F_IMP2 0x00000010 /* implementation defined */ -#define L1_F_DOM(x) ((x) << 5) /* domain */ -#define L1_F_DOM_MASK L1_F_DOM(0xf) -#define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */ - -#define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */ - -/* - * ARM L2 Descriptors - */ - -#define L2_TYPE_INV 0x00 /* Invalid (fault) */ -#define L2_TYPE_L 0x01 /* Large Page */ -#define L2_TYPE_S 0x02 /* Small Page */ -#define L2_TYPE_T 0x03 /* Tiny Page */ -#define L2_TYPE_MASK 0x03 /* mask of type bits */ - - /* - * This L2 Descriptor type is available on XScale processors - * when using a Coarse L1 Descriptor. The Extended Small - * Descriptor has the same format as the XScale Tiny Descriptor, - * but describes a 4K page, rather than a 1K page. - */ -#define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */ - -#define L2_B 0x00000004 /* Bufferable page */ -#define L2_C 0x00000008 /* Cacheable page */ -#define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */ -#define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */ -#define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */ -#define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */ - -#define L2_SHARED (1 << 10) -#define L2_APX (1 << 9) -#define L2_XN (1 << 0) -#define L2_L_TEX_MASK (0x7 << 12) /* Type Extension */ -#define L2_L_TEX(x) (((x) & 0x7) << 12) -#define L2_S_TEX_MASK (0x7 << 6) /* Type Extension */ -#define L2_S_TEX(x) (((x) & 0x7) << 6) - -#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */ -#define L2_XSCALE_L_S(x) (1 << 15) /* Shared */ -#define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */ - -/* - * Access Permissions for L1 and L2 Descriptors. - */ -#define AP_W 0x01 /* writable */ -#define AP_REF 0x01 /* referenced flag */ -#define AP_U 0x02 /* user */ - -/* - * Short-hand for common AP_* constants. - * - * Note: These values assume the S (System) bit is set and - * the R (ROM) bit is clear in CP15 register 1. - */ -#define AP_KR 0x00 /* kernel read */ -#define AP_KRW 0x01 /* kernel read/write */ -#define AP_KRWUR 0x02 /* kernel read/write usr read */ -#define AP_KRWURW 0x03 /* kernel read/write usr read/write */ - -/* - * Domain Types for the Domain Access Control Register. - */ -#define DOMAIN_FAULT 0x00 /* no access */ -#define DOMAIN_CLIENT 0x01 /* client */ -#define DOMAIN_RESERVED 0x02 /* reserved */ -#define DOMAIN_MANAGER 0x03 /* manager */ - -/* - * Type Extension bits for XScale processors. 
- * - * Behavior of C and B when X == 0: - * - * C B Cacheable Bufferable Write Policy Line Allocate Policy - * 0 0 N N - - - * 0 1 N Y - - - * 1 0 Y Y Write-through Read Allocate - * 1 1 Y Y Write-back Read Allocate - * - * Behavior of C and B when X == 1: - * C B Cacheable Bufferable Write Policy Line Allocate Policy - * 0 0 - - - - DO NOT USE - * 0 1 N Y - - - * 1 0 Mini-Data - - - - * 1 1 Y Y Write-back R/W Allocate - */ -#define TEX_XSCALE_X 0x01 /* X modifies C and B */ -#define TEX_XSCALE_E 0x02 -#define TEX_XSCALE_T 0x04 - -/* Xscale core 3 */ - -/* - * - * Cache attributes with L2 present, S = 0 - * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce - * 0 0 0 0 0 N N - N N - * 0 0 0 0 1 N N - N Y - * 0 0 0 1 0 Y Y WT N Y - * 0 0 0 1 1 Y Y WB Y Y - * 0 0 1 0 0 N N - Y Y - * 0 0 1 0 1 N N - N N - * 0 0 1 1 0 Y Y - - N - * 0 0 1 1 1 Y Y WT Y Y - * 0 1 0 0 0 N N - N N - * 0 1 0 0 1 N/A N/A N/A N/A N/A - * 0 1 0 1 0 N/A N/A N/A N/A N/A - * 0 1 0 1 1 N/A N/A N/A N/A N/A - * 0 1 1 X X N/A N/A N/A N/A N/A - * 1 X 0 0 0 N N - N Y - * 1 X 0 0 1 Y N WB N Y - * 1 X 0 1 0 Y N WT N Y - * 1 X 0 1 1 Y N WB Y Y - * 1 X 1 0 0 N N - Y Y - * 1 X 1 0 1 Y Y WB Y Y - * 1 X 1 1 0 Y Y WT Y Y - * 1 X 1 1 1 Y Y WB Y Y - * - * - * - * - * Cache attributes with L2 present, S = 1 - * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce - * 0 0 0 0 0 N N - N N - * 0 0 0 0 1 N N - N Y - * 0 0 0 1 0 Y Y - N Y - * 0 0 0 1 1 Y Y WT Y Y - * 0 0 1 0 0 N N - Y Y - * 0 0 1 0 1 N N - N N - * 0 0 1 1 0 Y Y - - N - * 0 0 1 1 1 Y Y WT Y Y - * 0 1 0 0 0 N N - N N - * 0 1 0 0 1 N/A N/A N/A N/A N/A - * 0 1 0 1 0 N/A N/A N/A N/A N/A - * 0 1 0 1 1 N/A N/A N/A N/A N/A - * 0 1 1 X X N/A N/A N/A N/A N/A - * 1 X 0 0 0 N N - N Y - * 1 X 0 0 1 Y N - N Y - * 1 X 0 1 0 Y N - N Y - * 1 X 0 1 1 Y N - Y Y - * 1 X 1 0 0 N N - Y Y - * 1 X 1 0 1 Y Y WT Y Y - * 1 X 1 1 0 Y Y WT Y Y - * 1 X 1 1 1 Y Y WT Y Y - */ -#endif /* !_MACHINE_PTE_V4_H_ */ - -/* End of pte.h */ diff --git a/sys/conf/files.arm b/sys/conf/files.arm index 313cb151ea5c..e5b38f723ba4 100644 --- a/sys/conf/files.arm +++ b/sys/conf/files.arm @@ -1,163 +1,158 @@ # $FreeBSD$ arm/arm/autoconf.c standard arm/arm/bcopy_page.S standard arm/arm/bcopyinout.S standard arm/arm/blockio.S standard arm/arm/bus_space_asm_generic.S standard arm/arm/bus_space_base.c optional fdt arm/arm/bus_space_generic.c standard arm/arm/busdma_machdep.c standard arm/arm/copystr.S standard arm/arm/cpufunc.c standard arm/arm/cpufunc_asm.S standard arm/arm/cpufunc_asm_arm9.S optional cpu_arm9e arm/arm/cpufunc_asm_arm11x6.S optional cpu_arm1176 arm/arm/cpufunc_asm_armv4.S optional cpu_arm9e arm/arm/cpufunc_asm_armv5_ec.S optional cpu_arm9e arm/arm/cpufunc_asm_armv7.S optional cpu_cortexa | cpu_krait | cpu_mv_pj4b arm/arm/cpufunc_asm_pj4b.S optional cpu_mv_pj4b arm/arm/cpufunc_asm_sheeva.S optional cpu_arm9e arm/arm/cpuinfo.c standard -arm/arm/cpu_asm-v6.S optional armv7 | armv6 +arm/arm/cpu_asm-v6.S standard arm/arm/db_disasm.c optional ddb arm/arm/db_interface.c optional ddb arm/arm/db_trace.c optional ddb arm/arm/debug_monitor.c optional ddb armv6 arm/arm/debug_monitor.c optional ddb armv7 arm/arm/disassem.c optional ddb arm/arm/dump_machdep.c standard arm/arm/elf_machdep.c standard arm/arm/elf_note.S standard arm/arm/exception.S standard arm/arm/fiq.c standard arm/arm/fiq_subr.S standard arm/arm/fusu.S standard arm/arm/gdb_machdep.c optional gdb arm/arm/generic_timer.c optional generic_timer arm/arm/gic.c optional gic arm/arm/gic_fdt.c optional gic fdt -arm/arm/identcpu-v4.c 
optional !armv7 !armv6 -arm/arm/identcpu-v6.c optional armv7 | armv6 +arm/arm/identcpu-v6.c standard arm/arm/in_cksum.c optional inet | inet6 arm/arm/in_cksum_arm.S optional inet | inet6 -arm/arm/intr.c optional !intrng -kern/subr_intr.c optional intrng +kern/subr_intr.c standard arm/arm/locore.S standard no-obj -arm/arm/hypervisor-stub.S optional armv7 | armv6 +arm/arm/hypervisor-stub.S standard arm/arm/machdep.c standard arm/arm/machdep_boot.c standard arm/arm/machdep_kdb.c standard arm/arm/machdep_intr.c standard arm/arm/machdep_ptrace.c standard arm/arm/mem.c optional mem arm/arm/minidump_machdep.c standard arm/arm/mp_machdep.c optional smp arm/arm/mpcore_timer.c optional mpcore_timer arm/arm/nexus.c standard arm/arm/ofw_machdep.c optional fdt arm/arm/pl190.c optional pl190 arm/arm/pl310.c optional pl310 arm/arm/platform.c optional platform arm/arm/platform_if.m optional platform arm/arm/platform_pl310_if.m optional platform pl310 -arm/arm/pmap-v4.c optional !armv7 !armv6 -arm/arm/pmap-v6.c optional armv7 | armv6 +arm/arm/pmap-v6.c standard arm/arm/pmu.c optional pmu | fdt hwpmc arm/arm/ptrace_machdep.c standard arm/arm/sc_machdep.c optional sc arm/arm/setcpsr.S standard arm/arm/setstack.s standard arm/arm/stack_machdep.c optional ddb | stack arm/arm/stdatomic.c standard \ compile-with "${NORMAL_C:N-Wmissing-prototypes}" arm/arm/support.S standard arm/arm/swtch.S standard -arm/arm/swtch-v4.S optional !armv7 !armv6 -arm/arm/swtch-v6.S optional armv7 | armv6 +arm/arm/swtch-v6.S standard arm/arm/sys_machdep.c standard arm/arm/syscall.c standard -arm/arm/trap-v4.c optional !armv7 !armv6 -arm/arm/trap-v6.c optional armv7 | armv6 +arm/arm/trap-v6.c standard arm/arm/uio_machdep.c standard arm/arm/undefined.c standard arm/arm/unwind.c optional ddb | kdtrace_hooks | stack arm/arm/vm_machdep.c standard arm/arm/vfp.c standard arm/cloudabi32/cloudabi32_sysvec.c optional compat_cloudabi32 cddl/compat/opensolaris/kern/opensolaris_atomic.c optional !armv7 !armv6 zfs | !armv7 !armv6 dtrace compile-with "${CDDL_C}" cddl/dev/dtrace/arm/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/arm/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/arm/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" crypto/des/des_enc.c optional netsmb dev/cpufreq/cpufreq_dt.c optional cpufreq fdt dev/dwc/if_dwc.c optional dwc dev/dwc/if_dwc_if.m optional dwc dev/fb/fb.c optional sc dev/fdt/fdt_arm_platform.c optional platform fdt dev/hdmi/hdmi_if.m optional hdmi dev/hwpmc/hwpmc_arm.c optional hwpmc dev/hwpmc/hwpmc_armv7.c optional hwpmc armv6 dev/hwpmc/hwpmc_armv7.c optional hwpmc armv7 dev/iicbus/twsi/twsi.c optional twsi dev/ofw/ofwpci.c optional fdt pci dev/pci/pci_host_generic.c optional pci_host_generic pci dev/pci/pci_host_generic_fdt.c optional pci_host_generic pci fdt dev/psci/psci.c optional psci dev/psci/smccc_arm.S optional psci dev/syscons/scgfbrndr.c optional sc dev/uart/uart_cpu_fdt.c optional uart fdt kern/msi_if.m optional intrng kern/pic_if.m optional intrng kern/subr_busdma_bufalloc.c standard kern/subr_devmap.c standard kern/subr_physmem.c standard kern/subr_sfbuf.c standard libkern/arm/aeabi_unwind.c standard libkern/arm/divsi3.S standard libkern/arm/ffs.S optional !armv7 !armv6 libkern/arm/ldivmod.S standard libkern/arm/ldivmod_helper.c standard libkern/arm/memclr.S standard libkern/arm/memcpy.S standard libkern/arm/memset.S standard libkern/arm/muldi3.c standard libkern/ashldi3.c standard libkern/ashrdi3.c standard libkern/divdi3.c standard 
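For reference, since pmap-v4.c and pte-v4.h are deleted above with no surviving in-tree counterpart, here is a minimal standalone C sketch of how the removed L1_S_MAPPABLE_P/L2_L_MAPPABLE_P predicates picked a mapping granularity (1M section, 64K large page, or 4K small pages). The constants and predicate bodies are copied from the deleted pte-v4.h/pmap-v4.h; the vm_*_t typedefs, map_chunk() helper, and main() harness are local stand-ins for illustration only, not FreeBSD code.

/*
 * Sketch: choose the largest ARMv4/v5 mapping size whose alignment and
 * length requirements are met, the way the removed pmap-v4 code did.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t vm_offset_t;		/* stand-in for the MD typedefs */
typedef uint32_t vm_paddr_t;
typedef uint32_t vm_size_t;

#define	L1_S_SIZE	0x00100000	/* 1M section */
#define	L1_S_OFFSET	(L1_S_SIZE - 1)
#define	L2_L_SIZE	0x00010000	/* 64K large page */
#define	L2_L_OFFSET	(L2_L_SIZE - 1)

/* Predicates copied from the deleted header. */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)

static const char *
map_chunk(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	if (L1_S_MAPPABLE_P(va, pa, size))
		return ("1M section");
	if (L2_L_MAPPABLE_P(va, pa, size))
		return ("64K large page");
	return ("4K small pages");
}

int
main(void)
{
	/* 1M-aligned va/pa, 2M long: section mapping. */
	printf("%s\n", map_chunk(0xc0000000, 0x80000000, 0x00200000));
	/* 64K-aligned only, 128K long: large page. */
	printf("%s\n", map_chunk(0xc0010000, 0x80010000, 0x00020000));
	/* 4K-aligned only: small pages. */
	printf("%s\n", map_chunk(0xc0001000, 0x80001000, 0x00001000));
	return (0);
}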
libkern/ffsl.c optional !armv7 !armv6 libkern/ffsll.c optional !armv7 !armv6 libkern/fls.c optional !armv7 !armv6 libkern/flsl.c optional !armv7 !armv6 libkern/flsll.c optional !armv7 !armv6 libkern/lshrdi3.c standard libkern/memcmp.c standard libkern/moddi3.c standard libkern/qdivrem.c standard libkern/ucmpdi2.c standard libkern/udivdi3.c standard libkern/umoddi3.c standard # CloudABI support cloudabi32_vdso.o optional compat_cloudabi32 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_armv6.S" \ compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_armv6.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi32_vdso.o" # cloudabi32_vdso_blob.o optional compat_cloudabi32 \ dependency "cloudabi32_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf32-littlearm --binary-architecture arm cloudabi32_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi32_vdso_blob.o" # # Annapurna support arm/annapurna/alpine/alpine_ccu.c optional al_ccu fdt arm/annapurna/alpine/alpine_nb_service.c optional al_nb_service fdt arm/annapurna/alpine/alpine_pci.c optional al_pci fdt arm/annapurna/alpine/alpine_pci_msix.c optional al_pci fdt arm/annapurna/alpine/alpine_serdes.c optional al_serdes fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
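Likewise, a minimal sketch of the access-permission composition the deleted headers performed: L1_S_PROT() folded the kernel/user selector and VM_PROT_WRITE into the AP field (bits 11:10) of an L1 section descriptor. AP_W, AP_U, L1_S_AP(), PTE_KERNEL/PTE_USER, and the L1_S_PROT* definitions are copied from the removed files; the VM_PROT_WRITE value and the main() harness are stand-ins added so the example is self-contained.

#include <stdio.h>

#define	VM_PROT_WRITE	0x02		/* stand-in for the vm/vm.h value */

#define	AP_W		0x01		/* writable */
#define	AP_U		0x02		/* user */
#define	L1_S_AP(x)	((x) << 10)	/* section access permissions */

#define	PTE_KERNEL	0
#define	PTE_USER	1

#define	L1_S_PROT_U	(L1_S_AP(AP_U))
#define	L1_S_PROT_W	(L1_S_AP(AP_W))
#define	L1_S_PROT(ku, pr)						\
	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) |			\
	 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

int
main(void)
{
	/* Kernel read/write section: AP_KRW in bits 11:10 -> 0x00000400. */
	printf("kernel rw: 0x%08x\n",
	    (unsigned)L1_S_PROT(PTE_KERNEL, VM_PROT_WRITE));
	/* User read-only section: AP_KRWUR -> 0x00000800. */
	printf("user ro:   0x%08x\n",
	    (unsigned)L1_S_PROT(PTE_USER, 0));
	return (0);
}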