Index: head/sys/arm/arm/cpufunc_asm_xscale.S =================================================================== --- head/sys/arm/arm/cpufunc_asm_xscale.S (revision 295266) +++ head/sys/arm/arm/cpufunc_asm_xscale.S (revision 295267) @@ -1,523 +1,508 @@ /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */ /*- * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ /*- * Copyright (c) 2001 Matt Thomas. * Copyright (c) 1997,1998 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * XScale assembly functions for CPU / MMU / TLB specific operations */ #include <machine/asm.h> #include <machine/armreg.h> __FBSDID("$FreeBSD$"); /* * Size of the XScale core D-cache. */ #define DCACHE_SIZE 0x00008000 -.Lblock_userspace_access: - .word _C_LABEL(block_userspace_access) - /* * CPWAIT -- Canonical method to wait for CP15 update. * From: Intel 80200 manual, section 2.3.3. * * NOTE: Clobbers the specified temp reg. */ #define CPWAIT_BRANCH \ sub pc, pc, #4 #define CPWAIT(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ mov tmp, tmp /* wait for it to complete */ ;\ CPWAIT_BRANCH /* branch to next insn */ #define CPWAIT_AND_RETURN_SHIFTER lsr #32 #define CPWAIT_AND_RETURN(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ /* Wait for it to complete and branch to the return address */ \ sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER ENTRY(xscale_cpwait) CPWAIT_AND_RETURN(r0) END(xscale_cpwait) /* * We need a separate cpu_control() entry point, since we have to * invalidate the Branch Target Buffer in the event the BPRD bit * changes in the control register. */ ENTRY(xscale_control) mrc p15, 0, r3, c1, c0, 0 /* Read the control register */ bic r2, r3, r0 /* Clear bits */ eor r2, r2, r1 /* XOR bits */ teq r2, r3 /* Only write if there was a change */ mcrne p15, 0, r0, c7, c5, 6 /* Invalidate the BTB */ mcrne p15, 0, r2, c1, c0, 0 /* Write new control register */ mov r0, r3 /* Return old value */ CPWAIT_AND_RETURN(r1) END(xscale_control) /* * Functions to set the MMU Translation Table Base register * * We need to clean and flush the cache as it uses virtual * addresses that are about to change.
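To make the two idioms above concrete, here is a minimal C sketch (illustration only, with hypothetical helper names; the kernel uses the assembly macros directly, and the interrupt blocking done under CACHE_CLEAN_BLOCK_INTR is elided) of the CPWAIT recipe and of the TTB-change sequence that xscale_setttb implements below. The caches are virtually indexed, so they must be cleaned and invalidated under the old translation before the new table base is installed:

#include <stdint.h>

/* The three CPWAIT steps: an arbitrary CP15 read, a data dependency
 * on the result, and a branch that forces a pipeline refill. */
static inline void
cpwait_sketch(void)
{
	uint32_t tmp;

	__asm __volatile(
	    "mrc p15, 0, %0, c2, c0, 0\n"	/* arbitrary read of CP15 */
	    "mov %0, %0\n"			/* stall on the result */
	    "sub pc, pc, #4\n"			/* branch to the next insn */
	    : "=r" (tmp) : : "memory");
}

extern void xscale_cache_cleanID(void);	/* provided later in this file */

static void
setttb_sketch(uint32_t ttb_pa)
{
	xscale_cache_cleanID();			/* clean D$ under the old map */
	__asm __volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));  /* inval I$ and BTB */
	__asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); /* drain buffers */
	cpwait_sketch();
	__asm __volatile("mcr p15, 0, %0, c2, c0, 0" : : "r" (ttb_pa)); /* write TTB */
	__asm __volatile("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));  /* flush I+D TLB */
	cpwait_sketch();
}

The CPWAIT_AND_RETURN variant folds the pipeline-flushing branch into the function return: sub pc, lr, tmp, lsr #32 shifts the scratch register right by 32 bits, which always yields zero, so the return address is unchanged while the instruction still carries a data dependency on the CP15 read.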
*/ ENTRY(xscale_setttb) #ifdef CACHE_CLEAN_BLOCK_INTR mrs r3, cpsr orr r1, r3, #(PSR_I | PSR_F) msr cpsr_fsxc, r1 -#else - ldr r3, .Lblock_userspace_access - ldr r2, [r3] - orr r1, r2, #1 - str r1, [r3] #endif stmfd sp!, {r0-r3, lr} bl _C_LABEL(xscale_cache_cleanID) mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */ CPWAIT(r0) ldmfd sp!, {r0-r3, lr} /* Write the TTB */ mcr p15, 0, r0, c2, c0, 0 /* If we have updated the TTB we must flush the TLB */ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */ /* The cleanID above means we only need to flush the I cache here */ mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ CPWAIT(r0) #ifdef CACHE_CLEAN_BLOCK_INTR msr cpsr_fsxc, r3 -#else - str r2, [r3] #endif RET END(xscale_setttb) /* * TLB functions * */ ENTRY(xscale_tlb_flushID_SE) mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ CPWAIT_AND_RETURN(r0) END(xscale_tlb_flushID_SE) /* * Cache functions */ ENTRY(xscale_cache_flushID) mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushID) ENTRY(xscale_cache_flushI) mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushI) ENTRY(xscale_cache_flushD) mcr p15, 0, r0, c7, c6, 0 /* flush D cache */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushD) ENTRY(xscale_cache_flushI_SE) mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushI_SE) ENTRY(xscale_cache_flushD_SE) /* * Errata (rev < 2): Must clean-dcache-line to an address * before invalidate-dcache-line to an address, or dirty * bits will not be cleared in the dcache array. */ mcr p15, 0, r0, c7, c10, 1 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushD_SE) ENTRY(xscale_cache_cleanD_E) mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ CPWAIT_AND_RETURN(r0) END(xscale_cache_cleanD_E) /* * Information for the XScale cache clean/purge functions: * * * Virtual address of the memory region to use * * Size of memory region * * Note the virtual address for the Data cache clean operation * does not need to be backed by physical memory, since no loads * will actually be performed by the allocate-line operation. * * Note that the Mini-Data cache MUST be cleaned by executing * loads from memory mapped into a region reserved exclusively * for cleaning of the Mini-Data cache. 
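The clean-before-invalidate errata note above is worth one concrete rendering. A hedged C sketch (hypothetical helper, not a kernel API) of the errata-safe single-line eviction performed by xscale_cache_flushD_SE:

#include <stdint.h>

/* On steppings with the errata (rev < 2), invalidating a dirty line
 * without cleaning it first can leave stale dirty bits in the cache
 * array, so always write the line back before invalidating it. */
static inline void
dcache_evict_line_sketch(uintptr_t va)
{
	__asm __volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (va)); /* clean */
	__asm __volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (va));  /* invalidate */
}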
*/ .data .global _C_LABEL(xscale_cache_clean_addr) _C_LABEL(xscale_cache_clean_addr): .word 0x00000000 .global _C_LABEL(xscale_cache_clean_size) _C_LABEL(xscale_cache_clean_size): .word DCACHE_SIZE .global _C_LABEL(xscale_minidata_clean_addr) _C_LABEL(xscale_minidata_clean_addr): .word 0x00000000 .global _C_LABEL(xscale_minidata_clean_size) _C_LABEL(xscale_minidata_clean_size): .word 0x00000800 .text .Lxscale_cache_clean_addr: .word _C_LABEL(xscale_cache_clean_addr) .Lxscale_cache_clean_size: .word _C_LABEL(xscale_cache_clean_size) .Lxscale_minidata_clean_addr: .word _C_LABEL(xscale_minidata_clean_addr) .Lxscale_minidata_clean_size: .word _C_LABEL(xscale_minidata_clean_size) #ifdef CACHE_CLEAN_BLOCK_INTR #define XSCALE_CACHE_CLEAN_BLOCK \ mrs r3, cpsr ; \ orr r0, r3, #(PSR_I | PSR_F) ; \ msr cpsr_fsxc, r0 #define XSCALE_CACHE_CLEAN_UNBLOCK \ msr cpsr_fsxc, r3 #else -#define XSCALE_CACHE_CLEAN_BLOCK \ - ldr r3, .Lblock_userspace_access ; \ - ldr ip, [r3] ; \ - orr r0, ip, #1 ; \ - str r0, [r3] +#define XSCALE_CACHE_CLEAN_BLOCK -#define XSCALE_CACHE_CLEAN_UNBLOCK \ - str ip, [r3] +#define XSCALE_CACHE_CLEAN_UNBLOCK #endif /* CACHE_CLEAN_BLOCK_INTR */ #define XSCALE_CACHE_CLEAN_PROLOGUE \ XSCALE_CACHE_CLEAN_BLOCK ; \ ldr r2, .Lxscale_cache_clean_addr ; \ ldmia r2, {r0, r1} ; \ /* \ * BUG ALERT! \ * \ * The XScale core has a strange cache eviction bug, which \ * requires us to use 2x the cache size for the cache clean \ * and for that area to be aligned to 2 * cache size. \ * \ * The work-around is to use 2 areas for cache clean, and to \ * alternate between them whenever this is done. No one knows \ * why the work-around works (mmm!). \ */ \ eor r0, r0, #(DCACHE_SIZE) ; \ str r0, [r2] ; \ add r0, r0, r1 #define XSCALE_CACHE_CLEAN_EPILOGUE \ XSCALE_CACHE_CLEAN_UNBLOCK ENTRY_NP(xscale_cache_syncI) EENTRY_NP(xscale_cache_purgeID) mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */ EENTRY_NP(xscale_cache_cleanID) EENTRY_NP(xscale_cache_purgeD) EENTRY(xscale_cache_cleanD) XSCALE_CACHE_CLEAN_PROLOGUE 1: subs r0, r0, #32 mcr p15, 0, r0, c7, c2, 5 /* allocate cache line */ subs r1, r1, #32 bne 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT(r0) XSCALE_CACHE_CLEAN_EPILOGUE RET EEND(xscale_cache_cleanD) EEND(xscale_cache_purgeD) EEND(xscale_cache_cleanID) EEND(xscale_cache_purgeID) END(xscale_cache_syncI) /* * Clean the mini-data cache. * * It's expected that we only use the mini-data cache for * kernel addresses, so there is no need to purge it on * context switch, and no need to prevent userspace access * while we clean it. 
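Before the minidata routine, a hedged C model of the BUG ALERT workaround above (names are hypothetical; the real code keeps the base in xscale_cache_clean_addr and walks it in assembly). The clean area is 2 * DCACHE_SIZE and aligned to 2 * DCACHE_SIZE, and each pass flips to the other half by XORing the stored base with DCACHE_SIZE before walking it top-down, one line-allocate at a time:

#include <stdint.h>

#define DCACHE_SIZE_SK	0x00008000	/* 32 KiB, matching the define above */
#define LINE_SIZE_SK	32

static uintptr_t clean_base;	/* stands in for xscale_cache_clean_addr */

static void
dcache_clean_sketch(void)
{
	uintptr_t va;

	clean_base ^= DCACHE_SIZE_SK;	/* alternate between the two halves */
	for (va = clean_base + DCACHE_SIZE_SK; va > clean_base; ) {
		va -= LINE_SIZE_SK;
		/* allocate the line, evicting whatever dirty line it displaces */
		__asm __volatile("mcr p15, 0, %0, c7, c2, 5" : : "r" (va));
	}
}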
*/ ENTRY(xscale_cache_clean_minidata) ldr r2, .Lxscale_minidata_clean_addr ldmia r2, {r0, r1} 1: ldr r3, [r0], #32 subs r1, r1, #32 bne 1b mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r1) END(xscale_cache_clean_minidata) ENTRY(xscale_cache_purgeID_E) mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ CPWAIT(r1) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ CPWAIT_AND_RETURN(r1) END(xscale_cache_purgeID_E) ENTRY(xscale_cache_purgeD_E) mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ CPWAIT(r1) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ CPWAIT_AND_RETURN(r1) END(xscale_cache_purgeD_E) /* * Soft functions */ /* xscale_cache_syncI is identical to xscale_cache_purgeID */ EENTRY(xscale_cache_cleanID_rng) ENTRY(xscale_cache_cleanD_rng) cmp r1, #0x4000 bcs _C_LABEL(xscale_cache_cleanID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) /*END(xscale_cache_cleanID_rng)*/ END(xscale_cache_cleanD_rng) ENTRY(xscale_cache_purgeID_rng) cmp r1, #0x4000 bcs _C_LABEL(xscale_cache_purgeID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscale_cache_purgeID_rng) ENTRY(xscale_cache_purgeD_rng) cmp r1, #0x4000 bcs _C_LABEL(xscale_cache_purgeD) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscale_cache_purgeD_rng) ENTRY(xscale_cache_syncI_rng) cmp r1, #0x4000 bcs _C_LABEL(xscale_cache_syncI) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscale_cache_syncI_rng) ENTRY(xscale_cache_flushD_rng) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushD_rng) /* * Context switch. * * These are the CPU-specific parts of the context switcher, cpu_switch(). * These functions actually perform the TTB reload. * * NOTE: Special calling convention * r1, r4-r13 must be preserved */ ENTRY(xscale_context_switch) /* * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this. * Thus the data cache will contain only kernel data and the * instruction cache will contain only kernel code, and all * kernel mappings are shared by all processes.
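The range-based variants above all share one shape, sketched here in hedged C (the per-line operation varies per function; xscale_cache_cleanID is the real whole-cache fallback the clean variants branch to): ranges of 0x4000 bytes (16 KiB) or more are punted to the whole-cache operation, while smaller ones are rounded out to 32-byte line boundaries and walked a line at a time:

#include <stdint.h>

#define WHOLE_CACHE_THRESHOLD	0x4000	/* 16 KiB, as in the cmp above */
#define CACHE_LINE		32

extern void xscale_cache_cleanID(void);	/* whole-cache fallback */

static void
dcache_clean_range_sketch(uintptr_t va, long len)
{
	if (len >= WHOLE_CACHE_THRESHOLD) {
		xscale_cache_cleanID();
		return;
	}
	len += va & (CACHE_LINE - 1);		/* cover the unaligned head */
	va &= ~(uintptr_t)(CACHE_LINE - 1);
	for (; len > 0; len -= CACHE_LINE, va += CACHE_LINE)
		__asm __volatile("mcr p15, 0, %0, c7, c10, 1"	/* clean line */
		    : : "r" (va));
}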
*/ /* Write the TTB */ mcr p15, 0, r0, c2, c0, 0 /* If we have updated the TTB we must flush the TLB */ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */ CPWAIT_AND_RETURN(r0) END(xscale_context_switch) /* * xscale_cpu_sleep * * This is called when there is nothing on any of the run queues. * We go into IDLE mode so that any IRQ or FIQ will awaken us. * * If this is called with anything other than ARM_SLEEP_MODE_IDLE, * ignore it. */ ENTRY(xscale_cpu_sleep) tst r0, #0x00000000 bne 1f mov r0, #0x1 mcr p14, 0, r0, c7, c0, 0 1: RET END(xscale_cpu_sleep) Index: head/sys/arm/arm/cpufunc_asm_xscale_c3.S =================================================================== --- head/sys/arm/arm/cpufunc_asm_xscale_c3.S (revision 295266) +++ head/sys/arm/arm/cpufunc_asm_xscale_c3.S (revision 295267) @@ -1,416 +1,400 @@ /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */ /*- * Copyright (c) 2007 Olivier Houchard * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ /*- * Copyright (c) 2001 Matt Thomas. * Copyright (c) 1997,1998 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. 
The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * XScale core 3 assembly functions for CPU / MMU / TLB specific operations */ #include <machine/asm.h> #include <machine/armreg.h> __FBSDID("$FreeBSD$"); /* * Size of the XScale core D-cache. */ #define DCACHE_SIZE 0x00008000 -.Lblock_userspace_access: - .word _C_LABEL(block_userspace_access) - /* * CPWAIT -- Canonical method to wait for CP15 update. * From: Intel 80200 manual, section 2.3.3. * * NOTE: Clobbers the specified temp reg. */ #define CPWAIT_BRANCH \ sub pc, pc, #4 #define CPWAIT(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ mov tmp, tmp /* wait for it to complete */ ;\ CPWAIT_BRANCH /* branch to next insn */ #define CPWAIT_AND_RETURN_SHIFTER lsr #32 #define CPWAIT_AND_RETURN(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ /* Wait for it to complete and branch to the return address */ \ sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER #define ARM_USE_L2_CACHE #define L2_CACHE_SIZE 0x80000 #define L2_CACHE_WAYS 8 #define L2_CACHE_LINE_SIZE 32 #define L2_CACHE_SETS (L2_CACHE_SIZE / \ (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE)) #define L1_DCACHE_SIZE 32 * 1024 #define L1_DCACHE_WAYS 4 #define L1_DCACHE_LINE_SIZE 32 #define L1_DCACHE_SETS (L1_DCACHE_SIZE / \ (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE)) #ifdef CACHE_CLEAN_BLOCK_INTR #define XSCALE_CACHE_CLEAN_BLOCK \ stmfd sp!, {r4} ; \ mrs r4, cpsr ; \ orr r0, r4, #(PSR_I | PSR_F) ; \ msr cpsr_fsxc, r0 #define XSCALE_CACHE_CLEAN_UNBLOCK \ msr cpsr_fsxc, r4 ; \ ldmfd sp!, {r4} #else -#define XSCALE_CACHE_CLEAN_BLOCK \ - stmfd sp!, {r4} ; \ - ldr r4, .Lblock_userspace_access ; \ - ldr ip, [r4] ; \ - orr r0, ip, #1 ; \ - str r0, [r4] - -#define XSCALE_CACHE_CLEAN_UNBLOCK \ - str ip, [r3] ; \ - ldmfd sp!, {r4} +#define XSCALE_CACHE_CLEAN_BLOCK +#define XSCALE_CACHE_CLEAN_UNBLOCK #endif /* CACHE_CLEAN_BLOCK_INTR */ ENTRY_NP(xscalec3_cache_syncI) EENTRY_NP(xscalec3_cache_purgeID) mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */ EENTRY_NP(xscalec3_cache_cleanID) EENTRY_NP(xscalec3_cache_purgeD) EENTRY(xscalec3_cache_cleanD) XSCALE_CACHE_CLEAN_BLOCK mov r0, #0 1: mov r1, r0, asl #30 mov r2, #0 2: orr r3, r1, r2, asl #5 mcr p15, 0, r3, c7, c14, 2 /* clean and invalidate */ add r2, r2, #1 cmp r2, #L1_DCACHE_SETS bne 2b add r0, r0, #1 cmp r0, #4 bne 1b CPWAIT(r0) XSCALE_CACHE_CLEAN_UNBLOCK mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ RET EEND(xscalec3_cache_purgeID) EEND(xscalec3_cache_cleanID) EEND(xscalec3_cache_purgeD) EEND(xscalec3_cache_cleanD) END(xscalec3_cache_syncI) ENTRY(xscalec3_cache_purgeID_rng) cmp r1, #0x4000 bcs _C_LABEL(xscalec3_cache_cleanID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c14, 1 /*
clean/invalidate L1 D cache entry */ nop mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscalec3_cache_purgeID_rng) ENTRY(xscalec3_cache_syncI_rng) cmp r1, #0x4000 bcs _C_LABEL(xscalec3_cache_syncI) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscalec3_cache_syncI_rng) ENTRY(xscalec3_cache_purgeD_rng) cmp r1, #0x4000 bcs _C_LABEL(xscalec3_cache_cleanID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c14, 1 /* Clean and invalidate D cache entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscalec3_cache_purgeD_rng) ENTRY(xscalec3_cache_cleanID_rng) EENTRY(xscalec3_cache_cleanD_rng) cmp r1, #0x4000 bcs _C_LABEL(xscalec3_cache_cleanID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean L1 D cache entry */ nop add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) EEND(xscalec3_cache_cleanD_rng) END(xscalec3_cache_cleanID_rng) ENTRY(xscalec3_l2cache_purge) /* Clean-up the L2 cache */ mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ mov r0, #0 1: mov r1, r0, asl #29 mov r2, #0 2: orr r3, r1, r2, asl #5 mcr p15, 1, r3, c7, c15, 2 add r2, r2, #1 cmp r2, #L2_CACHE_SETS bne 2b add r0, r0, #1 cmp r0, #8 bne 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier CPWAIT(r0) mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ RET END(xscalec3_l2cache_purge) ENTRY(xscalec3_l2cache_clean_rng) mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c10, 5 CPWAIT_AND_RETURN(r0) END(xscalec3_l2cache_clean_rng) ENTRY(xscalec3_l2cache_purge_rng) mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */ mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 D cache entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c10, 5 CPWAIT_AND_RETURN(r0) END(xscalec3_l2cache_purge_rng) ENTRY(xscalec3_l2cache_flush_rng) mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 cache line */ add r0, r0, #32 subs r1, r1, #32 bhi 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c10, 5 CPWAIT_AND_RETURN(r0) END(xscalec3_l2cache_flush_rng) /* * Functions to set the MMU Translation Table Base register * * We need to clean and flush the cache as it uses virtual * addresses that are about to change. 
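For the set/way loops above (xscalec3_cache_cleanD and xscalec3_l2cache_purge), here is a hedged C rendering of the L2 variant (illustration only; the real code also brackets the loop with data memory barriers and a write barrier): the operand register packs the way index into the top bits (bits 31:29 for the 8-way L2) and the set index starting at bit 5, and the loop issues one clean-and-invalidate per (way, set) pair:

#include <stdint.h>

#define L2_WAYS_SK	8
#define L2_SETS_SK	2048	/* 512 KiB / (8 ways * 32-byte lines) */

static void
l2cache_purge_sketch(void)
{
	uint32_t way, set, op;

	for (way = 0; way < L2_WAYS_SK; way++)
		for (set = 0; set < L2_SETS_SK; set++) {
			op = (way << 29) | (set << 5);
			/* clean+invalidate the L2 line selected by set/way */
			__asm __volatile("mcr p15, 1, %0, c7, c15, 2"
			    : : "r" (op));
		}
}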
*/ ENTRY(xscalec3_setttb) #ifdef CACHE_CLEAN_BLOCK_INTR mrs r3, cpsr orr r1, r3, #(PSR_I | PSR_F) msr cpsr_fsxc, r1 -#else - ldr r3, .Lblock_userspace_access - ldr r2, [r3] - orr r1, r2, #1 - str r1, [r3] #endif stmfd sp!, {r0-r3, lr} bl _C_LABEL(xscalec3_cache_cleanID) mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */ CPWAIT(r0) ldmfd sp!, {r0-r3, lr} #ifdef ARM_USE_L2_CACHE orr r0, r0, #0x18 /* cache the page table in L2 */ #endif /* Write the TTB */ mcr p15, 0, r0, c2, c0, 0 /* If we have updated the TTB we must flush the TLB */ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */ CPWAIT(r0) #ifdef CACHE_CLEAN_BLOCK_INTR msr cpsr_fsxc, r3 #else str r2, [r3] #endif RET END(xscalec3_setttb) /* * Context switch. * * These are the CPU-specific parts of the context switcher, cpu_switch(). * These functions actually perform the TTB reload. * * NOTE: Special calling convention * r1, r4-r13 must be preserved */ ENTRY(xscalec3_context_switch) /* * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this. * Thus the data cache will contain only kernel data and the * instruction cache will contain only kernel code, and all * kernel mappings are shared by all processes. */ #ifdef ARM_USE_L2_CACHE orr r0, r0, #0x18 /* Cache the page table in L2 */ #endif /* Write the TTB */ mcr p15, 0, r0, c2, c0, 0 /* If we have updated the TTB we must flush the TLB */ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */ CPWAIT_AND_RETURN(r0) END(xscalec3_context_switch) Index: head/sys/arm/arm/elf_trampoline.c =================================================================== --- head/sys/arm/arm/elf_trampoline.c (revision 295266) +++ head/sys/arm/arm/elf_trampoline.c (revision 295267) @@ -1,736 +1,735 @@ /*- * Copyright (c) 2005 Olivier Houchard. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Since we are compiled outside of the normal kernel build process, we * need to include opt_global.h manually.
*/ #include "opt_global.h" #include "opt_kernname.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include extern char kernel_start[]; extern char kernel_end[]; extern void *_end; void _start(void); void __start(void); void __startC(void); extern unsigned int cpu_ident(void); extern void armv6_idcache_wbinv_all(void); extern void armv7_idcache_wbinv_all(void); extern void do_call(void *, void *, void *, int); #define GZ_HEAD 0xa #if defined(CPU_ARM9) #define cpu_idcache_wbinv_all arm9_idcache_wbinv_all extern void arm9_idcache_wbinv_all(void); #elif defined(CPU_FA526) #define cpu_idcache_wbinv_all fa526_idcache_wbinv_all extern void fa526_idcache_wbinv_all(void); #elif defined(CPU_ARM9E) #define cpu_idcache_wbinv_all armv5_ec_idcache_wbinv_all extern void armv5_ec_idcache_wbinv_all(void); #elif defined(CPU_ARM1176) #define cpu_idcache_wbinv_all armv6_idcache_wbinv_all #elif defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) #define cpu_idcache_wbinv_all xscale_cache_purgeID extern void xscale_cache_purgeID(void); #elif defined(CPU_XSCALE_81342) #define cpu_idcache_wbinv_all xscalec3_cache_purgeID extern void xscalec3_cache_purgeID(void); #elif defined(CPU_MV_PJ4B) #if !defined(SOC_MV_ARMADAXP) #define cpu_idcache_wbinv_all armv6_idcache_wbinv_all extern void armv6_idcache_wbinv_all(void); #else #define cpu_idcache_wbinv_all() armadaxp_idcache_wbinv_all #endif #endif /* CPU_MV_PJ4B */ #ifdef CPU_XSCALE_81342 #define cpu_l2cache_wbinv_all xscalec3_l2cache_purge extern void xscalec3_l2cache_purge(void); #elif defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) #define cpu_l2cache_wbinv_all sheeva_l2cache_wbinv_all extern void sheeva_l2cache_wbinv_all(void); #elif defined(CPU_CORTEXA) || defined(CPU_KRAIT) #define cpu_idcache_wbinv_all armv7_idcache_wbinv_all #define cpu_l2cache_wbinv_all() #else #define cpu_l2cache_wbinv_all() #endif static void armadaxp_idcache_wbinv_all(void); int arm_picache_size; int arm_picache_line_size; int arm_picache_ways; int arm_pdcache_size; /* and unified */ int arm_pdcache_line_size = 32; int arm_pdcache_ways; int arm_pcache_type; int arm_pcache_unified; int arm_dcache_align; int arm_dcache_align_mask; int arm_dcache_min_line_size = 32; int arm_icache_min_line_size = 32; int arm_idcache_min_line_size = 32; u_int arm_cache_level; u_int arm_cache_type[14]; u_int arm_cache_loc; /* Additional cache information local to this file. Log2 of some of the above numbers. */ static int arm_dcache_l2_nsets; static int arm_dcache_l2_assoc; static int arm_dcache_l2_linesize; -int block_userspace_access = 0; extern int arm9_dcache_sets_inc; extern int arm9_dcache_sets_max; extern int arm9_dcache_index_max; extern int arm9_dcache_index_inc; static __inline void * memcpy(void *dst, const void *src, int len) { const char *s = src; char *d = dst; while (len) { if (0 && len >= 4 && !((vm_offset_t)d & 3) && !((vm_offset_t)s & 3)) { *(uint32_t *)d = *(uint32_t *)s; s += 4; d += 4; len -= 4; } else { *d++ = *s++; len--; } } return (dst); } static __inline void bzero(void *addr, int count) { char *tmp = (char *)addr; while (count > 0) { if (count >= 4 && !((vm_offset_t)tmp & 3)) { *(uint32_t *)tmp = 0; tmp += 4; count -= 4; } else { *tmp = 0; tmp++; count--; } } } static void arm9_setup(void); void _startC(void) { int tmp1; unsigned int sp = ((unsigned int)&_end & ~3) + 4; unsigned int pc, kernphysaddr; /* * Figure out the physical address the kernel was loaded at. 
This * assumes the entry point (this code right here) is in the first page, * which will always be the case for this trampoline code. */ __asm __volatile("mov %0, pc\n" : "=r" (pc)); kernphysaddr = pc & ~PAGE_MASK; #if defined(FLASHADDR) && defined(PHYSADDR) && defined(LOADERRAMADDR) if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) || (FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) { /* * We're running from flash, so just copy the whole thing * from flash to memory. * This is far from optimal, we could do the relocation or * the unzipping directly from flash to memory to avoid this * needless copy, but it would require to know the flash * physical address. */ unsigned int target_addr; unsigned int tmp_sp; uint32_t src_addr = (uint32_t)&_start - PHYSADDR + FLASHADDR + (pc - FLASHADDR - ((uint32_t)&_startC - PHYSADDR)) & 0xfffff000; target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR; tmp_sp = target_addr + 0x100000 + (unsigned int)&_end - (unsigned int)&_start; memcpy((char *)target_addr, (char *)src_addr, (unsigned int)&_end - (unsigned int)&_start); /* Temporary set the sp and jump to the new location. */ __asm __volatile( "mov sp, %1\n" "mov pc, %0\n" : : "r" (target_addr), "r" (tmp_sp)); } #endif #ifdef KZIP sp += KERNSIZE + 0x100; sp &= ~(L1_TABLE_SIZE - 1); sp += 2 * L1_TABLE_SIZE; #endif sp += 1024 * 1024; /* Should be enough for a stack */ __asm __volatile("adr %0, 2f\n" "bic %0, %0, #0xff000000\n" "and %1, %1, #0xff000000\n" "orr %0, %0, %1\n" "mrc p15, 0, %1, c1, c0, 0\n" "bic %1, %1, #1\n" /* Disable MMU */ "orr %1, %1, #(4 | 8)\n" /* Add DC enable, WBUF enable */ "orr %1, %1, #0x1000\n" /* Add IC enable */ "orr %1, %1, #(0x800)\n" /* BPRD enable */ "mcr p15, 0, %1, c1, c0, 0\n" "nop\n" "nop\n" "nop\n" "mov pc, %0\n" "2: nop\n" "mov sp, %2\n" : "=r" (tmp1), "+r" (kernphysaddr), "+r" (sp)); #ifndef KZIP #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ if ((cpu_ident() & 0x0000f000) == 0x00009000) arm9_setup(); #endif #endif __start(); } static void get_cachetype_cp15() { u_int ctype, isize, dsize, cpuid; u_int clevel, csize, i, sel; u_int multiplier; u_char type; __asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctype)); cpuid = cpu_ident(); /* * ...and thus spake the ARM ARM: * * If an value corresponding to an unimplemented or * reserved ID register is encountered, the System Control * processor returns the value of the main ID register. */ if (ctype == cpuid) goto out; if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) { /* Resolve minimal cache line sizes */ arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2); arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2); arm_idcache_min_line_size = (arm_dcache_min_line_size > arm_icache_min_line_size ? 
arm_icache_min_line_size : arm_dcache_min_line_size); __asm __volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (clevel)); arm_cache_level = clevel; arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level) + 1; i = 0; while ((type = (clevel & 0x7)) && i < 7) { if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE || type == CACHE_SEP_CACHE) { sel = i << 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) { sel = (i << 1) | 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } i++; clevel >>= 3; } } else { if ((ctype & CPU_CT_S) == 0) arm_pcache_unified = 1; /* * If you want to know how this code works, go read the ARM ARM. */ arm_pcache_type = CPU_CT_CTYPE(ctype); if (arm_pcache_unified == 0) { isize = CPU_CT_ISIZE(ctype); multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2; arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3); if (CPU_CT_xSIZE_ASSOC(isize) == 0) { if (isize & CPU_CT_xSIZE_M) arm_picache_line_size = 0; /* not present */ else arm_picache_ways = 1; } else { arm_picache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(isize) - 1); } arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8); } dsize = CPU_CT_DSIZE(ctype); multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2; arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3); if (CPU_CT_xSIZE_ASSOC(dsize) == 0) { if (dsize & CPU_CT_xSIZE_M) arm_pdcache_line_size = 0; /* not present */ else arm_pdcache_ways = 1; } else { arm_pdcache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(dsize) - 1); } arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8); arm_dcache_align = arm_pdcache_line_size; arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2; arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3; arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize); out: arm_dcache_align_mask = arm_dcache_align - 1; } } static void arm9_setup(void) { get_cachetype_cp15(); arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize; arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) - arm9_dcache_sets_inc; arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc); arm9_dcache_index_max = 0U - arm9_dcache_index_inc; } static void armadaxp_idcache_wbinv_all(void) { uint32_t feat; __asm __volatile("mrc p15, 0, %0, c0, c1, 0" : "=r" (feat)); if (feat & ARM_PFR0_THUMBEE_MASK) armv7_idcache_wbinv_all(); else armv6_idcache_wbinv_all(); } #ifdef KZIP static unsigned char *orig_input, *i_input, *i_output; static u_int memcnt; /* Memory allocated: blocks */ static size_t memtot; /* Memory allocated: bytes */ /* * Library functions required by inflate(). */ #define MEMSIZ 0x8000 /* * Allocate memory block. */ unsigned char * kzipmalloc(int size) { void *ptr; static u_char mem[MEMSIZ]; if (memtot + size > MEMSIZ) return NULL; ptr = mem + memtot; memtot += size; memcnt++; return ptr; } /* * Free allocated memory block. 
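Returning to get_cachetype_cp15() above: the pre-ARMv7 decoding is all shift arithmetic on the cache type register fields. A worked example in C, with hypothetical field values chosen to match an XScale-style D-cache (M = 0, LEN = 2, ASSOC = 5, SIZE = 6):

#include <stdint.h>

static void
ctype_decode_example(void)
{
	uint32_t m = 0, len_f = 2, assoc_f = 5, size_f = 6;
	uint32_t multiplier = m ? 3 : 2;

	uint32_t line_size = 1u << (len_f + 3);		/* 1 << 5  = 32 bytes */
	uint32_t ways = multiplier << (assoc_f - 1);	/* 2 << 4  = 32 ways  */
	uint32_t size = multiplier << (size_f + 8);	/* 2 << 14 = 32 KiB   */

	(void)line_size; (void)ways; (void)size;
}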
*/ void kzipfree(void *ptr) { memcnt--; if (!memcnt) memtot = 0; } void putstr(char *dummy) { } static int input(void *dummy) { if ((size_t)(i_input - orig_input) >= KERNCOMPSIZE) { return (GZ_EOF); } return *i_input++; } static int output(void *dummy, unsigned char *ptr, unsigned long len) { memcpy(i_output, ptr, len); i_output += len; return (0); } static void * inflate_kernel(void *kernel, void *startaddr) { struct inflate infl; unsigned char slide[GZ_WSIZE]; orig_input = kernel; memcnt = memtot = 0; i_input = (unsigned char *)kernel + GZ_HEAD; if (((char *)kernel)[3] & 0x18) { while (*i_input) i_input++; i_input++; } i_output = startaddr; bzero(&infl, sizeof(infl)); infl.gz_input = input; infl.gz_output = output; infl.gz_slide = slide; inflate(&infl); return ((char *)(((vm_offset_t)i_output & ~3) + 4)); } #endif void * load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end, int d) { Elf32_Ehdr *eh; Elf32_Phdr phdr[64] /* XXX */, *php; Elf32_Shdr shdr[64] /* XXX */; int i,j; void *entry_point; int symtabindex = -1; int symstrindex = -1; vm_offset_t lastaddr = 0; Elf_Addr ssym = 0; Elf_Dyn *dp; eh = (Elf32_Ehdr *)kstart; ssym = 0; entry_point = (void*)eh->e_entry; memcpy(phdr, (void *)(kstart + eh->e_phoff ), eh->e_phnum * sizeof(phdr[0])); /* Determine lastaddr. */ for (i = 0; i < eh->e_phnum; i++) { if (lastaddr < (phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_memsz)) lastaddr = phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_memsz; } /* Save the symbol tables, as there're about to be scratched. */ memcpy(shdr, (void *)(kstart + eh->e_shoff), sizeof(*shdr) * eh->e_shnum); if (eh->e_shnum * eh->e_shentsize != 0 && eh->e_shoff != 0) { for (i = 0; i < eh->e_shnum; i++) { if (shdr[i].sh_type == SHT_SYMTAB) { for (j = 0; j < eh->e_phnum; j++) { if (phdr[j].p_type == PT_LOAD && shdr[i].sh_offset >= phdr[j].p_offset && (shdr[i].sh_offset + shdr[i].sh_size <= phdr[j].p_offset + phdr[j].p_filesz)) { shdr[i].sh_offset = 0; shdr[i].sh_size = 0; j = eh->e_phnum; } } if (shdr[i].sh_offset != 0 && shdr[i].sh_size != 0) { symtabindex = i; symstrindex = shdr[i].sh_link; } } } func_end = roundup(func_end, sizeof(long)); if (symtabindex >= 0 && symstrindex >= 0) { ssym = lastaddr; if (d) { memcpy((void *)func_end, (void *)( shdr[symtabindex].sh_offset + kstart), shdr[symtabindex].sh_size); memcpy((void *)(func_end + shdr[symtabindex].sh_size), (void *)(shdr[symstrindex].sh_offset + kstart), shdr[symstrindex].sh_size); } else { lastaddr += shdr[symtabindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symtabindex].sh_size)); lastaddr += sizeof(shdr[symstrindex].sh_size); lastaddr += shdr[symstrindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symstrindex].sh_size)); } } } if (!d) return ((void *)lastaddr); j = eh->e_phnum; for (i = 0; i < j; i++) { volatile char c; if (phdr[i].p_type != PT_LOAD) continue; memcpy((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr), (void*)(kstart + phdr[i].p_offset), phdr[i].p_filesz); /* Clean space from oversized segments, eg: bss. */ if (phdr[i].p_filesz < phdr[i].p_memsz) bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_filesz), phdr[i].p_memsz - phdr[i].p_filesz); } /* Now grab the symbol tables. 
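A sketch of the layout load_kernel() assembles here (assumed from the copies above and below; the struct name is illustrative): the symbol blob appended after the last loaded segment is [Elf_Size symtab_size][symtab][Elf_Size strtab_size][strtab], each part rounded up, and the first three words at curaddr advertise it to the kernel:

#include <stdint.h>

struct tramp_header_sketch {
	uint32_t magic;	/* MAGIC_TRAMP_NUMBER */
	uint32_t ssym;	/* start of the symbol blob, as a kernel VA */
	uint32_t esym;	/* end of the symbol blob (lastaddr), as a kernel VA */
};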
*/ if (symtabindex >= 0 && symstrindex >= 0) { *(Elf_Size *)lastaddr = shdr[symtabindex].sh_size; lastaddr += sizeof(shdr[symtabindex].sh_size); memcpy((void*)lastaddr, (void *)func_end, shdr[symtabindex].sh_size); lastaddr += shdr[symtabindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symtabindex].sh_size)); *(Elf_Size *)lastaddr = shdr[symstrindex].sh_size; lastaddr += sizeof(shdr[symstrindex].sh_size); memcpy((void*)lastaddr, (void*)(func_end + shdr[symtabindex].sh_size), shdr[symstrindex].sh_size); lastaddr += shdr[symstrindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symstrindex].sh_size)); *(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER; *((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR; *((Elf_Addr *)curaddr + 2) = lastaddr - curaddr + KERNVIRTADDR; } else *(Elf_Addr *)curaddr = 0; /* Invalidate the instruction cache. */ __asm __volatile("mcr p15, 0, %0, c7, c5, 0\n" "mcr p15, 0, %0, c7, c10, 4\n" : : "r" (curaddr)); __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" "bic %0, %0, #1\n" /* MMU_ENABLE */ "mcr p15, 0, %0, c1, c0, 0\n" : "=r" (ssym)); /* Jump to the entry point. */ ((void(*)(void))(entry_point - KERNVIRTADDR + curaddr))(); __asm __volatile(".globl func_end\n" "func_end:"); /* NOTREACHED */ return NULL; } extern char func_end[]; #define PMAP_DOMAIN_KERNEL 0 /* * Just define it instead of including the * whole VM headers set. */ int __hack; static __inline void setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend, int write_back) { unsigned int *pd = (unsigned int *)pt_addr; vm_paddr_t addr; int domain = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT; int tmp; bzero(pd, L1_TABLE_SIZE); for (addr = physstart; addr < physend; addr += L1_S_SIZE) { pd[addr >> L1_S_SHIFT] = L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)| L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr; if (write_back && 0) pd[addr >> L1_S_SHIFT] |= L1_S_B; } /* XXX: See below */ if (0xfff00000 < physstart || 0xfff00000 > physend) pd[0xfff00000 >> L1_S_SHIFT] = L1_TYPE_S|L1_S_AP(AP_KRW)| L1_S_DOM(PMAP_DOMAIN_KERNEL)|physstart; __asm __volatile("mcr p15, 0, %1, c2, c0, 0\n" /* set TTB */ "mcr p15, 0, %1, c8, c7, 0\n" /* Flush TTB */ "mcr p15, 0, %2, c3, c0, 0\n" /* Set DAR */ "mrc p15, 0, %0, c1, c0, 0\n" "orr %0, %0, #1\n" /* MMU_ENABLE */ "mcr p15, 0, %0, c1, c0, 0\n" "mrc p15, 0, %0, c2, c0, 0\n" /* CPWAIT */ "mov r0, r0\n" "sub pc, pc, #4\n" : "=r" (tmp) : "r" (pd), "r" (domain)); /* * XXX: This is the most stupid workaround I've ever wrote. * For some reason, the KB9202 won't boot the kernel unless * we access an address which is not in the * 0x20000000 - 0x20ffffff range. I hope I'll understand * what's going on later. */ __hack = *(volatile int *)0xfffff21c; } void __start(void) { void *curaddr; void *dst, *altdst; char *kernel = (char *)&kernel_start; int sp; int pt_addr; __asm __volatile("mov %0, pc" : "=r" (curaddr)); curaddr = (void*)((unsigned int)curaddr & 0xfff00000); #ifdef KZIP if (*kernel == 0x1f && kernel[1] == 0x8b) { pt_addr = (((int)&_end + KERNSIZE + 0x100) & ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE; #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ if ((cpu_ident() & 0x0000f000) == 0x00009000) arm9_setup(); #endif setup_pagetables(pt_addr, (vm_paddr_t)curaddr, (vm_paddr_t)curaddr + 0x10000000, 1); /* Gzipped kernel */ dst = inflate_kernel(kernel, &_end); kernel = (char *)&_end; altdst = 4 + load_kernel((unsigned int)kernel, (unsigned int)curaddr, (unsigned int)&func_end + 800 , 0); if (altdst > dst) dst = altdst; /* * Disable MMU. 
Otherwise, setup_pagetables call below * might overwrite the L1 table we are currently using. */ cpu_idcache_wbinv_all(); cpu_l2cache_wbinv_all(); __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" "bic %0, %0, #1\n" /* MMU_DISABLE */ "mcr p15, 0, %0, c1, c0, 0\n" :"=r" (pt_addr)); } else #endif dst = 4 + load_kernel((unsigned int)&kernel_start, (unsigned int)curaddr, (unsigned int)&func_end, 0); dst = (void *)(((vm_offset_t)dst & ~3)); pt_addr = ((unsigned int)dst &~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE; setup_pagetables(pt_addr, (vm_paddr_t)curaddr, (vm_paddr_t)curaddr + 0x10000000, 0); sp = pt_addr + L1_TABLE_SIZE + 8192; sp = sp &~3; dst = (void *)(sp + 4); memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end - (unsigned int)&load_kernel + 800); do_call(dst, kernel, dst + (unsigned int)(&func_end) - (unsigned int)(&load_kernel) + 800, sp); } /* We need to provide these functions but never call them */ void __aeabi_unwind_cpp_pr0(void); void __aeabi_unwind_cpp_pr1(void); void __aeabi_unwind_cpp_pr2(void); __strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr1); __strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr2); void __aeabi_unwind_cpp_pr0(void) { } Index: head/sys/arm/arm/fusu.S =================================================================== --- head/sys/arm/arm/fusu.S (revision 295266) +++ head/sys/arm/arm/fusu.S (revision 295267) @@ -1,437 +1,350 @@ /* $NetBSD: fusu.S,v 1.10 2003/12/01 13:34:44 rearnsha Exp $ */ /*- * Copyright (c) 1996-1998 Mark Brinicombe. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include #include #include #include "assym.s" __FBSDID("$FreeBSD$"); .syntax unified #if __ARM_ARCH >= 6 #define GET_PCB(tmp) \ mrc p15, 0, tmp, c13, c0, 4; \ add tmp, tmp, #(TD_PCB) #else .Lcurpcb: .word _C_LABEL(__pcpu) + PC_CURPCB #define GET_PCB(tmp) \ ldr tmp, .Lcurpcb #endif /* * casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp, * uint32_t newval); */ ENTRY(casueword) EENTRY_NP(casueword32) stmfd sp!, {r4, r5, r6} ldr r4, =(VM_MAXUSER_ADDRESS-3) cmp r0, r4 mvncs r0, #0 bcs 2f GET_PCB(r6) ldr r6, [r6] #ifdef DIAGNOSTIC teq r6, #0x00000000 ldmfdeq sp!, {r4, r5, r6} beq .Lfusupcbfault #endif adr r4, .Lcasuwordfault str r4, [r6, #PCB_ONFAULT] #if __ARM_ARCH >= 6 1: ldrex r4, [r0] cmp r4, r1 strexeq r5, r3, [r0] cmpeq r5, #1 beq 1b #else ldrt r4, [r0] cmp r4, r1 strteq r3, [r0] #endif str r4, [r2] mov r0, #0 str r0, [r6, #PCB_ONFAULT] 2: ldmfd sp!, {r4, r5, r6} RET EEND(casueword32) END(casueword) /* * Handle faults from casuword. Clean up and return -1. */ .Lcasuwordfault: mov r0, #0x00000000 str r0, [r6, #PCB_ONFAULT] mvn r0, #0 ldmfd sp!, {r4, r5, r6} RET /* * fueword(caddr_t uaddr, long *val); * Fetch an int from the user's address space. */ ENTRY(fueword) EENTRY_NP(fueword32) ldr r3, =(VM_MAXUSER_ADDRESS-3) cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] ldrt r3, [r0] str r3, [r1] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET EEND(fueword32) END(fueword) /* * fusword(caddr_t uaddr); * Fetch a short from the user's address space. */ ENTRY(fusword) ldr r3, =(VM_MAXUSER_ADDRESS-1) cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r1, .Lfusufault str r1, [r2, #PCB_ONFAULT] ldrbt r3, [r0], #1 ldrbt ip, [r0] #ifdef __ARMEB__ orr r0, ip, r3, asl #8 #else orr r0, r3, ip, asl #8 #endif mov r1, #0x00000000 str r1, [r2, #PCB_ONFAULT] RET END(fusword) /* * fuswintr(caddr_t uaddr); * Fetch a short from the user's address space. Can be called during an * interrupt. */ ENTRY(fuswintr) - ldr r3, =(VM_MAXUSER_ADDRESS-1) - cmp r0, r3 - mvncs r0, #0 - RETc(cs) - - ldr r2, Lblock_userspace_access - ldr r2, [r2] - teq r2, #0 - mvnne r0, #0x00000000 - RETne - - GET_PCB(r2) - ldr r2, [r2] - -#ifdef DIAGNOSTIC - teq r2, #0x00000000 - beq .Lfusupcbfault -#endif - - adr r1, _C_LABEL(fusubailout) - str r1, [r2, #PCB_ONFAULT] - - ldrbt r3, [r0], #1 - ldrbt ip, [r0] -#ifdef __ARMEB__ - orr r0, ip, r3, asl #8 -#else - orr r0, r3, ip, asl #8 -#endif - - mov r1, #0x00000000 - str r1, [r2, #PCB_ONFAULT] + mov r0, #-1 RET END(fuswintr) -Lblock_userspace_access: - .word _C_LABEL(block_userspace_access) - - .data - .align 2 - .global _C_LABEL(block_userspace_access) -_C_LABEL(block_userspace_access): - .word 0 - .text - /* * fubyte(caddr_t uaddr); * Fetch a byte from the user's address space. */ ENTRY(fubyte) ldr r3, =VM_MAXUSER_ADDRESS cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r1, .Lfusufault str r1, [r2, #PCB_ONFAULT] ldrbt r3, [r0] mov r1, #0x00000000 str r1, [r2, #PCB_ONFAULT] mov r0, r3 RET END(fubyte) /* * Handle faults from [fs]u*(). Clean up and return -1. */ .Lfusufault: mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] mvn r0, #0x00000000 RET -/* - * Handle faults from [fs]u*(). Clean up and return -1. 
This differs from - * fusufault() in that trap() will recognise it and return immediately rather - * than trying to page fault. - */ - -/* label must be global as fault.c references it */ - .global _C_LABEL(fusubailout) -_C_LABEL(fusubailout): - mov r0, #0x00000000 - str r0, [r2, #PCB_ONFAULT] - mvn r0, #0x00000000 - RET - #ifdef DIAGNOSTIC /* * Handle earlier faults from [fs]u*(), due to no pcb */ .Lfusupcbfault: mov r1, r0 adr r0, fusupcbfaulttext b _C_LABEL(panic) fusupcbfaulttext: .asciz "Yikes - no valid PCB during fusuxxx() addr=%08x\n" .align 2 #endif /* * suword(caddr_t uaddr, int x); * Store an int in the user's address space. */ ENTRY(suword) EENTRY_NP(suword32) ldr r3, =(VM_MAXUSER_ADDRESS-3) cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] strt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET EEND(suword32) END(suword) /* * suswintr(caddr_t uaddr, short x); * Store a short in the user's address space. Can be called during an * interrupt. */ ENTRY(suswintr) - ldr r3, =(VM_MAXUSER_ADDRESS-1) - cmp r0, r3 - mvncs r0, #0 - RETc(cs) - - ldr r2, Lblock_userspace_access - ldr r2, [r2] - teq r2, #0 - mvnne r0, #0x00000000 - RETne - - GET_PCB(r2) - ldr r2, [r2] - -#ifdef DIAGNOSTIC - teq r2, #0x00000000 - beq .Lfusupcbfault -#endif - - adr r3, _C_LABEL(fusubailout) - str r3, [r2, #PCB_ONFAULT] - -#ifdef __ARMEB__ - mov ip, r1, lsr #8 - strbt ip, [r0], #1 -#else - strbt r1, [r0], #1 - mov r1, r1, lsr #8 -#endif - strbt r1, [r0] - - mov r0, #0x00000000 - str r0, [r2, #PCB_ONFAULT] + mov r0, #-1 RET END(suswintr) /* * susword(caddr_t uaddr, short x); * Store a short in the user's address space. */ ENTRY(susword) ldr r3, =(VM_MAXUSER_ADDRESS-1) cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] #ifdef __ARMEB__ mov ip, r1, lsr #8 strbt ip, [r0], #1 #else strbt r1, [r0], #1 mov r1, r1, lsr #8 #endif strbt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET END(susword) /* * subyte(caddr_t uaddr, char x); * Store a byte in the user's address space. */ ENTRY(subyte) ldr r3, =VM_MAXUSER_ADDRESS cmp r0, r3 mvncs r0, #0 RETc(cs) GET_PCB(r2) ldr r2, [r2] #ifdef DIAGNOSTIC teq r2, #0x00000000 beq .Lfusupcbfault #endif adr r3, .Lfusufault str r3, [r2, #PCB_ONFAULT] strbt r1, [r0] mov r0, #0x00000000 str r0, [r2, #PCB_ONFAULT] RET END(subyte) Index: head/sys/arm/arm/trap-v6.c =================================================================== --- head/sys/arm/arm/trap-v6.c (revision 295266) +++ head/sys/arm/arm/trap-v6.c (revision 295267) @@ -1,657 +1,649 @@ /*- * Copyright 2014 Olivier Houchard * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * Copyright 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_ktrace.h" #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #ifdef KTRACE #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KDB #include #include #endif #ifdef KDTRACE_HOOKS #include #endif -extern char fusubailout[]; extern char cachebailout[]; #ifdef DEBUG int last_fault_code; /* For the benefit of pmap_fault_fixup() */ #endif struct ksig { int sig; u_long code; vm_offset_t addr; }; typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int, struct thread *, struct ksig *); static abort_func_t abort_fatal; static abort_func_t abort_align; static abort_func_t abort_icache; struct abort { abort_func_t *func; const char *desc; }; /* * How are the aborts handled? * * Undefined Code: * - Always fatal as we do not know what it means. * Imprecise External Abort: * - Always fatal, but can be handled somehow in the future. * For now, due to buggy PCIe hardware, it is ignored. * Precise External Abort: * - Always fatal, but who knows in the future??? * Debug Event: * - Special handling. * External Translation Abort (L1 & L2): * - Always fatal as something is screwed up in page tables or hardware. * Domain Fault (L1 & L2): * - Always fatal as we do not play games with domains. * Alignment Fault: * - Everything should be aligned in the kernel, with the exception of user * to kernel and vice versa data copying, so if pcb_onfault is not set, * it's fatal. We generate a signal in case of an abort from user mode. * Instruction cache maintenance: * - According to the manual, this is a translation fault during a cache * maintenance operation. So, it could be really complex in the SMP case * and fuzzy too for cache operations working on virtual addresses. For * now, we will consider this abort as fatal. In fact, no cache maintenance * should be performed on unmapped virtual addresses. As cache maintenance * operations (except DMB, DSB, and Flush Prefetch Buffer) are privileged, * the abort is fatal for user mode as well, for now. (This is a good place * to note that cache maintenance on a virtual address fills the TLB.) * Access Bit (L1 & L2): * - Fast hardware emulation for kernel and user mode. * Translation Fault (L1 & L2): * - The standard fault mechanism is used, including vm_fault(). * Permission Fault (L1 & L2): * - Fast hardware emulation of modify bits; in other cases, the standard * fault mechanism is used, including vm_fault().
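The table that follows encodes this taxonomy. As a rough model of how such a table is consumed (hypothetical, with deliberately simplified signatures; the real dispatch appears in abort_handler() further down), a NULL handler means "fall through to the normal vm_fault() path":

#include <stddef.h>

struct trapframe;			/* opaque for this sketch */

typedef int abort_sketch_func_t(struct trapframe *, unsigned idx, unsigned fsr);

struct abort_sketch {
	abort_sketch_func_t *func;	/* NULL => normal vm_fault() path */
	const char *desc;
};

/* Returns nonzero when the handler asks for a signal to be delivered. */
static int
dispatch_abort_sketch(const struct abort_sketch *tab, unsigned idx,
    struct trapframe *tf, unsigned fsr)
{
	if (tab[idx].func == NULL)
		return (0);	/* hand off to the standard fault path */
	return (tab[idx].func(tf, idx, fsr));
}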
static const struct abort aborts[] = { {abort_fatal, "Undefined Code (0x000)"}, {abort_align, "Alignment Fault"}, {abort_fatal, "Debug Event"}, {NULL, "Access Bit (L1)"}, {NULL, "Instruction cache maintenance"}, {NULL, "Translation Fault (L1)"}, {NULL, "Access Bit (L2)"}, {NULL, "Translation Fault (L2)"}, {abort_fatal, "External Abort"}, {abort_fatal, "Domain Fault (L1)"}, {abort_fatal, "Undefined Code (0x00A)"}, {abort_fatal, "Domain Fault (L2)"}, {abort_fatal, "External Translation Abort (L1)"}, {NULL, "Permission Fault (L1)"}, {abort_fatal, "External Translation Abort (L2)"}, {NULL, "Permission Fault (L2)"}, {abort_fatal, "TLB Conflict Abort"}, {abort_fatal, "Undefined Code (0x401)"}, {abort_fatal, "Undefined Code (0x402)"}, {abort_fatal, "Undefined Code (0x403)"}, {abort_fatal, "Undefined Code (0x404)"}, {abort_fatal, "Undefined Code (0x405)"}, {abort_fatal, "Asynchronous External Abort"}, {abort_fatal, "Undefined Code (0x407)"}, {abort_fatal, "Asynchronous Parity Error on Memory Access"}, {abort_fatal, "Parity Error on Memory Access"}, {abort_fatal, "Undefined Code (0x40A)"}, {abort_fatal, "Undefined Code (0x40B)"}, {abort_fatal, "Parity Error on Translation (L1)"}, {abort_fatal, "Undefined Code (0x40D)"}, {abort_fatal, "Parity Error on Translation (L2)"}, {abort_fatal, "Undefined Code (0x40F)"} }; static __inline void call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr) { ksiginfo_t ksi; CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d", __func__, addr, sig, code); /* * TODO: some info would be nice to know * if we are serving a data or prefetch abort. */ ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = code; ksi.ksi_addr = (void *)addr; trapsignal(td, &ksi); } /* * abort_imprecise() handles the following abort: * * FAULT_EA_IMPREC - Imprecise External Abort * * Imprecise means that we don't know where the abort happened, and * thus FAR is undefined. The abort should never fire, but hot * plugging or accidental hardware failure can cause it. If the * abort happens, it can even be on a different (thread) context. * Without any additional support, the abort is fatal, as we do not * know what really happened. * * QQQ: Some additional functionality, like pcb_onfault but global, * can be implemented. Imprecise handlers could be registered * which tell us if the abort is caused by something they know * about. They should return one of three codes like: * FAULT_IS_MINE, * FAULT_CAN_BE_MINE, * FAULT_IS_NOT_MINE. * The handlers should be called until one of them returns * FAULT_IS_MINE, or all have been called. If all handlers return * FAULT_IS_NOT_MINE, then the abort is fatal. */ static __inline void abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode) { /* * XXX - We can get an imprecise abort as a result of an access * to not-present PCI/PCIe configuration space. */ #if 0 goto out; #endif abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL); /* * Returning from this function means that we ignore * the abort for a good reason. Note that an imprecise abort * could fire at any time, even in user mode. */ #if 0 out: if (usermode) userret(curthread, tf); #endif } /* * abort_debug() handles the following abort: * * FAULT_DEBUG - Debug Event * */ static __inline void abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode, u_int far) { if (usermode) { struct thread *td; td = curthread; call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far); userret(td, tf); } else { #ifdef KDB kdb_trap((prefetch) ?
/*
 * abort_debug() handles the following abort:
 *
 *  FAULT_DEBUG - Debug Event
 */
static __inline void
abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode,
    u_int far)
{

	if (usermode) {
		struct thread *td;

		td = curthread;
		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far);
		userret(td, tf);
	} else {
#ifdef KDB
		kdb_trap((prefetch) ? T_BREAKPOINT : T_WATCHPOINT, 0, tf);
#else
		printf("No debugger in kernel.\n");
#endif
	}
}

/*
 * Abort handler.
 *
 * FAR, FSR, and everything else that can be lost after interrupts are
 * enabled must be grabbed before interrupts are enabled. Note that once
 * interrupts are enabled, we could even migrate to another CPU ...
 *
 * TODO: move quick cases to ASM
 */
void
abort_handler(struct trapframe *tf, int prefetch)
{
	struct thread *td;
	vm_offset_t far, va;
	int idx, rv;
	uint32_t fsr;
	struct ksig ksig;
	struct proc *p;
	struct pcb *pcb;
	struct vm_map *map;
	struct vmspace *vm;
	vm_prot_t ftype;
	bool usermode;
#ifdef INVARIANTS
	void *onfault;
#endif

	PCPU_INC(cnt.v_trap);
	td = curthread;

	fsr = (prefetch) ? cp15_ifsr_get() : cp15_dfsr_get();
#if __ARM_ARCH >= 7
	far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
#else
	far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
#endif

	idx = FSR_TO_FAULT(fsr);
	usermode = TRAPF_USERMODE(tf);	/* Abort came from user mode? */

	if (usermode)
		td->td_frame = tf;

	CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d",
	    __func__, fsr, idx, far, prefetch, usermode);

	/*
	 * First, handle aborts that are not directly related to mapping.
	 */
	if (__predict_false(idx == FAULT_EA_IMPREC)) {
		abort_imprecise(tf, fsr, prefetch, usermode);
		return;
	}

	if (__predict_false(idx == FAULT_DEBUG)) {
		abort_debug(tf, fsr, prefetch, usermode, far);
		return;
	}

	/*
	 * ARM has a set of unprivileged load and store instructions
	 * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in modes
	 * other than user mode, and the OS should recognize their aborts
	 * and behave appropriately. However, there is no reasonable way to
	 * do that in general unless we restrict the handling somehow.
	 *
	 * For now, these instructions are used only in copyin()/copyout()-
	 * like functions, where user-mode buffers are checked in advance to
	 * ensure that they do not come from KVA space. Thus, no action is
	 * needed here.
	 */

	/*
	 * (1) Handle access and R/W hardware emulation aborts.
	 * (2) Check that the abort did not happen on pmap-essential address
	 *     ranges. There is no way to fix those, so we don't even try.
	 */
	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
	if (rv == KERN_SUCCESS)
		return;
#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		goto out;
	}
#endif
	if (rv == KERN_INVALID_ADDRESS)
		goto nogo;

	if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
		/*
		 * Due to both processor errata and lazy TLB invalidation when
		 * access restrictions are removed from virtual pages, memory
		 * accesses that are allowed by the physical mapping layer may
		 * nonetheless cause one spurious page fault per virtual page.
		 * When the thread is executing a "no faulting" section that
		 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
		 * every page fault is treated as a spurious page fault,
		 * unless it accesses the same virtual address as the most
		 * recent page fault within the same "no faulting" section.
		 */
		if (td->td_md.md_spurflt_addr != far ||
		    (td->td_pflags & TDP_RESETSPUR) != 0) {
			td->td_md.md_spurflt_addr = far;
			td->td_pflags &= ~TDP_RESETSPUR;

			tlb_flush_local(far & ~PAGE_MASK);
			return;
		}
	} else {
		/*
		 * If we get a page fault while in a critical section, then
		 * it is most likely a fatal kernel page fault. The kernel
		 * is already going to panic trying to get a sleep lock to
		 * do the VM lookup, so just consider it a fatal trap so the
		 * kernel can print out a useful trap message and even get
		 * to the debugger.
		 *
		 * If we get a page fault while holding a non-sleepable
		 * lock, then it is most likely a fatal kernel page fault.
		 * If WITNESS is enabled, then it's going to whine about
		 * bogus LORs with various VM locks, so just skip to the
		 * fatal trap handling directly.
		 */
		if (td->td_critnest != 0 ||
		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
		    "Kernel page fault") != 0) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

	/* Re-enable interrupts if they were enabled previously. */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
			enable_interrupts(PSR_I);
		if (__predict_true((tf->tf_spsr & PSR_F) == 0))
			enable_interrupts(PSR_F);
	}

	p = td->td_proc;
	if (usermode) {
		td->td_pticks = 0;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);
	}

	/* Invoke the appropriate handler, if necessary. */
	if (__predict_false(aborts[idx].func != NULL)) {
		if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following aborts:
	 *
	 *  FAULT_ICACHE   - I-cache maintenance
	 *  FAULT_TRAN_xx  - Translation
	 *  FAULT_PERM_xx  - Permission
	 */

	/*
	 * Don't pass a faulting cache operation to vm_fault(). We don't
	 * want to handle all the VM machinery at this point.
	 */
	pcb = td->td_pcb;
	if (__predict_false(pcb->pcb_onfault == cachebailout)) {
		tf->tf_r0 = far;		/* return failing address */
		tf->tf_pc = (register_t)pcb->pcb_onfault;
		return;
	}

	/* Handle remaining I-cache aborts. */
	if (idx == FAULT_ICACHE) {
		if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
-	}
-
-	/* fusubailout is used by [fs]uswintr to avoid page faulting. */
-	if (__predict_false(pcb->pcb_onfault == fusubailout)) {
-		tf->tf_r0 = EFAULT;
-		tf->tf_pc = (register_t)pcb->pcb_onfault;
-		return;
	}

	va = trunc_page(far);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory. If curproc
		 * is NULL or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = (p != NULL) ? p->p_vmspace : NULL;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
		if (!usermode && (td->td_intr_nesting_level != 0 ||
		    pcb->pcb_onfault == NULL)) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

	ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
	if (prefetch)
		ftype |= VM_PROT_EXECUTE;

#ifdef DEBUG
	last_fault_code = fsr;
#endif

#ifdef INVARIANTS
	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
#endif

	/* Fault in the page. */
	rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

#ifdef INVARIANTS
	pcb->pcb_onfault = onfault;
#endif

	if (__predict_true(rv == KERN_SUCCESS))
		goto out;
nogo:
	if (!usermode) {
		if (td->td_intr_nesting_level == 0 &&
		    pcb->pcb_onfault != NULL) {
			tf->tf_r0 = rv;
			tf->tf_pc = (int)pcb->pcb_onfault;
			return;
		}
		CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
		abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
		return;
	}

	ksig.sig = SIGSEGV;
	ksig.code = (rv == KERN_PROTECTION_FAILURE) ? SEGV_ACCERR :
	    SEGV_MAPERR;
	ksig.addr = far;

do_trapsignal:
	call_trapsignal(td, ksig.sig, ksig.code, ksig.addr);
out:
	if (usermode)
		userret(td, tf);
}
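/*
 * Editor's note: the "no faulting" bracketing that abort_handler() checks
 * via TDP_NOFAULTING is entered through the real FreeBSD primitives
 * vm_fault_disable_pagefaults()/vm_fault_enable_pagefaults(). Below is a
 * minimal sketch of the usual pattern, modeled loosely on vn_io_fault in
 * vfs_vnops.c; do_slow_copy() is an illustrative placeholder, not an
 * existing function.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

static int do_slow_copy(void *, size_t, struct uio *);	/* illustrative */

static int
copy_nofault(void *buf, size_t len, struct uio *uio)
{
	int error, save;

	save = vm_fault_disable_pagefaults();	/* sets TDP_NOFAULTING */
	error = uiomove(buf, (int)len, uio);	/* user faults now fail fast */
	vm_fault_enable_pagefaults(save);
	if (error == EFAULT)
		error = do_slow_copy(buf, len, uio);	/* may sleep/fault */
	return (error);
}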
/*
 * abort_fatal() handles the following data aborts:
 *
 *  FAULT_DEBUG		- Debug Event
 *  FAULT_ACCESS_xx	- Access Bit
 *  FAULT_EA_PREC	- Precise External Abort
 *  FAULT_DOMAIN_xx	- Domain Fault
 *  FAULT_EA_TRAN_xx	- External Translation Abort
 *  FAULT_EA_IMPREC	- Imprecise External Abort
 *  + all undefined codes for ABORT
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
 */
static int
abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;
	const char *mode;
	const char *rw_mode;

	usermode = TRAPF_USERMODE(tf);
#ifdef KDTRACE_HOOKS
	if (!usermode) {
		if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far))
			return (0);
	}
#endif

	mode = usermode ? "user" : "kernel";
	rw_mode = fsr & FSR_WNR ? "write" : "read";
	disable_interrupts(PSR_I|PSR_F);

	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s' on %s\n", mode,
		    aborts[idx].desc, rw_mode);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		if (idx != FAULT_EA_IMPREC)
			printf("%08x, ", far);
		else
			printf("Invalid, ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (usermode)
		printf("usp=%08x, ulr=%08x", tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x", tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	if (debugger_on_panic || kdb_active)
		kdb_trap(fsr, 0, tf);
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}

/*
 * abort_align() handles the following data abort:
 *
 *  FAULT_ALIGN - Alignment fault
 *
 * Everything should be aligned in the kernel, with the exception of user
 * to kernel and vice versa data copying, so if pcb_onfault is not set,
 * it's fatal. We generate a signal in case of an abort from user mode.
 */
static int
abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;

	usermode = TRAPF_USERMODE(tf);
	if (!usermode) {
		if (td != NULL && td->td_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault != NULL) {
			tf->tf_r0 = EFAULT;
			tf->tf_pc = (int)td->td_pcb->pcb_onfault;
			return (0);
		}
		abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	}
	/* Deliver a bus error signal to the process */
	ksig->code = BUS_ADRALN;
	ksig->sig = SIGBUS;
	ksig->addr = far;
	return (1);
}

/*
 * abort_icache() handles the following data abort:
 *
 *  FAULT_ICACHE - Instruction cache maintenance
 *
 * According to the manual, FAULT_ICACHE is a translation fault that occurs
 * during a cache maintenance operation. In fact, no cache maintenance
 * operation should be performed on an unmapped virtual address. As cache
 * maintenance operations (except DMB, DSB, and Flush Prefetch Buffer) are
 * privileged, the abort is considered fatal for now. However, the whole
 * matter of cache maintenance on virtual addresses can be really complex
 * and fuzzy in the SMP case, so in the future the standard fault mechanism,
 * including a vm_fault() call, may have to be applied here.
*/ static int abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far, u_int prefetch, struct thread *td, struct ksig *ksig) { abort_fatal(tf, idx, fsr, far, prefetch, td, ksig); return(0); } Index: head/sys/arm/arm/trap.c =================================================================== --- head/sys/arm/arm/trap.c (revision 295266) +++ head/sys/arm/arm/trap.c (revision 295267) @@ -1,746 +1,737 @@ /* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */ /*- * Copyright 2004 Olivier Houchard * Copyright 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Steve C. Woodford for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1994-1997 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * fault.c * * Fault handlers * * Created : 28/11/94 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KDB #include #endif #ifdef KDTRACE_HOOKS #include #endif #define ReadWord(a) (*((volatile unsigned int *)(a))) -extern char fusubailout[]; - #ifdef DEBUG int last_fault_code; /* For the benefit of pmap_fault_fixup() */ #endif struct ksig { int signb; u_long code; }; struct data_abort { int (*func)(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); const char *desc; }; static int dab_fatal(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); static int dab_align(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); static int dab_buserr(struct trapframe *, u_int, u_int, struct thread *, struct ksig *); static void prefetch_abort_handler(struct trapframe *); static const struct data_abort data_aborts[] = { {dab_fatal, "Vector Exception"}, {dab_align, "Alignment Fault 1"}, {dab_fatal, "Terminal Exception"}, {dab_align, "Alignment Fault 3"}, {dab_buserr, "External Linefetch Abort (S)"}, {NULL, "Translation Fault (S)"}, #if (ARM_MMU_V6 + ARM_MMU_V7) != 0 {NULL, "Translation Flag Fault"}, #else {dab_buserr, "External Linefetch Abort (P)"}, #endif {NULL, "Translation Fault (P)"}, {dab_buserr, "External Non-Linefetch Abort (S)"}, {NULL, "Domain Fault (S)"}, {dab_buserr, "External Non-Linefetch Abort (P)"}, {NULL, "Domain Fault (P)"}, {dab_buserr, "External Translation Abort (L1)"}, {NULL, "Permission Fault (S)"}, {dab_buserr, "External Translation Abort (L2)"}, {NULL, "Permission Fault (P)"} }; /* Determine if a fault came from user mode */ #define TRAP_USERMODE(tf) ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE) /* Determine if 'x' is a permission fault */ #define IS_PERMISSION_FAULT(x) \ (((1 << ((x) & FAULT_TYPE_MASK)) & \ ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0) static __inline void call_trapsignal(struct thread *td, int sig, u_long code) { ksiginfo_t ksi; ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = (int)code; trapsignal(td, &ksi); } void abort_handler(struct trapframe *tf, int type) { struct vm_map *map; struct pcb *pcb; struct thread *td; u_int user, far, fsr; vm_prot_t ftype; void *onfault; vm_offset_t va; int error = 0; struct ksig ksig; struct proc *p; if (type == 1) return (prefetch_abort_handler(tf)); /* Grab FAR/FSR before enabling interrupts */ far = cpu_faultaddress(); fsr = cpu_faultstatus(); #if 0 printf("data abort: fault address=%p (from pc=%p lr=%p)\n", (void*)far, (void*)tf->tf_pc, (void*)tf->tf_svc_lr); #endif /* Update vmmeter statistics */ #if 0 vmexp.traps++; #endif td = curthread; p = td->td_proc; PCPU_INC(cnt.v_trap); /* Data abort came from user mode? 
 */
	user = TRAP_USERMODE(tf);

	if (user) {
		td->td_pticks = 0;
		td->td_frame = tf;
		if (td->td_cowgen != td->td_proc->p_cowgen)
			thread_cow_update(td);
	}
	/* Grab the current pcb */
	pcb = td->td_pcb;
	/* Re-enable interrupts if they were enabled previously */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
			enable_interrupts(PSR_I);
		if (__predict_true((tf->tf_spsr & PSR_F) == 0))
			enable_interrupts(PSR_F);
	}

	/* Invoke the appropriate handler, if necessary */
	if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
		if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far,
		    td, &ksig)) {
			goto do_trapsignal;
		}
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following data aborts:
	 *
	 *  FAULT_TRANS_S  - Translation -- Section
	 *  FAULT_TRANS_P  - Translation -- Page
	 *  FAULT_DOMAIN_S - Domain -- Section
	 *  FAULT_DOMAIN_P - Domain -- Page
	 *  FAULT_PERM_S   - Permission -- Section
	 *  FAULT_PERM_P   - Permission -- Page
	 *
	 * These are the main virtual memory-related faults signalled by
	 * the MMU.
	 */
-
-	/* fusubailout is used by [fs]uswintr to avoid page faulting */
-	if (__predict_false(pcb->pcb_onfault == fusubailout)) {
-		tf->tf_r0 = EFAULT;
-		tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
-		return;
-	}

	/*
	 * Make sure the Program Counter is sane. We could fall foul of
	 * someone executing Thumb code, in which case the PC might not
	 * be word-aligned. This would cause a kernel alignment fault
	 * further down if we have to decode the current instruction.
	 * XXX: It would be nice to be able to support Thumb at some point.
	 */
	if (__predict_false((tf->tf_pc & 3) != 0)) {
		if (user) {
			/*
			 * Give the user an illegal instruction signal.
			 */
			/* Deliver a SIGILL to the process */
			ksig.signb = SIGILL;
			ksig.code = 0;
			goto do_trapsignal;
		}

		/*
		 * The kernel never executes Thumb code.
		 */
		printf("\ndata_abort_fault: Misaligned Kernel-mode "
		    "Program Counter\n");
		dab_fatal(tf, fsr, far, td, &ksig);
	}

	va = trunc_page((vm_offset_t)far);

	/*
	 * It is only a kernel address space fault iff:
	 *	1. user == 0 and
	 *	2. pcb_onfault not set or
	 *	3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
	 */
	if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS ||
	    (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
	    __predict_true((pcb->pcb_onfault == NULL ||
	    (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) {
		map = kernel_map;

		/* Was the fault due to the FPE/IPKDB ? */
		if (__predict_false((tf->tf_spsr & PSR_MODE) ==
		    PSR_UND32_MODE)) {
			/*
			 * Force exit via userret().
			 * This is necessary as the FPE is an extension to
			 * userland that actually runs in a privileged mode
			 * but uses USR mode permissions for its accesses.
			 */
			user = 1;
			ksig.signb = SIGSEGV;
			ksig.code = 0;
			goto do_trapsignal;
		}
	} else {
		map = &td->td_proc->p_vmspace->vm_map;
	}

	/*
	 * We need to know whether the page should be mapped as R or R/W. On
	 * armv6 and later the fault status register indicates whether the
	 * access was a read or write. Prior to armv6, we know that a
	 * permission fault can only be the result of a write to a read-only
	 * location, so we can deal with those quickly. Otherwise we need to
	 * disassemble the faulting instruction to determine if it was a
	 * write.
	 */
#if __ARM_ARCH >= 6
	ftype = (fsr & FAULT_WNR) ? VM_PROT_READ | VM_PROT_WRITE :
	    VM_PROT_READ;
#else
	if (IS_PERMISSION_FAULT(fsr))
		ftype = VM_PROT_WRITE;
	else {
		u_int insn = ReadWord(tf->tf_pc);

		if (((insn & 0x0c100000) == 0x04000000) ||	/* STR/STRB */
		    ((insn & 0x0e1000b0) == 0x000000b0) ||	/* STRH/STRD */
		    ((insn & 0x0a100000) == 0x08000000)) {	/* STM/CDT */
			ftype = VM_PROT_WRITE;
		} else {
			if ((insn & 0x0fb00ff0) == 0x01000090)	/* SWP */
				ftype = VM_PROT_READ | VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;
		}
	}
#endif

	/*
	 * See if the fault is a result of ref/mod emulation,
	 * or domain mismatch.
	 */
#ifdef DEBUG
	last_fault_code = fsr;
#endif

	if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK,
	    NULL, "Kernel page fault") != 0)
		goto fatal_pagefault;

	if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
	    user)) {
		goto out;
	}

	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
	error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	pcb->pcb_onfault = onfault;

	if (__predict_true(error == 0))
		goto out;

fatal_pagefault:
	if (user == 0) {
		if (pcb->pcb_onfault) {
			tf->tf_r0 = error;
			tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
			return;
		}

		printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype,
		    error);
		dab_fatal(tf, fsr, far, td, &ksig);
	}

	if (error == ENOMEM) {
		printf("VM: pid %d (%s), uid %d killed: "
		    "out of swap\n", td->td_proc->p_pid, td->td_name,
		    (td->td_proc->p_ucred) ?
		    td->td_proc->p_ucred->cr_uid : -1);
		ksig.signb = SIGKILL;
	} else {
		ksig.signb = SIGSEGV;
	}
	ksig.code = 0;

do_trapsignal:
	call_trapsignal(td, ksig.signb, ksig.code);
out:
	/* If returning to user mode, make sure to invoke userret() */
	if (user)
		userret(td, tf);
}
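/*
 * Editor's note: the pre-armv6 branch above detects writes purely from the
 * instruction encoding. Here is a small userland sanity check of those
 * masks; the sample words are hand-assembled ARM instructions supplied for
 * illustration.
 */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int
insn_is_write(uint32_t insn)
{

	if (((insn & 0x0c100000) == 0x04000000) ||	/* STR/STRB */
	    ((insn & 0x0e1000b0) == 0x000000b0) ||	/* STRH/STRD */
	    ((insn & 0x0a100000) == 0x08000000))	/* STM/CDT */
		return (1);
	return (0);
}

int
main(void)
{

	assert(insn_is_write(0xe5821000));	/* str r1, [r2] */
	assert(!insn_is_write(0xe5912000));	/* ldr r2, [r1] */
	assert(insn_is_write(0xe1c120b0));	/* strh r2, [r1] */
	printf("store-detection masks behave as expected\n");
	return (0);
}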
"user" : "kernel"; disable_interrupts(PSR_I|PSR_F); if (td != NULL) { printf("Fatal %s mode data abort: '%s'\n", mode, data_aborts[fsr & FAULT_TYPE_MASK].desc); printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr); if ((fsr & FAULT_IMPRECISE) == 0) printf("%08x, ", far); else printf("Invalid, "); printf("spsr=%08x\n", tf->tf_spsr); } else { printf("Fatal %s mode prefetch abort at 0x%08x\n", mode, tf->tf_pc); printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr); } printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); printf("r12=%08x, ", tf->tf_r12); if (TRAP_USERMODE(tf)) printf("usp=%08x, ulr=%08x", tf->tf_usr_sp, tf->tf_usr_lr); else printf("ssp=%08x, slr=%08x", tf->tf_svc_sp, tf->tf_svc_lr); printf(", pc =%08x\n\n", tf->tf_pc); #ifdef KDB if (debugger_on_panic || kdb_active) if (kdb_trap(fsr, 0, tf)) return (0); #endif panic("Fatal abort"); /*NOTREACHED*/ } /* * dab_align() handles the following data aborts: * * FAULT_ALIGN_0 - Alignment fault * FAULT_ALIGN_1 - Alignment fault * * These faults are fatal if they happen in kernel mode. Otherwise, we * deliver a bus error to the process. */ static int dab_align(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) { /* Alignment faults are always fatal if they occur in kernel mode */ if (!TRAP_USERMODE(tf)) { if (!td || !td->td_pcb->pcb_onfault) dab_fatal(tf, fsr, far, td, ksig); tf->tf_r0 = EFAULT; tf->tf_pc = (int)td->td_pcb->pcb_onfault; return (0); } /* pcb_onfault *must* be NULL at this point */ /* Deliver a bus error signal to the process */ ksig->code = 0; ksig->signb = SIGBUS; td->td_frame = tf; return (1); } /* * dab_buserr() handles the following data aborts: * * FAULT_BUSERR_0 - External Abort on Linefetch -- Section * FAULT_BUSERR_1 - External Abort on Linefetch -- Page * FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section * FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page * FAULT_BUSTRNL1 - External abort on Translation -- Level 1 * FAULT_BUSTRNL2 - External abort on Translation -- Level 2 * * If pcb_onfault is set, flag the fault and return to the handler. * If the fault occurred in user mode, give the process a SIGBUS. * * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2 * can be flagged as imprecise in the FSR. This causes a real headache * since some of the machine state is lost. In this case, tf->tf_pc * may not actually point to the offending instruction. In fact, if * we've taken a double abort fault, it generally points somewhere near * the top of "data_abort_entry" in exception.S. * * In all other cases, these data aborts are considered fatal. */ static int dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) { struct pcb *pcb = td->td_pcb; #ifdef __XSCALE__ if ((fsr & FAULT_IMPRECISE) != 0 && (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) { /* * Oops, an imprecise, double abort fault. We've lost the * r14_abt/spsr_abt values corresponding to the original * abort, and the spsr saved in the trapframe indicates * ABT mode. */ tf->tf_spsr &= ~PSR_MODE; /* * We use a simple heuristic to determine if the double abort * happened as a result of a kernel or user mode access. * If the current trapframe is at the top of the kernel stack, * the fault _must_ have come from user mode. 
/*
 * dab_buserr() handles the following data aborts:
 *
 *  FAULT_BUSERR_0 - External Abort on Linefetch -- Section
 *  FAULT_BUSERR_1 - External Abort on Linefetch -- Page
 *  FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section
 *  FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page
 *  FAULT_BUSTRNL1 - External abort on Translation -- Level 1
 *  FAULT_BUSTRNL2 - External abort on Translation -- Level 2
 *
 * If pcb_onfault is set, flag the fault and return to the handler.
 * If the fault occurred in user mode, give the process a SIGBUS.
 *
 * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2
 * can be flagged as imprecise in the FSR. This causes a real headache
 * since some of the machine state is lost. In this case, tf->tf_pc
 * may not actually point to the offending instruction. In fact, if
 * we've taken a double abort fault, it generally points somewhere near
 * the top of "data_abort_entry" in exception.S.
 *
 * In all other cases, these data aborts are considered fatal.
 */
static int
dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct thread *td,
    struct ksig *ksig)
{
	struct pcb *pcb = td->td_pcb;

#ifdef __XSCALE__
	if ((fsr & FAULT_IMPRECISE) != 0 &&
	    (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) {
		/*
		 * Oops, an imprecise, double abort fault. We've lost the
		 * r14_abt/spsr_abt values corresponding to the original
		 * abort, and the spsr saved in the trapframe indicates
		 * ABT mode.
		 */
		tf->tf_spsr &= ~PSR_MODE;

		/*
		 * We use a simple heuristic to determine if the double abort
		 * happened as a result of a kernel or user mode access.
		 * If the current trapframe is at the top of the kernel stack,
		 * the fault _must_ have come from user mode.
		 */
		if (tf != ((struct trapframe *)pcb->pcb_regs.sf_sp) - 1) {
			/*
			 * Kernel mode. We're either about to die a
			 * spectacular death, or pcb_onfault will come
			 * to our rescue. Either way, the current value
			 * of tf->tf_pc is irrelevant.
			 */
			tf->tf_spsr |= PSR_SVC32_MODE;
			if (pcb->pcb_onfault == NULL)
				printf("\nKernel mode double abort!\n");
		} else {
			/*
			 * User mode. We've lost the program counter at the
			 * time of the fault (not that it was accurate anyway;
			 * it's not called an imprecise fault for nothing).
			 * About all we can do is copy r14_usr to tf_pc and
			 * hope for the best. The process is about to get a
			 * SIGBUS, so it's probably history anyway.
			 */
			tf->tf_spsr |= PSR_USR32_MODE;
			tf->tf_pc = tf->tf_usr_lr;
		}
	}

	/* FAR is invalid for imprecise exceptions */
	if ((fsr & FAULT_IMPRECISE) != 0)
		far = 0;
#endif /* __XSCALE__ */

	if (pcb->pcb_onfault) {
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
		return (0);
	}

	/*
	 * At this point, if the fault happened in kernel mode, we're toast
	 */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, fsr, far, td, ksig);

	/* Deliver a bus error signal to the process */
	ksig->signb = SIGBUS;
	ksig->code = 0;
	td->td_frame = tf;
	return (1);
}

/*
 * void prefetch_abort_handler(struct trapframe *tf)
 *
 * Abort handler called when instruction execution occurs at
 * a non-existent or restricted (access permissions) memory page.
 * If the address is invalid and we were in SVC mode, then panic, as
 * the kernel should never prefetch abort.
 * If the address is invalid and the page is mapped, then the user process
 * does not have read permission, so send it a signal.
 * Otherwise fault the page in and try again.
 */
static void
prefetch_abort_handler(struct trapframe *tf)
{
	struct thread *td;
	struct proc *p;
	struct vm_map *map;
	vm_offset_t fault_pc, va;
	int error = 0;
	struct ksig ksig;

#if 0
	/* Update vmmeter statistics */
	uvmexp.traps++;
#endif
#if 0
	printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc,
	    (void*)tf->tf_usr_lr);
#endif

	td = curthread;
	p = td->td_proc;
	PCPU_INC(cnt.v_trap);

	if (TRAP_USERMODE(tf)) {
		td->td_frame = tf;
		if (td->td_cowgen != td->td_proc->p_cowgen)
			thread_cow_update(td);
	}
	fault_pc = tf->tf_pc;
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
			enable_interrupts(PSR_I);
		if (__predict_true((tf->tf_spsr & PSR_F) == 0))
			enable_interrupts(PSR_F);
	}

	/* Prefetch aborts cannot happen in kernel mode */
	if (__predict_false(!TRAP_USERMODE(tf)))
		dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig);
	td->td_pticks = 0;

	/* Ok, validate the address; we can only execute in USER space */
	if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS ||
	    (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) {
		ksig.signb = SIGSEGV;
		ksig.code = 0;
		goto do_trapsignal;
	}

	map = &td->td_proc->p_vmspace->vm_map;
	va = trunc_page(fault_pc);

	/*
	 * See if the pmap can handle this fault on its own...
	 */
#ifdef DEBUG
	last_fault_code = -1;
#endif
	if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
		goto out;

	error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE,
	    VM_FAULT_NORMAL);
	if (__predict_true(error == 0))
		goto out;

	if (error == ENOMEM) {
		printf("VM: pid %d (%s), uid %d killed: "
		    "out of swap\n", td->td_proc->p_pid, td->td_name,
		    (td->td_proc->p_ucred) ?
		    td->td_proc->p_ucred->cr_uid : -1);
		ksig.signb = SIGKILL;
	} else {
		ksig.signb = SIGSEGV;
	}
	ksig.code = 0;

do_trapsignal:
	call_trapsignal(td, ksig.signb, ksig.code);

out:
	userret(td, tf);
}
extern int badaddr_read_1(const uint8_t *, uint8_t *);
extern int badaddr_read_2(const uint16_t *, uint16_t *);
extern int badaddr_read_4(const uint32_t *, uint32_t *);

/*
 * Tentatively read an 8, 16, or 32-bit value from 'addr'.
 * If the read succeeds, the value is written to 'rptr' and zero is returned.
 * Else, return EFAULT.
 */
int
badaddr_read(void *addr, size_t size, void *rptr)
{
	union {
		uint8_t v1;
		uint16_t v2;
		uint32_t v4;
	} u;
	int rv;

	cpu_drain_writebuf();

	/* Read from the test address. */
	switch (size) {
	case sizeof(uint8_t):
		rv = badaddr_read_1(addr, &u.v1);
		if (rv == 0 && rptr)
			*(uint8_t *)rptr = u.v1;
		break;

	case sizeof(uint16_t):
		rv = badaddr_read_2(addr, &u.v2);
		if (rv == 0 && rptr)
			*(uint16_t *)rptr = u.v2;
		break;

	case sizeof(uint32_t):
		rv = badaddr_read_4(addr, &u.v4);
		if (rv == 0 && rptr)
			*(uint32_t *)rptr = u.v4;
		break;

	default:
		panic("badaddr: invalid size (%lu)", (u_long)size);
	}

	/* Return EFAULT if the address was invalid, else zero */
	return (rv);
}
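/*
 * Editor's note: a hypothetical caller of badaddr_read(), probing an
 * optional device register. The function, its ENXIO policy, and the
 * assumption that badaddr_read()'s prototype is in scope are all made up
 * for illustration.
 */

#include <sys/param.h>
#include <sys/systm.h>

static int
mydev_probe_id(volatile uint32_t *reg, uint32_t *idp)
{

	/* badaddr_read() fills *idp and returns 0 when the address responds. */
	if (badaddr_read(__DEVOLATILE(void *, reg), sizeof(uint32_t), idp) != 0)
		return (ENXIO);		/* nothing mapped at this address */
	return (0);
}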