Index: head/sys/arm/arm/cpufunc_asm_sheeva.S =================================================================== --- head/sys/arm/arm/cpufunc_asm_sheeva.S (revision 308407) +++ head/sys/arm/arm/cpufunc_asm_sheeva.S (revision 308408) @@ -1,421 +1,421 @@ /*- * Copyright (C) 2008 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#include #include __FBSDID("$FreeBSD$"); +#include #include .Lsheeva_cache_line_size: .word _C_LABEL(arm_pdcache_line_size) .Lsheeva_asm_page_mask: .word _C_LABEL(PAGE_MASK) ENTRY(sheeva_setttb) /* Disable irqs */ mrs r2, cpsr orr r3, r2, #PSR_I | PSR_F msr cpsr_c, r3 mov r1, #0 mcr p15, 0, r1, c7, c5, 0 /* Invalidate ICache */ 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 /* Test, clean and invalidate DCache */ bne 1b /* More to do? */ mcr p15, 1, r1, c15, c9, 0 /* Clean L2 */ mcr p15, 1, r1, c15, c11, 0 /* Invalidate L2 */ /* Reenable irqs */ msr cpsr_c, r2 mcr p15, 0, r1, c7, c10, 4 /* drain the write buffer */ mcr p15, 0, r0, c2, c0, 0 /* load new TTB */ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */ RET END(sheeva_setttb) ENTRY(sheeva_dcache_wbinv_range) str lr, [sp, #-4]! mrs lr, cpsr /* Start with cache line aligned address */ ldr ip, .Lsheeva_cache_line_size ldr ip, [ip] sub ip, ip, #1 and r2, r0, ip add r1, r1, r2 add r1, r1, ip bics r1, r1, ip bics r0, r0, ip ldr ip, .Lsheeva_asm_page_mask and r2, r0, ip rsb r2, r2, #PAGE_SIZE cmp r1, r2 movcc ip, r1 movcs ip, r2 1: add r3, r0, ip sub r2, r3, #1 /* Disable irqs */ orr r3, lr, #PSR_I | PSR_F msr cpsr_c, r3 mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */ mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */ /* Enable irqs */ msr cpsr_c, lr add r0, r0, ip sub r1, r1, ip cmp r1, #PAGE_SIZE movcc ip, r1 movcs ip, #PAGE_SIZE cmp r1, #0 bne 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ ldr lr, [sp], #4 RET END(sheeva_dcache_wbinv_range) ENTRY(sheeva_idcache_wbinv_range) str lr, [sp, #-4]! 
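For reference, a minimal C sketch (hypothetical helper names, standing in for the msr/mcr sequences above) of the walking pattern shared by the sheeva_*_range routines: the range is first rounded out to whole cache lines, then processed in chunks that never cross a page boundary, and interrupts are blocked only around each chunk's start/end "zone" operation.

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

/* Stand-ins for the privileged msr/mcr sequences in the assembly above. */
static void
cpu_intr_block(uint32_t *saved)
{
    *saved = 0;                 /* mrs/orr/msr: mask IRQ and FIQ */
}

static void
cpu_intr_restore(uint32_t saved)
{
    (void)saved;                /* msr cpsr_c, <saved> */
}

static void
l1d_clean_inv_zone(uintptr_t start, uintptr_t end)
{
    (void)start;                /* mcr p15, 5, start, c15, c15, 0 */
    (void)end;                  /* mcr p15, 5, end,   c15, c15, 1 */
}

static void
sheeva_dcache_wbinv_range_sketch(uintptr_t va, size_t len, size_t line)
{
    size_t chunk;
    uint32_t s;

    /* Round the start down and the length up to whole cache lines. */
    len += va & (line - 1);
    len = (len + line - 1) & ~(line - 1);
    va &= ~(uintptr_t)(line - 1);

    /* The first chunk ends at the next page boundary at the latest. */
    chunk = PAGE_SIZE - (va & (PAGE_SIZE - 1));
    if (chunk > len)
        chunk = len;

    while (len > 0) {
        cpu_intr_block(&s);
        l1d_clean_inv_zone(va, va + chunk - 1); /* one chunk per IRQ-off window */
        cpu_intr_restore(s);

        va += chunk;
        len -= chunk;
        chunk = (len < PAGE_SIZE) ? len : PAGE_SIZE;
    }
}
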
mrs lr, cpsr /* Start with cache line aligned address */ ldr ip, .Lsheeva_cache_line_size ldr ip, [ip] sub ip, ip, #1 and r2, r0, ip add r1, r1, r2 add r1, r1, ip bics r1, r1, ip bics r0, r0, ip ldr ip, .Lsheeva_asm_page_mask and r2, r0, ip rsb r2, r2, #PAGE_SIZE cmp r1, r2 movcc ip, r1 movcs ip, r2 1: add r3, r0, ip sub r2, r3, #1 /* Disable irqs */ orr r3, lr, #PSR_I | PSR_F msr cpsr_c, r3 mcr p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */ mcr p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */ /* Enable irqs */ msr cpsr_c, lr /* Invalidate and clean icache line by line */ ldr r3, .Lsheeva_cache_line_size ldr r3, [r3] 2: mcr p15, 0, r0, c7, c5, 1 add r0, r0, r3 cmp r2, r0 bhi 2b add r0, r2, #1 sub r1, r1, ip cmp r1, #PAGE_SIZE movcc ip, r1 movcs ip, #PAGE_SIZE cmp r1, #0 bne 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ ldr lr, [sp], #4 RET END(sheeva_idcache_wbinv_range) ENTRY(sheeva_dcache_inv_range) str lr, [sp, #-4]! mrs lr, cpsr /* Start with cache line aligned address */ ldr ip, .Lsheeva_cache_line_size ldr ip, [ip] sub ip, ip, #1 and r2, r0, ip add r1, r1, r2 add r1, r1, ip bics r1, r1, ip bics r0, r0, ip ldr ip, .Lsheeva_asm_page_mask and r2, r0, ip rsb r2, r2, #PAGE_SIZE cmp r1, r2 movcc ip, r1 movcs ip, r2 1: add r3, r0, ip sub r2, r3, #1 /* Disable irqs */ orr r3, lr, #PSR_I | PSR_F msr cpsr_c, r3 mcr p15, 5, r0, c15, c14, 0 /* Inv zone start address */ mcr p15, 5, r2, c15, c14, 1 /* Inv zone end address */ /* Enable irqs */ msr cpsr_c, lr add r0, r0, ip sub r1, r1, ip cmp r1, #PAGE_SIZE movcc ip, r1 movcs ip, #PAGE_SIZE cmp r1, #0 bne 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ ldr lr, [sp], #4 RET END(sheeva_dcache_inv_range) ENTRY(sheeva_dcache_wb_range) str lr, [sp, #-4]! mrs lr, cpsr /* Start with cache line aligned address */ ldr ip, .Lsheeva_cache_line_size ldr ip, [ip] sub ip, ip, #1 and r2, r0, ip add r1, r1, r2 add r1, r1, ip bics r1, r1, ip bics r0, r0, ip ldr ip, .Lsheeva_asm_page_mask and r2, r0, ip rsb r2, r2, #PAGE_SIZE cmp r1, r2 movcc ip, r1 movcs ip, r2 1: add r3, r0, ip sub r2, r3, #1 /* Disable irqs */ orr r3, lr, #PSR_I | PSR_F msr cpsr_c, r3 mcr p15, 5, r0, c15, c13, 0 /* Clean zone start address */ mcr p15, 5, r2, c15, c13, 1 /* Clean zone end address */ /* Enable irqs */ msr cpsr_c, lr add r0, r0, ip sub r1, r1, ip cmp r1, #PAGE_SIZE movcc ip, r1 movcs ip, #PAGE_SIZE cmp r1, #0 bne 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ ldr lr, [sp], #4 RET END(sheeva_dcache_wb_range) ENTRY(sheeva_l2cache_wbinv_range) str lr, [sp, #-4]! mrs lr, cpsr /* Start with cache line aligned address */ ldr ip, .Lsheeva_cache_line_size ldr ip, [ip] sub ip, ip, #1 and r2, r0, ip add r1, r1, r2 add r1, r1, ip bics r1, r1, ip bics r0, r0, ip ldr ip, .Lsheeva_asm_page_mask and r2, r0, ip rsb r2, r2, #PAGE_SIZE cmp r1, r2 movcc ip, r1 movcs ip, r2 1: add r3, r0, ip sub r2, r3, #1 /* Disable irqs */ orr r3, lr, #PSR_I | PSR_F msr cpsr_c, r3 mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */ mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */ mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */ mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */ /* Enable irqs */ msr cpsr_c, lr add r0, r0, ip sub r1, r1, ip cmp r1, #PAGE_SIZE movcc ip, r1 movcs ip, #PAGE_SIZE cmp r1, #0 bne 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ ldr lr, [sp], #4 RET END(sheeva_l2cache_wbinv_range) ENTRY(sheeva_l2cache_inv_range) str lr, [sp, #-4]! 
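sheeva_idcache_wbinv_range adds one extra step inside each chunk: the D-cache has a start/end zone operation but the I-cache does not, so the I-side is invalidated one line at a time after interrupts are re-enabled. A short C sketch of that inner step, using the same hypothetical helper style as the sketch above:

#include <stdint.h>
#include <stddef.h>

/* Stand-ins for the CP15 operations in the assembly above. */
static void
l1d_clean_inv_zone(uintptr_t start, uintptr_t end)
{
    (void)start;                /* mcr p15, 5, ..., c15, c15, 0/1 */
    (void)end;
}

static void
l1i_inv_line(uintptr_t va)
{
    (void)va;                   /* mcr p15, 0, va, c7, c5, 1 */
}

static void
idcache_wbinv_chunk(uintptr_t va, size_t chunk, size_t line)
{
    uintptr_t end = va + chunk - 1;

    l1d_clean_inv_zone(va, end);        /* D side: one zone op, IRQs blocked */
    while (va < end) {                  /* I side: invalidate line by line */
        l1i_inv_line(va);
        va += line;
    }
}
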
mrs lr, cpsr /* Start with cache line aligned address */ ldr ip, .Lsheeva_cache_line_size ldr ip, [ip] sub ip, ip, #1 and r2, r0, ip add r1, r1, r2 add r1, r1, ip bics r1, r1, ip bics r0, r0, ip ldr ip, .Lsheeva_asm_page_mask and r2, r0, ip rsb r2, r2, #PAGE_SIZE cmp r1, r2 movcc ip, r1 movcs ip, r2 1: add r3, r0, ip sub r2, r3, #1 /* Disable irqs */ orr r3, lr, #PSR_I | PSR_F msr cpsr_c, r3 mcr p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */ mcr p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */ /* Enable irqs */ msr cpsr_c, lr add r0, r0, ip sub r1, r1, ip cmp r1, #PAGE_SIZE movcc ip, r1 movcs ip, #PAGE_SIZE cmp r1, #0 bne 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ ldr lr, [sp], #4 RET END(sheeva_l2cache_inv_range) ENTRY(sheeva_l2cache_wb_range) str lr, [sp, #-4]! mrs lr, cpsr /* Start with cache line aligned address */ ldr ip, .Lsheeva_cache_line_size ldr ip, [ip] sub ip, ip, #1 and r2, r0, ip add r1, r1, r2 add r1, r1, ip bics r1, r1, ip bics r0, r0, ip ldr ip, .Lsheeva_asm_page_mask and r2, r0, ip rsb r2, r2, #PAGE_SIZE cmp r1, r2 movcc ip, r1 movcs ip, r2 1: add r3, r0, ip sub r2, r3, #1 /* Disable irqs */ orr r3, lr, #PSR_I | PSR_F msr cpsr_c, r3 mcr p15, 1, r0, c15, c9, 4 /* Clean L2 zone start address */ mcr p15, 1, r2, c15, c9, 5 /* Clean L2 zone end address */ /* Enable irqs */ msr cpsr_c, lr add r0, r0, ip sub r1, r1, ip cmp r1, #PAGE_SIZE movcc ip, r1 movcs ip, #PAGE_SIZE cmp r1, #0 bne 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ ldr lr, [sp], #4 RET END(sheeva_l2cache_wb_range) ENTRY(sheeva_l2cache_wbinv_all) /* Disable irqs */ mrs r1, cpsr orr r2, r1, #PSR_I | PSR_F msr cpsr_c, r2 mov r0, #0 mcr p15, 1, r0, c15, c9, 0 /* Clean L2 */ mcr p15, 1, r0, c15, c11, 0 /* Invalidate L2 */ msr cpsr_c, r1 /* Reenable irqs */ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ RET END(sheeva_l2cache_wbinv_all) /* This function modifies register value as follows: * * arg1 arg EFFECT (bit value saved into register) * 0 0 not changed * 0 1 negated * 1 0 cleared * 1 1 set */ ENTRY(sheeva_control_ext) mrc p15, 1, r3, c15, c1, 0 /* Read the control register */ bic r2, r3, r0 /* Clear bits */ eor r2, r2, r1 /* XOR bits */ teq r2, r3 /* Only write if there is a change */ mcrne p15, 1, r2, c15, c1, 0 /* Write new control register */ mov r0, r3 /* Return old value */ RET END(sheeva_control_ext) ENTRY(sheeva_cpu_sleep) mov r0, #0 mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */ mcr p15, 0, r0, c7, c0, 4 /* Wait for interrupt */ mov pc, lr END(sheeva_cpu_sleep) Index: head/sys/arm/arm/cpufunc_asm_xscale.S =================================================================== --- head/sys/arm/arm/cpufunc_asm_xscale.S (revision 308407) +++ head/sys/arm/arm/cpufunc_asm_xscale.S (revision 308408) @@ -1,508 +1,509 @@ /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */ /*- * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ /*- * Copyright (c) 2001 Matt Thomas. * Copyright (c) 1997,1998 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * XScale assembly functions for CPU / MMU / TLB specific operations */ -#include #include __FBSDID("$FreeBSD$"); + +#include /* * Size of the XScale core D-cache. */ #define DCACHE_SIZE 0x00008000 /* * CPWAIT -- Canonical method to wait for CP15 update. * From: Intel 80200 manual, section 2.3.3. * * NOTE: Clobbers the specified temp reg. 
*/ #define CPWAIT_BRANCH \ sub pc, pc, #4 #define CPWAIT(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ mov tmp, tmp /* wait for it to complete */ ;\ CPWAIT_BRANCH /* branch to next insn */ #define CPWAIT_AND_RETURN_SHIFTER lsr #32 #define CPWAIT_AND_RETURN(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ /* Wait for it to complete and branch to the return address */ \ sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER ENTRY(xscale_cpwait) CPWAIT_AND_RETURN(r0) END(xscale_cpwait) /* * We need a separate cpu_control() entry point, since we have to * invalidate the Branch Target Buffer in the event the BPRD bit * changes in the control register. */ ENTRY(xscale_control) mrc CP15_SCTLR(r3) /* Read the control register */ bic r2, r3, r0 /* Clear bits */ eor r2, r2, r1 /* XOR bits */ teq r2, r3 /* Only write if there was a change */ mcrne p15, 0, r0, c7, c5, 6 /* Invalidate the BTB */ mcrne CP15_SCTLR(r2) /* Write new control register */ mov r0, r3 /* Return old value */ CPWAIT_AND_RETURN(r1) END(xscale_control) /* * Functions to set the MMU Translation Table Base register * * We need to clean and flush the cache as it uses virtual * addresses that are about to change. */ ENTRY(xscale_setttb) #ifdef CACHE_CLEAN_BLOCK_INTR mrs r3, cpsr orr r1, r3, #(PSR_I | PSR_F) msr cpsr_fsxc, r1 #endif stmfd sp!, {r0-r3, lr} bl _C_LABEL(xscale_cache_cleanID) mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */ CPWAIT(r0) ldmfd sp!, {r0-r3, lr} /* Write the TTB */ mcr p15, 0, r0, c2, c0, 0 /* If we have updated the TTB we must flush the TLB */ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */ /* The cleanID above means we only need to flush the I cache here */ mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ CPWAIT(r0) #ifdef CACHE_CLEAN_BLOCK_INTR msr cpsr_fsxc, r3 #endif RET END(xscale_setttb) /* * TLB functions * */ ENTRY(xscale_tlb_flushID_SE) mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ CPWAIT_AND_RETURN(r0) END(xscale_tlb_flushID_SE) /* * Cache functions */ ENTRY(xscale_cache_flushID) mcr p15, 0, r0, c7, c7, 0 /* flush I+D cache */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushID) ENTRY(xscale_cache_flushI) mcr p15, 0, r0, c7, c5, 0 /* flush I cache */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushI) ENTRY(xscale_cache_flushD) mcr p15, 0, r0, c7, c6, 0 /* flush D cache */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushD) ENTRY(xscale_cache_flushI_SE) mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushI_SE) ENTRY(xscale_cache_flushD_SE) /* * Errata (rev < 2): Must clean-dcache-line to an address * before invalidate-dcache-line to an address, or dirty * bits will not be cleared in the dcache array. */ mcr p15, 0, r0, c7, c10, 1 mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushD_SE) ENTRY(xscale_cache_cleanD_E) mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ CPWAIT_AND_RETURN(r0) END(xscale_cache_cleanD_E) /* * Information for the XScale cache clean/purge functions: * * * Virtual address of the memory region to use * * Size of memory region * * Note the virtual address for the Data cache clean operation * does not need to be backed by physical memory, since no loads * will actually be performed by the allocate-line operation. 
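Both sheeva_control_ext above and xscale_control here use the same read-modify-write idiom on a control register: the first argument names bits to force-clear, the second is then XORed in, and the register is only written back when the value actually changed. A small self-contained C sketch of the truth table documented above sheeva_control_ext:

#include <stdio.h>

/*
 * new = (old & ~clear) ^ toggle:
 *
 *    clear toggle   effect on a bit
 *      0     0      unchanged
 *      0     1      negated
 *      1     0      cleared
 *      1     1      set
 */
static unsigned
control_rmw(unsigned old, unsigned clear, unsigned toggle)
{
    /* The assembly only writes the register back if this differs from old. */
    return ((old & ~clear) ^ toggle);
}

int
main(void)
{
    unsigned reg = 0x5;                          /* bits 0 and 2 set */

    printf("%#x\n", control_rmw(reg, 0x0, 0x1)); /* negate bit 0 -> 0x4 */
    printf("%#x\n", control_rmw(reg, 0x2, 0x2)); /* set bit 1    -> 0x7 */
    printf("%#x\n", control_rmw(reg, 0x4, 0x0)); /* clear bit 2  -> 0x1 */
    return (0);
}
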
* * Note that the Mini-Data cache MUST be cleaned by executing * loads from memory mapped into a region reserved exclusively * for cleaning of the Mini-Data cache. */ .data .global _C_LABEL(xscale_cache_clean_addr) _C_LABEL(xscale_cache_clean_addr): .word 0x00000000 .global _C_LABEL(xscale_cache_clean_size) _C_LABEL(xscale_cache_clean_size): .word DCACHE_SIZE .global _C_LABEL(xscale_minidata_clean_addr) _C_LABEL(xscale_minidata_clean_addr): .word 0x00000000 .global _C_LABEL(xscale_minidata_clean_size) _C_LABEL(xscale_minidata_clean_size): .word 0x00000800 .text .Lxscale_cache_clean_addr: .word _C_LABEL(xscale_cache_clean_addr) .Lxscale_cache_clean_size: .word _C_LABEL(xscale_cache_clean_size) .Lxscale_minidata_clean_addr: .word _C_LABEL(xscale_minidata_clean_addr) .Lxscale_minidata_clean_size: .word _C_LABEL(xscale_minidata_clean_size) #ifdef CACHE_CLEAN_BLOCK_INTR #define XSCALE_CACHE_CLEAN_BLOCK \ mrs r3, cpsr ; \ orr r0, r3, #(PSR_I | PSR_F) ; \ msr cpsr_fsxc, r0 #define XSCALE_CACHE_CLEAN_UNBLOCK \ msr cpsr_fsxc, r3 #else #define XSCALE_CACHE_CLEAN_BLOCK #define XSCALE_CACHE_CLEAN_UNBLOCK #endif /* CACHE_CLEAN_BLOCK_INTR */ #define XSCALE_CACHE_CLEAN_PROLOGUE \ XSCALE_CACHE_CLEAN_BLOCK ; \ ldr r2, .Lxscale_cache_clean_addr ; \ ldmia r2, {r0, r1} ; \ /* \ * BUG ALERT! \ * \ * The XScale core has a strange cache eviction bug, which \ * requires us to use 2x the cache size for the cache clean \ * and for that area to be aligned to 2 * cache size. \ * \ * The work-around is to use 2 areas for cache clean, and to \ * alternate between them whenever this is done. No one knows \ * why the work-around works (mmm!). \ */ \ eor r0, r0, #(DCACHE_SIZE) ; \ str r0, [r2] ; \ add r0, r0, r1 #define XSCALE_CACHE_CLEAN_EPILOGUE \ XSCALE_CACHE_CLEAN_UNBLOCK ENTRY_NP(xscale_cache_syncI) EENTRY_NP(xscale_cache_purgeID) mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */ EENTRY_NP(xscale_cache_cleanID) EENTRY_NP(xscale_cache_purgeD) EENTRY(xscale_cache_cleanD) XSCALE_CACHE_CLEAN_PROLOGUE 1: subs r0, r0, #32 mcr p15, 0, r0, c7, c2, 5 /* allocate cache line */ subs r1, r1, #32 bne 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT(r0) XSCALE_CACHE_CLEAN_EPILOGUE RET EEND(xscale_cache_cleanD) EEND(xscale_cache_purgeD) EEND(xscale_cache_cleanID) EEND(xscale_cache_purgeID) END(xscale_cache_syncI) /* * Clean the mini-data cache. * * It's expected that we only use the mini-data cache for * kernel addresses, so there is no need to purge it on * context switch, and no need to prevent userspace access * while we clean it. 
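A C sketch of XSCALE_CACHE_CLEAN_PROLOGUE plus the clean-by-allocate loop above (names are stand-ins; alloc_dcache_line() represents the "mcr p15, 0, rX, c7, c2, 5" line allocate): the clean area spans twice DCACHE_SIZE, each call flips to the other half, which is the documented work-around for the eviction bug, and the chosen half is then walked downward one line at a time.

#include <stdint.h>
#include <stddef.h>

#define DCACHE_SIZE 0x00008000u
#define LINE_SIZE   32u

static uintptr_t clean_area_addr;   /* mirrors xscale_cache_clean_addr above */

static void
alloc_dcache_line(uintptr_t va)
{
    (void)va;                       /* mcr p15, 0, va, c7, c2, 5 */
}

static void
xscale_cache_clean_sketch(void)
{
    uintptr_t va;
    size_t left;

    /* Flip to the other half of the 2 * DCACHE_SIZE clean area. */
    clean_area_addr ^= DCACHE_SIZE;

    /* Walk that half downward; the real size comes from xscale_cache_clean_size. */
    va = clean_area_addr + DCACHE_SIZE;
    for (left = DCACHE_SIZE; left > 0; left -= LINE_SIZE) {
        va -= LINE_SIZE;
        alloc_dcache_line(va);      /* allocating a line evicts a dirty one */
    }
}
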
*/ ENTRY(xscale_cache_clean_minidata) ldr r2, .Lxscale_minidata_clean_addr ldmia r2, {r0, r1} 1: ldr r3, [r0], #32 subs r1, r1, #32 bne 1b mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r1) END(xscale_cache_clean_minidata) ENTRY(xscale_cache_purgeID_E) mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ CPWAIT(r1) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ CPWAIT_AND_RETURN(r1) END(xscale_cache_purgeID_E) ENTRY(xscale_cache_purgeD_E) mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ CPWAIT(r1) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ CPWAIT_AND_RETURN(r1) END(xscale_cache_purgeD_E) /* * Soft functions */ /* xscale_cache_syncI is identical to xscale_cache_purgeID */ EENTRY(xscale_cache_cleanID_rng) ENTRY(xscale_cache_cleanD_rng) cmp r1, #0x4000 bcs _C_LABEL(xscale_cache_cleanID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) /*END(xscale_cache_cleanID_rng)*/ END(xscale_cache_cleanD_rng) ENTRY(xscale_cache_purgeID_rng) cmp r1, #0x4000 bcs _C_LABEL(xscale_cache_purgeID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscale_cache_purgeID_rng) ENTRY(xscale_cache_purgeD_rng) cmp r1, #0x4000 bcs _C_LABEL(xscale_cache_purgeD) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscale_cache_purgeD_rng) ENTRY(xscale_cache_syncI_rng) cmp r1, #0x4000 bcs _C_LABEL(xscale_cache_syncI) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscale_cache_syncI_rng) ENTRY(xscale_cache_flushD_rng) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c6, 1 /* flush D cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscale_cache_flushD_rng) /* * Context switch. * * These is the CPU-specific parts of the context switcher cpu_switch() * These functions actually perform the TTB reload. * * NOTE: Special calling convention * r1, r4-r13 must be preserved */ ENTRY(xscale_context_switch) /* * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this. * Thus the data cache will contain only kernel data and the * instruction cache will contain only kernel code, and all * kernel mappings are shared by all processes. 
*/ /* Write the TTB */ mcr p15, 0, r0, c2, c0, 0 /* If we have updated the TTB we must flush the TLB */ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */ CPWAIT_AND_RETURN(r0) END(xscale_context_switch) /* * xscale_cpu_sleep * * This is called when there is nothing on any of the run queues. * We go into IDLE mode so that any IRQ or FIQ will awaken us. * * If this is called with anything other than ARM_SLEEP_MODE_IDLE, * ignore it. */ ENTRY(xscale_cpu_sleep) tst r0, #0x00000000 bne 1f mov r0, #0x1 mcr p14, 0, r0, c7, c0, 0 1: RET END(xscale_cpu_sleep) Index: head/sys/arm/arm/cpufunc_asm_xscale_c3.S =================================================================== --- head/sys/arm/arm/cpufunc_asm_xscale_c3.S (revision 308407) +++ head/sys/arm/arm/cpufunc_asm_xscale_c3.S (revision 308408) @@ -1,398 +1,399 @@ /* $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $ */ /*- * Copyright (c) 2007 Olivier Houchard * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ /*- * Copyright (c) 2001 Matt Thomas. * Copyright (c) 1997,1998 Mark Brinicombe. * Copyright (c) 1997 Causality Limited * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Causality Limited. * 4. 
The name of Causality Limited may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * XScale core 3 assembly functions for CPU / MMU / TLB specific operations */ -#include #include __FBSDID("$FreeBSD$"); + +#include /* * Size of the XScale core D-cache. */ #define DCACHE_SIZE 0x00008000 /* * CPWAIT -- Canonical method to wait for CP15 update. * From: Intel 80200 manual, section 2.3.3. * * NOTE: Clobbers the specified temp reg. */ #define CPWAIT_BRANCH \ sub pc, pc, #4 #define CPWAIT(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ mov tmp, tmp /* wait for it to complete */ ;\ CPWAIT_BRANCH /* branch to next insn */ #define CPWAIT_AND_RETURN_SHIFTER lsr #32 #define CPWAIT_AND_RETURN(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ /* Wait for it to complete and branch to the return address */ \ sub pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER #define ARM_USE_L2_CACHE #define L2_CACHE_SIZE 0x80000 #define L2_CACHE_WAYS 8 #define L2_CACHE_LINE_SIZE 32 #define L2_CACHE_SETS (L2_CACHE_SIZE / \ (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE)) #define L1_DCACHE_SIZE 32 * 1024 #define L1_DCACHE_WAYS 4 #define L1_DCACHE_LINE_SIZE 32 #define L1_DCACHE_SETS (L1_DCACHE_SIZE / \ (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE)) #ifdef CACHE_CLEAN_BLOCK_INTR #define XSCALE_CACHE_CLEAN_BLOCK \ stmfd sp!, {r4} ; \ mrs r4, cpsr ; \ orr r0, r4, #(PSR_I | PSR_F) ; \ msr cpsr_fsxc, r0 #define XSCALE_CACHE_CLEAN_UNBLOCK \ msr cpsr_fsxc, r4 ; \ ldmfd sp!, {r4} #else #define XSCALE_CACHE_CLEAN_BLOCK #define XSCALE_CACHE_CLEAN_UNBLOCK #endif /* CACHE_CLEAN_BLOCK_INTR */ ENTRY_NP(xscalec3_cache_syncI) EENTRY_NP(xscalec3_cache_purgeID) mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */ EENTRY_NP(xscalec3_cache_cleanID) EENTRY_NP(xscalec3_cache_purgeD) EENTRY(xscalec3_cache_cleanD) XSCALE_CACHE_CLEAN_BLOCK mov r0, #0 1: mov r1, r0, asl #30 mov r2, #0 2: orr r3, r1, r2, asl #5 mcr p15, 0, r3, c7, c14, 2 /* clean and invalidate */ add r2, r2, #1 cmp r2, #L1_DCACHE_SETS bne 2b add r0, r0, #1 cmp r0, #4 bne 1b CPWAIT(r0) XSCALE_CACHE_CLEAN_UNBLOCK mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ RET EEND(xscalec3_cache_purgeID) EEND(xscalec3_cache_cleanID) EEND(xscalec3_cache_purgeD) EEND(xscalec3_cache_cleanD) END(xscalec3_cache_syncI) ENTRY(xscalec3_cache_purgeID_rng) cmp r1, #0x4000 bcs _C_LABEL(xscalec3_cache_cleanID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c14, 1 /* clean/invalidate L1 D cache entry */ nop mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscalec3_cache_purgeID_rng) ENTRY(xscalec3_cache_syncI_rng) cmp r1, 
#0x4000 bcs _C_LABEL(xscalec3_cache_syncI) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean D cache entry */ mcr p15, 0, r0, c7, c5, 1 /* flush I cache single entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscalec3_cache_syncI_rng) ENTRY(xscalec3_cache_purgeD_rng) cmp r1, #0x4000 bcs _C_LABEL(xscalec3_cache_cleanID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c14, 1 /* Clean and invalidate D cache entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) END(xscalec3_cache_purgeD_rng) ENTRY(xscalec3_cache_cleanID_rng) EENTRY(xscalec3_cache_cleanD_rng) cmp r1, #0x4000 bcs _C_LABEL(xscalec3_cache_cleanID) and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 0, r0, c7, c10, 1 /* clean L1 D cache entry */ nop add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ CPWAIT_AND_RETURN(r0) EEND(xscalec3_cache_cleanD_rng) END(xscalec3_cache_cleanID_rng) ENTRY(xscalec3_l2cache_purge) /* Clean-up the L2 cache */ mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ mov r0, #0 1: mov r1, r0, asl #29 mov r2, #0 2: orr r3, r1, r2, asl #5 mcr p15, 1, r3, c7, c15, 2 add r2, r2, #1 cmp r2, #L2_CACHE_SETS bne 2b add r0, r0, #1 cmp r0, #8 bne 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier CPWAIT(r0) mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ RET END(xscalec3_l2cache_purge) ENTRY(xscalec3_l2cache_clean_rng) mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b CPWAIT(r0) mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c10, 5 CPWAIT_AND_RETURN(r0) END(xscalec3_l2cache_clean_rng) ENTRY(xscalec3_l2cache_purge_rng) mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 1, r0, c7, c11, 1 /* Clean L2 D cache entry */ mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 D cache entry */ add r0, r0, #32 subs r1, r1, #32 bhi 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c10, 5 CPWAIT_AND_RETURN(r0) END(xscalec3_l2cache_purge_rng) ENTRY(xscalec3_l2cache_flush_rng) mcr p15, 0, r0, c7, c10, 5 /* Data memory barrier */ and r2, r0, #0x1f add r1, r1, r2 bic r0, r0, #0x1f 1: mcr p15, 1, r0, c7, c7, 1 /* Invalidate L2 cache line */ add r0, r0, #32 subs r1, r1, #32 bhi 1b mcr p15, 0, r0, c7, c10, 4 @ data write barrier mcr p15, 0, r0, c7, c10, 5 CPWAIT_AND_RETURN(r0) END(xscalec3_l2cache_flush_rng) /* * Functions to set the MMU Translation Table Base register * * We need to clean and flush the cache as it uses virtual * addresses that are about to change. 
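The whole-cache operations in this file iterate by set and way rather than by address. Below is a C sketch of the xscalec3_l2cache_purge loop above, with clean_inv_set_way() standing in for "mcr p15, 1, r3, c7, c15, 2"; the L1 loop in xscalec3_cache_cleanD works the same way with 4 ways (way index shifted to bit 30) and 256 sets.

#include <stdint.h>

#define L2_CACHE_WAYS   8
#define L2_CACHE_SETS   2048    /* 512 KiB / (8 ways * 32-byte lines) */
#define L2_WAY_SHIFT    29      /* way index lives in the top bits */
#define L2_SET_SHIFT    5       /* set index starts above the 32-byte line offset */

static void
clean_inv_set_way(uint32_t setway)
{
    (void)setway;               /* mcr p15, 1, setway, c7, c15, 2 */
}

static void
l2cache_purge_sketch(void)
{
    uint32_t way, set;

    for (way = 0; way < L2_CACHE_WAYS; way++)
        for (set = 0; set < L2_CACHE_SETS; set++)
            clean_inv_set_way((way << L2_WAY_SHIFT) | (set << L2_SET_SHIFT));
}
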
*/ ENTRY(xscalec3_setttb) #ifdef CACHE_CLEAN_BLOCK_INTR mrs r3, cpsr orr r1, r3, #(PSR_I | PSR_F) msr cpsr_fsxc, r1 #endif stmfd sp!, {r0-r3, lr} bl _C_LABEL(xscalec3_cache_cleanID) mcr p15, 0, r0, c7, c5, 0 /* invalidate I$ and BTB */ mcr p15, 0, r0, c7, c10, 4 /* drain write and fill buffer */ CPWAIT(r0) ldmfd sp!, {r0-r3, lr} #ifdef ARM_USE_L2_CACHE orr r0, r0, #0x18 /* cache the page table in L2 */ #endif /* Write the TTB */ mcr p15, 0, r0, c2, c0, 0 /* If we have updated the TTB we must flush the TLB */ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLB */ CPWAIT(r0) #ifdef CACHE_CLEAN_BLOCK_INTR msr cpsr_fsxc, r3 #endif RET END(xscalec3_setttb) /* * Context switch. * * These is the CPU-specific parts of the context switcher cpu_switch() * These functions actually perform the TTB reload. * * NOTE: Special calling convention * r1, r4-r13 must be preserved */ ENTRY(xscalec3_context_switch) /* * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this. * Thus the data cache will contain only kernel data and the * instruction cache will contain only kernel code, and all * kernel mappings are shared by all processes. */ #ifdef ARM_USE_L2_CACHE orr r0, r0, #0x18 /* Cache the page table in L2 */ #endif /* Write the TTB */ mcr p15, 0, r0, c2, c0, 0 /* If we have updated the TTB we must flush the TLB */ mcr p15, 0, r0, c8, c7, 0 /* flush the I+D tlb */ CPWAIT_AND_RETURN(r0) END(xscalec3_context_switch) Index: head/sys/arm/arm/fiq_subr.S =================================================================== --- head/sys/arm/arm/fiq_subr.S (revision 308407) +++ head/sys/arm/arm/fiq_subr.S (revision 308408) @@ -1,93 +1,94 @@ /* $NetBSD: fiq_subr.S,v 1.3 2002/04/12 18:50:31 thorpej Exp $ */ /*- * Copyright (c) 2001 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ -#include #include __FBSDID("$FreeBSD$"); + +#include /* * MODE_CHANGE_NOP should be inserted between a mode change and a * banked register (R8--R15) access. */ #if defined(CPU_ARM2) || defined(CPU_ARM250) #define MODE_CHANGE_NOP mov r0, r0 #else #define MODE_CHANGE_NOP /* Data sheet says ARM3 doesn't need it */ #endif #define SWITCH_TO_FIQ_MODE \ mrs r2, cpsr ; \ mov r3, r2 ; \ bic r2, r2, #(PSR_MODE) ; \ orr r2, r2, #(PSR_FIQ32_MODE) ; \ msr cpsr_fsxc, r2 #define BACK_TO_SVC_MODE \ msr cpsr_fsxc, r3 /* * fiq_getregs: * * Fetch the FIQ mode banked registers into the fiqhandler * structure. */ ENTRY(fiq_getregs) SWITCH_TO_FIQ_MODE stmia r0, {r8-r13} BACK_TO_SVC_MODE RET END(fiq_getregs) /* * fiq_setregs: * * Load the FIQ mode banked registers from the fiqhandler * structure. */ ENTRY(fiq_setregs) SWITCH_TO_FIQ_MODE ldmia r0, {r8-r13} BACK_TO_SVC_MODE RET END(fiq_setregs) Index: head/sys/arm/arm/setstack.s =================================================================== --- head/sys/arm/arm/setstack.s (revision 308407) +++ head/sys/arm/arm/setstack.s (revision 308408) @@ -1,94 +1,95 @@ /* $NetBSD: setstack.S,v 1.1 2001/07/28 13:28:03 chris Exp $ */ /*- * Copyright (c) 1994 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * setstack.S * * Miscellaneous routine to play with the stack pointer in different CPU modes * * Eventually this routine can be inline assembly. * * Created : 17/09/94 * * Based of kate/display/setstack.s * */ -#include #include __FBSDID("$FreeBSD$"); + +#include /* To set the stack pointer for a particular mode we must switch * to that mode update the banked r13 and then switch back. 
* This routine provides an easy way of doing this for any mode * * r0 = CPU mode * r1 = stackptr */ ENTRY(set_stackptr) mrs r3, cpsr /* Switch to the appropriate mode */ bic r2, r3, #(PSR_MODE) orr r2, r2, r0 msr cpsr_fsxc, r2 mov sp, r1 /* Set the stack pointer */ msr cpsr_fsxc, r3 /* Restore the old mode */ mov pc, lr /* Exit */ END(set_stackptr) /* To get the stack pointer for a particular mode we must switch * to that mode copy the banked r13 and then switch back. * This routine provides an easy way of doing this for any mode * * r0 = CPU mode */ ENTRY(get_stackptr) mrs r3, cpsr /* Switch to the appropriate mode */ bic r2, r3, #(PSR_MODE) orr r2, r2, r0 msr cpsr_fsxc, r2 mov r0, sp /* Set the stack pointer */ msr cpsr_fsxc, r3 /* Restore the old mode */ mov pc, lr /* Exit */ END(get_stackptr) /* End of setstack.S */
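Hedged usage sketch for set_stackptr()/get_stackptr(): early in bring-up a kernel typically points each exception mode's banked SP at its own small stack. The PSR_*32_MODE constants are the ARM mode numbers from machine/armreg.h, as used elsewhere in these files; the stack symbols, sizes, and the init_exception_stacks() wrapper are illustrative only, not the names FreeBSD actually uses.

#include <machine/armreg.h>

/* Prototypes matching the register usage documented above (r0 = mode, r1 = sp). */
extern void set_stackptr(unsigned int mode, unsigned int address);
extern unsigned int get_stackptr(unsigned int mode);

#define EXC_STACK_SIZE 4096

static char irq_stack[EXC_STACK_SIZE];
static char abt_stack[EXC_STACK_SIZE];
static char und_stack[EXC_STACK_SIZE];

static void
init_exception_stacks(void)
{
    /* Full-descending stacks: pass the address just past the top. */
    set_stackptr(PSR_IRQ32_MODE, (unsigned int)&irq_stack[EXC_STACK_SIZE]);
    set_stackptr(PSR_ABT32_MODE, (unsigned int)&abt_stack[EXC_STACK_SIZE]);
    set_stackptr(PSR_UND32_MODE, (unsigned int)&und_stack[EXC_STACK_SIZE]);
}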