Index: head/sys/arm/arm/exception.S =================================================================== --- head/sys/arm/arm/exception.S (revision 284114) +++ head/sys/arm/arm/exception.S (revision 284115) @@ -1,472 +1,473 @@ /* $NetBSD: exception.S,v 1.13 2003/10/31 16:30:15 scw Exp $ */ /*- * Copyright (c) 1994-1997 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * exception.S * * Low level handlers for exception vectors * * Created : 24/09/94 * * Based on kate/display/abort.s * */ #include "assym.s" #include "opt_kdtrace.h" +#include #include #include #include __FBSDID("$FreeBSD$"); #ifdef KDTRACE_HOOKS .bss .align 4 .global _C_LABEL(dtrace_invop_calltrap_addr) _C_LABEL(dtrace_invop_calltrap_addr): .word 0 .word 0 #endif .text .align 2 /* * ASM macros for pushing and pulling trapframes from the stack * * These macros are used to handle the irqframe and trapframe structures * defined above. */ /* * PUSHFRAME - macro to push a trap frame on the stack in the current mode * Since the current mode is used, the SVC lr field is not defined. 
* * NOTE: r13 and r14 are stored separately as a work around for the * SA110 rev 2 STM^ bug */ -#ifdef ARM_TP_ADDRESS +#if __ARM_ARCH < 6 #define PUSHFRAME \ sub sp, sp, #4; /* Align the stack */ \ str lr, [sp, #-4]!; /* Push the return address */ \ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]!; \ ldr r0, =ARM_RAS_START; \ mov r1, #0; \ str r1, [r0]; \ mov r1, #0xffffffff; \ str r1, [r0, #4]; #else #define PUSHFRAME \ sub sp, sp, #4; /* Align the stack */ \ str lr, [sp, #-4]!; /* Push the return address */ \ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]!; #endif /* * PULLFRAME - macro to pull a trap frame from the stack in the current mode * Since the current mode is used, the SVC lr field is ignored. */ -#ifdef ARM_TP_ADDRESS +#if __ARM_ARCH < 6 #define PULLFRAME \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*17); /* Adjust the stack pointer */ \ ldr lr, [sp], #4; /* Pull the return address */ \ add sp, sp, #4 /* Align the stack */ #else #define PULLFRAME \ ldr r0, [sp], #4 ; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; \ clrex; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*17); /* Adjust the stack pointer */ \ ldr lr, [sp], #4; /* Pull the return address */ \ add sp, sp, #4 /* Align the stack */ #endif /* * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode * This should only be used if the processor is not currently in SVC32 * mode. The processor mode is switched to SVC mode and the trap frame is * stored. The SVC lr field is used to store the previous value of * lr in SVC mode. 
* * NOTE: r13 and r14 are stored separately as a work around for the * SA110 rev 2 STM^ bug */ -#ifdef ARM_TP_ADDRESS +#if __ARM_ARCH < 6 #define PUSHFRAMEINSVC \ stmdb sp, {r0-r3}; /* Save 4 registers */ \ mov r0, lr; /* Save xxx32 r14 */ \ mov r1, sp; /* Save xxx32 sp */ \ mrs r3, spsr; /* Save xxx32 spsr */ \ mrs r2, cpsr; /* Get the CPSR */ \ bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \ orr r2, r2, #(PSR_SVC32_MODE); \ msr cpsr_c, r2; /* Punch into SVC mode */ \ mov r2, sp; /* Save SVC sp */ \ bic sp, sp, #7; /* Align sp to an 8-byte addrress */ \ sub sp, sp, #(4 * 17); /* Pad trapframe to keep alignment */ \ /* and for dtrace to emulate push/pop */ \ str r0, [sp, #-4]!; /* Push return address */ \ str lr, [sp, #-4]!; /* Push SVC lr */ \ str r2, [sp, #-4]!; /* Push SVC sp */ \ msr spsr_fsxc, r3; /* Restore correct spsr */ \ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ ldr r5, =ARM_RAS_START; /* Check if there's any RAS */ \ ldr r4, [r5, #4]; /* reset it to point at the */ \ cmp r4, #0xffffffff; /* end of memory if necessary; */ \ movne r1, #0xffffffff; /* leave value in r4 for later */ \ strne r1, [r5, #4]; /* comparision against PC. */ \ ldr r3, [r5]; /* Retrieve global RAS_START */ \ cmp r3, #0; /* and reset it if non-zero. */ \ movne r1, #0; /* If non-zero RAS_START and */ \ strne r1, [r5]; /* PC was lower than RAS_END, */ \ ldrne r1, [r0, #16]; /* adjust the saved PC so that */ \ cmpne r4, r1; /* execution later resumes at */ \ strhi r3, [r0, #16]; /* the RAS_START location. */ \ mrs r0, spsr; \ str r0, [sp, #-4]! #else #define PUSHFRAMEINSVC \ stmdb sp, {r0-r3}; /* Save 4 registers */ \ mov r0, lr; /* Save xxx32 r14 */ \ mov r1, sp; /* Save xxx32 sp */ \ mrs r3, spsr; /* Save xxx32 spsr */ \ mrs r2, cpsr; /* Get the CPSR */ \ bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \ orr r2, r2, #(PSR_SVC32_MODE); \ msr cpsr_c, r2; /* Punch into SVC mode */ \ mov r2, sp; /* Save SVC sp */ \ bic sp, sp, #7; /* Align sp to an 8-byte addrress */ \ sub sp, sp, #(4 * 17); /* Pad trapframe to keep alignment */ \ /* and for dtrace to emulate push/pop */ \ str r0, [sp, #-4]!; /* Push return address */ \ str lr, [sp, #-4]!; /* Push SVC lr */ \ str r2, [sp, #-4]!; /* Push SVC sp */ \ msr spsr_fsxc, r3; /* Restore correct spsr */ \ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \ stmia sp, {r0-r12}; /* Push the user mode registers */ \ add r0, sp, #(4*13); /* Adjust the stack pointer */ \ stmia r0, {r13-r14}^; /* Push the user mode registers */ \ mov r0, r0; /* NOP for previous instruction */ \ mrs r0, spsr; /* Put the SPSR on the stack */ \ str r0, [sp, #-4]! #endif /* * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack * in SVC32 mode and restore the saved processor mode and PC. * This should be used when the SVC lr register needs to be restored on * exit. 
*/ -#ifdef ARM_TP_ADDRESS +#if __ARM_ARCH < 6 #define PULLFRAMEFROMSVCANDEXIT \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; /* restore SPSR */ \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*15); /* Adjust the stack pointer */ \ ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */ #else #define PULLFRAMEFROMSVCANDEXIT \ ldr r0, [sp], #4; /* Get the SPSR from stack */ \ msr spsr_fsxc, r0; /* restore SPSR */ \ clrex; \ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ mov r0, r0; /* NOP for previous instruction */ \ add sp, sp, #(4*15); /* Adjust the stack pointer */ \ ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */ #endif /* * Unwind hints so we can unwind past functions that use * PULLFRAMEFROMSVCANDEXIT. They are run in reverse order. * As the last thing we do is restore the stack pointer * we can ignore the padding at the end of struct trapframe. */ #define UNWINDSVCFRAME \ .save {r13-r15}; /* Restore sp, lr, pc */ \ .pad #(2*4); /* Skip user sp and lr */ \ .save {r0-r12}; /* Restore r0-r12 */ \ .pad #(4) /* Skip spsr */ #define DO_AST \ ldr r0, [sp]; /* Get the SPSR from stack */ \ mrs r4, cpsr; /* save CPSR */ \ orr r1, r4, #(PSR_I|PSR_F); \ msr cpsr_c, r1; /* Disable interrupts */ \ and r0, r0, #(PSR_MODE); /* Returning to USR mode? */ \ teq r0, #(PSR_USR32_MODE); \ bne 2f; /* Nope, get out now */ \ bic r4, r4, #(PSR_I|PSR_F); \ 1: GET_CURTHREAD_PTR(r5); \ ldr r1, [r5, #(TD_FLAGS)]; \ and r1, r1, #(TDF_ASTPENDING|TDF_NEEDRESCHED); \ teq r1, #0; \ beq 2f; /* Nope. Just bail */ \ msr cpsr_c, r4; /* Restore interrupts */ \ mov r0, sp; \ bl _C_LABEL(ast); /* ast(frame) */ \ orr r0, r4, #(PSR_I|PSR_F); \ msr cpsr_c, r0; \ b 1b; \ 2: /* * Entry point for a Software Interrupt (SWI). * * The hardware switches to svc32 mode on a swi, so we're already on the * right stack; just build a trapframe and call the handler. */ ASENTRY_NP(swi_entry) PUSHFRAME /* Build the trapframe on the */ mov r0, sp /* scv32 stack, pass it to the */ bl _C_LABEL(swi_handler) /* swi handler. */ /* * The fork_trampoline() code in swtch.S aranges for the MI fork_exit() * to return to swi_exit here, to return to userland. The net effect is * that a newly created thread appears to return from a SWI just like * the parent thread that created it. */ ASEENTRY_NP(swi_exit) DO_AST /* Handle pending signals. */ PULLFRAME /* Deallocate trapframe. */ movs pc, lr /* Return to userland. */ STOP_UNWINDING /* Don't unwind into user mode. */ EEND(swi_exit) END(swi_entry) /* * Standard exception exit handler. * * This is used to return from all exceptions except SWI. It uses DO_AST and * PULLFRAMEFROMSVCANDEXIT and can only be called if the exception entry code * used PUSHFRAMEINSVC. * * If the return is to user mode, this uses DO_AST to deliver any pending * signals and/or handle TDF_NEEDRESCHED first. */ ASENTRY_NP(exception_exit) DO_AST /* Handle pending signals. */ PULLFRAMEFROMSVCANDEXIT /* Return. */ UNWINDSVCFRAME /* Special unwinding for exceptions. */ END(exception_exit) /* * Entry point for a Prefetch Abort exception. * * The hardware switches to the abort mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(prefetch_abort_entry) #ifdef __XSCALE__ nop /* Make absolutely sure any pending */ nop /* imprecise aborts have occurred. */ #endif sub lr, lr, #4 /* Adjust the lr. 
Transition to scv32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ mov r1, #1 /* Type flag */ b _C_LABEL(abort_handler) END(prefetch_abort_entry) /* * Entry point for a Data Abort exception. * * The hardware switches to the abort mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(data_abort_entry) #ifdef __XSCALE__ nop /* Make absolutely sure any pending */ nop /* imprecise aborts have occurred. */ #endif sub lr, lr, #8 /* Adjust the lr. Transition to scv32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Exception exit routine */ mov r0, sp /* Trapframe to the handler */ mov r1, #0 /* Type flag */ b _C_LABEL(abort_handler) END(data_abort_entry) /* * Entry point for an Undefined Instruction exception. * * The hardware switches to the undefined mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the undefined mode stack). */ ASENTRY_NP(undefined_entry) PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ b undefinedinstruction /* trapframe to the handler. */ END(undefined_entry) /* * Entry point for a normal IRQ. * * The hardware switches to the IRQ mode stack; we switch to svc32 before * calling the handler, then return directly to the original mode/stack * on exit (without transitioning back through the IRQ mode stack). */ ASENTRY_NP(irq_entry) sub lr, lr, #4 /* Adjust the lr. Transition to scv32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ mov r0, sp /* exception exit routine. Pass the */ b _C_LABEL(arm_irq_handler)/* trapframe to the handler. */ END(irq_entry) /* * Entry point for an FIQ interrupt. * * We don't currently support FIQ handlers very much. Something can * install itself in the FIQ vector using code (that may or may not work * these days) in fiq.c. If nobody does that and an FIQ happens, this * default handler just disables FIQs and otherwise ignores it. */ ASENTRY_NP(fiq_entry) mrs r8, cpsr /* FIQ handling isn't supported, */ bic r8, #(PSR_F) /* just disable FIQ and return. */ msr cpsr_c, r8 /* The r8 we trash here is the */ subs pc, lr, #4 /* banked FIQ-mode r8. */ END(fiq_entry) /* * Entry point for an Address Exception exception. * This is an arm26 exception that should never happen. */ ASENTRY_NP(addr_exception_entry) mov r3, lr mrs r2, spsr mrs r1, cpsr adr r0, Laddr_exception_msg b _C_LABEL(panic) Laddr_exception_msg: .asciz "Address Exception CPSR=0x%08x SPSR=0x%08x LR=0x%08x\n" .balign 4 END(addr_exception_entry) /* * Entry point for the system Reset vector. * This should never happen, so panic. */ ASENTRY_NP(reset_entry) mov r1, lr adr r0, Lreset_panicmsg b _C_LABEL(panic) /* NOTREACHED */ Lreset_panicmsg: .asciz "Reset vector called, LR = 0x%08x" .balign 4 END(reset_entry) /* * page0 and page0_data -- An image of the ARM vectors which is copied to * the ARM vectors page (high or low) as part of CPU initialization. The * code that does the copy assumes that page0_data holds one 32-bit word * of data for each of the predefined ARM vectors. 
It also assumes that * page0_data follows the vectors in page0, but other stuff can appear * between the two. We currently leave room between the two for some fiq * handler code to be copied in. */ .global _C_LABEL(page0), _C_LABEL(page0_data) _C_LABEL(page0): ldr pc, .Lreset_entry ldr pc, .Lundefined_entry ldr pc, .Lswi_entry ldr pc, .Lprefetch_abort_entry ldr pc, .Ldata_abort_entry ldr pc, .Laddr_exception_entry ldr pc, .Lirq_entry .fiqv: ldr pc, .Lfiq_entry .space 256 /* room for some fiq handler code */ _C_LABEL(page0_data): .Lreset_entry: .word reset_entry .Lundefined_entry: .word undefined_entry .Lswi_entry: .word swi_entry .Lprefetch_abort_entry: .word prefetch_abort_entry .Ldata_abort_entry: .word data_abort_entry .Laddr_exception_entry: .word addr_exception_entry .Lirq_entry: .word irq_entry .Lfiq_entry: .word fiq_entry /* * These items are used by the code in fiq.c to install what it calls the * "null" handler. It's actually our default vector entry that just jumps * to the default handler which just disables FIQs and returns. */ .global _C_LABEL(fiq_nullhandler_code), _C_LABEL(fiq_nullhandler_size) _C_LABEL(fiq_nullhandler_code): .word .fiqv _C_LABEL(fiq_nullhandler_size): .word 4 Index: head/sys/arm/arm/genassym.c =================================================================== --- head/sys/arm/arm/genassym.c (revision 284114) +++ head/sys/arm/arm/genassym.c (revision 284115) @@ -1,164 +1,166 @@ /*- * Copyright (c) 2004 Olivier Houchard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include + +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include ASSYM(KERNBASE, KERNBASE); ASSYM(PCB_NOALIGNFLT, PCB_NOALIGNFLT); #ifdef ARM_NEW_PMAP ASSYM(CPU_ASID_KERNEL,CPU_ASID_KERNEL); #endif ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); #ifndef ARM_NEW_PMAP ASSYM(PCB_DACR, offsetof(struct pcb, pcb_dacr)); #endif ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(PCB_PAGEDIR, offsetof(struct pcb, pcb_pagedir)); #ifndef ARM_NEW_PMAP ASSYM(PCB_L1VEC, offsetof(struct pcb, pcb_l1vec)); ASSYM(PCB_PL1VEC, offsetof(struct pcb, pcb_pl1vec)); #endif ASSYM(PCB_R4, offsetof(struct pcb, pcb_regs.sf_r4)); ASSYM(PCB_R5, offsetof(struct pcb, pcb_regs.sf_r5)); ASSYM(PCB_R6, offsetof(struct pcb, pcb_regs.sf_r6)); ASSYM(PCB_R7, offsetof(struct pcb, pcb_regs.sf_r7)); ASSYM(PCB_R8, offsetof(struct pcb, pcb_regs.sf_r8)); ASSYM(PCB_R9, offsetof(struct pcb, pcb_regs.sf_r9)); ASSYM(PCB_R10, offsetof(struct pcb, pcb_regs.sf_r10)); ASSYM(PCB_R11, offsetof(struct pcb, pcb_regs.sf_r11)); ASSYM(PCB_R12, offsetof(struct pcb, pcb_regs.sf_r12)); ASSYM(PCB_SP, offsetof(struct pcb, pcb_regs.sf_sp)); ASSYM(PCB_LR, offsetof(struct pcb, pcb_regs.sf_lr)); ASSYM(PCB_PC, offsetof(struct pcb, pcb_regs.sf_pc)); ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(M_LEN, offsetof(struct mbuf, m_len)); ASSYM(M_DATA, offsetof(struct mbuf, m_data)); ASSYM(M_NEXT, offsetof(struct mbuf, m_next)); ASSYM(IP_SRC, offsetof(struct ip, ip_src)); ASSYM(IP_DST, offsetof(struct ip, ip_dst)); ASSYM(CF_SETTTB, offsetof(struct cpu_functions, cf_setttb)); ASSYM(CF_CONTROL, offsetof(struct cpu_functions, cf_control)); ASSYM(CF_CONTEXT_SWITCH, offsetof(struct cpu_functions, cf_context_switch)); ASSYM(CF_DCACHE_WB_RANGE, offsetof(struct cpu_functions, cf_dcache_wb_range)); ASSYM(CF_L2CACHE_WB_RANGE, offsetof(struct cpu_functions, cf_l2cache_wb_range)); ASSYM(CF_IDCACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_idcache_wbinv_all)); ASSYM(CF_L2CACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_l2cache_wbinv_all)); ASSYM(CF_TLB_FLUSHID_SE, offsetof(struct cpu_functions, cf_tlb_flushID_SE)); ASSYM(CF_ICACHE_SYNC, offsetof(struct cpu_functions, cf_icache_sync_all)); ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap)); ASSYM(V_SOFT, offsetof(struct vmmeter, v_soft)); ASSYM(V_INTR, offsetof(struct vmmeter, v_intr)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_FRAME, offsetof(struct thread, td_frame)); ASSYM(TD_MD, offsetof(struct thread, td_md)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); ASSYM(MD_TP, offsetof(struct mdthread, md_tp)); ASSYM(MD_RAS_START, offsetof(struct mdthread, md_ras_start)); ASSYM(MD_RAS_END, offsetof(struct mdthread, md_ras_end)); ASSYM(TF_R0, offsetof(struct trapframe, tf_r0)); ASSYM(TF_R1, offsetof(struct trapframe, tf_r1)); ASSYM(TF_PC, offsetof(struct trapframe, tf_pc)); ASSYM(P_PID, offsetof(struct proc, p_pid)); ASSYM(P_FLAG, offsetof(struct proc, p_flag)); ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc)); -#ifdef ARM_TP_ADDRESS +#if __ARM_ARCH < 6 ASSYM(ARM_TP_ADDRESS, ARM_TP_ADDRESS); ASSYM(ARM_RAS_START, ARM_RAS_START); ASSYM(ARM_RAS_END, ARM_RAS_END); #endif #ifdef VFP ASSYM(PCB_VFPSTATE, 
offsetof(struct pcb, pcb_vfpstate)); ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap)); #endif ASSYM(PAGE_SIZE, PAGE_SIZE); ASSYM(PMAP_DOMAIN_KERNEL, PMAP_DOMAIN_KERNEL); #ifdef PMAP_INCLUDE_PTE_SYNC ASSYM(PMAP_INCLUDE_PTE_SYNC, 1); #endif ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(P_TRACED, P_TRACED); ASSYM(P_SIGEVENT, P_SIGEVENT); ASSYM(P_PROFIL, P_PROFIL); ASSYM(TRAPFRAMESIZE, sizeof(struct trapframe)); ASSYM(MAXCOMLEN, MAXCOMLEN); ASSYM(MAXCPU, MAXCPU); ASSYM(_NCPUWORDS, _NCPUWORDS); ASSYM(NIRQ, NIRQ); ASSYM(PCPU_SIZE, sizeof(struct pcpu)); ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace)); ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap)); ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active)); ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid)); ASSYM(DCACHE_LINE_SIZE, offsetof(struct cpuinfo, dcache_line_size)); ASSYM(DCACHE_LINE_MASK, offsetof(struct cpuinfo, dcache_line_mask)); ASSYM(ICACHE_LINE_SIZE, offsetof(struct cpuinfo, icache_line_size)); ASSYM(ICACHE_LINE_MASK, offsetof(struct cpuinfo, icache_line_mask)); Index: head/sys/arm/arm/swtch.S =================================================================== --- head/sys/arm/arm/swtch.S (revision 284114) +++ head/sys/arm/arm/swtch.S (revision 284115) @@ -1,821 +1,821 @@ /* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */ /*- * Copyright 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Steve C. Woodford for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * cpuswitch.S * * cpu switching functions * * Created : 15/10/94 * */ #include "assym.s" #include "opt_sched.h" #include #include #include #include #include __FBSDID("$FreeBSD$"); #if __ARM_ARCH >= 6 && defined(SMP) #define GET_PCPU(tmp, tmp2) \ mrc p15, 0, tmp, c0, c0, 5; \ and tmp, tmp, #0xf; \ ldr tmp2, .Lcurpcpu+4; \ mul tmp, tmp, tmp2; \ ldr tmp2, .Lcurpcpu; \ add tmp, tmp, tmp2; #else #define GET_PCPU(tmp, tmp2) \ ldr tmp, .Lcurpcpu #endif #ifdef VFP .fpu vfp /* allow VFP instructions */ #endif .Lcurpcpu: .word _C_LABEL(__pcpu) .word PCPU_SIZE .Lblocked_lock: .word _C_LABEL(blocked_lock) #ifndef ARM_NEW_PMAP #define DOMAIN_CLIENT 0x01 .Lcpufuncs: .word _C_LABEL(cpufuncs) /* * cpu_throw(oldtd, newtd) * * Remove current thread state, then select the next thread to run * and load its state. * r0 = oldtd * r1 = newtd */ ENTRY(cpu_throw) mov r5, r1 /* * r0 = oldtd * r5 = newtd */ #ifdef VFP /* This thread is dying, disable */ bl _C_LABEL(vfp_discard) /* VFP without preserving state. */ #endif GET_PCPU(r7, r9) ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */ /* Switch to lwp0 context */ ldr r9, .Lcpufuncs #if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT) mov lr, pc ldr pc, [r9, #CF_IDCACHE_WBINV_ALL] #endif ldr r0, [r7, #(PCB_PL1VEC)] ldr r1, [r7, #(PCB_DACR)] /* * r0 = Pointer to L1 slot for vector_page (or NULL) * r1 = lwp0's DACR * r5 = lwp0 * r7 = lwp0's PCB * r9 = cpufuncs */ /* * Ensure the vector table is accessible by fixing up lwp0's L1 */ cmp r0, #0 /* No need to fixup vector table? */ ldrne r3, [r0] /* But if yes, fetch current value */ ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */ mcr p15, 0, r1, c3, c0, 0 /* Update DACR for lwp0's context */ cmpne r3, r2 /* Stuffing the same value? */ strne r2, [r0] /* Store if not. */ #ifdef PMAP_INCLUDE_PTE_SYNC /* * Need to sync the cache to make sure that last store is * visible to the MMU. 
*/ movne r1, #4 movne lr, pc ldrne pc, [r9, #CF_DCACHE_WB_RANGE] #endif /* PMAP_INCLUDE_PTE_SYNC */ /* * Note: We don't do the same optimisation as cpu_switch() with * respect to avoiding flushing the TLB if we're switching to * the same L1 since this process' VM space may be about to go * away, so we don't want *any* turds left in the TLB. */ /* Switch the memory to the new process */ ldr r0, [r7, #(PCB_PAGEDIR)] mov lr, pc ldr pc, [r9, #CF_CONTEXT_SWITCH] GET_PCPU(r6, r4) /* Hook in a new pcb */ str r7, [r6, #PC_CURPCB] /* We have a new curthread now so make a note it */ str r5, [r6, #PC_CURTHREAD] -#ifndef ARM_TP_ADDRESS +#if __ARM_ARCH >= 6 mcr p15, 0, r5, c13, c0, 4 #endif /* Set the new tp */ ldr r6, [r5, #(TD_MD + MD_TP)] -#ifdef ARM_TP_ADDRESS +#if __ARM_ARCH >= 6 + mcr p15, 0, r6, c13, c0, 3 +#else ldr r4, =ARM_TP_ADDRESS str r6, [r4] ldr r6, [r5, #(TD_MD + MD_RAS_START)] str r6, [r4, #4] /* ARM_RAS_START */ ldr r6, [r5, #(TD_MD + MD_RAS_END)] str r6, [r4, #8] /* ARM_RAS_END */ -#else - mcr p15, 0, r6, c13, c0, 3 #endif /* Restore all the saved registers and exit */ add r3, r7, #PCB_R4 ldmia r3, {r4-r12, sp, pc} END(cpu_throw) /* * cpu_switch(oldtd, newtd, lock) * * Save the current thread state, then select the next thread to run * and load its state. * r0 = oldtd * r1 = newtd * r2 = lock (new lock for old thread) */ ENTRY(cpu_switch) /* Interrupts are disabled. */ /* Save all the registers in the old thread's pcb. */ ldr r3, [r0, #(TD_PCB)] /* Restore all the saved registers and exit */ add r3, #(PCB_R4) stmia r3, {r4-r12, sp, lr, pc} mov r6, r2 /* Save the mutex */ /* rem: r0 = old lwp */ /* rem: interrupts are disabled */ /* Process is now on a processor. */ /* We have a new curthread now so make a note it */ GET_PCPU(r7, r2) str r1, [r7, #PC_CURTHREAD] -#ifndef ARM_TP_ADDRESS +#if __ARM_ARCH >= 6 mcr p15, 0, r1, c13, c0, 4 #endif /* Hook in a new pcb */ ldr r2, [r1, #TD_PCB] str r2, [r7, #PC_CURPCB] /* Stage two : Save old context */ /* Get the user structure for the old thread. */ ldr r2, [r0, #(TD_PCB)] mov r4, r0 /* Save the old thread. */ -#ifdef ARM_TP_ADDRESS +#if __ARM_ARCH >= 6 + /* + * Set new tp. No need to store the old one first, userland can't + * change it directly on armv6. + */ + ldr r9, [r1, #(TD_MD + MD_TP)] + mcr p15, 0, r9, c13, c0, 3 +#else /* Store the old tp; userland can change it on armv4. */ ldr r3, =ARM_TP_ADDRESS ldr r9, [r3] str r9, [r0, #(TD_MD + MD_TP)] ldr r9, [r3, #4] str r9, [r0, #(TD_MD + MD_RAS_START)] ldr r9, [r3, #8] str r9, [r0, #(TD_MD + MD_RAS_END)] /* Set the new tp */ ldr r9, [r1, #(TD_MD + MD_TP)] str r9, [r3] ldr r9, [r1, #(TD_MD + MD_RAS_START)] str r9, [r3, #4] ldr r9, [r1, #(TD_MD + MD_RAS_END)] str r9, [r3, #8] -#else - /* - * Set new tp. No need to store the old one first, userland can't - * change it directly on armv6. - */ - ldr r9, [r1, #(TD_MD + MD_TP)] - mcr p15, 0, r9, c13, c0, 3 #endif /* Get the user structure for the new process in r9 */ ldr r9, [r1, #(TD_PCB)] /* rem: r2 = old PCB */ /* rem: r9 = new PCB */ /* rem: interrupts are enabled */ #ifdef VFP fmrx r0, fpexc /* If the VFP is enabled */ tst r0, #(VFPEXC_EN) /* the current thread has */ movne r1, #1 /* used it, so go save */ addne r0, r2, #(PCB_VFPSTATE) /* the state into the PCB */ blne _C_LABEL(vfp_store) /* and disable the VFP. */ #endif /* r0-r3 now free! 
*/ /* Third phase : restore saved context */ /* rem: r2 = old PCB */ /* rem: r9 = new PCB */ ldr r5, [r9, #(PCB_DACR)] /* r5 = new DACR */ mov r2, #DOMAIN_CLIENT cmp r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */ beq .Lcs_context_switched /* Yup. Don't flush cache */ mrc p15, 0, r0, c3, c0, 0 /* r0 = old DACR */ /* * Get the new L1 table pointer into r11. If we're switching to * an LWP with the same address space as the outgoing one, we can * skip the cache purge and the TTB load. * * To avoid data dep stalls that would happen anyway, we try * and get some useful work done in the mean time. */ mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */ ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */ teq r10, r11 /* Same L1? */ cmpeq r0, r5 /* Same DACR? */ beq .Lcs_context_switched /* yes! */ #if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT) /* * Definately need to flush the cache. */ ldr r1, .Lcpufuncs mov lr, pc ldr pc, [r1, #CF_IDCACHE_WBINV_ALL] #endif .Lcs_cache_purge_skipped: /* rem: r6 = lock */ /* rem: r9 = new PCB */ /* rem: r10 = old L1 */ /* rem: r11 = new L1 */ mov r2, #0x00000000 ldr r7, [r9, #(PCB_PL1VEC)] /* * Ensure the vector table is accessible by fixing up the L1 */ cmp r7, #0 /* No need to fixup vector table? */ ldrne r2, [r7] /* But if yes, fetch current value */ ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */ mcr p15, 0, r5, c3, c0, 0 /* Update DACR for new context */ cmpne r2, r0 /* Stuffing the same value? */ #ifndef PMAP_INCLUDE_PTE_SYNC strne r0, [r7] /* Nope, update it */ #else beq .Lcs_same_vector str r0, [r7] /* Otherwise, update it */ /* * Need to sync the cache to make sure that last store is * visible to the MMU. */ ldr r2, .Lcpufuncs mov r0, r7 mov r1, #4 mov lr, pc ldr pc, [r2, #CF_DCACHE_WB_RANGE] .Lcs_same_vector: #endif /* PMAP_INCLUDE_PTE_SYNC */ cmp r10, r11 /* Switching to the same L1? */ ldr r10, .Lcpufuncs beq .Lcs_same_l1 /* Yup. */ /* * Do a full context switch, including full TLB flush. */ mov r0, r11 mov lr, pc ldr pc, [r10, #CF_CONTEXT_SWITCH] b .Lcs_context_switched /* * We're switching to a different process in the same L1. * In this situation, we only need to flush the TLB for the * vector_page mapping, and even then only if r7 is non-NULL. */ .Lcs_same_l1: cmp r7, #0 movne r0, #0 /* We *know* vector_page's VA is 0x0 */ movne lr, pc ldrne pc, [r10, #CF_TLB_FLUSHID_SE] .Lcs_context_switched: /* Release the old thread */ str r6, [r4, #TD_LOCK] #if defined(SCHED_ULE) && defined(SMP) ldr r6, .Lblocked_lock GET_CURTHREAD_PTR(r3) 1: ldr r4, [r3, #TD_LOCK] cmp r4, r6 beq 1b #endif /* XXXSCW: Safe to re-enable FIQs here */ /* rem: r9 = new PCB */ /* Restore all the saved registers and exit */ add r3, r9, #PCB_R4 ldmia r3, {r4-r12, sp, pc} END(cpu_switch) #else /* !ARM_NEW_PMAP */ #include ENTRY(cpu_context_switch) /* QQQ: What about macro instead of function? */ DSB mcr CP15_TTBR0(r0) /* set the new TTB */ ISB mov r0, #(CPU_ASID_KERNEL) mcr CP15_TLBIASID(r0) /* flush not global TLBs */ /* * Flush entire Branch Target Cache because of the branch predictor * is not architecturally invisible. See ARM Architecture Reference * Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch * predictors and Requirements for branch predictor maintenance * operations sections. * * QQQ: The predictor is virtually addressed and holds virtual target * addresses. 
Therefore, if mapping is changed, the predictor cache * must be flushed.The flush is part of entire i-cache invalidation * what is always called when code mapping is changed. So herein, * it's the only place where standalone predictor flush must be * executed in kernel (except self modifying code case). */ mcr CP15_BPIALL /* and flush entire Branch Target Cache */ DSB mov pc, lr END(cpu_context_switch) /* * cpu_throw(oldtd, newtd) * * Remove current thread state, then select the next thread to run * and load its state. * r0 = oldtd * r1 = newtd */ ENTRY(cpu_throw) mov r10, r0 /* r10 = oldtd */ mov r11, r1 /* r11 = newtd */ #ifdef VFP /* This thread is dying, disable */ bl _C_LABEL(vfp_discard) /* VFP without preserving state. */ #endif GET_PCPU(r8, r9) /* r8 = current pcpu */ ldr r4, [r8, #PC_CPUID] /* r4 = current cpu id */ cmp r10, #0 /* old thread? */ beq 2f /* no, skip */ /* Remove this CPU from the active list. */ ldr r5, [r8, #PC_CURPMAP] mov r0, #(PM_ACTIVE) add r5, r0 /* r5 = old pm_active */ /* Compute position and mask. */ #if _NCPUWORDS > 1 lsr r0, r4, #3 bic r0, #3 add r5, r0 /* r5 = position in old pm_active */ mov r2, #1 and r0, r4, #31 lsl r2, r0 /* r2 = mask */ #else mov r2, #1 lsl r2, r4 /* r2 = mask */ #endif /* Clear cpu from old active list. */ #ifdef SMP 1: ldrex r0, [r5] bic r0, r2 strex r1, r0, [r5] teq r1, #0 bne 1b #else ldr r0, [r5] bic r0, r2 str r0, [r5] #endif 2: #ifdef INVARIANTS cmp r11, #0 /* new thread? */ beq badsw1 /* no, panic */ #endif ldr r7, [r11, #(TD_PCB)] /* r7 = new PCB */ /* * Registers at this point * r4 = current cpu id * r7 = new PCB * r8 = current pcpu * r11 = newtd */ /* MMU switch to new thread. */ ldr r0, [r7, #(PCB_PAGEDIR)] #ifdef INVARIANTS cmp r0, #0 /* new thread? */ beq badsw4 /* no, panic */ #endif bl _C_LABEL(cpu_context_switch) /* * Set new PMAP as current one. * Insert cpu to new active list. */ ldr r6, [r11, #(TD_PROC)] /* newtd->proc */ ldr r6, [r6, #(P_VMSPACE)] /* newtd->proc->vmspace */ add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */ str r6, [r8, #PC_CURPMAP] /* store to curpmap */ mov r0, #PM_ACTIVE add r6, r0 /* r6 = new pm_active */ /* compute position and mask */ #if _NCPUWORDS > 1 lsr r0, r4, #3 bic r0, #3 add r6, r0 /* r6 = position in new pm_active */ mov r2, #1 and r0, r4, #31 lsl r2, r0 /* r2 = mask */ #else mov r2, #1 lsl r2, r4 /* r2 = mask */ #endif /* Set cpu to new active list. */ #ifdef SMP 1: ldrex r0, [r6] orr r0, r2 strex r1, r0, [r6] teq r1, #0 bne 1b #else ldr r0, [r6] orr r0, r2 str r0, [r6] #endif /* * Registers at this point. * r7 = new PCB * r8 = current pcpu * r11 = newtd * They must match the ones in sw1 position !!! */ DMB b sw1 /* share new thread init with cpu_switch() */ END(cpu_throw) /* * cpu_switch(oldtd, newtd, lock) * * Save the current thread state, then select the next thread to run * and load its state. * r0 = oldtd * r1 = newtd * r2 = lock (new lock for old thread) */ ENTRY(cpu_switch) /* Interrupts are disabled. */ #ifdef INVARIANTS cmp r0, #0 /* old thread? */ beq badsw2 /* no, panic */ #endif /* Save all the registers in the old thread's pcb. */ ldr r3, [r0, #(TD_PCB)] add r3, #(PCB_R4) stmia r3, {r4-r12, sp, lr, pc} #ifdef INVARIANTS cmp r1, #0 /* new thread? */ beq badsw3 /* no, panic */ #endif /* * Save arguments. Note that we can now use r0-r14 until * it is time to restore them for the new thread. However, * some registers are not safe over function call. 
*/ mov r9, r2 /* r9 = lock */ mov r10, r0 /* r10 = oldtd */ mov r11, r1 /* r11 = newtd */ GET_PCPU(r8, r3) /* r8 = current PCPU */ ldr r7, [r11, #(TD_PCB)] /* r7 = newtd->td_pcb */ #ifdef VFP ldr r3, [r10, #(TD_PCB)] fmrx r0, fpexc /* If the VFP is enabled */ tst r0, #(VFPEXC_EN) /* the current thread has */ movne r1, #1 /* used it, so go save */ addne r0, r3, #(PCB_VFPSTATE) /* the state into the PCB */ blne _C_LABEL(vfp_store) /* and disable the VFP. */ #endif /* * MMU switch. If we're switching to a thread with the same * address space as the outgoing one, we can skip the MMU switch. */ mrc CP15_TTBR0(r1) /* r1 = old TTB */ ldr r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */ cmp r0, r1 /* Switching to the TTB? */ beq sw0 /* same TTB, skip */ #ifdef INVARIANTS cmp r0, #0 /* new thread? */ beq badsw4 /* no, panic */ #endif bl cpu_context_switch /* new TTB as argument */ /* * Registers at this point * r7 = new PCB * r8 = current pcpu * r9 = lock * r10 = oldtd * r11 = newtd */ /* * Set new PMAP as current one. * Update active list on PMAPs. */ ldr r6, [r11, #TD_PROC] /* newtd->proc */ ldr r6, [r6, #P_VMSPACE] /* newtd->proc->vmspace */ add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */ ldr r5, [r8, #PC_CURPMAP] /* get old curpmap */ str r6, [r8, #PC_CURPMAP] /* and save new one */ mov r0, #PM_ACTIVE add r5, r0 /* r5 = old pm_active */ add r6, r0 /* r6 = new pm_active */ /* Compute position and mask. */ ldr r4, [r8, #PC_CPUID] #if _NCPUWORDS > 1 lsr r0, r4, #3 bic r0, #3 add r5, r0 /* r5 = position in old pm_active */ add r6, r0 /* r6 = position in new pm_active */ mov r2, #1 and r0, r4, #31 lsl r2, r0 /* r2 = mask */ #else mov r2, #1 lsl r2, r4 /* r2 = mask */ #endif /* Clear cpu from old active list. */ #ifdef SMP 1: ldrex r0, [r5] bic r0, r2 strex r1, r0, [r5] teq r1, #0 bne 1b #else ldr r0, [r5] bic r0, r2 str r0, [r5] #endif /* Set cpu to new active list. */ #ifdef SMP 1: ldrex r0, [r6] orr r0, r2 strex r1, r0, [r6] teq r1, #0 bne 1b #else ldr r0, [r6] orr r0, r2 str r0, [r6] #endif sw0: /* * Registers at this point * r7 = new PCB * r8 = current pcpu * r9 = lock * r10 = oldtd * r11 = newtd */ /* Change the old thread lock. */ add r5, r10, #TD_LOCK DMB 1: ldrex r0, [r5] strex r1, r9, [r5] teq r1, #0 bne 1b DMB sw1: clrex /* * Registers at this point * r7 = new PCB * r8 = current pcpu * r11 = newtd */ #if defined(SMP) && defined(SCHED_ULE) /* * 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE * QQQ: What does it mean in reality and why is it done? */ ldr r6, =blocked_lock 1: ldr r3, [r11, #TD_LOCK] /* atomic write regular read */ cmp r3, r6 beq 1b #endif /* Set the new tls */ ldr r0, [r11, #(TD_MD + MD_TP)] mcr CP15_TPIDRURO(r0) /* write tls thread reg 2 */ /* We have a new curthread now so make a note it */ str r11, [r8, #PC_CURTHREAD] mcr CP15_TPIDRPRW(r11) /* store pcb in per cpu structure */ str r7, [r8, #PC_CURPCB] /* * Restore all saved registers and return. Note that some saved * registers can be changed when either cpu_fork(), cpu_set_upcall(), * cpu_set_fork_handler(), or makectx() was called. 
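For reference, a minimal C sketch of the word-offset and bit-mask arithmetic that the "_NCPUWORDS > 1" blocks in cpu_throw()/cpu_switch() above derive from the CPU id (PC_CPUID) before the ldrex/strex loops set or clear this CPU's bit in a pmap's pm_active set. The function name is illustrative only, not kernel code.

#include <stdint.h>

static void
pm_active_position(uint32_t cpuid, uint32_t *byte_off, uint32_t *mask)
{
	/* lsr #3 then bic #3: byte offset of the 32-bit word holding the bit. */
	*byte_off = (cpuid >> 3) & ~(uint32_t)3;
	/* 1 << (cpuid & 31): the bit within that word. */
	*mask = (uint32_t)1 << (cpuid & 31);
}

The atomic ldrex/strex retry loops then OR (set) or BIC (clear) that mask into the word at pm_active plus byte_off.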
*/ add r3, r7, #PCB_R4 ldmia r3, {r4-r12, sp, pc} #ifdef INVARIANTS badsw1: ldr r0, =sw1_panic_str bl _C_LABEL(panic) 1: nop b 1b badsw2: ldr r0, =sw2_panic_str bl _C_LABEL(panic) 1: nop b 1b badsw3: ldr r0, =sw3_panic_str bl _C_LABEL(panic) 1: nop b 1b badsw4: ldr r0, =sw4_panic_str bl _C_LABEL(panic) 1: nop b 1b sw1_panic_str: .asciz "cpu_throw: no newthread supplied.\n" sw2_panic_str: .asciz "cpu_switch: no curthread supplied.\n" sw3_panic_str: .asciz "cpu_switch: no newthread supplied.\n" sw4_panic_str: .asciz "cpu_switch: new pagedir is NULL.\n" #endif END(cpu_switch) #endif /* !ARM_NEW_PMAP */ ENTRY(savectx) stmfd sp!, {lr} sub sp, sp, #4 /* Store all the registers in the thread's pcb */ add r3, r0, #(PCB_R4) stmia r3, {r4-r12, sp, lr, pc} #ifdef VFP fmrx r2, fpexc /* If the VFP is enabled */ tst r2, #(VFPEXC_EN) /* the current thread has */ movne r1, #1 /* used it, so go save */ addne r0, r0, #(PCB_VFPSTATE) /* the state into the PCB */ blne _C_LABEL(vfp_store) /* and disable the VFP. */ #endif add sp, sp, #4; ldmfd sp!, {pc} END(savectx) ENTRY(fork_trampoline) STOP_UNWINDING /* EABI: Don't unwind beyond the thread enty point. */ mov fp, #0 /* OABI: Stack traceback via fp stops here. */ mov r2, sp mov r1, r5 mov r0, r4 ldr lr, =swi_exit /* Go finish forking, then return */ b _C_LABEL(fork_exit) /* to userland via swi_exit code. */ END(fork_trampoline) Index: head/sys/arm/arm/sys_machdep.c =================================================================== --- head/sys/arm/arm/sys_machdep.c (revision 284114) +++ head/sys/arm/arm/sys_machdep.c (revision 284115) @@ -1,234 +1,235 @@ /*- * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91 */ #include __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include #include #include #include #include #include #include #include #include +#include #include #include #include #ifndef _SYS_SYSPROTO_H_ struct sysarch_args { int op; char *parms; }; #endif /* Prototypes */ static int arm32_sync_icache (struct thread *, void *); static int arm32_drain_writebuf(struct thread *, void *); #if __ARM_ARCH >= 6 static int sync_icache(uintptr_t addr, size_t len) { size_t size; vm_offset_t rv; /* * Align starting address to even number because value of "1" * is used as return value for success. */ len += addr & 1; addr &= ~1; /* Break whole range to pages. */ do { size = PAGE_SIZE - (addr & PAGE_MASK); size = min(size, len); rv = dcache_wb_pou_checked(addr, size); if (rv == 1) /* see dcache_wb_pou_checked() */ rv = icache_inv_pou_checked(addr, size); if (rv != 1) { if (!useracc((void *)addr, size, VM_PROT_READ)) { /* Invalid access */ return (rv); } /* Valid but unmapped page - skip it. */ } len -= size; addr += size; } while (len > 0); /* Invalidate branch predictor buffer. */ bpb_inv_all(); return (1); } #endif static int arm32_sync_icache(struct thread *td, void *args) { struct arm_sync_icache_args ua; int error; ksiginfo_t ksi; #if __ARM_ARCH >= 6 vm_offset_t rv; #endif if ((error = copyin(args, &ua, sizeof(ua))) != 0) return (error); if (ua.len == 0) { td->td_retval[0] = 0; return (0); } /* * Validate arguments. Address and length are unsigned, * so we can use wrapped overflow check. */ if (((ua.addr + ua.len) < ua.addr) || ((ua.addr + ua.len) > VM_MAXUSER_ADDRESS)) { ksiginfo_init_trap(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; ksi.ksi_addr = (void *)max(ua.addr, VM_MAXUSER_ADDRESS); trapsignal(td, &ksi); return (EINVAL); } #if __ARM_ARCH >= 6 rv = sync_icache(ua.addr, ua.len); if (rv != 1) { ksiginfo_init_trap(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_MAPERR; ksi.ksi_addr = (void *)rv; trapsignal(td, &ksi); return (EINVAL); } #else cpu_icache_sync_range(ua.addr, ua.len); #endif td->td_retval[0] = 0; return (0); } static int arm32_drain_writebuf(struct thread *td, void *args) { /* No args. */ td->td_retval[0] = 0; cpu_drain_writebuf(); return (0); } static int arm32_set_tp(struct thread *td, void *args) { td->td_md.md_tp = (register_t)args; -#ifndef ARM_TP_ADDRESS +#if __ARM_ARCH >= 6 set_tls(args); #else *(register_t *)ARM_TP_ADDRESS = (register_t)args; #endif return (0); } static int arm32_get_tp(struct thread *td, void *args) { -#ifndef ARM_TP_ADDRESS +#if __ARM_ARCH >= 6 td->td_retval[0] = td->td_md.md_tp; #else td->td_retval[0] = *(register_t *)ARM_TP_ADDRESS; #endif return (0); } int sysarch(td, uap) struct thread *td; register struct sysarch_args *uap; { int error; #ifdef CAPABILITY_MODE /* * When adding new operations, add a new case statement here to * explicitly indicate whether or not the operation is safe to * perform in capability mode. 
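For context, a minimal userland sketch of how the arm32_sync_icache() handler above is reached through sysarch(2) (dispatched in the switch that follows), e.g. by a JIT that has just written instructions into a buffer. The wrapper name flush_icache is illustrative; struct arm_sync_icache_args and ARM_SYNC_ICACHE are assumed to come from the FreeBSD/arm <machine/sysarch.h> userland header.

#include <sys/types.h>
#include <stdint.h>
#include <machine/sysarch.h>

static int
flush_icache(void *code, size_t len)
{
	struct arm_sync_icache_args args;

	args.addr = (uintptr_t)code;	/* start of the freshly written code */
	args.len = len;			/* length of the region, in bytes */
	return (sysarch(ARM_SYNC_ICACHE, &args));
}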
*/ if (IN_CAPABILITY_MODE(td)) { switch (uap->op) { case ARM_SYNC_ICACHE: case ARM_DRAIN_WRITEBUF: case ARM_SET_TP: case ARM_GET_TP: break; default: #ifdef KTRACE if (KTRPOINT(td, KTR_CAPFAIL)) ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL); #endif return (ECAPMODE); } } #endif switch (uap->op) { case ARM_SYNC_ICACHE: error = arm32_sync_icache(td, uap->parms); break; case ARM_DRAIN_WRITEBUF: error = arm32_drain_writebuf(td, uap->parms); break; case ARM_SET_TP: error = arm32_set_tp(td, uap->parms); break; case ARM_GET_TP: error = arm32_get_tp(td, uap->parms); break; default: error = EINVAL; break; } return (error); } Index: head/sys/arm/arm/vm_machdep.c =================================================================== --- head/sys/arm/arm/vm_machdep.c (revision 284114) +++ head/sys/arm/arm/vm_machdep.c (revision 284115) @@ -1,348 +1,350 @@ /*- * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary :forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include + +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * struct switchframe and trapframe must both be a multiple of 8 * for correct stack alignment. */ CTASSERT(sizeof(struct switchframe) == 48); CTASSERT(sizeof(struct trapframe) == 80); /* * Finish a fork operation, with process p2 nearly set up. 
* Copy and update the pcb, set up the stack so that the child * ready to run and return to user mode. */ void cpu_fork(register struct thread *td1, register struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; struct trapframe *tf; struct mdproc *mdp2; if ((flags & RFPROC) == 0) return; /* Point the pcb to the top of the stack */ pcb2 = (struct pcb *) (td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; #ifdef __XSCALE__ #ifndef CPU_XSCALE_CORE3 pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE); #endif #endif td2->td_pcb = pcb2; /* Clone td1's pcb */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Point to mdproc and then copy over td1's contents */ mdp2 = &p2->p_md; bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2)); /* Point the frame to the stack in front of pcb and copy td1's frame */ td2->td_frame = (struct trapframe *)pcb2 - 1; *td2->td_frame = *td1->td_frame; /* * Create a new fresh stack for the new process. * Copy the trap frame for the return to user mode as if from a * syscall. This copies most of the user mode register values. */ pmap_set_pcb_pagedir(vmspace_pmap(p2->p_vmspace), pcb2); pcb2->pcb_regs.sf_r4 = (register_t)fork_return; pcb2->pcb_regs.sf_r5 = (register_t)td2; pcb2->pcb_regs.sf_lr = (register_t)fork_trampoline; pcb2->pcb_regs.sf_sp = STACKALIGN(td2->td_frame); pcb2->pcb_vfpcpu = -1; pcb2->pcb_vfpstate.fpscr = VFPSCR_DN | VFPSCR_FZ; tf = td2->td_frame; tf->tf_spsr &= ~PSR_C; tf->tf_r0 = 0; tf->tf_r1 = 0; /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_cspr = PSR_SVC32_MODE;; -#ifdef ARM_TP_ADDRESS - td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS; -#else +#if __ARM_ARCH >= 6 td2->td_md.md_tp = td1->td_md.md_tp; +#else + td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS; #endif } void cpu_thread_swapin(struct thread *td) { } void cpu_thread_swapout(struct thread *td) { } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; int fixup; #ifdef __ARMEB__ u_int call; #endif frame = td->td_frame; fixup = 0; #ifdef __ARMEB__ /* * __syscall returns an off_t while most other syscalls return an * int. As an off_t is 64-bits and an int is 32-bits we need to * place the returned data into r1. As the lseek and frerebsd6_lseek * syscalls also return an off_t they do not need this fixup. */ call = frame->tf_r7; if (call == SYS___syscall) { register_t *ap = &frame->tf_r0; register_t code = ap[_QUAD_LOWWORD]; if (td->td_proc->p_sysent->sv_mask) code &= td->td_proc->p_sysent->sv_mask; fixup = (code != SYS_freebsd6_lseek && code != SYS_lseek) ? 1 : 0; } #endif switch (error) { case 0: if (fixup) { frame->tf_r0 = 0; frame->tf_r1 = td->td_retval[0]; } else { frame->tf_r0 = td->td_retval[0]; frame->tf_r1 = td->td_retval[1]; } frame->tf_spsr &= ~PSR_C; /* carry bit */ break; case ERESTART: /* * Reconstruct the pc to point at the swi. */ #if __ARM_ARCH >= 7 if ((frame->tf_spsr & PSR_T) != 0) frame->tf_pc -= THUMB_INSN_SIZE; else #endif frame->tf_pc -= INSN_SIZE; break; case EJUSTRETURN: /* nothing to do */ break; default: frame->tf_r0 = error; frame->tf_spsr |= PSR_C; /* carry bit */ break; } } /* * Initialize machine state (pcb and trap frame) for a new thread about to * upcall. Put enough state in the new thread's PCB to get it to go back * userret(), where we can intercept it again to set the return (upcall) * Address and stack, along with those from upcals that are from other sources * such as those generated in thread_userret() itself. 
*/ void cpu_set_upcall(struct thread *td, struct thread *td0) { bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); td->td_pcb->pcb_regs.sf_r4 = (register_t)fork_return; td->td_pcb->pcb_regs.sf_r5 = (register_t)td; td->td_pcb->pcb_regs.sf_lr = (register_t)fork_trampoline; td->td_pcb->pcb_regs.sf_sp = STACKALIGN(td->td_frame); td->td_frame->tf_spsr &= ~PSR_C; td->td_frame->tf_r0 = 0; /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = PSR_SVC32_MODE; } /* * Set that machine state for performing an upcall that has to * be done in thread_userret() so that those upcalls generated * in thread_userret() itself can be done as well. */ void cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf = td->td_frame; tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size); tf->tf_pc = (int)entry; tf->tf_r0 = (int)arg; tf->tf_spsr = PSR_USR32_MODE; } int cpu_set_user_tls(struct thread *td, void *tls_base) { td->td_md.md_tp = (register_t)tls_base; if (td == curthread) { critical_enter(); -#ifdef ARM_TP_ADDRESS - *(register_t *)ARM_TP_ADDRESS = (register_t)tls_base; -#else +#if __ARM_ARCH >= 6 set_tls(tls_base); +#else + *(register_t *)ARM_TP_ADDRESS = (register_t)tls_base; #endif critical_exit(); } return (0); } void cpu_thread_exit(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; /* * Ensure td_frame is aligned to an 8 byte boundary as it will be * placed into the stack pointer which must be 8 byte aligned in * the ARM EABI. */ td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb) - 1; #ifdef __XSCALE__ #ifndef CPU_XSCALE_CORE3 pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE); #endif #endif } void cpu_thread_free(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg) { td->td_pcb->pcb_regs.sf_r4 = (register_t)func; /* function */ td->td_pcb->pcb_regs.sf_r5 = (register_t)arg; /* first arg */ } /* * Software interrupt handler for queued VM system processing. */ void swi_vm(void *dummy) { if (busdma_swi_pending) busdma_swi(); } void cpu_exit(struct thread *td) { }
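The substance of this change is visible in cpu_set_user_tls() and arm32_set_tp()/arm32_get_tp() above: on armv6 and later the kernel installs the thread pointer in the user-read-only TPIDRURO register (CP15 c13, c0, 3, written by set_tls() and by the mcr instructions in swtch.S), while on armv4/v5 it stores the value at the fixed, user-readable ARM_TP_ADDRESS word. Below is a minimal sketch of how user code observes that value under each model; the function names are illustrative, and the armv4 variant takes the magic address as a parameter rather than assuming how it is exposed to userland.

#include <stdint.h>

static inline uintptr_t
read_tp_v6(void)
{
	uintptr_t tp;

	/* TPIDRURO: readable from user mode, writable only by the kernel. */
	__asm __volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
	return (tp);
}

static inline uintptr_t
read_tp_v4(uintptr_t arm_tp_address)
{
	/* armv4/v5: the kernel keeps the current thread's tp in this word. */
	return (*(volatile uintptr_t *)arm_tp_address);
}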