Index: head/sys/arm/arm/genassym.c =================================================================== --- head/sys/arm/arm/genassym.c (revision 306161) +++ head/sys/arm/arm/genassym.c (revision 306162) @@ -1,156 +1,159 @@ /*- * Copyright (c) 2004 Olivier Houchard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include ASSYM(KERNBASE, KERNBASE); #if __ARM_ARCH >= 6 ASSYM(CPU_ASID_KERNEL,CPU_ASID_KERNEL); #endif ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); #if __ARM_ARCH < 6 ASSYM(PCB_DACR, offsetof(struct pcb, pcb_dacr)); #endif ASSYM(PCB_PAGEDIR, offsetof(struct pcb, pcb_pagedir)); #if __ARM_ARCH < 6 ASSYM(PCB_L1VEC, offsetof(struct pcb, pcb_l1vec)); ASSYM(PCB_PL1VEC, offsetof(struct pcb, pcb_pl1vec)); #endif ASSYM(PCB_R4, offsetof(struct pcb, pcb_regs.sf_r4)); ASSYM(PCB_R5, offsetof(struct pcb, pcb_regs.sf_r5)); ASSYM(PCB_R6, offsetof(struct pcb, pcb_regs.sf_r6)); ASSYM(PCB_R7, offsetof(struct pcb, pcb_regs.sf_r7)); ASSYM(PCB_R8, offsetof(struct pcb, pcb_regs.sf_r8)); ASSYM(PCB_R9, offsetof(struct pcb, pcb_regs.sf_r9)); ASSYM(PCB_R10, offsetof(struct pcb, pcb_regs.sf_r10)); ASSYM(PCB_R11, offsetof(struct pcb, pcb_regs.sf_r11)); ASSYM(PCB_R12, offsetof(struct pcb, pcb_regs.sf_r12)); ASSYM(PCB_SP, offsetof(struct pcb, pcb_regs.sf_sp)); ASSYM(PCB_LR, offsetof(struct pcb, pcb_regs.sf_lr)); ASSYM(PCB_PC, offsetof(struct pcb, pcb_regs.sf_pc)); +#if __ARM_ARCH >= 6 +ASSYM(PCB_TPIDRURW, offsetof(struct pcb, pcb_regs.sf_tpidrurw)); +#endif ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(M_LEN, offsetof(struct mbuf, m_len)); ASSYM(M_DATA, offsetof(struct mbuf, m_data)); ASSYM(M_NEXT, offsetof(struct mbuf, m_next)); ASSYM(IP_SRC, offsetof(struct ip, ip_src)); ASSYM(IP_DST, offsetof(struct ip, ip_dst)); ASSYM(CF_CONTEXT_SWITCH, offsetof(struct cpu_functions, cf_context_switch)); ASSYM(CF_DCACHE_WB_RANGE, offsetof(struct cpu_functions, cf_dcache_wb_range)); ASSYM(CF_IDCACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_idcache_wbinv_all)); 
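/*
 * Each ASSYM() entry here is turned by the build into a constant in the
 * generated assym.s, which the assembly sources pull in via #include
 * "assym.s"; that is how the new PCB_TPIDRURW offset added above becomes
 * visible to the context-switch code in swtch-v6.S.
 */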
ASSYM(CF_L2CACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_l2cache_wbinv_all)); ASSYM(CF_TLB_FLUSHID_SE, offsetof(struct cpu_functions, cf_tlb_flushID_SE)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_MD, offsetof(struct thread, td_md)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); -ASSYM(MD_TP, offsetof(struct mdthread, md_tp)); #if __ARM_ARCH < 6 +ASSYM(MD_TP, offsetof(struct mdthread, md_tp)); ASSYM(MD_RAS_START, offsetof(struct mdthread, md_ras_start)); ASSYM(MD_RAS_END, offsetof(struct mdthread, md_ras_end)); #endif ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr)); ASSYM(TF_R0, offsetof(struct trapframe, tf_r0)); ASSYM(TF_R1, offsetof(struct trapframe, tf_r1)); ASSYM(TF_PC, offsetof(struct trapframe, tf_pc)); ASSYM(P_PID, offsetof(struct proc, p_pid)); ASSYM(P_FLAG, offsetof(struct proc, p_flag)); ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc)); #if __ARM_ARCH < 6 ASSYM(ARM_TP_ADDRESS, ARM_TP_ADDRESS); ASSYM(ARM_RAS_START, ARM_RAS_START); ASSYM(ARM_RAS_END, ARM_RAS_END); #endif #ifdef VFP ASSYM(PCB_VFPSTATE, offsetof(struct pcb, pcb_vfpstate)); #endif #if __ARM_ARCH >= 6 ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap)); #endif ASSYM(PAGE_SIZE, PAGE_SIZE); #if __ARM_ARCH < 6 ASSYM(PMAP_DOMAIN_KERNEL, PMAP_DOMAIN_KERNEL); #endif #ifdef PMAP_INCLUDE_PTE_SYNC ASSYM(PMAP_INCLUDE_PTE_SYNC, 1); #endif ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(MAXCOMLEN, MAXCOMLEN); ASSYM(MAXCPU, MAXCPU); ASSYM(_NCPUWORDS, _NCPUWORDS); ASSYM(NIRQ, NIRQ); ASSYM(PCPU_SIZE, sizeof(struct pcpu)); ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace)); ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap)); ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active)); ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid)); ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS); ASSYM(DCACHE_LINE_SIZE, offsetof(struct cpuinfo, dcache_line_size)); ASSYM(DCACHE_LINE_MASK, offsetof(struct cpuinfo, dcache_line_mask)); ASSYM(ICACHE_LINE_SIZE, offsetof(struct cpuinfo, icache_line_size)); ASSYM(ICACHE_LINE_MASK, offsetof(struct cpuinfo, icache_line_mask)); Index: head/sys/arm/arm/swtch-v6.S =================================================================== --- head/sys/arm/arm/swtch-v6.S (revision 306161) +++ head/sys/arm/arm/swtch-v6.S (revision 306162) @@ -1,493 +1,499 @@ /* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */ /*- * Copyright 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Steve C. Woodford for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * cpuswitch.S * * cpu switching functions * * Created : 15/10/94 * */ #include "assym.s" #include "opt_sched.h" #include #include #include #include #include __FBSDID("$FreeBSD$"); #if defined(SMP) #define GET_PCPU(tmp, tmp2) \ mrc CP15_MPIDR(tmp); \ and tmp, tmp, #0xf; \ ldr tmp2, .Lcurpcpu+4; \ mul tmp, tmp, tmp2; \ ldr tmp2, .Lcurpcpu; \ add tmp, tmp, tmp2; #else #define GET_PCPU(tmp, tmp2) \ ldr tmp, .Lcurpcpu #endif #ifdef VFP .fpu vfp /* allow VFP instructions */ #endif .Lcurpcpu: .word _C_LABEL(__pcpu) .word PCPU_SIZE .Lblocked_lock: .word _C_LABEL(blocked_lock) ENTRY(cpu_context_switch) DSB /* * We can directly switch between translation tables only when the * size of the mapping for any given virtual address is the same * in the old and new translation tables. * Thus, we must switch to kernel pmap translation table as * intermediate mapping because all sizes of these mappings are same * (or unmapped). The same is true for switch from kernel pmap * translation table to new pmap one. 
*/ mov r2, #(CPU_ASID_KERNEL) ldr r1, =(_C_LABEL(pmap_kern_ttb)) ldr r1, [r1] mcr CP15_TTBR0(r1) /* switch to kernel TTB */ ISB mcr CP15_TLBIASID(r2) /* flush not global TLBs */ DSB mcr CP15_TTBR0(r0) /* switch to new TTB */ ISB /* * We must flush not global TLBs again because PT2MAP mapping * is different. */ mcr CP15_TLBIASID(r2) /* flush not global TLBs */ /* * Flush entire Branch Target Cache because of the branch predictor * is not architecturally invisible. See ARM Architecture Reference * Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch * predictors and Requirements for branch predictor maintenance * operations sections. */ mcr CP15_BPIALL /* flush entire Branch Target Cache */ DSB mov pc, lr END(cpu_context_switch) /* * cpu_throw(oldtd, newtd) * * Remove current thread state, then select the next thread to run * and load its state. * r0 = oldtd * r1 = newtd */ ENTRY(cpu_throw) mov r10, r0 /* r10 = oldtd */ mov r11, r1 /* r11 = newtd */ #ifdef VFP /* This thread is dying, disable */ bl _C_LABEL(vfp_discard) /* VFP without preserving state. */ #endif GET_PCPU(r8, r9) /* r8 = current pcpu */ ldr r4, [r8, #PC_CPUID] /* r4 = current cpu id */ cmp r10, #0 /* old thread? */ beq 2f /* no, skip */ /* Remove this CPU from the active list. */ ldr r5, [r8, #PC_CURPMAP] mov r0, #(PM_ACTIVE) add r5, r0 /* r5 = old pm_active */ /* Compute position and mask. */ #if _NCPUWORDS > 1 lsr r0, r4, #3 bic r0, #3 add r5, r0 /* r5 = position in old pm_active */ mov r2, #1 and r0, r4, #31 lsl r2, r0 /* r2 = mask */ #else mov r2, #1 lsl r2, r4 /* r2 = mask */ #endif /* Clear cpu from old active list. */ #ifdef SMP 1: ldrex r0, [r5] bic r0, r2 strex r1, r0, [r5] teq r1, #0 bne 1b #else ldr r0, [r5] bic r0, r2 str r0, [r5] #endif 2: #ifdef INVARIANTS cmp r11, #0 /* new thread? */ beq badsw1 /* no, panic */ #endif ldr r7, [r11, #(TD_PCB)] /* r7 = new PCB */ /* * Registers at this point * r4 = current cpu id * r7 = new PCB * r8 = current pcpu * r11 = newtd */ /* MMU switch to new thread. */ ldr r0, [r7, #(PCB_PAGEDIR)] #ifdef INVARIANTS cmp r0, #0 /* new thread? */ beq badsw4 /* no, panic */ #endif bl _C_LABEL(cpu_context_switch) /* * Set new PMAP as current one. * Insert cpu to new active list. */ ldr r6, [r11, #(TD_PROC)] /* newtd->proc */ ldr r6, [r6, #(P_VMSPACE)] /* newtd->proc->vmspace */ add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */ str r6, [r8, #PC_CURPMAP] /* store to curpmap */ mov r0, #PM_ACTIVE add r6, r0 /* r6 = new pm_active */ /* compute position and mask */ #if _NCPUWORDS > 1 lsr r0, r4, #3 bic r0, #3 add r6, r0 /* r6 = position in new pm_active */ mov r2, #1 and r0, r4, #31 lsl r2, r0 /* r2 = mask */ #else mov r2, #1 lsl r2, r4 /* r2 = mask */ #endif /* Set cpu to new active list. */ #ifdef SMP 1: ldrex r0, [r6] orr r0, r2 strex r1, r0, [r6] teq r1, #0 bne 1b #else ldr r0, [r6] orr r0, r2 str r0, [r6] #endif /* * Registers at this point. * r7 = new PCB * r8 = current pcpu * r11 = newtd * They must match the ones in sw1 position !!! */ DMB b sw1 /* share new thread init with cpu_switch() */ END(cpu_throw) /* * cpu_switch(oldtd, newtd, lock) * * Save the current thread state, then select the next thread to run * and load its state. * r0 = oldtd * r1 = newtd * r2 = lock (new lock for old thread) */ ENTRY(cpu_switch) /* Interrupts are disabled. */ #ifdef INVARIANTS cmp r0, #0 /* old thread? */ beq badsw2 /* no, panic */ #endif /* Save all the registers in the old thread's pcb. 
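 * On ARMv6 and later the userland TLS register (TPIDRURW) is captured
 * into sf_tpidrurw right after the stmia below, so the thread pointer is
 * switched per-thread as well.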
 */
	ldr	r3, [r0, #(TD_PCB)]
	add	r3, #(PCB_R4)
	stmia	r3, {r4-r12, sp, lr, pc}
+	mrc	CP15_TPIDRURW(r4)
+	str	r4, [r3, #(PCB_TPIDRURW - PCB_R4)]

#ifdef INVARIANTS
	cmp	r1, #0			/* new thread? */
	beq	badsw3			/* no, panic */
#endif

	/*
	 * Save arguments. Note that we can now use r0-r14 until
	 * it is time to restore them for the new thread. However,
	 * some registers are not safe over function call.
	 */
	mov	r9, r2			/* r9 = lock */
	mov	r10, r0			/* r10 = oldtd */
	mov	r11, r1			/* r11 = newtd */

	GET_PCPU(r8, r3)		/* r8 = current PCPU */
	ldr	r7, [r11, #(TD_PCB)]	/* r7 = newtd->td_pcb */

#ifdef VFP
	ldr	r3, [r10, #(TD_PCB)]
	fmrx	r0, fpexc		/* If the VFP is enabled */
	tst	r0, #(VFPEXC_EN)	/* the current thread has */
	movne	r1, #1			/* used it, so go save */
	addne	r0, r3, #(PCB_VFPSTATE)	/* the state into the PCB */
	blne	_C_LABEL(vfp_store)	/* and disable the VFP. */
#endif

	/*
	 * MMU switch. If we're switching to a thread with the same
	 * address space as the outgoing one, we can skip the MMU switch.
	 */
	mrc	CP15_TTBR0(r1)		/* r1 = old TTB */
	ldr	r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */
	cmp	r0, r1			/* Switching to the TTB? */
	beq	sw0			/* same TTB, skip */

#ifdef INVARIANTS
	cmp	r0, #0			/* new thread? */
	beq	badsw4			/* no, panic */
#endif

	bl	cpu_context_switch	/* new TTB as argument */

	/*
	 * Registers at this point
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r9  = lock
	 *   r10 = oldtd
	 *   r11 = newtd
	 */

	/*
	 * Set new PMAP as current one.
	 * Update active list on PMAPs.
	 */
	ldr	r6, [r11, #TD_PROC]	/* newtd->proc */
	ldr	r6, [r6, #P_VMSPACE]	/* newtd->proc->vmspace */
	add	r6, #VM_PMAP		/* newtd->proc->vmspace->pmap */
	ldr	r5, [r8, #PC_CURPMAP]	/* get old curpmap */
	str	r6, [r8, #PC_CURPMAP]	/* and save new one */

	mov	r0, #PM_ACTIVE
	add	r5, r0			/* r5 = old pm_active */
	add	r6, r0			/* r6 = new pm_active */

	/* Compute position and mask. */
	ldr	r4, [r8, #PC_CPUID]
#if _NCPUWORDS > 1
	lsr	r0, r4, #3
	bic	r0, #3
	add	r5, r0			/* r5 = position in old pm_active */
	add	r6, r0			/* r6 = position in new pm_active */
	mov	r2, #1
	and	r0, r4, #31
	lsl	r2, r0			/* r2 = mask */
#else
	mov	r2, #1
	lsl	r2, r4			/* r2 = mask */
#endif

	/* Clear cpu from old active list. */
#ifdef SMP
1:	ldrex	r0, [r5]
	bic	r0, r2
	strex	r1, r0, [r5]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r5]
	bic	r0, r2
	str	r0, [r5]
#endif

	/* Set cpu to new active list. */
#ifdef SMP
1:	ldrex	r0, [r6]
	orr	r0, r2
	strex	r1, r0, [r6]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r6]
	orr	r0, r2
	str	r0, [r6]
#endif

sw0:
	/*
	 * Registers at this point
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r9  = lock
	 *   r10 = oldtd
	 *   r11 = newtd
	 */

	/* Change the old thread lock. */
	add	r5, r10, #TD_LOCK
	DMB
1:	ldrex	r0, [r5]
	strex	r1, r9, [r5]
	teq	r1, #0
	bne	1b
	DMB

sw1:
	clrex
	/*
	 * Registers at this point
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r11 = newtd
	 */

#if defined(SMP) && defined(SCHED_ULE)
	/*
	 * 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE
	 * QQQ: What does it mean in reality and why is it done?
	 */
	ldr	r6, =blocked_lock
1:
	ldr	r3, [r11, #TD_LOCK]	/* atomic write regular read */
	cmp	r3, r6
	beq	1b
#endif

-	/* Set the new tls */
-	ldr	r0, [r11, #(TD_MD + MD_TP)]
-	mcr	CP15_TPIDRURO(r0)	/* write tls thread reg 2 */

	/* We have a new curthread now so make a note it */
	str	r11, [r8, #PC_CURTHREAD]
	mcr	CP15_TPIDRPRW(r11)

	/* store pcb in per cpu structure */
	str	r7, [r8, #PC_CURPCB]

	/*
	 * Restore all saved registers and return. Note that some saved
	 * registers can be changed when either cpu_fork(), cpu_copy_thread(),
	 * cpu_fork_kthread_handler(), or makectx() was called.
+	 *
+	 * The value of TPIDRURW is also written into TPIDRURO, as
+	 * userspace still uses TPIDRURO, modifying it through
+	 * sysarch(ARM_SET_TP, addr).
	 */
+	ldr	r3, [r7, #PCB_TPIDRURW]
+	mcr	CP15_TPIDRURW(r3)	/* write tls thread reg 2 */
+	mcr	CP15_TPIDRURO(r3)	/* write tls thread reg 3 */
	add	r3, r7, #PCB_R4
	ldmia	r3, {r4-r12, sp, pc}

#ifdef INVARIANTS
badsw1:
	ldr	r0, =sw1_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

badsw2:
	ldr	r0, =sw2_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

badsw3:
	ldr	r0, =sw3_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

badsw4:
	ldr	r0, =sw4_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

sw1_panic_str:
	.asciz	"cpu_throw: no newthread supplied.\n"
sw2_panic_str:
	.asciz	"cpu_switch: no curthread supplied.\n"
sw3_panic_str:
	.asciz	"cpu_switch: no newthread supplied.\n"
sw4_panic_str:
	.asciz	"cpu_switch: new pagedir is NULL.\n"
#endif
END(cpu_switch)

Index: head/sys/arm/arm/sys_machdep.c
===================================================================
--- head/sys/arm/arm/sys_machdep.c	(revision 306161)
+++ head/sys/arm/arm/sys_machdep.c	(revision 306162)
@@ -1,239 +1,239 @@
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */

#include
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
	int op;
	char *parms;
};
#endif

/* Prototypes */
static int arm32_sync_icache (struct thread *, void *);
static int arm32_drain_writebuf(struct thread *, void *);

#if __ARM_ARCH >= 6
static int
sync_icache(uintptr_t addr, size_t len)
{
	size_t size;
	vm_offset_t rv;

	/*
	 * Align starting address to even number because value of "1"
	 * is used as return value for success.
	 */
	len += addr & 1;
	addr &= ~1;

	/* Break whole range to pages.
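	 * Each page is handled on its own, so a valid but unmapped page in
	 * the middle of the range is skipped rather than failing the whole
	 * request.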
*/ do { size = PAGE_SIZE - (addr & PAGE_MASK); size = min(size, len); rv = dcache_wb_pou_checked(addr, size); if (rv == 1) /* see dcache_wb_pou_checked() */ rv = icache_inv_pou_checked(addr, size); if (rv != 1) { if (!useracc((void *)addr, size, VM_PROT_READ)) { /* Invalid access */ return (rv); } /* Valid but unmapped page - skip it. */ } len -= size; addr += size; } while (len > 0); /* Invalidate branch predictor buffer. */ bpb_inv_all(); return (1); } #endif static int arm32_sync_icache(struct thread *td, void *args) { struct arm_sync_icache_args ua; int error; ksiginfo_t ksi; #if __ARM_ARCH >= 6 vm_offset_t rv; #endif if ((error = copyin(args, &ua, sizeof(ua))) != 0) return (error); if (ua.len == 0) { td->td_retval[0] = 0; return (0); } /* * Validate arguments. Address and length are unsigned, * so we can use wrapped overflow check. */ if (((ua.addr + ua.len) < ua.addr) || ((ua.addr + ua.len) > VM_MAXUSER_ADDRESS)) { ksiginfo_init_trap(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; ksi.ksi_addr = (void *)max(ua.addr, VM_MAXUSER_ADDRESS); trapsignal(td, &ksi); return (EINVAL); } #if __ARM_ARCH >= 6 rv = sync_icache(ua.addr, ua.len); if (rv != 1) { ksiginfo_init_trap(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_MAPERR; ksi.ksi_addr = (void *)rv; trapsignal(td, &ksi); return (EINVAL); } #else cpu_icache_sync_range(ua.addr, ua.len); #endif td->td_retval[0] = 0; return (0); } static int arm32_drain_writebuf(struct thread *td, void *args) { /* No args. */ #if __ARM_ARCH < 6 cpu_drain_writebuf(); #else dsb(); cpu_l2cache_drain_writebuf(); #endif td->td_retval[0] = 0; return (0); } static int arm32_set_tp(struct thread *td, void *args) { - td->td_md.md_tp = (register_t)args; #if __ARM_ARCH >= 6 set_tls(args); #else + td->td_md.md_tp = (register_t)args; *(register_t *)ARM_TP_ADDRESS = (register_t)args; #endif return (0); } static int arm32_get_tp(struct thread *td, void *args) { #if __ARM_ARCH >= 6 - td->td_retval[0] = td->td_md.md_tp; + td->td_retval[0] = (register_t)get_tls(); #else td->td_retval[0] = *(register_t *)ARM_TP_ADDRESS; #endif return (0); } int sysarch(td, uap) struct thread *td; register struct sysarch_args *uap; { int error; #ifdef CAPABILITY_MODE /* * When adding new operations, add a new case statement here to * explicitly indicate whether or not the operation is safe to * perform in capability mode. */ if (IN_CAPABILITY_MODE(td)) { switch (uap->op) { case ARM_SYNC_ICACHE: case ARM_DRAIN_WRITEBUF: case ARM_SET_TP: case ARM_GET_TP: break; default: #ifdef KTRACE if (KTRPOINT(td, KTR_CAPFAIL)) ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL); #endif return (ECAPMODE); } } #endif switch (uap->op) { case ARM_SYNC_ICACHE: error = arm32_sync_icache(td, uap->parms); break; case ARM_DRAIN_WRITEBUF: error = arm32_drain_writebuf(td, uap->parms); break; case ARM_SET_TP: error = arm32_set_tp(td, uap->parms); break; case ARM_GET_TP: error = arm32_get_tp(td, uap->parms); break; default: error = EINVAL; break; } return (error); } Index: head/sys/arm/arm/vm_machdep.c =================================================================== --- head/sys/arm/arm/vm_machdep.c (revision 306161) +++ head/sys/arm/arm/vm_machdep.c (revision 306162) @@ -1,350 +1,353 @@ /*- * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. 
* * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary :forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ #include "opt_compat.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * struct switchframe and trapframe must both be a multiple of 8 * for correct stack alignment. */ -CTASSERT(sizeof(struct switchframe) == 48); -CTASSERT(sizeof(struct trapframe) == 80); +_Static_assert((sizeof(struct switchframe) % 8) == 0, "Bad alignment"); +_Static_assert((sizeof(struct trapframe) % 8) == 0, "Bad alignment"); uint32_t initial_fpscr = VFPSCR_DN | VFPSCR_FZ; /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child * ready to run and return to user mode. 
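 * On ARMv6 and later the child also inherits the parent's current TLS
 * pointer, read from TPIDRURW into sf_tpidrurw.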
*/ void cpu_fork(register struct thread *td1, register struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; struct trapframe *tf; struct mdproc *mdp2; if ((flags & RFPROC) == 0) return; /* Point the pcb to the top of the stack */ pcb2 = (struct pcb *) (td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; #ifdef __XSCALE__ #ifndef CPU_XSCALE_CORE3 pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE); #endif #endif td2->td_pcb = pcb2; /* Clone td1's pcb */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Point to mdproc and then copy over td1's contents */ mdp2 = &p2->p_md; bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2)); /* Point the frame to the stack in front of pcb and copy td1's frame */ td2->td_frame = (struct trapframe *)pcb2 - 1; *td2->td_frame = *td1->td_frame; /* * Create a new fresh stack for the new process. * Copy the trap frame for the return to user mode as if from a * syscall. This copies most of the user mode register values. */ pmap_set_pcb_pagedir(vmspace_pmap(p2->p_vmspace), pcb2); pcb2->pcb_regs.sf_r4 = (register_t)fork_return; pcb2->pcb_regs.sf_r5 = (register_t)td2; pcb2->pcb_regs.sf_lr = (register_t)fork_trampoline; pcb2->pcb_regs.sf_sp = STACKALIGN(td2->td_frame); +#if __ARM_ARCH >= 6 + pcb2->pcb_regs.sf_tpidrurw = (register_t)get_tls(); +#endif pcb2->pcb_vfpcpu = -1; pcb2->pcb_vfpstate.fpscr = initial_fpscr; tf = td2->td_frame; tf->tf_spsr &= ~PSR_C; tf->tf_r0 = 0; tf->tf_r1 = 0; /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_cspr = PSR_SVC32_MODE; -#if __ARM_ARCH >= 6 - td2->td_md.md_tp = td1->td_md.md_tp; -#else +#if __ARM_ARCH < 6 td2->td_md.md_tp = *(register_t *)ARM_TP_ADDRESS; #endif } void cpu_thread_swapin(struct thread *td) { } void cpu_thread_swapout(struct thread *td) { } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; int fixup; #ifdef __ARMEB__ u_int call; #endif frame = td->td_frame; fixup = 0; #ifdef __ARMEB__ /* * __syscall returns an off_t while most other syscalls return an * int. As an off_t is 64-bits and an int is 32-bits we need to * place the returned data into r1. As the lseek and freebsd6_lseek * syscalls also return an off_t they do not need this fixup. */ call = frame->tf_r7; if (call == SYS___syscall) { register_t *ap = &frame->tf_r0; register_t code = ap[_QUAD_LOWWORD]; if (td->td_proc->p_sysent->sv_mask) code &= td->td_proc->p_sysent->sv_mask; fixup = (code != SYS_lseek); } #endif switch (error) { case 0: if (fixup) { frame->tf_r0 = 0; frame->tf_r1 = td->td_retval[0]; } else { frame->tf_r0 = td->td_retval[0]; frame->tf_r1 = td->td_retval[1]; } frame->tf_spsr &= ~PSR_C; /* carry bit */ break; case ERESTART: /* * Reconstruct the pc to point at the swi. */ #if __ARM_ARCH >= 7 if ((frame->tf_spsr & PSR_T) != 0) frame->tf_pc -= THUMB_INSN_SIZE; else #endif frame->tf_pc -= INSN_SIZE; break; case EJUSTRETURN: /* nothing to do */ break; default: frame->tf_r0 = error; frame->tf_spsr |= PSR_C; /* carry bit */ break; } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to the fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. 
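 * The pcb copy below carries sf_tpidrurw along as well; a new thread
 * normally gets its own TLS pointer afterwards through cpu_set_user_tls().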
*/ void cpu_copy_thread(struct thread *td, struct thread *td0) { bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); td->td_pcb->pcb_regs.sf_r4 = (register_t)fork_return; td->td_pcb->pcb_regs.sf_r5 = (register_t)td; td->td_pcb->pcb_regs.sf_lr = (register_t)fork_trampoline; td->td_pcb->pcb_regs.sf_sp = STACKALIGN(td->td_frame); td->td_frame->tf_spsr &= ~PSR_C; td->td_frame->tf_r0 = 0; /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = PSR_SVC32_MODE; } /* * Set that machine state for performing an upcall that starts * the entry function with the given argument. */ void cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf = td->td_frame; tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size); tf->tf_pc = (int)entry; tf->tf_r0 = (int)arg; tf->tf_spsr = PSR_USR32_MODE; } int cpu_set_user_tls(struct thread *td, void *tls_base) { - td->td_md.md_tp = (register_t)tls_base; - if (td == curthread) { - critical_enter(); #if __ARM_ARCH >= 6 + td->td_pcb->pcb_regs.sf_tpidrurw = (register_t)tls_base; + if (td == curthread) set_tls(tls_base); #else + td->td_md.md_tp = (register_t)tls_base; + if (td == curthread) { + critical_enter(); *(register_t *)ARM_TP_ADDRESS = (register_t)tls_base; -#endif critical_exit(); } +#endif return (0); } void cpu_thread_exit(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; /* * Ensure td_frame is aligned to an 8 byte boundary as it will be * placed into the stack pointer which must be 8 byte aligned in * the ARM EABI. */ td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb) - 1; #ifdef __XSCALE__ #ifndef CPU_XSCALE_CORE3 pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE); #endif #endif } void cpu_thread_free(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { td->td_pcb->pcb_regs.sf_r4 = (register_t)func; /* function */ td->td_pcb->pcb_regs.sf_r5 = (register_t)arg; /* first arg */ } /* * Software interrupt handler for queued VM system processing. */ void swi_vm(void *dummy) { if (busdma_swi_pending) busdma_swi(); } void cpu_exit(struct thread *td) { } Index: head/sys/arm/include/frame.h =================================================================== --- head/sys/arm/include/frame.h (revision 306161) +++ head/sys/arm/include/frame.h (revision 306162) @@ -1,135 +1,139 @@ /* $NetBSD: frame.h,v 1.5 2002/10/19 00:10:54 bjh21 Exp $ */ /*- * Copyright (c) 1994-1997 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * frame.h * * Stack frames structures * * Created : 30/09/94 * * $FreeBSD$ * */ #ifndef _MACHINE_FRAME_H_ #define _MACHINE_FRAME_H_ #ifndef _LOCORE #include #include /* * Trap frame. Pushed onto the kernel stack on a trap (synchronous exception). */ struct trapframe { register_t tf_spsr; /* Zero on arm26 */ register_t tf_r0; register_t tf_r1; register_t tf_r2; register_t tf_r3; register_t tf_r4; register_t tf_r5; register_t tf_r6; register_t tf_r7; register_t tf_r8; register_t tf_r9; register_t tf_r10; register_t tf_r11; register_t tf_r12; register_t tf_usr_sp; register_t tf_usr_lr; register_t tf_svc_sp; /* Not used on arm26 */ register_t tf_svc_lr; /* Not used on arm26 */ register_t tf_pc; register_t tf_pad; }; /* Register numbers */ #define tf_r13 tf_usr_sp #define tf_r14 tf_usr_lr #define tf_r15 tf_pc /* * Signal frame. Pushed onto user stack before calling sigcode. * The pointers are used in the trampoline code to locate the ucontext. */ struct sigframe { siginfo_t sf_si; /* actual saved siginfo */ ucontext_t sf_uc; /* actual saved ucontext */ }; /* * Switch frame. * * It is important this is a multiple of 8 bytes so the stack is correctly * aligned when we create new threads. */ struct switchframe { register_t sf_r4; register_t sf_r5; register_t sf_r6; register_t sf_r7; register_t sf_r8; register_t sf_r9; register_t sf_r10; register_t sf_r11; register_t sf_r12; register_t sf_sp; register_t sf_lr; register_t sf_pc; +#if __ARM_ARCH >= 6 + register_t sf_tpidrurw; + register_t sf_spare0; +#endif }; /* * Stack frame. Used during stack traces (db_trace.c) */ struct frame { u_int fr_fp; u_int fr_sp; u_int fr_lr; u_int fr_pc; }; #endif /* !_LOCORE */ #endif /* _MACHINE_FRAME_H_ */ Index: head/sys/arm/include/pcpu.h =================================================================== --- head/sys/arm/include/pcpu.h (revision 306161) +++ head/sys/arm/include/pcpu.h (revision 306162) @@ -1,132 +1,141 @@ /*- * Copyright (c) 1999 Luoqi Chen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: FreeBSD: src/sys/i386/include/globaldata.h,v 1.27 2001/04/27
 * $FreeBSD$
 */

#ifndef	_MACHINE_PCPU_H_
#define	_MACHINE_PCPU_H_

#ifdef _KERNEL

#include

#define	ALT_STACK_SIZE	128

struct vmspace;

#endif	/* _KERNEL */

#if __ARM_ARCH >= 6
#define PCPU_MD_FIELDS \
	unsigned int pc_vfpsid; \
	unsigned int pc_vfpmvfr0; \
	unsigned int pc_vfpmvfr1; \
	struct pmap *pc_curpmap; \
	vm_offset_t pc_qmap_addr; \
	void *pc_qmap_pte; \
	unsigned int pc_dbreg[32]; \
	int pc_dbreg_cmd; \
	char __pad[1]
#else
#define PCPU_MD_FIELDS \
	vm_offset_t qmap_addr; \
	void *pc_qmap_pte; \
	char __pad[149]
#endif

#ifdef _KERNEL

#define	PC_DBREG_CMD_NONE	0
#define	PC_DBREG_CMD_LOAD	1

struct pcb;
struct pcpu;

extern struct pcpu *pcpup;

#if __ARM_ARCH >= 6
#define CPU_MASK (0xf)

#ifndef SMP
#define get_pcpu() (pcpup)
#else
#define get_pcpu() __extension__ ({ \
	int id; \
	__asm __volatile("mrc p15, 0, %0, c0, c0, 5" : "=r" (id)); \
	(pcpup + (id & CPU_MASK)); \
})
#endif

static inline struct thread *
get_curthread(void)
{
	void *ret;

	__asm __volatile("mrc p15, 0, %0, c13, c0, 4" : "=r" (ret));
	return (ret);
}

static inline void
set_curthread(struct thread *td)
{

	__asm __volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (td));
}

static inline void *
get_tls(void)
{
	void *tls;

-	__asm __volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tls));
+	/* TPIDRURW contains the authoritative value. */
+	__asm __volatile("mrc p15, 0, %0, c13, c0, 2" : "=r" (tls));
	return (tls);
}

static inline void
set_tls(void *tls)
{

-	__asm __volatile("mcr p15, 0, %0, c13, c0, 3" : : "r" (tls));
+	/*
+	 * Update both TPIDRURW and TPIDRURO. TPIDRURW needs to be written
+	 * first to ensure that a context switch between the two writes will
+	 * still give the desired result of updating both.
+	 */
+	__asm __volatile(
+	    "mcr p15, 0, %0, c13, c0, 2\n"
+	    "mcr p15, 0, %0, c13, c0, 3\n"
+	    : : "r" (tls));
}

#define curthread get_curthread()

#else
#define get_pcpu() pcpup
#endif

#define	PCPU_GET(member)	(get_pcpu()->pc_ ## member)
#define	PCPU_ADD(member, value)	(get_pcpu()->pc_ ## member += (value))
#define	PCPU_INC(member)	PCPU_ADD(member, 1)
#define	PCPU_PTR(member)	(&get_pcpu()->pc_ ## member)
#define	PCPU_SET(member,value)	(get_pcpu()->pc_ ## member = (value))

void pcpu0_init(void);
#endif	/* _KERNEL */

#endif	/* !_MACHINE_PCPU_H_ */
Index: head/sys/arm/include/proc.h
===================================================================
--- head/sys/arm/include/proc.h	(revision 306161)
+++ head/sys/arm/include/proc.h	(revision 306162)
@@ -1,87 +1,87 @@
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)proc.h	7.1 (Berkeley) 5/15/91
 * from: FreeBSD: src/sys/i386/include/proc.h,v 1.11 2001/06/29
 * $FreeBSD$
 */

#ifndef	_MACHINE_PROC_H_
#define	_MACHINE_PROC_H_

#include

struct md_utrap {
	utrap_entry_t *ut_precise[UT_MAX];	/* must be first */
	int	ut_refcnt;
};

struct mdthread {
	int	md_spinlock_count;	/* (k) */
	register_t md_saved_cspr;	/* (k) */
	register_t md_spurflt_addr;	/* (k) Spurious page fault address. */
	int	md_ptrace_instr;
	int	md_ptrace_addr;
	int	md_ptrace_instr_alt;
	int	md_ptrace_addr_alt;
-	register_t md_tp;
#if __ARM_ARCH < 6
+	register_t md_tp;
	void	*md_ras_start;
	void	*md_ras_end;
#endif
};

struct mdproc {
	struct md_utrap *md_utrap;
	void	*md_sigtramp;
};

#define	KINFO_PROC_SIZE 816

#define	MAXARGS	8
/*
 * This holds the syscall state for a single system call.
 * As some syscall arguments may be 64-bit aligned we need to ensure the
 * args value is 64-bit aligned. The ABI will then ensure any 64-bit
 * arguments are already correctly aligned, even if they were passed in
 * via registers, we just need to make sure we copy them to an aligned
 * buffer.
 */
struct syscall_args {
	u_int code;
	struct sysent *callp;
	register_t args[MAXARGS];
	int narg;
	u_int nap;
} __aligned(8);

#endif /* !_MACHINE_PROC_H_ */
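For context, the userland side of the TLS contract that this revision preserves can be sketched as below: the thread pointer is still installed with sysarch(ARM_SET_TP) and read back either with sysarch(ARM_GET_TP) or straight from the user-read-only register TPIDRURO, while the kernel now keeps its authoritative per-thread copy in pcb_regs.sf_tpidrurw (hardware TPIDRURW) and rewrites both registers on every switch. This is an illustration only, not part of the commit; it assumes a FreeBSD/ARMv6+ userland in which <machine/sysarch.h> declares sysarch() and the ARM_SET_TP/ARM_GET_TP operations, and the file and symbol names below are made up for the example.

/* tls_demo.c - illustrative sketch only, not part of this changeset. */
#include <sys/types.h>
#include <machine/sysarch.h>	/* sysarch(), ARM_SET_TP, ARM_GET_TP */
#include <stdio.h>

static int tcb_slot;		/* stand-in for a real TLS control block */

static void *
read_tpidruro(void)
{
	void *tp;

	/* TPIDRURO (CP15 c13, c0, 3) is readable, but not writable, at PL0. */
	__asm__ __volatile__("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
	return (tp);
}

int
main(void)
{
	/*
	 * Install the thread pointer; the kernel records it in
	 * pcb_regs.sf_tpidrurw and mirrors it into TPIDRURO on switch.
	 */
	if (sysarch(ARM_SET_TP, &tcb_slot) != 0)
		return (1);

	/* ARM_GET_TP hands the pointer back as the syscall return value. */
	printf("via sysarch:  %p\n", (void *)sysarch(ARM_GET_TP, NULL));
	printf("via TPIDRURO: %p\n", read_tpidruro());
	printf("expected:     %p\n", (void *)&tcb_slot);
	return (0);
}

With this revision in place, all three lines should print the address of tcb_slot, since the kernel-maintained TPIDRURW value and the TPIDRURO view seen by userspace are kept in sync.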