Index: head/sys/arm/arm/genassym.c
===================================================================
--- head/sys/arm/arm/genassym.c	(revision 295702)
+++ head/sys/arm/arm/genassym.c	(revision 295703)
@@ -1,153 +1,155 @@
/*-
 * Copyright (c) 2004 Olivier Houchard
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

ASSYM(KERNBASE, KERNBASE);
#if __ARM_ARCH >= 6
ASSYM(CPU_ASID_KERNEL,CPU_ASID_KERNEL);
#endif

ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
#if __ARM_ARCH < 6
ASSYM(PCB_DACR, offsetof(struct pcb, pcb_dacr));
#endif
ASSYM(PCB_PAGEDIR, offsetof(struct pcb, pcb_pagedir));
#if __ARM_ARCH < 6
ASSYM(PCB_L1VEC, offsetof(struct pcb, pcb_l1vec));
ASSYM(PCB_PL1VEC, offsetof(struct pcb, pcb_pl1vec));
#endif
ASSYM(PCB_R4, offsetof(struct pcb, pcb_regs.sf_r4));
ASSYM(PCB_R5, offsetof(struct pcb, pcb_regs.sf_r5));
ASSYM(PCB_R6, offsetof(struct pcb, pcb_regs.sf_r6));
ASSYM(PCB_R7, offsetof(struct pcb, pcb_regs.sf_r7));
ASSYM(PCB_R8, offsetof(struct pcb, pcb_regs.sf_r8));
ASSYM(PCB_R9, offsetof(struct pcb, pcb_regs.sf_r9));
ASSYM(PCB_R10, offsetof(struct pcb, pcb_regs.sf_r10));
ASSYM(PCB_R11, offsetof(struct pcb, pcb_regs.sf_r11));
ASSYM(PCB_R12, offsetof(struct pcb, pcb_regs.sf_r12));
ASSYM(PCB_SP, offsetof(struct pcb, pcb_regs.sf_sp));
ASSYM(PCB_LR, offsetof(struct pcb, pcb_regs.sf_lr));
ASSYM(PCB_PC, offsetof(struct pcb, pcb_regs.sf_pc));

ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
ASSYM(M_LEN, offsetof(struct mbuf, m_len));
ASSYM(M_DATA, offsetof(struct mbuf, m_data));
ASSYM(M_NEXT, offsetof(struct mbuf, m_next));
ASSYM(IP_SRC, offsetof(struct ip, ip_src));
ASSYM(IP_DST, offsetof(struct ip, ip_dst));

ASSYM(CF_CONTEXT_SWITCH, offsetof(struct cpu_functions, cf_context_switch));
ASSYM(CF_DCACHE_WB_RANGE, offsetof(struct cpu_functions, cf_dcache_wb_range));
ASSYM(CF_IDCACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_idcache_wbinv_all));
ASSYM(CF_L2CACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_l2cache_wbinv_all));
ASSYM(CF_TLB_FLUSHID_SE, offsetof(struct cpu_functions, cf_tlb_flushID_SE));

ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
ASSYM(TD_MD, offsetof(struct thread, td_md));
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));

ASSYM(MD_TP, offsetof(struct mdthread, md_tp));
ASSYM(MD_RAS_START, offsetof(struct mdthread, md_ras_start));
ASSYM(MD_RAS_END, offsetof(struct mdthread, md_ras_end));

ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr));
ASSYM(TF_R0, offsetof(struct trapframe, tf_r0));
ASSYM(TF_R1, offsetof(struct trapframe, tf_r1));
ASSYM(TF_PC, offsetof(struct trapframe, tf_pc));
ASSYM(P_PID, offsetof(struct proc, p_pid));
ASSYM(P_FLAG, offsetof(struct proc, p_flag));

ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc));

#if __ARM_ARCH < 6
ASSYM(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
ASSYM(ARM_RAS_START, ARM_RAS_START);
ASSYM(ARM_RAS_END, ARM_RAS_END);
#endif

#ifdef VFP
ASSYM(PCB_VFPSTATE, offsetof(struct pcb, pcb_vfpstate));
#endif

#if __ARM_ARCH >= 6
ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap));
#endif

ASSYM(PAGE_SIZE, PAGE_SIZE);
+#if __ARM_ARCH < 6
ASSYM(PMAP_DOMAIN_KERNEL, PMAP_DOMAIN_KERNEL);
+#endif
#ifdef PMAP_INCLUDE_PTE_SYNC
ASSYM(PMAP_INCLUDE_PTE_SYNC, 1);
#endif
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
ASSYM(MAXCOMLEN, MAXCOMLEN);
ASSYM(MAXCPU, MAXCPU);
ASSYM(_NCPUWORDS, _NCPUWORDS);
ASSYM(NIRQ, NIRQ);
ASSYM(PCPU_SIZE, sizeof(struct pcpu));
ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);

ASSYM(DCACHE_LINE_SIZE, offsetof(struct cpuinfo, dcache_line_size));
ASSYM(DCACHE_LINE_MASK, offsetof(struct cpuinfo, dcache_line_mask));
ASSYM(ICACHE_LINE_SIZE, offsetof(struct cpuinfo, icache_line_size));
ASSYM(ICACHE_LINE_MASK, offsetof(struct cpuinfo, icache_line_mask));
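
Note on the hunk above: the PMAP_DOMAIN_KERNEL export now exists only for
__ARM_ARCH < 6, matching the removal of its definition from pmap-v6.h below.
For readers unfamiliar with the ASSYM() mechanism: genassym.c is compiled
(not linked into the kernel), and each ASSYM(name, expr) is turned into a
#define in the generated assym.s that assembly sources include, so .S files
never hard-code structure offsets. A minimal userland sketch of the idea,
using a hypothetical pcb_like struct rather than the real kernel types:

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical stand-in for a kernel struct whose field offsets
	 * assembly needs; the real genassym.c uses struct pcb, pcpu, etc. */
	struct pcb_like {
		void	*pcb_onfault;
		void	*pcb_pagedir;
	};

	int
	main(void)
	{
		/* Each line mimics what ASSYM() contributes to assym.s,
		 * e.g. "#define PCB_PAGEDIR 4", computed by the C compiler. */
		printf("#define PCB_ONFAULT %zu\n",
		    offsetof(struct pcb_like, pcb_onfault));
		printf("#define PCB_PAGEDIR %zu\n",
		    offsetof(struct pcb_like, pcb_pagedir));
		return (0);
	}
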
Index: head/sys/arm/arm/locore-v6.S
===================================================================
--- head/sys/arm/arm/locore-v6.S	(revision 295702)
+++ head/sys/arm/arm/locore-v6.S	(revision 295703)
@@ -1,587 +1,587 @@
/*-
 * Copyright 2004-2014 Olivier Houchard
 * Copyright 2012-2014 Ian Lepore
 * Copyright 2013-2014 Andrew Turner
 * Copyright 2014 Svatopluk Kraus
 * Copyright 2014 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.s"
#include
#include
#include
#include
#include
#include
#include
#include

__FBSDID("$FreeBSD$");

#if __ARM_ARCH >= 7
#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
 * HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
 * when enabled. llvm >= 3.6 supports it too.
 */
.arch_extension virt
#define	MSR_ELR_HYP(regnum)	msr	elr_hyp, lr
#define	ERET	eret
#else
#define	MSR_ELR_HYP(regnum)	.word	(0xe12ef300 | regnum)
#define	ERET	.word	0xe160006e
#endif
#endif /* __ARM_ARCH >= 7 */

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048

	.text
	.align	2

#if __ARM_ARCH >= 7
#define	LEAVE_HYP \
	/* Leave HYP mode */ ;\
	mrs	r0, cpsr ;\
	and	r0, r0, #(PSR_MODE)	/* Mode is in the low 5 bits of CPSR */ ;\
	teq	r0, #(PSR_HYP32_MODE)	/* Hyp Mode? */ ;\
	bne	1f ;\
	/* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
	mrs	r0, cpsr ;\
	bic	r0, r0, #(PSR_MODE) ;\
	orr	r0, r0, #(PSR_SVC32_MODE) ;\
	orr	r0, r0, #(PSR_I | PSR_F | PSR_A) ;\
	msr	spsr_cxsf, r0 ;\
	/* Exit hypervisor mode */ ;\
	adr	lr, 1f ;\
	MSR_ELR_HYP(14) ;\
	ERET ;\
1:
#else
#define	LEAVE_HYP
#endif /* __ARM_ARCH >= 7 */

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts are disabled. */
	cpsid	ifa

	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	LEAVE_HYP

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_UNAL_ENABLE
	orr	r7, #CPU_CONTROL_AFLT_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	DSB
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/*
	 * Build page table from scratch.
	 */

	/*
	 * Figure out the physical address we're loaded at by assuming this
	 * entry point code is in the first L1 section and so if we clear the
	 * offset bits of the pc that will give us the section-aligned load
	 * address, which remains in r5 throughout all the following code.
	 */
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, pc, r2

	/* Find the delta between VA and PA, result stays in r0 throughout. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * First map the entire 4GB address space as VA=PA.  It's mapped as
	 * normal (cached) memory because it's for things like accessing the
	 * parameters passed in from the bootloader, which might be at any
	 * physical address, different for every platform.
	 */
	mov	r1, #0
	mov	r2, #0
	mov	r3, #4096
	bl	build_pagetables

	/*
	 * Next we do 64MiB starting at the physical load address, mapped to
	 * the VA the kernel is linked for.
	 */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	mov	r3, #64
	bl	build_pagetables

	/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1
	bl	build_device_pagetables
#endif

	bl	init_mmu

	/* Transition the PC from physical to virtual addressing. */
	ldr	pc, =1f
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp, #INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* get zero init data */
	subs	r2, r2, #4
	bgt	2b

	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */
	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)

#define	VA_TO_PA_POINTER(name, table) \
name: ;\
	.word	. ;\
	.word	table

/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]
	sub	r2, r1, r0
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr

/*
 * Init MMU
 * r0 - the table base address
 */
ASENTRY_NP(init_mmu)
	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
-	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
+	mov	r0, #DOMAIN_CLIENT	/* Only domain #0 is used */
	mcr	CP15_DACR(r0)

	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0, #CPU_CONTROL_MMU_ENABLE
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_TR_ENABLE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

	mov	pc, lr
END(init_mmu)

/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with disabled caches
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0
	mov	r5, r1
	mov	r6, r2

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor	r8, r8, r6		/* Set bits */
	teq	r7, r8
	mcrne	CP15_ACTLR(r8)
	DSB
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)

/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_device_pagetables:
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
	b	1f
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
1:
	orr	r1, r4

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)

	mov	r4, r3
2:
	str	r1, [r0, r2]
	add	r2, r2, #4
	add	r1, r1, #(PTE1_SIZE)
	adds	r4, r4, #-1
	bhi	2b

	mov	pc, lr

VA_TO_PA_POINTER(Lpagetable, boot_pt1)

.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable"
	.align	14			/* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE

	.text
	.align	2

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)

ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	LEAVE_HYP

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_UNAL_ENABLE
	orr	r0, #CPU_CONTROL_AFLT_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	DSB
	ISB

	/* Invalidate L1 cache I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0, #0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1, r0		/* Point sp to initstack */
	add	sp, sp, r2		/* area for this processor. */

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go, cpu id in r0. */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif

ENTRY_NP(cpu_halt)
	/* XXX re-implement !!! */
	cpsid	ifa
	bl	dcache_wbinv_poc_all

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0
	movne	pc, r4
1:
	WFI
	b	1b

/*
 * _cpu_reset_address contains the address to branch to, to complete
 * the cpu reset after turning the MMU off
 * This variable is provided by the hardware specific code
 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)

/*
 * setjump + longjmp
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */
	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */
	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	2
	.global	_C_LABEL(esigcode)
_C_LABEL(esigcode):

	.data
	.global	szsigcode
szsigcode:
	.long	esigcode-sigcode

/* End of locore.S */
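
Note on the init_mmu hunk above: the DACR change is a simplification, not a
behavior change. With PMAP_DOMAIN_KERNEL defined as 0 (the definition removed
from pmap-v6.h below), the shift amount is 0 and the OR is of the value with
itself, so the old expression already evaluated to plain DOMAIN_CLIENT. A
standalone check, with both macros copied out locally for illustration
(DOMAIN_CLIENT is written here as the ARM "client" domain encoding 0b01,
though the identity holds for any value):

	#include <assert.h>

	#define	DOMAIN_CLIENT		0x1	/* ARM domain access: client */
	#define	PMAP_DOMAIN_KERNEL	0	/* definition removed below */

	int
	main(void)
	{
		unsigned int old_dacr, new_dacr;

		/* Old expression: shift by 0, then OR the value with itself. */
		old_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
		    DOMAIN_CLIENT;
		/* New expression: only domain #0 is used. */
		new_dacr = DOMAIN_CLIENT;

		assert(old_dacr == new_dacr);	/* (x << 0) | x == x */
		return (0);
	}
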
Index: head/sys/arm/include/pmap-v6.h
===================================================================
--- head/sys/arm/include/pmap-v6.h	(revision 295702)
+++ head/sys/arm/include/pmap-v6.h	(revision 295703)
@@ -1,249 +1,223 @@
/*-
 * Copyright 2014 Svatopluk Kraus
 * Copyright 2014 Michal Meloun
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The ARM version of this file was more or less based on the i386 version,
 * which has the following provenance...
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include
#include
#include
#include

typedef	uint32_t	pt1_entry_t;	/* L1 table entry */
typedef	uint32_t	pt2_entry_t;	/* L2 table entry */
typedef	uint32_t	ttb_entry_t;	/* TTB entry */

#ifdef _KERNEL

#if 0
#define	PMAP_PTE_NOCACHE	// Use uncached page tables
#endif

/*
 * (1) During pmap bootstrap, physical pages for L2 page tables are
 *     allocated in advance which are used for KVA continuous mapping
 *     starting from KERNBASE. This makes things more simple.
 * (2) During vm subsystem initialization, only vm subsystem itself can
 *     allocate physical memory safely. As pmap_map() is called during
 *     this initialization, we must be prepared for that and have some
 *     preallocated physical pages for L2 page tables.
 *
 * Note that some more pages for L2 page tables are preallocated too
 * for mappings laying above VM_MAX_KERNEL_ADDRESS.
 */
#ifndef NKPT2PG
/*
 * The optimal way is to define this in board configuration as
 * definition here must be safe enough. It means really big.
 *
 * 1 GB KVA <=> 256 kernel L2 page table pages
 *
 * From real platforms:
 *	1 GB physical memory <=> 10 pages is enough
 *	2 GB physical memory <=> 21 pages is enough
 */
#define NKPT2PG		32
#endif

extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern char *_tmppt;	/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};
#endif

struct	pv_entry;
struct	pv_chunk;

struct	md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	uint16_t		pt2_wirecount[4];
	vm_memattr_t		pat_mode;
};

struct	pmap {
	struct mtx		pm_mtx;
	pt1_entry_t		*pm_pt1;	/* KVA of pt1 */
	pt2_entry_t		*pm_pt2tab;	/* KVA of pt2 pages table */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page. An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process. This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	11
#define	_NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};

#ifdef _KERNEL
struct pcb;
extern ttb_entry_t pmap_kern_ttb;	/* TTB for kernel pmap */

#define	pmap_page_get_memattr(m)	((m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)

/*
 * Only the following functions or macros may be used before pmap_bootstrap()
 * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
 * vtopte2().
 */
void	pmap_bootstrap(vm_offset_t);
void	pmap_kenter(vm_offset_t, vm_paddr_t);
void	*pmap_kenter_temporary(vm_paddr_t, int);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t);
void	pmap_page_set_memattr(vm_page_t, vm_memattr_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
void	pmap_kremove_device(vm_offset_t, vm_size_t);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_tlb_flush(pmap_t, vm_offset_t);
void	pmap_tlb_flush_range(pmap_t, vm_offset_t, vm_size_t);

void	pmap_dcache_wb_range(vm_paddr_t, vm_size_t, vm_memattr_t);

vm_paddr_t pmap_kextract(vm_offset_t);
vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *);

int	pmap_fault(pmap_t, vm_offset_t, uint32_t, int, bool);

#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))

void	pmap_set_tex(void);
void	reinit_mmu(ttb_entry_t ttb, u_int aux_clr, u_int aux_set);

/*
 * Pre-bootstrap epoch functions set.
 */
void	pmap_bootstrap_prepare(vm_paddr_t);
vm_paddr_t pmap_preboot_get_pages(u_int);
void	pmap_preboot_map_pages(vm_paddr_t, vm_offset_t, u_int);
vm_offset_t pmap_preboot_reserve_pages(u_int);
vm_offset_t pmap_preboot_get_vpages(u_int);
void	pmap_preboot_map_attr(vm_paddr_t, vm_offset_t, vm_size_t, vm_prot_t,
	    vm_memattr_t);

#endif /* _KERNEL */
-
-// ----------------- TO BE DELETED ---------------------------------------------
-#ifdef _KERNEL
-
-/*
- * sys/arm/arm/elf_trampoline.c
- * sys/arm/arm/genassym.c
- * sys/arm/arm/machdep.c
- * sys/arm/arm/mp_machdep.c
- * sys/arm/arm/locore.S
- * sys/arm/arm/pmap.c
- * sys/arm/arm/swtch.S
- * sys/arm/at91/at91_machdep.c
- * sys/arm/cavium/cns11xx/econa_machdep.c
- * sys/arm/s3c2xx0/s3c24x0_machdep.c
- * sys/arm/xscale/ixp425/avila_machdep.c
- * sys/arm/xscale/i8134x/crb_machdep.c
- * sys/arm/xscale/i80321/ep80219_machdep.c
- * sys/arm/xscale/i80321/iq31244_machdep.c
- * sys/arm/xscale/pxa/pxa_machdep.c
- */
-#define PMAP_DOMAIN_KERNEL	0 /* The kernel uses domain #0 */
-
-#endif /* _KERNEL */
-// -----------------------------------------------------------------------------
-
#endif /* !_MACHINE_PMAP_H_ */
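
A side note on the pv_chunk constants kept in pmap-v6.h: _NPCM (11) and
_NPCPV (336) appear to be sized, as in the i386 pmap this header derives
from, so that one chunk fills exactly one 4 KiB page on ILP32 ARM. 336
free-list bits need ceil(336/32) = 11 bitmap words, and a 64-byte chunk
header plus 336 twelve-byte pv_entry records is exactly 4096 bytes. A quick
check under those ILP32 assumptions (4-byte pointers and vm_offset_t,
two-pointer TAILQ_ENTRY):

	#include <assert.h>

	#define	NPCM	11	/* _NPCM from pmap-v6.h */
	#define	NPCPV	336	/* _NPCPV from pmap-v6.h */

	int
	main(void)
	{
		/* Free bitmap: one bit per pv_entry, in 32-bit words. */
		unsigned int bitmap_words = (NPCPV + 31) / 32;

		/* pc_pmap (4) + pc_list (8) + pc_map (11*4) + pc_lru (8). */
		unsigned int header = 4 + 8 + NPCM * 4 + 8;

		/* Each pv_entry: pv_va (4) + pv_next (8) = 12 bytes. */
		unsigned int entries = NPCPV * (4 + 8);

		assert(bitmap_words == NPCM);		/* 11 words, 336 bits */
		assert(header + entries == 4096);	/* one 4 KiB page */
		return (0);
	}
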