diff --git a/lib/libkvm/kvm_arm.h b/lib/libkvm/kvm_arm.h
index f441d65860ec..300c44902393 100644
--- a/lib/libkvm/kvm_arm.h
+++ b/lib/libkvm/kvm_arm.h
@@ -1,125 +1,118 @@
/*-
 * Copyright (c) 2015 John H. Baldwin
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __KVM_ARM_H__
#define __KVM_ARM_H__

typedef uint32_t	arm_physaddr_t;
typedef uint32_t	arm_pd_entry_t;
typedef uint32_t	arm_pt_entry_t;

#define	ARM_PAGE_SHIFT	12
#define	ARM_PAGE_SIZE	(1 << ARM_PAGE_SHIFT)	/* Page size */
#define	ARM_PAGE_MASK	(ARM_PAGE_SIZE - 1)

#define	ARM_L1_TABLE_SIZE	0x4000		/* 16K */

#define	ARM_L1_S_SIZE	0x00100000	/* 1M */
#define	ARM_L1_S_OFFSET	(ARM_L1_S_SIZE - 1)
#define	ARM_L1_S_FRAME	(~ARM_L1_S_OFFSET)
#define	ARM_L1_S_SHIFT	20

#define	ARM_L2_L_SIZE	0x00010000	/* 64K */
#define	ARM_L2_L_OFFSET	(ARM_L2_L_SIZE - 1)
#define	ARM_L2_L_FRAME	(~ARM_L2_L_OFFSET)
#define	ARM_L2_L_SHIFT	16

#define	ARM_L2_S_SIZE	0x00001000	/* 4K */
#define	ARM_L2_S_OFFSET	(ARM_L2_S_SIZE - 1)
#define	ARM_L2_S_FRAME	(~ARM_L2_S_OFFSET)
#define	ARM_L2_S_SHIFT	12

#define	ARM_L2_TEX1	0x00000080
#define	ARM_PTE2_RO	ARM_L2_TEX1
#define	ARM_L2_NX	0x00000001
#define	ARM_PTE2_NX	ARM_L2_NX

/*
 * Note: L2_S_PROT_W differs depending on whether the system is generic or
 * xscale.  This isn't easily accessible in this context, so use an
 * approximation of 'xscale' which is a subset of 'generic'.
 */
#define	ARM_L2_AP0(x)	((x) << 4)
#define	ARM_AP_W	0x01
#define	ARM_L2_S_PROT_W	(ARM_L2_AP0(ARM_AP_W))

#define	ARM_L1_TYPE_INV	0x00		/* Invalid (fault) */
#define	ARM_L1_TYPE_C	0x01		/* Coarse L2 */
#define	ARM_L1_TYPE_S	0x02		/* Section */
#define	ARM_L1_TYPE_MASK	0x03	/* Mask of type bits */

#define	ARM_L1_S_ADDR_MASK	0xfff00000	/* phys address of section */
#define	ARM_L1_C_ADDR_MASK	0xfffffc00	/* phys address of L2 Table */

#define	ARM_L2_TYPE_INV	0x00		/* Invalid (fault) */
#define	ARM_L2_TYPE_L	0x01		/* Large Page - 64k */
#define	ARM_L2_TYPE_S	0x02		/* Small Page - 4k */
#define	ARM_L2_TYPE_T	0x03		/* Tiny Page - 1k - not used */
#define	ARM_L2_TYPE_MASK	0x03

#ifdef __arm__
#include

-#if __ARM_ARCH >= 6
-#include <machine/pte-v6.h>
-#else
-#include <machine/pte-v4.h>
-#endif
+#include <machine/pte.h>

_Static_assert(PAGE_SHIFT == ARM_PAGE_SHIFT, "PAGE_SHIFT mismatch");
_Static_assert(PAGE_SIZE == ARM_PAGE_SIZE, "PAGE_SIZE mismatch");
_Static_assert(PAGE_MASK == ARM_PAGE_MASK, "PAGE_MASK mismatch");
_Static_assert(L1_TABLE_SIZE == ARM_L1_TABLE_SIZE, "L1_TABLE_SIZE mismatch");
_Static_assert(L1_S_SIZE == ARM_L1_S_SIZE, "L1_S_SIZE mismatch");
_Static_assert(L1_S_OFFSET == ARM_L1_S_OFFSET, "L1_S_OFFSET mismatch");
_Static_assert(L1_S_FRAME == ARM_L1_S_FRAME, "L1_S_FRAME mismatch");
_Static_assert(L1_S_SHIFT == ARM_L1_S_SHIFT, "L1_S_SHIFT mismatch");
_Static_assert(L2_L_SIZE == ARM_L2_L_SIZE, "L2_L_SIZE mismatch");
_Static_assert(L2_L_OFFSET == ARM_L2_L_OFFSET, "L2_L_OFFSET mismatch");
_Static_assert(L2_L_FRAME == ARM_L2_L_FRAME, "L2_L_FRAME mismatch");
_Static_assert(L2_L_SHIFT == ARM_L2_L_SHIFT, "L2_L_SHIFT mismatch");
_Static_assert(L2_S_SIZE == ARM_L2_S_SIZE, "L2_S_SIZE mismatch");
_Static_assert(L2_S_OFFSET == ARM_L2_S_OFFSET, "L2_S_OFFSET mismatch");
_Static_assert(L2_S_FRAME == ARM_L2_S_FRAME, "L2_S_FRAME mismatch");
_Static_assert(L2_S_SHIFT == ARM_L2_S_SHIFT, "L2_S_SHIFT mismatch");
_Static_assert(L1_TYPE_INV == ARM_L1_TYPE_INV, "L1_TYPE_INV mismatch");
_Static_assert(L1_TYPE_C == ARM_L1_TYPE_C, "L1_TYPE_C mismatch");
_Static_assert(L1_TYPE_S == ARM_L1_TYPE_S, "L1_TYPE_S mismatch");
_Static_assert(L1_TYPE_MASK == ARM_L1_TYPE_MASK, "L1_TYPE_MASK mismatch");
_Static_assert(L1_S_ADDR_MASK == ARM_L1_S_ADDR_MASK, "L1_S_ADDR_MASK mismatch");
_Static_assert(L1_C_ADDR_MASK == ARM_L1_C_ADDR_MASK, "L1_C_ADDR_MASK mismatch");
_Static_assert(L2_TYPE_INV == ARM_L2_TYPE_INV, "L2_TYPE_INV mismatch");
_Static_assert(L2_TYPE_L == ARM_L2_TYPE_L, "L2_TYPE_L mismatch");
_Static_assert(L2_TYPE_S == ARM_L2_TYPE_S, "L2_TYPE_S mismatch");
-#if __ARM_ARCH < 6
-_Static_assert(L2_TYPE_T == ARM_L2_TYPE_T, "L2_TYPE_T mismatch");
-#endif
_Static_assert(L2_TYPE_MASK == ARM_L2_TYPE_MASK, "L2_TYPE_MASK mismatch");
#endif

int	_arm_native(kvm_t *);

#endif /* !__KVM_ARM_H__ */
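For context on how a consumer of these constants walks the two-level ARM table, here is a minimal, hypothetical sketch (not part of the change). Real libkvm code reads the tables through its own kvm accessors rather than plain pointers, and 64k large pages are omitted for brevity; the constants are mirrored from the header above.

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t arm_physaddr_t;
    typedef uint32_t arm_pd_entry_t;
    typedef uint32_t arm_pt_entry_t;

    /* Mirrored from kvm_arm.h. */
    #define ARM_L1_S_SHIFT     20
    #define ARM_L1_S_OFFSET    ((1u << ARM_L1_S_SHIFT) - 1)
    #define ARM_L1_TYPE_MASK   0x03
    #define ARM_L1_TYPE_C      0x01    /* Coarse L2 */
    #define ARM_L1_TYPE_S      0x02    /* Section */
    #define ARM_L1_S_ADDR_MASK 0xfff00000u
    #define ARM_L1_C_ADDR_MASK 0xfffffc00u
    #define ARM_L2_TYPE_MASK   0x03
    #define ARM_L2_TYPE_INV    0x00
    #define ARM_L2_S_FRAME     0xfffff000u
    #define ARM_L2_S_OFFSET    0x00000fffu

    /*
     * Translate va via an in-core copy of the 4096-entry L1 table; get_l2()
     * stands in for whatever fetches the coarse L2 table at a physical
     * address.
     */
    static bool
    arm_va_to_pa(const arm_pd_entry_t *l1,
        const arm_pt_entry_t *(*get_l2)(arm_physaddr_t),
        uint32_t va, arm_physaddr_t *pa)
    {
        arm_pd_entry_t pde = l1[va >> ARM_L1_S_SHIFT];

        switch (pde & ARM_L1_TYPE_MASK) {
        case ARM_L1_TYPE_S:    /* 1MiB section, no L2 walk needed */
            *pa = (pde & ARM_L1_S_ADDR_MASK) | (va & ARM_L1_S_OFFSET);
            return (true);
        case ARM_L1_TYPE_C: {  /* coarse table: 256 small-page entries */
            const arm_pt_entry_t *l2 = get_l2(pde & ARM_L1_C_ADDR_MASK);
            arm_pt_entry_t pte = l2[(va >> 12) & 0xff];

            if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV)
                return (false);
            *pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET);
            return (true);
        }
        default:
            return (false);
        }
    }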
diff --git a/sys/arm/arm/locore-v6.S b/sys/arm/arm/locore-v6.S
index ad96b0b9d638..7104ae124cf1 100644
--- a/sys/arm/arm/locore-v6.S
+++ b/sys/arm/arm/locore-v6.S
@@ -1,605 +1,605 @@
/*-
 * Copyright 2004-2014 Olivier Houchard
 * Copyright 2012-2014 Ian Lepore
 * Copyright 2013-2014 Andrew Turner
 * Copyright 2014 Svatopluk Kraus
 * Copyright 2014 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.inc"
#include
#include
#include
#include
#include
-#include <machine/pte-v6.h>
+#include <machine/pte.h>

__FBSDID("$FreeBSD$");

/* We map 64MB of kernel unless overridden in assym.inc by the kernel option. */
#ifndef LOCORE_MAP_MB
#define	LOCORE_MAP_MB	64
#endif

#if __ARM_ARCH >= 7
#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
 * HYP support is in binutils >= 2.21, and gcc >= 4.9 defines __ARM_ARCH_7VE__
 * when it is enabled.  llvm >= 3.6 supports it too.
 */
.arch_extension virt
#endif
#endif /* __ARM_ARCH >= 7 */

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048

	.text
	.align	2

	.globl	kernbase
	.set	kernbase,KERNVIRTADDR

#if __ARM_ARCH >= 7
#define	HANDLE_HYP							\
	/* Leave HYP mode */						;\
	mrs	r0, cpsr						;\
	and	r0, r0, #(PSR_MODE)	/* Mode is in the low 5 bits of CPSR */ ;\
	teq	r0, #(PSR_HYP32_MODE)	/* Hyp Mode? */			;\
	bne	1f							;\
	/* Install Hypervisor Stub Exception Vector */			;\
	bl	hypervisor_stub_vect_install				;\
	mov	r0, 0							;\
	adr	r1, hypmode_enabled					;\
	str	r0, [r1]						;\
	/* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
	mrs	r0, cpsr						;\
	bic	r0, r0, #(PSR_MODE)					;\
	orr	r0, r0, #(PSR_SVC32_MODE)				;\
	orr	r0, r0, #(PSR_I | PSR_F | PSR_A)			;\
	msr	spsr_cxsf, r0						;\
	/* Exit hypervisor mode */					;\
	adr	lr, 2f							;\
	MSR_ELR_HYP(14)							;\
	ERET								;\
1:									;\
	mov	r0, -1							;\
	adr	r1, hypmode_enabled					;\
	str	r0, [r1]						;\
2:
#else
#define	HANDLE_HYP
#endif /* __ARM_ARCH >= 7 */
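The mode test at the top of HANDLE_HYP is just a field extraction on the CPSR. A small C restatement of the check, assuming the usual armreg.h mode encodings (Hyp = 0b11010, SVC = 0b10011):

    #include <stdint.h>

    #define PSR_MODE        0x1f    /* CPSR[4:0]: current mode */
    #define PSR_HYP32_MODE  0x1a    /* 0b11010 */
    #define PSR_SVC32_MODE  0x13    /* 0b10011 */

    /* Nonzero when the boot loader handed control over in HYP mode. */
    static int
    entered_in_hyp(uint32_t cpsr)
    {
        return ((cpsr & PSR_MODE) == PSR_HYP32_MODE);
    }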
/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
 *
 * For both types of boot we gather up the args, put them in a struct
 * arm_boot_params structure and pass that to initarm.
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts are disabled. */
	cpsid	ifa

	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	# If HYP-MODE is active, install an exception vector stub
	HANDLE_HYP

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_AFLT_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_UNAL_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	DSB
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/*
	 * Build page table from scratch.
	 */

	/*
	 * Figure out the physical address we're loaded at by assuming this
	 * entry point code is in the first L1 section and so if we clear the
	 * offset bits of the pc that will give us the section-aligned load
	 * address, which remains in r5 throughout all the following code.
	 */
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, pc, r2

	/* Find the delta between VA and PA, result stays in r0 throughout. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * First map the entire 4GB address space as VA=PA.  It's mapped as
	 * normal (cached) memory because it's for things like accessing the
	 * parameters passed in from the bootloader, which might be at any
	 * physical address, different for every platform.
	 */
	mov	r1, #0
	mov	r2, #0
	mov	r3, #4096
	bl	build_pagetables

	/*
	 * Next we map the kernel starting at the physical load address, mapped
	 * to the VA the kernel is linked for.  The default size we map is 64MiB
	 * but it can be overridden with a kernel option.
	 */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	ldr	r3, =(LOCORE_MAP_MB)
	bl	build_pagetables

	/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1
	bl	build_device_pagetables
#endif
	bl	init_mmu

	/* Transition the PC from physical to virtual addressing. */
	ldr	pc, =1f
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp, #INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get size of zero init data */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* clear a word of bss */
	subs	r2, r2, #4
	bgt	2b

	mov	r1, #28			/* loader info size is 28 bytes, also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* initarm() will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table

/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer.  This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr r0, Lpagetable
 *            bl translate_va_to_pa
 *          r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]
	sub	r2, r1, r0
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table.  After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr
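Equivalently, in C (a sketch of the same arithmetic): the pair built by VA_TO_PA_POINTER is { link-time address of the pair, link-time address of the target }, and reading it through its run-time physical address lets the code solve for the relocation delta.

    #include <stdint.h>

    static uintptr_t
    translate_va_to_pa(const uintptr_t *pair)   /* 'pair' is a PA here */
    {
        uintptr_t delta = pair[0] - (uintptr_t)pair;  /* VA - PA */

        return (pair[1] - delta);                     /* pa(target) */
    }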
/*
 * Init MMU
 * r0 - the table base address
 */
ASENTRY_NP(init_mmu)
	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
	mov	r0, #DOMAIN_CLIENT	/* Only domain #0 is used */
	mcr	CP15_DACR(r0)

	/*
	 * Ensure that LPAE is disabled and that TTBR0 is used for
	 * translation; use a 16KB translation table.
	 */
	mov	r0, #0
	mcr	CP15_TTBCR(r0)

	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0, #CPU_CONTROL_MMU_ENABLE
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_TR_ENABLE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

	mov	pc, lr
END(init_mmu)

/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with caches disabled.
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0			/* save args */
	mov	r5, r1
	mov	r6, r2

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor	r8, r8, r6		/* Set bits */
	teq	r7, r8
	mcrne	CP15_ACTLR(r8)
	DSB
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)

/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_device_pagetables:
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
	b	1f
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
1:
	orr	r1, r4

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)

	mov	r4, r3
2:
	str	r1, [r0, r2]
	add	r2, r2, #4
	add	r1, r1, #(PTE1_SIZE)
	adds	r4, r4, #-1
	bhi	2b

	mov	pc, lr

VA_TO_PA_POINTER(Lpagetable, boot_pt1)
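A C rendering of the build_pagetables loop, assuming PTE1_SHIFT is 20 (1MiB sections). The assembly forms a byte offset with va >> (PTE1_SHIFT - 2); indexing a uint32_t array by va >> PTE1_SHIFT is the same thing.

    #include <stdint.h>

    #define PTE1_SHIFT 20
    #define PTE1_SIZE  (1u << PTE1_SHIFT)

    static void
    build_pagetables(uint32_t *pt1, uint32_t pa, uint32_t va,
        uint32_t nsections, uint32_t attr)  /* attr: PTE1_V|PTE1_A|... */
    {
        uint32_t entry = pa | attr;
        uint32_t idx = va >> PTE1_SHIFT;

        while (nsections-- > 0) {
            pt1[idx++] = entry;    /* one 1MiB section entry */
            entry += PTE1_SIZE;    /* next 1MiB of physical space */
        }
    }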
	.global _C_LABEL(hypmode_enabled)
_C_LABEL(hypmode_enabled):
	.word 0

.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU

/*
 * Memory for the initial pagetable.  We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable", "aw", %nobits
	.align	14	/* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE

	.text
	.align	2

#if defined(SMP)
ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	HANDLE_HYP

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_AFLT_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_UNAL_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	DSB
	ISB

	/* Invalidate L1 I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0, #0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1, r0		/* Point sp to initstack */
	add	sp, sp, r2		/* area for this processor. */

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go, cpu id in r0. */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif

ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa
	bl	dcache_wbinv_poc_all

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0
	movne	pc, r4
1:
	WFI
	b	1b

/*
 * _cpu_reset_address contains the address to branch to, to complete
 * the cpu reset after turning the MMU off.
 * This variable is provided by the hardware specific code.
 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)

/*
 * setjmp + longjmp
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct.  Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */
	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */
	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	2
	.global	_C_LABEL(esigcode)
_C_LABEL(esigcode):

	.data
	.global	szsigcode
szsigcode:
	.long	esigcode-sigcode

/* End of locore.S */
diff --git a/sys/arm/include/pmap_var.h b/sys/arm/include/pmap_var.h
index 08d32575f82a..e746ed513d4f 100644
--- a/sys/arm/include/pmap_var.h
+++ b/sys/arm/include/pmap_var.h
@@ -1,494 +1,494 @@
/*-
 * Copyright 2014 Svatopluk Kraus
 * Copyright 2014 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_VAR_H_
#define _MACHINE_PMAP_VAR_H_

-#include <machine/pte-v6.h>
+#include <machine/pte.h>

/*
 * Various PMAP defines, exports, and inline functions
 * definitions also usable in other MD code.
 */

/* The number of pages in an L1 page table. */
#define NPG_IN_PT1	(NB_IN_PT1 / PAGE_SIZE)

/* The number of L2 page tables in a page. */
#define NPT2_IN_PG	(PAGE_SIZE / NB_IN_PT2)

/* The number of L2 page table entries in a page. */
#define NPTE2_IN_PG	(NPT2_IN_PG * NPTE2_IN_PT2)

#ifdef _KERNEL

/*
 * An L2 page tables page contains NPT2_IN_PG L2 page tables.  Masking of
 * pte1_idx by PT2PG_MASK gives us an index to the associated L2 page table
 * in a page.  The PT2PG_SHIFT definition depends on NPT2_IN_PG strictly.
 * I.e., (1 << PT2PG_SHIFT) == NPT2_IN_PG must be fulfilled.
 */
#define PT2PG_SHIFT	2
#define PT2PG_MASK	((1 << PT2PG_SHIFT) - 1)

/*
 * A PT2TAB holds all allocated L2 page table pages in a pmap.
 * Right-shifting a virtual address by PT2TAB_SHIFT gives an index into
 * PT2TAB for the L2 page table page which holds the address mapping.
 */
#define PT2TAB_ENTRIES	(NPTE1_IN_PT1 / NPT2_IN_PG)
#define PT2TAB_SHIFT	(PTE1_SHIFT + PT2PG_SHIFT)

/*
 * All allocated L2 page table pages in a pmap are mapped into PT2MAP space.
 * Right-shifting a virtual address by PT2MAP_SHIFT gives an index to the
 * PTE2 which maps the address.
 */
#define PT2MAP_SIZE	(NPTE1_IN_PT1 * NB_IN_PT2)
#define PT2MAP_SHIFT	PTE2_SHIFT

extern pt1_entry_t *kern_pt1;
extern pt2_entry_t *kern_pt2tab;
extern pt2_entry_t *PT2MAP;
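To see how the three shift constants relate, a worked example (assuming PTE1_SHIFT == 20 and PTE2_SHIFT == 12, so PT2TAB_SHIFT == 22):

    #include <stdint.h>

    #define PTE1_SHIFT   20
    #define PT2PG_SHIFT  2
    #define PT2PG_MASK   ((1 << PT2PG_SHIFT) - 1)
    #define PT2TAB_SHIFT (PTE1_SHIFT + PT2PG_SHIFT)  /* 22 */
    #define PT2MAP_SHIFT 12                          /* PTE2_SHIFT */

    /*
     * For va = 0xC0345678:
     *   va >> PTE1_SHIFT                 = 0xC03   (L1 slot)
     *   va >> PT2TAB_SHIFT               = 0x300   (L2 table *page* in PT2TAB)
     *   (va >> PTE1_SHIFT) & PT2PG_MASK  = 3       (which of 4 PT2s in that page)
     *   va >> PT2MAP_SHIFT               = 0xC0345 (PTE2 slot in PT2MAP)
     */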
/*
 * Virtual interface for L1 page table management.
 */

static __inline u_int
pte1_index(vm_offset_t va)
{
	return (va >> PTE1_SHIFT);
}

static __inline pt1_entry_t *
pte1_ptr(pt1_entry_t *pt1, vm_offset_t va)
{
	return (pt1 + pte1_index(va));
}

static __inline vm_offset_t
pte1_trunc(vm_offset_t va)
{
	return (va & PTE1_FRAME);
}

static __inline vm_offset_t
pte1_roundup(vm_offset_t va)
{
	return ((va + PTE1_OFFSET) & PTE1_FRAME);
}

/*
 * Virtual interface for L1 page table entries management.
 *
 * XXX: Some of the following functions include a synchronization barrier
 * and are called in a loop, so it could be useful to have two versions of
 * them: one with the barrier and one without.  In that case, a pure-barrier
 * pte1_sync() should be implemented as well.
 */
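Putting the helpers together: a hypothetical mapping routine (the function name and attribute argument are illustrative, not part of this header). pte1_store() supplies the dmb() before the write and pte1_sync() the dsb() plus PoU clean after it, so a caller needs no explicit barriers of its own:

    /* Hypothetical: map one 1MiB kernel section using the helpers below. */
    static void
    kern_map_section(vm_offset_t va, vm_paddr_t pa, uint32_t attr)
    {
        pt1_entry_t *pte1p = pte1_ptr(kern_pt1, va);

        pte1_store(pte1p, (pa & PTE1_FRAME) | attr);
    }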
static __inline void
pte1_sync(pt1_entry_t *pte1p)
{
	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, sizeof(*pte1p));
#endif
}

static __inline void
pte1_sync_range(pt1_entry_t *pte1p, vm_size_t size)
{
	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte1p, size);
#endif
}

static __inline void
pte1_store(pt1_entry_t *pte1p, pt1_entry_t pte1)
{
	dmb();
	*pte1p = pte1;
	pte1_sync(pte1p);
}

static __inline void
pte1_clear(pt1_entry_t *pte1p)
{
	pte1_store(pte1p, 0);
}

static __inline void
pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
{
	*pte1p &= ~bit;
	pte1_sync(pte1p);
}

static __inline boolean_t
pte1_is_link(pt1_entry_t pte1)
{
	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_C);
}

static __inline int
pte1_is_section(pt1_entry_t pte1)
{
	return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S);
}

static __inline boolean_t
pte1_is_dirty(pt1_entry_t pte1)
{
	return ((pte1 & (PTE1_NM | PTE1_RO)) == 0);
}

static __inline boolean_t
pte1_is_global(pt1_entry_t pte1)
{
	return ((pte1 & PTE1_NG) == 0);
}

static __inline boolean_t
pte1_is_valid(pt1_entry_t pte1)
{
	int l1_type;

	l1_type = pte1 & L1_TYPE_MASK;
	return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S));
}

static __inline boolean_t
pte1_is_wired(pt1_entry_t pte1)
{
	return (pte1 & PTE1_W);
}

static __inline pt1_entry_t
pte1_load(pt1_entry_t *pte1p)
{
	pt1_entry_t pte1;

	pte1 = *pte1p;
	return (pte1);
}

static __inline pt1_entry_t
pte1_load_clear(pt1_entry_t *pte1p)
{
	pt1_entry_t opte1;

	opte1 = *pte1p;
	*pte1p = 0;
	pte1_sync(pte1p);
	return (opte1);
}

static __inline void
pte1_set_bit(pt1_entry_t *pte1p, uint32_t bit)
{
	*pte1p |= bit;
	pte1_sync(pte1p);
}

static __inline vm_paddr_t
pte1_pa(pt1_entry_t pte1)
{
	return ((vm_paddr_t)(pte1 & PTE1_FRAME));
}

static __inline vm_paddr_t
pte1_link_pa(pt1_entry_t pte1)
{
	return ((vm_paddr_t)(pte1 & L1_C_ADDR_MASK));
}
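Note how the RO/NM pair emulates a modified bit: an entry counts as dirty only when it is writable (RO clear) and the no-modify trap has already fired (NM clear). An illustrative use of the predicates below (the function is hypothetical):

    /* Illustrative: does this mapping carry data to write back? */
    static boolean_t
    pte2_needs_writeback(pt2_entry_t pte2)
    {
        return (pte2_is_valid(pte2) && pte2_is_dirty(pte2));
    }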
/*
 * Virtual interface for L2 page table entries management.
 *
 * XXX: Some of the following functions include a synchronization barrier
 * and are called in a loop, so it could be useful to have two versions of
 * them: one with the barrier and one without.
 */

static __inline void
pte2_sync(pt2_entry_t *pte2p)
{
	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, sizeof(*pte2p));
#endif
}

static __inline void
pte2_sync_range(pt2_entry_t *pte2p, vm_size_t size)
{
	dsb();
#ifndef PMAP_PTE_NOCACHE
	if (!cpuinfo.coherent_walk)
		dcache_wb_pou((vm_offset_t)pte2p, size);
#endif
}

static __inline void
pte2_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{
	dmb();
	*pte2p = pte2;
	pte2_sync(pte2p);
}

static __inline void
pte2_clear(pt2_entry_t *pte2p)
{
	pte2_store(pte2p, 0);
}

static __inline void
pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
{
	*pte2p &= ~bit;
	pte2_sync(pte2p);
}

static __inline boolean_t
pte2_is_dirty(pt2_entry_t pte2)
{
	return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
}

static __inline boolean_t
pte2_is_global(pt2_entry_t pte2)
{
	return ((pte2 & PTE2_NG) == 0);
}

static __inline boolean_t
pte2_is_valid(pt2_entry_t pte2)
{
	return (pte2 & PTE2_V);
}

static __inline boolean_t
pte2_is_wired(pt2_entry_t pte2)
{
	return (pte2 & PTE2_W);
}

static __inline pt2_entry_t
pte2_load(pt2_entry_t *pte2p)
{
	pt2_entry_t pte2;

	pte2 = *pte2p;
	return (pte2);
}

static __inline pt2_entry_t
pte2_load_clear(pt2_entry_t *pte2p)
{
	pt2_entry_t opte2;

	opte2 = *pte2p;
	*pte2p = 0;
	pte2_sync(pte2p);
	return (opte2);
}

static __inline void
pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
{
	*pte2p |= bit;
	pte2_sync(pte2p);
}

static __inline void
pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired)
{
	/*
	 * Wired bit is transparent for page table walk,
	 * so pte2_sync() is not needed.
	 */
	if (wired)
		*pte2p |= PTE2_W;
	else
		*pte2p &= ~PTE2_W;
}

static __inline vm_paddr_t
pte2_pa(pt2_entry_t pte2)
{
	return ((vm_paddr_t)(pte2 & PTE2_FRAME));
}

static __inline u_int
pte2_attr(pt2_entry_t pte2)
{
	return ((u_int)(pte2 & PTE2_ATTR_MASK));
}
/*
 * Virtual interface for L2 page tables mapping management.
 */

static __inline u_int
pt2tab_index(vm_offset_t va)
{
	return (va >> PT2TAB_SHIFT);
}

static __inline pt2_entry_t *
pt2tab_entry(pt2_entry_t *pt2tab, vm_offset_t va)
{
	return (pt2tab + pt2tab_index(va));
}

static __inline void
pt2tab_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{
	pte2_store(pte2p, pte2);
}

static __inline pt2_entry_t
pt2tab_load(pt2_entry_t *pte2p)
{
	return (pte2_load(pte2p));
}

static __inline pt2_entry_t
pt2tab_load_clear(pt2_entry_t *pte2p)
{
	return (pte2_load_clear(pte2p));
}

static __inline u_int
pt2map_index(vm_offset_t va)
{
	return (va >> PT2MAP_SHIFT);
}

static __inline pt2_entry_t *
pt2map_entry(vm_offset_t va)
{
	return (PT2MAP + pt2map_index(va));
}

/*
 * Virtual interface for pmap structure & kernel shortcuts.
 */

static __inline pt1_entry_t *
pmap_pte1(pmap_t pmap, vm_offset_t va)
{
	return (pte1_ptr(pmap->pm_pt1, va));
}

static __inline pt1_entry_t *
kern_pte1(vm_offset_t va)
{
	return (pte1_ptr(kern_pt1, va));
}

static __inline pt2_entry_t *
pmap_pt2tab_entry(pmap_t pmap, vm_offset_t va)
{
	return (pt2tab_entry(pmap->pm_pt2tab, va));
}

static __inline pt2_entry_t *
kern_pt2tab_entry(vm_offset_t va)
{
	return (pt2tab_entry(kern_pt2tab, va));
}

static __inline vm_page_t
pmap_pt2_page(pmap_t pmap, vm_offset_t va)
{
	pt2_entry_t pte2;

	pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}

static __inline vm_page_t
kern_pt2_page(vm_offset_t va)
{
	pt2_entry_t pte2;

	pte2 = pte2_load(kern_pt2tab_entry(va));
	return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}

#endif	/* _KERNEL */
#endif	/* !_MACHINE_PMAP_VAR_H_ */
diff --git a/sys/arm/include/pte-v6.h b/sys/arm/include/pte.h
similarity index 100%
rename from sys/arm/include/pte-v6.h
rename to sys/arm/include/pte.h
diff --git a/sys/arm/mv/mv_armv7_machdep.c b/sys/arm/mv/mv_armv7_machdep.c
index c7f5f0e4d13b..f08b62048e1e 100644
--- a/sys/arm/mv/mv_armv7_machdep.c
+++ b/sys/arm/mv/mv_armv7_machdep.c
@@ -1,481 +1,481 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Semihalf.
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
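Each 32-bit MPP control register packs eight 4-bit function selectors, which is all MPP_SEL computes. A self-contained check of the bit packing:

    #include <assert.h>

    #define MPP_PINS_PER_REG 8
    #define MPP_SEL(pin,func) (((func) & 0xf) << \
        (((pin) % MPP_PINS_PER_REG) * 4))

    int
    main(void)
    {
        /* Pin 10 lives in register 1 (ctrl_offset 4), nibble 10 % 8 = 2. */
        assert(MPP_SEL(10, 0x5) == 0x500);
        return (0);
    }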
 *
 * from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45
 */

#include "opt_ddb.h"
#include "opt_platform.h"

#include
__FBSDID("$FreeBSD$");

#define _ARM32_BUS_DMA_PRIVATE
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
-#include <machine/pte-v6.h>
+#include <machine/pte.h>
#include
#include
#include
#include
#include

#include "opt_platform.h"
#include "platform_if.h"

#if defined(SOC_MV_ARMADA38X)
#include "platform_pl310_if.h"
#include "armada38x/armada38x_pl310.h"
#endif

static int platform_mpp_init(void);
int armada38x_win_set_iosync_barrier(void);
int armada38x_scu_enable(void);
int armada38x_open_bootrom_win(void);
int armada38x_mbus_optimization(void);

static vm_offset_t mv_platform_lastaddr(platform_t plate);
static int mv_platform_probe_and_attach(platform_t plate);
static void mv_platform_gpio_init(platform_t plate);
static void mv_cpu_reset(platform_t plat);

static void mv_a38x_platform_late_init(platform_t plate);
static int mv_a38x_platform_devmap_init(platform_t plate);
static void mv_axp_platform_late_init(platform_t plate);
static int mv_axp_platform_devmap_init(platform_t plate);
void armadaxp_init_coher_fabric(void);
void armadaxp_l2_init(void);

#ifdef SMP
void mv_a38x_platform_mp_setmaxid(platform_t plate);
void mv_a38x_platform_mp_start_ap(platform_t plate);
void mv_axp_platform_mp_setmaxid(platform_t plate);
void mv_axp_platform_mp_start_ap(platform_t plate);
#endif

#define MPP_PIN_MAX		68
#define MPP_PIN_CELLS		2
#define MPP_PINS_PER_REG	8
#define MPP_SEL(pin,func)	(((func) & 0xf) <<		\
    (((pin) % MPP_PINS_PER_REG) * 4))

static void
mv_busdma_tag_init(void *arg __unused)
{
	phandle_t node;
	bus_dma_tag_t dmat;

	/*
	 * If this platform has coherent DMA, create the parent DMA tag to pass
	 * down the coherent flag to all busses and devices on the platform,
	 * otherwise return without doing anything.  By default, the tag is
	 * created for A38x-based platforms only.
	 */
	if ((node = OF_finddevice("/")) == -1){
		printf("no tree\n");
		return;
	}

	if (ofw_bus_node_is_compatible(node, "marvell,armada380") == 0)
		return;

	bus_dma_tag_create(NULL,	/* No parent tag */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    BUS_SPACE_UNRESTRICTED,	/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsize */
	    BUS_DMA_COHERENT,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &dmat);

	nexus_set_dma_tag(dmat);
}
SYSINIT(mv_busdma_tag, SI_SUB_DRIVERS, SI_ORDER_ANY, mv_busdma_tag_init, NULL);

static int
platform_mpp_init(void)
{
	pcell_t pinmap[MPP_PIN_MAX * MPP_PIN_CELLS];
	int mpp[MPP_PIN_MAX];
	uint32_t ctrl_val, ctrl_offset;
	pcell_t reg[4];
	u_long start, size;
	phandle_t node;
	pcell_t pin_cells, *pinmap_ptr, pin_count;
	ssize_t len;
	int par_addr_cells, par_size_cells;
	int tuple_size, rv, pins, i, j;
	int mpp_pin, mpp_function;

	/*
	 * Try to access the MPP node directly i.e. through /aliases/mpp.
	 */
	if ((node = OF_finddevice("mpp")) != -1)
		if (ofw_bus_node_is_compatible(node, "mrvl,mpp"))
			goto moveon;
	/*
	 * Find the node the long way.
	 */
	if ((node = OF_finddevice("/")) == -1)
		return (ENXIO);

	if ((node = fdt_find_compatible(node, "simple-bus", 0)) == 0)
		return (ENXIO);

	if ((node = fdt_find_compatible(node, "mrvl,mpp", 0)) == 0)
		/*
		 * No MPP node.  Fall back to how MPP got set by the
		 * first-stage loader and try to continue booting.
		 */
		return (0);
moveon:
	/*
	 * Process 'reg' prop.
	 */
	if ((rv = fdt_addrsize_cells(OF_parent(node), &par_addr_cells,
	    &par_size_cells)) != 0)
		return(ENXIO);

	tuple_size = sizeof(pcell_t) * (par_addr_cells + par_size_cells);
	len = OF_getprop(node, "reg", reg, sizeof(reg));
	if (tuple_size <= 0)
		return (EINVAL);

	rv = fdt_data_to_res(reg, par_addr_cells, par_size_cells,
	    &start, &size);
	if (rv != 0)
		return (rv);
	start += fdt_immr_va;

	/*
	 * Process 'pin-count' and 'pin-map' props.
	 */
	if (OF_getencprop(node, "pin-count", &pin_count,
	    sizeof(pin_count)) <= 0)
		return (ENXIO);
	if (pin_count > MPP_PIN_MAX)
		return (ERANGE);

	if (OF_getencprop(node, "#pin-cells", &pin_cells,
	    sizeof(pin_cells)) <= 0)
		pin_cells = MPP_PIN_CELLS;
	if (pin_cells > MPP_PIN_CELLS)
		return (ERANGE);
	tuple_size = sizeof(pcell_t) * pin_cells;

	bzero(pinmap, sizeof(pinmap));
	len = OF_getencprop(node, "pin-map", pinmap, sizeof(pinmap));
	if (len <= 0)
		return (ERANGE);
	if (len % tuple_size)
		return (ERANGE);
	pins = len / tuple_size;
	if (pins > pin_count)
		return (ERANGE);
	/*
	 * Fill out a "mpp[pin] => function" table.  All pins unspecified in
	 * the 'pin-map' property are defaulted to 0 function i.e. GPIO.
	 */
	bzero(mpp, sizeof(mpp));
	pinmap_ptr = pinmap;
	for (i = 0; i < pins; i++) {
		mpp_pin = *pinmap_ptr;
		mpp_function = *(pinmap_ptr + 1);
		mpp[mpp_pin] = mpp_function;
		pinmap_ptr += pin_cells;
	}

	/*
	 * Prepare and program MPP control register values.
	 */
	ctrl_offset = 0;
	for (i = 0; i < pin_count;) {
		ctrl_val = 0;

		for (j = 0; j < MPP_PINS_PER_REG; j++) {
			if (i + j == pin_count - 1)
				break;
			ctrl_val |= MPP_SEL(i + j, mpp[i + j]);
		}
		i += MPP_PINS_PER_REG;
		bus_space_write_4(fdtbus_bs_tag, start, ctrl_offset, ctrl_val);
		ctrl_offset += 4;
	}

	return (0);
}
static vm_offset_t
mv_platform_lastaddr(platform_t plat)
{
	return (fdt_immr_va);
}

static int
mv_platform_probe_and_attach(platform_t plate)
{
	if (fdt_immr_addr(MV_BASE) != 0)
		while (1);
	return (0);
}

static void
mv_platform_gpio_init(platform_t plate)
{
	/*
	 * Re-initialise MPP.  It is important to call this prior to using
	 * console as the physical connection can be routed via MPP.
	 */
	if (platform_mpp_init() != 0)
		while (1);
}

static void
mv_a38x_platform_late_init(platform_t plate)
{
	/*
	 * Re-initialise decode windows
	 */
	if (mv_check_soc_family() == MV_SOC_UNSUPPORTED)
		panic("Unsupported SoC family\n");

	if (soc_decode_win() != 0)
		printf("WARNING: could not re-initialise decode windows! "
		    "Running with existing settings...\n");

	/* Configure timers' base frequency */
	arm_tmr_change_frequency(get_cpu_freq() / 2);

	/*
	 * Workaround for Marvell Armada38X family HW issue
	 * between Cortex-A9 CPUs and on-chip devices that may
	 * cause hang on heavy load.
	 * To avoid that, map all registers including PCIe IO
	 * as strongly ordered instead of device memory.
	 */
	pmap_remap_vm_attr(VM_MEMATTR_DEVICE, VM_MEMATTR_SO);

	/* Set IO Sync Barrier bit for all Mbus devices */
	if (armada38x_win_set_iosync_barrier() != 0)
		printf("WARNING: could not map CPU Subsystem registers\n");
	if (armada38x_mbus_optimization() != 0)
		printf("WARNING: could not enable mbus optimization\n");
	if (armada38x_scu_enable() != 0)
		printf("WARNING: could not enable SCU\n");
#ifdef SMP
	/* Open window to bootROM memory - needed for SMP */
	if (armada38x_open_bootrom_win() != 0)
		printf("WARNING: could not open window to bootROM\n");
#endif
}

static void
mv_axp_platform_late_init(platform_t plate)
{
	phandle_t node;

	/*
	 * Re-initialise decode windows
	 */
	if (soc_decode_win() != 0)
		printf("WARNING: could not re-initialise decode windows! "
		    "Running with existing settings...\n");
	if ((node = OF_finddevice("/")) == -1)
		return;

#if !defined(SMP)
	/* For the SMP case it should be initialized after APs are booted */
	armadaxp_init_coher_fabric();
#endif
	armadaxp_l2_init();
}

#define FDT_DEVMAP_MAX	(MV_WIN_CPU_MAX_ARMV7 + 2)
static struct devmap_entry fdt_devmap[FDT_DEVMAP_MAX] = {
	{ 0, 0, 0, }
};

static int
platform_sram_devmap(struct devmap_entry *map)
{
	return (ENOENT);
}
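The MRC in mv_axp_platform_devmap_init() (just above this note once the edits are applied; see the function that follows the devmap table construction) reads the Armada XP CP15 c15 configuration register and reconstructs the 16MiB-aligned internal-register (IMMR) window base from it. The exact field layout is Marvell-specific; the sketch below merely restates the arithmetic the function performs:

    #include <stdint.h>

    static uint32_t
    immr_base_from_cp15(uint32_t reg)
    {
        /* Shift the base field up 13 bits, keep bits [31:24]. */
        return ((reg << 13) & 0xff000000u);
    }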
" "Running with existing settings...\n"); if ((node = OF_finddevice("/")) == -1) return; #if !defined(SMP) /* For SMP case it should be initialized after APs are booted */ armadaxp_init_coher_fabric(); #endif armadaxp_l2_init(); } #define FDT_DEVMAP_MAX (MV_WIN_CPU_MAX_ARMV7 + 2) static struct devmap_entry fdt_devmap[FDT_DEVMAP_MAX] = { { 0, 0, 0, } }; static int platform_sram_devmap(struct devmap_entry *map) { return (ENOENT); } /* * Construct devmap table with DT-derived config data. */ static int mv_a38x_platform_devmap_init(platform_t plat) { phandle_t root, child; int i; i = 0; devmap_register_table(&fdt_devmap[0]); if ((root = OF_finddevice("/")) == -1) return (ENXIO); /* * IMMR range. */ fdt_devmap[i].pd_va = fdt_immr_va; fdt_devmap[i].pd_pa = fdt_immr_pa; fdt_devmap[i].pd_size = fdt_immr_size; i++; /* * SRAM range. */ if (i < FDT_DEVMAP_MAX) if (platform_sram_devmap(&fdt_devmap[i]) == 0) i++; /* * PCI range(s). * PCI range(s) and localbus. */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { if (mv_fdt_is_type(child, "pci") || mv_fdt_is_type(child, "pciep")) { /* * Check space: each PCI node will consume 2 devmap * entries. */ if (i + 1 >= FDT_DEVMAP_MAX) return (ENOMEM); if (mv_pci_devmap(child, &fdt_devmap[i], MV_PCI_VA_IO_BASE, MV_PCI_VA_MEM_BASE) != 0) return (ENXIO); i += 2; } } return (0); } static int mv_axp_platform_devmap_init(platform_t plate) { vm_paddr_t cur_immr_pa; /* * Acquire SoC registers' base passed by u-boot and fill devmap * accordingly. DTB is going to be modified basing on this data * later. */ __asm __volatile("mrc p15, 4, %0, c15, c0, 0" : "=r" (cur_immr_pa)); cur_immr_pa = (cur_immr_pa << 13) & 0xff000000; if (cur_immr_pa != 0) fdt_immr_pa = cur_immr_pa; mv_a38x_platform_devmap_init(plate); return (0); } static void mv_cpu_reset(platform_t plat) { write_cpu_misc(RSTOUTn_MASK_ARMV7, SOFT_RST_OUT_EN_ARMV7); write_cpu_misc(SYSTEM_SOFT_RESET_ARMV7, SYS_SOFT_RST_ARMV7); } #if defined(SOC_MV_ARMADA38X) static platform_method_t mv_a38x_methods[] = { PLATFORMMETHOD(platform_devmap_init, mv_a38x_platform_devmap_init), PLATFORMMETHOD(platform_cpu_reset, mv_cpu_reset), PLATFORMMETHOD(platform_lastaddr, mv_platform_lastaddr), PLATFORMMETHOD(platform_attach, mv_platform_probe_and_attach), PLATFORMMETHOD(platform_gpio_init, mv_platform_gpio_init), PLATFORMMETHOD(platform_late_init, mv_a38x_platform_late_init), PLATFORMMETHOD(platform_pl310_init, mv_a38x_platform_pl310_init), PLATFORMMETHOD(platform_pl310_write_ctrl, mv_a38x_platform_pl310_write_ctrl), PLATFORMMETHOD(platform_pl310_write_debug, mv_a38x_platform_pl310_write_debug), #ifdef SMP PLATFORMMETHOD(platform_mp_start_ap, mv_a38x_platform_mp_start_ap), PLATFORMMETHOD(platform_mp_setmaxid, mv_a38x_platform_mp_setmaxid), #endif PLATFORMMETHOD_END, }; FDT_PLATFORM_DEF(mv_a38x, "mv_a38x", 0, "marvell,armada380", 100); #endif static platform_method_t mv_axp_methods[] = { PLATFORMMETHOD(platform_devmap_init, mv_axp_platform_devmap_init), PLATFORMMETHOD(platform_cpu_reset, mv_cpu_reset), PLATFORMMETHOD(platform_lastaddr, mv_platform_lastaddr), PLATFORMMETHOD(platform_attach, mv_platform_probe_and_attach), PLATFORMMETHOD(platform_gpio_init, mv_platform_gpio_init), PLATFORMMETHOD(platform_late_init, mv_axp_platform_late_init), #ifdef SMP PLATFORMMETHOD(platform_mp_start_ap, mv_axp_platform_mp_start_ap), PLATFORMMETHOD(platform_mp_setmaxid, mv_axp_platform_mp_setmaxid), #endif PLATFORMMETHOD_END, }; FDT_PLATFORM_DEF(mv_axp, "mv_axp", 0, "marvell,armadaxp", 100);