Index: head/lib/libkvm/kvm_arm.h =================================================================== --- head/lib/libkvm/kvm_arm.h (revision 295800) +++ head/lib/libkvm/kvm_arm.h (revision 295801) @@ -1,111 +1,113 @@ /*- * Copyright (c) 2015 John H. Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __KVM_ARM_H__ #define __KVM_ARM_H__ -#ifdef __arm__ -#include <machine/pte.h> -#endif - typedef uint32_t arm_physaddr_t; typedef uint32_t arm_pd_entry_t; typedef uint32_t arm_pt_entry_t; #define ARM_PAGE_SHIFT 12 #define ARM_PAGE_SIZE (1 << ARM_PAGE_SHIFT) /* Page size */ #define ARM_PAGE_MASK (ARM_PAGE_SIZE - 1) #define ARM_L1_TABLE_SIZE 0x4000 /* 16K */ #define ARM_L1_S_SIZE 0x00100000 /* 1M */ #define ARM_L1_S_OFFSET (ARM_L1_S_SIZE - 1) #define ARM_L1_S_FRAME (~ARM_L1_S_OFFSET) #define ARM_L1_S_SHIFT 20 #define ARM_L2_L_SIZE 0x00010000 /* 64K */ #define ARM_L2_L_OFFSET (ARM_L2_L_SIZE - 1) #define ARM_L2_L_FRAME (~ARM_L2_L_OFFSET) #define ARM_L2_L_SHIFT 16 #define ARM_L2_S_SIZE 0x00001000 /* 4K */ #define ARM_L2_S_OFFSET (ARM_L2_S_SIZE - 1) #define ARM_L2_S_FRAME (~ARM_L2_S_OFFSET) #define ARM_L2_S_SHIFT 12 #define ARM_L1_TYPE_INV 0x00 /* Invalid (fault) */ #define ARM_L1_TYPE_C 0x01 /* Coarse L2 */ #define ARM_L1_TYPE_S 0x02 /* Section */ #define ARM_L1_TYPE_MASK 0x03 /* Mask of type bits */ #define ARM_L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ #define ARM_L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ #define ARM_L2_TYPE_INV 0x00 /* Invalid (fault) */ #define ARM_L2_TYPE_L 0x01 /* Large Page - 64k */ #define ARM_L2_TYPE_S 0x02 /* Small Page - 4k */ #define ARM_L2_TYPE_T 0x03 /* Tiny Page - 1k - not used */ #define ARM_L2_TYPE_MASK 0x03 #ifdef __arm__ #include <machine/acle-compat.h> + +#if __ARM_ARCH >= 6 +#include <machine/pte-v6.h> +#else +#include <machine/pte-v4.h> +#endif _Static_assert(PAGE_SHIFT == ARM_PAGE_SHIFT, "PAGE_SHIFT mismatch"); _Static_assert(PAGE_SIZE == ARM_PAGE_SIZE, "PAGE_SIZE mismatch"); _Static_assert(PAGE_MASK == ARM_PAGE_MASK, "PAGE_MASK mismatch"); _Static_assert(L1_TABLE_SIZE == ARM_L1_TABLE_SIZE, "L1_TABLE_SIZE mismatch"); _Static_assert(L1_S_SIZE == ARM_L1_S_SIZE, "L1_S_SIZE mismatch"); _Static_assert(L1_S_OFFSET == ARM_L1_S_OFFSET, "L1_S_OFFSET mismatch"); _Static_assert(L1_S_FRAME == ARM_L1_S_FRAME, "L1_S_FRAME mismatch"); _Static_assert(L1_S_SHIFT == ARM_L1_S_SHIFT,
"L1_S_SHIFT mismatch"); _Static_assert(L2_L_SIZE == ARM_L2_L_SIZE, "L2_L_SIZE mismatch"); _Static_assert(L2_L_OFFSET == ARM_L2_L_OFFSET, "L2_L_OFFSET mismatch"); _Static_assert(L2_L_FRAME == ARM_L2_L_FRAME, "L2_L_FRAME mismatch"); _Static_assert(L2_L_SHIFT == ARM_L2_L_SHIFT, "L2_L_SHIFT mismatch"); _Static_assert(L2_S_SIZE == ARM_L2_S_SIZE, "L2_S_SIZE mismatch"); _Static_assert(L2_S_OFFSET == ARM_L2_S_OFFSET, "L2_S_OFFSET mismatch"); _Static_assert(L2_S_FRAME == ARM_L2_S_FRAME, "L2_S_FRAME mismatch"); _Static_assert(L2_S_SHIFT == ARM_L2_S_SHIFT, "L2_S_SHIFT mismatch"); _Static_assert(L1_TYPE_INV == ARM_L1_TYPE_INV, "L1_TYPE_INV mismatch"); _Static_assert(L1_TYPE_C == ARM_L1_TYPE_C, "L1_TYPE_C mismatch"); _Static_assert(L1_TYPE_S == ARM_L1_TYPE_S, "L1_TYPE_S mismatch"); _Static_assert(L1_TYPE_MASK == ARM_L1_TYPE_MASK, "L1_TYPE_MASK mismatch"); _Static_assert(L1_S_ADDR_MASK == ARM_L1_S_ADDR_MASK, "L1_S_ADDR_MASK mismatch"); _Static_assert(L1_C_ADDR_MASK == ARM_L1_C_ADDR_MASK, "L1_C_ADDR_MASK mismatch"); _Static_assert(L2_TYPE_INV == ARM_L2_TYPE_INV, "L2_TYPE_INV mismatch"); _Static_assert(L2_TYPE_L == ARM_L2_TYPE_L, "L2_TYPE_L mismatch"); _Static_assert(L2_TYPE_S == ARM_L2_TYPE_S, "L2_TYPE_S mismatch"); #if __ARM_ARCH < 6 _Static_assert(L2_TYPE_T == ARM_L2_TYPE_T, "L2_TYPE_T mismatch"); #endif _Static_assert(L2_TYPE_MASK == ARM_L2_TYPE_MASK, "L2_TYPE_MASK mismatch"); #endif int _arm_native(kvm_t *); #endif /* !__KVM_ARM_H__ */ Index: head/sys/arm/arm/elf_trampoline.c =================================================================== --- head/sys/arm/arm/elf_trampoline.c (revision 295800) +++ head/sys/arm/arm/elf_trampoline.c (revision 295801) @@ -1,735 +1,735 @@ /*- * Copyright (c) 2005 Olivier Houchard. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Since we are compiled outside of the normal kernel build process, we * need to include opt_global.h manually. 
*/ #include "opt_global.h" #include "opt_kernname.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include -#include +#include #include #include extern char kernel_start[]; extern char kernel_end[]; extern void *_end; void _start(void); void __start(void); void __startC(void); extern unsigned int cpu_ident(void); extern void armv6_idcache_wbinv_all(void); extern void armv7_idcache_wbinv_all(void); extern void do_call(void *, void *, void *, int); #define GZ_HEAD 0xa #if defined(CPU_ARM9) #define cpu_idcache_wbinv_all arm9_idcache_wbinv_all extern void arm9_idcache_wbinv_all(void); #elif defined(CPU_FA526) #define cpu_idcache_wbinv_all fa526_idcache_wbinv_all extern void fa526_idcache_wbinv_all(void); #elif defined(CPU_ARM9E) #define cpu_idcache_wbinv_all armv5_ec_idcache_wbinv_all extern void armv5_ec_idcache_wbinv_all(void); #elif defined(CPU_ARM1176) #define cpu_idcache_wbinv_all armv6_idcache_wbinv_all #elif defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) #define cpu_idcache_wbinv_all xscale_cache_purgeID extern void xscale_cache_purgeID(void); #elif defined(CPU_XSCALE_81342) #define cpu_idcache_wbinv_all xscalec3_cache_purgeID extern void xscalec3_cache_purgeID(void); #elif defined(CPU_MV_PJ4B) #if !defined(SOC_MV_ARMADAXP) #define cpu_idcache_wbinv_all armv6_idcache_wbinv_all extern void armv6_idcache_wbinv_all(void); #else #define cpu_idcache_wbinv_all() armadaxp_idcache_wbinv_all #endif #endif /* CPU_MV_PJ4B */ #ifdef CPU_XSCALE_81342 #define cpu_l2cache_wbinv_all xscalec3_l2cache_purge extern void xscalec3_l2cache_purge(void); #elif defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) #define cpu_l2cache_wbinv_all sheeva_l2cache_wbinv_all extern void sheeva_l2cache_wbinv_all(void); #elif defined(CPU_CORTEXA) || defined(CPU_KRAIT) #define cpu_idcache_wbinv_all armv7_idcache_wbinv_all #define cpu_l2cache_wbinv_all() #else #define cpu_l2cache_wbinv_all() #endif static void armadaxp_idcache_wbinv_all(void); int arm_picache_size; int arm_picache_line_size; int arm_picache_ways; int arm_pdcache_size; /* and unified */ int arm_pdcache_line_size = 32; int arm_pdcache_ways; int arm_pcache_type; int arm_pcache_unified; int arm_dcache_align; int arm_dcache_align_mask; int arm_dcache_min_line_size = 32; int arm_icache_min_line_size = 32; int arm_idcache_min_line_size = 32; u_int arm_cache_level; u_int arm_cache_type[14]; u_int arm_cache_loc; /* Additional cache information local to this file. Log2 of some of the above numbers. */ static int arm_dcache_l2_nsets; static int arm_dcache_l2_assoc; static int arm_dcache_l2_linesize; extern int arm9_dcache_sets_inc; extern int arm9_dcache_sets_max; extern int arm9_dcache_index_max; extern int arm9_dcache_index_inc; static __inline void * memcpy(void *dst, const void *src, int len) { const char *s = src; char *d = dst; while (len) { if (0 && len >= 4 && !((vm_offset_t)d & 3) && !((vm_offset_t)s & 3)) { *(uint32_t *)d = *(uint32_t *)s; s += 4; d += 4; len -= 4; } else { *d++ = *s++; len--; } } return (dst); } static __inline void bzero(void *addr, int count) { char *tmp = (char *)addr; while (count > 0) { if (count >= 4 && !((vm_offset_t)tmp & 3)) { *(uint32_t *)tmp = 0; tmp += 4; count -= 4; } else { *tmp = 0; tmp++; count--; } } } static void arm9_setup(void); void _startC(void) { int tmp1; unsigned int sp = ((unsigned int)&_end & ~3) + 4; unsigned int pc, kernphysaddr; /* * Figure out the physical address the kernel was loaded at. 
This * assumes the entry point (this code right here) is in the first page, * which will always be the case for this trampoline code. */ __asm __volatile("mov %0, pc\n" : "=r" (pc)); kernphysaddr = pc & ~PAGE_MASK; #if defined(FLASHADDR) && defined(PHYSADDR) && defined(LOADERRAMADDR) if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) || (FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) { /* * We're running from flash, so just copy the whole thing * from flash to memory. * This is far from optimal, we could do the relocation or * the unzipping directly from flash to memory to avoid this * needless copy, but it would require to know the flash * physical address. */ unsigned int target_addr; unsigned int tmp_sp; uint32_t src_addr = (uint32_t)&_start - PHYSADDR + FLASHADDR + (pc - FLASHADDR - ((uint32_t)&_startC - PHYSADDR)) & 0xfffff000; target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR; tmp_sp = target_addr + 0x100000 + (unsigned int)&_end - (unsigned int)&_start; memcpy((char *)target_addr, (char *)src_addr, (unsigned int)&_end - (unsigned int)&_start); /* Temporary set the sp and jump to the new location. */ __asm __volatile( "mov sp, %1\n" "mov pc, %0\n" : : "r" (target_addr), "r" (tmp_sp)); } #endif #ifdef KZIP sp += KERNSIZE + 0x100; sp &= ~(L1_TABLE_SIZE - 1); sp += 2 * L1_TABLE_SIZE; #endif sp += 1024 * 1024; /* Should be enough for a stack */ __asm __volatile("adr %0, 2f\n" "bic %0, %0, #0xff000000\n" "and %1, %1, #0xff000000\n" "orr %0, %0, %1\n" "mrc p15, 0, %1, c1, c0, 0\n" "bic %1, %1, #1\n" /* Disable MMU */ "orr %1, %1, #(4 | 8)\n" /* Add DC enable, WBUF enable */ "orr %1, %1, #0x1000\n" /* Add IC enable */ "orr %1, %1, #(0x800)\n" /* BPRD enable */ "mcr p15, 0, %1, c1, c0, 0\n" "nop\n" "nop\n" "nop\n" "mov pc, %0\n" "2: nop\n" "mov sp, %2\n" : "=r" (tmp1), "+r" (kernphysaddr), "+r" (sp)); #ifndef KZIP #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ if ((cpu_ident() & 0x0000f000) == 0x00009000) arm9_setup(); #endif #endif __start(); } static void get_cachetype_cp15() { u_int ctype, isize, dsize, cpuid; u_int clevel, csize, i, sel; u_int multiplier; u_char type; __asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctype)); cpuid = cpu_ident(); /* * ...and thus spake the ARM ARM: * * If an value corresponding to an unimplemented or * reserved ID register is encountered, the System Control * processor returns the value of the main ID register. */ if (ctype == cpuid) goto out; if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) { /* Resolve minimal cache line sizes */ arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2); arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2); arm_idcache_min_line_size = (arm_dcache_min_line_size > arm_icache_min_line_size ? 
arm_icache_min_line_size : arm_dcache_min_line_size); __asm __volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (clevel)); arm_cache_level = clevel; arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level) + 1; i = 0; while ((type = (clevel & 0x7)) && i < 7) { if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE || type == CACHE_SEP_CACHE) { sel = i << 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) { sel = (i << 1) | 1; __asm __volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (sel)); __asm __volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csize)); arm_cache_type[sel] = csize; } i++; clevel >>= 3; } } else { if ((ctype & CPU_CT_S) == 0) arm_pcache_unified = 1; /* * If you want to know how this code works, go read the ARM ARM. */ arm_pcache_type = CPU_CT_CTYPE(ctype); if (arm_pcache_unified == 0) { isize = CPU_CT_ISIZE(ctype); multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2; arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3); if (CPU_CT_xSIZE_ASSOC(isize) == 0) { if (isize & CPU_CT_xSIZE_M) arm_picache_line_size = 0; /* not present */ else arm_picache_ways = 1; } else { arm_picache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(isize) - 1); } arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8); } dsize = CPU_CT_DSIZE(ctype); multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2; arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3); if (CPU_CT_xSIZE_ASSOC(dsize) == 0) { if (dsize & CPU_CT_xSIZE_M) arm_pdcache_line_size = 0; /* not present */ else arm_pdcache_ways = 1; } else { arm_pdcache_ways = multiplier << (CPU_CT_xSIZE_ASSOC(dsize) - 1); } arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8); arm_dcache_align = arm_pdcache_line_size; arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2; arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3; arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize); out: arm_dcache_align_mask = arm_dcache_align - 1; } } static void arm9_setup(void) { get_cachetype_cp15(); arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize; arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) - arm9_dcache_sets_inc; arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc); arm9_dcache_index_max = 0U - arm9_dcache_index_inc; } static void armadaxp_idcache_wbinv_all(void) { uint32_t feat; __asm __volatile("mrc p15, 0, %0, c0, c1, 0" : "=r" (feat)); if (feat & ARM_PFR0_THUMBEE_MASK) armv7_idcache_wbinv_all(); else armv6_idcache_wbinv_all(); } #ifdef KZIP static unsigned char *orig_input, *i_input, *i_output; static u_int memcnt; /* Memory allocated: blocks */ static size_t memtot; /* Memory allocated: bytes */ /* * Library functions required by inflate(). */ #define MEMSIZ 0x8000 /* * Allocate memory block. */ unsigned char * kzipmalloc(int size) { void *ptr; static u_char mem[MEMSIZ]; if (memtot + size > MEMSIZ) return NULL; ptr = mem + memtot; memtot += size; memcnt++; return ptr; } /* * Free allocated memory block. 
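kzipmalloc()/kzipfree() in this hunk form a tiny boot-time arena allocator for inflate(): allocations bump a cursor inside a fixed static buffer, a free only decrements a live-block counter, and the arena is recycled wholesale once that counter reaches zero. A self-contained sketch of the same scheme, assuming nothing beyond a 32 KiB MEMSIZ-like bound (all names illustrative):

#include <stddef.h>

#define ARENA_SIZE	0x8000		/* 32 KiB, like MEMSIZ */

static unsigned char	arena[ARENA_SIZE];
static size_t		arena_used;	/* bytes handed out */
static unsigned		arena_live;	/* blocks not yet freed */

static void *
arena_alloc(size_t size)
{
	void *p;

	if (arena_used + size > ARENA_SIZE)
		return (NULL);		/* arena exhausted */
	p = arena + arena_used;
	arena_used += size;
	arena_live++;
	return (p);
}

static void
arena_free(void *p)
{
	(void)p;			/* single blocks are never reclaimed */
	if (--arena_live == 0)
		arena_used = 0;		/* last free resets the whole arena */
}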
*/ void kzipfree(void *ptr) { memcnt--; if (!memcnt) memtot = 0; } void putstr(char *dummy) { } static int input(void *dummy) { if ((size_t)(i_input - orig_input) >= KERNCOMPSIZE) { return (GZ_EOF); } return *i_input++; } static int output(void *dummy, unsigned char *ptr, unsigned long len) { memcpy(i_output, ptr, len); i_output += len; return (0); } static void * inflate_kernel(void *kernel, void *startaddr) { struct inflate infl; unsigned char slide[GZ_WSIZE]; orig_input = kernel; memcnt = memtot = 0; i_input = (unsigned char *)kernel + GZ_HEAD; if (((char *)kernel)[3] & 0x18) { while (*i_input) i_input++; i_input++; } i_output = startaddr; bzero(&infl, sizeof(infl)); infl.gz_input = input; infl.gz_output = output; infl.gz_slide = slide; inflate(&infl); return ((char *)(((vm_offset_t)i_output & ~3) + 4)); } #endif void * load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end, int d) { Elf32_Ehdr *eh; Elf32_Phdr phdr[64] /* XXX */, *php; Elf32_Shdr shdr[64] /* XXX */; int i,j; void *entry_point; int symtabindex = -1; int symstrindex = -1; vm_offset_t lastaddr = 0; Elf_Addr ssym = 0; Elf_Dyn *dp; eh = (Elf32_Ehdr *)kstart; ssym = 0; entry_point = (void*)eh->e_entry; memcpy(phdr, (void *)(kstart + eh->e_phoff ), eh->e_phnum * sizeof(phdr[0])); /* Determine lastaddr. */ for (i = 0; i < eh->e_phnum; i++) { if (lastaddr < (phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_memsz)) lastaddr = phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_memsz; } /* Save the symbol tables, as there're about to be scratched. */ memcpy(shdr, (void *)(kstart + eh->e_shoff), sizeof(*shdr) * eh->e_shnum); if (eh->e_shnum * eh->e_shentsize != 0 && eh->e_shoff != 0) { for (i = 0; i < eh->e_shnum; i++) { if (shdr[i].sh_type == SHT_SYMTAB) { for (j = 0; j < eh->e_phnum; j++) { if (phdr[j].p_type == PT_LOAD && shdr[i].sh_offset >= phdr[j].p_offset && (shdr[i].sh_offset + shdr[i].sh_size <= phdr[j].p_offset + phdr[j].p_filesz)) { shdr[i].sh_offset = 0; shdr[i].sh_size = 0; j = eh->e_phnum; } } if (shdr[i].sh_offset != 0 && shdr[i].sh_size != 0) { symtabindex = i; symstrindex = shdr[i].sh_link; } } } func_end = roundup(func_end, sizeof(long)); if (symtabindex >= 0 && symstrindex >= 0) { ssym = lastaddr; if (d) { memcpy((void *)func_end, (void *)( shdr[symtabindex].sh_offset + kstart), shdr[symtabindex].sh_size); memcpy((void *)(func_end + shdr[symtabindex].sh_size), (void *)(shdr[symstrindex].sh_offset + kstart), shdr[symstrindex].sh_size); } else { lastaddr += shdr[symtabindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symtabindex].sh_size)); lastaddr += sizeof(shdr[symstrindex].sh_size); lastaddr += shdr[symstrindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symstrindex].sh_size)); } } } if (!d) return ((void *)lastaddr); j = eh->e_phnum; for (i = 0; i < j; i++) { volatile char c; if (phdr[i].p_type != PT_LOAD) continue; memcpy((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr), (void*)(kstart + phdr[i].p_offset), phdr[i].p_filesz); /* Clean space from oversized segments, eg: bss. */ if (phdr[i].p_filesz < phdr[i].p_memsz) bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr + phdr[i].p_filesz), phdr[i].p_memsz - phdr[i].p_filesz); } /* Now grab the symbol tables. 
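The lastaddr pass at the top of load_kernel() rebases every program header from its link-time virtual address to the physical load address and keeps the highest segment end. The same computation as standalone C, assuming FreeBSD's <sys/elf32.h>; argument names are illustrative:

#include <sys/types.h>
#include <sys/elf32.h>

static uint32_t
last_load_addr(const Elf32_Phdr *phdr, int nph, uint32_t kernvirt,
    uint32_t curaddr)
{
	uint32_t end, last = 0;
	int i;

	for (i = 0; i < nph; i++) {
		/* Rebase the segment from its link VA to the load address. */
		end = phdr[i].p_vaddr - kernvirt + curaddr + phdr[i].p_memsz;
		if (end > last)
			last = end;
	}
	return (last);
}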
*/ if (symtabindex >= 0 && symstrindex >= 0) { *(Elf_Size *)lastaddr = shdr[symtabindex].sh_size; lastaddr += sizeof(shdr[symtabindex].sh_size); memcpy((void*)lastaddr, (void *)func_end, shdr[symtabindex].sh_size); lastaddr += shdr[symtabindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symtabindex].sh_size)); *(Elf_Size *)lastaddr = shdr[symstrindex].sh_size; lastaddr += sizeof(shdr[symstrindex].sh_size); memcpy((void*)lastaddr, (void*)(func_end + shdr[symtabindex].sh_size), shdr[symstrindex].sh_size); lastaddr += shdr[symstrindex].sh_size; lastaddr = roundup(lastaddr, sizeof(shdr[symstrindex].sh_size)); *(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER; *((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR; *((Elf_Addr *)curaddr + 2) = lastaddr - curaddr + KERNVIRTADDR; } else *(Elf_Addr *)curaddr = 0; /* Invalidate the instruction cache. */ __asm __volatile("mcr p15, 0, %0, c7, c5, 0\n" "mcr p15, 0, %0, c7, c10, 4\n" : : "r" (curaddr)); __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" "bic %0, %0, #1\n" /* MMU_ENABLE */ "mcr p15, 0, %0, c1, c0, 0\n" : "=r" (ssym)); /* Jump to the entry point. */ ((void(*)(void))(entry_point - KERNVIRTADDR + curaddr))(); __asm __volatile(".globl func_end\n" "func_end:"); /* NOTREACHED */ return NULL; } extern char func_end[]; #define PMAP_DOMAIN_KERNEL 0 /* * Just define it instead of including the * whole VM headers set. */ int __hack; static __inline void setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend, int write_back) { unsigned int *pd = (unsigned int *)pt_addr; vm_paddr_t addr; int domain = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT; int tmp; bzero(pd, L1_TABLE_SIZE); for (addr = physstart; addr < physend; addr += L1_S_SIZE) { pd[addr >> L1_S_SHIFT] = L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)| L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr; if (write_back && 0) pd[addr >> L1_S_SHIFT] |= L1_S_B; } /* XXX: See below */ if (0xfff00000 < physstart || 0xfff00000 > physend) pd[0xfff00000 >> L1_S_SHIFT] = L1_TYPE_S|L1_S_AP(AP_KRW)| L1_S_DOM(PMAP_DOMAIN_KERNEL)|physstart; __asm __volatile("mcr p15, 0, %1, c2, c0, 0\n" /* set TTB */ "mcr p15, 0, %1, c8, c7, 0\n" /* Flush TTB */ "mcr p15, 0, %2, c3, c0, 0\n" /* Set DAR */ "mrc p15, 0, %0, c1, c0, 0\n" "orr %0, %0, #1\n" /* MMU_ENABLE */ "mcr p15, 0, %0, c1, c0, 0\n" "mrc p15, 0, %0, c2, c0, 0\n" /* CPWAIT */ "mov r0, r0\n" "sub pc, pc, #4\n" : "=r" (tmp) : "r" (pd), "r" (domain)); /* * XXX: This is the most stupid workaround I've ever wrote. * For some reason, the KB9202 won't boot the kernel unless * we access an address which is not in the * 0x20000000 - 0x20ffffff range. I hope I'll understand * what's going on later. */ __hack = *(volatile int *)0xfffff21c; } void __start(void) { void *curaddr; void *dst, *altdst; char *kernel = (char *)&kernel_start; int sp; int pt_addr; __asm __volatile("mov %0, pc" : "=r" (curaddr)); curaddr = (void*)((unsigned int)curaddr & 0xfff00000); #ifdef KZIP if (*kernel == 0x1f && kernel[1] == 0x8b) { pt_addr = (((int)&_end + KERNSIZE + 0x100) & ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE; #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ if ((cpu_ident() & 0x0000f000) == 0x00009000) arm9_setup(); #endif setup_pagetables(pt_addr, (vm_paddr_t)curaddr, (vm_paddr_t)curaddr + 0x10000000, 1); /* Gzipped kernel */ dst = inflate_kernel(kernel, &_end); kernel = (char *)&_end; altdst = 4 + load_kernel((unsigned int)kernel, (unsigned int)curaddr, (unsigned int)&func_end + 800 , 0); if (altdst > dst) dst = altdst; /* * Disable MMU. 
Otherwise, setup_pagetables call below * might overwrite the L1 table we are currently using. */ cpu_idcache_wbinv_all(); cpu_l2cache_wbinv_all(); __asm __volatile("mrc p15, 0, %0, c1, c0, 0\n" "bic %0, %0, #1\n" /* MMU_DISABLE */ "mcr p15, 0, %0, c1, c0, 0\n" :"=r" (pt_addr)); } else #endif dst = 4 + load_kernel((unsigned int)&kernel_start, (unsigned int)curaddr, (unsigned int)&func_end, 0); dst = (void *)(((vm_offset_t)dst & ~3)); pt_addr = ((unsigned int)dst &~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE; setup_pagetables(pt_addr, (vm_paddr_t)curaddr, (vm_paddr_t)curaddr + 0x10000000, 0); sp = pt_addr + L1_TABLE_SIZE + 8192; sp = sp &~3; dst = (void *)(sp + 4); memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end - (unsigned int)&load_kernel + 800); do_call(dst, kernel, dst + (unsigned int)(&func_end) - (unsigned int)(&load_kernel) + 800, sp); } /* We need to provide these functions but never call them */ void __aeabi_unwind_cpp_pr0(void); void __aeabi_unwind_cpp_pr1(void); void __aeabi_unwind_cpp_pr2(void); __strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr1); __strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr2); void __aeabi_unwind_cpp_pr0(void) { } Index: head/sys/arm/arm/locore-v4.S =================================================================== --- head/sys/arm/arm/locore-v4.S (revision 295800) +++ head/sys/arm/arm/locore-v4.S (revision 295801) @@ -1,483 +1,483 @@ /* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */ /*- * Copyright 2011 Semihalf * Copyright (C) 1994-1997 Mark Brinicombe * Copyright (C) 1994 Brini * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of Brini may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
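Recapping the KZIP path in __start and inflate_kernel(): the 0x1f 0x8b test recognizes a gzip stream, i_input starts past the 10-byte fixed header (GZ_HEAD points into it), and one optional NUL-terminated name/comment field is skipped when FLG & 0x18 is set, which is what the "[3] & 0x18" test does. A standalone rendering of that header walk, a sketch rather than the kernel code itself:

#include <stddef.h>
#include <stdint.h>

static const uint8_t *
gz_payload(const uint8_t *kernel)
{
	const uint8_t *p;

	if (kernel[0] != 0x1f || kernel[1] != 0x8b)
		return (NULL);		/* not a gzip stream */
	p = kernel + 10;		/* skip the fixed header */
	if (kernel[3] & 0x18) {		/* FLG has FNAME or FCOMMENT */
		while (*p != '\0')	/* skip one NUL-terminated field */
			p++;
		p++;
	}
	return (p);			/* start of the deflate data */
}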
* */ #include "assym.s" #include #include #include #include -#include +#include __FBSDID("$FreeBSD$"); /* 2K initial stack is plenty, it is only used by initarm() */ #define INIT_ARM_STACK_SIZE 2048 #define CPWAIT_BRANCH \ sub pc, pc, #4 #define CPWAIT(tmp) \ mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ mov tmp, tmp /* wait for it to complete */ ;\ CPWAIT_BRANCH /* branch to next insn */ /* * This is for libkvm, and should be the address of the beginning * of the kernel text segment (not necessarily the same as kernbase). * * These are being phased out. Newer copies of libkvm don't need these * values as the information is added to the core file by inspecting * the running kernel. */ .text .align 2 #ifdef PHYSADDR .globl kernbase .set kernbase,KERNBASE .globl physaddr .set physaddr,PHYSADDR #endif /* * On entry for FreeBSD boot ABI: * r0 - metadata pointer or 0 (boothowto on AT91's boot2) * r1 - if (r0 == 0) then metadata pointer * On entry for Linux boot ABI: * r0 - 0 * r1 - machine type (passed as arg2 to initarm) * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm) * * For both types of boot we gather up the args, put them in a struct arm_boot_params * structure and pass that to initarm. */ .globl btext btext: ASENTRY_NP(_start) STOP_UNWINDING /* Can't unwind into the bootloader! */ mov r9, r0 /* 0 or boot mode from boot2 */ mov r8, r1 /* Save Machine type */ mov ip, r2 /* Save meta data */ mov fp, r3 /* Future expansion */ /* Make sure interrupts are disabled. */ mrs r7, cpsr orr r7, r7, #(PSR_I | PSR_F) msr cpsr_c, r7 #if defined (FLASHADDR) && defined(LOADERRAMADDR) /* * Sanity check the configuration. * FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases. * ARMv4 and ARMv5 make assumptions on where they are loaded. * TODO: Fix the ARMv4/v5 case. */ #ifndef PHYSADDR #error PHYSADDR must be defined for this configuration #endif /* Check if we're running from flash. */ ldr r7, =FLASHADDR /* * If we're running with MMU disabled, test against the * physical address instead. */ mrc p15, 0, r2, c1, c0, 0 ands r2, r2, #CPU_CONTROL_MMU_ENABLE ldreq r6, =PHYSADDR ldrne r6, =LOADERRAMADDR cmp r7, r6 bls flash_lower cmp r7, pc bhi from_ram b do_copy flash_lower: cmp r6, pc bls from_ram do_copy: ldr r7, =KERNBASE adr r1, _start ldr r0, Lreal_start ldr r2, Lend sub r2, r2, r0 sub r0, r0, r7 add r0, r0, r6 mov r4, r0 bl memcpy ldr r0, Lram_offset add pc, r4, r0 Lram_offset: .word from_ram-_C_LABEL(_start) from_ram: nop #endif disable_mmu: /* Disable MMU for a while */ mrc p15, 0, r2, c1, c0, 0 bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\ CPU_CONTROL_WBUF_ENABLE) bic r2, r2, #(CPU_CONTROL_IC_ENABLE) bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE) mcr p15, 0, r2, c1, c0, 0 nop nop nop CPWAIT(r0) Lunmapped: /* * Build page table from scratch. */ /* * Figure out the physical address we're loaded at by assuming this * entry point code is in the first L1 section and so if we clear the * offset bits of the pc that will give us the section-aligned load * address, which remains in r5 throughout all the following code. */ ldr r2, =(L1_S_OFFSET) bic r5, pc, r2 /* Find the delta between VA and PA, result stays in r0 throughout. */ adr r0, Lpagetable bl translate_va_to_pa /* * First map the entire 4GB address space as VA=PA. It's mapped as * normal (cached) memory because it's for things like accessing the * parameters passed in from the bootloader, which might be at any * physical address, different for every platform. 
*/ mov r1, #0 mov r2, #0 mov r3, #4096 bl build_pagetables /* * Next we do 64MiB starting at the physical load address, mapped to * the VA the kernel is linked for. */ mov r1, r5 ldr r2, =(KERNVIRTADDR) mov r3, #64 bl build_pagetables /* Create a device mapping for early_printf if specified. */ #if defined(SOCDEV_PA) && defined(SOCDEV_VA) ldr r1, =SOCDEV_PA ldr r2, =SOCDEV_VA mov r3, #1 bl build_device_pagetables #endif mcr p15, 0, r0, c2, c0, 0 /* Set TTB */ mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */ /* Set the Domain Access register. Very important! */ mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT) mcr p15, 0, r0, c3, c0, 0 /* * Enable MMU. */ mrc p15, 0, r0, c1, c0, 0 orr r0, r0, #(CPU_CONTROL_MMU_ENABLE) mcr p15, 0, r0, c1, c0, 0 nop nop nop CPWAIT(r0) /* Transition the PC from physical to virtual addressing. */ ldr pc,=mmu_done mmu_done: nop adr r1, .Lstart ldmia r1, {r1, r2, sp} /* Set initial stack and */ sub r2, r2, r1 /* get zero init data */ mov r3, #0 .L1: str r3, [r1], #0x0004 /* get zero init data */ subs r2, r2, #4 bgt .L1 virt_done: mov r1, #28 /* loader info size is 28 bytes also second arg */ subs sp, sp, r1 /* allocate arm_boot_params struct on stack */ mov r0, sp /* loader info pointer is first arg */ bic sp, sp, #7 /* align stack to 8 bytes */ str r1, [r0] /* Store length of loader info */ str r9, [r0, #4] /* Store r0 from boot loader */ str r8, [r0, #8] /* Store r1 from boot loader */ str ip, [r0, #12] /* store r2 from boot loader */ str fp, [r0, #16] /* store r3 from boot loader */ str r5, [r0, #20] /* store the physical address */ adr r4, Lpagetable /* load the pagetable address */ ldr r5, [r4, #4] str r5, [r0, #24] /* store the pagetable address */ mov fp, #0 /* trace back starts here */ bl _C_LABEL(initarm) /* Off we go */ /* init arm will return the new stack pointer. */ mov sp, r0 bl _C_LABEL(mi_startup) /* call mi_startup()! */ adr r0, .Lmainreturned b _C_LABEL(panic) /* NOTREACHED */ END(_start) #define VA_TO_PA_POINTER(name, table) \ name: ;\ .word . ;\ .word table /* * Returns the physical address of a magic va to pa pointer. * r0 - The pagetable data pointer. This must be built using the * VA_TO_PA_POINTER macro. * e.g. * VA_TO_PA_POINTER(Lpagetable, pagetable) * ... * adr r0, Lpagetable * bl translate_va_to_pa * r0 will now contain the physical address of pagetable * r1, r2 - Trashed */ translate_va_to_pa: ldr r1, [r0] sub r2, r1, r0 /* At this point: r2 = VA - PA */ /* * Find the physical address of the table. 
After these two * instructions: * r1 = va(pagetable) * * r0 = va(pagetable) - (VA - PA) * = va(pagetable) - VA + PA * = pa(pagetable) */ ldr r1, [r0, #4] sub r0, r1, r2 RET /* * Builds the page table * r0 - The table base address * r1 - The physical address (trashed) * r2 - The virtual address (trashed) * r3 - The number of 1MiB sections * r4 - Trashed * * Addresses must be 1MiB aligned */ build_device_pagetables: ldr r4, =(L1_TYPE_S|L1_S_AP(AP_KRW)) b 1f build_pagetables: /* Set the required page attributed */ ldr r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)) 1: orr r1, r4 /* Move the virtual address to the correct bit location */ lsr r2, #(L1_S_SHIFT - 2) mov r4, r3 2: str r1, [r0, r2] add r2, r2, #4 add r1, r1, #(L1_S_SIZE) adds r4, r4, #-1 bhi 2b RET VA_TO_PA_POINTER(Lpagetable, pagetable) Lreal_start: .word _start Lend: .word _edata .Lstart: .word _edata .word _ebss .word svcstk + INIT_ARM_STACK_SIZE .Lvirt_done: .word virt_done .Lmainreturned: .asciz "main() returned" .align 2 .bss svcstk: .space INIT_ARM_STACK_SIZE /* * Memory for the initial pagetable. We are unable to place this in * the bss as this will be cleared after the table is loaded. */ .section ".init_pagetable" .align 14 /* 16KiB aligned */ pagetable: .space L1_TABLE_SIZE .text .align 2 .Lcpufuncs: .word _C_LABEL(cpufuncs) ENTRY_NP(cpu_halt) mrs r2, cpsr bic r2, r2, #(PSR_MODE) orr r2, r2, #(PSR_SVC32_MODE) orr r2, r2, #(PSR_I | PSR_F) msr cpsr_fsxc, r2 ldr r4, .Lcpu_reset_address ldr r4, [r4] ldr r0, .Lcpufuncs mov lr, pc ldr pc, [r0, #CF_IDCACHE_WBINV_ALL] mov lr, pc ldr pc, [r0, #CF_L2CACHE_WBINV_ALL] /* * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's * necessary. */ ldr r1, .Lcpu_reset_needs_v4_MMU_disable ldr r1, [r1] cmp r1, #0 mov r2, #0 /* * MMU & IDC off, 32 bit program & data space * Hurl ourselves into the ROM */ mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE) mcr p15, 0, r0, c1, c0, 0 mcrne p15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */ mov pc, r4 /* * _cpu_reset_address contains the address to branch to, to complete * the cpu reset after turning the MMU off * This variable is provided by the hardware specific code */ .Lcpu_reset_address: .word _C_LABEL(cpu_reset_address) /* * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the * v4 MMU disable instruction needs executing... it is an illegal instruction * on f.e. ARM6/7 that locks up the computer in an endless illegal * instruction / data-abort / reset loop. */ .Lcpu_reset_needs_v4_MMU_disable: .word _C_LABEL(cpu_reset_needs_v4_MMU_disable) END(cpu_halt) /* * setjump + longjmp */ ENTRY(setjmp) stmia r0, {r4-r14} mov r0, #0x00000000 RET END(setjmp) ENTRY(longjmp) ldmia r0, {r4-r14} mov r0, #0x00000001 RET END(longjmp) .data .global _C_LABEL(esym) _C_LABEL(esym): .word _C_LABEL(end) ENTRY_NP(abort) b _C_LABEL(abort) END(abort) ENTRY_NP(sigcode) mov r0, sp add r0, r0, #SIGF_UC /* * Call the sigreturn system call. * * We have to load r7 manually rather than using * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is * correct. Using the alternative places esigcode at the address * of the data rather than the address one past the data. */ ldr r7, [pc, #12] /* Load SYS_sigreturn */ swi SYS_sigreturn /* Well if that failed we better exit quick ! */ ldr r7, [pc, #8] /* Load SYS_exit */ swi SYS_exit /* Branch back to retry SYS_sigreturn */ b . 
- 16 END(sigcode) .word SYS_sigreturn .word SYS_exit .align 2 .global _C_LABEL(esigcode) _C_LABEL(esigcode): .data .global szsigcode szsigcode: .long esigcode-sigcode /* End of locore.S */ Index: head/sys/arm/arm/locore-v6.S =================================================================== --- head/sys/arm/arm/locore-v6.S (revision 295800) +++ head/sys/arm/arm/locore-v6.S (revision 295801) @@ -1,587 +1,587 @@ /*- * Copyright 2004-2014 Olivier Houchard * Copyright 2012-2014 Ian Lepore * Copyright 2013-2014 Andrew Turner * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "assym.s" #include #include #include #include #include #include #include -#include <machine/pte.h> +#include <machine/pte-v6.h> __FBSDID("$FreeBSD$"); #if __ARM_ARCH >= 7 #if defined(__ARM_ARCH_7VE__) || defined(__clang__) /* * HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__ * when enabled. llvm >= 3.6 supports it too. */ .arch_extension virt #define MSR_ELR_HYP(regnum) msr elr_hyp, lr #define ERET eret #else #define MSR_ELR_HYP(regnum) .word (0xe12ef300 | regnum) #define ERET .word 0xe160006e #endif #endif /* __ARM_ARCH >= 7 */ /* A small statically-allocated stack used only during initarm() and AP startup. */ #define INIT_ARM_STACK_SIZE 2048 .text .align 2 #if __ARM_ARCH >= 7 #define LEAVE_HYP \ /* Leave HYP mode */ ;\ mrs r0, cpsr ;\ and r0, r0, #(PSR_MODE) /* Mode is in the low 5 bits of CPSR */ ;\ teq r0, #(PSR_HYP32_MODE) /* Hyp Mode? */ ;\ bne 1f ;\ /* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\ mrs r0, cpsr ;\ bic r0, r0, #(PSR_MODE) ;\ orr r0, r0, #(PSR_SVC32_MODE) ;\ orr r0, r0, #(PSR_I | PSR_F | PSR_A) ;\ msr spsr_cxsf, r0 ;\ /* Exit hypervisor mode */ ;\ adr lr, 1f ;\ MSR_ELR_HYP(14) ;\ ERET ;\ 1: #else #define LEAVE_HYP #endif /* __ARM_ARCH >= 7 */ /* * On entry for FreeBSD boot ABI: * r0 - metadata pointer or 0 (boothowto on AT91's boot2) * r1 - if (r0 == 0) then metadata pointer * On entry for Linux boot ABI: * r0 - 0 * r1 - machine type (passed as arg2 to initarm) * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm) * * For both types of boot we gather up the args, put them in a struct arm_boot_params * structure and pass that to initarm. */
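The 28-byte block both locore variants assemble on the stack before calling initarm() corresponds to a structure like the one below; the layout follows the str offsets in the assembly, while the field names are illustrative stand-ins for struct arm_boot_params.

#include <stdint.h>

struct boot_params_sketch {
	uint32_t len;		/* [r0, #0]  size of this block: 28 */
	uint32_t arg_r0;	/* [r0, #4]  r0 from the boot loader */
	uint32_t arg_r1;	/* [r0, #8]  r1 (machine type, Linux ABI) */
	uint32_t arg_r2;	/* [r0, #12] r2 (tagged list/dtb pointer) */
	uint32_t arg_r3;	/* [r0, #16] r3 (future expansion) */
	uint32_t phys_base;	/* [r0, #20] section-aligned load address */
	uint32_t pagetable;	/* [r0, #24] boot L1 page table address */
};

_Static_assert(sizeof(struct boot_params_sketch) == 28,
    "matches the 28-byte loader info size in locore");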
*/ .globl btext btext: ASENTRY_NP(_start) STOP_UNWINDING /* Can't unwind into the bootloader! */ /* Make sure interrupts are disabled. */ cpsid ifa mov r8, r0 /* 0 or boot mode from boot2 */ mov r9, r1 /* Save Machine type */ mov r10, r2 /* Save meta data */ mov r11, r3 /* Future expansion */ LEAVE_HYP /* * Check whether data cache is enabled. If it is, then we know * current tags are valid (not power-on garbage values) and there * might be dirty lines that need cleaning. Disable cache to prevent * new lines being allocated, then call wbinv_poc_all to clean it. */ mrc CP15_SCTLR(r7) tst r7, #CPU_CONTROL_DC_ENABLE blne dcache_wbinv_poc_all /* ! Do not write to memory between wbinv and disabling cache ! */ /* * Now there are no dirty lines, but there may still be lines marked * valid. Disable all caches and the MMU, and invalidate everything * before setting up new page tables and re-enabling the mmu. */ 1: bic r7, #CPU_CONTROL_DC_ENABLE bic r7, #CPU_CONTROL_MMU_ENABLE bic r7, #CPU_CONTROL_IC_ENABLE bic r7, #CPU_CONTROL_BPRD_ENABLE bic r7, #CPU_CONTROL_SW_ENABLE orr r7, #CPU_CONTROL_UNAL_ENABLE orr r7, #CPU_CONTROL_AFLT_ENABLE orr r7, #CPU_CONTROL_VECRELOC mcr CP15_SCTLR(r7) DSB ISB bl dcache_inv_poc_all mcr CP15_ICIALLU DSB ISB /* * Build page table from scratch. */ /* * Figure out the physical address we're loaded at by assuming this * entry point code is in the first L1 section and so if we clear the * offset bits of the pc that will give us the section-aligned load * address, which remains in r5 throughout all the following code. */ ldr r2, =(L1_S_OFFSET) bic r5, pc, r2 /* Find the delta between VA and PA, result stays in r0 throughout. */ adr r0, Lpagetable bl translate_va_to_pa /* * First map the entire 4GB address space as VA=PA. It's mapped as * normal (cached) memory because it's for things like accessing the * parameters passed in from the bootloader, which might be at any * physical address, different for every platform. */ mov r1, #0 mov r2, #0 mov r3, #4096 bl build_pagetables /* * Next we do 64MiB starting at the physical load address, mapped to * the VA the kernel is linked for. */ mov r1, r5 ldr r2, =(KERNVIRTADDR) mov r3, #64 bl build_pagetables /* Create a device mapping for early_printf if specified. */ #if defined(SOCDEV_PA) && defined(SOCDEV_VA) ldr r1, =SOCDEV_PA ldr r2, =SOCDEV_VA mov r3, #1 bl build_device_pagetables #endif bl init_mmu /* Transition the PC from physical to virtual addressing. */ ldr pc, =1f 1: /* Setup stack, clear BSS */ ldr r1, =.Lstart ldmia r1, {r1, r2, sp} /* Set initial stack and */ add sp, sp, #INIT_ARM_STACK_SIZE sub r2, r2, r1 /* get zero init data */ mov r3, #0 2: str r3, [r1], #0x0004 /* get zero init data */ subs r2, r2, #4 bgt 2b mov r1, #28 /* loader info size is 28 bytes also second arg */ subs sp, sp, r1 /* allocate arm_boot_params struct on stack */ mov r0, sp /* loader info pointer is first arg */ bic sp, sp, #7 /* align stack to 8 bytes */ str r1, [r0] /* Store length of loader info */ str r8, [r0, #4] /* Store r0 from boot loader */ str r9, [r0, #8] /* Store r1 from boot loader */ str r10, [r0, #12] /* store r2 from boot loader */ str r11, [r0, #16] /* store r3 from boot loader */ str r5, [r0, #20] /* store the physical address */ adr r4, Lpagetable /* load the pagetable address */ ldr r5, [r4, #4] str r5, [r0, #24] /* store the pagetable address */ mov fp, #0 /* trace back starts here */ bl _C_LABEL(initarm) /* Off we go */ /* init arm will return the new stack pointer. */ mov sp, r0 bl _C_LABEL(mi_startup) /* call mi_startup()! 
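Both locore files rely on the VA_TO_PA_POINTER/translate_va_to_pa idiom: a two-word record stores its own link-time address and the page table's link-time address, and because adr yields the record's physical address at run time, the VA-PA delta (and from it the table's physical address) falls out by subtraction. The trick in C, under 32-bit assumptions and with illustrative names:

#include <stdint.h>

struct va_to_pa_ptr {
	uint32_t self_va;	/* ".word ."     - record's link-time VA */
	uint32_t table_va;	/* ".word table" - table's link-time VA */
};

static uint32_t
translate_va_to_pa(const struct va_to_pa_ptr *rec)
{
	/* "rec" is the record's run-time (physical) address. */
	uint32_t delta = rec->self_va - (uint32_t)(uintptr_t)rec;

	/* pa(table) = va(table) - (VA - PA) */
	return (rec->table_va - delta);
}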
*/ ldr r0, =.Lmainreturned b _C_LABEL(panic) /* NOTREACHED */ END(_start) #define VA_TO_PA_POINTER(name, table) \ name: ;\ .word . ;\ .word table /* * Returns the physical address of a magic va to pa pointer. * r0 - The pagetable data pointer. This must be built using the * VA_TO_PA_POINTER macro. * e.g. * VA_TO_PA_POINTER(Lpagetable, pagetable) * ... * adr r0, Lpagetable * bl translate_va_to_pa * r0 will now contain the physical address of pagetable * r1, r2 - Trashed */ translate_va_to_pa: ldr r1, [r0] sub r2, r1, r0 /* At this point: r2 = VA - PA */ /* * Find the physical address of the table. After these two * instructions: * r1 = va(pagetable) * * r0 = va(pagetable) - (VA - PA) * = va(pagetable) - VA + PA * = pa(pagetable) */ ldr r1, [r0, #4] sub r0, r1, r2 mov pc, lr /* * Init MMU * r0 - the table base address */ ASENTRY_NP(init_mmu) /* Setup TLB and MMU registers */ mcr CP15_TTBR0(r0) /* Set TTB */ mov r0, #0 mcr CP15_CONTEXTIDR(r0) /* Set ASID to 0 */ /* Set the Domain Access register */ mov r0, #DOMAIN_CLIENT /* Only domain #0 is used */ mcr CP15_DACR(r0) /* * Set TEX remap registers * - All is set to uncacheable memory */ ldr r0, =0xAAAAA mcr CP15_PRRR(r0) mov r0, #0 mcr CP15_NMRR(r0) mcr CP15_TLBIALL /* Flush TLB */ DSB ISB /* Enable MMU */ mrc CP15_SCTLR(r0) orr r0, r0, #CPU_CONTROL_MMU_ENABLE orr r0, r0, #CPU_CONTROL_V6_EXTPAGE orr r0, r0, #CPU_CONTROL_TR_ENABLE orr r0, r0, #CPU_CONTROL_AF_ENABLE mcr CP15_SCTLR(r0) DSB ISB mcr CP15_TLBIALL /* Flush TLB */ mcr CP15_BPIALL /* Flush Branch predictor */ DSB ISB mov pc, lr END(init_mmu) /* * Init SMP coherent mode, enable caching and switch to final MMU table. * Called with disabled caches * r0 - The table base address * r1 - clear bits for aux register * r2 - set bits for aux register */ ASENTRY_NP(reinit_mmu) push {r4-r11, lr} mov r4, r0 mov r5, r1 mov r6, r2 /* !! Be very paranoid here !! */ /* !! We cannot write single bit here !! */ #if 0 /* XXX writeback shouldn't be necessary */ /* Write back and invalidate all integrated caches */ bl dcache_wbinv_poc_all #else bl dcache_inv_pou_all #endif mcr CP15_ICIALLU DSB ISB /* Set auxiliary register */ mrc CP15_ACTLR(r7) bic r8, r7, r5 /* Mask bits */ eor r8, r8, r6 /* Set bits */ teq r7, r8 mcrne CP15_ACTLR(r8) DSB ISB /* Enable caches. 
*/ mrc CP15_SCTLR(r7) orr r7, #CPU_CONTROL_DC_ENABLE orr r7, #CPU_CONTROL_IC_ENABLE orr r7, #CPU_CONTROL_BPRD_ENABLE mcr CP15_SCTLR(r7) DSB mcr CP15_TTBR0(r4) /* Set new TTB */ DSB ISB mcr CP15_TLBIALL /* Flush TLB */ mcr CP15_BPIALL /* Flush Branch predictor */ DSB ISB #if 0 /* XXX writeback shouldn't be necessary */ /* Write back and invalidate all integrated caches */ bl dcache_wbinv_poc_all #else bl dcache_inv_pou_all #endif mcr CP15_ICIALLU DSB ISB pop {r4-r11, pc} END(reinit_mmu) /* * Builds the page table * r0 - The table base address * r1 - The physical address (trashed) * r2 - The virtual address (trashed) * r3 - The number of 1MiB sections * r4 - Trashed * * Addresses must be 1MiB aligned */ build_device_pagetables: ldr r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0 b 1f build_pagetables: /* Set the required page attributed */ ldr r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0 1: orr r1, r4 /* Move the virtual address to the correct bit location */ lsr r2, #(PTE1_SHIFT - 2) mov r4, r3 2: str r1, [r0, r2] add r2, r2, #4 add r1, r1, #(PTE1_SIZE) adds r4, r4, #-1 bhi 2b mov pc, lr VA_TO_PA_POINTER(Lpagetable, boot_pt1) .Lstart: .word _edata /* Note that these three items are */ .word _ebss /* loaded with a single ldmia and */ .word svcstk /* must remain in order together. */ .Lmainreturned: .asciz "main() returned" .align 2 .bss svcstk: .space INIT_ARM_STACK_SIZE * MAXCPU /* * Memory for the initial pagetable. We are unable to place this in * the bss as this will be cleared after the table is loaded. */ .section ".init_pagetable" .align 14 /* 16KiB aligned */ .globl boot_pt1 boot_pt1: .space L1_TABLE_SIZE .text .align 2 .Lcpufuncs: .word _C_LABEL(cpufuncs) #if defined(SMP) ASENTRY_NP(mpentry) /* Make sure interrupts are disabled. */ cpsid ifa LEAVE_HYP /* Setup core, disable all caches. */ mrc CP15_SCTLR(r0) bic r0, #CPU_CONTROL_MMU_ENABLE bic r0, #CPU_CONTROL_DC_ENABLE bic r0, #CPU_CONTROL_IC_ENABLE bic r0, #CPU_CONTROL_BPRD_ENABLE bic r0, #CPU_CONTROL_SW_ENABLE orr r0, #CPU_CONTROL_UNAL_ENABLE orr r0, #CPU_CONTROL_AFLT_ENABLE orr r0, #CPU_CONTROL_VECRELOC mcr CP15_SCTLR(r0) DSB ISB /* Invalidate L1 cache I+D cache */ bl dcache_inv_pou_all mcr CP15_ICIALLU DSB ISB /* Find the delta between VA and PA */ adr r0, Lpagetable bl translate_va_to_pa bl init_mmu adr r1, .Lstart+8 /* Get initstack pointer from */ ldr sp, [r1] /* startup data. */ mrc CP15_MPIDR(r0) /* Get processor id number. */ and r0, r0, #0x0f mov r1, #INIT_ARM_STACK_SIZE mul r2, r1, r0 /* Point sp to initstack */ add sp, sp, r2 /* area for this processor. */ /* Switch to virtual addresses. */ ldr pc, =1f 1: mov fp, #0 /* trace back starts here */ bl _C_LABEL(init_secondary)/* Off we go, cpu id in r0. */ adr r0, .Lmpreturned b _C_LABEL(panic) /* NOTREACHED */ END(mpentry) .Lmpreturned: .asciz "init_secondary() returned" .align 2 #endif ENTRY_NP(cpu_halt) /* XXX re-implement !!! 
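mpentry's stack selection above, restated in C: the low nibble of MPIDR is taken as the CPU number, and each AP points sp at its own INIT_ARM_STACK_SIZE slice of the shared svcstk area (the BSP path instead starts at svcstk + INIT_ARM_STACK_SIZE). A sketch with an assumed MAXCPU; names and sizes are illustrative:

#include <stdint.h>

#define INIT_ARM_STACK_SIZE	2048
#define MAXCPU			4	/* assumed for the sketch */

static char svcstk[INIT_ARM_STACK_SIZE * MAXCPU];

static char *
ap_initial_sp(uint32_t mpidr)
{
	uint32_t cpu = mpidr & 0x0f;	/* processor id, as in mpentry */

	/* sp = svcstk + cpu * INIT_ARM_STACK_SIZE, per the assembly. */
	return (svcstk + cpu * INIT_ARM_STACK_SIZE);
}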
*/ cpsid ifa bl dcache_wbinv_poc_all ldr r4, .Lcpu_reset_address ldr r4, [r4] teq r4, #0 movne pc, r4 1: WFI b 1b /* * _cpu_reset_address contains the address to branch to, to complete * the cpu reset after turning the MMU off * This variable is provided by the hardware specific code */ .Lcpu_reset_address: .word _C_LABEL(cpu_reset_address) END(cpu_halt) /* * setjump + longjmp */ ENTRY(setjmp) stmia r0, {r4-r14} mov r0, #0x00000000 RET END(setjmp) ENTRY(longjmp) ldmia r0, {r4-r14} mov r0, #0x00000001 RET END(longjmp) .data .global _C_LABEL(esym) _C_LABEL(esym): .word _C_LABEL(end) ENTRY_NP(abort) b _C_LABEL(abort) END(abort) ENTRY_NP(sigcode) mov r0, sp add r0, r0, #SIGF_UC /* * Call the sigreturn system call. * * We have to load r7 manually rather than using * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is * correct. Using the alternative places esigcode at the address * of the data rather than the address one past the data. */ ldr r7, [pc, #12] /* Load SYS_sigreturn */ swi SYS_sigreturn /* Well if that failed we better exit quick ! */ ldr r7, [pc, #8] /* Load SYS_exit */ swi SYS_exit /* Branch back to retry SYS_sigreturn */ b . - 16 END(sigcode) .word SYS_sigreturn .word SYS_exit .align 2 .global _C_LABEL(esigcode) _C_LABEL(esigcode): .data .global szsigcode szsigcode: .long esigcode-sigcode /* End of locore.S */ Index: head/sys/arm/include/pte.h =================================================================== --- head/sys/arm/include/pte.h (revision 295800) +++ head/sys/arm/include/pte.h (nonexistent) @@ -1,356 +0,0 @@ -/* $NetBSD: pte.h,v 1.1 2001/11/23 17:39:04 thorpej Exp $ */ - -/*- - * Copyright (c) 1994 Mark Brinicombe. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the RiscBSD team. - * 4. The name "RiscBSD" nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $FreeBSD$ - */ -#include <machine/acle-compat.h> - -#if __ARM_ARCH >= 6 -#include <machine/pte-v6.h> -#else /* __ARM_ARCH >= 6 */ - -#ifndef _MACHINE_PTE_H_ -#define _MACHINE_PTE_H_ - -#ifndef LOCORE -typedef uint32_t pd_entry_t; /* page directory entry */ -typedef uint32_t pt_entry_t; /* page table entry */ -typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ -#endif - -#define PG_FRAME 0xfffff000 - -/* The PT_SIZE definition is misleading... A page table is only 0x400 - * bytes long. But since VM mapping can only be done to 0x1000 a single - * 1KB block cannot be steered to a va by itself. Therefore the - * page tables are allocated in blocks of 4. i.e. if a 1 KB block - * was allocated for a PT then the other 3KB would also get mapped - * whenever the 1KB was mapped. - */ - -#define PT_RSIZE 0x0400 /* Real page table size */ -#define PT_SIZE 0x1000 -#define PD_SIZE 0x4000 - -/* Page table types and masks */ -#define L1_PAGE 0x01 /* L1 page table mapping */ -#define L1_SECTION 0x02 /* L1 section mapping */ -#define L1_FPAGE 0x03 /* L1 fine page mapping */ -#define L1_MASK 0x03 /* Mask for L1 entry type */ -#define L2_LPAGE 0x01 /* L2 large page (64KB) */ -#define L2_SPAGE 0x02 /* L2 small page (4KB) */ -#define L2_MASK 0x03 /* Mask for L2 entry type */ -#define L2_INVAL 0x00 /* L2 invalid type */ - -/* - * The ARM MMU architecture was introduced with ARM v3 (previous ARM - * architecture versions used an optional off-CPU memory controller - * to perform address translation). - * - * The ARM MMU consists of a TLB and translation table walking logic. - * There is typically one TLB per memory interface (or, put another - * way, one TLB per software-visible cache). - * - * The ARM MMU is capable of mapping memory in the following chunks: - * - * 1M Sections (L1 table) - * - * 64K Large Pages (L2 table) - * - * 4K Small Pages (L2 table) - * - * 1K Tiny Pages (L2 table) - * - * There are two types of L2 tables: Coarse Tables and Fine Tables. - * Coarse Tables can map Large and Small Pages. Fine Tables can - * map Tiny Pages. - * - * Coarse Tables can define 4 Subpages within Large and Small pages. - * Subpages define different permissions for each Subpage within - * a Page. - * - * Coarse Tables are 1K in length. Fine tables are 4K in length. - * - * The Translation Table Base register holds the pointer to the - * L1 Table. The L1 Table is a 16K contiguous chunk of memory - * aligned to a 16K boundary. Each entry in the L1 Table maps - * 1M of virtual address space, either via a Section mapping or - * via an L2 Table. - * - * In addition, the Fast Context Switching Extension (FCSE) is available - * on some ARM v4 and ARM v5 processors. FCSE is a way of eliminating - * TLB/cache flushes on context switch by use of a smaller address space - * and a "process ID" that modifies the virtual address before being - * presented to the translation logic. - */ - -/* ARMv6 super-sections.
*/ -#define L1_SUP_SIZE 0x01000000 /* 16M */ -#define L1_SUP_OFFSET (L1_SUP_SIZE - 1) -#define L1_SUP_FRAME (~L1_SUP_OFFSET) -#define L1_SUP_SHIFT 24 - -#define L1_S_SIZE 0x00100000 /* 1M */ -#define L1_S_OFFSET (L1_S_SIZE - 1) -#define L1_S_FRAME (~L1_S_OFFSET) -#define L1_S_SHIFT 20 - -#define L2_L_SIZE 0x00010000 /* 64K */ -#define L2_L_OFFSET (L2_L_SIZE - 1) -#define L2_L_FRAME (~L2_L_OFFSET) -#define L2_L_SHIFT 16 - -#define L2_S_SIZE 0x00001000 /* 4K */ -#define L2_S_OFFSET (L2_S_SIZE - 1) -#define L2_S_FRAME (~L2_S_OFFSET) -#define L2_S_SHIFT 12 - -#define L2_T_SIZE 0x00000400 /* 1K */ -#define L2_T_OFFSET (L2_T_SIZE - 1) -#define L2_T_FRAME (~L2_T_OFFSET) -#define L2_T_SHIFT 10 - -/* - * The NetBSD VM implementation only works on whole pages (4K), - * whereas the ARM MMU's Coarse tables are sized in terms of 1K - * (16K L1 table, 1K L2 table). - * - * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2 - * table. - */ -#define L1_TABLE_SIZE 0x4000 /* 16K */ -#define L2_TABLE_SIZE 0x1000 /* 4K */ -/* - * The new pmap deals with the 1KB coarse L2 tables by - * allocating them from a pool. Until every port has been converted, - * keep the old L2_TABLE_SIZE define lying around. Converted ports - * should use L2_TABLE_SIZE_REAL until then. - */ -#define L2_TABLE_SIZE_REAL 0x400 /* 1K */ - -/* Total number of page table entries in L2 table */ -#define L2_PTE_NUM_TOTAL (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)) - -/* - * ARM L1 Descriptors - */ - -#define L1_TYPE_INV 0x00 /* Invalid (fault) */ -#define L1_TYPE_C 0x01 /* Coarse L2 */ -#define L1_TYPE_S 0x02 /* Section */ -#define L1_TYPE_F 0x03 /* Fine L2 */ -#define L1_TYPE_MASK 0x03 /* mask of type bits */ - -/* L1 Section Descriptor */ -#define L1_S_B 0x00000004 /* bufferable Section */ -#define L1_S_C 0x00000008 /* cacheable Section */ -#define L1_S_IMP 0x00000010 /* implementation defined */ -#define L1_S_XN (1 << 4) /* execute not */ -#define L1_S_DOM(x) ((x) << 5) /* domain */ -#define L1_S_DOM_MASK L1_S_DOM(0xf) -#define L1_S_AP(x) ((x) << 10) /* access permissions */ -#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ -#define L1_S_TEX(x) (((x) & 0x7) << 12) /* Type Extension */ -#define L1_S_TEX_MASK (0x7 << 12) /* Type Extension */ -#define L1_S_APX (1 << 15) -#define L1_SHARED (1 << 16) - -#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */ -#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */ - -#define L1_S_SUPERSEC ((1) << 18) /* Section is a super-section. 
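A worked example of how the (v4) L1 constants deleted here are consumed when walking a translation table: mask the two low type bits, then apply the address mask that matches the entry type. Self-contained, with the values copied from the definitions above:

#include <stdint.h>
#include <stdio.h>

#define L1_TYPE_MASK	0x03
#define L1_TYPE_C	0x01		/* coarse L2 table */
#define L1_TYPE_S	0x02		/* 1 MiB section */
#define L1_S_ADDR_MASK	0xfff00000u
#define L1_C_ADDR_MASK	0xfffffc00u

static void
decode_l1(uint32_t pde, uint32_t va)
{
	switch (pde & L1_TYPE_MASK) {
	case L1_TYPE_S:
		printf("section: pa=0x%08x\n",
		    (pde & L1_S_ADDR_MASK) | (va & 0x000fffff));
		break;
	case L1_TYPE_C:
		printf("coarse L2 table at pa=0x%08x\n",
		    pde & L1_C_ADDR_MASK);
		break;
	default:
		printf("invalid (fault)\n");
	}
}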
*/ - -/* L1 Coarse Descriptor */ -#define L1_C_IMP0 0x00000004 /* implementation defined */ -#define L1_C_IMP1 0x00000008 /* implementation defined */ -#define L1_C_IMP2 0x00000010 /* implementation defined */ -#define L1_C_DOM(x) ((x) << 5) /* domain */ -#define L1_C_DOM_MASK L1_C_DOM(0xf) -#define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ - -#define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */ - -/* L1 Fine Descriptor */ -#define L1_F_IMP0 0x00000004 /* implementation defined */ -#define L1_F_IMP1 0x00000008 /* implementation defined */ -#define L1_F_IMP2 0x00000010 /* implementation defined */ -#define L1_F_DOM(x) ((x) << 5) /* domain */ -#define L1_F_DOM_MASK L1_F_DOM(0xf) -#define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */ - -#define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */ - -/* - * ARM L2 Descriptors - */ - -#define L2_TYPE_INV 0x00 /* Invalid (fault) */ -#define L2_TYPE_L 0x01 /* Large Page */ -#define L2_TYPE_S 0x02 /* Small Page */ -#define L2_TYPE_T 0x03 /* Tiny Page */ -#define L2_TYPE_MASK 0x03 /* mask of type bits */ - - /* - * This L2 Descriptor type is available on XScale processors - * when using a Coarse L1 Descriptor. The Extended Small - * Descriptor has the same format as the XScale Tiny Descriptor, - * but describes a 4K page, rather than a 1K page. - */ -#define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */ - -#define L2_B 0x00000004 /* Bufferable page */ -#define L2_C 0x00000008 /* Cacheable page */ -#define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */ -#define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */ -#define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */ -#define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */ - -#define L2_SHARED (1 << 10) -#define L2_APX (1 << 9) -#define L2_XN (1 << 0) -#define L2_L_TEX_MASK (0x7 << 12) /* Type Extension */ -#define L2_L_TEX(x) (((x) & 0x7) << 12) -#define L2_S_TEX_MASK (0x7 << 6) /* Type Extension */ -#define L2_S_TEX(x) (((x) & 0x7) << 6) - -#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */ -#define L2_XSCALE_L_S(x) (1 << 15) /* Shared */ -#define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */ - -/* - * Access Permissions for L1 and L2 Descriptors. - */ -#define AP_W 0x01 /* writable */ -#define AP_REF 0x01 /* referenced flag */ -#define AP_U 0x02 /* user */ - -/* - * Short-hand for common AP_* constants. - * - * Note: These values assume the S (System) bit is set and - * the R (ROM) bit is clear in CP15 register 1. - */ -#define AP_KR 0x00 /* kernel read */ -#define AP_KRW 0x01 /* kernel read/write */ -#define AP_KRWUR 0x02 /* kernel read/write usr read */ -#define AP_KRWURW 0x03 /* kernel read/write usr read/write */ - -/* - * Domain Types for the Domain Access Control Register. - */ -#define DOMAIN_FAULT 0x00 /* no access */ -#define DOMAIN_CLIENT 0x01 /* client */ -#define DOMAIN_RESERVED 0x02 /* reserved */ -#define DOMAIN_MANAGER 0x03 /* manager */ - -/* - * Type Extension bits for XScale processors. 
- * - * Behavior of C and B when X == 0: - * - * C B Cacheable Bufferable Write Policy Line Allocate Policy - * 0 0 N N - - - * 0 1 N Y - - - * 1 0 Y Y Write-through Read Allocate - * 1 1 Y Y Write-back Read Allocate - * - * Behavior of C and B when X == 1: - * C B Cacheable Bufferable Write Policy Line Allocate Policy - * 0 0 - - - - DO NOT USE - * 0 1 N Y - - - * 1 0 Mini-Data - - - - * 1 1 Y Y Write-back R/W Allocate - */ -#define TEX_XSCALE_X 0x01 /* X modifies C and B */ -#define TEX_XSCALE_E 0x02 -#define TEX_XSCALE_T 0x04 - -/* Xscale core 3 */ - -/* - * - * Cache attributes with L2 present, S = 0 - * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce - * 0 0 0 0 0 N N - N N - * 0 0 0 0 1 N N - N Y - * 0 0 0 1 0 Y Y WT N Y - * 0 0 0 1 1 Y Y WB Y Y - * 0 0 1 0 0 N N - Y Y - * 0 0 1 0 1 N N - N N - * 0 0 1 1 0 Y Y - - N - * 0 0 1 1 1 Y Y WT Y Y - * 0 1 0 0 0 N N - N N - * 0 1 0 0 1 N/A N/A N/A N/A N/A - * 0 1 0 1 0 N/A N/A N/A N/A N/A - * 0 1 0 1 1 N/A N/A N/A N/A N/A - * 0 1 1 X X N/A N/A N/A N/A N/A - * 1 X 0 0 0 N N - N Y - * 1 X 0 0 1 Y N WB N Y - * 1 X 0 1 0 Y N WT N Y - * 1 X 0 1 1 Y N WB Y Y - * 1 X 1 0 0 N N - Y Y - * 1 X 1 0 1 Y Y WB Y Y - * 1 X 1 1 0 Y Y WT Y Y - * 1 X 1 1 1 Y Y WB Y Y - * - * - * - * - * Cache attributes with L2 present, S = 1 - * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce - * 0 0 0 0 0 N N - N N - * 0 0 0 0 1 N N - N Y - * 0 0 0 1 0 Y Y - N Y - * 0 0 0 1 1 Y Y WT Y Y - * 0 0 1 0 0 N N - Y Y - * 0 0 1 0 1 N N - N N - * 0 0 1 1 0 Y Y - - N - * 0 0 1 1 1 Y Y WT Y Y - * 0 1 0 0 0 N N - N N - * 0 1 0 0 1 N/A N/A N/A N/A N/A - * 0 1 0 1 0 N/A N/A N/A N/A N/A - * 0 1 0 1 1 N/A N/A N/A N/A N/A - * 0 1 1 X X N/A N/A N/A N/A N/A - * 1 X 0 0 0 N N - N Y - * 1 X 0 0 1 Y N - N Y - * 1 X 0 1 0 Y N - N Y - * 1 X 0 1 1 Y N - Y Y - * 1 X 1 0 0 N N - Y Y - * 1 X 1 0 1 Y Y WT Y Y - * 1 X 1 1 0 Y Y WT Y Y - * 1 X 1 1 1 Y Y WT Y Y - */ -#endif /* !_MACHINE_PTE_H_ */ -#endif /* __ARM_ARCH >= 6 */ - -/* End of pte.h */ Property changes on: head/sys/arm/include/pte.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/arm/include/pmap-v4.h =================================================================== --- head/sys/arm/include/pmap-v4.h (revision 295800) +++ head/sys/arm/include/pmap-v4.h (revision 295801) @@ -1,516 +1,516 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Derived from hp300 version by Mike Hibler, this version by William * Jolitz uses a recursive map [a pde points to the page directory] to * map the page tables using the pagetables themselves. This is done to * reduce the impact on kernel virtual memory for lots of sparse address * space, and to reduce the cost of memory to each process. * * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 * * $FreeBSD$ */ #ifndef _MACHINE_PMAP_V4_H_ #define _MACHINE_PMAP_V4_H_ -#include <machine/pte.h> +#include <machine/pte-v4.h> #include <machine/cpuconf.h> /* * Pte related macros */ #define PTE_NOCACHE 1 #define PTE_CACHE 2 #define PTE_DEVICE PTE_NOCACHE #define PTE_PAGETABLE 3 enum mem_type { STRONG_ORD = 0, DEVICE_NOSHARE, DEVICE_SHARE, NRML_NOCACHE, NRML_IWT_OWT, NRML_IWB_OWB, NRML_IWBA_OWBA }; #ifndef LOCORE #include <sys/queue.h> #include <sys/_cpuset.h> #include <sys/_lock.h> #include <sys/_mutex.h> #define PDESIZE sizeof(pd_entry_t) /* for assembly files */ #define PTESIZE sizeof(pt_entry_t) /* for assembly files */ #define pmap_page_get_memattr(m) ((m)->md.pv_memattr) #define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) /* * Pmap stuff */ /* * This structure is used to hold a virtual<->physical address * association and is used mostly by bootstrap code */ struct pv_addr { SLIST_ENTRY(pv_addr) pv_list; vm_offset_t pv_va; vm_paddr_t pv_pa; }; struct pv_entry; struct pv_chunk; struct md_page { int pvh_attrs; vm_memattr_t pv_memattr; vm_offset_t pv_kva; /* first kernel VA mapping */ TAILQ_HEAD(,pv_entry) pv_list; }; struct l1_ttable; struct l2_dtable; /* * The number of L2 descriptor tables which can be tracked by an l2_dtable. * A bucket size of 16 provides for 16MB of contiguous virtual address * space per l2_dtable. Most processes will, therefore, require only two or * three of these to map their whole working set. */
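As a quick sanity check of that sizing, here is a small stand-alone sketch (editorial only, not part of the commit; the EX_-prefixed names are invented stand-ins for L1_S_SHIFT and the constants defined just below):

#define EX_L1_S_SHIFT		20	/* 1M sections */
#define EX_L2_BUCKET_LOG2	4	/* 16 coarse tables per l2_dtable */
#define EX_L2_LOG2	((32 - EX_L1_S_SHIFT) - EX_L2_BUCKET_LOG2)

_Static_assert((1 << EX_L2_BUCKET_LOG2) << EX_L1_S_SHIFT == 0x01000000,
    "one l2_dtable spans 16 x 1M = 16MB of VA");
_Static_assert((1 << EX_L2_LOG2) == 256,
    "256 l2_dtable slots cover the full 4GB address space");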
#define L2_BUCKET_LOG2 4 #define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) /* * Given the above "L2-descriptors-per-l2_dtable" constant, the number * of l2_dtable structures required to track all possible page descriptors * mappable by an L1 translation table is given by the following constants: */ #define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) #define L2_SIZE (1 << L2_LOG2) struct pmap { struct mtx pm_mtx; u_int8_t pm_domain; struct l1_ttable *pm_l1; struct l2_dtable *pm_l2[L2_SIZE]; cpuset_t pm_active; /* active on cpus */ struct pmap_statistics pm_stats; /* pmap statistics */ TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ }; typedef struct pmap *pmap_t; #ifdef _KERNEL extern struct pmap kernel_pmap_store; #define kernel_pmap (&kernel_pmap_store) #define PMAP_ASSERT_LOCKED(pmap) \ mtx_assert(&(pmap)->pm_mtx, MA_OWNED) #define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) #define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) #define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ NULL, MTX_DEF | MTX_DUPOK) #define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx) #define PMAP_MTX(pmap) (&(pmap)->pm_mtx) #define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) #endif /* * For each vm_page_t, there is a list of all currently valid virtual * mappings of that page. An entry is a pv_entry_t, the list is pv_list. */ typedef struct pv_entry { vm_offset_t pv_va; /* virtual address for mapping */ TAILQ_ENTRY(pv_entry) pv_list; int pv_flags; /* flags (wired, etc...) */ pmap_t pv_pmap; /* pmap where mapping lies */ TAILQ_ENTRY(pv_entry) pv_plist; } *pv_entry_t; /* * pv_entries are allocated in chunks per-process. This avoids the * need to track per-pmap assignments. */ #define _NPCM 8 #define _NPCPV 252 struct pv_chunk { pmap_t pc_pmap; TAILQ_ENTRY(pv_chunk) pc_list; uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */ TAILQ_ENTRY(pv_chunk) pc_lru; struct pv_entry pc_pventry[_NPCPV]; }; #ifdef _KERNEL boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); /* * virtual address to page table entry and * to physical address. Likewise for alternate address space. * Note: these work recursively, thus vtopte of a pte will give * the corresponding pde that in turn maps it. */ /* * The current top of kernel VM.
*/ extern vm_offset_t pmap_curmaxkvaddr; /* Virtual address to page table entry */ static __inline pt_entry_t * vtopte(vm_offset_t va) { pd_entry_t *pdep; pt_entry_t *ptep; if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE) return (NULL); return (ptep); } void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); int pmap_change_attr(vm_offset_t, vm_size_t, int); void pmap_kenter(vm_offset_t va, vm_paddr_t pa); void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); void pmap_kremove(vm_offset_t); vm_page_t pmap_use_pt(pmap_t, vm_offset_t); void pmap_debug(int); void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); void pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, int cache); int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int); /* * Definitions for MMU domains */ #define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ #define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ /* * The new pmap ensures that page-tables are always mapping Write-Thru. * Thus, on some platforms we can run fast and loose and avoid syncing PTEs * on every change. * * Unfortunately, not all CPUs have a write-through cache mode. So we * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, * and if there is the chance for PTE syncs to be needed, we define * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) * the code. */ extern int pmap_needs_pte_sync; /* * These macros define the various bit masks in the PTE. * * We use these macros since we use different bits on different processor * models. */ #define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C) #define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\ L1_S_XSCALE_TEX(TEX_XSCALE_T)) #define L2_L_CACHE_MASK_generic (L2_B|L2_C) #define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \ L2_XSCALE_L_TEX(TEX_XSCALE_T)) #define L2_S_PROT_U_generic (L2_AP(AP_U)) #define L2_S_PROT_W_generic (L2_AP(AP_W)) #define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W) #define L2_S_PROT_U_xscale (L2_AP0(AP_U)) #define L2_S_PROT_W_xscale (L2_AP0(AP_W)) #define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W) #define L2_S_CACHE_MASK_generic (L2_B|L2_C) #define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \ L2_XSCALE_T_TEX(TEX_XSCALE_X)) #define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP) #define L1_S_PROTO_xscale (L1_TYPE_S) #define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2) #define L1_C_PROTO_xscale (L1_TYPE_C) #define L2_L_PROTO (L2_TYPE_L) #define L2_S_PROTO_generic (L2_TYPE_S) #define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS) /* * User-visible names for the ones that vary with MMU class. */ #define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) #if ARM_NMMUS > 1 /* More than one MMU class configured; use variables. 
*/ #define L2_S_PROT_U pte_l2_s_prot_u #define L2_S_PROT_W pte_l2_s_prot_w #define L2_S_PROT_MASK pte_l2_s_prot_mask #define L1_S_CACHE_MASK pte_l1_s_cache_mask #define L2_L_CACHE_MASK pte_l2_l_cache_mask #define L2_S_CACHE_MASK pte_l2_s_cache_mask #define L1_S_PROTO pte_l1_s_proto #define L1_C_PROTO pte_l1_c_proto #define L2_S_PROTO pte_l2_s_proto #elif ARM_MMU_GENERIC != 0 #define L2_S_PROT_U L2_S_PROT_U_generic #define L2_S_PROT_W L2_S_PROT_W_generic #define L2_S_PROT_MASK L2_S_PROT_MASK_generic #define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic #define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic #define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic #define L1_S_PROTO L1_S_PROTO_generic #define L1_C_PROTO L1_C_PROTO_generic #define L2_S_PROTO L2_S_PROTO_generic #elif ARM_MMU_XSCALE == 1 #define L2_S_PROT_U L2_S_PROT_U_xscale #define L2_S_PROT_W L2_S_PROT_W_xscale #define L2_S_PROT_MASK L2_S_PROT_MASK_xscale #define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale #define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale #define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale #define L1_S_PROTO L1_S_PROTO_xscale #define L1_C_PROTO L1_C_PROTO_xscale #define L2_S_PROTO L2_S_PROTO_xscale #endif /* ARM_NMMUS > 1 */ #if defined(CPU_XSCALE_81342) #define PMAP_NEEDS_PTE_SYNC 1 #define PMAP_INCLUDE_PTE_SYNC #else #define PMAP_NEEDS_PTE_SYNC 0 #endif /* * These macros return various bits based on kernel/user and protection. * Note that the compiler will usually fold these at compile time. */ #define L1_S_PROT_U (L1_S_AP(AP_U)) #define L1_S_PROT_W (L1_S_AP(AP_W)) #define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) #define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W) #define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) #define L2_L_PROT_U (L2_AP(AP_U)) #define L2_L_PROT_W (L2_AP(AP_W)) #define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) #define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)) #define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) /* * Macros to test if a mapping is mappable with an L1 Section mapping * or an L2 Large Page mapping. */ #define L1_S_MAPPABLE_P(va, pa, size) \ ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) #define L2_L_MAPPABLE_P(va, pa, size) \ ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
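To illustrate how these mappability tests are meant to be used when picking a mapping granularity, here is a hypothetical fragment (editorial sketch only: ex_pick_mapping_size is an invented name, and the kernel types and macros above are assumed to be in scope):

static void
ex_pick_mapping_size(vm_offset_t va, vm_offset_t pa, vm_size_t size)
{

	if (L1_S_MAPPABLE_P(va, pa, size)) {
		/* va and pa share 1M alignment and at least 1M remains:
		 * a single L1 section entry can map the next 1M. */
	} else if (L2_L_MAPPABLE_P(va, pa, size)) {
		/* 64K alignment and at least 64K remains: use a large page. */
	} else {
		/* Otherwise fall back to 4K small pages. */
	}
}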
/* * Provide a fallback in case we were not able to determine it at * compile-time. */ #ifndef PMAP_NEEDS_PTE_SYNC #define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync #define PMAP_INCLUDE_PTE_SYNC #endif #ifdef ARM_L2_PIPT #define _sync_l2(pte, size) cpu_l2cache_wb_range(vtophys(pte), size) #else #define _sync_l2(pte, size) cpu_l2cache_wb_range(pte, size) #endif #define PTE_SYNC(pte) \ do { \ if (PMAP_NEEDS_PTE_SYNC) { \ cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ cpu_drain_writebuf(); \ _sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\ } else \ cpu_drain_writebuf(); \ } while (/*CONSTCOND*/0) #define PTE_SYNC_RANGE(pte, cnt) \ do { \ if (PMAP_NEEDS_PTE_SYNC) { \ cpu_dcache_wb_range((vm_offset_t)(pte), \ (cnt) << 2); /* * sizeof(pt_entry_t) */ \ cpu_drain_writebuf(); \ _sync_l2((vm_offset_t)(pte), \ (cnt) << 2); /* * sizeof(pt_entry_t) */ \ } else \ cpu_drain_writebuf(); \ } while (/*CONSTCOND*/0) extern pt_entry_t pte_l1_s_cache_mode; extern pt_entry_t pte_l1_s_cache_mask; extern pt_entry_t pte_l2_l_cache_mode; extern pt_entry_t pte_l2_l_cache_mask; extern pt_entry_t pte_l2_s_cache_mode; extern pt_entry_t pte_l2_s_cache_mask; extern pt_entry_t pte_l1_s_cache_mode_pt; extern pt_entry_t pte_l2_l_cache_mode_pt; extern pt_entry_t pte_l2_s_cache_mode_pt; extern pt_entry_t pte_l2_s_prot_u; extern pt_entry_t pte_l2_s_prot_w; extern pt_entry_t pte_l2_s_prot_mask; extern pt_entry_t pte_l1_s_proto; extern pt_entry_t pte_l1_c_proto; extern pt_entry_t pte_l2_s_proto; extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); extern void (*pmap_zero_page_func)(vm_paddr_t, int, int); #if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342) void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t); void pmap_zero_page_generic(vm_paddr_t, int, int); void pmap_pte_init_generic(void); #endif /* ARM_MMU_GENERIC != 0 */ #if ARM_MMU_XSCALE == 1 void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t); void pmap_zero_page_xscale(vm_paddr_t, int, int); void pmap_pte_init_xscale(void); void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t); void pmap_use_minicache(vm_offset_t, vm_size_t); #endif /* ARM_MMU_XSCALE == 1 */ #if defined(CPU_XSCALE_81342) #define ARM_HAVE_SUPERSECTIONS #endif #define PTE_KERNEL 0 #define PTE_USER 1 #define l1pte_valid(pde) ((pde) != 0) #define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) #define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) #define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) #define l2pte_index(v) (((v) & L1_S_OFFSET) >> L2_S_SHIFT) #define l2pte_valid(pte) ((pte) != 0) #define l2pte_pa(pte) ((pte) & L2_S_FRAME) #define l2pte_minidata(pte) (((pte) & \ (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\ == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X))) /* L1 and L2 page table macros */ #define pmap_pde_v(pde) l1pte_valid(*(pde)) #define pmap_pde_section(pde) l1pte_section_p(*(pde)) #define pmap_pde_page(pde) l1pte_page_p(*(pde)) #define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde)) #define pmap_pte_v(pte) l2pte_valid(*(pte)) #define pmap_pte_pa(pte) l2pte_pa(*(pte)) /* * Flags that indicate attributes of pages or mappings of pages. * * The PVF_MOD and PVF_REF flags are stored in the mdpage for each * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual * pv_entry's for each page. They live in the same "namespace" so * that we can clear multiple attributes at a time. * * Note the "non-cacheable" flag generally means the page has * multiple mappings in a given address space.
*/ #define PVF_MOD 0x01 /* page is modified */ #define PVF_REF 0x02 /* page is referenced */ #define PVF_WIRED 0x04 /* mapping is wired */ #define PVF_WRITE 0x08 /* mapping is writable */ #define PVF_EXEC 0x10 /* mapping is executable */ #define PVF_NC 0x20 /* mapping is non-cacheable */ #define PVF_MWC 0x40 /* mapping is used multiple times in userland */ #define PVF_UNMAN 0x80 /* mapping is unmanaged */ void vector_page_setprot(int); #define SECTION_CACHE 0x1 #define SECTION_PT 0x2 void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags); #ifdef ARM_HAVE_SUPERSECTIONS void pmap_kenter_supersection(vm_offset_t, uint64_t, int flags); #endif void pmap_postinit(void); #endif /* _KERNEL */ #endif /* !LOCORE */ #endif /* !_MACHINE_PMAP_V4_H_ */ Index: head/sys/arm/include/pte-v4.h =================================================================== --- head/sys/arm/include/pte-v4.h (nonexistent) +++ head/sys/arm/include/pte-v4.h (revision 295801) @@ -0,0 +1,350 @@ +/* $NetBSD: pte.h,v 1.1 2001/11/23 17:39:04 thorpej Exp $ */ + +/*- + * Copyright (c) 1994 Mark Brinicombe. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the RiscBSD team. + * 4. The name "RiscBSD" nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_PTE_V4_H_ +#define _MACHINE_PTE_V4_H_ + +#ifndef LOCORE +typedef uint32_t pd_entry_t; /* page directory entry */ +typedef uint32_t pt_entry_t; /* page table entry */ +typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ +#endif + +#define PG_FRAME 0xfffff000 + +/* The PT_SIZE definition is misleading... A page table is only 0x400 + * bytes long. But since VM mappings can only be made at 0x1000 granularity, + * a single 1KB block cannot be steered to a va by itself. Therefore the + * page tables are allocated in blocks of 4. i.e. if a 1 KB block + * was allocated for a PT then the other 3KB would also get mapped + * whenever the 1KB was mapped. + */
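The arithmetic behind that blocks-of-4 rule, as a small editorial sketch (plain numbers are used because PT_RSIZE and PT_SIZE are only defined just below; this is not part of the commit itself):

/* One hardware coarse table: 0x400 bytes -> 256 entries -> maps 1M of VA.
 * One 4K VM page therefore holds four coarse tables and maps 4M total. */
_Static_assert(0x1000 / 0x400 == 4, "one 4K page holds four 1K coarse tables");
_Static_assert((0x400 / 4) * 0x1000 == 0x00100000, "each 1K table maps 256 * 4K = 1M");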
+ +#define PT_RSIZE 0x0400 /* Real page table size */ +#define PT_SIZE 0x1000 +#define PD_SIZE 0x4000 + +/* Page table types and masks */ +#define L1_PAGE 0x01 /* L1 page table mapping */ +#define L1_SECTION 0x02 /* L1 section mapping */ +#define L1_FPAGE 0x03 /* L1 fine page mapping */ +#define L1_MASK 0x03 /* Mask for L1 entry type */ +#define L2_LPAGE 0x01 /* L2 large page (64KB) */ +#define L2_SPAGE 0x02 /* L2 small page (4KB) */ +#define L2_MASK 0x03 /* Mask for L2 entry type */ +#define L2_INVAL 0x00 /* L2 invalid type */ + +/* + * The ARM MMU architecture was introduced with ARM v3 (previous ARM + * architecture versions used an optional off-CPU memory controller + * to perform address translation). + * + * The ARM MMU consists of a TLB and translation table walking logic. + * There is typically one TLB per memory interface (or, put another + * way, one TLB per software-visible cache). + * + * The ARM MMU is capable of mapping memory in the following chunks: + * + * 1M Sections (L1 table) + * + * 64K Large Pages (L2 table) + * + * 4K Small Pages (L2 table) + * + * 1K Tiny Pages (L2 table) + * + * There are two types of L2 tables: Coarse Tables and Fine Tables. + * Coarse Tables can map Large and Small Pages. Fine Tables can + * map Tiny Pages. + * + * Coarse Tables can define 4 Subpages within Large and Small pages. + * Subpages define different permissions for each Subpage within + * a Page. + * + * Coarse Tables are 1K in length. Fine tables are 4K in length. + * + * The Translation Table Base register holds the pointer to the + * L1 Table. The L1 Table is a 16K contiguous chunk of memory + * aligned to a 16K boundary. Each entry in the L1 Table maps + * 1M of virtual address space, either via a Section mapping or + * via an L2 Table. + * + * In addition, the Fast Context Switching Extension (FCSE) is available + * on some ARM v4 and ARM v5 processors. FCSE is a way of eliminating + * TLB/cache flushes on context switch by use of a smaller address space + * and a "process ID" that modifies the virtual address before being + * presented to the translation logic. + */ + +/* ARMv6 super-sections. */ +#define L1_SUP_SIZE 0x01000000 /* 16M */ +#define L1_SUP_OFFSET (L1_SUP_SIZE - 1) +#define L1_SUP_FRAME (~L1_SUP_OFFSET) +#define L1_SUP_SHIFT 24 + +#define L1_S_SIZE 0x00100000 /* 1M */ +#define L1_S_OFFSET (L1_S_SIZE - 1) +#define L1_S_FRAME (~L1_S_OFFSET) +#define L1_S_SHIFT 20 + +#define L2_L_SIZE 0x00010000 /* 64K */ +#define L2_L_OFFSET (L2_L_SIZE - 1) +#define L2_L_FRAME (~L2_L_OFFSET) +#define L2_L_SHIFT 16 + +#define L2_S_SIZE 0x00001000 /* 4K */ +#define L2_S_OFFSET (L2_S_SIZE - 1) +#define L2_S_FRAME (~L2_S_OFFSET) +#define L2_S_SHIFT 12 + +#define L2_T_SIZE 0x00000400 /* 1K */ +#define L2_T_OFFSET (L2_T_SIZE - 1) +#define L2_T_FRAME (~L2_T_OFFSET) +#define L2_T_SHIFT 10 + +/* + * The NetBSD VM implementation only works on whole pages (4K), + * whereas the ARM MMU's Coarse tables are sized in terms of 1K + * (16K L1 table, 1K L2 table). + * + * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2 + * table. + */ +#define L1_TABLE_SIZE 0x4000 /* 16K */ +#define L2_TABLE_SIZE 0x1000 /* 4K */ +/* + * The new pmap deals with the 1KB coarse L2 tables by + * allocating them from a pool. Until every port has been converted, + * keep the old L2_TABLE_SIZE define lying around. Converted ports + * should use L2_TABLE_SIZE_REAL until then.
+ */ +#define L2_TABLE_SIZE_REAL 0x400 /* 1K */ + +/* Total number of page table entries in L2 table */ +#define L2_PTE_NUM_TOTAL (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)) + +/* + * ARM L1 Descriptors + */ + +#define L1_TYPE_INV 0x00 /* Invalid (fault) */ +#define L1_TYPE_C 0x01 /* Coarse L2 */ +#define L1_TYPE_S 0x02 /* Section */ +#define L1_TYPE_F 0x03 /* Fine L2 */ +#define L1_TYPE_MASK 0x03 /* mask of type bits */ + +/* L1 Section Descriptor */ +#define L1_S_B 0x00000004 /* bufferable Section */ +#define L1_S_C 0x00000008 /* cacheable Section */ +#define L1_S_IMP 0x00000010 /* implementation defined */ +#define L1_S_XN (1 << 4) /* execute not */ +#define L1_S_DOM(x) ((x) << 5) /* domain */ +#define L1_S_DOM_MASK L1_S_DOM(0xf) +#define L1_S_AP(x) ((x) << 10) /* access permissions */ +#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ +#define L1_S_TEX(x) (((x) & 0x7) << 12) /* Type Extension */ +#define L1_S_TEX_MASK (0x7 << 12) /* Type Extension */ +#define L1_S_APX (1 << 15) +#define L1_SHARED (1 << 16) + +#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */ +#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */ + +#define L1_S_SUPERSEC ((1) << 18) /* Section is a super-section. */ + +/* L1 Coarse Descriptor */ +#define L1_C_IMP0 0x00000004 /* implementation defined */ +#define L1_C_IMP1 0x00000008 /* implementation defined */ +#define L1_C_IMP2 0x00000010 /* implementation defined */ +#define L1_C_DOM(x) ((x) << 5) /* domain */ +#define L1_C_DOM_MASK L1_C_DOM(0xf) +#define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ + +#define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */ + +/* L1 Fine Descriptor */ +#define L1_F_IMP0 0x00000004 /* implementation defined */ +#define L1_F_IMP1 0x00000008 /* implementation defined */ +#define L1_F_IMP2 0x00000010 /* implementation defined */ +#define L1_F_DOM(x) ((x) << 5) /* domain */ +#define L1_F_DOM_MASK L1_F_DOM(0xf) +#define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */ + +#define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */ + +/* + * ARM L2 Descriptors + */ + +#define L2_TYPE_INV 0x00 /* Invalid (fault) */ +#define L2_TYPE_L 0x01 /* Large Page */ +#define L2_TYPE_S 0x02 /* Small Page */ +#define L2_TYPE_T 0x03 /* Tiny Page */ +#define L2_TYPE_MASK 0x03 /* mask of type bits */ + + /* + * This L2 Descriptor type is available on XScale processors + * when using a Coarse L1 Descriptor. The Extended Small + * Descriptor has the same format as the XScale Tiny Descriptor, + * but describes a 4K page, rather than a 1K page. + */ +#define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */ + +#define L2_B 0x00000004 /* Bufferable page */ +#define L2_C 0x00000008 /* Cacheable page */ +#define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */ +#define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */ +#define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */ +#define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */ + +#define L2_SHARED (1 << 10) +#define L2_APX (1 << 9) +#define L2_XN (1 << 0) +#define L2_L_TEX_MASK (0x7 << 12) /* Type Extension */ +#define L2_L_TEX(x) (((x) & 0x7) << 12) +#define L2_S_TEX_MASK (0x7 << 6) /* Type Extension */ +#define L2_S_TEX(x) (((x) & 0x7) << 6) + +#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */ +#define L2_XSCALE_L_S(x) (1 << 15) /* Shared */ +#define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */ + +/* + * Access Permissions for L1 and L2 Descriptors. 
+ */ +#define AP_W 0x01 /* writable */ +#define AP_REF 0x01 /* referenced flag */ +#define AP_U 0x02 /* user */ + +/* + * Short-hand for common AP_* constants. + * + * Note: These values assume the S (System) bit is set and + * the R (ROM) bit is clear in CP15 register 1. + */ +#define AP_KR 0x00 /* kernel read */ +#define AP_KRW 0x01 /* kernel read/write */ +#define AP_KRWUR 0x02 /* kernel read/write usr read */ +#define AP_KRWURW 0x03 /* kernel read/write usr read/write */ + +/* + * Domain Types for the Domain Access Control Register. + */ +#define DOMAIN_FAULT 0x00 /* no access */ +#define DOMAIN_CLIENT 0x01 /* client */ +#define DOMAIN_RESERVED 0x02 /* reserved */ +#define DOMAIN_MANAGER 0x03 /* manager */ + +/* + * Type Extension bits for XScale processors. + * + * Behavior of C and B when X == 0: + * + * C B Cacheable Bufferable Write Policy Line Allocate Policy + * 0 0 N N - - + * 0 1 N Y - - + * 1 0 Y Y Write-through Read Allocate + * 1 1 Y Y Write-back Read Allocate + * + * Behavior of C and B when X == 1: + * C B Cacheable Bufferable Write Policy Line Allocate Policy + * 0 0 - - - - DO NOT USE + * 0 1 N Y - - + * 1 0 Mini-Data - - - + * 1 1 Y Y Write-back R/W Allocate + */ +#define TEX_XSCALE_X 0x01 /* X modifies C and B */ +#define TEX_XSCALE_E 0x02 +#define TEX_XSCALE_T 0x04 + +/* Xscale core 3 */ + +/* + * + * Cache attributes with L2 present, S = 0 + * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce + * 0 0 0 0 0 N N - N N + * 0 0 0 0 1 N N - N Y + * 0 0 0 1 0 Y Y WT N Y + * 0 0 0 1 1 Y Y WB Y Y + * 0 0 1 0 0 N N - Y Y + * 0 0 1 0 1 N N - N N + * 0 0 1 1 0 Y Y - - N + * 0 0 1 1 1 Y Y WT Y Y + * 0 1 0 0 0 N N - N N + * 0 1 0 0 1 N/A N/A N/A N/A N/A + * 0 1 0 1 0 N/A N/A N/A N/A N/A + * 0 1 0 1 1 N/A N/A N/A N/A N/A + * 0 1 1 X X N/A N/A N/A N/A N/A + * 1 X 0 0 0 N N - N Y + * 1 X 0 0 1 Y N WB N Y + * 1 X 0 1 0 Y N WT N Y + * 1 X 0 1 1 Y N WB Y Y + * 1 X 1 0 0 N N - Y Y + * 1 X 1 0 1 Y Y WB Y Y + * 1 X 1 1 0 Y Y WT Y Y + * 1 X 1 1 1 Y Y WB Y Y + * + * + * + * + * Cache attributes with L2 present, S = 1 + * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce + * 0 0 0 0 0 N N - N N + * 0 0 0 0 1 N N - N Y + * 0 0 0 1 0 Y Y - N Y + * 0 0 0 1 1 Y Y WT Y Y + * 0 0 1 0 0 N N - Y Y + * 0 0 1 0 1 N N - N N + * 0 0 1 1 0 Y Y - - N + * 0 0 1 1 1 Y Y WT Y Y + * 0 1 0 0 0 N N - N N + * 0 1 0 0 1 N/A N/A N/A N/A N/A + * 0 1 0 1 0 N/A N/A N/A N/A N/A + * 0 1 0 1 1 N/A N/A N/A N/A N/A + * 0 1 1 X X N/A N/A N/A N/A N/A + * 1 X 0 0 0 N N - N Y + * 1 X 0 0 1 Y N - N Y + * 1 X 0 1 0 Y N - N Y + * 1 X 0 1 1 Y N - Y Y + * 1 X 1 0 0 N N - Y Y + * 1 X 1 0 1 Y Y WT Y Y + * 1 X 1 1 0 Y Y WT Y Y + * 1 X 1 1 1 Y Y WT Y Y + */ +#endif /* !_MACHINE_PTE_V4_H_ */ + +/* End of pte.h */ Property changes on: head/sys/arm/include/pte-v4.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/arm/include/pte-v6.h =================================================================== --- head/sys/arm/include/pte-v6.h (revision 295800) +++ head/sys/arm/include/pte-v6.h (revision 295801) @@ -1,301 +1,301 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ -#ifndef _MACHINE_PTE_H_ -#define _MACHINE_PTE_H_ +#ifndef _MACHINE_PTE_V6_H_ +#define _MACHINE_PTE_V6_H_ /* * Domain Types for the Domain Access Control Register. */ #define DOMAIN_FAULT 0x00 /* no access */ #define DOMAIN_CLIENT 0x01 /* client */ #define DOMAIN_RESERVED 0x02 /* reserved */ #define DOMAIN_MANAGER 0x03 /* manager */ /* * TEX remap register attributes */ #define PRRR_SO 0 /* Strongly ordered memory */ #define PRRR_DEV 1 /* Device memory */ #define PRRR_MEM 2 /* Normal memory */ #define PRRR_DS0 (1 << 16) /* Shared bit for Device, S = 0 */ #define PRRR_DS1 (1 << 17) /* Shared bit for Device, S = 1 */ #define PRRR_NS0 (1 << 18) /* Shared bit for Normal, S = 0 */ #define PRRR_NS1 (1 << 19) /* Shared bit for Normal, S = 1 */ #define PRRR_NOS_SHIFT 24 /* base shift for Not Outer Shared bits */ #define NMRR_NC 0 /* Noncacheable */ #define NMRR_WB_WA 1 /* Write Back, Write Allocate */ #define NMRR_WT 2 /* Write Through, Non-Write Allocate */ #define NMRR_WB 3 /* Write Back, Non-Write Allocate */ /* * * The ARM MMU is capable of mapping memory in the following chunks: * * 16M Supersections (L1 table) * * 1M Sections (L1 table) * * 64K Large Pages (L2 table) * * 4K Small Pages (L2 table) * * * Coarse Tables can map Large and Small Pages. * Coarse Tables are 1K in length. * * The Translation Table Base register holds the pointer to the * L1 Table. The L1 Table is a 16K contiguous chunk of memory * aligned to a 16K boundary. Each entry in the L1 Table maps * 1M of virtual address space, either via a Section mapping or * via an L2 Table. * */ #define L1_TABLE_SIZE 0x4000 /* 16K */ #define L1_ENTRIES 0x1000 /* 4K */ #define L2_TABLE_SIZE 0x0400 /* 1K */ #define L2_ENTRIES 0x0100 /* 256 */ /* ARMv6 super-sections. */
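As a minimal editorial sketch of the two-level walk this geometry implies (hypothetical helpers, not part of the change; the shift values 20 and 12 mirror the L1_S_SHIFT and L2_S_SHIFT values defined just below):

/* Hypothetical index computation for the 4096-entry L1 / 256-entry L2 walk. */
static inline uint32_t
ex_l1_index(uint32_t va)
{
	return (va >> 20);		/* one L1 entry per 1M of VA */
}

static inline uint32_t
ex_l2_index(uint32_t va)
{
	return ((va >> 12) & 0xff);	/* 256 small-page entries per coarse table */
}
/* e.g. va 0xc0123456 -> L1 index 0xc01, L2 index 0x23, page offset 0x456 */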
#define L1_SUP_SIZE 0x01000000 /* 16M */ #define L1_SUP_OFFSET (L1_SUP_SIZE - 1) #define L1_SUP_FRAME (~L1_SUP_OFFSET) #define L1_SUP_SHIFT 24 #define L1_S_SIZE 0x00100000 /* 1M */ #define L1_S_OFFSET (L1_S_SIZE - 1) #define L1_S_FRAME (~L1_S_OFFSET) #define L1_S_SHIFT 20 #define L2_L_SIZE 0x00010000 /* 64K */ #define L2_L_OFFSET (L2_L_SIZE - 1) #define L2_L_FRAME (~L2_L_OFFSET) #define L2_L_SHIFT 16 #define L2_S_SIZE 0x00001000 /* 4K */ #define L2_S_OFFSET (L2_S_SIZE - 1) #define L2_S_FRAME (~L2_S_OFFSET) #define L2_S_SHIFT 12 /* * ARM MMU L1 Descriptors */ #define L1_TYPE_INV 0x00 /* Invalid (fault) */ #define L1_TYPE_C 0x01 /* Coarse L2 */ #define L1_TYPE_S 0x02 /* Section */ #define L1_TYPE_MASK 0x03 /* Mask of type bits */ /* L1 Section Descriptor */ #define L1_S_B 0x00000004 /* bufferable Section */ #define L1_S_C 0x00000008 /* cacheable Section */ #define L1_S_NX 0x00000010 /* not executable */ #define L1_S_DOM(x) ((x) << 5) /* domain */ #define L1_S_DOM_MASK L1_S_DOM(0xf) #define L1_S_P 0x00000200 /* ECC enable for this section */ #define L1_S_AP(x) ((x) << 10) /* access permissions */ #define L1_S_AP0 0x00000400 /* access permissions bit 0 */ #define L1_S_AP1 0x00000800 /* access permissions bit 1 */ #define L1_S_TEX(x) ((x) << 12) /* type extension */ #define L1_S_TEX0 0x00001000 /* type extension bit 0 */ #define L1_S_TEX1 0x00002000 /* type extension bit 1 */ #define L1_S_TEX2 0x00004000 /* type extension bit 2 */ #define L1_S_AP2 0x00008000 /* access permissions bit 2 */ #define L1_S_SHARED 0x00010000 /* shared */ #define L1_S_NG 0x00020000 /* not global */ #define L1_S_SUPERSEC 0x00040000 /* Section is a super-section. */ #define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ /* L1 Coarse Descriptor */ #define L1_C_DOM(x) ((x) << 5) /* domain */ #define L1_C_DOM_MASK L1_C_DOM(0xf) #define L1_C_P 0x00000200 /* ECC enable for this section */ #define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ /* * ARM MMU L2 Descriptors */ #define L2_TYPE_INV 0x00 /* Invalid (fault) */ #define L2_TYPE_L 0x01 /* Large Page - 64k - not used yet */ #define L2_TYPE_S 0x02 /* Small Page - 4k */ #define L2_TYPE_MASK 0x03 #define L2_NX 0x00000001 /* Not executable */ #define L2_B 0x00000004 /* Bufferable page */ #define L2_C 0x00000008 /* Cacheable page */ #define L2_AP(x) ((x) << 4) #define L2_AP0 0x00000010 /* access permissions bit 0 */ #define L2_AP1 0x00000020 /* access permissions bit 1 */ #define L2_TEX(x) ((x) << 6) /* type extension */ #define L2_TEX0 0x00000040 /* type extension bit 0 */ #define L2_TEX1 0x00000080 /* type extension bit 1 */ #define L2_TEX2 0x00000100 /* type extension bit 2 */ #define L2_AP2 0x00000200 /* access permissions bit 2 */ #define L2_SHARED 0x00000400 /* shared */ #define L2_NG 0x00000800 /* not global */ /* * TEX classes encoding */ #define TEX1_CLASS_0 ( 0) #define TEX1_CLASS_1 ( L1_S_B) #define TEX1_CLASS_2 ( L1_S_C ) #define TEX1_CLASS_3 ( L1_S_C | L1_S_B) #define TEX1_CLASS_4 (L1_S_TEX0 ) #define TEX1_CLASS_5 (L1_S_TEX0 | L1_S_B) #define TEX1_CLASS_6 (L1_S_TEX0 | L1_S_C ) /* Reserved for ARM11 */ #define TEX1_CLASS_7 (L1_S_TEX0 | L1_S_C | L1_S_B) #define TEX2_CLASS_0 ( 0) #define TEX2_CLASS_1 ( L2_B) #define TEX2_CLASS_2 ( L2_C ) #define TEX2_CLASS_3 ( L2_C | L2_B) #define TEX2_CLASS_4 (L2_TEX0 ) #define TEX2_CLASS_5 (L2_TEX0 | L2_B) #define TEX2_CLASS_6 (L2_TEX0 | L2_C ) /* Reserved for ARM11 */ #define TEX2_CLASS_7 (L2_TEX0 | L2_C | L2_B) /* L1 table definitions.
*/ #define NB_IN_PT1 L1_TABLE_SIZE #define NPTE1_IN_PT1 L1_ENTRIES /* L2 table definitions. */ #define NB_IN_PT2 L2_TABLE_SIZE #define NPTE2_IN_PT2 L2_ENTRIES /* * Map memory attributes to TEX classes */ #define PTE2_ATTR_WB_WA TEX2_CLASS_0 #define PTE2_ATTR_NOCACHE TEX2_CLASS_1 #define PTE2_ATTR_DEVICE TEX2_CLASS_2 #define PTE2_ATTR_SO TEX2_CLASS_3 #define PTE2_ATTR_WT TEX2_CLASS_4 /* * Software defined bits for L1 descriptors * - L1_AP0 is used as page accessed bit * - L1_AP2 (RO / not RW) is used as page not modified bit * - L1_TEX1 is used as software emulated RO bit */ #define PTE1_V L1_TYPE_S /* Valid bit */ #define PTE1_A L1_S_AP0 /* Accessed - software emulated */ #define PTE1_NM L1_S_AP2 /* not modified bit - software emulated * used as real write enable bit */ #define PTE1_M 0 /* Modified (dummy) */ #define PTE1_S L1_S_SHARED /* Shared */ #define PTE1_NG L1_S_NG /* Not global */ #define PTE1_G 0 /* Global (dummy) */ #define PTE1_NX L1_S_NX /* Not executable */ #define PTE1_X 0 /* Executable (dummy) */ #define PTE1_RO L1_S_TEX1 /* Read Only */ #define PTE1_RW 0 /* Read-Write (dummy) */ #define PTE1_U L1_S_AP1 /* User */ #define PTE1_NU 0 /* Not user (kernel only) (dummy) */ #define PTE1_W L1_S_TEX2 /* Wired */ #define PTE1_SHIFT L1_S_SHIFT #define PTE1_SIZE L1_S_SIZE #define PTE1_OFFSET L1_S_OFFSET #define PTE1_FRAME L1_S_FRAME #define PTE1_ATTR_MASK (L1_S_TEX0 | L1_S_C | L1_S_B) #define PTE1_AP_KR (PTE1_RO | PTE1_NM) #define PTE1_AP_KRW 0 #define PTE1_AP_KRUR (PTE1_RO | PTE1_NM | PTE1_U) #define PTE1_AP_KRWURW PTE1_U /* * PTE1 descriptors creation macros. */ #define PTE1_PA(pa) ((pa) & PTE1_FRAME) #define PTE1_AP_COMMON (PTE1_V | PTE1_S) #define PTE1(pa, ap, attr) (PTE1_PA(pa) | (ap) | (attr) | PTE1_AP_COMMON) #define PTE1_KERN(pa, ap, attr) PTE1(pa, (ap) | PTE1_A | PTE1_G, attr) #define PTE1_KERN_NG(pa, ap, attr) PTE1(pa, (ap) | PTE1_A | PTE1_NG, attr) #define PTE1_LINK(pa) (((pa) & L1_C_ADDR_MASK) | L1_TYPE_C) /* * Software defined bits for L2 descriptors * - L2_AP0 is used as page accessed bit * - L2_AP2 (RO / not RW) is used as page not modified bit * - L2_TEX1 is used as software emulated RO bit */ #define PTE2_V L2_TYPE_S /* Valid bit */ #define PTE2_A L2_AP0 /* Accessed - software emulated */ #define PTE2_NM L2_AP2 /* not modified bit - software emulated * used as real write enable bit */ #define PTE2_M 0 /* Modified (dummy) */ #define PTE2_S L2_SHARED /* Shared */ #define PTE2_NG L2_NG /* Not global */ #define PTE2_G 0 /* Global (dummy) */ #define PTE2_NX L2_NX /* Not executable */ #define PTE2_X 0 /* Executable (dummy) */ #define PTE2_RO L2_TEX1 /* Read Only */ #define PTE2_U L2_AP1 /* User */ #define PTE2_NU 0 /* Not user (kernel only) (dummy) */ #define PTE2_W L2_TEX2 /* Wired */ #define PTE2_SHIFT L2_S_SHIFT #define PTE2_SIZE L2_S_SIZE #define PTE2_OFFSET L2_S_OFFSET #define PTE2_FRAME L2_S_FRAME #define PTE2_ATTR_MASK (L2_TEX0 | L2_C | L2_B) #define PTE2_AP_KR (PTE2_RO | PTE2_NM) #define PTE2_AP_KRW 0 #define PTE2_AP_KRUR (PTE2_RO | PTE2_NM | PTE2_U) #define PTE2_AP_KRWURW PTE2_U
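A hypothetical sketch of how the emulated accessed/modified bits above behave when the pmap handles the first write fault on a mapping (ex_pte2_on_write_fault is an invented helper, not part of this change):

static inline uint32_t
ex_pte2_on_write_fault(uint32_t pte2)
{
	pte2 |= PTE2_A;		/* the page has now been referenced */
	pte2 &= ~PTE2_NM;	/* clearing "not modified" is the real write enable */
	return (pte2);
}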
/* * PTE2 descriptors creation macros. */ #define PTE2_PA(pa) ((pa) & PTE2_FRAME) #define PTE2_AP_COMMON (PTE2_V | PTE2_S) #define PTE2(pa, ap, attr) (PTE2_PA(pa) | (ap) | (attr) | PTE2_AP_COMMON) #define PTE2_KERN(pa, ap, attr) PTE2(pa, (ap) | PTE2_A | PTE2_G, attr) #define PTE2_KERN_NG(pa, ap, attr) PTE2(pa, (ap) | PTE2_A | PTE2_NG, attr) // ----------------- TO BE DELETED --------------------------------------------- /* * sys/arm/arm/elf_trampoline.c */ #define AP_KRW 0x01 /* kernel read/write */ // ----------------------------------------------------------------------------- -#endif /* !_MACHINE_PTE_H_ */ +#endif /* !_MACHINE_PTE_V6_H_ */
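To close, a small illustration of how the creation macros compose (editorial sketch; the addresses, the uint32_t type, and the chosen attributes are illustrative assumptions, not values from the commit):

/* Hypothetical descriptors built with the macros above. */
uint32_t ex_pte1 = PTE1_KERN(0x80100000, PTE1_AP_KRW, TEX1_CLASS_0);
					/* global 1M kernel section, RW */
uint32_t ex_pte2 = PTE2_KERN(0x80234000, PTE2_AP_KRW, PTE2_ATTR_WB_WA);
					/* global 4K kernel page, RW, write-back */

Both expand to a valid, shared, accessed, global kernel mapping with the requested access permissions and memory attributes.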