Index: head/sys/arm/arm/bus_space_generic.c =================================================================== --- head/sys/arm/arm/bus_space_generic.c (revision 295693) +++ head/sys/arm/arm/bus_space_generic.c (revision 295694) @@ -1,133 +1,133 @@ /* $NetBSD: obio_space.c,v 1.6 2003/07/15 00:25:05 lukem Exp $ */ /*- * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include void generic_bs_unimplemented(void) { panic("unimplemented bus_space function called"); } /* Prototypes for all the bus_space structure functions */ bs_protos(generic); int generic_bs_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags, bus_space_handle_t *bshp) { void *va; /* * We don't even examine the passed-in flags. For ARM, the CACHEABLE - * flag doesn't make sense (we create PTE_DEVICE mappings), and the - * LINEAR flag is just implied because we use kva_alloc(size). + * flag doesn't make sense (we create VM_MEMATTR_DEVICE mappings), and + * the LINEAR flag is just implied because we use kva_alloc(size). 
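 *
 * A minimal usage sketch (hedged: the physical address and size are
 * hypothetical, and this implementation never examines the tag):
 *
 *	bus_space_handle_t bsh;
 *	int error;
 *
 *	error = generic_bs_map(tag, 0x48000000, 0x1000, 0, &bsh);
 *	if (error != 0)
 *		return (error);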
*/ if ((va = pmap_mapdev(bpa, size)) == NULL) return (ENOMEM); *bshp = (bus_space_handle_t)va; return (0); } int generic_bs_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend, bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags, bus_addr_t *bpap, bus_space_handle_t *bshp) { panic("generic_bs_alloc(): not implemented"); } void generic_bs_unmap(bus_space_tag_t t, bus_space_handle_t h, bus_size_t size) { pmap_unmapdev((vm_offset_t)h, size); } void generic_bs_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size) { panic("generic_bs_free(): not implemented"); } int generic_bs_subregion(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp) { *nbshp = bsh + offset; return (0); } void generic_bs_barrier(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t offset, bus_size_t len, int flags) { /* * dsb() will drain the L1 write buffer and establish a memory access * barrier point on platforms where that has meaning. On a write we * also need to drain the L2 write buffer, because most on-chip memory * mapped devices are downstream of the L2 cache. Note that this needs * to be done even for memory mapped as Device type, because while * Device memory is not cached, writes to it are still buffered. */ dsb(); if (flags & BUS_SPACE_BARRIER_WRITE) { cpu_l2cache_drain_writebuf(); } } Index: head/sys/arm/arm/devmap.c =================================================================== --- head/sys/arm/arm/devmap.c (revision 295693) +++ head/sys/arm/arm/devmap.c (revision 295694) @@ -1,335 +1,333 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Routines for mapping device memory. * * This is used on both arm and arm64. */ #include "opt_ddb.h" #include #include #include #include #include #ifdef __arm__ #include #endif #include #include #include static const struct arm_devmap_entry *devmap_table; static boolean_t devmap_bootstrap_done = false; #if defined(__aarch64__) #define MAX_VADDR VM_MAX_KERNEL_ADDRESS -#define PTE_DEVICE VM_MEMATTR_DEVICE #elif defined(__arm__) #define MAX_VADDR ARM_VECTORS_HIGH #endif /* * The allocated-kva (akva) devmap table and metadata. 
Platforms can call * arm_devmap_add_entry() to add static device mappings to this table using * automatically allocated virtual addresses carved out of the top of kva space. * Allocation begins immediately below the ARM_VECTORS_HIGH address. */ #define AKVA_DEVMAP_MAX_ENTRIES 32 static struct arm_devmap_entry akva_devmap_entries[AKVA_DEVMAP_MAX_ENTRIES]; static u_int akva_devmap_idx; static vm_offset_t akva_devmap_vaddr = MAX_VADDR; #ifdef __aarch64__ extern int early_boot; #endif /* * Print the contents of the static mapping table using the provided printf-like * output function (which will be either printf or db_printf). */ static void devmap_dump_table(int (*prfunc)(const char *, ...)) { const struct arm_devmap_entry *pd; if (devmap_table == NULL || devmap_table[0].pd_size == 0) { prfunc("No static device mappings.\n"); return; } prfunc("Static device mappings:\n"); for (pd = devmap_table; pd->pd_size != 0; ++pd) { prfunc(" 0x%08x - 0x%08x mapped at VA 0x%08x\n", pd->pd_pa, pd->pd_pa + pd->pd_size - 1, pd->pd_va); } } /* * Print the contents of the static mapping table. Used for bootverbose. */ void arm_devmap_print_table() { devmap_dump_table(printf); } /* * Return the "last" kva address used by the registered devmap table. It's * actually the lowest address used by the static mappings, i.e., the address of * the first unusable byte of KVA. */ vm_offset_t arm_devmap_lastaddr() { const struct arm_devmap_entry *pd; vm_offset_t lowaddr; if (akva_devmap_idx > 0) return (akva_devmap_vaddr); lowaddr = MAX_VADDR; for (pd = devmap_table; pd != NULL && pd->pd_size != 0; ++pd) { if (lowaddr > pd->pd_va) lowaddr = pd->pd_va; } return (lowaddr); } /* * Add an entry to the internal "akva" static devmap table using the given * physical address and size and a virtual address allocated from the top of * kva. This automatically registers the akva table on the first call, so all a * platform has to do is call this routine to install as many mappings as it * needs and when initarm() calls arm_devmap_bootstrap() it will pick up all the * entries in the akva table automatically. */ void arm_devmap_add_entry(vm_paddr_t pa, vm_size_t sz) { struct arm_devmap_entry *m; if (devmap_bootstrap_done) panic("arm_devmap_add_entry() after arm_devmap_bootstrap()"); if (akva_devmap_idx == (AKVA_DEVMAP_MAX_ENTRIES - 1)) panic("AKVA_DEVMAP_MAX_ENTRIES is too small"); if (akva_devmap_idx == 0) arm_devmap_register_table(akva_devmap_entries); /* * Allocate virtual address space from the top of kva downwards. If the * range being mapped is aligned and sized to 1MB boundaries then also * align the virtual address to the next-lower 1MB boundary so that we * end up with a nice efficient section mapping. */ #ifdef __arm__ if ((pa & 0x000fffff) == 0 && (sz & 0x000fffff) == 0) { akva_devmap_vaddr = trunc_1mpage(akva_devmap_vaddr - sz); } else #endif { akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - sz); } m = &akva_devmap_entries[akva_devmap_idx++]; m->pd_va = akva_devmap_vaddr; m->pd_pa = pa; m->pd_size = sz; - m->pd_prot = VM_PROT_READ | VM_PROT_WRITE; - m->pd_cache = PTE_DEVICE; } /* * Register the given table as the one to use in arm_devmap_bootstrap(). */ void arm_devmap_register_table(const struct arm_devmap_entry *table) { devmap_table = table; } /* * Map all of the static regions in the devmap table, and remember the devmap * table so the mapdev, ptov, and vtop functions can do lookups later. * * If a non-NULL table pointer is given it is used unconditionally, otherwise * the previously-registered table is used. 
This smooths transition from legacy * code that fills in a local table then calls this function passing that table, * and newer code that uses arm_devmap_register_table() in platform-specific * code, then lets the common initarm() call this function with a NULL pointer. */ void arm_devmap_bootstrap(vm_offset_t l1pt, const struct arm_devmap_entry *table) { const struct arm_devmap_entry *pd; devmap_bootstrap_done = true; /* * If given a table pointer, use it. Otherwise, if a table was * previously registered, use it. Otherwise, no work to do. */ if (table != NULL) devmap_table = table; else if (devmap_table == NULL) return; for (pd = devmap_table; pd->pd_size != 0; ++pd) { #if defined(__arm__) #if __ARM_ARCH >= 6 pmap_preboot_map_attr(pd->pd_pa, pd->pd_va, pd->pd_size, - pd->pd_prot, pd->pd_cache); + VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE); #else pmap_map_chunk(l1pt, pd->pd_va, pd->pd_pa, pd->pd_size, - pd->pd_prot, pd->pd_cache); + VM_PROT_READ | VM_PROT_WRITE, PTE_DEVICE); #endif #elif defined(__aarch64__) pmap_kenter_device(pd->pd_va, pd->pd_size, pd->pd_pa); #endif } } /* * Look up the given physical address in the static mapping data and return the * corresponding virtual address, or NULL if not found. */ void * arm_devmap_ptov(vm_paddr_t pa, vm_size_t size) { const struct arm_devmap_entry *pd; if (devmap_table == NULL) return (NULL); for (pd = devmap_table; pd->pd_size != 0; ++pd) { if (pa >= pd->pd_pa && pa + size <= pd->pd_pa + pd->pd_size) return ((void *)(pd->pd_va + (pa - pd->pd_pa))); } return (NULL); } /* * Look up the given virtual address in the static mapping data and return the * corresponding physical address, or DEVMAP_PADDR_NOTFOUND if not found. */ vm_paddr_t arm_devmap_vtop(void * vpva, vm_size_t size) { const struct arm_devmap_entry *pd; vm_offset_t va; if (devmap_table == NULL) return (DEVMAP_PADDR_NOTFOUND); va = (vm_offset_t)vpva; for (pd = devmap_table; pd->pd_size != 0; ++pd) { if (va >= pd->pd_va && va + size <= pd->pd_va + pd->pd_size) return ((vm_paddr_t)(pd->pd_pa + (va - pd->pd_va))); } return (DEVMAP_PADDR_NOTFOUND); } /* * Map a set of physical memory pages into the kernel virtual address space. * Return a pointer to where it is mapped. * * This uses a pre-established static mapping if one exists for the requested * range, otherwise it allocates kva space and maps the physical pages into it. * * This routine is intended to be used for mapping device memory, NOT real - * memory; the mapping type is inherently PTE_DEVICE in pmap_kenter_device(). + * memory; the mapping type is inherently VM_MEMATTR_DEVICE in + * pmap_kenter_device(). */ void * pmap_mapdev(vm_offset_t pa, vm_size_t size) { vm_offset_t va, offset; void * rva; /* First look in the static mapping table. */ if ((rva = arm_devmap_ptov(pa, size)) != NULL) return (rva); offset = pa & PAGE_MASK; pa = trunc_page(pa); size = round_page(size + offset); #ifdef __aarch64__ if (early_boot) { akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size); va = akva_devmap_vaddr; KASSERT(va >= VM_MAX_KERNEL_ADDRESS - L2_SIZE, ("Too many early devmap mappings")); } else #endif va = kva_alloc(size); if (!va) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); pmap_kenter_device(va, size, pa); return ((void *)(va + offset)); } /* * Unmap device memory and free the kva space. */ void pmap_unmapdev(vm_offset_t va, vm_size_t size) { vm_offset_t offset; /* Nothing to do if we find the mapping in the static table. 
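 * Static mappings are established at boot and never torn down, and their
 * VA range did not come from kva_alloc(), so neither pmap_kremove_device()
 * nor kva_free() applies to them. A hedged sketch of the intended pairing
 * (hypothetical address and size):
 *
 *	void *regs = pmap_mapdev(0x48000000, 0x1000);
 *	(... device register accesses through regs ...)
 *	pmap_unmapdev((vm_offset_t)regs, 0x1000);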
 */
	if (arm_devmap_vtop((void*)va, size) != DEVMAP_PADDR_NOTFOUND)
		return;

	offset = va & PAGE_MASK;
	va = trunc_page(va);
	size = round_page(size + offset);

	pmap_kremove_device(va, size);
	kva_free(va, size);
}

#ifdef DDB
#include

DB_SHOW_COMMAND(devmap, db_show_devmap)
{
	devmap_dump_table(db_printf);
}

#endif /* DDB */

Index: head/sys/arm/at91/at91_machdep.c
===================================================================
--- head/sys/arm/at91/at91_machdep.c	(revision 295693)
+++ head/sys/arm/at91/at91_machdep.c	(revision 295694)
@@ -1,697 +1,685 @@
/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * machdep.c
 *
 * Machine dependent functions for kernel setup
 *
 * This file needs a lot of work.
 *
 * Created : 17/09/94
 */

#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include
__FBSDID("$FreeBSD$");

#define _ARM32_BUS_DMA_PRIVATE
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include

#ifndef MAXCPU
#define MAXCPU 1
#endif

/* Page table for mapping proc0 zero page */
#define KERNEL_PT_SYS		0
#define KERNEL_PT_KERN		1
#define KERNEL_PT_KERN_NUM	22
/* L2 table for mapping after kernel */
#define KERNEL_PT_AFKERNEL	KERNEL_PT_KERN + KERNEL_PT_KERN_NUM
#define KERNEL_PT_AFKERNEL_NUM	5

/* this should be evenly divisible by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */
#define NUM_KERNEL_PTS		(KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM)

struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];

/* Static device mappings.
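 *
 * With this change an arm_devmap_entry carries only { pd_va, pd_pa,
 * pd_size }: every entry is implicitly mapped read/write with
 * VM_MEMATTR_DEVICE, so each row below is just a va/pa/size triple and the
 * table ends with an all-zero entry.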
*/ const struct arm_devmap_entry at91_devmap[] = { /* * Map the critical on-board devices. The interrupt vector at * 0xffff0000 makes it impossible to map them PA == VA, so we map all * 0xfffxxxxx addresses to 0xdffxxxxx. This covers all critical devices * on all members of the AT91SAM9 and AT91RM9200 families. */ { 0xdff00000, 0xfff00000, 0x00100000, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, /* There's a notion that we should do the rest of these lazily. */ /* * We can't just map the OHCI registers VA == PA, because * AT91xx_xxx_BASE belongs to the userland address space. * We could just choose a different virtual address, but a better * solution would probably be to just use pmap_mapdev() to allocate * KVA, as we don't need the OHCI controller before the vm * initialization is done. However, the AT91 resource allocation * system doesn't know how to use pmap_mapdev() yet. * Care must be taken to ensure PA and VM address do not overlap * between entries. */ { /* * Add the ohci controller, and anything else that might be * on this chip select for a VA/PA mapping. */ /* Internal Memory 1MB */ AT91RM92_OHCI_VA_BASE, AT91RM92_OHCI_BASE, 0x00100000, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, { /* CompactFlash controller. Portion of EBI CS4 1MB */ AT91RM92_CF_VA_BASE, AT91RM92_CF_BASE, 0x00100000, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, /* * The next two should be good for the 9260, 9261 and 9G20 since * addresses mapping is the same. */ { /* Internal Memory 1MB */ AT91SAM9G20_OHCI_VA_BASE, AT91SAM9G20_OHCI_BASE, 0x00100000, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, { /* EBI CS3 256MB */ AT91SAM9G20_NAND_VA_BASE, AT91SAM9G20_NAND_BASE, AT91SAM9G20_NAND_SIZE, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, /* * The next should be good for the 9G45. */ { /* Internal Memory 1MB */ AT91SAM9G45_OHCI_VA_BASE, AT91SAM9G45_OHCI_BASE, 0x00100000, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, - { 0, 0, 0, 0, 0, } + { 0, 0, 0, } }; #ifdef LINUX_BOOT_ABI extern int membanks; extern int memstart[]; extern int memsize[]; #endif long at91_ramsize(void) { uint32_t cr, mdr, mr, *SDRAMC; int banks, rows, cols, bw; #ifdef LINUX_BOOT_ABI /* * If we found any ATAGs that were for memory, return the first bank. */ if (membanks > 0) return (memsize[0]); #endif if (at91_is_rm92()) { SDRAMC = (uint32_t *)(AT91_BASE + AT91RM92_SDRAMC_BASE); cr = SDRAMC[AT91RM92_SDRAMC_CR / 4]; mr = SDRAMC[AT91RM92_SDRAMC_MR / 4]; banks = (cr & AT91RM92_SDRAMC_CR_NB_4) ? 2 : 1; rows = ((cr & AT91RM92_SDRAMC_CR_NR_MASK) >> 2) + 11; cols = (cr & AT91RM92_SDRAMC_CR_NC_MASK) + 8; bw = (mr & AT91RM92_SDRAMC_MR_DBW_16) ? 1 : 2; } else if (at91_cpu_is(AT91_T_SAM9G45)) { SDRAMC = (uint32_t *)(AT91_BASE + AT91SAM9G45_DDRSDRC0_BASE); cr = SDRAMC[AT91SAM9G45_DDRSDRC_CR / 4]; mdr = SDRAMC[AT91SAM9G45_DDRSDRC_MDR / 4]; banks = 0; rows = ((cr & AT91SAM9G45_DDRSDRC_CR_NR_MASK) >> 2) + 11; cols = (cr & AT91SAM9G45_DDRSDRC_CR_NC_MASK) + 8; bw = (mdr & AT91SAM9G45_DDRSDRC_MDR_DBW_16) ? 1 : 2; /* Fix the calculation for DDR memory */ mdr &= AT91SAM9G45_DDRSDRC_MDR_MASK; if (mdr & AT91SAM9G45_DDRSDRC_MDR_LPDDR1 || mdr & AT91SAM9G45_DDRSDRC_MDR_DDR2) { /* The cols value is 1 higher for DDR */ cols += 1; /* DDR has 4 internal banks. */ banks = 2; } } else { /* * This should be good for the 9260, 9261, 9G20, 9G35 and 9X25 * as addresses and registers are the same. 
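 *
 * As a worked example of the size formula below (illustrative values, not
 * read from real hardware): cols = 9, rows = 13, banks = 2 (two bank bits,
 * i.e. four internal banks) and bw = 2 (32-bit bus) give
 * 1 << (9 + 13 + 2 + 2) = 64MB.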
*/ SDRAMC = (uint32_t *)(AT91_BASE + AT91SAM9G20_SDRAMC_BASE); cr = SDRAMC[AT91SAM9G20_SDRAMC_CR / 4]; mr = SDRAMC[AT91SAM9G20_SDRAMC_MR / 4]; banks = (cr & AT91SAM9G20_SDRAMC_CR_NB_4) ? 2 : 1; rows = ((cr & AT91SAM9G20_SDRAMC_CR_NR_MASK) >> 2) + 11; cols = (cr & AT91SAM9G20_SDRAMC_CR_NC_MASK) + 8; bw = (cr & AT91SAM9G20_SDRAMC_CR_DBW_16) ? 1 : 2; } return (1 << (cols + rows + banks + bw)); } static const char *soc_type_name[] = { [AT91_T_CAP9] = "at91cap9", [AT91_T_RM9200] = "at91rm9200", [AT91_T_SAM9260] = "at91sam9260", [AT91_T_SAM9261] = "at91sam9261", [AT91_T_SAM9263] = "at91sam9263", [AT91_T_SAM9G10] = "at91sam9g10", [AT91_T_SAM9G20] = "at91sam9g20", [AT91_T_SAM9G45] = "at91sam9g45", [AT91_T_SAM9N12] = "at91sam9n12", [AT91_T_SAM9RL] = "at91sam9rl", [AT91_T_SAM9X5] = "at91sam9x5", [AT91_T_NONE] = "UNKNOWN" }; static const char *soc_subtype_name[] = { [AT91_ST_NONE] = "UNKNOWN", [AT91_ST_RM9200_BGA] = "at91rm9200_bga", [AT91_ST_RM9200_PQFP] = "at91rm9200_pqfp", [AT91_ST_SAM9XE] = "at91sam9xe", [AT91_ST_SAM9G45] = "at91sam9g45", [AT91_ST_SAM9M10] = "at91sam9m10", [AT91_ST_SAM9G46] = "at91sam9g46", [AT91_ST_SAM9M11] = "at91sam9m11", [AT91_ST_SAM9G15] = "at91sam9g15", [AT91_ST_SAM9G25] = "at91sam9g25", [AT91_ST_SAM9G35] = "at91sam9g35", [AT91_ST_SAM9X25] = "at91sam9x25", [AT91_ST_SAM9X35] = "at91sam9x35", }; struct at91_soc_info soc_info; /* * Read the SoC ID from the CIDR register and try to match it against the * values we know. If we find a good one, we return true. If not, we * return false. When we find a good one, we also find the subtype * and CPU family. */ static int at91_try_id(uint32_t dbgu_base) { uint32_t socid; soc_info.cidr = *(volatile uint32_t *)(AT91_BASE + dbgu_base + DBGU_C1R); socid = soc_info.cidr & ~AT91_CPU_VERSION_MASK; soc_info.type = AT91_T_NONE; soc_info.subtype = AT91_ST_NONE; soc_info.family = (soc_info.cidr & AT91_CPU_FAMILY_MASK) >> 20; soc_info.exid = *(volatile uint32_t *)(AT91_BASE + dbgu_base + DBGU_C2R); switch (socid) { case AT91_CPU_CAP9: soc_info.type = AT91_T_CAP9; break; case AT91_CPU_RM9200: soc_info.type = AT91_T_RM9200; break; case AT91_CPU_SAM9XE128: case AT91_CPU_SAM9XE256: case AT91_CPU_SAM9XE512: case AT91_CPU_SAM9260: soc_info.type = AT91_T_SAM9260; if (soc_info.family == AT91_FAMILY_SAM9XE) soc_info.subtype = AT91_ST_SAM9XE; break; case AT91_CPU_SAM9261: soc_info.type = AT91_T_SAM9261; break; case AT91_CPU_SAM9263: soc_info.type = AT91_T_SAM9263; break; case AT91_CPU_SAM9G10: soc_info.type = AT91_T_SAM9G10; break; case AT91_CPU_SAM9G20: soc_info.type = AT91_T_SAM9G20; break; case AT91_CPU_SAM9G45: soc_info.type = AT91_T_SAM9G45; break; case AT91_CPU_SAM9N12: soc_info.type = AT91_T_SAM9N12; break; case AT91_CPU_SAM9RL64: soc_info.type = AT91_T_SAM9RL; break; case AT91_CPU_SAM9X5: soc_info.type = AT91_T_SAM9X5; break; default: return (0); } switch (soc_info.type) { case AT91_T_SAM9G45: switch (soc_info.exid) { case AT91_EXID_SAM9G45: soc_info.subtype = AT91_ST_SAM9G45; break; case AT91_EXID_SAM9G46: soc_info.subtype = AT91_ST_SAM9G46; break; case AT91_EXID_SAM9M10: soc_info.subtype = AT91_ST_SAM9M10; break; case AT91_EXID_SAM9M11: soc_info.subtype = AT91_ST_SAM9M11; break; } break; case AT91_T_SAM9X5: switch (soc_info.exid) { case AT91_EXID_SAM9G15: soc_info.subtype = AT91_ST_SAM9G15; break; case AT91_EXID_SAM9G25: soc_info.subtype = AT91_ST_SAM9G25; break; case AT91_EXID_SAM9G35: soc_info.subtype = AT91_ST_SAM9G35; break; case AT91_EXID_SAM9X25: soc_info.subtype = AT91_ST_SAM9X25; break; case AT91_EXID_SAM9X35: soc_info.subtype = 
AT91_ST_SAM9X35; break; } break; default: break; } /* * Disable interrupts in the DBGU unit... */ *(volatile uint32_t *)(AT91_BASE + dbgu_base + USART_IDR) = 0xffffffff; /* * Save the name for later... */ snprintf(soc_info.name, sizeof(soc_info.name), "%s%s%s", soc_type_name[soc_info.type], soc_info.subtype == AT91_ST_NONE ? "" : " subtype ", soc_info.subtype == AT91_ST_NONE ? "" : soc_subtype_name[soc_info.subtype]); /* * try to get the matching CPU support. */ soc_info.soc_data = at91_match_soc(soc_info.type, soc_info.subtype); soc_info.dbgu_base = AT91_BASE + dbgu_base; return (1); } void at91_soc_id(void) { if (!at91_try_id(AT91_DBGU0)) at91_try_id(AT91_DBGU1); } #ifdef ARM_MANY_BOARD /* likely belongs in arm/arm/machdep.c, but since board_init is still at91 only... */ SET_DECLARE(arm_board_set, const struct arm_board); /* Not yet fully functional, but enough to build ATMEL config */ static long board_init(void) { return -1; } #endif #ifndef FDT /* Physical and virtual addresses for some global pages */ struct pv_addr msgbufpv; struct pv_addr kernelstack; struct pv_addr systempage; struct pv_addr irqstack; struct pv_addr abtstack; struct pv_addr undstack; void * initarm(struct arm_boot_params *abp) { struct pv_addr kernel_l1pt; struct pv_addr dpcpu; int i; u_int l1pagetable; vm_offset_t freemempos; vm_offset_t afterkern; uint32_t memsize; vm_offset_t lastaddr; lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; set_cpufuncs(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_va, (np)); \ (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos += PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); for (i = 0; i < NUM_KERNEL_PTS; ++i) { if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[i], L2_TABLE_SIZE / PAGE_SIZE); } else { kernel_pt_table[i].pv_va = freemempos - (i % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * L2_TABLE_SIZE_REAL; kernel_pt_table[i].pv_pa = kernel_pt_table[i].pv_va - KERNVIRTADDR + abp->abp_physaddr; } } /* * Allocate a page for the system page mapped to 0x00000000 * or 0xffff0000. This page will just contain the system vectors * and can be shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. */ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU); valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU); valloc_pages(undstack, UND_STACK_SIZE * MAXCPU); valloc_pages(kernelstack, kstack_pages * MAXCPU); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. 
* This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* Map the L2 pages tables in the L1 page table */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, &kernel_pt_table[KERNEL_PT_SYS]); for (i = 0; i < KERNEL_PT_KERN_NUM; i++) pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE, &kernel_pt_table[KERNEL_PT_KERN + i]); pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, (((uint32_t)lastaddr - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1), VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE - 1)); for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) { pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE, &kernel_pt_table[KERNEL_PT_AFKERNEL + i]); } /* Map the vector page. */ pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the DPCPU pages */ pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, DPCPU_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the stack pages */ pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa, IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa, ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa, UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa, kstack_pages * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa, msgbufsize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); for (i = 0; i < NUM_KERNEL_PTS; ++i) { pmap_map_chunk(l1pagetable, kernel_pt_table[i].pv_va, kernel_pt_table[i].pv_pa, L2_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); } arm_devmap_bootstrap(l1pagetable, at91_devmap); cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT); cpu_setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)); at91_soc_id(); /* * Initialize all the clocks, so that the console can work. We can only * do this if at91_soc_id() was able to fill in the support data. Even * if we can't init the clocks, still try to do a console init so we can * try to print the error message about missing soc support. There's a * chance the printf will work if the bootloader set up the DBGU. */ if (soc_info.soc_data != NULL) { soc_info.soc_data->soc_clock_init(); at91_pmc_init_clock(); } cninit(); if (soc_info.soc_data == NULL) printf("Warning: No soc support for %s found.\n", soc_info.name); memsize = board_init(); if (memsize == -1) { printf("board_init() failed, cannot determine ram size; " "assuming 16MB\n"); memsize = 16 * 1024 * 1024; } /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE); cpu_setup(); set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. 
This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ cpu_idcache_wbinv_all(); undefined_init(); init_proc0(kernelstack.pv_va); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_curmaxkvaddr = afterkern + L1_S_SIZE * (KERNEL_PT_KERN_NUM - 1); /* Always use the 256MB of KVA we have available between the kernel and devices */ vm_max_kernel_address = KERNVIRTADDR + (256 << 20); pmap_bootstrap(freemempos, &kernel_l1pt); msgbufp = (void*)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Add the physical ram we have available. * * Exclude the kernel, and all the things we allocated which immediately * follow the kernel, from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_hardware_region(PHYSADDR, memsize); arm_physmem_exclude_region(abp->abp_physaddr, virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } #endif /* * These functions are handled elsewhere, so make them nops here. */ void cpu_startprofclock(void) { } void cpu_stopprofclock(void) { } void cpu_initclocks(void) { } void DELAY(int n) { if (soc_info.soc_data) soc_info.soc_data->soc_delay(n); } void cpu_reset(void) { if (soc_info.soc_data) soc_info.soc_data->soc_reset(); while (1) continue; } Index: head/sys/arm/cavium/cns11xx/econa_machdep.c =================================================================== --- head/sys/arm/cavium/cns11xx/econa_machdep.c (revision 295693) +++ head/sys/arm/cavium/cns11xx/econa_machdep.c (revision 295694) @@ -1,345 +1,335 @@ /*- * Copyright (c) 2009 Yohanes Nugroho * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"

#define _ARM32_BUS_DMA_PRIVATE
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
#include "econa_reg.h"

/* Page table for mapping proc0 zero page */
#define KERNEL_PT_SYS		0
#define KERNEL_PT_KERN		1
#define KERNEL_PT_KERN_NUM	22
/* L2 table for mapping after kernel */
#define KERNEL_PT_AFKERNEL	KERNEL_PT_KERN + KERNEL_PT_KERN_NUM
#define KERNEL_PT_AFKERNEL_NUM	5

/* this should be evenly divisible by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */
#define NUM_KERNEL_PTS		(KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM)

struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];

/* Physical and virtual addresses for some global pages */
struct pv_addr systempage;
struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
struct pv_addr kernelstack;

/* Static device mappings. */
static const struct arm_devmap_entry econa_devmap[] = {
	{
		/*
		 * This maps DDR SDRAM
		 */
		ECONA_SDRAM_BASE, /*virtual*/
		ECONA_SDRAM_BASE, /*physical*/
		ECONA_SDRAM_SIZE, /*size*/
-		VM_PROT_READ|VM_PROT_WRITE,
-		PTE_DEVICE,
	},
	/*
	 * Map the on-board devices VA == PA so that we can access them
	 * with the MMU on or off.
	 */
	{
		/*
		 * This maps the interrupt controller, the UART
		 * and the timer.
*/ ECONA_IO_BASE, /*virtual*/ ECONA_IO_BASE, /*physical*/ ECONA_IO_SIZE, /*size*/ - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, { /* * OHCI + EHCI */ ECONA_OHCI_VBASE, /*virtual*/ ECONA_OHCI_PBASE, /*physical*/ ECONA_USB_SIZE, /*size*/ - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, { /* * CFI */ ECONA_CFI_VBASE, /*virtual*/ ECONA_CFI_PBASE, /*physical*/ ECONA_CFI_SIZE, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, { - 0, - 0, 0, 0, 0, } }; void * initarm(struct arm_boot_params *abp) { struct pv_addr kernel_l1pt; volatile uint32_t * ddr = (uint32_t *)0x4000000C; int loop, i; u_int l1pagetable; vm_offset_t afterkern; vm_offset_t freemempos; vm_offset_t lastaddr; uint32_t memsize; int mem_info; boothowto = RB_VERBOSE; lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; set_cpufuncs(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_va, (np)); \ (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos += PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[loop], L2_TABLE_SIZE / PAGE_SIZE); } else { kernel_pt_table[loop].pv_va = freemempos - (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * L2_TABLE_SIZE_REAL; kernel_pt_table[loop].pv_pa = kernel_pt_table[loop].pv_va - KERNVIRTADDR + abp->abp_physaddr; } } /* * Allocate a page for the system page mapped to V0x00000000 * This page will just contain the system vectors and can be * shared by all processes. */ valloc_pages(systempage, 1); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE); valloc_pages(abtstack, ABT_STACK_SIZE); valloc_pages(undstack, UND_STACK_SIZE); valloc_pages(kernelstack, kstack_pages); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* Map the L2 pages tables in the L1 page table */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, &kernel_pt_table[KERNEL_PT_SYS]); for (i = 0; i < KERNEL_PT_KERN_NUM; i++) pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE, &kernel_pt_table[KERNEL_PT_KERN + i]); pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, (((uint32_t)lastaddr - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1), VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE - 1)); for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) { pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE, &kernel_pt_table[KERNEL_PT_AFKERNEL + i]); } /* Map the vector page. 
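 * ARM_VECTORS_HIGH is the high-vectors address 0xffff0000; a single
 * pmap_map_entry() call suffices here because the vector page is exactly
 * one page, unlike the larger regions mapped with pmap_map_chunk() below.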
*/ pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the stack pages */ pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa, IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa, ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa, UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa, kstack_pages * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa, msgbufsize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va, kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); } arm_devmap_bootstrap(l1pagetable, econa_devmap); cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); cpu_setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); cninit(); mem_info = ((*ddr) >> 4) & 0x3; memsize = (8<abp_physaddr, virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } Index: head/sys/arm/freescale/imx/imx6_machdep.c =================================================================== --- head/sys/arm/freescale/imx/imx6_machdep.c (revision 295693) +++ head/sys/arm/freescale/imx/imx6_machdep.c (revision 295694) @@ -1,287 +1,287 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "platform_if.h" struct fdt_fixup_entry fdt_fixup_table[] = { { NULL, NULL } }; static uint32_t gpio1_node; #ifndef ARM_INTRNG /* * Work around the linux workaround for imx6 erratum 006687, in which some * ethernet interrupts don't go to the GPC and thus won't wake the system from * Wait mode. We don't use Wait mode (which halts the GIC, leaving only GPC * interrupts able to wake the system), so we don't experience the bug at all. * The linux workaround is to reconfigure GPIO1_6 as the ENET interrupt by * writing magic values to an undocumented IOMUX register, then letting the gpio * interrupt driver notify the ethernet driver. We'll be able to do all that * (even though we don't need to) once the INTRNG project is committed and the * imx_gpio driver becomes an interrupt driver. Until then, this crazy little * workaround watches for requests to map an interrupt 6 with the interrupt * controller node referring to gpio1, and it substitutes the proper ffec * interrupt number. */ static int imx6_decode_fdt(uint32_t iparent, uint32_t *intr, int *interrupt, int *trig, int *pol) { if (fdt32_to_cpu(intr[0]) == 6 && OF_node_from_xref(iparent) == gpio1_node) { *interrupt = 150; *trig = INTR_TRIGGER_CONFORM; *pol = INTR_POLARITY_CONFORM; return (0); } return (gic_decode_fdt(iparent, intr, interrupt, trig, pol)); } fdt_pic_decode_t fdt_pic_table[] = { &imx6_decode_fdt, NULL }; #endif static vm_offset_t imx6_lastaddr(platform_t plat) { return (arm_devmap_lastaddr()); } static int imx6_attach(platform_t plat) { /* Inform the MPCore timer driver that its clock is variable. */ arm_tmr_change_frequency(ARM_TMR_FREQUENCY_VARIES); return (0); } static void imx6_late_init(platform_t plat) { const uint32_t IMX6_WDOG_SR_PHYS = 0x020bc004; imx_wdog_init_last_reset(IMX6_WDOG_SR_PHYS); /* Cache the gpio1 node handle for imx6_decode_fdt() workaround code. */ gpio1_node = OF_node_from_xref( OF_finddevice("/soc/aips-bus@02000000/gpio@0209c000")); } /* * Set up static device mappings. * * This attempts to cover the most-used devices with 1MB section mappings, which * is good for performance (uses fewer TLB entries for device access). * * ARMMP covers the interrupt controller, MPCore timers, global timer, and the * L2 cache controller. Most of the 1MB range is unused reserved space. * * AIPS1/AIPS2 cover most of the on-chip devices such as uart, spi, i2c, etc. * * Notably not mapped right now are HDMI, GPU, and other devices below ARMMP in * the memory map. When we get support for graphics it might make sense to * static map some of that area. Be careful with other things in that area such - * as OCRAM that probably shouldn't be mapped as PTE_DEVICE memory. + * as OCRAM that probably shouldn't be mapped as VM_MEMATTR_DEVICE memory. 
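 * Because each range below is 1MB-aligned and 1MB-sized,
 * arm_devmap_add_entry() will round the allocated VA down to a 1MB
 * boundary as well, so each range can be entered as a single section
 * mapping.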
*/ static int imx6_devmap_init(platform_t plat) { const uint32_t IMX6_ARMMP_PHYS = 0x00a00000; const uint32_t IMX6_ARMMP_SIZE = 0x00100000; const uint32_t IMX6_AIPS1_PHYS = 0x02000000; const uint32_t IMX6_AIPS1_SIZE = 0x00100000; const uint32_t IMX6_AIPS2_PHYS = 0x02100000; const uint32_t IMX6_AIPS2_SIZE = 0x00100000; arm_devmap_add_entry(IMX6_ARMMP_PHYS, IMX6_ARMMP_SIZE); arm_devmap_add_entry(IMX6_AIPS1_PHYS, IMX6_AIPS1_SIZE); arm_devmap_add_entry(IMX6_AIPS2_PHYS, IMX6_AIPS2_SIZE); return (0); } void cpu_reset(void) { const uint32_t IMX6_WDOG_CR_PHYS = 0x020bc000; imx_wdog_cpu_reset(IMX6_WDOG_CR_PHYS); } /* * Determine what flavor of imx6 we're running on. * * This code is based on the way u-boot does it. Information found on the web * indicates that Freescale themselves were the original source of this logic, * including the strange check for number of CPUs in the SCU configuration * register, which is apparently needed on some revisions of the SOLO. * * According to the documentation, there is such a thing as an i.MX6 Dual * (non-lite flavor). However, Freescale doesn't seem to have assigned it a * number or provided any logic to handle it in their detection code. * * Note that the ANALOG_DIGPROG and SCU configuration registers are not * documented in the chip reference manual. (SCU configuration is mentioned, * but not mapped out in detail.) I think the bottom two bits of the scu config * register may be ncpu-1. * * This hasn't been tested yet on a dual[-lite]. * * On a solo: * digprog = 0x00610001 * hwsoc = 0x00000062 * scu config = 0x00000500 * On a quad: * digprog = 0x00630002 * hwsoc = 0x00000063 * scu config = 0x00005503 */ u_int imx_soc_type() { uint32_t digprog, hwsoc; uint32_t *pcr; static u_int soctype; const vm_offset_t SCU_CONFIG_PHYSADDR = 0x00a00004; #define HWSOC_MX6SL 0x60 #define HWSOC_MX6DL 0x61 #define HWSOC_MX6SOLO 0x62 #define HWSOC_MX6Q 0x63 if (soctype != 0) return (soctype); digprog = imx6_anatop_read_4(IMX6_ANALOG_DIGPROG_SL); hwsoc = (digprog >> IMX6_ANALOG_DIGPROG_SOCTYPE_SHIFT) & IMX6_ANALOG_DIGPROG_SOCTYPE_MASK; if (hwsoc != HWSOC_MX6SL) { digprog = imx6_anatop_read_4(IMX6_ANALOG_DIGPROG); hwsoc = (digprog & IMX6_ANALOG_DIGPROG_SOCTYPE_MASK) >> IMX6_ANALOG_DIGPROG_SOCTYPE_SHIFT; /*printf("digprog = 0x%08x\n", digprog);*/ if (hwsoc == HWSOC_MX6DL) { pcr = arm_devmap_ptov(SCU_CONFIG_PHYSADDR, 4); if (pcr != NULL) { /*printf("scu config = 0x%08x\n", *pcr);*/ if ((*pcr & 0x03) == 0) { hwsoc = HWSOC_MX6SOLO; } } } } /* printf("hwsoc 0x%08x\n", hwsoc); */ switch (hwsoc) { case HWSOC_MX6SL: soctype = IMXSOC_6SL; break; case HWSOC_MX6SOLO: soctype = IMXSOC_6S; break; case HWSOC_MX6DL: soctype = IMXSOC_6DL; break; case HWSOC_MX6Q : soctype = IMXSOC_6Q; break; default: printf("imx_soc_type: Don't understand hwsoc 0x%02x, " "digprog 0x%08x; assuming IMXSOC_6Q\n", hwsoc, digprog); soctype = IMXSOC_6Q; break; } return (soctype); } /* * Early putc routine for EARLY_PRINTF support. To use, add to kernel config: * option SOCDEV_PA=0x02000000 * option SOCDEV_VA=0x02000000 * option EARLY_PRINTF * Resist the temptation to change the #if 0 to #ifdef EARLY_PRINTF here. It * makes sense now, but if multiple SOCs do that it will make early_putc another * duplicate symbol to be eliminated on the path to a generic kernel. 
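 * (To experiment locally, a hedged suggestion: change the #if 0 below to
 * #if 1 and add the three options above to the kernel config; the
 * hard-coded register addresses in the routine target the same UART the
 * SOCDEV_PA/SOCDEV_VA options point at.)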
*/ #if 0 static void imx6_early_putc(int c) { volatile uint32_t * UART_STAT_REG = (uint32_t *)0x02020098; volatile uint32_t * UART_TX_REG = (uint32_t *)0x02020040; const uint32_t UART_TXRDY = (1 << 3); while ((*UART_STAT_REG & UART_TXRDY) == 0) continue; *UART_TX_REG = c; } early_putc_t *early_putc = imx6_early_putc; #endif static platform_method_t imx6_methods[] = { PLATFORMMETHOD(platform_attach, imx6_attach), PLATFORMMETHOD(platform_lastaddr, imx6_lastaddr), PLATFORMMETHOD(platform_devmap_init, imx6_devmap_init), PLATFORMMETHOD(platform_late_init, imx6_late_init), PLATFORMMETHOD_END, }; FDT_PLATFORM_DEF2(imx6, imx6s, "i.MX6 Solo", 0, "fsl,imx6s"); FDT_PLATFORM_DEF2(imx6, imx6d, "i.MX6 Dual", 0, "fsl,imx6d"); FDT_PLATFORM_DEF2(imx6, imx6q, "i.MX6 Quad", 0, "fsl,imx6q"); Index: head/sys/arm/include/devmap.h =================================================================== --- head/sys/arm/include/devmap.h (revision 295693) +++ head/sys/arm/include/devmap.h (revision 295694) @@ -1,93 +1,91 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_DEVMAP_H_ #define _MACHINE_DEVMAP_H_ /* * This structure is used by MD code to describe static mappings of devices * which are established as part of bringing up the MMU early in the boot. */ struct arm_devmap_entry { vm_offset_t pd_va; /* virtual address */ vm_paddr_t pd_pa; /* physical address */ vm_size_t pd_size; /* size of region */ - vm_prot_t pd_prot; /* protection code */ - int pd_cache; /* cache attributes */ }; /* * Return the lowest KVA address used in any entry in the registered devmap * table. This works with whatever table is registered, including the internal * table used by arm_devmap_add_entry() if that routine was used. Platforms can * implement platform_lastaddr() by calling this if static device mappings are * their only use of high KVA space. */ vm_offset_t arm_devmap_lastaddr(void); /* * Automatically allocate KVA (from the top of the address space downwards) and * make static device mapping entries in an internal table. The internal table * is automatically registered on the first call to this. */ void arm_devmap_add_entry(vm_paddr_t pa, vm_size_t sz); /* * Register a platform-local table to be bootstrapped by the generic * initarm() in arm/machdep.c. 
This is used by newer code that allocates and * fills in its own local table but does not have its own initarm() routine. */ void arm_devmap_register_table(const struct arm_devmap_entry * _table); /* * Establish mappings for all the entries in the table. This is called * automatically from the common initarm() in arm/machdep.c, and also from the * custom initarm() routines in older code. If the table pointer is NULL, this * will use the table installed previously by arm_devmap_register_table(). */ void arm_devmap_bootstrap(vm_offset_t _l1pt, const struct arm_devmap_entry *_table); /* * Translate between virtual and physical addresses within a region that is * static-mapped by the devmap code. If the given address range isn't * static-mapped, then ptov returns NULL and vtop returns DEVMAP_PADDR_NOTFOUND. * The latter implies that you can't vtop just the last byte of physical address * space. This is not as limiting as it might sound, because even if a device * occupies the end of the physical address space, you're only prevented from * doing vtop for that single byte. If you vtop a size bigger than 1 it works. */ #define DEVMAP_PADDR_NOTFOUND ((vm_paddr_t)(-1)) void * arm_devmap_ptov(vm_paddr_t _pa, vm_size_t _sz); vm_paddr_t arm_devmap_vtop(void * _va, vm_size_t _sz); /* Print the static mapping table; used for bootverbose output. */ void arm_devmap_print_table(void); #endif Index: head/sys/arm/include/pmap-v6.h =================================================================== --- head/sys/arm/include/pmap-v6.h (revision 295693) +++ head/sys/arm/include/pmap-v6.h (revision 295694) @@ -1,258 +1,256 @@ /*- * Copyright 2014 Svatopluk Kraus * Copyright 2014 Michal Meloun * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The ARM version of this file was more or less based on the i386 version, * which has the following provenance... * * Derived from hp300 version by Mike Hibler, this version by William * Jolitz uses a recursive map [a pde points to the page directory] to * map the page tables using the pagetables themselves. 
This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
 * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include
#include
#include
#include

typedef uint32_t pt1_entry_t;		/* L1 table entry */
typedef uint32_t pt2_entry_t;		/* L2 table entry */
typedef uint32_t ttb_entry_t;		/* TTB entry */

#ifdef _KERNEL

#if 0
#define PMAP_PTE_NOCACHE	// Use uncached page tables
#endif

/*
 * (1) During pmap bootstrap, physical pages for L2 page tables are
 *     allocated in advance which are used for KVA continuous mapping
 *     starting from KERNBASE. This makes things more simple.
 * (2) During vm subsystem initialization, only vm subsystem itself can
 *     allocate physical memory safely. As pmap_map() is called during
 *     this initialization, we must be prepared for that and have some
 *     preallocated physical pages for L2 page tables.
 *
 * Note that some more pages for L2 page tables are preallocated too
 * for mappings laying above VM_MAX_KERNEL_ADDRESS.
 */
#ifndef NKPT2PG
/*
 * The optimal way is to define this in board configuration as
 * definition here must be safe enough. It means really big.
 *
 * 1 GB KVA <=> 256 kernel L2 page table pages
 *
 * From real platforms:
 * 1 GB physical memory <=> 10 pages is enough
 * 2 GB physical memory <=> 21 pages is enough
 */
#define NKPT2PG		32
#endif

extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern char *_tmppt;	/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};
#endif

struct pv_entry;
struct pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	uint16_t		pt2_wirecount[4];
	vm_memattr_t		pat_mode;
};

struct pmap {
	struct mtx		pm_mtx;
	pt1_entry_t		*pm_pt1;	/* KVA of pt1 */
	pt2_entry_t		*pm_pt2tab;	/* KVA of pt2 pages table */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap kernel_pmap_store;
#define kernel_pmap		(&kernel_pmap_store)

#define PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page. An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process. This avoids the
 * need to track per-pmap assignments.
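 * Sizing note: _NPCPV (336) pv entries per chunk need a free bitmap of
 * ceil(336 / 32) = 11 32-bit words, which is where _NPCM comes from.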
*/ #define _NPCM 11 #define _NPCPV 336 struct pv_chunk { pmap_t pc_pmap; TAILQ_ENTRY(pv_chunk) pc_list; uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ TAILQ_ENTRY(pv_chunk) pc_lru; struct pv_entry pc_pventry[_NPCPV]; }; #ifdef _KERNEL struct pcb; extern ttb_entry_t pmap_kern_ttb; /* TTB for kernel pmap */ #define pmap_page_get_memattr(m) ((m)->md.pat_mode) #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) /* * Only the following functions or macros may be used before pmap_bootstrap() * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and * vtopte2(). */ void pmap_bootstrap(vm_offset_t ); void pmap_kenter(vm_offset_t , vm_paddr_t ); void *pmap_kenter_temporary(vm_paddr_t , int ); void pmap_kremove(vm_offset_t); void *pmap_mapdev(vm_paddr_t, vm_size_t); void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int); boolean_t pmap_page_is_mapped(vm_page_t ); void pmap_page_set_memattr(vm_page_t , vm_memattr_t ); void pmap_unmapdev(vm_offset_t, vm_size_t); void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t); void pmap_kremove_device(vm_offset_t, vm_size_t); void pmap_set_pcb_pagedir(pmap_t , struct pcb *); void pmap_tlb_flush(pmap_t , vm_offset_t ); void pmap_tlb_flush_range(pmap_t , vm_offset_t , vm_size_t ); void pmap_dcache_wb_range(vm_paddr_t , vm_size_t , vm_memattr_t ); vm_paddr_t pmap_kextract(vm_offset_t ); vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); int pmap_fault(pmap_t , vm_offset_t , uint32_t , int , bool); #define vtophys(va) pmap_kextract((vm_offset_t)(va)) void pmap_set_tex(void); void reinit_mmu(ttb_entry_t ttb, u_int aux_clr, u_int aux_set); /* * Pre-bootstrap epoch functions set. */ void pmap_bootstrap_prepare(vm_paddr_t ); vm_paddr_t pmap_preboot_get_pages(u_int ); void pmap_preboot_map_pages(vm_paddr_t , vm_offset_t , u_int ); vm_offset_t pmap_preboot_reserve_pages(u_int ); vm_offset_t pmap_preboot_get_vpages(u_int ); void pmap_preboot_map_attr(vm_paddr_t, vm_offset_t, vm_size_t, vm_prot_t, vm_memattr_t); #endif /* _KERNEL */ // ----------------- TO BE DELETED --------------------------------------------- #include #ifdef _KERNEL /* * sys/arm/arm/elf_trampoline.c * sys/arm/arm/genassym.c * sys/arm/arm/machdep.c * sys/arm/arm/mp_machdep.c * sys/arm/arm/locore.S * sys/arm/arm/pmap.c * sys/arm/arm/swtch.S * sys/arm/at91/at91_machdep.c * sys/arm/cavium/cns11xx/econa_machdep.c * sys/arm/s3c2xx0/s3c24x0_machdep.c * sys/arm/xscale/ixp425/avila_machdep.c * sys/arm/xscale/i8134x/crb_machdep.c * sys/arm/xscale/i80321/ep80219_machdep.c * sys/arm/xscale/i80321/iq31244_machdep.c * sys/arm/xscale/pxa/pxa_machdep.c */ #define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ /* * sys/arm/arm/cpufunc.c */ void vector_page_setprot(int); -#define PTE_DEVICE VM_MEMATTR_DEVICE - #endif /* _KERNEL */ // ----------------------------------------------------------------------------- #endif /* !_MACHINE_PMAP_H_ */ Index: head/sys/arm/mv/mv_localbus.c =================================================================== --- head/sys/arm/mv/mv_localbus.c (revision 295693) +++ head/sys/arm/mv/mv_localbus.c (revision 295694) @@ -1,494 +1,492 @@ /*- * Copyright (c) 2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "dev/fdt/fdt_common.h" #include "ofw_bus_if.h" #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif #define MV_LOCALBUS_MAX_BANKS 8 #define MV_LOCALBUS_MAX_BANK_CELLS 4 static MALLOC_DEFINE(M_LOCALBUS, "localbus", "localbus devices information"); struct localbus_bank { vm_offset_t va; /* VA of the bank */ vm_paddr_t pa; /* physical address of the bank */ vm_size_t size; /* bank size */ uint8_t mapped; /* device memory has mapping */ }; struct localbus_softc { device_t sc_dev; bus_space_handle_t sc_bsh; bus_space_tag_t sc_bst; int sc_rid; struct localbus_bank *sc_banks; }; struct localbus_devinfo { struct ofw_bus_devinfo di_ofw; struct resource_list di_res; int di_bank; }; struct localbus_va_entry { int8_t bank; vm_offset_t va; vm_size_t size; }; /* * Prototypes. */ static int localbus_probe(device_t); static int localbus_attach(device_t); static int localbus_print_child(device_t, device_t); static struct resource *localbus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static struct resource_list *localbus_get_resource_list(device_t, device_t); static ofw_bus_get_devinfo_t localbus_get_devinfo; /* * Bus interface definition. 
*/ static device_method_t localbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, localbus_probe), DEVMETHOD(device_attach, localbus_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, localbus_print_child), DEVMETHOD(bus_alloc_resource, localbus_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_resource_list, localbus_get_resource_list), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_devinfo, localbus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t localbus_driver = { "localbus", localbus_methods, sizeof(struct localbus_softc) }; const struct localbus_va_entry localbus_virtmap[] = { { 0, MV_DEV_BOOT_BASE, MV_DEV_BOOT_SIZE }, { 1, MV_DEV_CS0_BASE, MV_DEV_CS0_SIZE }, { 2, MV_DEV_CS1_BASE, MV_DEV_CS1_SIZE }, { 3, MV_DEV_CS2_BASE, MV_DEV_CS2_SIZE }, { -1, 0, 0 } }; static struct localbus_bank localbus_banks[MV_LOCALBUS_MAX_BANKS]; devclass_t localbus_devclass; DRIVER_MODULE(localbus, ofwbus, localbus_driver, localbus_devclass, 0, 0); static int fdt_localbus_reg_decode(phandle_t node, struct localbus_softc *sc, struct localbus_devinfo *di) { u_long start, end, count; pcell_t *reg, *regptr; pcell_t addr_cells, size_cells; int tuple_size, tuples; int i, rv, bank; if (fdt_addrsize_cells(OF_parent(node), &addr_cells, &size_cells) != 0) return (ENXIO); tuple_size = sizeof(pcell_t) * (addr_cells + size_cells); tuples = OF_getprop_alloc(node, "reg", tuple_size, (void **)&reg); debugf("addr_cells = %d, size_cells = %d\n", addr_cells, size_cells); debugf("tuples = %d, tuple size = %d\n", tuples, tuple_size); if (tuples <= 0) /* No 'reg' property in this node. */ return (0); regptr = reg; for (i = 0; i < tuples; i++) { bank = fdt_data_get((void *)regptr, 1); if (bank >= MV_LOCALBUS_MAX_BANKS) { device_printf(sc->sc_dev, "bank number [%d] out of " "range\n", bank); continue; } /* * If the device doesn't have a virtual-to-physical mapping, * don't add resources. */ if (!(sc->sc_banks[bank].mapped)) { device_printf(sc->sc_dev, "device [%d]: missing memory " "mapping\n", bank); continue; } di->di_bank = bank; regptr += 1; /* Get address/size. */ rv = fdt_data_to_res(regptr, addr_cells - 1, size_cells, &start, &count); if (rv != 0) { resource_list_free(&di->di_res); goto out; } /* Check whether enough memory is mapped. */ if (sc->sc_banks[bank].size < count) { device_printf(sc->sc_dev, "device [%d]: not enough " "memory reserved\n", bank); continue; } regptr += addr_cells - 1 + size_cells; /* Calculate address range relative to VA base. */ start = sc->sc_banks[bank].va + start; end = start + count - 1; debugf("reg addr bank = %d, start = %lx, end = %lx, " "count = %lx\n", bank, start, end, count); /* Use bank (CS) cell as rid.
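 * The rid therefore equals the chip-select number the bank is wired to,
 * which is how localbus_alloc_resource() locates the entry again.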
*/ resource_list_add(&di->di_res, SYS_RES_MEMORY, di->di_bank, start, end, count); } rv = 0; out: free(reg, M_OFWPROP); return (rv); } static int localbus_probe(device_t dev) { if (!ofw_bus_is_compatible_strict(dev, "mrvl,lbc")) return (ENXIO); device_set_desc(dev, "Marvell device bus"); return (BUS_PROBE_DEFAULT); } static int localbus_attach(device_t dev) { device_t dev_child; struct localbus_softc *sc; struct localbus_devinfo *di; phandle_t dt_node, dt_child; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_banks = localbus_banks; /* * Walk localbus and add direct subordinates as our children. */ dt_node = ofw_bus_get_node(dev); for (dt_child = OF_child(dt_node); dt_child != 0; dt_child = OF_peer(dt_child)) { /* Check and process 'status' property. */ if (!(fdt_is_enabled(dt_child))) continue; if (!(fdt_pm_is_enabled(dt_child))) continue; di = malloc(sizeof(*di), M_LOCALBUS, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&di->di_ofw, dt_child) != 0) { free(di, M_LOCALBUS); device_printf(dev, "could not set up devinfo\n"); continue; } resource_list_init(&di->di_res); if (fdt_localbus_reg_decode(dt_child, sc, di)) { device_printf(dev, "could not process 'reg' " "property\n"); ofw_bus_gen_destroy_devinfo(&di->di_ofw); free(di, M_LOCALBUS); continue; } /* Add newbus device for this FDT node */ dev_child = device_add_child(dev, NULL, -1); if (dev_child == NULL) { device_printf(dev, "could not add child: %s\n", di->di_ofw.obd_name); resource_list_free(&di->di_res); ofw_bus_gen_destroy_devinfo(&di->di_ofw); free(di, M_LOCALBUS); continue; } #ifdef DEBUG device_printf(dev, "added child: %s\n\n", di->di_ofw.obd_name); #endif device_set_ivars(dev_child, di); } return (bus_generic_attach(dev)); } static int localbus_print_child(device_t dev, device_t child) { struct localbus_devinfo *di; struct resource_list *rl; int rv; di = device_get_ivars(child); rl = &di->di_res; rv = 0; rv += bus_print_child_header(dev, child); rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx"); rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); rv += bus_print_child_footer(dev, child); return (rv); } static struct resource * localbus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct localbus_devinfo *di; struct resource_list_entry *rle; /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. 
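 * A default allocation is recognized by start == 0 and end == ~0, the
 * values bus_alloc_resource_any() passes on behalf of the child.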
*/ if ((start == 0UL) && (end == ~0UL)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; rid = &di->di_bank; rle = resource_list_find(&di->di_res, type, *rid); if (rle == NULL) { device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; end = rle->end; count = rle->count; } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static struct resource_list * localbus_get_resource_list(device_t bus, device_t child) { struct localbus_devinfo *di; di = device_get_ivars(child); return (&di->di_res); } static const struct ofw_bus_devinfo * localbus_get_devinfo(device_t bus, device_t child) { struct localbus_devinfo *di; di = device_get_ivars(child); return (&di->di_ofw); } int fdt_localbus_devmap(phandle_t dt_node, struct arm_devmap_entry *fdt_devmap, int banks_max_num, int *banks_added) { pcell_t ranges[MV_LOCALBUS_MAX_BANKS * MV_LOCALBUS_MAX_BANK_CELLS]; pcell_t *rangesptr; uint32_t tuple_size, bank; vm_paddr_t offset; vm_size_t size; int dev_num, addr_cells, size_cells, par_addr_cells, va_index, i, j, k; if ((fdt_addrsize_cells(dt_node, &addr_cells, &size_cells)) != 0) return (EINVAL); par_addr_cells = fdt_parent_addr_cells(dt_node); if (par_addr_cells > 2) { /* * Localbus devmap initialization error: unsupported parent * #addr-cells */ return (ERANGE); } tuple_size = (addr_cells + par_addr_cells + size_cells); if (tuple_size > MV_LOCALBUS_MAX_BANK_CELLS) return (ERANGE); tuple_size *= sizeof(pcell_t); dev_num = OF_getprop(dt_node, "ranges", ranges, sizeof(ranges)); if (dev_num <= 0) return (EINVAL); /* Calculate number of devices attached to bus */ dev_num = dev_num / tuple_size; /* * If number of ranges > max number of localbus devices, * additional entries will not be processed */ dev_num = MIN(dev_num, banks_max_num); rangesptr = &ranges[0]; j = 0; /* Process data from FDT */ for (i = 0; i < dev_num; i++) { /* First field is bank number */ bank = fdt_data_get((void *)rangesptr, 1); rangesptr += 1; if (bank >= MV_LOCALBUS_MAX_BANKS) { /* Bank out of range */ rangesptr += ((addr_cells - 1) + par_addr_cells + size_cells); continue; } /* Find virtmap entry for this bank */ va_index = -1; for (k = 0; localbus_virtmap[k].bank >= 0; k++) { if (localbus_virtmap[k].bank == bank) { va_index = k; break; } } /* Check if virtmap entry was found */ if (va_index == -1) { rangesptr += ((addr_cells - 1) + par_addr_cells + size_cells); continue; } /* Remaining child's address fields are unused */ rangesptr += (addr_cells - 1); /* Parent address offset */ offset = fdt_data_get((void *)rangesptr, par_addr_cells); rangesptr += par_addr_cells; /* Last field is size */ size = fdt_data_get((void *)rangesptr, size_cells); rangesptr += size_cells; if (size > localbus_virtmap[va_index].size) { /* Not enough space reserved in virtual memory map */ continue; } fdt_devmap[j].pd_va = localbus_virtmap[va_index].va; fdt_devmap[j].pd_pa = offset; fdt_devmap[j].pd_size = size; - fdt_devmap[j].pd_prot = VM_PROT_READ | VM_PROT_WRITE; - fdt_devmap[j].pd_cache = PTE_DEVICE; /* Copy data to structure used by localbus driver */ localbus_banks[bank].va = fdt_devmap[j].pd_va; localbus_banks[bank].pa = fdt_devmap[j].pd_pa; localbus_banks[bank].size = fdt_devmap[j].pd_size; localbus_banks[bank].mapped = 1; j++; } *banks_added = j; return (0); } Index: head/sys/arm/mv/mv_machdep.c =================================================================== ---
head/sys/arm/mv/mv_machdep.c (revision 295693) +++ head/sys/arm/mv/mv_machdep.c (revision 295694) @@ -1,504 +1,500 @@ /*- * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45 */ #include "opt_ddb.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #define _ARM32_BUS_DMA_PRIVATE #include #include #include #include #include #include #include #include #include #include #include /* XXX */ #include /* XXX eventually this should be eliminated */ #include #include static int platform_mpp_init(void); #if defined(SOC_MV_ARMADAXP) void armadaxp_init_coher_fabric(void); void armadaxp_l2_init(void); #endif #if defined(SOC_MV_ARMADA38X) int armada38x_win_set_iosync_barrier(void); int armada38x_scu_enable(void); int armada38x_open_bootrom_win(void); #endif #define MPP_PIN_MAX 68 #define MPP_PIN_CELLS 2 #define MPP_PINS_PER_REG 8 #define MPP_SEL(pin,func) (((func) & 0xf) << \ (((pin) % MPP_PINS_PER_REG) * 4)) static int platform_mpp_init(void) { pcell_t pinmap[MPP_PIN_MAX * MPP_PIN_CELLS]; int mpp[MPP_PIN_MAX]; uint32_t ctrl_val, ctrl_offset; pcell_t reg[4]; u_long start, size; phandle_t node; pcell_t pin_cells, *pinmap_ptr, pin_count; ssize_t len; int par_addr_cells, par_size_cells; int tuple_size, tuples, rv, pins, i, j; int mpp_pin, mpp_function; /* * Try to access the MPP node directly i.e. through /aliases/mpp. */ if ((node = OF_finddevice("mpp")) != -1) if (fdt_is_compatible(node, "mrvl,mpp")) goto moveon; /* * Find the node the long way. */ if ((node = OF_finddevice("/")) == -1) return (ENXIO); if ((node = fdt_find_compatible(node, "simple-bus", 0)) == 0) return (ENXIO); if ((node = fdt_find_compatible(node, "mrvl,mpp", 0)) == 0) /* * No MPP node. Fall back to how MPP got set by the * first-stage loader and try to continue booting. 
*/ return (0); moveon: /* * Process 'reg' prop. */ if ((rv = fdt_addrsize_cells(OF_parent(node), &par_addr_cells, &par_size_cells)) != 0) return (ENXIO); tuple_size = sizeof(pcell_t) * (par_addr_cells + par_size_cells); len = OF_getprop(node, "reg", reg, sizeof(reg)); tuples = len / tuple_size; if (tuples <= 0) return (EINVAL); /* * Get address/size. XXX we assume only the first 'reg' tuple is used. */ rv = fdt_data_to_res(reg, par_addr_cells, par_size_cells, &start, &size); if (rv != 0) return (rv); start += fdt_immr_va; /* * Process 'pin-count' and 'pin-map' props. */ if (OF_getprop(node, "pin-count", &pin_count, sizeof(pin_count)) <= 0) return (ENXIO); pin_count = fdt32_to_cpu(pin_count); if (pin_count > MPP_PIN_MAX) return (ERANGE); if (OF_getprop(node, "#pin-cells", &pin_cells, sizeof(pin_cells)) <= 0) pin_cells = MPP_PIN_CELLS; else pin_cells = fdt32_to_cpu(pin_cells); if (pin_cells > MPP_PIN_CELLS) return (ERANGE); tuple_size = sizeof(pcell_t) * pin_cells; bzero(pinmap, sizeof(pinmap)); len = OF_getprop(node, "pin-map", pinmap, sizeof(pinmap)); if (len <= 0) return (ERANGE); if (len % tuple_size) return (ERANGE); pins = len / tuple_size; if (pins > pin_count) return (ERANGE); /* * Fill out a "mpp[pin] => function" table. All pins unspecified in * the 'pin-map' property default to function 0, i.e. GPIO. */ bzero(mpp, sizeof(mpp)); pinmap_ptr = pinmap; for (i = 0; i < pins; i++) { mpp_pin = fdt32_to_cpu(*pinmap_ptr); mpp_function = fdt32_to_cpu(*(pinmap_ptr + 1)); mpp[mpp_pin] = mpp_function; pinmap_ptr += pin_cells; } /* * Prepare and program MPP control register values. */ ctrl_offset = 0; for (i = 0; i < pin_count;) { ctrl_val = 0; for (j = 0; j < MPP_PINS_PER_REG; j++) { if (i + j == pin_count - 1) break; ctrl_val |= MPP_SEL(i + j, mpp[i + j]); } i += MPP_PINS_PER_REG; bus_space_write_4(fdtbus_bs_tag, start, ctrl_offset, ctrl_val); #if defined(SOC_MV_ORION) /* * Third MPP reg on Orion SoC is placed * non-linearly (with different offset). */ if (i == (2 * MPP_PINS_PER_REG)) ctrl_offset = 0x50; else #endif ctrl_offset += 4; } return (0); } vm_offset_t platform_lastaddr(void) { return (fdt_immr_va); } void platform_probe_and_attach(void) { if (fdt_immr_addr(MV_BASE) != 0) while (1); } void platform_gpio_init(void) { /* * Re-initialise MPP. It is important to call this prior to using the * console, as the physical connection can be routed via MPP. */ if (platform_mpp_init() != 0) while (1); } void platform_late_init(void) { /* * Re-initialise decode windows */ #if !defined(SOC_MV_FREY) if (soc_decode_win() != 0) printf("WARNING: could not re-initialise decode windows! 
" "Running with existing settings...\n"); #else /* Disable watchdog and timers */ write_cpu_ctrl(CPU_TIMERS_BASE + CPU_TIMER_CONTROL, 0); #endif #if defined(SOC_MV_ARMADAXP) #if !defined(SMP) /* For SMP case it should be initialized after APs are booted */ armadaxp_init_coher_fabric(); #endif armadaxp_l2_init(); #endif #if defined(SOC_MV_ARMADA38X) /* Set IO Sync Barrier bit for all Mbus devices */ if (armada38x_win_set_iosync_barrier() != 0) printf("WARNING: could not map CPU Subsystem registers\n"); if (armada38x_scu_enable() != 0) printf("WARNING: could not enable SCU\n"); #ifdef SMP /* Open window to bootROM memory - needed for SMP */ if (armada38x_open_bootrom_win() != 0) printf("WARNING: could not open window to bootROM\n"); #endif #endif } #define FDT_DEVMAP_MAX (MV_WIN_CPU_MAX + 2) static struct arm_devmap_entry fdt_devmap[FDT_DEVMAP_MAX] = { - { 0, 0, 0, 0, 0, } + { 0, 0, 0, } }; static int platform_sram_devmap(struct arm_devmap_entry *map) { #if !defined(SOC_MV_ARMADAXP) phandle_t child, root; u_long base, size; /* * SRAM range. */ if ((child = OF_finddevice("/sram")) != 0) if (fdt_is_compatible(child, "mrvl,cesa-sram") || fdt_is_compatible(child, "mrvl,scratchpad")) goto moveon; if ((root = OF_finddevice("/")) == 0) return (ENXIO); if ((child = fdt_find_compatible(root, "mrvl,cesa-sram", 0)) == 0 && (child = fdt_find_compatible(root, "mrvl,scratchpad", 0)) == 0) goto out; moveon: if (fdt_regsize(child, &base, &size) != 0) return (EINVAL); map->pd_va = MV_CESA_SRAM_BASE; /* XXX */ map->pd_pa = base; map->pd_size = size; - map->pd_prot = VM_PROT_READ | VM_PROT_WRITE; - map->pd_cache = PTE_DEVICE; return (0); out: #endif return (ENOENT); } /* * Supply a default do-nothing implementation of mv_pci_devmap() via a weak * alias. Many Marvell platforms don't support a PCI interface, but to support * those that do, we end up with a reference to this function below, in * platform_devmap_init(). If "device pci" appears in the kernel config, the * real implementation of this function in arm/mv/mv_pci.c overrides the weak * alias defined here. */ int mv_default_fdt_pci_devmap(phandle_t node, struct arm_devmap_entry *devmap, vm_offset_t io_va, vm_offset_t mem_va); int mv_default_fdt_pci_devmap(phandle_t node, struct arm_devmap_entry *devmap, vm_offset_t io_va, vm_offset_t mem_va) { return (0); } __weak_reference(mv_default_fdt_pci_devmap, mv_pci_devmap); /* * XXX: When device entry in devmap has pd_size smaller than section size, * system will freeze during initialization */ /* * Construct devmap table with DT-derived config data. */ int platform_devmap_init(void) { phandle_t root, child; pcell_t bank_count; int i, num_mapped; i = 0; arm_devmap_register_table(&fdt_devmap[0]); #ifdef SOC_MV_ARMADAXP vm_paddr_t cur_immr_pa; /* * Acquire SoC registers' base passed by u-boot and fill devmap * accordingly. DTB is going to be modified basing on this data * later. */ __asm __volatile("mrc p15, 4, %0, c15, c0, 0" : "=r" (cur_immr_pa)); cur_immr_pa = (cur_immr_pa << 13) & 0xff000000; if (cur_immr_pa != 0) fdt_immr_pa = cur_immr_pa; #endif /* * IMMR range. */ fdt_devmap[i].pd_va = fdt_immr_va; fdt_devmap[i].pd_pa = fdt_immr_pa; fdt_devmap[i].pd_size = fdt_immr_size; - fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE; - fdt_devmap[i].pd_cache = PTE_DEVICE; i++; /* * SRAM range. */ if (i < FDT_DEVMAP_MAX) if (platform_sram_devmap(&fdt_devmap[i]) == 0) i++; /* * PCI range(s). * PCI range(s) and localbus. 
*/ if ((root = OF_finddevice("/")) == -1) return (ENXIO); for (child = OF_child(root); child != 0; child = OF_peer(child)) { if (fdt_is_type(child, "pci") || fdt_is_type(child, "pciep")) { /* * Check space: each PCI node will consume 2 devmap * entries. */ if (i + 1 >= FDT_DEVMAP_MAX) return (ENOMEM); /* * XXX this should account for PCI and multiple ranges * of a given kind. */ if (mv_pci_devmap(child, &fdt_devmap[i], MV_PCI_VA_IO_BASE, MV_PCI_VA_MEM_BASE) != 0) return (ENXIO); i += 2; } if (fdt_is_compatible(child, "mrvl,lbc")) { /* Check available space */ if (OF_getprop(child, "bank-count", (void *)&bank_count, sizeof(bank_count)) <= 0) /* If no property, use default value */ bank_count = 1; else bank_count = fdt32_to_cpu(bank_count); if ((i + bank_count) >= FDT_DEVMAP_MAX) return (ENOMEM); /* Add all localbus ranges to device map */ num_mapped = 0; if (fdt_localbus_devmap(child, &fdt_devmap[i], (int)bank_count, &num_mapped) != 0) return (ENXIO); i += num_mapped; } } return (0); } struct arm32_dma_range * bus_dma_get_range(void) { return (NULL); } int bus_dma_get_range_nb(void) { return (0); } #if defined(CPU_MV_PJ4B) #ifdef DDB #include DB_SHOW_COMMAND(cp15, db_show_cp15) { u_int reg; __asm __volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (reg)); db_printf("Cpu ID: 0x%08x\n", reg); __asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (reg)); db_printf("Current Cache Lvl ID: 0x%08x\n",reg); __asm __volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (reg)); db_printf("Ctrl: 0x%08x\n",reg); __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (reg)); db_printf("Aux Ctrl: 0x%08x\n",reg); __asm __volatile("mrc p15, 0, %0, c0, c1, 0" : "=r" (reg)); db_printf("Processor Feat 0: 0x%08x\n", reg); __asm __volatile("mrc p15, 0, %0, c0, c1, 1" : "=r" (reg)); db_printf("Processor Feat 1: 0x%08x\n", reg); __asm __volatile("mrc p15, 0, %0, c0, c1, 2" : "=r" (reg)); db_printf("Debug Feat 0: 0x%08x\n", reg); __asm __volatile("mrc p15, 0, %0, c0, c1, 3" : "=r" (reg)); db_printf("Auxiliary Feat 0: 0x%08x\n", reg); __asm __volatile("mrc p15, 0, %0, c0, c1, 4" : "=r" (reg)); db_printf("Memory Model Feat 0: 0x%08x\n", reg); __asm __volatile("mrc p15, 0, %0, c0, c1, 5" : "=r" (reg)); db_printf("Memory Model Feat 1: 0x%08x\n", reg); __asm __volatile("mrc p15, 0, %0, c0, c1, 6" : "=r" (reg)); db_printf("Memory Model Feat 2: 0x%08x\n", reg); __asm __volatile("mrc p15, 0, %0, c0, c1, 7" : "=r" (reg)); db_printf("Memory Model Feat 3: 0x%08x\n", reg); __asm __volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (reg)); db_printf("Aux Func Modes Ctrl 0: 0x%08x\n",reg); __asm __volatile("mrc p15, 1, %0, c15, c2, 1" : "=r" (reg)); db_printf("Aux Func Modes Ctrl 1: 0x%08x\n",reg); __asm __volatile("mrc p15, 1, %0, c15, c12, 0" : "=r" (reg)); db_printf("CPU ID code extension: 0x%08x\n",reg); } DB_SHOW_COMMAND(vtop, db_show_vtop) { u_int reg; if (have_addr) { __asm __volatile("mcr p15, 0, %0, c7, c8, 0" : : "r" (addr)); __asm __volatile("mrc p15, 0, %0, c7, c4, 0" : "=r" (reg)); db_printf("Physical address reg: 0x%08x\n",reg); } else db_printf("show vtop \n"); } #endif /* DDB */ #endif /* CPU_MV_PJ4B */ Index: head/sys/arm/mv/mv_pci.c =================================================================== --- head/sys/arm/mv/mv_pci.c (revision 295693) +++ head/sys/arm/mv/mv_pci.c (revision 295694) @@ -1,1216 +1,1212 @@ /*- * Copyright (c) 2008 MARVELL INTERNATIONAL LTD. * Copyright (c) 2010 The FreeBSD Foundation * Copyright (c) 2010-2015 Semihalf * All rights reserved. * * Developed by Semihalf. 
* * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Marvell integrated PCI/PCI-Express controller driver. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "pcib_if.h" #include #include #include #include #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif /* * Code and data related to fdt-based PCI configuration. * * This stuff used to be in dev/fdt/fdt_pci.c and fdt_common.h, but it was * always Marvell-specific so that was deleted and the code now lives here. */ struct mv_pci_range { u_long base_pci; u_long base_parent; u_long len; }; #define FDT_RANGES_CELLS ((3 + 3 + 2) * 2) static void mv_pci_range_dump(struct mv_pci_range *range) { #ifdef DEBUG printf("\n"); printf(" base_pci = 0x%08lx\n", range->base_pci); printf(" base_par = 0x%08lx\n", range->base_parent); printf(" len = 0x%08lx\n", range->len); #endif } static int mv_pci_ranges_decode(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { pcell_t ranges[FDT_RANGES_CELLS]; struct mv_pci_range *pci_space; pcell_t addr_cells, size_cells, par_addr_cells; pcell_t *rangesptr; pcell_t cell0, cell1, cell2; int tuple_size, tuples, i, rv, offset_cells, len; /* * Retrieve 'ranges' property. 
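 * Each tuple is laid out as <pci-addr (3 cells)> <parent-addr
 * (par_addr_cells)> <size (2 cells)>; bit 0x02000000 in the first
 * pci-addr cell marks a memory-space range and bit 0x01000000 an I/O
 * range, as decoded in the loop below.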
*/ if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (EINVAL); if (addr_cells != 3 || size_cells != 2) return (ERANGE); par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 3) return (ERANGE); len = OF_getproplen(node, "ranges"); if (len > sizeof(ranges)) return (ENOMEM); if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0) return (EINVAL); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); tuples = len / tuple_size; /* * Initialize the ranges so that we don't have to worry about * having them all defined in the FDT. In particular, it is * perfectly fine not to want I/O space on PCI busses. */ bzero(io_space, sizeof(*io_space)); bzero(mem_space, sizeof(*mem_space)); rangesptr = &ranges[0]; offset_cells = 0; for (i = 0; i < tuples; i++) { cell0 = fdt_data_get((void *)rangesptr, 1); rangesptr++; cell1 = fdt_data_get((void *)rangesptr, 1); rangesptr++; cell2 = fdt_data_get((void *)rangesptr, 1); rangesptr++; if (cell0 & 0x02000000) { pci_space = mem_space; } else if (cell0 & 0x01000000) { pci_space = io_space; } else { rv = ERANGE; goto out; } if (par_addr_cells == 3) { /* * This is a PCI subnode 'ranges'. Skip cell0 and * cell1 of this entry and only use cell2. */ offset_cells = 2; rangesptr += offset_cells; } if ((par_addr_cells - offset_cells) > 2) { rv = ERANGE; goto out; } pci_space->base_parent = fdt_data_get((void *)rangesptr, par_addr_cells - offset_cells); rangesptr += par_addr_cells - offset_cells; if (size_cells > 2) { rv = ERANGE; goto out; } pci_space->len = fdt_data_get((void *)rangesptr, size_cells); rangesptr += size_cells; pci_space->base_pci = cell2; } rv = 0; out: return (rv); } static int mv_pci_ranges(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { int err; debugf("Processing PCI node: %x\n", node); if ((err = mv_pci_ranges_decode(node, io_space, mem_space)) != 0) { debugf("could not decode parent PCI node 'ranges'\n"); return (err); } debugf("Post fixup dump:\n"); mv_pci_range_dump(io_space); mv_pci_range_dump(mem_space); return (0); } int mv_pci_devmap(phandle_t node, struct arm_devmap_entry *devmap, vm_offset_t io_va, vm_offset_t mem_va) { struct mv_pci_range io_space, mem_space; int error; if ((error = mv_pci_ranges_decode(node, &io_space, &mem_space)) != 0) return (error); devmap->pd_va = (io_va ? io_va : io_space.base_parent); devmap->pd_pa = io_space.base_parent; devmap->pd_size = io_space.len; - devmap->pd_prot = VM_PROT_READ | VM_PROT_WRITE; - devmap->pd_cache = PTE_DEVICE; devmap++; devmap->pd_va = (mem_va ? mem_va : mem_space.base_parent); devmap->pd_pa = mem_space.base_parent; devmap->pd_size = mem_space.len; - devmap->pd_prot = VM_PROT_READ | VM_PROT_WRITE; - devmap->pd_cache = PTE_DEVICE; return (0); } /* * Code and data related to the Marvell pcib driver. 
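 * All config-space accesses go through an indirect address/data register
 * pair (PCI(E)_REG_CFG_ADDR/DATA); the PCI_CFG_* macros below compose the
 * address word from the bus, slot, function and register numbers.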
*/ #define PCI_CFG_ENA (1U << 31) #define PCI_CFG_BUS(bus) (((bus) & 0xff) << 16) #define PCI_CFG_DEV(dev) (((dev) & 0x1f) << 11) #define PCI_CFG_FUN(fun) (((fun) & 0x7) << 8) #define PCI_CFG_PCIE_REG(reg) ((reg) & 0xfc) #define PCI_REG_CFG_ADDR 0x0C78 #define PCI_REG_CFG_DATA 0x0C7C #define PCIE_REG_CFG_ADDR 0x18F8 #define PCIE_REG_CFG_DATA 0x18FC #define PCIE_REG_CONTROL 0x1A00 #define PCIE_CTRL_LINK1X 0x00000001 #define PCIE_REG_STATUS 0x1A04 #define PCIE_REG_IRQ_MASK 0x1910 #define PCIE_CONTROL_ROOT_CMPLX (1 << 1) #define PCIE_CONTROL_HOT_RESET (1 << 24) #define PCIE_LINK_TIMEOUT 1000000 #define PCIE_STATUS_LINK_DOWN 1 #define PCIE_STATUS_DEV_OFFS 16 /* Minimum PCI Memory and I/O allocations taken from PCI spec (in bytes) */ #define PCI_MIN_IO_ALLOC 4 #define PCI_MIN_MEM_ALLOC 16 #define BITS_PER_UINT32 (NBBY * sizeof(uint32_t)) struct mv_pcib_softc { device_t sc_dev; struct rman sc_mem_rman; bus_addr_t sc_mem_base; bus_addr_t sc_mem_size; uint32_t sc_mem_map[MV_PCI_MEM_SLICE_SIZE / (PCI_MIN_MEM_ALLOC * BITS_PER_UINT32)]; int sc_win_target; int sc_mem_win_attr; struct rman sc_io_rman; bus_addr_t sc_io_base; bus_addr_t sc_io_size; uint32_t sc_io_map[MV_PCI_IO_SLICE_SIZE / (PCI_MIN_IO_ALLOC * BITS_PER_UINT32)]; int sc_io_win_attr; struct resource *sc_res; bus_space_handle_t sc_bsh; bus_space_tag_t sc_bst; int sc_rid; struct mtx sc_msi_mtx; uint32_t sc_msi_bitmap; int sc_busnr; /* Host bridge bus number */ int sc_devnr; /* Host bridge device number */ int sc_type; int sc_mode; /* Endpoint / Root Complex */ struct ofw_bus_iinfo sc_pci_iinfo; }; /* Local forward prototypes */ static int mv_pcib_decode_win(phandle_t, struct mv_pcib_softc *); static void mv_pcib_hw_cfginit(void); static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, int); static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_init(struct mv_pcib_softc *, int, int); static int mv_pcib_init_all_bars(struct mv_pcib_softc *, int, int, int, int); static void mv_pcib_init_bridge(struct mv_pcib_softc *, int, int, int); static inline void pcib_write_irq_mask(struct mv_pcib_softc *, uint32_t); static void mv_pcib_enable(struct mv_pcib_softc *, uint32_t); static int mv_pcib_mem_init(struct mv_pcib_softc *); /* Forward prototypes */ static int mv_pcib_probe(device_t); static int mv_pcib_attach(device_t); static struct resource *mv_pcib_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int mv_pcib_release_resource(device_t, device_t, int, int, struct resource *); static int mv_pcib_read_ivar(device_t, device_t, int, uintptr_t *); static int mv_pcib_write_ivar(device_t, device_t, int, uintptr_t); static int mv_pcib_maxslots(device_t); static uint32_t mv_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int); static void mv_pcib_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_route_interrupt(device_t, device_t, int); #if defined(SOC_MV_ARMADAXP) static int mv_pcib_alloc_msi(device_t, device_t, int, int, int *); static int mv_pcib_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); static int mv_pcib_release_msi(device_t, device_t, int, int *); #endif /* * Bus interface definitions. 
*/ static device_method_t mv_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mv_pcib_probe), DEVMETHOD(device_attach, mv_pcib_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, mv_pcib_read_ivar), DEVMETHOD(bus_write_ivar, mv_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, mv_pcib_alloc_resource), DEVMETHOD(bus_release_resource, mv_pcib_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, mv_pcib_maxslots), DEVMETHOD(pcib_read_config, mv_pcib_read_config), DEVMETHOD(pcib_write_config, mv_pcib_write_config), DEVMETHOD(pcib_route_interrupt, mv_pcib_route_interrupt), #if defined(SOC_MV_ARMADAXP) DEVMETHOD(pcib_alloc_msi, mv_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, mv_pcib_release_msi), DEVMETHOD(pcib_map_msi, mv_pcib_map_msi), #endif /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t mv_pcib_driver = { "pcib", mv_pcib_methods, sizeof(struct mv_pcib_softc), }; devclass_t pcib_devclass; DRIVER_MODULE(pcib, ofwbus, mv_pcib_driver, pcib_devclass, 0, 0); static struct mtx pcicfg_mtx; static int mv_pcib_probe(device_t self) { phandle_t node; node = ofw_bus_get_node(self); if (!fdt_is_type(node, "pci")) return (ENXIO); if (!(ofw_bus_is_compatible(self, "mrvl,pcie") || ofw_bus_is_compatible(self, "mrvl,pci"))) return (ENXIO); device_set_desc(self, "Marvell Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } static int mv_pcib_attach(device_t self) { struct mv_pcib_softc *sc; phandle_t node, parnode; uint32_t val, unit; int err; sc = device_get_softc(self); sc->sc_dev = self; unit = fdt_get_unit(self); node = ofw_bus_get_node(self); parnode = OF_parent(node); if (fdt_is_compatible(node, "mrvl,pcie")) { sc->sc_type = MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET(unit); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR(unit); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR(unit); } else if (fdt_is_compatible(node, "mrvl,pci")) { sc->sc_type = MV_TYPE_PCI; sc->sc_win_target = MV_WIN_PCI_TARGET; sc->sc_mem_win_attr = MV_WIN_PCI_MEM_ATTR; sc->sc_io_win_attr = MV_WIN_PCI_IO_ATTR; } else return (ENXIO); /* * Retrieve our mem-mapped registers range. */ sc->sc_rid = 0; sc->sc_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(self, "could not map memory\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_res); sc->sc_bsh = rman_get_bushandle(sc->sc_res); val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_CONTROL); sc->sc_mode = (val & PCIE_CONTROL_ROOT_CMPLX ? MV_MODE_ROOT : MV_MODE_ENDPOINT); /* * Get PCI interrupt info. */ if (sc->sc_mode == MV_MODE_ROOT) ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(pcell_t)); /* * Configure decode windows for PCI(E) access. */ if (mv_pcib_decode_win(node, sc) != 0) return (ENXIO); mv_pcib_hw_cfginit(); /* * Enable PCIE device. */ mv_pcib_enable(sc, unit); /* * Memory management. 
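 * mv_pcib_mem_init() sets up the rman arenas that back the I/O and memory
 * decode windows configured above.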
*/ err = mv_pcib_mem_init(sc); if (err) return (err); if (sc->sc_mode == MV_MODE_ROOT) { err = mv_pcib_init(sc, sc->sc_busnr, mv_pcib_maxslots(sc->sc_dev)); if (err) goto error; device_add_child(self, "pci", -1); } else { sc->sc_devnr = 1; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS, 1 << PCIE_STATUS_DEV_OFFS); device_add_child(self, "pci_ep", -1); } mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF); return (bus_generic_attach(self)); error: /* XXX SYS_RES_ should be released here */ rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static void mv_pcib_enable(struct mv_pcib_softc *sc, uint32_t unit) { uint32_t val; #if !defined(SOC_MV_ARMADAXP) int timeout; /* * Check if PCIE device is enabled. */ if (read_cpu_ctrl(CPU_CONTROL) & CPU_CONTROL_PCIE_DISABLE(unit)) { write_cpu_ctrl(CPU_CONTROL, read_cpu_ctrl(CPU_CONTROL) & ~(CPU_CONTROL_PCIE_DISABLE(unit))); timeout = PCIE_LINK_TIMEOUT; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); while (((val & PCIE_STATUS_LINK_DOWN) == 1) && (timeout > 0)) { DELAY(1000); timeout -= 1000; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); } } #endif if (sc->sc_mode == MV_MODE_ROOT) { /* * Enable PCI bridge. */ val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND); val |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND, val); } } static int mv_pcib_mem_init(struct mv_pcib_softc *sc) { int err; /* * Memory management. */ sc->sc_mem_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_mem_rman); if (err) return (err); sc->sc_io_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_io_rman); if (err) { rman_fini(&sc->sc_mem_rman); return (err); } err = rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base, sc->sc_mem_base + sc->sc_mem_size - 1); if (err) goto error; err = rman_manage_region(&sc->sc_io_rman, sc->sc_io_base, sc->sc_io_base + sc->sc_io_size - 1); if (err) goto error; return (0); error: rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static inline uint32_t pcib_bit_get(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; return (map[n] & (1 << bit)); } static inline void pcib_bit_set(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; map[n] |= (1 << bit); } static inline uint32_t pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) if (pcib_bit_get(map, i)) return (0); return (1); } static inline void pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) pcib_bit_set(map, i); } /* * The idea of this allocator is taken from ARM No-Cache memory * management code (sys/arm/arm/vm_machdep.c). 
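 * The smask argument is the size mask read back from a probed BAR, so the
 * region size is ~smask + 1. Free space is tracked in a bitmap with one
 * bit per minimum allocation unit, and a first-fit scan over that bitmap
 * claims the range.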
*/ static bus_addr_t pcib_alloc(struct mv_pcib_softc *sc, uint32_t smask) { uint32_t bits, bits_limit, i, *map, min_alloc, size; bus_addr_t addr = 0; bus_addr_t base; if (smask & 1) { base = sc->sc_io_base; min_alloc = PCI_MIN_IO_ALLOC; bits_limit = sc->sc_io_size / min_alloc; map = sc->sc_io_map; smask &= ~0x3; } else { base = sc->sc_mem_base; min_alloc = PCI_MIN_MEM_ALLOC; bits_limit = sc->sc_mem_size / min_alloc; map = sc->sc_mem_map; smask &= ~0xF; } size = ~smask + 1; bits = size / min_alloc; for (i = 0; i + bits <= bits_limit; i += bits) if (pcib_map_check(map, i, bits)) { pcib_map_set(map, i, bits); addr = base + (i * min_alloc); return (addr); } return (addr); } static int mv_pcib_init_bar(struct mv_pcib_softc *sc, int bus, int slot, int func, int barno) { uint32_t addr, bar; int reg, width; reg = PCIR_BAR(barno); /* * Need to init the BAR register with 0xffffffff before correct * value can be read. */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, ~0, 4); bar = mv_pcib_read_config(sc->sc_dev, bus, slot, func, reg, 4); if (bar == 0) return (1); /* Calculate BAR size: 64 or 32 bit (in 32-bit units) */ width = ((bar & 7) == 4) ? 2 : 1; addr = pcib_alloc(sc, bar); if (!addr) return (-1); if (bootverbose) printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n", bus, slot, func, reg, bar, addr); mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, addr, 4); if (width == 2) mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg + 4, 0, 4); return (width); } static void mv_pcib_init_bridge(struct mv_pcib_softc *sc, int bus, int slot, int func) { bus_addr_t io_base, mem_base; uint32_t io_limit, mem_limit; int secbus; io_base = sc->sc_io_base; io_limit = io_base + sc->sc_io_size - 1; mem_base = sc->sc_mem_base; mem_limit = mem_base + sc->sc_mem_size - 1; /* Configure I/O decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEL_1, io_base >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEH_1, io_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITL_1, io_limit >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITH_1, io_limit >> 16, 2); /* Configure memory decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMBASE_1, mem_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMLIMIT_1, mem_limit >> 16, 2); /* Disable memory prefetch decode */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEL_1, 0x10, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEH_1, 0x0, 4); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITL_1, 0xF, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITH_1, 0x0, 4); secbus = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SECBUS_1, 1); /* Configure buses behind the bridge */ mv_pcib_init(sc, secbus, PCI_SLOTMAX); } static int mv_pcib_init(struct mv_pcib_softc *sc, int bus, int maxslot) { int slot, func, maxfunc, error; uint8_t hdrtype, command, class, subclass; for (slot = 0; slot <= maxslot; slot++) { maxfunc = 0; for (func = 0; func <= maxfunc; func++) { hdrtype = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (func == 0 && (hdrtype & PCIM_MFDEV)) maxfunc = PCI_FUNCMAX; command = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, 1); command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); error = 
mv_pcib_init_all_bars(sc, bus, slot, func, hdrtype); if (error) return (error); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); /* Handle PCI-PCI bridges */ class = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_CLASS, 1); subclass = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SUBCLASS, 1); if (class != PCIC_BRIDGE || subclass != PCIS_BRIDGE_PCI) continue; mv_pcib_init_bridge(sc, bus, slot, func); } } /* Enable all ABCD interrupts */ pcib_write_irq_mask(sc, (0xF << 24)); return (0); } static int mv_pcib_init_all_bars(struct mv_pcib_softc *sc, int bus, int slot, int func, int hdrtype) { int maxbar, bar, i; maxbar = (hdrtype & PCIM_HDRTYPE) ? 0 : 6; bar = 0; /* Program the base address registers */ while (bar < maxbar) { i = mv_pcib_init_bar(sc, bus, slot, func, bar); bar += i; if (i < 0) { device_printf(sc->sc_dev, "PCI IO/Memory space exhausted\n"); return (ENOMEM); } } return (0); } static struct resource * mv_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mv_pcib_softc *sc = device_get_softc(dev); struct rman *rm = NULL; struct resource *res; switch (type) { case SYS_RES_IOPORT: rm = &sc->sc_io_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags)); }; if ((start == 0UL) && (end == ~0UL)) { start = sc->sc_mem_base; end = sc->sc_mem_base + sc->sc_mem_size - 1; count = sc->sc_mem_size; } if ((start < sc->sc_mem_base) || (start + count - 1 != end) || (end > sc->sc_mem_base + sc->sc_mem_size - 1)) return (NULL); res = rman_reserve_resource(rm, start, end, count, flags, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_bustag(res, fdtbus_bs_tag); rman_set_bushandle(res, start); if (flags & RF_ACTIVE) if (bus_activate_resource(child, type, *rid, res)) { rman_release_resource(res); return (NULL); } return (res); } static int mv_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, res)); return (rman_release_resource(res)); } static int mv_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->sc_busnr; return (0); case PCIB_IVAR_DOMAIN: *result = device_get_unit(dev); return (0); } return (ENOENT); } static int mv_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busnr = value; return (0); } return (ENOENT); } static inline void pcib_write_irq_mask(struct mv_pcib_softc *sc, uint32_t mask) { if (sc->sc_type != MV_TYPE_PCI) return; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_IRQ_MASK, mask); } static void mv_pcib_hw_cfginit(void) { static int opened = 0; if (opened) return; mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN); opened = 1; } static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t addr, data, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? 
PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); data = ~0; switch (bytes) { case 1: data = bus_space_read_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3)); break; case 2: data = le16toh(bus_space_read_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2))); break; case 4: data = le32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh, cd)); break; } mtx_unlock_spin(&pcicfg_mtx); return (data); } static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { uint32_t addr, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); switch (bytes) { case 1: bus_space_write_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3), data); break; case 2: bus_space_write_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2), htole16(data)); break; case 4: bus_space_write_4(sc->sc_bst, sc->sc_bsh, cd, htole32(data)); break; } mtx_unlock_spin(&pcicfg_mtx); } static int mv_pcib_maxslots(device_t dev) { struct mv_pcib_softc *sc = device_get_softc(dev); return ((sc->sc_type != MV_TYPE_PCI) ? 1 : PCI_SLOTMAX); } static int mv_pcib_root_slot(device_t dev, u_int bus, u_int slot, u_int func) { #if defined(SOC_MV_ARMADA38X) struct mv_pcib_softc *sc = device_get_softc(dev); uint32_t vendor, device; vendor = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_VENDOR, PCIR_VENDOR_LENGTH); device = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_DEVICE, PCIR_DEVICE_LENGTH) & MV_DEV_FAMILY_MASK; return (vendor == PCI_VENDORID_MRVL && device == MV_DEV_ARMADA38X); #else /* On platforms other than Armada38x, root link is always at slot 0 */ return (slot == 0); #endif } static uint32_t mv_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return ~0 if link is inactive or trying to read from Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return (~0U); return (mv_pcib_hw_cfgread(sc, bus, slot, func, reg, bytes)); } static void mv_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return if link is inactive or trying to write to Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return; mv_pcib_hw_cfgwrite(sc, bus, slot, func, reg, val, bytes); } static int mv_pcib_route_interrupt(device_t bus, device_t dev, int pin) { struct mv_pcib_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[4]; int icells; phandle_t iparent; sc = device_get_softc(bus); pintr = pin; /* Fabricate imap information in case this isn't an OFW device */ bzero(&reg, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); icells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, &reg, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (icells > 0) return
(ofw_bus_map_intr(dev, iparent, icells, mintr)); /* Maybe it's a real interrupt, not an intpin */ if (pin > 4) return (pin); device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } static int mv_pcib_decode_win(phandle_t node, struct mv_pcib_softc *sc) { struct mv_pci_range io_space, mem_space; device_t dev; int error; dev = sc->sc_dev; if ((error = mv_pci_ranges(node, &io_space, &mem_space)) != 0) { device_printf(dev, "could not retrieve 'ranges' data\n"); return (error); } /* Configure CPU decoding windows */ error = decode_win_cpu_set(sc->sc_win_target, sc->sc_io_win_attr, io_space.base_parent, io_space.len, ~0); if (error < 0) { device_printf(dev, "could not set up CPU decode " "window for PCI IO\n"); return (ENXIO); } error = decode_win_cpu_set(sc->sc_win_target, sc->sc_mem_win_attr, mem_space.base_parent, mem_space.len, mem_space.base_parent); if (error < 0) { device_printf(dev, "could not set up CPU decode " "windows for PCI MEM\n"); return (ENXIO); } sc->sc_io_base = io_space.base_parent; sc->sc_io_size = io_space.len; sc->sc_mem_base = mem_space.base_parent; sc->sc_mem_size = mem_space.len; return (0); } #if defined(SOC_MV_ARMADAXP) static int mv_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct mv_pcib_softc *sc; sc = device_get_softc(dev); irq = irq - MSI_IRQ; /* validate parameters */ if (isclr(&sc->sc_msi_bitmap, irq)) { device_printf(dev, "invalid MSI 0x%x\n", irq); return (EINVAL); } mv_msi_data(irq, addr, data); debugf("%s: irq: %d addr: %jx data: %x\n", __func__, irq, *addr, *data); return (0); } static int mv_pcib_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused, int *irqs) { struct mv_pcib_softc *sc; u_int start = 0, i; if (powerof2(count) == 0 || count > MSI_IRQ_NUM) return (EINVAL); sc = device_get_softc(dev); mtx_lock(&sc->sc_msi_mtx); for (start = 0; (start + count) < MSI_IRQ_NUM; start++) { for (i = start; i < start + count; i++) { if (isset(&sc->sc_msi_bitmap, i)) break; } if (i == start + count) break; } if ((start + count) == MSI_IRQ_NUM) { mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } for (i = start; i < start + count; i++) { setbit(&sc->sc_msi_bitmap, i); *irqs++ = MSI_IRQ + i; } debugf("%s: start: %x count: %x\n", __func__, start, count); mtx_unlock(&sc->sc_msi_mtx); return (0); } static int mv_pcib_release_msi(device_t dev, device_t child, int count, int *irqs) { struct mv_pcib_softc *sc; u_int i; sc = device_get_softc(dev); mtx_lock(&sc->sc_msi_mtx); for (i = 0; i < count; i++) clrbit(&sc->sc_msi_bitmap, irqs[i] - MSI_IRQ); mtx_unlock(&sc->sc_msi_mtx); return (0); } #endif Index: head/sys/arm/mv/orion/db88f5xxx.c =================================================================== --- head/sys/arm/mv/orion/db88f5xxx.c (revision 295693) +++ head/sys/arm/mv/orion/db88f5xxx.c (revision 295694) @@ -1,185 +1,173 @@ /*- * Copyright (C) 2008 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include /* * Virtual address space layout: * ----------------------------- * 0x0000_0000 - 0xbfff_ffff : user process * * 0xc040_0000 - virtual_avail : kernel reserved (text, data, page tables * : structures, ARM stacks etc.) * virtual_avail - 0xefff_ffff : KVA (virtual_avail is typically < 0xc0a0_0000) * 0xf000_0000 - 0xf0ff_ffff : no-cache allocation area (16MB) * 0xf100_0000 - 0xf10f_ffff : SoC integrated devices registers range (1MB) * 0xf110_0000 - 0xf11f_ffff : PCI-Express I/O space (1MB) * 0xf120_0000 - 0xf12f_ffff : PCI I/O space (1MB) * 0xf130_0000 - 0xf52f_ffff : PCI-Express memory space (64MB) * 0xf530_0000 - 0xf92f_ffff : PCI memory space (64MB) * 0xf930_0000 - 0xfffe_ffff : unused (~108MB) * 0xffff_0000 - 0xffff_0fff : 'high' vectors page (4KB) * 0xffff_1000 - 0xffff_1fff : ARM_TP_ADDRESS/RAS page (4KB) * 0xffff_2000 - 0xffff_ffff : unused (~55KB) */ #if 0 int platform_pci_get_irq(u_int bus, u_int slot, u_int func, u_int pin); /* Static device mappings. */ const struct arm_devmap_entry db88f5xxx_devmap[] = { /* * Map the on-board devices VA == PA so that we can access them * with the MMU on or off. */ { /* SoC integrated peripherals registers range */ MV_BASE, MV_PHYS_BASE, MV_SIZE, - VM_PROT_READ | VM_PROT_WRITE, - PTE_DEVICE, }, { /* PCIE I/O */ MV_PCIE_IO_BASE, MV_PCIE_IO_PHYS_BASE, MV_PCIE_IO_SIZE, - VM_PROT_READ | VM_PROT_WRITE, - PTE_DEVICE, }, { /* PCIE Memory */ MV_PCIE_MEM_BASE, MV_PCIE_MEM_PHYS_BASE, MV_PCIE_MEM_SIZE, - VM_PROT_READ | VM_PROT_WRITE, - PTE_DEVICE, }, { /* PCI I/O */ MV_PCI_IO_BASE, MV_PCI_IO_PHYS_BASE, MV_PCI_IO_SIZE, - VM_PROT_READ | VM_PROT_WRITE, - PTE_DEVICE, }, { /* PCI Memory */ MV_PCI_MEM_BASE, MV_PCI_MEM_PHYS_BASE, MV_PCI_MEM_SIZE, - VM_PROT_READ | VM_PROT_WRITE, - PTE_DEVICE, }, { /* 7-seg LED */ MV_DEV_CS0_BASE, MV_DEV_CS0_PHYS_BASE, MV_DEV_CS0_SIZE, - VM_PROT_READ | VM_PROT_WRITE, - PTE_DEVICE, }, - { 0, 0, 0, 0, 0, } + { 0, 0, 0, } }; /* * The pci_irq_map table consists of 3 columns: * - PCI slot number (less than zero means ANY). * - PCI IRQ pin (less than zero means ANY). * - PCI IRQ (less than zero marks end of table). 
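Such a sentinel-terminated map lends itself to a simple first-match scan. A minimal sketch of that lookup, with hypothetical names and a stand-in struct layout inferred from the three columns described here:

/*
 * Hypothetical first-match lookup over a sentinel-terminated IRQ map.
 * Negative slot/pin entries act as wildcards; a negative IRQ ends the
 * table.  A sketch only, not the driver's actual helper.
 */
struct irq_map_sketch {
	int	slot;	/* PCI slot number, < 0 matches any */
	int	pin;	/* PCI IRQ pin, < 0 matches any */
	int	irq;	/* IRQ to assign, < 0 marks end of table */
};

static int
irq_map_lookup(const struct irq_map_sketch *m, int slot, int pin)
{
	for (; m->irq >= 0; m++)
		if ((m->slot < 0 || m->slot == slot) &&
		    (m->pin < 0 || m->pin == pin))
			return (m->irq);	/* first match wins */
	return (-1);				/* no route found */
}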
* * IRQ number from the first matching entry is used to configure PCI device */ /* PCI IRQ Map for DB-88F5281 */ const struct obio_pci_irq_map pci_irq_map[] = { { 7, -1, GPIO2IRQ(12) }, { 8, -1, GPIO2IRQ(13) }, { 9, -1, GPIO2IRQ(13) }, { -1, -1, -1 } }; /* PCI IRQ Map for DB-88F5182 */ const struct obio_pci_irq_map pci_irq_map[] = { { 7, -1, GPIO2IRQ(0) }, { 8, -1, GPIO2IRQ(1) }, { 9, -1, GPIO2IRQ(1) }, { -1, -1, -1 } }; #endif #if 0 /* * mv_gpio_config row structure: * <pin>, <flags>, <mode> * * - GPIO pin number (less than zero marks end of table) * - GPIO flags: * MV_GPIO_BLINK * MV_GPIO_POLAR_LOW * MV_GPIO_EDGE * MV_GPIO_LEVEL * - GPIO mode: * 1 - Output, set to HIGH. * 0 - Output, set to LOW. * -1 - Input. */ /* GPIO Configuration for DB-88F5281 */ const struct gpio_config mv_gpio_config[] = { { 12, MV_GPIO_POLAR_LOW | MV_GPIO_LEVEL, -1 }, { 13, MV_GPIO_POLAR_LOW | MV_GPIO_LEVEL, -1 }, { -1, -1, -1 } }; #if 0 /* GPIO Configuration for DB-88F5182 */ const struct gpio_config mv_gpio_config[] = { { 0, MV_GPIO_POLAR_LOW | MV_GPIO_LEVEL, -1 }, { 1, MV_GPIO_POLAR_LOW | MV_GPIO_LEVEL, -1 }, { -1, -1, -1 } }; #endif #endif Index: head/sys/arm/versatile/versatile_machdep.c =================================================================== --- head/sys/arm/versatile/versatile_machdep.c (revision 295693) +++ head/sys/arm/versatile/versatile_machdep.c (revision 295694) @@ -1,127 +1,125 @@ /*- * Copyright (c) 2012 Oleksandr Tymoshenko. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* */ #include "opt_ddb.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #define _ARM32_BUS_DMA_PRIVATE #include #include #include #include #include #include #include #include #include #include /* Start of address space used for bootstrap map */ #define DEVMAP_BOOTSTRAP_MAP_START 0xE0000000 vm_offset_t platform_lastaddr(void) { return (DEVMAP_BOOTSTRAP_MAP_START); } void platform_probe_and_attach(void) { } void platform_gpio_init(void) { } void platform_late_init(void) { } #define FDT_DEVMAP_MAX (2) /* FIXME */ static struct arm_devmap_entry fdt_devmap[FDT_DEVMAP_MAX] = { - { 0, 0, 0, 0, 0, }, - { 0, 0, 0, 0, 0, } + { 0, 0, 0, }, + { 0, 0, 0, } }; /* * Construct devmap table with DT-derived config data. */ int platform_devmap_init(void) { int i = 0; fdt_devmap[i].pd_va = 0xf0100000; fdt_devmap[i].pd_pa = 0x10100000; fdt_devmap[i].pd_size = 0x01000000; /* 1 MB */ - fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE; - fdt_devmap[i].pd_cache = PTE_DEVICE; arm_devmap_register_table(&fdt_devmap[0]); return (0); } struct arm32_dma_range * bus_dma_get_range(void) { return (NULL); } int bus_dma_get_range_nb(void) { return (0); } void cpu_reset() { printf("cpu_reset\n"); while (1); } Index: head/sys/arm/xscale/i8134x/crb_machdep.c =================================================================== --- head/sys/arm/xscale/i8134x/crb_machdep.c (revision 295693) +++ head/sys/arm/xscale/i8134x/crb_machdep.c (revision 295694) @@ -1,340 +1,332 @@ /* $NetBSD: hpc_machdep.c,v 1.70 2003/09/16 08:18:22 agc Exp $ */ /*- * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * machdep.c * * Machine dependant functions for kernel setup * * This file needs a lot of work. 
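With pd_prot and pd_cache dropped from struct arm_devmap_entry, a platform's devmap setup reduces to filling in virtual address, physical address, and size, then registering the table, as platform_devmap_init() above now does. A minimal sketch under that three-field layout (addresses and sizes are placeholders, not real hardware values):

/* Sketch: one static device mapping plus the zeroed sentinel entry. */
static struct arm_devmap_entry sketch_devmap[] = {
	{ 0xf0100000, 0x10100000, 0x00100000 },	/* pd_va, pd_pa, pd_size */
	{ 0, 0, 0 }
};

int
sketch_platform_devmap_init(void)
{
	arm_devmap_register_table(sketch_devmap);
	return (0);	/* arm_devmap_bootstrap() establishes the mappings later */
}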
* * Created : 17/09/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_kstack_pages.h" #define _ARM32_BUS_DMA_PRIVATE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* For i80321_calibrate_delay() */ #include #include #include #define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */ #define KERNEL_PT_IOPXS 1 #define KERNEL_PT_BEFOREKERN 2 #define KERNEL_PT_AFKERNEL 3 /* L2 table for mapping after kernel */ #define KERNEL_PT_AFKERNEL_NUM 9 /* this should be evenly divisable by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */ #define NUM_KERNEL_PTS (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM) struct pv_addr kernel_pt_table[NUM_KERNEL_PTS]; /* Physical and virtual addresses for some global pages */ struct pv_addr systempage; struct pv_addr msgbufpv; struct pv_addr irqstack; struct pv_addr undstack; struct pv_addr abtstack; struct pv_addr kernelstack; /* Static device mappings. */ static const struct arm_devmap_entry iq81342_devmap[] = { { IOP34X_VADDR, IOP34X_HWADDR, IOP34X_SIZE, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, { /* * Cheat and map a whole section, this will bring * both PCI-X and PCI-E outbound I/O */ IOP34X_PCIX_OIOBAR_VADDR &~ (0x100000 - 1), IOP34X_PCIX_OIOBAR &~ (0x100000 - 1), 0x100000, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, { IOP34X_PCE1_VADDR, IOP34X_PCE1, IOP34X_PCE1_SIZE, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, { - 0, - 0, 0, 0, 0, } }; #define SDRAM_START 0x00000000 extern vm_offset_t xscale_cache_clean_addr; void * initarm(struct arm_boot_params *abp) { struct pv_addr kernel_l1pt; struct pv_addr dpcpu; int loop, i; u_int l1pagetable; vm_offset_t freemempos; vm_offset_t freemem_pt; vm_offset_t afterkern; vm_offset_t freemem_after; vm_offset_t lastaddr; uint32_t memsize, memstart; lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; set_cpufuncs(); pcpu_init(pcpup, 0, sizeof(struct pcpu)); PCPU_SET(curthread, &thread0); /* Do basic tuning, hz etc */ init_param1(); freemempos = 0x00200000; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_pa, (np)); \ (var).pv_va = (var).pv_pa + 0xc0000000; #define alloc_pages(var, np) \ freemempos -= (np * PAGE_SIZE); \ (var) = freemempos; \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos -= PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[loop], L2_TABLE_SIZE / PAGE_SIZE); } else { kernel_pt_table[loop].pv_pa = freemempos + (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * L2_TABLE_SIZE_REAL; kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa + 0xc0000000; } } freemem_pt = freemempos; freemempos = 0x00100000; /* * Allocate a page for the system page mapped to V0x00000000 * This page will just contain the system vectors and can be * shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. 
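The alloc_pages()/valloc_pages() macros above implement a downward bump allocator over memory that locore has mapped with PA == VA; valloc_pages() additionally derives the eventual kernel VA by adding a constant offset (0xc0000000 here). A function-style sketch of the same idea, names hypothetical:

/*
 * Carve np zeroed pages off the bottom of the bootstrap region.
 * Valid only while physical and virtual addresses coincide, as the
 * macros above assume.
 */
static vm_offset_t sketch_freemempos;		/* grows downward */

static vm_offset_t
sketch_alloc_pages(int np)
{
	sketch_freemempos -= np * PAGE_SIZE;
	memset((void *)sketch_freemempos, 0, np * PAGE_SIZE);
	return (sketch_freemempos);
}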
*/ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE); valloc_pages(abtstack, ABT_STACK_SIZE); valloc_pages(undstack, UND_STACK_SIZE); valloc_pages(kernelstack, kstack_pages); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* Map the L2 pages tables in the L1 page table */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1), &kernel_pt_table[KERNEL_PT_SYS]); pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000, 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000, (((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1), VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1); afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) & ~(L1_S_SIZE - 1)); for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) { pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000, &kernel_pt_table[KERNEL_PT_AFKERNEL + i]); } /* Map the vector page. */ pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); arm_devmap_bootstrap(l1pagetable, iq81342_devmap); /* * Give the XScale global cache clean code an appropriately * sized chunk of unmapped VA space starting at 0xff000000 * (our device mappings end before this address). */ xscale_cache_clean_addr = 0xff000000U; cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); cpu_setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ cpu_idcache_wbinv_all(); cpu_setup(); i80321_calibrate_delay(); i81342_sdram_bounds(obio_bs_tag, IOP34X_VADDR, &memstart, &memsize); physmem = memsize / PAGE_SIZE; cninit(); /* Set stack for exception handlers */ undefined_init(); init_proc0(kernelstack.pv_va); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_curmaxkvaddr = afterkern + PAGE_SIZE; vm_max_kernel_address = 0xe0000000; pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt); msgbufp = (void*)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Add the physical ram we have available. * * Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. 
virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_hardware_region(SDRAM_START, memsize); arm_physmem_exclude_region(freemem_pt, abp->abp_physaddr - freemem_pt, EXFLAG_NOALLOC); arm_physmem_exclude_region(freemempos, abp->abp_physaddr - 0x100000 - freemempos, EXFLAG_NOALLOC); arm_physmem_exclude_region(abp->abp_physaddr, virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } Index: head/sys/arm/xscale/ixp425/avila_machdep.c =================================================================== --- head/sys/arm/xscale/ixp425/avila_machdep.c (revision 295693) +++ head/sys/arm/xscale/ixp425/avila_machdep.c (revision 295694) @@ -1,433 +1,417 @@ /* $NetBSD: hpc_machdep.c,v 1.70 2003/09/16 08:18:22 agc Exp $ */ /*- * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * machdep.c * * Machine dependant functions for kernel setup * * This file needs a lot of work. 
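The arm_physmem_* sequence just above follows a pattern each initarm() in this commit repeats: declare the RAM that exists, exclude what the bootstrap already consumed from allocation (but not from crash dumps), then publish the result. A condensed sketch with placeholder bounds:

/* Sketch of the physmem bring-up pattern; bounds are placeholders. */
static void
sketch_physmem_setup(vm_paddr_t ram_base, vm_size_t ram_size,
    vm_paddr_t kern_base, vm_size_t kern_size)
{
	arm_physmem_hardware_region(ram_base, ram_size);
	/* Keep the VM allocator away from kernel/bootstrap memory. */
	arm_physmem_exclude_region(kern_base, kern_size, EXFLAG_NOALLOC);
	/* Publish the final region lists to the VM subsystem. */
	arm_physmem_init_kernel_globals();
}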
* * Created : 17/09/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_kstack_pages.h" #define _ARM32_BUS_DMA_PRIVATE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */ #define KERNEL_PT_IO 1 #define KERNEL_PT_IO_NUM 3 #define KERNEL_PT_BEFOREKERN KERNEL_PT_IO + KERNEL_PT_IO_NUM #define KERNEL_PT_AFKERNEL KERNEL_PT_BEFOREKERN + 1 /* L2 table for mapping after kernel */ #define KERNEL_PT_AFKERNEL_NUM 9 /* this should be evenly divisable by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */ #define NUM_KERNEL_PTS (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM) struct pv_addr kernel_pt_table[NUM_KERNEL_PTS]; /* Physical and virtual addresses for some global pages */ struct pv_addr systempage; struct pv_addr msgbufpv; struct pv_addr irqstack; struct pv_addr undstack; struct pv_addr abtstack; struct pv_addr kernelstack; struct pv_addr minidataclean; /* Static device mappings. */ static const struct arm_devmap_entry ixp425_devmap[] = { /* Physical/Virtual address for I/O space */ - { IXP425_IO_VBASE, IXP425_IO_HWBASE, IXP425_IO_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_IO_VBASE, IXP425_IO_HWBASE, IXP425_IO_SIZE, }, /* Expansion Bus */ - { IXP425_EXP_VBASE, IXP425_EXP_HWBASE, IXP425_EXP_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_EXP_VBASE, IXP425_EXP_HWBASE, IXP425_EXP_SIZE, }, /* CFI Flash on the Expansion Bus */ { IXP425_EXP_BUS_CS0_VBASE, IXP425_EXP_BUS_CS0_HWBASE, - IXP425_EXP_BUS_CS0_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + IXP425_EXP_BUS_CS0_SIZE, }, /* IXP425 PCI Configuration */ - { IXP425_PCI_VBASE, IXP425_PCI_HWBASE, IXP425_PCI_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_PCI_VBASE, IXP425_PCI_HWBASE, IXP425_PCI_SIZE, }, /* SDRAM Controller */ - { IXP425_MCU_VBASE, IXP425_MCU_HWBASE, IXP425_MCU_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_MCU_VBASE, IXP425_MCU_HWBASE, IXP425_MCU_SIZE, }, /* PCI Memory Space */ - { IXP425_PCI_MEM_VBASE, IXP425_PCI_MEM_HWBASE, IXP425_PCI_MEM_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_PCI_MEM_VBASE, IXP425_PCI_MEM_HWBASE, IXP425_PCI_MEM_SIZE, }, /* Q-Mgr Memory Space */ - { IXP425_QMGR_VBASE, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_QMGR_VBASE, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, }, { 0 }, }; /* Static device mappings. 
*/ static const struct arm_devmap_entry ixp435_devmap[] = { /* Physical/Virtual address for I/O space */ - { IXP425_IO_VBASE, IXP425_IO_HWBASE, IXP425_IO_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_IO_VBASE, IXP425_IO_HWBASE, IXP425_IO_SIZE, }, - { IXP425_EXP_VBASE, IXP425_EXP_HWBASE, IXP425_EXP_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_EXP_VBASE, IXP425_EXP_HWBASE, IXP425_EXP_SIZE, }, /* IXP425 PCI Configuration */ - { IXP425_PCI_VBASE, IXP425_PCI_HWBASE, IXP425_PCI_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_PCI_VBASE, IXP425_PCI_HWBASE, IXP425_PCI_SIZE, }, /* DDRII Controller NB: mapped same place as IXP425 */ - { IXP425_MCU_VBASE, IXP435_MCU_HWBASE, IXP425_MCU_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_MCU_VBASE, IXP435_MCU_HWBASE, IXP425_MCU_SIZE, }, /* PCI Memory Space */ - { IXP425_PCI_MEM_VBASE, IXP425_PCI_MEM_HWBASE, IXP425_PCI_MEM_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_PCI_MEM_VBASE, IXP425_PCI_MEM_HWBASE, IXP425_PCI_MEM_SIZE, }, /* Q-Mgr Memory Space */ - { IXP425_QMGR_VBASE, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP425_QMGR_VBASE, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, }, /* CFI Flash on the Expansion Bus */ { IXP425_EXP_BUS_CS0_VBASE, IXP425_EXP_BUS_CS0_HWBASE, - IXP425_EXP_BUS_CS0_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + IXP425_EXP_BUS_CS0_SIZE, }, /* USB1 Memory Space */ - { IXP435_USB1_VBASE, IXP435_USB1_HWBASE, IXP435_USB1_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP435_USB1_VBASE, IXP435_USB1_HWBASE, IXP435_USB1_SIZE, }, /* USB2 Memory Space */ - { IXP435_USB2_VBASE, IXP435_USB2_HWBASE, IXP435_USB2_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { IXP435_USB2_VBASE, IXP435_USB2_HWBASE, IXP435_USB2_SIZE, }, /* GPS Memory Space */ - { CAMBRIA_GPS_VBASE, CAMBRIA_GPS_HWBASE, CAMBRIA_GPS_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { CAMBRIA_GPS_VBASE, CAMBRIA_GPS_HWBASE, CAMBRIA_GPS_SIZE, }, /* RS485 Memory Space */ - { CAMBRIA_RS485_VBASE, CAMBRIA_RS485_HWBASE, CAMBRIA_RS485_SIZE, - VM_PROT_READ|VM_PROT_WRITE, PTE_DEVICE, }, + { CAMBRIA_RS485_VBASE, CAMBRIA_RS485_HWBASE, CAMBRIA_RS485_SIZE, }, { 0 } }; extern vm_offset_t xscale_cache_clean_addr; void * initarm(struct arm_boot_params *abp) { #define next_chunk2(a,b) (((a) + (b)) &~ ((b)-1)) #define next_page(a) next_chunk2(a,PAGE_SIZE) struct pv_addr kernel_l1pt; struct pv_addr dpcpu; int loop, i; u_int l1pagetable; vm_offset_t freemempos; vm_offset_t freemem_pt; vm_offset_t afterkern; vm_offset_t freemem_after; vm_offset_t lastaddr; uint32_t memsize; /* kernel text starts where we were loaded at boot */ #define KERNEL_TEXT_OFF (abp->abp_physaddr - PHYSADDR) #define KERNEL_TEXT_BASE (KERNBASE + KERNEL_TEXT_OFF) #define KERNEL_TEXT_PHYS (PHYSADDR + KERNEL_TEXT_OFF) lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; set_cpufuncs(); /* NB: sets cputype */ pcpu_init(pcpup, 0, sizeof(struct pcpu)); PCPU_SET(curthread, &thread0); init_static_kenv(NULL, 0); /* Do basic tuning, hz etc */ init_param1(); /* * We allocate memory downwards from where we were loaded * by RedBoot; first the L1 page table, then NUM_KERNEL_PTS * entries in the L2 page table. Past that we re-align the * allocation boundary so later data structures (stacks, etc) * can be mapped with different attributes (write-back vs * write-through). Note this leaves a gap for expansion * (or might be repurposed). 
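Part of that downward-allocation scheme is forcing the 16 KB L1 page table onto an L1_TABLE_SIZE boundary. The page-at-a-time loop used just below is equivalent to a single round-down when freemempos is already page-aligned, since L1_TABLE_SIZE is a power of two and a multiple of PAGE_SIZE; a sketch of that equivalence, not the code as committed:

/* Round the bootstrap pointer down to an L1 table boundary. */
static vm_offset_t
sketch_align_l1(vm_offset_t freemempos)
{
	return (freemempos & ~(vm_offset_t)(L1_TABLE_SIZE - 1));
}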
*/ freemempos = abp->abp_physaddr; /* macros to simplify initial memory allocation */ #define alloc_pages(var, np) do { \ freemempos -= (np * PAGE_SIZE); \ (var) = freemempos; \ /* NB: this works because locore maps PA=VA */ \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); \ } while (0) #define valloc_pages(var, np) do { \ alloc_pages((var).pv_pa, (np)); \ (var).pv_va = (var).pv_pa + (KERNVIRTADDR - abp->abp_physaddr); \ } while (0) /* force L1 page table alignment */ while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos -= PAGE_SIZE; /* allocate contiguous L1 page table */ valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); /* now allocate L2 page tables; they are linked to L1 below */ for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[loop], L2_TABLE_SIZE / PAGE_SIZE); } else { kernel_pt_table[loop].pv_pa = freemempos + (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * L2_TABLE_SIZE_REAL; kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa + (KERNVIRTADDR - abp->abp_physaddr); } } freemem_pt = freemempos; /* base of allocated pt's */ /* * Re-align allocation boundary so we can map the area * write-back instead of write-through for the stacks and * related structures allocated below. */ freemempos = PHYSADDR + 0x100000; /* * Allocate a page for the system page mapped to V0x00000000 * This page will just contain the system vectors and can be * shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. */ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE); valloc_pages(abtstack, ABT_STACK_SIZE); valloc_pages(undstack, UND_STACK_SIZE); valloc_pages(kernelstack, kstack_pages); alloc_pages(minidataclean.pv_pa, 1); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now construct the L1 page table. First map the L2 * page tables into the L1 so we can replace L1 mappings * later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* Map the L2 pages tables in the L1 page table */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1), &kernel_pt_table[KERNEL_PT_SYS]); pmap_link_l2pt(l1pagetable, IXP425_IO_VBASE, &kernel_pt_table[KERNEL_PT_IO]); pmap_link_l2pt(l1pagetable, IXP425_MCU_VBASE, &kernel_pt_table[KERNEL_PT_IO + 1]); pmap_link_l2pt(l1pagetable, IXP425_PCI_MEM_VBASE, &kernel_pt_table[KERNEL_PT_IO + 2]); pmap_link_l2pt(l1pagetable, KERNBASE, &kernel_pt_table[KERNEL_PT_BEFOREKERN]); pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, PHYSADDR + 0x100000, 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, KERNEL_TEXT_PHYS, next_chunk2(((uint32_t)lastaddr) - KERNEL_TEXT_BASE, L1_S_SIZE), VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); freemem_after = next_page((int)lastaddr); afterkern = round_page(next_chunk2((vm_offset_t)lastaddr, L1_S_SIZE)); for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) { pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000, &kernel_pt_table[KERNEL_PT_AFKERNEL + i]); } pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the Mini-Data cache clean area. */ xscale_setup_minidata(l1pagetable, afterkern, minidataclean.pv_pa); /* Map the vector page. 
*/ pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); if (cpu_is_ixp43x()) arm_devmap_bootstrap(l1pagetable, ixp435_devmap); else arm_devmap_bootstrap(l1pagetable, ixp425_devmap); /* * Give the XScale global cache clean code an appropriately * sized chunk of unmapped VA space starting at 0xff000000 * (our device mappings end before this address). */ xscale_cache_clean_addr = 0xff000000U; cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); cpu_setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ cpu_idcache_wbinv_all(); cpu_setup(); /* ready to setup the console (XXX move earlier if possible) */ cninit(); /* * Fetch the RAM size from the MCU registers. The * expansion bus was mapped above so we can now read 'em. */ if (cpu_is_ixp43x()) memsize = ixp435_ddram_size(); else memsize = ixp425_sdram_size(); undefined_init(); init_proc0(kernelstack.pv_va); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_curmaxkvaddr = afterkern + PAGE_SIZE; vm_max_kernel_address = 0xe0000000; pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt); msgbufp = (void*)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Add the physical ram we have available. * * Exclude the kernel, and all the things we allocated which immediately * follow the kernel, from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_hardware_region(PHYSADDR, memsize); arm_physmem_exclude_region(freemem_pt, abp->abp_physaddr - freemem_pt, EXFLAG_NOALLOC); arm_physmem_exclude_region(freemempos, abp->abp_physaddr - 0x100000 - freemempos, EXFLAG_NOALLOC); arm_physmem_exclude_region(abp->abp_physaddr, virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); #undef next_page #undef next_chunk2 } Index: head/sys/arm/xscale/pxa/pxa_machdep.c =================================================================== --- head/sys/arm/xscale/pxa/pxa_machdep.c (revision 295693) +++ head/sys/arm/xscale/pxa/pxa_machdep.c (revision 295694) @@ -1,440 +1,438 @@ /* $NetBSD: hpc_machdep.c,v 1.70 2003/09/16 08:18:22 agc Exp $ */ /*- * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Brini. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RiscBSD kernel project * * machdep.c * * Machine dependant functions for kernel setup * * This file needs a lot of work. * * Created : 17/09/94 */ #include "opt_ddb.h" #include "opt_kstack_pages.h" #include __FBSDID("$FreeBSD$"); #define _ARM32_BUS_DMA_PRIVATE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */ #define KERNEL_PT_IOPXS 1 #define KERNEL_PT_BEFOREKERN 2 #define KERNEL_PT_AFKERNEL 3 /* L2 table for mapping after kernel */ #define KERNEL_PT_AFKERNEL_NUM 9 /* this should be evenly divisable by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */ #define NUM_KERNEL_PTS (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM) struct pv_addr kernel_pt_table[NUM_KERNEL_PTS]; /* Physical and virtual addresses for some global pages */ struct pv_addr systempage; struct pv_addr msgbufpv; struct pv_addr irqstack; struct pv_addr undstack; struct pv_addr abtstack; struct pv_addr kernelstack; struct pv_addr minidataclean; static void pxa_probe_sdram(bus_space_tag_t, bus_space_handle_t, uint32_t *, uint32_t *); /* Static device mappings. */ static const struct arm_devmap_entry pxa_devmap[] = { /* * Map the on-board devices up into the KVA region so we don't muck * up user-space. 
*/ { PXA2X0_PERIPH_START + PXA2X0_PERIPH_OFFSET, PXA2X0_PERIPH_START, PXA250_PERIPH_END - PXA2X0_PERIPH_START, - VM_PROT_READ|VM_PROT_WRITE, - PTE_DEVICE, }, - { 0, 0, 0, 0, 0, } + { 0, 0, 0, } }; #define SDRAM_START 0xa0000000 extern vm_offset_t xscale_cache_clean_addr; void * initarm(struct arm_boot_params *abp) { struct pv_addr kernel_l1pt; struct pv_addr dpcpu; int loop; u_int l1pagetable; vm_offset_t freemempos; vm_offset_t freemem_pt; vm_offset_t afterkern; vm_offset_t freemem_after; vm_offset_t lastaddr; int i, j; uint32_t memsize[PXA2X0_SDRAM_BANKS], memstart[PXA2X0_SDRAM_BANKS]; lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; set_cpufuncs(); pcpu_init(pcpup, 0, sizeof(struct pcpu)); PCPU_SET(curthread, &thread0); /* Do basic tuning, hz etc */ init_param1(); freemempos = 0xa0200000; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_pa, (np)); \ (var).pv_va = (var).pv_pa + 0x20000000; #define alloc_pages(var, np) \ freemempos -= (np * PAGE_SIZE); \ (var) = freemempos; \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos -= PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[loop], L2_TABLE_SIZE / PAGE_SIZE); } else { kernel_pt_table[loop].pv_pa = freemempos + (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * L2_TABLE_SIZE_REAL; kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa + 0x20000000; } } freemem_pt = freemempos; freemempos = 0xa0100000; /* * Allocate a page for the system page mapped to V0x00000000 * This page will just contain the system vectors and can be * shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. */ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE); valloc_pages(abtstack, ABT_STACK_SIZE); valloc_pages(undstack, UND_STACK_SIZE); valloc_pages(kernelstack, kstack_pages); alloc_pages(minidataclean.pv_pa, 1); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Allocate memory for the l1 and l2 page tables. The scheme to avoid * wasting memory by allocating the l1pt on the first 16k memory was * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for * this to work (which is supposed to be the case). */ /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* Map the L2 pages tables in the L1 page table */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1), &kernel_pt_table[KERNEL_PT_SYS]); #if 0 /* XXXBJR: What is this? Don't know if there's an analogue. 
*/ pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE, &kernel_pt_table[KERNEL_PT_IOPXS]); #endif pmap_link_l2pt(l1pagetable, KERNBASE, &kernel_pt_table[KERNEL_PT_BEFOREKERN]); pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000, 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000, (((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1), VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1); afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) & ~(L1_S_SIZE - 1)); for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) { pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000, &kernel_pt_table[KERNEL_PT_AFKERNEL + i]); } pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map the Mini-Data cache clean area. */ xscale_setup_minidata(l1pagetable, afterkern, minidataclean.pv_pa); /* Map the vector page. */ pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); arm_devmap_bootstrap(l1pagetable, pxa_devmap); /* * Give the XScale global cache clean code an appropriately * sized chunk of unmapped VA space starting at 0xff000000 * (our device mappings end before this address). */ xscale_cache_clean_addr = 0xff000000U; cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); cpu_setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ cpu_idcache_wbinv_all(); cpu_setup(); /* * Sort out bus_space for on-board devices. */ pxa_obio_tag_init(); /* * Fetch the SDRAM start/size from the PXA2X0 SDRAM configration * registers. */ pxa_probe_sdram(obio_tag, PXA2X0_MEMCTL_BASE, memstart, memsize); /* Fire up consoles. */ cninit(); undefined_init(); init_proc0(kernelstack.pv_va); /* Enable MMU, I-cache, D-cache, write buffer. */ arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_curmaxkvaddr = afterkern + PAGE_SIZE; vm_max_kernel_address = 0xe0000000; pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt); msgbufp = (void*)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Add the physical ram we have available. * * Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. 
*/ for (j = 0; j < PXA2X0_SDRAM_BANKS; j++) { if (memsize[j] > 0) arm_physmem_hardware_region(memstart[j], memsize[j]); } arm_physmem_exclude_region(freemem_pt, abp->abp_physaddr - freemem_pt, EXFLAG_NOALLOC); arm_physmem_exclude_region(freemempos, abp->abp_physaddr - 0x100000 - freemempos, EXFLAG_NOALLOC); arm_physmem_exclude_region(abp->abp_physaddr, virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } static void pxa_probe_sdram(bus_space_tag_t bst, bus_space_handle_t bsh, uint32_t *memstart, uint32_t *memsize) { uint32_t mdcnfg, dwid, dcac, drac, dnb; int i; mdcnfg = bus_space_read_4(bst, bsh, MEMCTL_MDCNFG); /* * Scan all 4 SDRAM banks */ for (i = 0; i < PXA2X0_SDRAM_BANKS; i++) { memstart[i] = 0; memsize[i] = 0; switch (i) { case 0: case 1: if ((i == 0 && (mdcnfg & MDCNFG_DE0) == 0) || (i == 1 && (mdcnfg & MDCNFG_DE1) == 0)) continue; dwid = mdcnfg >> MDCNFD_DWID01_SHIFT; dcac = mdcnfg >> MDCNFD_DCAC01_SHIFT; drac = mdcnfg >> MDCNFD_DRAC01_SHIFT; dnb = mdcnfg >> MDCNFD_DNB01_SHIFT; break; case 2: case 3: if ((i == 2 && (mdcnfg & MDCNFG_DE2) == 0) || (i == 3 && (mdcnfg & MDCNFG_DE3) == 0)) continue; dwid = mdcnfg >> MDCNFD_DWID23_SHIFT; dcac = mdcnfg >> MDCNFD_DCAC23_SHIFT; drac = mdcnfg >> MDCNFD_DRAC23_SHIFT; dnb = mdcnfg >> MDCNFD_DNB23_SHIFT; break; default: panic("pxa_probe_sdram: impossible"); } dwid = 2 << (1 - (dwid & MDCNFD_DWID_MASK)); /* 16/32 width */ dcac = 1 << ((dcac & MDCNFD_DCAC_MASK) + 8); /* 8-11 columns */ drac = 1 << ((drac & MDCNFD_DRAC_MASK) + 11); /* 11-13 rows */ dnb = 2 << (dnb & MDCNFD_DNB_MASK); /* # of banks */ memsize[i] = dwid * dcac * drac * dnb; memstart[i] = PXA2X0_SDRAM0_START + (i * PXA2X0_SDRAM_BANK_SIZE); } } #define TIMER_FREQUENCY 3686400 #define UNIMPLEMENTED panic("%s: unimplemented", __func__) /* XXXBJR: Belongs with DELAY in a timer.c of some sort. */ void cpu_startprofclock(void) { UNIMPLEMENTED; } void cpu_stopprofclock(void) { UNIMPLEMENTED; } static struct arm32_dma_range pxa_range = { .dr_sysbase = 0, .dr_busbase = 0, .dr_len = ~0u, }; struct arm32_dma_range * bus_dma_get_range(void) { return (&pxa_range); } int bus_dma_get_range_nb(void) { return (1); } Index: head/sys/arm64/include/devmap.h =================================================================== --- head/sys/arm64/include/devmap.h (revision 295693) +++ head/sys/arm64/include/devmap.h (revision 295694) @@ -1,93 +1,91 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
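Working one illustrative register value through the pxa_probe_sdram() decode above shows how the four MDCNFG fields multiply out to a bank size (the field values are made up for the example):

/* Illustrative only: recompute one SDRAM bank size from decoded fields. */
static uint32_t
sketch_bank_size(uint32_t dwid_f, uint32_t dcac_f, uint32_t drac_f,
    uint32_t dnb_f)
{
	uint32_t width = 2U << (1 - dwid_f);	/* 2 or 4 bytes: 16/32-bit bus */
	uint32_t cols  = 1U << (dcac_f + 8);	/* column count */
	uint32_t rows  = 1U << (drac_f + 11);	/* row count */
	uint32_t banks = 2U << dnb_f;		/* internal banks */

	/* e.g. (0, 2, 2, 1) -> 4 * 1024 * 8192 * 4 = 128 MB */
	return (width * cols * rows * banks);
}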
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_DEVMAP_H_ #define _MACHINE_DEVMAP_H_ /* * This structure is used by MD code to describe static mappings of devices * which are established as part of bringing up the MMU early in the boot. */ struct arm_devmap_entry { vm_offset_t pd_va; /* virtual address */ vm_paddr_t pd_pa; /* physical address */ vm_size_t pd_size; /* size of region */ - vm_prot_t pd_prot; /* protection code */ - int pd_cache; /* cache attributes */ }; /* * Return the lowest KVA address used in any entry in the registered devmap * table. This works with whatever table is registered, including the internal * table used by arm_devmap_add_entry() if that routine was used. Platforms can * implement initarm_lastaddr() by calling this if static device mappings are * their only use of high KVA space. */ vm_offset_t arm_devmap_lastaddr(void); /* * Automatically allocate KVA (from the top of the address space downwards) and * make static device mapping entries in an internal table. The internal table * is automatically registered on the first call to this. */ void arm_devmap_add_entry(vm_paddr_t pa, vm_size_t sz); /* * Register a platform-local table to be bootstrapped by the generic * initarm() in arm/machdep.c. This is used by newer code that allocates and * fills in its own local table but does not have its own initarm() routine. */ void arm_devmap_register_table(const struct arm_devmap_entry * _table); /* * Establish mappings for all the entries in the table. This is called * automatically from the common initarm() in arm/machdep.c, and also from the * custom initarm() routines in older code. If the table pointer is NULL, this * will use the table installed previously by arm_devmap_register_table(). */ -void arm_devmap_bootstrap(vm_offset_t _l1pt, +void arm_devmap_bootstrap(vm_offset_t _l1pt, const struct arm_devmap_entry *_table); /* * Translate between virtual and physical addresses within a region that is * static-mapped by the devmap code. If the given address range isn't * static-mapped, then ptov returns NULL and vtop returns DEVMAP_PADDR_NOTFOUND. * The latter implies that you can't vtop just the last byte of physical address * space. This is not as limiting as it might sound, because even if a device * occupies the end of the physical address space, you're only prevented from * doing vtop for that single byte. If you vtop a size bigger than 1 it works. */ #define DEVMAP_PADDR_NOTFOUND ((vm_paddr_t)(-1)) void * arm_devmap_ptov(vm_paddr_t _pa, vm_size_t _sz); vm_paddr_t arm_devmap_vtop(void * _va, vm_size_t _sz); /* Print the static mapping table; used for bootverbose output. */ void arm_devmap_print_table(void); #endif
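The translation helpers declared above are easiest to see in use. A hedged sketch of a consumer that prefers a static mapping and falls back to a runtime one (sketch_map_regs() is hypothetical; pmap_mapdev() is the fallback the arm bus_space code uses):

/*
 * Map device registers: reuse the static devmap mapping when one
 * covers [pa, pa + sz), otherwise create a runtime mapping.
 */
static void *
sketch_map_regs(vm_paddr_t pa, vm_size_t sz)
{
	void *va;

	va = arm_devmap_ptov(pa, sz);	/* NULL if not static-mapped */
	if (va == NULL)
		va = pmap_mapdev(pa, sz);
	return (va);
}

The reverse direction works the same way: arm_devmap_vtop() yields DEVMAP_PADDR_NOTFOUND when the given VA range is not entirely inside a static mapping, so callers must compare against that sentinel rather than against zero.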