Index: head/sys/arm/annapurna/alpine/alpine_machdep.c
===================================================================
--- head/sys/arm/annapurna/alpine/alpine_machdep.c (revision 295142)
+++ head/sys/arm/annapurna/alpine/alpine_machdep.c (revision 295143)
@@ -1,148 +1,148 @@
/*-
* Copyright (c) 2013 Ruslan Bukin
* Copyright (c) 2015 Semihalf
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include
__FBSDID("$FreeBSD$");
#define _ARM32_BUS_DMA_PRIVATE
#include
#include
#include
#include
#include
#include
#include
#include
#include /* For trapframe_t, used in */
#include
#include
#include
#include
#include
#include
#include "opt_ddb.h"
#include "opt_platform.h"
struct mtx al_dbg_lock;
#define DEVMAP_MAX_VA_ADDRESS 0xF0000000
bus_addr_t al_devmap_pa;
bus_addr_t al_devmap_size;
#define AL_NB_SERVICE_OFFSET 0x70000
#define AL_NB_CCU_OFFSET 0x90000
#define AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET 0x4000
#define AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET 0x5000
#define AL_CCU_SPECULATION_CONTROL_OFFSET 0x4
#define AL_NB_ACF_MISC_OFFSET 0xD0
#define AL_NB_ACF_MISC_READ_BYPASS (1 << 30)
int alpine_get_devmap_base(bus_addr_t *pa, bus_addr_t *size);
/*
 * Upper bound of KVA available for static device mappings on Alpine.
 */
vm_offset_t
platform_lastaddr(void)
{
	vm_offset_t last;

	last = DEVMAP_MAX_VA_ADDRESS;
	return (last);
}
/* Platform hook: intentionally empty — nothing to probe this early on Alpine. */
void
platform_probe_and_attach(void)
{
}
/* Platform hook: intentionally empty — no early GPIO setup needed on Alpine. */
void
platform_gpio_init(void)
{
}
/*
 * Late platform init: map the register window discovered from the DT
 * (al_devmap_pa/al_devmap_size) and configure the system fabric:
 * disable the NB read-bypass, enable IO-fabric cache snooping and turn
 * off speculative fetches from masters.  Panics if the register space
 * cannot be mapped.
 *
 * Fix: the bus_space_map() argument had been corrupted to the mojibake
 * "®_baddr"; restored to "&reg_baddr".
 */
void
platform_late_init(void)
{
	bus_addr_t reg_baddr;
	uint32_t val;

	if (!mtx_initialized(&al_dbg_lock))
		mtx_init(&al_dbg_lock, "ALDBG", "ALDBG", MTX_SPIN);

	/* configure system fabric */
	if (bus_space_map(fdtbus_bs_tag, al_devmap_pa, al_devmap_size, 0,
	    &reg_baddr))
		panic("Couldn't map Register Space area");

	/* do not allow reads to bypass writes to different addresses */
	val = bus_space_read_4(fdtbus_bs_tag, reg_baddr,
	    AL_NB_SERVICE_OFFSET + AL_NB_ACF_MISC_OFFSET);
	val &= ~AL_NB_ACF_MISC_READ_BYPASS;
	bus_space_write_4(fdtbus_bs_tag, reg_baddr,
	    AL_NB_SERVICE_OFFSET + AL_NB_ACF_MISC_OFFSET, val);

	/* enable cache snoop for both IO fabrics */
	bus_space_write_4(fdtbus_bs_tag, reg_baddr,
	    AL_NB_CCU_OFFSET + AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET, 1);
	bus_space_write_4(fdtbus_bs_tag, reg_baddr,
	    AL_NB_CCU_OFFSET + AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET, 1);

	/* disable speculative fetches from masters */
	bus_space_write_4(fdtbus_bs_tag, reg_baddr,
	    AL_NB_CCU_OFFSET + AL_CCU_SPECULATION_CONTROL_OFFSET, 7);

	bus_space_unmap(fdtbus_bs_tag, reg_baddr, al_devmap_size);
}
/*
- * Construct pmap_devmap[] with DT-derived config data.
+ * Construct devmap table with DT-derived config data.
*/
/*
 * Register the DT-derived device register window with the arm devmap.
 * Returns 0 on success or the error from alpine_get_devmap_base().
 */
int
platform_devmap_init(void)
{
	int err;

	/*
	 * alpine_get_devmap_base() reports failure via its return value;
	 * the original code ignored it and could register a devmap entry
	 * from uninitialized al_devmap_pa/al_devmap_size.  Propagate the
	 * error instead.
	 */
	err = alpine_get_devmap_base(&al_devmap_pa, &al_devmap_size);
	if (err != 0)
		return (err);
	arm_devmap_add_entry(al_devmap_pa, al_devmap_size);
	return (0);
}
/*
 * No platform-specific arm32 DMA range table on Alpine.
 */
struct arm32_dma_range *
bus_dma_get_range(void)
{
	struct arm32_dma_range *ranges;

	ranges = NULL;
	return (ranges);
}
/*
 * Number of entries in the (nonexistent) DMA range table: always zero,
 * matching bus_dma_get_range() returning NULL.
 */
int
bus_dma_get_range_nb(void)
{
	int nranges;

	nranges = 0;
	return (nranges);
}
Index: head/sys/arm/include/pmap-v6.h
===================================================================
--- head/sys/arm/include/pmap-v6.h (revision 295142)
+++ head/sys/arm/include/pmap-v6.h (revision 295143)
@@ -1,299 +1,285 @@
/*-
* Copyright 2014 Svatopluk Kraus
* Copyright 2014 Michal Meloun
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
* Science Department and William Jolitz of UUNET Technologies Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The ARM version of this file was more or less based on the i386 version,
* which has the following provenance...
*
* Derived from hp300 version by Mike Hibler, this version by William
* Jolitz uses a recursive map [a pde points to the page directory] to
* map the page tables using the pagetables themselves. This is done to
* reduce the impact on kernel virtual memory for lots of sparse address
* space, and to reduce the cost of memory to each process.
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
*
* $FreeBSD$
*/
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
#include
#include
#include
#include
typedef uint32_t pt1_entry_t; /* L1 table entry */
typedef uint32_t pt2_entry_t; /* L2 table entry */
typedef uint32_t ttb_entry_t; /* TTB entry */
#ifdef _KERNEL
#if 0
#define PMAP_PTE_NOCACHE // Use uncached page tables
#endif
/*
* (1) During pmap bootstrap, physical pages for L2 page tables are
* allocated in advance which are used for KVA continuous mapping
* starting from KERNBASE. This makes things more simple.
* (2) During vm subsystem initialization, only vm subsystem itself can
* allocate physical memory safely. As pmap_map() is called during
* this initialization, we must be prepared for that and have some
* preallocated physical pages for L2 page tables.
*
* Note that some more pages for L2 page tables are preallocated too
* for mappings laying above VM_MAX_KERNEL_ADDRESS.
*/
#ifndef NKPT2PG
/*
* The optimal way is to define this in board configuration as
* definition here must be safe enough. It means really big.
*
* 1 GB KVA <=> 256 kernel L2 page table pages
*
* From real platforms:
* 1 GB physical memory <=> 10 pages is enough
* 2 GB physical memory <=> 21 pages is enough
*/
#define NKPT2PG 32
#endif
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern char *_tmppt; /* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
/*
* Pmap stuff
*/
/*
* This structure is used to hold a virtual<->physical address
* association and is used mostly by bootstrap code
*/
struct pv_addr {
SLIST_ENTRY(pv_addr) pv_list;
vm_offset_t pv_va;
vm_paddr_t pv_pa;
};
#endif
struct pv_entry;
struct pv_chunk;
/* Machine-dependent per-page pmap bookkeeping, embedded in vm_page. */
struct md_page {
TAILQ_HEAD(,pv_entry) pv_list; /* all valid mappings of this page */
uint16_t pt2_wirecount[4]; /* NOTE(review): per-quarter PT2 wire counts, presumably — confirm against pmap-v6.c */
int pat_mode; /* cached memory attribute (vm_memattr_t) */
};
struct pmap {
struct mtx pm_mtx;
pt1_entry_t *pm_pt1; /* KVA of pt1 */
pt2_entry_t *pm_pt2tab; /* KVA of pt2 pages table */
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
cpuset_t pm_active; /* active on cpus */
struct pmap_statistics pm_stats; /* pmap statictics */
LIST_ENTRY(pmap) pm_list; /* List of all pmaps */
};
typedef struct pmap *pmap_t;
#ifdef _KERNEL
extern struct pmap kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)
#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) \
mtx_assert(&(pmap)->pm_mtx, (type))
#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_LOCKED(pmap) mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
#endif
/*
* For each vm_page_t, there is a list of all currently valid virtual
* mappings of that page. An entry is a pv_entry_t, the list is pv_list.
*/
typedef struct pv_entry {
vm_offset_t pv_va; /* virtual address for mapping */
TAILQ_ENTRY(pv_entry) pv_next;
} *pv_entry_t;
/*
* pv_entries are allocated in chunks per-process. This avoids the
* need to track per-pmap assignments.
*/
#define _NPCM 11
#define _NPCPV 336
struct pv_chunk {
pmap_t pc_pmap;
TAILQ_ENTRY(pv_chunk) pc_list;
uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */
TAILQ_ENTRY(pv_chunk) pc_lru;
struct pv_entry pc_pventry[_NPCPV];
};
#ifdef _KERNEL
struct pcb;
extern ttb_entry_t pmap_kern_ttb; /* TTB for kernel pmap */
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
/*
* Only the following functions or macros may be used before pmap_bootstrap()
* is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
* vtopte2().
*/
void pmap_bootstrap(vm_offset_t );
void pmap_kenter(vm_offset_t , vm_paddr_t );
void *pmap_kenter_temporary(vm_paddr_t , int );
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t );
void pmap_page_set_memattr(vm_page_t , vm_memattr_t );
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
void pmap_kremove_device(vm_offset_t, vm_size_t);
void pmap_set_pcb_pagedir(pmap_t , struct pcb *);
void pmap_tlb_flush(pmap_t , vm_offset_t );
void pmap_tlb_flush_range(pmap_t , vm_offset_t , vm_size_t );
void pmap_dcache_wb_range(vm_paddr_t , vm_size_t , vm_memattr_t );
vm_paddr_t pmap_kextract(vm_offset_t );
vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *);
int pmap_fault(pmap_t , vm_offset_t , uint32_t , int , bool);
#define vtophys(va) pmap_kextract((vm_offset_t)(va))
void pmap_set_tex(void);
void reinit_mmu(ttb_entry_t ttb, u_int aux_clr, u_int aux_set);
/*
* Pre-bootstrap epoch functions set.
*/
void pmap_bootstrap_prepare(vm_paddr_t );
vm_paddr_t pmap_preboot_get_pages(u_int );
void pmap_preboot_map_pages(vm_paddr_t , vm_offset_t , u_int );
vm_offset_t pmap_preboot_reserve_pages(u_int );
vm_offset_t pmap_preboot_get_vpages(u_int );
void pmap_preboot_map_attr(vm_paddr_t , vm_offset_t , vm_size_t ,
int , int );
/*
 * Compatibility wrapper for legacy pmap_map_chunk() callers; forwards to
 * pmap_preboot_map_attr().  Note the l1pt argument is ignored here and
 * the (pa, va) argument order is swapped relative to this wrapper's
 * (va, pa) parameters.
 */
static __inline void
pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
vm_size_t size, int prot, int cache)
{
pmap_preboot_map_attr(pa, va, size, prot, cache);
}
-/*
- * This structure is used by machine-dependent code to describe
- * static mappings of devices, created at bootstrap time.
- */
-struct pmap_devmap {
- vm_offset_t pd_va; /* virtual address */
- vm_paddr_t pd_pa; /* physical address */
- vm_size_t pd_size; /* size of region */
- vm_prot_t pd_prot; /* protection code */
- int pd_cache; /* cache attributes */
-};
-
-void pmap_devmap_bootstrap(const struct pmap_devmap *);
-
#endif /* _KERNEL */
// ----------------- TO BE DELETED ---------------------------------------------
#include
#ifdef _KERNEL
/*
* sys/arm/arm/elf_trampoline.c
* sys/arm/arm/genassym.c
* sys/arm/arm/machdep.c
* sys/arm/arm/mp_machdep.c
* sys/arm/arm/locore.S
* sys/arm/arm/pmap.c
* sys/arm/arm/swtch.S
* sys/arm/at91/at91_machdep.c
* sys/arm/cavium/cns11xx/econa_machdep.c
* sys/arm/s3c2xx0/s3c24x0_machdep.c
* sys/arm/xscale/ixp425/avila_machdep.c
* sys/arm/xscale/i8134x/crb_machdep.c
* sys/arm/xscale/i80321/ep80219_machdep.c
* sys/arm/xscale/i80321/iq31244_machdep.c
* sys/arm/xscale/pxa/pxa_machdep.c
*/
#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */
/*
* sys/arm/arm/cpufunc.c
*/
void vector_page_setprot(int);
/*
* sys/arm/arm/bus_space_generic.c (just comment)
* sys/arm/arm/devmap.c
* sys/arm/arm/pmap.c (just comment)
* sys/arm/at91/at91_machdep.c
* sys/arm/cavium/cns11xx/econa_machdep.c
* sys/arm/freescale/imx/imx6_machdep.c (just comment)
* sys/arm/mv/orion/db88f5xxx.c
* sys/arm/mv/mv_localbus.c
* sys/arm/mv/mv_machdep.c
* sys/arm/mv/mv_pci.c
* sys/arm/s3c2xx0/s3c24x0_machdep.c
* sys/arm/versatile/versatile_machdep.c
* sys/arm/xscale/ixp425/avila_machdep.c
* sys/arm/xscale/i8134x/crb_machdep.c
* sys/arm/xscale/i80321/ep80219_machdep.c
* sys/arm/xscale/i80321/iq31244_machdep.c
* sys/arm/xscale/pxa/pxa_machdep.c
*/
#define PTE_DEVICE PTE2_ATTR_DEVICE
#endif /* _KERNEL */
// -----------------------------------------------------------------------------
#endif /* !_MACHINE_PMAP_H_ */
Index: head/sys/arm/mv/mv_common.c
===================================================================
--- head/sys/arm/mv/mv_common.c (revision 295142)
+++ head/sys/arm/mv/mv_common.c (revision 295143)
@@ -1,2231 +1,2231 @@
/*-
* Copyright (C) 2008-2011 MARVELL INTERNATIONAL LTD.
* All rights reserved.
*
* Developed by Semihalf.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of MARVELL nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
MALLOC_DEFINE(M_IDMA, "idma", "idma dma test memory");
#define IDMA_DEBUG
#undef IDMA_DEBUG
#define MAX_CPU_WIN 5
#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__); \
printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif
#ifdef DEBUG
#define MV_DUMP_WIN 1
#else
#define MV_DUMP_WIN 0
#endif
static int win_eth_can_remap(int i);
#ifndef SOC_MV_FREY
static int decode_win_cpu_valid(void);
#endif
static int decode_win_usb_valid(void);
static int decode_win_eth_valid(void);
static int decode_win_pcie_valid(void);
static int decode_win_sata_valid(void);
static int decode_win_idma_valid(void);
static int decode_win_xor_valid(void);
#ifndef SOC_MV_FREY
static void decode_win_cpu_setup(void);
#endif
#ifdef SOC_MV_ARMADAXP
static int decode_win_sdram_fixup(void);
#endif
static void decode_win_usb_setup(u_long);
static void decode_win_eth_setup(u_long);
static void decode_win_sata_setup(u_long);
static void decode_win_idma_setup(u_long);
static void decode_win_xor_setup(u_long);
static void decode_win_usb_dump(u_long);
static void decode_win_eth_dump(u_long base);
static void decode_win_idma_dump(u_long base);
static void decode_win_xor_dump(u_long base);
static int fdt_get_ranges(const char *, void *, int, int *, int *);
#ifdef SOC_MV_ARMADA38X
int gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt,
int *trig, int *pol);
#endif
static int win_cpu_from_dt(void);
static int fdt_win_setup(void);
static uint32_t dev_mask = 0;
static int cpu_wins_no = 0;
static int eth_port = 0;
static int usb_port = 0;
static struct decode_win cpu_win_tbl[MAX_CPU_WIN];
const struct decode_win *cpu_wins = cpu_win_tbl;
typedef void (*decode_win_setup_t)(u_long);
typedef void (*dump_win_t)(u_long);
struct soc_node_spec {
const char *compat;
decode_win_setup_t decode_handler;
dump_win_t dump_handler;
};
static struct soc_node_spec soc_nodes[] = {
{ "mrvl,ge", &decode_win_eth_setup, &decode_win_eth_dump },
{ "mrvl,usb-ehci", &decode_win_usb_setup, &decode_win_usb_dump },
{ "mrvl,sata", &decode_win_sata_setup, NULL },
{ "mrvl,xor", &decode_win_xor_setup, &decode_win_xor_dump },
{ "mrvl,idma", &decode_win_idma_setup, &decode_win_idma_dump },
{ "mrvl,pcie", &decode_win_pcie_setup, NULL },
{ NULL, NULL, NULL },
};
struct fdt_pm_mask_entry fdt_pm_mask_table[] = {
{ "mrvl,ge", CPU_PM_CTRL_GE(0) },
{ "mrvl,ge", CPU_PM_CTRL_GE(1) },
{ "mrvl,usb-ehci", CPU_PM_CTRL_USB(0) },
{ "mrvl,usb-ehci", CPU_PM_CTRL_USB(1) },
{ "mrvl,usb-ehci", CPU_PM_CTRL_USB(2) },
{ "mrvl,xor", CPU_PM_CTRL_XOR },
{ "mrvl,sata", CPU_PM_CTRL_SATA },
{ NULL, 0 }
};
/*
 * Return non-zero when the device selected by 'mask' is powered down.
 * The PM control register polarity is inverted on Kirkwood relative to
 * the other SoCs, as reflected by the two branches below.
 */
static __inline int
pm_is_disabled(uint32_t mask)
{
#if defined(SOC_MV_KIRKWOOD)
return (soc_power_ctrl_get(mask) == mask);
#else
return (soc_power_ctrl_get(mask) == mask ? 0 : 1);
#endif
}
/*
* Disable device using power management register.
* 1 - Device Power On
* 0 - Device Power Off
* Mask can be set in loader.
* EXAMPLE:
* loader> set hw.pm-disable-mask=0x2
*
* Common mask:
* |-------------------------------|
* | Device | Kirkwood | Discovery |
* |-------------------------------|
* | USB0 | 0x00008 | 0x020000 |
* |-------------------------------|
* | USB1 | - | 0x040000 |
* |-------------------------------|
* | USB2 | - | 0x080000 |
* |-------------------------------|
* | GE0 | 0x00001 | 0x000002 |
* |-------------------------------|
* | GE1 | - | 0x000004 |
* |-------------------------------|
* | IDMA | - | 0x100000 |
* |-------------------------------|
* | XOR | 0x10000 | 0x200000 |
* |-------------------------------|
* | CESA | 0x20000 | 0x400000 |
* |-------------------------------|
* | SATA | 0x04000 | 0x004000 |
* --------------------------------|
* This feature can be used only on Kirkwood and Discovery
* machines.
*/
/*
 * Power off the devices selected by 'mask' (see the table above for the
 * per-SoC bit meanings).
 *
 * NOTE(review): the entire body is compiled only under DIAGNOSTIC, so on
 * non-DIAGNOSTIC kernels this is a no-op even though soc_decode_win()
 * calls it whenever hw.pm-disable-mask is set — confirm this is intended.
 */
static __inline void
pm_disable_device(int mask)
{
#ifdef DIAGNOSTIC
uint32_t reg;
reg = soc_power_ctrl_get(CPU_PM_CTRL_ALL);
printf("Power Management Register: 0%x\n", reg);
/* Clear the selected bits: 0 = device power off per the comment above. */
reg &= ~mask;
soc_power_ctrl_set(reg);
printf("Device %x is disabled\n", mask);
reg = soc_power_ctrl_get(CPU_PM_CTRL_ALL);
printf("Power Management Register: 0%x\n", reg);
#endif
}
/*
 * Decide whether the device represented by the FDT node is enabled from
 * the power-management point of view.  Returns 1 (enabled) unless the
 * matching fdt_pm_mask_table bit says the device is powered down.  Each
 * table entry is consumed at most once (tracked in dev_mask), since
 * duplicate compat strings (e.g. two "mrvl,ge" rows) stand for distinct
 * units (GE0/GE1).
 */
int
fdt_pm(phandle_t node)
{
uint32_t cpu_pm_ctrl;
int i, ena, compat;
ena = 1;
cpu_pm_ctrl = read_cpu_ctrl(CPU_PM_CTRL);
for (i = 0; fdt_pm_mask_table[i].compat != NULL; i++) {
/* Skip table entries already claimed by an earlier node. */
if (dev_mask & (1 << i))
continue;
compat = fdt_is_compatible(node, fdt_pm_mask_table[i].compat);
#if defined(SOC_MV_KIRKWOOD)
/* Kirkwood: a set PM bit means the device is powered down. */
if (compat && (cpu_pm_ctrl & fdt_pm_mask_table[i].mask)) {
dev_mask |= (1 << i);
ena = 0;
break;
} else if (compat) {
dev_mask |= (1 << i);
break;
}
#else
/* Other SoCs: a clear PM bit means the device is powered down. */
if (compat && (~cpu_pm_ctrl & fdt_pm_mask_table[i].mask)) {
dev_mask |= (1 << i);
ena = 0;
break;
} else if (compat) {
dev_mask |= (1 << i);
break;
}
#endif
}
return (ena);
}
/* Read register 'reg' from the CPU control block at MV_CPU_CONTROL_BASE. */
uint32_t
read_cpu_ctrl(uint32_t reg)
{
return (bus_space_read_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE, reg));
}
/* Write 'val' to register 'reg' of the CPU control block at MV_CPU_CONTROL_BASE. */
void
write_cpu_ctrl(uint32_t reg, uint32_t val)
{
bus_space_write_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE, reg, val);
}
#if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X)
/* Accessors for the MP clocks register block (ArmadaXP/38X only). */
uint32_t
read_cpu_mp_clocks(uint32_t reg)
{
return (bus_space_read_4(fdtbus_bs_tag, MV_MP_CLOCKS_BASE, reg));
}
void
write_cpu_mp_clocks(uint32_t reg, uint32_t val)
{
bus_space_write_4(fdtbus_bs_tag, MV_MP_CLOCKS_BASE, reg, val);
}
/* Accessors for the misc register block (ArmadaXP/38X only). */
uint32_t
read_cpu_misc(uint32_t reg)
{
return (bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, reg));
}
void
write_cpu_misc(uint32_t reg, uint32_t val)
{
bus_space_write_4(fdtbus_bs_tag, MV_MISC_BASE, reg, val);
}
#endif
/*
 * Reset the SoC: unmask the soft-reset-out signal, then trigger the
 * system soft reset.  ArmadaXP/38X route these registers through the
 * misc block, older SoCs through the CPU control block.  Never returns;
 * spins until the reset takes effect.
 */
void
cpu_reset(void)
{
#if defined(SOC_MV_ARMADAXP) || defined (SOC_MV_ARMADA38X)
write_cpu_misc(RSTOUTn_MASK, SOFT_RST_OUT_EN);
write_cpu_misc(SYSTEM_SOFT_RESET, SYS_SOFT_RST);
#else
write_cpu_ctrl(RSTOUTn_MASK, SOFT_RST_OUT_EN);
write_cpu_ctrl(SYSTEM_SOFT_RESET, SYS_SOFT_RST);
#endif
while (1);
}
/*
 * Read the CPU's extra-features coprocessor register, selecting the
 * cp15 encoding appropriate for the detected SoC.  Returns 0 for cores
 * with no such register.
 */
uint32_t
cpu_extra_feat(void)
{
uint32_t dev, rev;
uint32_t ef = 0;
soc_id(&dev, &rev);
switch (dev) {
case MV_DEV_88F6281:
case MV_DEV_88F6282:
case MV_DEV_88RC8180:
case MV_DEV_MV78100_Z0:
case MV_DEV_MV78100:
/* NOTE(review): cp15 op1=1, c15/c1 — Sheeva extra-features reg, presumably; confirm against the core TRM. */
__asm __volatile("mrc p15, 1, %0, c15, c1, 0" : "=r" (ef));
break;
case MV_DEV_88F5182:
case MV_DEV_88F5281:
__asm __volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (ef));
break;
default:
if (bootverbose)
printf("This ARM Core does not support any extra features\n");
}
return (ef);
}
/*
* Get the power status of device. This feature is only supported on
* Kirkwood and Discovery SoCs.
*/
uint32_t
soc_power_ctrl_get(uint32_t mask)
{
#if !defined(SOC_MV_ORION) && !defined(SOC_MV_LOKIPLUS) && !defined(SOC_MV_FREY)
/* Intersect the request with the live PM control register bits. */
if (mask != CPU_PM_CTRL_NONE)
mask &= read_cpu_ctrl(CPU_PM_CTRL);
return (mask);
#else
/* SoCs without a PM control register: echo the mask unchanged. */
return (mask);
#endif
}
/*
* Set the power status of device. This feature is only supported on
* Kirkwood and Discovery SoCs.
*/
void
soc_power_ctrl_set(uint32_t mask)
{
/*
 * NOTE(review): unlike soc_power_ctrl_get(), this guard does not
 * exclude SOC_MV_FREY — confirm whether the asymmetry is intended.
 */
#if !defined(SOC_MV_ORION) && !defined(SOC_MV_LOKIPLUS)
if (mask != CPU_PM_CTRL_NONE)
write_cpu_ctrl(CPU_PM_CTRL, mask);
#endif
}
void
soc_id(uint32_t *dev, uint32_t *rev)
{
/*
* Notice: system identifiers are available in the registers range of
* PCIE controller, so using this function is only allowed (and
* possible) after the internal registers range has been mapped in via
- * pmap_devmap_bootstrap().
+ * arm_devmap_bootstrap().
*/
*dev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 0) >> 16;
*rev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 8) & 0xff;
}
/*
 * Print SoC identification at boot: device name, silicon revision,
 * TClock frequency, cache prefetch configuration and (where the layout
 * is known) the L2 cache geometry.  Purely informational.
 */
static void
soc_identify(void)
{
uint32_t d, r, size, mode;
const char *dev;
const char *rev;
soc_id(&d, &r);
printf("SOC: ");
if (bootverbose)
printf("(0x%4x:0x%02x) ", d, r);
rev = "";
/* Map the raw device/revision IDs to human-readable names. */
switch (d) {
case MV_DEV_88F5181:
dev = "Marvell 88F5181";
if (r == 3)
rev = "B1";
break;
case MV_DEV_88F5182:
dev = "Marvell 88F5182";
if (r == 2)
rev = "A2";
break;
case MV_DEV_88F5281:
dev = "Marvell 88F5281";
if (r == 4)
rev = "D0";
else if (r == 5)
rev = "D1";
else if (r == 6)
rev = "D2";
break;
case MV_DEV_88F6281:
dev = "Marvell 88F6281";
if (r == 0)
rev = "Z0";
else if (r == 2)
rev = "A0";
else if (r == 3)
rev = "A1";
break;
case MV_DEV_88RC8180:
dev = "Marvell 88RC8180";
break;
case MV_DEV_88RC9480:
dev = "Marvell 88RC9480";
break;
case MV_DEV_88RC9580:
dev = "Marvell 88RC9580";
break;
case MV_DEV_88F6781:
dev = "Marvell 88F6781";
if (r == 2)
rev = "Y0";
break;
case MV_DEV_88F6282:
dev = "Marvell 88F6282";
if (r == 0)
rev = "A0";
else if (r == 1)
rev = "A1";
break;
case MV_DEV_88F6828:
dev = "Marvell 88F6828";
break;
case MV_DEV_88F6820:
dev = "Marvell 88F6820";
break;
case MV_DEV_88F6810:
dev = "Marvell 88F6810";
break;
case MV_DEV_MV78100_Z0:
dev = "Marvell MV78100 Z0";
break;
case MV_DEV_MV78100:
dev = "Marvell MV78100";
break;
case MV_DEV_MV78160:
dev = "Marvell MV78160";
break;
case MV_DEV_MV78260:
dev = "Marvell MV78260";
break;
case MV_DEV_MV78460:
dev = "Marvell MV78460";
break;
default:
dev = "UNKNOWN";
break;
}
printf("%s", dev);
if (*rev != '\0')
printf(" rev %s", rev);
printf(", TClock %dMHz\n", get_tclk() / 1000 / 1000);
mode = read_cpu_ctrl(CPU_CONFIG);
printf(" Instruction cache prefetch %s, data cache prefetch %s\n",
(mode & CPU_CONFIG_IC_PREF) ? "enabled" : "disabled",
(mode & CPU_CONFIG_DC_PREF) ? "enabled" : "disabled");
/* L2 cache geometry is only decoded for the SoCs listed below. */
switch (d) {
case MV_DEV_88F6281:
case MV_DEV_88F6282:
mode = read_cpu_ctrl(CPU_L2_CONFIG) & CPU_L2_CONFIG_MODE;
printf(" 256KB 4-way set-associative %s unified L2 cache\n",
mode ? "write-through" : "write-back");
break;
case MV_DEV_MV78100:
mode = read_cpu_ctrl(CPU_CONTROL);
size = mode & CPU_CONTROL_L2_SIZE;
mode = mode & CPU_CONTROL_L2_MODE;
printf(" %s set-associative %s unified L2 cache\n",
size ? "256KB 4-way" : "512KB 8-way",
mode ? "write-through" : "write-back");
break;
default:
break;
}
}
/* SYSINIT hook: print SoC identification during boot. */
static void
platform_identify(void *dummy)
{
soc_identify();
/*
* XXX Board identification e.g. read out from FPGA or similar should
* go here
*/
}
SYSINIT(platform_identify, SI_SUB_CPU, SI_ORDER_SECOND, platform_identify,
NULL);
#ifdef KDB
/* SYSINIT hook: drop into the kernel debugger when booted with RB_KDB. */
static void
mv_enter_debugger(void *dummy)
{
if (boothowto & RB_KDB)
kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
}
SYSINIT(mv_enter_debugger, SI_SUB_CPU, SI_ORDER_ANY, mv_enter_debugger, NULL);
#endif
/*
 * Top-level decode-window setup: apply the optional PM disable mask,
 * read the CPU window configuration from the device tree, validate all
 * window tables and program the CPU windows (except on FREY, which has
 * no programmable CPU windows here), then walk the FDT to set up the
 * per-peripheral windows.  Returns 0 on success or an errno value.
 */
int
soc_decode_win(void)
{
uint32_t dev, rev;
int mask, err;
mask = 0;
TUNABLE_INT_FETCH("hw.pm-disable-mask", &mask);
if (mask != 0)
pm_disable_device(mask);
/* Retrieve data about physical addresses from device tree. */
if ((err = win_cpu_from_dt()) != 0)
return (err);
/* Retrieve our ID: some windows facilities vary between SoC models */
soc_id(&dev, &rev);
#ifdef SOC_MV_ARMADAXP
if ((err = decode_win_sdram_fixup()) != 0)
return(err);
#endif
/* Refuse to touch the hardware if any window table is inconsistent. */
#ifndef SOC_MV_FREY
if (!decode_win_cpu_valid() || !decode_win_usb_valid() ||
!decode_win_eth_valid() || !decode_win_idma_valid() ||
!decode_win_pcie_valid() || !decode_win_sata_valid() ||
!decode_win_xor_valid())
return (EINVAL);
decode_win_cpu_setup();
#else
if (!decode_win_usb_valid() ||
!decode_win_eth_valid() || !decode_win_idma_valid() ||
!decode_win_pcie_valid() || !decode_win_sata_valid() ||
!decode_win_xor_valid())
return (EINVAL);
#endif
if (MV_DUMP_WIN)
soc_dump_decode_win();
/* Reset unit counters consumed while walking the FDT. */
eth_port = 0;
usb_port = 0;
if ((err = fdt_win_setup()) != 0)
return (err);
return (0);
}
/**************************************************************************
* Decode windows registers accessors
**************************************************************************/
#if !defined(SOC_MV_FREY)
WIN_REG_IDX_RD(win_cpu, cr, MV_WIN_CPU_CTRL, MV_MBUS_BRIDGE_BASE)
WIN_REG_IDX_RD(win_cpu, br, MV_WIN_CPU_BASE, MV_MBUS_BRIDGE_BASE)
WIN_REG_IDX_RD(win_cpu, remap_l, MV_WIN_CPU_REMAP_LO, MV_MBUS_BRIDGE_BASE)
WIN_REG_IDX_RD(win_cpu, remap_h, MV_WIN_CPU_REMAP_HI, MV_MBUS_BRIDGE_BASE)
WIN_REG_IDX_WR(win_cpu, cr, MV_WIN_CPU_CTRL, MV_MBUS_BRIDGE_BASE)
WIN_REG_IDX_WR(win_cpu, br, MV_WIN_CPU_BASE, MV_MBUS_BRIDGE_BASE)
WIN_REG_IDX_WR(win_cpu, remap_l, MV_WIN_CPU_REMAP_LO, MV_MBUS_BRIDGE_BASE)
WIN_REG_IDX_WR(win_cpu, remap_h, MV_WIN_CPU_REMAP_HI, MV_MBUS_BRIDGE_BASE)
#endif
WIN_REG_BASE_IDX_RD(win_usb, cr, MV_WIN_USB_CTRL)
WIN_REG_BASE_IDX_RD(win_usb, br, MV_WIN_USB_BASE)
WIN_REG_BASE_IDX_WR(win_usb, cr, MV_WIN_USB_CTRL)
WIN_REG_BASE_IDX_WR(win_usb, br, MV_WIN_USB_BASE)
WIN_REG_BASE_IDX_RD(win_eth, br, MV_WIN_ETH_BASE)
WIN_REG_BASE_IDX_RD(win_eth, sz, MV_WIN_ETH_SIZE)
WIN_REG_BASE_IDX_RD(win_eth, har, MV_WIN_ETH_REMAP)
WIN_REG_BASE_IDX_WR(win_eth, br, MV_WIN_ETH_BASE)
WIN_REG_BASE_IDX_WR(win_eth, sz, MV_WIN_ETH_SIZE)
WIN_REG_BASE_IDX_WR(win_eth, har, MV_WIN_ETH_REMAP)
WIN_REG_BASE_IDX_RD2(win_xor, br, MV_WIN_XOR_BASE)
WIN_REG_BASE_IDX_RD2(win_xor, sz, MV_WIN_XOR_SIZE)
WIN_REG_BASE_IDX_RD2(win_xor, har, MV_WIN_XOR_REMAP)
WIN_REG_BASE_IDX_RD2(win_xor, ctrl, MV_WIN_XOR_CTRL)
WIN_REG_BASE_IDX_WR2(win_xor, br, MV_WIN_XOR_BASE)
WIN_REG_BASE_IDX_WR2(win_xor, sz, MV_WIN_XOR_SIZE)
WIN_REG_BASE_IDX_WR2(win_xor, har, MV_WIN_XOR_REMAP)
WIN_REG_BASE_IDX_WR2(win_xor, ctrl, MV_WIN_XOR_CTRL)
WIN_REG_BASE_RD(win_eth, bare, 0x290)
WIN_REG_BASE_RD(win_eth, epap, 0x294)
WIN_REG_BASE_WR(win_eth, bare, 0x290)
WIN_REG_BASE_WR(win_eth, epap, 0x294)
WIN_REG_BASE_IDX_RD(win_pcie, cr, MV_WIN_PCIE_CTRL);
WIN_REG_BASE_IDX_RD(win_pcie, br, MV_WIN_PCIE_BASE);
WIN_REG_BASE_IDX_RD(win_pcie, remap, MV_WIN_PCIE_REMAP);
WIN_REG_BASE_IDX_WR(win_pcie, cr, MV_WIN_PCIE_CTRL);
WIN_REG_BASE_IDX_WR(win_pcie, br, MV_WIN_PCIE_BASE);
WIN_REG_BASE_IDX_WR(win_pcie, remap, MV_WIN_PCIE_REMAP);
WIN_REG_BASE_IDX_RD(pcie_bar, br, MV_PCIE_BAR_BASE);
WIN_REG_BASE_IDX_WR(pcie_bar, br, MV_PCIE_BAR_BASE);
WIN_REG_BASE_IDX_WR(pcie_bar, brh, MV_PCIE_BAR_BASE_H);
WIN_REG_BASE_IDX_WR(pcie_bar, cr, MV_PCIE_BAR_CTRL);
WIN_REG_BASE_IDX_RD(win_idma, br, MV_WIN_IDMA_BASE)
WIN_REG_BASE_IDX_RD(win_idma, sz, MV_WIN_IDMA_SIZE)
WIN_REG_BASE_IDX_RD(win_idma, har, MV_WIN_IDMA_REMAP)
WIN_REG_BASE_IDX_RD(win_idma, cap, MV_WIN_IDMA_CAP)
WIN_REG_BASE_IDX_WR(win_idma, br, MV_WIN_IDMA_BASE)
WIN_REG_BASE_IDX_WR(win_idma, sz, MV_WIN_IDMA_SIZE)
WIN_REG_BASE_IDX_WR(win_idma, har, MV_WIN_IDMA_REMAP)
WIN_REG_BASE_IDX_WR(win_idma, cap, MV_WIN_IDMA_CAP)
WIN_REG_BASE_RD(win_idma, bare, 0xa80)
WIN_REG_BASE_WR(win_idma, bare, 0xa80)
WIN_REG_BASE_IDX_RD(win_sata, cr, MV_WIN_SATA_CTRL);
WIN_REG_BASE_IDX_RD(win_sata, br, MV_WIN_SATA_BASE);
WIN_REG_BASE_IDX_WR(win_sata, cr, MV_WIN_SATA_CTRL);
WIN_REG_BASE_IDX_WR(win_sata, br, MV_WIN_SATA_BASE);
#ifndef SOC_MV_DOVE
WIN_REG_IDX_RD(ddr, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE)
WIN_REG_IDX_RD(ddr, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE)
WIN_REG_IDX_WR(ddr, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE)
WIN_REG_IDX_WR(ddr, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE)
#else
/*
* On 88F6781 (Dove) SoC DDR Controller is accessed through
* single MBUS <-> AXI bridge. In this case we provide emulated
* ddr_br_read() and ddr_sz_read() functions to keep compatibility
* with common decoding windows setup code.
*/
/* Dove: emulate ddr_br_read() via the MBUS<->AXI bridged DDR registers. */
static inline uint32_t ddr_br_read(int i)
{
uint32_t mmap;
/* Read Memory Address Map Register for CS i */
mmap = bus_space_read_4(fdtbus_bs_tag, MV_DDR_CADR_BASE + (i * 0x10), 0);
/* Return CS i base address */
return (mmap & 0xFF000000);
}
/* Dove: emulate ddr_sz_read(); result mimics the common size-register layout. */
static inline uint32_t ddr_sz_read(int i)
{
uint32_t mmap, size;
/* Read Memory Address Map Register for CS i */
mmap = bus_space_read_4(fdtbus_bs_tag, MV_DDR_CADR_BASE + (i * 0x10), 0);
/* Extract size of CS space in 64kB units */
size = (1 << ((mmap >> 16) & 0x0F));
/* Return CS size and enable/disable status */
return (((size - 1) << 16) | (mmap & 0x01));
}
#endif
#if !defined(SOC_MV_FREY)
/**************************************************************************
* Decode windows helper routines
**************************************************************************/
/*
 * Debug dump of the programmed decode windows: CPU windows (control and
 * base registers, plus remap registers where supported), the internal
 * registers base, and the DDR chip-select windows.
 */
void
soc_dump_decode_win(void)
{
uint32_t dev, rev;
int i;
/* NOTE(review): dev/rev are unused after this call — soc_id() here looks vestigial; confirm. */
soc_id(&dev, &rev);
for (i = 0; i < MV_WIN_CPU_MAX; i++) {
printf("CPU window#%d: c 0x%08x, b 0x%08x", i,
win_cpu_cr_read(i),
win_cpu_br_read(i));
if (win_cpu_can_remap(i))
printf(", rl 0x%08x, rh 0x%08x",
win_cpu_remap_l_read(i),
win_cpu_remap_h_read(i));
printf("\n");
}
printf("Internal regs base: 0x%08x\n",
bus_space_read_4(fdtbus_bs_tag, MV_INTREGS_BASE, 0));
for (i = 0; i < MV_WIN_DDR_MAX; i++)
printf("DDR CS#%d: b 0x%08x, s 0x%08x\n", i,
ddr_br_read(i), ddr_sz_read(i));
}
/**************************************************************************
* CPU windows routines
**************************************************************************/
/*
 * Return 1 when CPU decode window 'i' has remap registers on the
 * detected SoC, 0 otherwise.  The per-SoC limits below encode how many
 * of the windows are remappable on each model.
 */
int
win_cpu_can_remap(int i)
{
uint32_t dev, rev;
soc_id(&dev, &rev);
/* Depending on the SoC certain windows have remap capability */
if ((dev == MV_DEV_88F5182 && i < 2) ||
(dev == MV_DEV_88F5281 && i < 4) ||
(dev == MV_DEV_88F6281 && i < 4) ||
(dev == MV_DEV_88F6282 && i < 4) ||
(dev == MV_DEV_88F6828 && i < 20) ||
(dev == MV_DEV_88F6820 && i < 20) ||
(dev == MV_DEV_88F6810 && i < 20) ||
(dev == MV_DEV_88RC8180 && i < 2) ||
(dev == MV_DEV_88F6781 && i < 4) ||
(dev == MV_DEV_MV78100_Z0 && i < 8) ||
((dev & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY && i < 8))
return (1);
return (0);
}
/* XXX This should check for overlapping remap fields too.. */
/*
 * Check window 'win' of 'wintab' against the first 'win_no' entries for
 * an address-range overlap.  Returns the index of the first overlapping
 * window, or -1 if none overlaps.
 */
int
decode_win_overlap(int win, int win_no, const struct decode_win *wintab)
{
	const struct decode_win *self, *other;
	int idx;

	self = wintab + win;
	for (idx = 0; idx < win_no; idx++) {
		if (idx == win)
			continue;	/* Skip self */
		other = wintab + idx;
		/*
		 * Two windows overlap iff neither one ends before the
		 * other begins.
		 */
		if (other->base + other->size - 1 >= self->base &&
		    self->base + self->size - 1 >= other->base)
			return (idx);
	}
	return (-1);
}
/*
 * Sanity-check the statically/FDT-provided CPU window table (cpu_wins):
 * entry count, no DDR-target entries, remap only where hardware allows,
 * no 32-bit address-space wrap, size-aligned bases, no overlaps.
 * Returns 1 when the whole table is valid, 0 otherwise (all problems are
 * reported, not just the first one).
 */
static int
decode_win_cpu_valid(void)
{
    int i, j, rv;
    uint32_t b, e, s;

    if (cpu_wins_no > MV_WIN_CPU_MAX) {
        printf("CPU windows: too many entries: %d\n", cpu_wins_no);
        return (0);
    }

    rv = 1;
    for (i = 0; i < cpu_wins_no; i++) {

        /* DDR targets are owned by firmware; must not be touched. */
        if (cpu_wins[i].target == 0) {
            printf("CPU window#%d: DDR target window is not "
                "supposed to be reprogrammed!\n", i);
            rv = 0;
        }

        /* remap == ~0 means "no remap requested" for this entry. */
        if (cpu_wins[i].remap != ~0 && win_cpu_can_remap(i) != 1) {
            printf("CPU window#%d: not capable of remapping, but "
                "val 0x%08x defined\n", i, cpu_wins[i].remap);
            rv = 0;
        }

        s = cpu_wins[i].size;
        b = cpu_wins[i].base;
        e = b + s - 1;
        if (s > (0xFFFFFFFF - b + 1)) {
            /*
             * XXX this boundary check should account for 64bit
             * and remapping..
             */
            printf("CPU window#%d: no space for size 0x%08x at "
                "0x%08x\n", i, s, b);
            rv = 0;
            continue;
        }

        /* Base must be aligned to the window size (size is a 2^n). */
        if (b != (b & ~(s - 1))) {
            printf("CPU window#%d: address 0x%08x is not aligned "
                "to 0x%08x\n", i, b, s);
            rv = 0;
            continue;
        }

        j = decode_win_overlap(i, cpu_wins_no, &cpu_wins[0]);
        if (j >= 0) {
            printf("CPU window#%d: (0x%08x - 0x%08x) overlaps "
                "with #%d (0x%08x - 0x%08x)\n", i, b, e, j,
                cpu_wins[j].base,
                cpu_wins[j].base + cpu_wins[j].size - 1);
            rv = 0;
        }
    }
    return (rv);
}
/*
 * Program a CPU decode window for the given target/attribute with 'base'
 * and 'size'; 'remap' is either the remap address or ~0 for "no remap".
 *
 * Window selection strategy: non-remap requests search from the highest
 * window downwards (remap-capable windows sit at low indices and are kept
 * free for remap users), remap requests search from window 0 upwards.
 * An existing window with matching target+attr is reused.
 *
 * Returns 0 on success, -1 when no suitable window is available.
 */
int
decode_win_cpu_set(int target, int attr, vm_paddr_t base, uint32_t size,
    vm_paddr_t remap)
{
    uint32_t br, cr;
    int win, i;

    if (remap == ~0) {
        /* No remap: scan top-down (i is the scan direction). */
        win = MV_WIN_CPU_MAX - 1;
        i = -1;
    } else {
        /* Remap requested: scan bottom-up. */
        win = 0;
        i = 1;
    }

    while ((win >= 0) && (win < MV_WIN_CPU_MAX)) {
        cr = win_cpu_cr_read(win);

        /* Stop at the first disabled (free) window. */
        if ((cr & MV_WIN_CPU_ENABLE_BIT) == 0)
            break;

        /* Or reuse a window already set up for this target/attr. */
        if ((cr & ((0xff << MV_WIN_CPU_ATTR_SHIFT) |
            (0x1f << MV_WIN_CPU_TARGET_SHIFT))) ==
            ((attr << MV_WIN_CPU_ATTR_SHIFT) |
            (target << MV_WIN_CPU_TARGET_SHIFT)))
            break;

        win += i;
    }
    if ((win < 0) || (win >= MV_WIN_CPU_MAX) ||
        ((remap != ~0) && (win_cpu_can_remap(win) == 0)))
        return (-1);

    /* Hardware uses only the upper 16 bits of base/remap addresses. */
    br = base & 0xffff0000;
    win_cpu_br_write(win, br);

    if (win_cpu_can_remap(win)) {
        if (remap != ~0) {
            win_cpu_remap_l_write(win, remap & 0xffff0000);
            win_cpu_remap_h_write(win, 0);
        } else {
            /*
             * Remap function is not used for a given window
             * (capable of remapping) - set remap field with the
             * same value as base.
             */
            win_cpu_remap_l_write(win, base & 0xffff0000);
            win_cpu_remap_h_write(win, 0);
        }
    }

    /* Size field holds "size - 1" in the upper 16 bits. */
    cr = ((size - 1) & 0xffff0000) | (attr << MV_WIN_CPU_ATTR_SHIFT) |
        (target << MV_WIN_CPU_TARGET_SHIFT) | MV_WIN_CPU_ENABLE_BIT;
    win_cpu_cr_write(win, cr);

    return (0);
}
/*
 * Disable every CPU decode window, then program windows for all valid
 * (non-DDR) entries from the cpu_wins table.
 */
static void
decode_win_cpu_setup(void)
{
    int win;

    /* Start from a clean slate: disable and clear all CPU windows. */
    for (win = 0; win < MV_WIN_CPU_MAX; win++) {
        win_cpu_cr_write(win, 0);
        win_cpu_br_write(win, 0);
        if (win_cpu_can_remap(win)) {
            win_cpu_remap_l_write(win, 0);
            win_cpu_remap_h_write(win, 0);
        }
    }

    /* Program each table entry that names a real (non-DDR) target. */
    for (win = 0; win < cpu_wins_no; win++) {
        if (cpu_wins[win].target <= 0)
            continue;
        decode_win_cpu_set(cpu_wins[win].target, cpu_wins[win].attr,
            cpu_wins[win].base, cpu_wins[win].size,
            cpu_wins[win].remap);
    }
}
#endif
#ifdef SOC_MV_ARMADAXP
/*
 * Reconcile the u-boot-programmed DDR decode windows with the memory
 * layout described in the device tree: every active DDR window must match
 * an FDT memory region exactly (base and size); windows with no matching
 * region are disabled.  Returns 0 on success, an errno on failure.
 */
static int
decode_win_sdram_fixup(void)
{
    struct mem_region mr[FDT_MEM_REGIONS];
    uint8_t window_valid[MV_WIN_DDR_MAX];
    int mr_cnt, memsize, err, i, j;
    uint32_t valid_win_num = 0;

    /* Grab physical memory regions information from device tree. */
    err = fdt_get_mem_regions(mr, &mr_cnt, &memsize);
    if (err != 0)
        return (err);

    for (i = 0; i < MV_WIN_DDR_MAX; i++)
        window_valid[i] = 0;

    /* Try to match entries from device tree with settings from u-boot */
    for (i = 0; i < mr_cnt; i++) {
        for (j = 0; j < MV_WIN_DDR_MAX; j++) {
            if (ddr_is_active(j) &&
                (ddr_base(j) == mr[i].mr_start) &&
                (ddr_size(j) == mr[i].mr_size)) {
                window_valid[j] = 1;
                valid_win_num++;
            }
        }
    }

    /*
     * Every FDT region must have found a window.
     * NOTE(review): mr_cnt is signed, valid_win_num unsigned — the
     * comparison promotes mr_cnt; harmless as long as mr_cnt >= 0.
     */
    if (mr_cnt != valid_win_num)
        return (EINVAL);

    /* Destroy windows without corresponding device tree entry */
    for (j = 0; j < MV_WIN_DDR_MAX; j++) {
        if (ddr_is_active(j) && (window_valid[j] != 1)) {
            printf("Disabling SDRAM decoding window: %d\n", j);
            ddr_disable(j);
        }
    }
    return (0);
}
#endif
/*
 * Check if we're able to cover all active DDR banks.
 * Count the active DDR chip selects and verify that 'max' decode windows
 * are enough to map them all; returns 1 when they are, 0 (with a console
 * message) when they are not.
 */
static int
decode_win_can_cover_ddr(int max)
{
    int active, bank;

    active = 0;
    for (bank = 0; bank < MV_WIN_DDR_MAX; bank++)
        active += ddr_is_active(bank) ? 1 : 0;

    if (active > max) {
        printf("Unable to cover all active DDR banks: "
            "%d, available windows: %d\n", active, max);
        return (0);
    }
    return (1);
}
/**************************************************************************
* DDR windows routines
**************************************************************************/
/*
 * Report whether DDR chip select 'i' is enabled (bit 0 of the converted
 * size register is the enable flag).  Returns 1 when active, 0 otherwise.
 */
int
ddr_is_active(int i)
{
    return ((ddr_sz_read(i) & 0x1) ? 1 : 0);
}
/*
 * Disable DDR decode window 'i' by clearing its size (which holds the
 * enable bit) and base registers.  Size is cleared first so the window is
 * switched off before its base is zeroed.
 */
void
ddr_disable(int i)
{
    ddr_sz_write(i, 0);
    ddr_br_write(i, 0);
}
/*
 * Return the base address of DDR chip select 'i'; only the upper 8 bits
 * of the base register are significant.
 */
uint32_t
ddr_base(int i)
{
    uint32_t br;

    br = ddr_br_read(i);
    return (br & 0xff000000);
}
/*
 * Return the size in bytes of DDR chip select 'i'.  ddr_sz_read() keeps
 * "size - 1" (in 64kB granularity) in the upper 16 bits; filling the low
 * 24 bits and adding one yields the size rounded to the window unit.
 */
uint32_t
ddr_size(int i)
{
    return ((ddr_sz_read(i) | 0x00ffffff) + 1);
}
/*
 * Return the Mbus attribute value to use when mapping DDR chip select
 * 'i'.  On 88RC8180 the attribute comes from the window register itself;
 * 88F6781 always uses 0; other SoCs use a fixed per-CS table, with 0xff
 * for out-of-range chip selects.
 */
uint32_t
ddr_attr(int i)
{
    static const uint32_t cs_attr[] = { 0xe, 0xd, 0xb, 0x7 };
    uint32_t dev, rev;

    soc_id(&dev, &rev);
    if (dev == MV_DEV_88RC8180)
        return ((ddr_sz_read(i) & 0xf0) >> 4);
    if (dev == MV_DEV_88F6781)
        return (0);

    return ((i >= 0 && i < 4) ? cs_attr[i] : 0xff);
}
/*
 * Return the Mbus target (unit ID) for DDR chip select 'i'.  Only the
 * 88RC8180 derives the target from the window's attribute field; every
 * other SoC uses target 0 for the DDR controller.
 */
uint32_t
ddr_target(int i)
{
    uint32_t dev, rev, attr;

    soc_id(&dev, &rev);
    if (dev == MV_DEV_88RC8180) {
        attr = (ddr_sz_read(i) & 0xf0) >> 4;
        switch (attr) {
        case 0xd:
            return (0xd);
        case 0xb:
            return (0xe);
        case 0x7:
            return (0xf);
        case 0xe:
        default:
            /* 0xe and any unknown attribute map to unit 0xc. */
            return (0xc);
        }
    }

    /*
     * On SOCs other than 88RC8180 Mbus unit ID for
     * DDR SDRAM controller is always 0x0.
     */
    return (0);
}
/**************************************************************************
* USB windows routines
**************************************************************************/
/* Valid iff the available USB windows can map every active DDR bank. */
static int
decode_win_usb_valid(void)
{
    return (decode_win_can_cover_ddr(MV_WIN_USB_MAX));
}
/*
 * Dump control/base registers of all USB decode windows for the port at
 * 'base'.  Skipped when the port is powered down.
 * NOTE(review): uses (usb_port - 1) because decode_win_usb_setup() has
 * already advanced usb_port past this port by the time dump runs —
 * depends on setup being called first; confirm the call order.
 */
static void
decode_win_usb_dump(u_long base)
{
    int i;

    if (pm_is_disabled(CPU_PM_CTRL_USB(usb_port - 1)))
        return;

    for (i = 0; i < MV_WIN_USB_MAX; i++)
        printf("USB window#%d: c 0x%08x, b 0x%08x\n", i,
            win_usb_cr_read(base, i), win_usb_br_read(base, i));
}
/*
 * Set USB decode windows.
 *
 * Clears all windows of the USB controller at 'base', then maps each
 * active DDR bank into the first free window.  Bumps the global usb_port
 * counter so successive calls (and the dump routine) track which port is
 * being handled.  No-op when the port's power domain is disabled.
 */
static void
decode_win_usb_setup(u_long base)
{
    uint32_t br, cr;
    int i, j;

    if (pm_is_disabled(CPU_PM_CTRL_USB(usb_port)))
        return;

    /* Next call handles the next USB port. */
    usb_port++;

    for (i = 0; i < MV_WIN_USB_MAX; i++) {
        win_usb_cr_write(base, i, 0);
        win_usb_br_write(base, i, 0);
    }

    /* Only access to active DRAM banks is required */
    for (i = 0; i < MV_WIN_DDR_MAX; i++) {
        if (ddr_is_active(i)) {
            br = ddr_base(i);
            /*
             * XXX for 6281 we should handle Mbus write
             * burst limit field in the ctrl reg
             */
            cr = (((ddr_size(i) - 1) & 0xffff0000) |
                (ddr_attr(i) << 8) |
                (ddr_target(i) << 4) | 1);

            /* Set the first free USB window */
            for (j = 0; j < MV_WIN_USB_MAX; j++) {
                if (win_usb_cr_read(base, j) & 0x1)
                    continue;

                win_usb_br_write(base, j, br);
                win_usb_cr_write(base, j, cr);
                break;
            }
        }
    }
}
/**************************************************************************
* ETH windows routines
**************************************************************************/
/*
 * Report whether ETH decode window 'i' supports remapping; only windows
 * 0-3 do.  Returns 1 when remappable, 0 otherwise.
 */
static int
win_eth_can_remap(int i)
{
    return (i < 4 ? 1 : 0);
}
/*
 * Return bit 'i' of the ETH Base Address Enable register (1 means the
 * window is disabled in this register's polarity).
 */
static int
eth_bare_read(uint32_t base, int i)
{
    return ((win_eth_bare_read(base) >> i) & 0x1);
}
/*
 * Read-modify-write bit 'i' of the ETH Base Address Enable register to
 * 'val' (expected to be 0 or 1), leaving all other bits untouched.
 */
static void
eth_bare_write(uint32_t base, int i, int val)
{
    uint32_t v;

    v = win_eth_bare_read(base);
    v &= ~(1 << i);
    v |= (val << i);
    win_eth_bare_write(base, v);
}
/*
 * Set the 2-bit access-protection field for ETH window 'i' in the EPAP
 * register to 'val' (0x3 = read/write), preserving the other windows'
 * fields.
 */
static void
eth_epap_write(uint32_t base, int i, int val)
{
    uint32_t v;

    v = win_eth_epap_read(base);
    v &= ~(0x3 << (i * 2));
    v |= (val << (i * 2));
    win_eth_epap_write(base, v);
}
/*
 * Dump base/size (and remap, where supported) of all ETH decode windows,
 * plus the enable (bare) and protection (epap) registers.  Skipped when
 * the port is powered down.
 * NOTE(review): uses (eth_port - 1) — relies on decode_win_eth_setup()
 * having incremented eth_port for this port already; confirm call order.
 */
static void
decode_win_eth_dump(u_long base)
{
    int i;

    if (pm_is_disabled(CPU_PM_CTRL_GE(eth_port - 1)))
        return;

    for (i = 0; i < MV_WIN_ETH_MAX; i++) {
        printf("ETH window#%d: b 0x%08x, s 0x%08x", i,
            win_eth_br_read(base, i),
            win_eth_sz_read(base, i));

        if (win_eth_can_remap(i))
            printf(", ha 0x%08x",
                win_eth_har_read(base, i));

        printf("\n");
    }
    printf("ETH windows: bare 0x%08x, epap 0x%08x\n",
        win_eth_bare_read(base),
        win_eth_epap_read(base));
}
#if defined(SOC_MV_LOKIPLUS)
#define MV_WIN_ETH_DDR_TRGT(n) 0
#else
#define MV_WIN_ETH_DDR_TRGT(n) ddr_target(n)
#endif
/*
 * Set up ETH decode windows for the controller at 'base': disable, clear
 * and unprotect all windows, then map each active DDR bank into the first
 * free window with RW protection.  Advances the global eth_port counter.
 * No-op when the port's power domain is disabled.
 */
static void
decode_win_eth_setup(u_long base)
{
    uint32_t br, sz;
    int i, j;

    if (pm_is_disabled(CPU_PM_CTRL_GE(eth_port)))
        return;

    /* Next call handles the next GE port. */
    eth_port++;

    /* Disable, clear and revoke protection for all ETH windows */
    for (i = 0; i < MV_WIN_ETH_MAX; i++) {

        eth_bare_write(base, i, 1);
        eth_epap_write(base, i, 0);
        win_eth_br_write(base, i, 0);
        win_eth_sz_write(base, i, 0);
        if (win_eth_can_remap(i))
            win_eth_har_write(base, i, 0);
    }

    /* Only access to active DRAM banks is required */
    for (i = 0; i < MV_WIN_DDR_MAX; i++)
        if (ddr_is_active(i)) {

            br = ddr_base(i) | (ddr_attr(i) << 8) | MV_WIN_ETH_DDR_TRGT(i);
            sz = ((ddr_size(i) - 1) & 0xffff0000);

            /* Set the first free ETH window */
            for (j = 0; j < MV_WIN_ETH_MAX; j++) {
                /* bare bit set (1) means the window is free. */
                if (eth_bare_read(base, j) == 0)
                    continue;

                win_eth_br_write(base, j, br);
                win_eth_sz_write(base, j, sz);

                /* XXX remapping ETH windows not supported */

                /* Set protection RW */
                eth_epap_write(base, j, 0x3);

                /* Enable window */
                eth_bare_write(base, j, 0);
                break;
            }
        }
}
/* Valid iff the available ETH windows can map every active DDR bank. */
static int
decode_win_eth_valid(void)
{
    return (decode_win_can_cover_ddr(MV_WIN_ETH_MAX));
}
/**************************************************************************
* PCIE windows routines
**************************************************************************/
/*
 * Set up PCIe BARs and decode windows for the port at 'base'.
 * All BARs are reset and all windows cleared first.  In End-Point mode
 * only BAR1 is programmed (fixed 1MB).  In Root-Complex mode every active
 * DDR bank is mapped through a free PCIe window, BAR1 is sized to cover
 * the combined DDR and based at the lowest DDR base, and BAR0 points at
 * the internal registers.
 */
void
decode_win_pcie_setup(u_long base)
{
    uint32_t size = 0, ddrbase = ~0;
    uint32_t cr, br;
    int i, j;

    for (i = 0; i < MV_PCIE_BAR_MAX; i++) {
        pcie_bar_br_write(base, i,
            MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN);
        if (i < 3)
            pcie_bar_brh_write(base, i, 0);
        if (i > 0)
            pcie_bar_cr_write(base, i, 0);
    }

    for (i = 0; i < MV_WIN_PCIE_MAX; i++) {
        win_pcie_cr_write(base, i, 0);
        win_pcie_br_write(base, i, 0);
        win_pcie_remap_write(base, i, 0);
    }

    /* On End-Point only set BAR size to 1MB regardless of DDR size */
    if ((bus_space_read_4(fdtbus_bs_tag, base, MV_PCIE_CONTROL)
        & MV_PCIE_ROOT_CMPLX) == 0) {
        pcie_bar_cr_write(base, 1, 0xf0000 | 1);
        return;
    }

    for (i = 0; i < MV_WIN_DDR_MAX; i++) {
        if (ddr_is_active(i)) {
            /* Map DDR to BAR 1 */
            cr = (ddr_size(i) - 1) & 0xffff0000;
            size += ddr_size(i) & 0xffff0000;
            cr |= (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1;
            br = ddr_base(i);
            /* Track the lowest DDR base for BAR1. */
            if (br < ddrbase)
                ddrbase = br;

            /* Use the first available PCIE window */
            for (j = 0; j < MV_WIN_PCIE_MAX; j++) {
                if (win_pcie_cr_read(base, j) != 0)
                    continue;

                win_pcie_br_write(base, j, br);
                win_pcie_cr_write(base, j, cr);
                break;
            }
        }
    }

    /*
     * Upper 16 bits in BAR register is interpreted as BAR size
     * (in 64 kB units) plus 64kB, so subtract 0x10000
     * from value passed to register to get correct value.
     */
    size -= 0x10000;
    pcie_bar_cr_write(base, 1, size | 1);

    pcie_bar_br_write(base, 1, ddrbase |
        MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN);
    pcie_bar_br_write(base, 0, fdt_immr_pa |
        MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN);
}
/* Valid iff the available PCIe windows can map every active DDR bank. */
static int
decode_win_pcie_valid(void)
{
    return (decode_win_can_cover_ddr(MV_WIN_PCIE_MAX));
}
/**************************************************************************
* IDMA windows routines
**************************************************************************/
#if defined(SOC_MV_ORION) || defined(SOC_MV_DISCOVERY)
/*
 * Return bit 'i' of the IDMA Base Address Enable register (1 means the
 * window is disabled in this register's polarity).
 */
static int
idma_bare_read(u_long base, int i)
{
    return ((win_idma_bare_read(base) >> i) & 0x1);
}
/*
 * Read-modify-write bit 'i' of the IDMA Base Address Enable register to
 * 'val' (expected to be 0 or 1), preserving the other bits.
 */
static void
idma_bare_write(u_long base, int i, int val)
{
    uint32_t v;

    v = win_idma_bare_read(base);
    v &= ~(1 << i);
    v |= (val << i);
    win_idma_bare_write(base, v);
}
/*
 * Sets channel protection 'val' for window 'w' on channel 'c'
 * (2-bit field per window in the channel's access-protection register;
 * other windows' fields are preserved).
 */
static void
idma_cap_write(u_long base, int c, int w, int val)
{
    uint32_t v;

    v = win_idma_cap_read(base, c);
    v &= ~(0x3 << (w * 2));
    v |= (val << (w * 2));
    win_idma_cap_write(base, c, v);
}
/*
 * Set protection 'val' on all channels for window 'w'
 */
static void
idma_set_prot(u_long base, int w, int val)
{
    int chan;

    for (chan = 0; chan < MV_IDMA_CHAN_MAX; chan++)
        idma_cap_write(base, chan, w, val);
}
/*
 * Report whether IDMA decode window 'i' supports remapping; only windows
 * 0-3 do.  Returns 1 when remappable, 0 otherwise.
 */
static int
win_idma_can_remap(int i)
{
    return (i < 4 ? 1 : 0);
}
/*
 * Set up IDMA decode windows: clear everything, map the active DDR banks
 * into non-remappable windows, then program the remaining targets from
 * the statically defined idma_wins table into the first free windows.
 * No-op when the IDMA power domain is disabled.
 */
void
decode_win_idma_setup(u_long base)
{
    uint32_t br, sz;
    int i, j;

    if (pm_is_disabled(CPU_PM_CTRL_IDMA))
        return;

    /*
     * Disable and clear all IDMA windows, revoke protection for all channels
     */
    for (i = 0; i < MV_WIN_IDMA_MAX; i++) {

        idma_bare_write(base, i, 1);
        win_idma_br_write(base, i, 0);
        win_idma_sz_write(base, i, 0);
        if (win_idma_can_remap(i) == 1)
            win_idma_har_write(base, i, 0);
    }
    for (i = 0; i < MV_IDMA_CHAN_MAX; i++)
        win_idma_cap_write(base, i, 0);

    /*
     * Set up access to all active DRAM banks
     */
    for (i = 0; i < MV_WIN_DDR_MAX; i++)
        if (ddr_is_active(i)) {
            br = ddr_base(i) | (ddr_attr(i) << 8) | ddr_target(i);
            sz = ((ddr_size(i) - 1) & 0xffff0000);

            /* Place DDR entries in non-remapped windows */
            for (j = 0; j < MV_WIN_IDMA_MAX; j++)
                if (win_idma_can_remap(j) != 1 &&
                    idma_bare_read(base, j) == 1) {

                    /* Configure window */
                    win_idma_br_write(base, j, br);
                    win_idma_sz_write(base, j, sz);

                    /* Set protection RW on all channels */
                    idma_set_prot(base, j, 0x3);

                    /* Enable window */
                    idma_bare_write(base, j, 0);
                    break;
                }
        }

    /*
     * Remaining targets -- from statically defined table
     */
    for (i = 0; i < idma_wins_no; i++)
        if (idma_wins[i].target > 0) {
            br = (idma_wins[i].base & 0xffff0000) |
                (idma_wins[i].attr << 8) | idma_wins[i].target;
            sz = ((idma_wins[i].size - 1) & 0xffff0000);

            /* Set the first free IDMA window */
            for (j = 0; j < MV_WIN_IDMA_MAX; j++) {
                if (idma_bare_read(base, j) == 0)
                    continue;

                /* Configure window */
                win_idma_br_write(base, j, br);
                win_idma_sz_write(base, j, sz);
                /*
                 * NOTE(review): remap is taken from table entry
                 * 'j' (the window index), not entry 'i' being
                 * programmed — looks suspicious; confirm intent.
                 */
                if (win_idma_can_remap(j) &&
                    idma_wins[j].remap >= 0)
                    win_idma_har_write(base, j,
                        idma_wins[j].remap);

                /* Set protection RW on all channels */
                idma_set_prot(base, j, 0x3);

                /* Enable window */
                idma_bare_write(base, j, 0);
                break;
            }
        }
}
/*
 * Sanity-check the statically defined IDMA window table (idma_wins):
 * entry count (including room left for active DDR banks), no DDR-target
 * entries, remap only on remap-capable windows, no 32-bit address wrap,
 * no overlaps.  Returns 1 when valid, 0 otherwise.
 */
int
decode_win_idma_valid(void)
{
    const struct decode_win *wintab;
    int c, i, j, rv;
    uint32_t b, e, s;

    if (idma_wins_no > MV_WIN_IDMA_MAX) {
        printf("IDMA windows: too many entries: %d\n", idma_wins_no);
        return (0);
    }

    /* DDR banks consume windows too; the table must fit the rest. */
    for (i = 0, c = 0; i < MV_WIN_DDR_MAX; i++)
        if (ddr_is_active(i))
            c++;

    if (idma_wins_no > (MV_WIN_IDMA_MAX - c)) {
        printf("IDMA windows: too many entries: %d, available: %d\n",
            idma_wins_no, MV_WIN_IDMA_MAX - c);
        return (0);
    }

    wintab = idma_wins;
    rv = 1;
    for (i = 0; i < idma_wins_no; i++, wintab++) {

        if (wintab->target == 0) {
            printf("IDMA window#%d: DDR target window is not "
                "supposed to be reprogrammed!\n", i);
            rv = 0;
        }

        /*
         * Bug fix: check the IDMA window's remap capability, not the
         * CPU window's (was win_cpu_can_remap()).
         */
        if (wintab->remap >= 0 && win_idma_can_remap(i) != 1) {
            printf("IDMA window#%d: not capable of remapping, but "
                "val 0x%08x defined\n", i, wintab->remap);
            rv = 0;
        }

        s = wintab->size;
        b = wintab->base;
        e = b + s - 1;
        if (s > (0xFFFFFFFF - b + 1)) {
            /* XXX this boundary check should account for 64bit and
             * remapping.. */
            printf("IDMA window#%d: no space for size 0x%08x at "
                "0x%08x\n", i, s, b);
            rv = 0;
            continue;
        }

        j = decode_win_overlap(i, idma_wins_no, &idma_wins[0]);
        if (j >= 0) {
            printf("IDMA window#%d: (0x%08x - 0x%08x) overlaps "
                "with #%d (0x%08x - 0x%08x)\n", i, b, e, j,
                idma_wins[j].base,
                idma_wins[j].base + idma_wins[j].size - 1);
            rv = 0;
        }
    }

    return (rv);
}
/*
 * Dump base/size (and remap, where supported) of all IDMA windows, the
 * per-channel access-protection registers and the enable (bare) register.
 * Skipped when the IDMA power domain is disabled.
 */
void
decode_win_idma_dump(u_long base)
{
    int i;

    if (pm_is_disabled(CPU_PM_CTRL_IDMA))
        return;

    for (i = 0; i < MV_WIN_IDMA_MAX; i++) {
        printf("IDMA window#%d: b 0x%08x, s 0x%08x", i,
            win_idma_br_read(base, i), win_idma_sz_read(base, i));

        if (win_idma_can_remap(i))
            printf(", ha 0x%08x", win_idma_har_read(base, i));

        printf("\n");
    }
    for (i = 0; i < MV_IDMA_CHAN_MAX; i++)
        printf("IDMA channel#%d: ap 0x%08x\n", i,
            win_idma_cap_read(base, i));
    printf("IDMA windows: bare 0x%08x\n", win_idma_bare_read(base));
}
#else
/* Provide dummy functions to satisfy the build for SoCs not equipped with IDMA */
/* No IDMA on this SoC: the (empty) configuration is always valid. */
int
decode_win_idma_valid(void)
{
    return (1);
}
/* No IDMA on this SoC: nothing to set up. */
void
decode_win_idma_setup(u_long base)
{
}
/* No IDMA on this SoC: nothing to dump. */
void
decode_win_idma_dump(u_long base)
{
}
#endif
/**************************************************************************
* XOR windows routines
**************************************************************************/
#if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
/*
 * Return the window-enable bit 'i' from the XOR control register of
 * channel 'c' on engine 'e'.
 */
static int
xor_ctrl_read(u_long base, int i, int c, int e)
{
    return ((win_xor_ctrl_read(base, c, e) >> i) & 0x1);
}
/*
 * Read-modify-write the window-enable bit 'i' in the XOR control register
 * of channel 'c' on engine 'e' to 'val' (0 or 1), preserving other bits.
 */
static void
xor_ctrl_write(u_long base, int i, int c, int e, int val)
{
    uint32_t v;

    v = win_xor_ctrl_read(base, c, e);
    v &= ~(1 << i);
    v |= (val << i);
    win_xor_ctrl_write(base, c, e, v);
}
/*
 * Set channel protection 'val' for window 'w' on channel 'c'
 * (the 2-bit protection fields start at bit 16 of the control register;
 * other windows' fields are preserved).
 */
static void
xor_chan_write(u_long base, int c, int e, int w, int val)
{
    uint32_t v;

    v = win_xor_ctrl_read(base, c, e);
    v &= ~(0x3 << (w * 2 + 16));
    v |= (val << (w * 2 + 16));
    win_xor_ctrl_write(base, c, e, v);
}
/*
 * Set protection 'val' on all channels for window 'w' on engine 'e'
 */
static void
xor_set_prot(u_long base, int w, int e, int val)
{
    int chan;

    for (chan = 0; chan < MV_XOR_CHAN_MAX; chan++)
        xor_chan_write(base, chan, e, w, val);
}
/*
 * Report whether XOR decode window 'i' supports remapping; only windows
 * 0-3 do.  Returns 1 when remappable, 0 otherwise.
 */
static int
win_xor_can_remap(int i)
{
    return (i < 4 ? 1 : 0);
}
/*
 * Return the number of XOR engines present on the running SoC
 * (2, 1 or 0 depending on the device id).
 */
static int
xor_max_eng(void)
{
    uint32_t dev, rev;

    soc_id(&dev, &rev);
    switch (dev) {
    case MV_DEV_88F6281:
    case MV_DEV_88F6282:
    case MV_DEV_MV78130:
    case MV_DEV_MV78160:
    case MV_DEV_MV78230:
    case MV_DEV_MV78260:
    case MV_DEV_MV78460:
        return (2);
    case MV_DEV_MV78100:
    case MV_DEV_MV78100_Z0:
        return (1);
    default:
        return (0);
    }
}
/*
 * Map active DRAM banks into free non-remappable XOR windows for channel
 * 'c' on engine 'e'.  '*window' tracks the highest window index used so
 * far and is advanced for each bank mapped.
 * NOTE(review): the bank loop is bounded by xor_max_eng(), not
 * MV_WIN_DDR_MAX — looks like a copy/paste oddity; confirm intent.
 */
static void
xor_active_dram(u_long base, int c, int e, int *window)
{
    uint32_t br, sz;
    int i, m, w;

    /*
     * Set up access to all active DRAM banks
     */
    m = xor_max_eng();
    for (i = 0; i < m; i++)
        if (ddr_is_active(i)) {
            br = ddr_base(i) | (ddr_attr(i) << 8) |
                ddr_target(i);
            sz = ((ddr_size(i) - 1) & 0xffff0000);

            /* Place DDR entries in non-remapped windows */
            for (w = 0; w < MV_WIN_XOR_MAX; w++)
                if (win_xor_can_remap(w) != 1 &&
                    (xor_ctrl_read(base, w, c, e) == 0) &&
                    w > *window) {
                    /* Configure window */
                    win_xor_br_write(base, w, e, br);
                    win_xor_sz_write(base, w, e, sz);

                    /* Set protection RW on all channels */
                    xor_set_prot(base, w, e, 0x3);

                    /* Enable window */
                    xor_ctrl_write(base, w, c, e, 1);
                    (*window)++;
                    break;
                }
        }
}
/*
 * Set up XOR decode windows on every available engine: clear all windows,
 * map active DDR banks per channel, then program the remaining targets
 * from the statically defined xor_wins table.  No-op when the XOR power
 * domain is disabled.
 */
void
decode_win_xor_setup(u_long base)
{
    uint32_t br, sz;
    int i, j, z, e = 1, m, window;

    if (pm_is_disabled(CPU_PM_CTRL_XOR))
        return;

    /*
     * Disable and clear all XOR windows, revoke protection for all
     * channels
     */
    m = xor_max_eng();
    for (j = 0; j < m; j++, e--) {	/* 'e' walks engines: 1, then 0 */

        /* Number of non-remaped windows */
        window = MV_XOR_NON_REMAP - 1;

        for (i = 0; i < MV_WIN_XOR_MAX; i++) {
            win_xor_br_write(base, i, e, 0);
            win_xor_sz_write(base, i, e, 0);
            /*
             * Bug fix: clear the remap (HAR) register for each
             * remap-capable window inside the loop.  Previously
             * this ran once after the loop with
             * i == MV_WIN_XOR_MAX, so windows 0-3 were never
             * cleared and an out-of-range index was used.
             */
            if (win_xor_can_remap(i) == 1)
                win_xor_har_write(base, i, e, 0);
        }

        for (i = 0; i < MV_XOR_CHAN_MAX; i++) {
            win_xor_ctrl_write(base, i, e, 0);
            xor_active_dram(base, i, e, &window);
        }

        /*
         * Remaining targets -- from a statically defined table
         */
        for (i = 0; i < xor_wins_no; i++)
            if (xor_wins[i].target > 0) {
                br = (xor_wins[i].base & 0xffff0000) |
                    (xor_wins[i].attr << 8) |
                    xor_wins[i].target;
                sz = ((xor_wins[i].size - 1) & 0xffff0000);

                /* Set the first free XOR window */
                for (z = 0; z < MV_WIN_XOR_MAX; z++) {
                    if (xor_ctrl_read(base, z, 0, e) &&
                        xor_ctrl_read(base, z, 1, e))
                        continue;

                    /* Configure window */
                    win_xor_br_write(base, z, e, br);
                    win_xor_sz_write(base, z, e, sz);
                    /*
                     * NOTE(review): remap comes from table
                     * entry 'z' (window index) rather than
                     * 'i'; kept as-is — confirm intent.
                     */
                    if (win_xor_can_remap(z) &&
                        xor_wins[z].remap >= 0)
                        win_xor_har_write(base, z, e,
                            xor_wins[z].remap);

                    /* Set protection RW on all channels */
                    xor_set_prot(base, z, e, 0x3);

                    /* Enable window */
                    xor_ctrl_write(base, z, 0, e, 1);
                    xor_ctrl_write(base, z, 1, e, 1);
                    break;
                }
            }
    }
}
/*
 * Sanity-check the statically defined XOR window table (xor_wins): entry
 * count (including room left for active DDR banks), no DDR-target
 * entries, remap only on remap-capable windows, no 32-bit address wrap,
 * no overlaps.  Returns 1 when valid, 0 otherwise.
 */
int
decode_win_xor_valid(void)
{
    const struct decode_win *wintab;
    int c, i, j, rv;
    uint32_t b, e, s;

    if (xor_wins_no > MV_WIN_XOR_MAX) {
        printf("XOR windows: too many entries: %d\n", xor_wins_no);
        return (0);
    }

    /* DDR banks consume windows too; the table must fit the rest. */
    for (i = 0, c = 0; i < MV_WIN_DDR_MAX; i++)
        if (ddr_is_active(i))
            c++;

    if (xor_wins_no > (MV_WIN_XOR_MAX - c)) {
        /*
         * Bug fix: report the XOR window budget; this printed
         * MV_WIN_IDMA_MAX - c (copy/paste from the IDMA variant).
         */
        printf("XOR windows: too many entries: %d, available: %d\n",
            xor_wins_no, MV_WIN_XOR_MAX - c);
        return (0);
    }

    wintab = xor_wins;
    rv = 1;
    for (i = 0; i < xor_wins_no; i++, wintab++) {

        if (wintab->target == 0) {
            printf("XOR window#%d: DDR target window is not "
                "supposed to be reprogrammed!\n", i);
            rv = 0;
        }

        /*
         * Bug fix: check the XOR window's remap capability, not the
         * CPU window's (was win_cpu_can_remap()).
         */
        if (wintab->remap >= 0 && win_xor_can_remap(i) != 1) {
            printf("XOR window#%d: not capable of remapping, but "
                "val 0x%08x defined\n", i, wintab->remap);
            rv = 0;
        }

        s = wintab->size;
        b = wintab->base;
        e = b + s - 1;
        if (s > (0xFFFFFFFF - b + 1)) {
            /*
             * XXX this boundary check should account for 64bit
             * and remapping..
             */
            printf("XOR window#%d: no space for size 0x%08x at "
                "0x%08x\n", i, s, b);
            rv = 0;
            continue;
        }

        j = decode_win_overlap(i, xor_wins_no, &xor_wins[0]);
        if (j >= 0) {
            printf("XOR window#%d: (0x%08x - 0x%08x) overlaps "
                "with #%d (0x%08x - 0x%08x)\n", i, b, e, j,
                xor_wins[j].base,
                xor_wins[j].base + xor_wins[j].size - 1);
            rv = 0;
        }
    }

    return (rv);
}
/*
 * Dump base/size (and remap, where supported) of all XOR windows and the
 * per-channel control registers, for every XOR engine.  Skipped when the
 * XOR power domain is disabled.
 */
void
decode_win_xor_dump(u_long base)
{
    int i, j;
    int e = 1;

    if (pm_is_disabled(CPU_PM_CTRL_XOR))
        return;

    /* 'e' walks the engines downwards: 1, then 0. */
    for (j = 0; j < xor_max_eng(); j++, e--) {
        for (i = 0; i < MV_WIN_XOR_MAX; i++) {
            printf("XOR window#%d: b 0x%08x, s 0x%08x", i,
                win_xor_br_read(base, i, e), win_xor_sz_read(base, i, e));

            if (win_xor_can_remap(i))
                printf(", ha 0x%08x", win_xor_har_read(base, i, e));

            printf("\n");
        }
        for (i = 0; i < MV_XOR_CHAN_MAX; i++)
            printf("XOR control#%d: 0x%08x\n", i,
                win_xor_ctrl_read(base, i, e));
    }
}
#else
/* Provide dummy functions to satisfy the build for SoCs not equipped with XOR */
/* No XOR engine on this SoC: the (empty) configuration is always valid. */
static int
decode_win_xor_valid(void)
{
    return (1);
}
/* No XOR engine on this SoC: nothing to set up. */
static void
decode_win_xor_setup(u_long base)
{
}
/* No XOR engine on this SoC: nothing to dump. */
static void
decode_win_xor_dump(u_long base)
{
}
#endif
/**************************************************************************
* SATA windows routines
**************************************************************************/
/*
 * Set up SATA decode windows: clear all windows of the controller at
 * 'base', then map each active DDR bank into the first free (disabled)
 * window.  No-op when the SATA power domain is disabled.
 */
static void
decode_win_sata_setup(u_long base)
{
    uint32_t cr, br;
    int i, j;

    if (pm_is_disabled(CPU_PM_CTRL_SATA))
        return;

    for (i = 0; i < MV_WIN_SATA_MAX; i++) {
        win_sata_cr_write(base, i, 0);
        win_sata_br_write(base, i, 0);
    }

    for (i = 0; i < MV_WIN_DDR_MAX; i++)
        if (ddr_is_active(i)) {
            /* Size-1 in upper 16 bits, attr, target, enable bit. */
            cr = ((ddr_size(i) - 1) & 0xffff0000) |
                (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1;
            br = ddr_base(i);

            /* Use the first available SATA window */
            for (j = 0; j < MV_WIN_SATA_MAX; j++) {
                if ((win_sata_cr_read(base, j) & 1) != 0)
                    continue;

                win_sata_br_write(base, j, br);
                win_sata_cr_write(base, j, cr);
                break;
            }
        }
}
/*
 * Valid iff the available SATA windows can map every active DDR bank.
 * 88F5281 has no SATA controller, so it is trivially valid there.
 */
static int
decode_win_sata_valid(void)
{
    uint32_t dev, rev;

    soc_id(&dev, &rev);
    if (dev == MV_DEV_88F5281)
        return (1);

    return (decode_win_can_cover_ddr(MV_WIN_SATA_MAX));
}
/**************************************************************************
* FDT parsing routines.
**************************************************************************/
/*
 * Read the 'ranges' property of FDT node 'nodename' into 'buf' (caller
 * supplies 'size' bytes) and report the number of tuples and the tuple
 * size in bytes via 'tuples'/'tuplesize'.  Returns 0 on success or an
 * errno (EINVAL: node not found, ENXIO: no #address/#size cells, ERANGE:
 * unsupported cell counts or no complete tuple).
 */
static int
fdt_get_ranges(const char *nodename, void *buf, int size, int *tuples,
    int *tuplesize)
{
    phandle_t node;
    pcell_t addr_cells, par_addr_cells, size_cells;
    int len, tuple_size, tuples_count;

    node = OF_finddevice(nodename);
    if (node == -1)
        return (EINVAL);

    if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0)
        return (ENXIO);

    par_addr_cells = fdt_parent_addr_cells(node);
    if (par_addr_cells > 2)
        return (ERANGE);

    tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells +
        size_cells);

    /* Note the OF_getprop_alloc() cannot be used at this early stage. */
    len = OF_getprop(node, "ranges", buf, size);

    /*
     * XXX this does not handle the empty 'ranges;' case, which is
     * legitimate and should be allowed.
     */
    tuples_count = len / tuple_size;
    if (tuples_count <= 0)
        return (ERANGE);

    /* par_addr_cells > 2 already rejected above; kept for symmetry. */
    if (par_addr_cells > 2 || addr_cells > 2 || size_cells > 2)
        return (ERANGE);

    *tuples = tuples_count;
    *tuplesize = tuple_size;
    return (0);
}
/*
 * Populate the CPU decode-window table (cpu_win_tbl / cpu_wins_no) from
 * the device tree: one entry per '/localbus' ranges tuple, plus one entry
 * for the CESA SRAM block when present.  Returns 0 on success or an
 * errno.  A missing SRAM node is not an error.
 */
static int
win_cpu_from_dt(void)
{
    pcell_t ranges[48];
    phandle_t node;
    int i, entry_size, err, t, tuple_size, tuples;
    u_long sram_base, sram_size;

    t = 0;
    /* Retrieve 'ranges' property of '/localbus' node. */
    if ((err = fdt_get_ranges("/localbus", ranges, sizeof(ranges),
        &tuples, &tuple_size)) == 0) {
        /*
         * Fill CPU decode windows table.
         */
        bzero((void *)&cpu_win_tbl, sizeof(cpu_win_tbl));

        entry_size = tuple_size / sizeof(pcell_t);
        cpu_wins_no = tuples;

        /* Tuple layout: cell 1 = attr, 2 = base, 3 = size. */
        for (i = 0, t = 0; t < tuples; i += entry_size, t++) {
            cpu_win_tbl[t].target = 1;
            cpu_win_tbl[t].attr = fdt32_to_cpu(ranges[i + 1]);
            cpu_win_tbl[t].base = fdt32_to_cpu(ranges[i + 2]);
            cpu_win_tbl[t].size = fdt32_to_cpu(ranges[i + 3]);
            cpu_win_tbl[t].remap = ~0;
            debugf("target = 0x%0x attr = 0x%0x base = 0x%0x "
                "size = 0x%0x remap = 0x%0x\n",
                cpu_win_tbl[t].target,
                cpu_win_tbl[t].attr, cpu_win_tbl[t].base,
                cpu_win_tbl[t].size, cpu_win_tbl[t].remap);
        }
    }

    /*
     * Retrieve CESA SRAM data.
     */
    if ((node = OF_finddevice("sram")) != -1)
        if (fdt_is_compatible(node, "mrvl,cesa-sram"))
            goto moveon;

    /* NOTE(review): compared against 0 here, but -1 above — confirm. */
    if ((node = OF_finddevice("/")) == 0)
        return (ENXIO);

    if ((node = fdt_find_compatible(node, "mrvl,cesa-sram", 0)) == 0)
        /* SRAM block is not always present. */
        return (0);
moveon:
    sram_base = sram_size = 0;
    if (fdt_regsize(node, &sram_base, &sram_size) != 0)
        return (EINVAL);

    /* Append the SRAM window after the localbus-derived entries. */
    cpu_win_tbl[t].target = MV_WIN_CESA_TARGET;
    cpu_win_tbl[t].attr = MV_WIN_CESA_ATTR(1);
    cpu_win_tbl[t].base = sram_base;
    cpu_win_tbl[t].size = sram_size;
    cpu_win_tbl[t].remap = ~0;
    cpu_wins_no++;
    debugf("sram: base = 0x%0lx size = 0x%0lx\n", sram_base, sram_size);

    return (0);
}
/*
 * Walk the device tree (direct children of '/', then of the first
 * 'simple-bus' node) and, for every node matching an entry in soc_nodes,
 * call its decode-window setup handler (and dump handler when enabled)
 * with the device's virtual register base.  Returns 0 on success or an
 * errno.
 */
static int
fdt_win_setup(void)
{
    phandle_t node, child;
    struct soc_node_spec *soc_node;
    u_long size, base;
    int err, i;

    node = OF_finddevice("/");
    if (node == -1)
        panic("fdt_win_setup: no root node");

    /*
     * Traverse through all children of root and simple-bus nodes.
     * For each found device retrieve decode windows data (if applicable).
     */
    child = OF_child(node);
    while (child != 0) {
        for (i = 0; soc_nodes[i].compat != NULL; i++) {

            soc_node = &soc_nodes[i];

            if (!fdt_is_compatible(child, soc_node->compat))
                continue;

            err = fdt_regsize(child, &base, &size);
            if (err != 0)
                return (err);

            /* Translate the register offset to a virtual address. */
            base = (base & 0x000fffff) | fdt_immr_va;
            if (soc_node->decode_handler != NULL)
                soc_node->decode_handler(base);
            else
                return (ENXIO);

            if (MV_DUMP_WIN && (soc_node->dump_handler != NULL))
                soc_node->dump_handler(base);
        }

        /*
         * Once done with root-level children let's move down to
         * simple-bus and its children.
         */
        child = OF_peer(child);
        if ((child == 0) && (node == OF_finddevice("/"))) {
            node = fdt_find_compatible(node, "simple-bus", 0);
            if (node == 0)
                return (ENXIO);
            child = OF_child(node);
        }
    }

    return (0);
}
/*
 * FDT fixup: write the real TCLK frequency into the 'bus-frequency'
 * property of the cpu node (88VS584 only) and of the first 'simple-bus'
 * node, so drivers see the actual bus speed.
 */
static void
fdt_fixup_busfreq(phandle_t root)
{
    phandle_t sb;
    pcell_t freq;

    freq = cpu_to_fdt32(get_tclk());

    /*
     * Fix bus speed in cpu node
     */
    if ((sb = OF_finddevice("cpu")) != 0)
        if (fdt_is_compatible_strict(sb, "ARM,88VS584"))
            OF_setprop(sb, "bus-frequency", (void *)&freq,
                sizeof(freq));

    /*
     * This fixup sets the simple-bus bus-frequency property.
     */
    if ((sb = fdt_find_compatible(root, "simple-bus", 1)) != 0)
        OF_setprop(sb, "bus-frequency", (void *)&freq, sizeof(freq));
}
/*
 * FDT fixup: rewrite the simple-bus 'ranges' parent address and the PCIe
 * node's 'reg' base so they match the real internal-register physical
 * address (fdt_immr_pa).  On any failure the function spins forever: the
 * console is not up yet and continuing would fault later in
 * bus_space_map() or on PCIe register access.
 */
static void
fdt_fixup_ranges(phandle_t root)
{
    phandle_t node;
    pcell_t par_addr_cells, addr_cells, size_cells;
    pcell_t ranges[3], reg[2], *rangesptr;
    int len, tuple_size, tuples_count;
    uint32_t base;

    /* Fix-up SoC ranges according to real fdt_immr_pa */
    if ((node = fdt_find_compatible(root, "simple-bus", 1)) != 0) {
        /*
         * Bug fix: the assignment must be parenthesized —
         * "(par_addr_cells = f(node) <= 2)" stored the *comparison
         * result* (0/1) in par_addr_cells, corrupting tuple_size
         * and the rangesptr offset below.
         */
        if (fdt_addrsize_cells(node, &addr_cells, &size_cells) == 0 &&
            ((par_addr_cells = fdt_parent_addr_cells(node)) <= 2)) {
            tuple_size = sizeof(pcell_t) * (par_addr_cells +
                addr_cells + size_cells);
            len = OF_getprop(node, "ranges", ranges,
                sizeof(ranges));
            tuples_count = len / tuple_size;
            /* Unexpected settings are not supported */
            if (tuples_count != 1)
                goto fixup_failed;

            /* Skip the child address cells; patch the parent base. */
            rangesptr = &ranges[0];
            rangesptr += par_addr_cells;
            base = fdt_data_get((void *)rangesptr, addr_cells);
            *rangesptr = cpu_to_fdt32(fdt_immr_pa);
            if (OF_setprop(node, "ranges", (void *)&ranges[0],
                sizeof(ranges)) < 0)
                goto fixup_failed;
        }
    }

    /* Fix-up PCIe reg according to real PCIe registers' PA */
    if ((node = fdt_find_compatible(root, "mrvl,pcie", 1)) != 0) {
        if (fdt_addrsize_cells(OF_parent(node), &par_addr_cells,
            &size_cells) == 0) {
            tuple_size = sizeof(pcell_t) * (par_addr_cells +
                size_cells);
            len = OF_getprop(node, "reg", reg, sizeof(reg));
            tuples_count = len / tuple_size;
            /* Unexpected settings are not supported */
            if (tuples_count != 1)
                goto fixup_failed;

            /* Fix mojibake: these were '&reg[0]' mangled to '®[0]'. */
            base = fdt_data_get((void *)&reg[0], par_addr_cells);
            base &= ~0xFF000000;
            base |= fdt_immr_pa;
            reg[0] = cpu_to_fdt32(base);
            if (OF_setprop(node, "reg", (void *)&reg[0],
                sizeof(reg)) < 0)
                goto fixup_failed;
        }
    }
    /* Fix-up succeeded. May return and continue */
    return;

fixup_failed:
    while (1) {
        /*
         * In case of any error while fixing ranges just hang.
         *	1. No message can be displayed yet since console
         *	   is not initialized.
         *	2. Going further will cause failure on bus_space_map()
         *	   relying on the wrong ranges or data abort when
         *	   accessing PCIe registers.
         */
    }
}
/*
 * Board-specific FDT fixups, keyed by root 'compatible' string.  A board
 * may appear more than once (DB-78460 gets both the bus-frequency and the
 * ranges fixup).  NULL-terminated.
 */
struct fdt_fixup_entry fdt_fixup_table[] = {
    { "mrvl,DB-88F6281", &fdt_fixup_busfreq },
    { "mrvl,DB-78460", &fdt_fixup_busfreq },
    { "mrvl,DB-78460", &fdt_fixup_ranges },
    { NULL, NULL }
};
/*
 * FDT interrupt decoder for the Marvell PIC/MPIC: the interrupt number is
 * the first cell verbatim; trigger mode and polarity are left to the
 * controller defaults.  Returns ENXIO for other interrupt parents.
 */
static int
fdt_pic_decode_ic(phandle_t node, pcell_t *intr, int *interrupt, int *trig,
    int *pol)
{
    if (!fdt_is_compatible(node, "mrvl,pic") &&
        !fdt_is_compatible(node, "mrvl,mpic"))
        return (ENXIO);

    *interrupt = fdt32_to_cpu(intr[0]);
    *trig = INTR_TRIGGER_CONFORM;
    *pol = INTR_POLARITY_CONFORM;

    return (0);
}
/*
 * Interrupt-decoder dispatch table, tried in order; the GIC decoder is
 * only present on Armada 38x.  NULL-terminated.
 */
fdt_pic_decode_t fdt_pic_table[] = {
#ifdef SOC_MV_ARMADA38X
    &gic_decode_fdt,
#endif
    &fdt_pic_decode_ic,
    NULL
};
/*
 * Return the Sample-At-Reset register value as a 64-bit quantity.
 * Armada XP has separate high/low 32-bit registers; Armada 38x has a
 * single 32-bit register (high half is 0); other SoCs currently return 0.
 */
uint64_t
get_sar_value(void)
{
    uint32_t sar_low, sar_high;

#if defined(SOC_MV_ARMADAXP)
    sar_high = bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE,
        SAMPLE_AT_RESET_HI);
    sar_low = bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE,
        SAMPLE_AT_RESET_LO);
#elif defined(SOC_MV_ARMADA38X)
    sar_high = 0;
    sar_low = bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE,
        SAMPLE_AT_RESET);
#else
    /*
     * TODO: Add getting proper values for other SoC configurations
     */
    sar_high = 0;
    sar_low = 0;
#endif

    return (((uint64_t)sar_high << 32) | sar_low);
}
Index: head/sys/arm/mv/mv_machdep.c
===================================================================
--- head/sys/arm/mv/mv_machdep.c (revision 295142)
+++ head/sys/arm/mv/mv_machdep.c (revision 295143)
@@ -1,504 +1,504 @@
/*-
* Copyright (c) 1994-1998 Mark Brinicombe.
* Copyright (c) 1994 Brini.
* All rights reserved.
*
* This code is derived from software written for Brini by Mark Brinicombe
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Brini.
* 4. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45
*/
#include "opt_ddb.h"
#include "opt_platform.h"
#include
__FBSDID("$FreeBSD$");
#define _ARM32_BUS_DMA_PRIVATE
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include /* XXX */
#include /* XXX eventually this should be eliminated */
#include
#include
static int platform_mpp_init(void);
#if defined(SOC_MV_ARMADAXP)
void armadaxp_init_coher_fabric(void);
void armadaxp_l2_init(void);
#endif
#if defined(SOC_MV_ARMADA38X)
int armada38x_win_set_iosync_barrier(void);
int armada38x_scu_enable(void);
int armada38x_open_bootrom_win(void);
#endif
#define MPP_PIN_MAX 68
#define MPP_PIN_CELLS 2
#define MPP_PINS_PER_REG 8
#define MPP_SEL(pin,func) (((func) & 0xf) << \
(((pin) % MPP_PINS_PER_REG) * 4))
/*
 * Configure the Marvell multi-purpose pins (MPP) according to the
 * 'pin-map' property of the DT node compatible with "mrvl,mpp".
 *
 * Pins not listed in 'pin-map' default to function 0 (GPIO).  If no
 * MPP node exists at all, the settings programmed by the first-stage
 * loader are left untouched and 0 is returned.
 *
 * Returns 0 on success or an errno value on failure.
 */
static int
platform_mpp_init(void)
{
	pcell_t pinmap[MPP_PIN_MAX * MPP_PIN_CELLS];
	int mpp[MPP_PIN_MAX];
	uint32_t ctrl_val, ctrl_offset;
	pcell_t reg[4];
	u_long start, size;
	phandle_t node;
	pcell_t pin_cells, *pinmap_ptr, pin_count;
	ssize_t len;
	int par_addr_cells, par_size_cells;
	int tuple_size, rv, pins, i, j;
	int mpp_pin, mpp_function;

	/*
	 * Try to access the MPP node directly i.e. through /aliases/mpp.
	 */
	if ((node = OF_finddevice("mpp")) != -1)
		if (fdt_is_compatible(node, "mrvl,mpp"))
			goto moveon;
	/*
	 * Find the node the long way.
	 */
	if ((node = OF_finddevice("/")) == -1)
		return (ENXIO);
	if ((node = fdt_find_compatible(node, "simple-bus", 0)) == 0)
		return (ENXIO);
	if ((node = fdt_find_compatible(node, "mrvl,mpp", 0)) == 0)
		/*
		 * No MPP node. Fall back to how MPP got set by the
		 * first-stage loader and try to continue booting.
		 */
		return (0);
moveon:
	/*
	 * Process 'reg' prop.
	 */
	if ((rv = fdt_addrsize_cells(OF_parent(node), &par_addr_cells,
	    &par_size_cells)) != 0)
		return (ENXIO);
	/*
	 * Validate the tuple size before it is used as a divisor; the
	 * previous code divided first and checked afterwards, which could
	 * fault on a malformed tree with zero address/size cells.
	 */
	tuple_size = sizeof(pcell_t) * (par_addr_cells + par_size_cells);
	if (tuple_size <= 0)
		return (EINVAL);
	/* Reject a missing or truncated 'reg' property. */
	len = OF_getprop(node, "reg", reg, sizeof(reg));
	if (len < tuple_size)
		return (ENXIO);
	/*
	 * Get address/size. XXX we assume only the first 'reg' tuple is used.
	 */
	rv = fdt_data_to_res(reg, par_addr_cells, par_size_cells,
	    &start, &size);
	if (rv != 0)
		return (rv);
	start += fdt_immr_va;
	/*
	 * Process 'pin-count' and 'pin-map' props.
	 */
	if (OF_getprop(node, "pin-count", &pin_count, sizeof(pin_count)) <= 0)
		return (ENXIO);
	pin_count = fdt32_to_cpu(pin_count);
	if (pin_count > MPP_PIN_MAX)
		return (ERANGE);
	if (OF_getprop(node, "#pin-cells", &pin_cells, sizeof(pin_cells)) <= 0)
		pin_cells = MPP_PIN_CELLS;
	pin_cells = fdt32_to_cpu(pin_cells);
	if (pin_cells > MPP_PIN_CELLS)
		return (ERANGE);
	tuple_size = sizeof(pcell_t) * pin_cells;
	bzero(pinmap, sizeof(pinmap));
	len = OF_getprop(node, "pin-map", pinmap, sizeof(pinmap));
	if (len <= 0)
		return (ERANGE);
	if (len % tuple_size)
		return (ERANGE);
	pins = len / tuple_size;
	if (pins > pin_count)
		return (ERANGE);
	/*
	 * Fill out a "mpp[pin] => function" table. All pins unspecified in
	 * the 'pin-map' property are defaulted to 0 function i.e. GPIO.
	 */
	bzero(mpp, sizeof(mpp));
	pinmap_ptr = pinmap;
	for (i = 0; i < pins; i++) {
		mpp_pin = fdt32_to_cpu(*pinmap_ptr);
		mpp_function = fdt32_to_cpu(*(pinmap_ptr + 1));
		/* Bounds-check DT-supplied pin number before indexing. */
		if (mpp_pin < 0 || mpp_pin >= MPP_PIN_MAX)
			return (ERANGE);
		mpp[mpp_pin] = mpp_function;
		pinmap_ptr += pin_cells;
	}
	/*
	 * Prepare and program MPP control register values.
	 */
	ctrl_offset = 0;
	for (i = 0; i < pin_count;) {
		ctrl_val = 0;
		for (j = 0; j < MPP_PINS_PER_REG; j++) {
			/*
			 * NOTE(review): this stops at pin (pin_count - 1)
			 * rather than at the first pin past the end --
			 * looks like an off-by-one, but the behavior is
			 * kept as-is pending confirmation on hardware.
			 */
			if (i + j == pin_count - 1)
				break;
			ctrl_val |= MPP_SEL(i + j, mpp[i + j]);
		}
		i += MPP_PINS_PER_REG;
		bus_space_write_4(fdtbus_bs_tag, start, ctrl_offset,
		    ctrl_val);
#if defined(SOC_MV_ORION)
		/*
		 * Third MPP reg on Orion SoC is placed
		 * non-linearly (with different offset).
		 */
		if (i == (2 * MPP_PINS_PER_REG))
			ctrl_offset = 0x50;
		else
#endif
			ctrl_offset += 4;
	}
	return (0);
}
/*
 * Report the first VA reserved for device mappings; kernel VA
 * allocation stays below the IMMR window.
 */
vm_offset_t
platform_lastaddr(void)
{
	vm_offset_t last;

	last = fdt_immr_va;
	return (last);
}
/*
 * Resolve the IMMR (internal registers) base early.  There is no way
 * to report failure at this point (no console yet), so hang on error.
 */
void
platform_probe_and_attach(void)
{

	if (fdt_immr_addr(MV_BASE) == 0)
		return;
	/* Cannot recover or report -- spin forever. */
	for (;;)
		continue;
}
void
platform_gpio_init(void)
{

	/*
	 * Re-initialise MPP. It is important to call this prior to using
	 * console as the physical connection can be routed via MPP.
	 */
	if (platform_mpp_init() == 0)
		return;
	/* No console is available yet to report the error -- hang. */
	for (;;)
		continue;
}
/*
 * Late platform initialization: re-program the MBus decode windows and
 * perform SoC-specific setup (coherency fabric / L2 on ARMADAXP, IO
 * sync barrier / SCU / bootROM window on ARMADA38X).  Failures are
 * reported on the console but are not fatal.
 */
void
platform_late_init(void)
{
/*
 * Re-initialise decode windows
 */
#if !defined(SOC_MV_FREY)
if (soc_decode_win() != 0)
printf("WARNING: could not re-initialise decode windows! "
"Running with existing settings...\n");
#else
/* Disable watchdog and timers */
write_cpu_ctrl(CPU_TIMERS_BASE + CPU_TIMER_CONTROL, 0);
#endif
#if defined(SOC_MV_ARMADAXP)
#if !defined(SMP)
/* For SMP case it should be initialized after APs are booted */
armadaxp_init_coher_fabric();
#endif
armadaxp_l2_init();
#endif
#if defined(SOC_MV_ARMADA38X)
/* Set IO Sync Barrier bit for all Mbus devices */
if (armada38x_win_set_iosync_barrier() != 0)
printf("WARNING: could not map CPU Subsystem registers\n");
if (armada38x_scu_enable() != 0)
printf("WARNING: could not enable SCU\n");
#ifdef SMP
/* Open window to bootROM memory - needed for SMP */
if (armada38x_open_bootrom_win() != 0)
printf("WARNING: could not open window to bootROM\n");
#endif
#endif
}
/*
 * Static device-mapping table, populated at runtime by
 * platform_devmap_init().  Sized for MV_WIN_CPU_MAX entries plus two
 * more -- presumably the IMMR and SRAM ranges; see
 * platform_devmap_init().
 */
#define FDT_DEVMAP_MAX (MV_WIN_CPU_MAX + 2)
static struct arm_devmap_entry fdt_devmap[FDT_DEVMAP_MAX] = {
{ 0, 0, 0, 0, 0, }
};
/*
 * Fill a single devmap entry describing the SoC SRAM (CESA SRAM or
 * scratchpad) range as found in the DT.
 *
 * Returns 0 on success, ENOENT when no SRAM node exists (always the
 * case on ARMADAXP, where this mapping is not used), EINVAL when the
 * node's 'reg' cannot be parsed, or ENXIO when the DT root is missing.
 */
static int
platform_sram_devmap(struct arm_devmap_entry *map)
{
#if !defined(SOC_MV_ARMADAXP)
	phandle_t child, root;
	u_long base, size;

	/*
	 * SRAM range.  Note: OF_finddevice() returns -1 (not 0) on
	 * failure, so compare against -1 here -- consistent with the
	 * other OF_finddevice() callers in this file.
	 */
	if ((child = OF_finddevice("/sram")) != -1)
		if (fdt_is_compatible(child, "mrvl,cesa-sram") ||
		    fdt_is_compatible(child, "mrvl,scratchpad"))
			goto moveon;
	if ((root = OF_finddevice("/")) == -1)
		return (ENXIO);
	if ((child = fdt_find_compatible(root, "mrvl,cesa-sram", 0)) == 0 &&
	    (child = fdt_find_compatible(root, "mrvl,scratchpad", 0)) == 0)
		goto out;
moveon:
	if (fdt_regsize(child, &base, &size) != 0)
		return (EINVAL);
	map->pd_va = MV_CESA_SRAM_BASE; /* XXX */
	map->pd_pa = base;
	map->pd_size = size;
	map->pd_prot = VM_PROT_READ | VM_PROT_WRITE;
	map->pd_cache = PTE_DEVICE;
	return (0);
out:
#endif
	return (ENOENT);
}
/*
* Supply a default do-nothing implementation of mv_pci_devmap() via a weak
* alias. Many Marvell platforms don't support a PCI interface, but to support
* those that do, we end up with a reference to this function below, in
* platform_devmap_init(). If "device pci" appears in the kernel config, the
* real implementation of this function in arm/mv/mv_pci.c overrides the weak
* alias defined here.
*/
/* Prototype for the weak default implementation described above. */
int mv_default_fdt_pci_devmap(phandle_t node, struct arm_devmap_entry *devmap,
vm_offset_t io_va, vm_offset_t mem_va);
/* Do-nothing stub: maps no PCI ranges and reports success. */
int
mv_default_fdt_pci_devmap(phandle_t node, struct arm_devmap_entry *devmap,
vm_offset_t io_va, vm_offset_t mem_va)
{
return (0);
}
/* Weak alias: overridden by the real mv_pci_devmap() when PCI is built. */
__weak_reference(mv_default_fdt_pci_devmap, mv_pci_devmap);
/*
* XXX: When device entry in devmap has pd_size smaller than section size,
* system will freeze during initialization
*/
/*
- * Construct pmap_devmap[] with DT-derived config data.
+ * Construct devmap table with DT-derived config data.
*/
/*
 * Construct the devmap table with DT-derived config data: the IMMR
 * registers window, an optional SRAM entry, PCI range(s) and localbus
 * banks.  Returns 0 on success or an errno value on failure.
 */
int
platform_devmap_init(void)
{
phandle_t root, child;
pcell_t bank_count;
int i, num_mapped;
i = 0;
arm_devmap_register_table(&fdt_devmap[0]);
#ifdef SOC_MV_ARMADAXP
vm_paddr_t cur_immr_pa;
/*
 * Acquire SoC registers' base passed by u-boot and fill devmap
 * accordingly. DTB is going to be modified basing on this data
 * later.
 */
__asm __volatile("mrc p15, 4, %0, c15, c0, 0" : "=r" (cur_immr_pa));
cur_immr_pa = (cur_immr_pa << 13) & 0xff000000;
if (cur_immr_pa != 0)
fdt_immr_pa = cur_immr_pa;
#endif
/*
 * IMMR range.
 */
fdt_devmap[i].pd_va = fdt_immr_va;
fdt_devmap[i].pd_pa = fdt_immr_pa;
fdt_devmap[i].pd_size = fdt_immr_size;
fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
fdt_devmap[i].pd_cache = PTE_DEVICE;
i++;
/*
 * SRAM range.
 */
if (i < FDT_DEVMAP_MAX)
if (platform_sram_devmap(&fdt_devmap[i]) == 0)
i++;
/*
 * PCI range(s) and localbus.
 */
if ((root = OF_finddevice("/")) == -1)
return (ENXIO);
for (child = OF_child(root); child != 0; child = OF_peer(child)) {
if (fdt_is_type(child, "pci") || fdt_is_type(child, "pciep")) {
/*
 * Check space: each PCI node will consume 2 devmap
 * entries.
 */
if (i + 1 >= FDT_DEVMAP_MAX)
return (ENOMEM);
/*
 * XXX this should account for PCI and multiple ranges
 * of a given kind.
 */
if (mv_pci_devmap(child, &fdt_devmap[i], MV_PCI_VA_IO_BASE,
MV_PCI_VA_MEM_BASE) != 0)
return (ENXIO);
i += 2;
}
if (fdt_is_compatible(child, "mrvl,lbc")) {
/* Check available space */
if (OF_getprop(child, "bank-count", (void *)&bank_count,
sizeof(bank_count)) <= 0)
/* If no property, use default value */
bank_count = 1;
else
bank_count = fdt32_to_cpu(bank_count);
if ((i + bank_count) >= FDT_DEVMAP_MAX)
return (ENOMEM);
/* Add all localbus ranges to device map */
num_mapped = 0;
if (fdt_localbus_devmap(child, &fdt_devmap[i],
(int)bank_count, &num_mapped) != 0)
return (ENXIO);
i += num_mapped;
}
}
return (0);
}
/*
 * No platform-specific DMA address ranges exist; report none.
 */
struct arm32_dma_range *
bus_dma_get_range(void)
{
	struct arm32_dma_range *ranges;

	ranges = NULL;
	return (ranges);
}
/*
 * Number of platform DMA ranges: always zero here.
 */
int
bus_dma_get_range_nb(void)
{
	int nranges;

	nranges = 0;
	return (nranges);
}
#if defined(CPU_MV_PJ4B)
#ifdef DDB
#include
/*
 * DDB "show cp15" command: read and print a set of identification,
 * feature, and control registers from coprocessor 15 (the ARM system
 * control coprocessor), one mrc per register.
 */
DB_SHOW_COMMAND(cp15, db_show_cp15)
{
u_int reg;
__asm __volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (reg));
db_printf("Cpu ID: 0x%08x\n", reg);
__asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (reg));
db_printf("Current Cache Lvl ID: 0x%08x\n",reg);
__asm __volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (reg));
db_printf("Ctrl: 0x%08x\n",reg);
__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (reg));
db_printf("Aux Ctrl: 0x%08x\n",reg);
__asm __volatile("mrc p15, 0, %0, c0, c1, 0" : "=r" (reg));
db_printf("Processor Feat 0: 0x%08x\n", reg);
__asm __volatile("mrc p15, 0, %0, c0, c1, 1" : "=r" (reg));
db_printf("Processor Feat 1: 0x%08x\n", reg);
__asm __volatile("mrc p15, 0, %0, c0, c1, 2" : "=r" (reg));
db_printf("Debug Feat 0: 0x%08x\n", reg);
__asm __volatile("mrc p15, 0, %0, c0, c1, 3" : "=r" (reg));
db_printf("Auxiliary Feat 0: 0x%08x\n", reg);
__asm __volatile("mrc p15, 0, %0, c0, c1, 4" : "=r" (reg));
db_printf("Memory Model Feat 0: 0x%08x\n", reg);
__asm __volatile("mrc p15, 0, %0, c0, c1, 5" : "=r" (reg));
db_printf("Memory Model Feat 1: 0x%08x\n", reg);
__asm __volatile("mrc p15, 0, %0, c0, c1, 6" : "=r" (reg));
db_printf("Memory Model Feat 2: 0x%08x\n", reg);
__asm __volatile("mrc p15, 0, %0, c0, c1, 7" : "=r" (reg));
db_printf("Memory Model Feat 3: 0x%08x\n", reg);
/* The remaining registers are implementation-defined (CRn=c15). */
__asm __volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (reg));
db_printf("Aux Func Modes Ctrl 0: 0x%08x\n",reg);
__asm __volatile("mrc p15, 1, %0, c15, c2, 1" : "=r" (reg));
db_printf("Aux Func Modes Ctrl 1: 0x%08x\n",reg);
__asm __volatile("mrc p15, 1, %0, c15, c12, 0" : "=r" (reg));
db_printf("CPU ID code extension: 0x%08x\n",reg);
}
/*
 * DDB "show vtop <addr>" command: translate a virtual address via the
 * CP15 address-translation operation (mcr c7,c8,0) and print the
 * resulting Physical Address Register (mrc c7,c4,0).  Without an
 * address argument, print usage.
 */
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
u_int reg;
if (have_addr) {
__asm __volatile("mcr p15, 0, %0, c7, c8, 0" : : "r" (addr));
__asm __volatile("mrc p15, 0, %0, c7, c4, 0" : "=r" (reg));
db_printf("Physical address reg: 0x%08x\n",reg);
} else
db_printf("show vtop \n");
}
#endif /* DDB */
#endif /* CPU_MV_PJ4B */
Index: head/sys/arm/mv/orion/db88f5xxx.c
===================================================================
--- head/sys/arm/mv/orion/db88f5xxx.c (revision 295142)
+++ head/sys/arm/mv/orion/db88f5xxx.c (revision 295143)
@@ -1,185 +1,185 @@
/*-
* Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
* All rights reserved.
*
* Developed by Semihalf.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of MARVELL nor the names of contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/*
* Virtual address space layout:
* -----------------------------
* 0x0000_0000 - 0xbfff_ffff : user process
*
* 0xc040_0000 - virtual_avail : kernel reserved (text, data, page tables
* : structures, ARM stacks etc.)
* virtual_avail - 0xefff_ffff : KVA (virtual_avail is typically < 0xc0a0_0000)
* 0xf000_0000 - 0xf0ff_ffff : no-cache allocation area (16MB)
* 0xf100_0000 - 0xf10f_ffff : SoC integrated devices registers range (1MB)
* 0xf110_0000 - 0xf11f_ffff : PCI-Express I/O space (1MB)
* 0xf120_0000 - 0xf12f_ffff : PCI I/O space (1MB)
* 0xf130_0000 - 0xf52f_ffff : PCI-Express memory space (64MB)
* 0xf530_0000 - 0xf92f_ffff : PCI memory space (64MB)
* 0xf930_0000 - 0xfffe_ffff : unused (~108MB)
* 0xffff_0000 - 0xffff_0fff : 'high' vectors page (4KB)
* 0xffff_1000 - 0xffff_1fff : ARM_TP_ADDRESS/RAS page (4KB)
* 0xffff_2000 - 0xffff_ffff : unused (~55KB)
*/
#if 0
int platform_pci_get_irq(u_int bus, u_int slot, u_int func, u_int pin);
/* Static device mappings. */
-const struct pmap_devmap pmap_devmap[] = {
+const struct arm_devmap_entry db88f5xxx_devmap[] = {
/*
* Map the on-board devices VA == PA so that we can access them
* with the MMU on or off.
*/
{ /* SoC integrated peripherals registers range */
MV_BASE,
MV_PHYS_BASE,
MV_SIZE,
VM_PROT_READ | VM_PROT_WRITE,
PTE_DEVICE,
},
{ /* PCIE I/O */
MV_PCIE_IO_BASE,
MV_PCIE_IO_PHYS_BASE,
MV_PCIE_IO_SIZE,
VM_PROT_READ | VM_PROT_WRITE,
PTE_DEVICE,
},
{ /* PCIE Memory */
MV_PCIE_MEM_BASE,
MV_PCIE_MEM_PHYS_BASE,
MV_PCIE_MEM_SIZE,
VM_PROT_READ | VM_PROT_WRITE,
PTE_DEVICE,
},
{ /* PCI I/O */
MV_PCI_IO_BASE,
MV_PCI_IO_PHYS_BASE,
MV_PCI_IO_SIZE,
VM_PROT_READ | VM_PROT_WRITE,
PTE_DEVICE,
},
{ /* PCI Memory */
MV_PCI_MEM_BASE,
MV_PCI_MEM_PHYS_BASE,
MV_PCI_MEM_SIZE,
VM_PROT_READ | VM_PROT_WRITE,
PTE_DEVICE,
},
{ /* 7-seg LED */
MV_DEV_CS0_BASE,
MV_DEV_CS0_PHYS_BASE,
MV_DEV_CS0_SIZE,
VM_PROT_READ | VM_PROT_WRITE,
PTE_DEVICE,
},
{ 0, 0, 0, 0, 0, }
};
/*
* The pci_irq_map table consists of 3 columns:
* - PCI slot number (less than zero means ANY).
* - PCI IRQ pin (less than zero means ANY).
* - PCI IRQ (less than zero marks end of table).
*
* IRQ number from the first matching entry is used to configure PCI device
*/
/* PCI IRQ Map for DB-88F5281 */
const struct obio_pci_irq_map pci_irq_map[] = {
{ 7, -1, GPIO2IRQ(12) },
{ 8, -1, GPIO2IRQ(13) },
{ 9, -1, GPIO2IRQ(13) },
{ -1, -1, -1 }
};
/* PCI IRQ Map for DB-88F5182 */
const struct obio_pci_irq_map pci_irq_map[] = {
{ 7, -1, GPIO2IRQ(0) },
{ 8, -1, GPIO2IRQ(1) },
{ 9, -1, GPIO2IRQ(1) },
{ -1, -1, -1 }
};
#endif
#if 0
/*
* mv_gpio_config row structure:
* , ,
*
* - GPIO pin number (less than zero marks end of table)
* - GPIO flags:
* MV_GPIO_BLINK
* MV_GPIO_POLAR_LOW
* MV_GPIO_EDGE
* MV_GPIO_LEVEL
* - GPIO mode:
* 1 - Output, set to HIGH.
* 0 - Output, set to LOW.
* -1 - Input.
*/
/* GPIO Configuration for DB-88F5281 */
const struct gpio_config mv_gpio_config[] = {
{ 12, MV_GPIO_POLAR_LOW | MV_GPIO_LEVEL, -1 },
{ 13, MV_GPIO_POLAR_LOW | MV_GPIO_LEVEL, -1 },
{ -1, -1, -1 }
};
#if 0
/* GPIO Configuration for DB-88F5182 */
const struct gpio_config mv_gpio_config[] = {
{ 0, MV_GPIO_POLAR_LOW | MV_GPIO_LEVEL, -1 },
{ 1, MV_GPIO_POLAR_LOW | MV_GPIO_LEVEL, -1 },
{ -1, -1, -1 }
};
#endif
#endif
Index: head/sys/arm/versatile/versatile_machdep.c
===================================================================
--- head/sys/arm/versatile/versatile_machdep.c (revision 295142)
+++ head/sys/arm/versatile/versatile_machdep.c (revision 295143)
@@ -1,127 +1,127 @@
/*-
* Copyright (c) 2012 Oleksandr Tymoshenko.
* All rights reserved.
*
* This code is derived from software written for Brini by Mark Brinicombe
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Brini.
* 4. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include "opt_ddb.h"
#include "opt_platform.h"
#include
__FBSDID("$FreeBSD$");
#define _ARM32_BUS_DMA_PRIVATE
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/* Start of address space used for bootstrap map */
#define DEVMAP_BOOTSTRAP_MAP_START 0xE0000000
/*
 * Highest usable kernel VA; the bootstrap device-mapping region
 * starts at this address.
 */
vm_offset_t
platform_lastaddr(void)
{
	vm_offset_t last;

	last = DEVMAP_BOOTSTRAP_MAP_START;
	return (last);
}
/* Early platform hook: nothing to do on this board. */
void
platform_probe_and_attach(void)
{
}
/* GPIO setup hook: nothing to initialize on this board. */
void
platform_gpio_init(void)
{
}
/* Late init hook: nothing to do on this board. */
void
platform_late_init(void)
{
}
/* Static device-mapping table, filled by platform_devmap_init(). */
#define FDT_DEVMAP_MAX (2) /* FIXME */
static struct arm_devmap_entry fdt_devmap[FDT_DEVMAP_MAX] = {
{ 0, 0, 0, 0, 0, },
{ 0, 0, 0, 0, 0, }
};
/*
- * Construct pmap_devmap[] with DT-derived config data.
+ * Construct devmap table with DT-derived config data.
*/
/*
 * Construct the devmap table: a single static entry covering the
 * Versatile on-board peripheral register space.  Always returns 0.
 */
int
platform_devmap_init(void)
{
int i = 0;
fdt_devmap[i].pd_va = 0xf0100000;
fdt_devmap[i].pd_pa = 0x10100000;
fdt_devmap[i].pd_size = 0x01000000; /* 16 MB */
fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
fdt_devmap[i].pd_cache = PTE_DEVICE;
arm_devmap_register_table(&fdt_devmap[0]);
return (0);
}
/*
 * This platform defines no special DMA ranges.
 */
struct arm32_dma_range *
bus_dma_get_range(void)
{

	return ((struct arm32_dma_range *)NULL);
}
/*
 * Count of DMA ranges: none on this platform.
 */
int
bus_dma_get_range_nb(void)
{
	int count = 0;

	return (count);
}
/*
 * CPU reset hook.  No reset hardware is programmed here, so announce
 * the request and halt by spinning.  Note: the original declaration
 * used a K&R-style empty parameter list, which provides no prototype;
 * use (void) so callers are type-checked.
 */
void
cpu_reset(void)
{

	printf("cpu_reset\n");
	for (;;)
		continue;
}