diff --git a/sys/arm/annapurna/alpine/alpine_machdep.c b/sys/arm/annapurna/alpine/alpine_machdep.c
index 89d21bf9fcae..a4e51d339354 100644
--- a/sys/arm/annapurna/alpine/alpine_machdep.c
+++ b/sys/arm/annapurna/alpine/alpine_machdep.c
@@ -1,159 +1,158 @@
/*-
* Copyright (c) 2013 Ruslan Bukin
* Copyright (c) 2015 Semihalf
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include "opt_ddb.h"
#include "opt_platform.h"
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "platform_if.h"
#define WDTLOAD 0x000
#define LOAD_MIN 0x00000001
#define LOAD_MAX 0xFFFFFFFF
#define WDTVALUE 0x004
#define WDTCONTROL 0x008
/* control register masks */
#define INT_ENABLE (1 << 0)
#define RESET_ENABLE (1 << 1)
#define WDTLOCK 0xC00
#define UNLOCK 0x1ACCE551
#define LOCK 0x00000001
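/*
 * Reset sequence used by alpine_cpu_reset() below: writing UNLOCK to
 * WDTLOCK makes the SP805 registers writable, LOAD_MIN arms the shortest
 * possible countdown, and INT_ENABLE | RESET_ENABLE in WDTCONTROL lets
 * the expiring counter assert the SoC reset line.
 */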
bus_addr_t al_devmap_pa;
bus_addr_t al_devmap_size;
static int
alpine_get_devmap_base(bus_addr_t *pa, bus_addr_t *size)
{
phandle_t node;
if ((node = OF_finddevice("/")) == -1)
return (ENXIO);
if ((node = fdt_find_compatible(node, "simple-bus", 1)) == 0)
return (ENXIO);
return fdt_get_range(node, 0, pa, size);
}
static int
alpine_get_wdt_base(uint32_t *pbase, uint32_t *psize)
{
phandle_t node;
u_long base = 0;
u_long size = 0;
if (pbase == NULL || psize == NULL)
return (EINVAL);
if ((node = OF_finddevice("/")) == -1)
return (EFAULT);
if ((node = fdt_find_compatible(node, "simple-bus", 1)) == 0)
return (EFAULT);
if ((node =
fdt_find_compatible(node, "arm,sp805", 1)) == 0)
return (EFAULT);
if (fdt_regsize(node, &base, &size))
return (EFAULT);
*pbase = base;
*psize = size;
return (0);
}
/*
* Construct devmap table with DT-derived config data.
*/
static int
alpine_devmap_init(platform_t plat)
{
alpine_get_devmap_base(&al_devmap_pa, &al_devmap_size);
devmap_add_entry(al_devmap_pa, al_devmap_size);
return (0);
}
static void
alpine_cpu_reset(platform_t plat)
{
uint32_t wdbase, wdsize;
bus_addr_t wdbaddr;
int ret;
ret = alpine_get_wdt_base(&wdbase, &wdsize);
if (ret) {
printf("Unable to get WDT base, do power down manually...");
goto infinite;
}
ret = bus_space_map(fdtbus_bs_tag, al_devmap_pa + wdbase,
wdsize, 0, &wdbaddr);
if (ret) {
printf("Unable to map WDT base, do power down manually...");
goto infinite;
}
bus_space_write_4(fdtbus_bs_tag, wdbaddr, WDTLOCK, UNLOCK);
bus_space_write_4(fdtbus_bs_tag, wdbaddr, WDTLOAD, LOAD_MIN);
bus_space_write_4(fdtbus_bs_tag, wdbaddr, WDTCONTROL,
INT_ENABLE | RESET_ENABLE);
infinite:
while (1) {}
}
static platform_method_t alpine_methods[] = {
PLATFORMMETHOD(platform_devmap_init, alpine_devmap_init),
PLATFORMMETHOD(platform_cpu_reset, alpine_cpu_reset),
#ifdef SMP
PLATFORMMETHOD(platform_mp_start_ap, alpine_mp_start_ap),
PLATFORMMETHOD(platform_mp_setmaxid, alpine_mp_setmaxid),
#endif
PLATFORMMETHOD_END,
};
FDT_PLATFORM_DEF(alpine, "alpine", 0, "annapurna,alpine", 200);
diff --git a/sys/arm/arm/bus_space_generic.c b/sys/arm/arm/bus_space_generic.c
index 7794147aa4b6..28708e3929e3 100644
--- a/sys/arm/arm/bus_space_generic.c
+++ b/sys/arm/arm/bus_space_generic.c
@@ -1,131 +1,130 @@
/* $NetBSD: obio_space.c,v 1.6 2003/07/15 00:25:05 lukem Exp $ */
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
void
generic_bs_unimplemented(void)
{
panic("unimplemented bus_space function called");
}
/* Prototypes for all the bus_space structure functions */
bs_protos(generic);
int
generic_bs_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags,
bus_space_handle_t *bshp)
{
void *va;
/*
* We don't even examine the passed-in flags. For ARM, the CACHEABLE
* flag doesn't make sense (we create VM_MEMATTR_DEVICE mappings), and
* the LINEAR flag is just implied because we use kva_alloc(size).
*/
if ((va = pmap_mapdev(bpa, size)) == NULL)
return (ENOMEM);
*bshp = (bus_space_handle_t)va;
return (0);
}
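/*
 * A minimal usage sketch (illustration only; REG_PA and REG_SIZE are
 * hypothetical values, and the tag is assumed to be backed by these
 * generic ops): map a register window, read a 32-bit register, unmap.
 *
 *	bus_space_handle_t bsh;
 *	uint32_t v;
 *
 *	if (bus_space_map(fdtbus_bs_tag, REG_PA, REG_SIZE, 0, &bsh) == 0) {
 *		v = bus_space_read_4(fdtbus_bs_tag, bsh, 0x10);
 *		bus_space_unmap(fdtbus_bs_tag, bsh, REG_SIZE);
 *	}
 */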
int
generic_bs_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
bus_size_t alignment, bus_size_t boundary, int flags, bus_addr_t *bpap,
bus_space_handle_t *bshp)
{
panic("generic_bs_alloc(): not implemented");
}
void
generic_bs_unmap(bus_space_tag_t t, bus_space_handle_t h, bus_size_t size)
{
pmap_unmapdev((void *)h, size);
}
void
generic_bs_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
panic("generic_bs_free(): not implemented");
}
int
generic_bs_subregion(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t offset,
bus_size_t size, bus_space_handle_t *nbshp)
{
*nbshp = bsh + offset;
return (0);
}
void
generic_bs_barrier(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t offset,
bus_size_t len, int flags)
{
/*
* dsb() will drain the L1 write buffer and establish a memory access
* barrier point on platforms where that has meaning. On a write we
* also need to drain the L2 write buffer, because most on-chip memory
* mapped devices are downstream of the L2 cache. Note that this needs
* to be done even for memory mapped as Device type, because while
* Device memory is not cached, writes to it are still buffered.
*/
dsb();
if (flags & BUS_SPACE_BARRIER_WRITE) {
cpu_l2cache_drain_writebuf();
}
}
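/*
 * Barrier usage sketch (DESC_REG and DOORBELL_REG are hypothetical): make
 * a descriptor write visible to the device before the doorbell write that
 * triggers it.
 *
 *	bus_space_write_4(t, bsh, DESC_REG, desc);
 *	bus_space_barrier(t, bsh, DESC_REG, 8, BUS_SPACE_BARRIER_WRITE);
 *	bus_space_write_4(t, bsh, DOORBELL_REG, 1);
 */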
diff --git a/sys/arm/freescale/fsl_ocotp.c b/sys/arm/freescale/fsl_ocotp.c
index cf275f9da41b..805bbd613d23 100644
--- a/sys/arm/freescale/fsl_ocotp.c
+++ b/sys/arm/freescale/fsl_ocotp.c
@@ -1,202 +1,201 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2014 Steven Lawrance
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
/*
* Access to the Freescale i.MX6 On-Chip One-Time-Programmable Memory
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/*
* Find the physical address and size of the ocotp registers and devmap them,
* returning a pointer to the virtual address of the base.
*
* XXX This is temporary until we've worked out all the details of controlling
* the load order of devices. In an ideal world this device would be up and
* running before anything that needs it. When we're at a point to make that
* happen, this little block of code, and the few lines in fsl_ocotp_read_4()
* that refer to it can be deleted.
*/
#include
#include
#include
-#include
static uint32_t *ocotp_regs;
static vm_size_t ocotp_size;
static void
fsl_ocotp_devmap(void)
{
phandle_t child, root;
u_long base, size;
if ((root = OF_finddevice("/")) == -1)
goto fatal;
if ((child = fdt_depth_search_compatible(root, "fsl,imx6q-ocotp", 0)) == 0)
goto fatal;
if (fdt_regsize(child, &base, &size) != 0)
goto fatal;
ocotp_size = (vm_size_t)size;
if ((ocotp_regs = pmap_mapdev((vm_paddr_t)base, ocotp_size)) == NULL)
goto fatal;
return;
fatal:
panic("cannot find/map the ocotp registers");
}
/* XXX end of temporary code */
struct ocotp_softc {
device_t dev;
struct resource *mem_res;
};
static struct ocotp_softc *ocotp_sc;
static inline uint32_t
RD4(struct ocotp_softc *sc, bus_size_t off)
{
return (bus_read_4(sc->mem_res, off));
}
static int
ocotp_detach(device_t dev)
{
/* The ocotp registers are always accessible. */
return (EBUSY);
}
static int
ocotp_attach(device_t dev)
{
struct ocotp_softc *sc;
int err, rid;
sc = device_get_softc(dev);
sc->dev = dev;
/* Allocate bus_space resources. */
rid = 0;
sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->mem_res == NULL) {
device_printf(dev, "Cannot allocate memory resources\n");
err = ENXIO;
goto out;
}
ocotp_sc = sc;
/* We're done with the temporary mapping now. */
if (ocotp_regs != NULL)
pmap_unmapdev(ocotp_regs, ocotp_size);
err = 0;
out:
if (err != 0)
ocotp_detach(dev);
return (err);
}
static int
ocotp_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (ofw_bus_is_compatible(dev, "fsl,imx6q-ocotp") == 0)
return (ENXIO);
device_set_desc(dev,
"Freescale On-Chip One-Time-Programmable Memory");
return (BUS_PROBE_DEFAULT);
}
uint32_t
fsl_ocotp_read_4(bus_size_t off)
{
if (off > FSL_OCOTP_LAST_REG)
panic("fsl_ocotp_read_4: offset out of range");
/* If we have a softc, use the regular bus_space read. */
if (ocotp_sc != NULL)
return (RD4(ocotp_sc, off));
/*
* Otherwise establish a temporary device mapping if necessary, and read
* the device without any help from bus_space.
*
* XXX Eventually the code from there down can be deleted.
*/
if (ocotp_regs == NULL)
fsl_ocotp_devmap();
return (ocotp_regs[off / 4]);
}
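/*
 * Usage sketch (the offset is hypothetical): early boot code may read a
 * fuse word before the ocotp device attaches; the temporary devmap above
 * makes this work.
 *
 *	uint32_t fuse = fsl_ocotp_read_4(0x410);
 */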
static device_method_t ocotp_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ocotp_probe),
DEVMETHOD(device_attach, ocotp_attach),
DEVMETHOD(device_detach, ocotp_detach),
DEVMETHOD_END
};
static driver_t ocotp_driver = {
"ocotp",
ocotp_methods,
sizeof(struct ocotp_softc)
};
EARLY_DRIVER_MODULE(ocotp, simplebus, ocotp_driver, 0, 0,
BUS_PASS_CPU + BUS_PASS_ORDER_FIRST);
diff --git a/sys/arm/freescale/vybrid/vf_machdep.c b/sys/arm/freescale/vybrid/vf_machdep.c
index 2ab14f5154eb..fe36558b7ece 100644
--- a/sys/arm/freescale/vybrid/vf_machdep.c
+++ b/sys/arm/freescale/vybrid/vf_machdep.c
@@ -1,85 +1,84 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2013 Ruslan Bukin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_platform.h"
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include "platform_if.h"
static int
vf_devmap_init(platform_t plat)
{
devmap_add_entry(0x40000000, 0x100000);
return (0);
}
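/*
 * vf_cpu_reset() below prefers the src_swreset() helper; if that fails it
 * falls back to locating the "src" (System Reset Controller) node in the
 * device tree, mapping its registers, and writing SW_RST directly, then
 * spins until the reset takes effect.
 */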
static void
vf_cpu_reset(platform_t plat)
{
phandle_t src;
uint32_t paddr;
bus_addr_t vaddr;
if (src_swreset() == 0)
goto end;
src = OF_finddevice("src");
if ((src != -1) && (OF_getencprop(src, "reg", &paddr, sizeof(paddr))) > 0) {
if (bus_space_map(fdtbus_bs_tag, paddr, 0x10, 0, &vaddr) == 0) {
bus_space_write_4(fdtbus_bs_tag, vaddr, 0x00, SW_RST);
}
}
end:
while (1);
}
static platform_method_t vf_methods[] = {
PLATFORMMETHOD(platform_devmap_init, vf_devmap_init),
PLATFORMMETHOD(platform_cpu_reset, vf_cpu_reset),
PLATFORMMETHOD_END,
};
FDT_PLATFORM_DEF(vf, "vybrid", 0, "freescale,vybrid", 200);
diff --git a/sys/arm/nvidia/tegra_pcie.c b/sys/arm/nvidia/tegra_pcie.c
index b76e74da4f7d..ec272523df1b 100644
--- a/sys/arm/nvidia/tegra_pcie.c
+++ b/sys/arm/nvidia/tegra_pcie.c
@@ -1,1625 +1,1624 @@
/*-
* Copyright (c) 2016 Michal Meloun
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
/*
* Nvidia Integrated PCI/PCI-Express controller driver.
*/
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "ofw_bus_if.h"
#include "msi_if.h"
#include "pcib_if.h"
#include "pic_if.h"
#define AFI_AXI_BAR0_SZ 0x000
#define AFI_AXI_BAR1_SZ 0x004
#define AFI_AXI_BAR2_SZ 0x008
#define AFI_AXI_BAR3_SZ 0x00c
#define AFI_AXI_BAR4_SZ 0x010
#define AFI_AXI_BAR5_SZ 0x014
#define AFI_AXI_BAR0_START 0x018
#define AFI_AXI_BAR1_START 0x01c
#define AFI_AXI_BAR2_START 0x020
#define AFI_AXI_BAR3_START 0x024
#define AFI_AXI_BAR4_START 0x028
#define AFI_AXI_BAR5_START 0x02c
#define AFI_FPCI_BAR0 0x030
#define AFI_FPCI_BAR1 0x034
#define AFI_FPCI_BAR2 0x038
#define AFI_FPCI_BAR3 0x03c
#define AFI_FPCI_BAR4 0x040
#define AFI_FPCI_BAR5 0x044
#define AFI_MSI_BAR_SZ 0x060
#define AFI_MSI_FPCI_BAR_ST 0x064
#define AFI_MSI_AXI_BAR_ST 0x068
#define AFI_MSI_VEC(x) (0x06c + 4 * (x))
#define AFI_MSI_EN_VEC(x) (0x08c + 4 * (x))
#define AFI_MSI_INTR_IN_REG 32
#define AFI_MSI_REGS 8
#define AFI_CONFIGURATION 0x0ac
#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
#define AFI_FPCI_ERROR_MASKS 0x0b0
#define AFI_INTR_MASK 0x0b4
#define AFI_INTR_MASK_MSI_MASK (1 << 8)
#define AFI_INTR_MASK_INT_MASK (1 << 0)
#define AFI_INTR_CODE 0x0b8
#define AFI_INTR_CODE_MASK 0xf
#define AFI_INTR_CODE_INT_CODE_INI_SLVERR 1
#define AFI_INTR_CODE_INT_CODE_INI_DECERR 2
#define AFI_INTR_CODE_INT_CODE_TGT_SLVERR 3
#define AFI_INTR_CODE_INT_CODE_TGT_DECERR 4
#define AFI_INTR_CODE_INT_CODE_TGT_WRERR 5
#define AFI_INTR_CODE_INT_CODE_SM_MSG 6
#define AFI_INTR_CODE_INT_CODE_DFPCI_DECERR 7
#define AFI_INTR_CODE_INT_CODE_AXI_DECERR 8
#define AFI_INTR_CODE_INT_CODE_FPCI_TIMEOUT 9
#define AFI_INTR_CODE_INT_CODE_PE_PRSNT_SENSE 10
#define AFI_INTR_CODE_INT_CODE_PE_CLKREQ_SENSE 11
#define AFI_INTR_CODE_INT_CODE_CLKCLAMP_SENSE 12
#define AFI_INTR_CODE_INT_CODE_RDY4PD_SENSE 13
#define AFI_INTR_CODE_INT_CODE_P2P_ERROR 14
#define AFI_INTR_SIGNATURE 0x0bc
#define AFI_UPPER_FPCI_ADDRESS 0x0c0
#define AFI_SM_INTR_ENABLE 0x0c4
#define AFI_SM_INTR_RP_DEASSERT (1 << 14)
#define AFI_SM_INTR_RP_ASSERT (1 << 13)
#define AFI_SM_INTR_HOTPLUG (1 << 12)
#define AFI_SM_INTR_PME (1 << 11)
#define AFI_SM_INTR_FATAL_ERROR (1 << 10)
#define AFI_SM_INTR_UNCORR_ERROR (1 << 9)
#define AFI_SM_INTR_CORR_ERROR (1 << 8)
#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
#define AFI_AFI_INTR_ENABLE 0x0c8
#define AFI_AFI_INTR_ENABLE_CODE(code) (1 << (code))
#define AFI_PCIE_CONFIG 0x0f8
#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0x6
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_XBAR2_1 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_XBAR4_1 (0x1 << 20)
#define AFI_FUSE 0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
#define AFI_PEX0_CTRL 0x110
#define AFI_PEX1_CTRL 0x118
#define AFI_PEX2_CTRL 0x128
#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
#define AFI_PEX_CTRL_RST_L (1 << 0)
#define AFI_AXI_BAR6_SZ 0x134
#define AFI_AXI_BAR7_SZ 0x138
#define AFI_AXI_BAR8_SZ 0x13c
#define AFI_AXI_BAR6_START 0x140
#define AFI_AXI_BAR7_START 0x144
#define AFI_AXI_BAR8_START 0x148
#define AFI_FPCI_BAR6 0x14c
#define AFI_FPCI_BAR7 0x150
#define AFI_FPCI_BAR8 0x154
#define AFI_PLLE_CONTROL 0x160
#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define AFI_PLLE_CONTROL_BYPASS_PCIE2PLLE_CONTROL (1 << 8)
#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
#define AFI_PLLE_CONTROL_PCIE2PLLE_CONTROL_EN (1 << 0)
#define AFI_PEXBIAS_CTRL 0x168
/* Configuration space */
#define RP_VEND_XP 0x0F00
#define RP_VEND_XP_DL_UP (1 << 30)
#define RP_VEND_CTL2 0x0fa8
#define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
#define RP_PRIV_MISC 0x0FE0
#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
#define RP_LINK_CONTROL_STATUS 0x0090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
/* PADS space */
#define PADS_REFCLK_CFG0 0x000c8
#define PADS_REFCLK_CFG1 0x000cc
/* Wait 50 ms (per port) for link. */
#define TEGRA_PCIE_LINKUP_TIMEOUT 50000
/* FPCI Address space */
#define FPCI_MAP_IO 0xFDFC000000ULL
#define FPCI_MAP_TYPE0_CONFIG 0xFDFC000000ULL
#define FPCI_MAP_TYPE1_CONFIG 0xFDFF000000ULL
#define FPCI_MAP_EXT_TYPE0_CONFIG 0xFE00000000ULL
#define FPCI_MAP_EXT_TYPE1_CONFIG 0xFE10000000ULL
#define TEGRA_PCIB_MSI_ENABLE
#define DEBUG
#ifdef DEBUG
#define debugf(fmt, args...) do { printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif
/*
* Configuration space format:
* [27:24] extended register
* [23:16] bus
* [15:11] slot (device)
* [10: 8] function
* [ 7: 0] register
*/
#define PCI_CFG_EXT_REG(reg) ((((reg) >> 8) & 0x0f) << 24)
#define PCI_CFG_BUS(bus) (((bus) & 0xff) << 16)
#define PCI_CFG_DEV(dev) (((dev) & 0x1f) << 11)
#define PCI_CFG_FUN(fun) (((fun) & 0x07) << 8)
#define PCI_CFG_BASE_REG(reg) ((reg) & 0xff)
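/*
 * Worked example of the encoding above: bus 5, device 2, function 1,
 * extended config register 0x244 compose as
 *	PCI_CFG_EXT_REG(0x244)	-> 0x02000000
 *	PCI_CFG_BUS(5)		-> 0x00050000
 *	PCI_CFG_DEV(2)		-> 0x00001000
 *	PCI_CFG_FUN(1)		-> 0x00000100
 *	PCI_CFG_BASE_REG(0x244)	-> 0x00000044
 * which OR together into the window offset 0x02051144.
 */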
#define PADS_WR4(_sc, _r, _v) bus_write_4((_sc)->pads_mem_res, (_r), (_v))
#define PADS_RD4(_sc, _r) bus_read_4((_sc)->pads_mem_res, (_r))
#define AFI_WR4(_sc, _r, _v) bus_write_4((_sc)->afi_mem_res, (_r), (_v))
#define AFI_RD4(_sc, _r) bus_read_4((_sc)->afi_mem_res, (_r))
static struct {
bus_size_t axi_start;
bus_size_t fpci_start;
bus_size_t size;
} bars[] = {
{AFI_AXI_BAR0_START, AFI_FPCI_BAR0, AFI_AXI_BAR0_SZ}, /* BAR 0 */
{AFI_AXI_BAR1_START, AFI_FPCI_BAR1, AFI_AXI_BAR1_SZ}, /* BAR 1 */
{AFI_AXI_BAR2_START, AFI_FPCI_BAR2, AFI_AXI_BAR2_SZ}, /* BAR 2 */
{AFI_AXI_BAR3_START, AFI_FPCI_BAR3, AFI_AXI_BAR3_SZ}, /* BAR 3 */
{AFI_AXI_BAR4_START, AFI_FPCI_BAR4, AFI_AXI_BAR4_SZ}, /* BAR 4 */
{AFI_AXI_BAR5_START, AFI_FPCI_BAR5, AFI_AXI_BAR5_SZ}, /* BAR 5 */
{AFI_AXI_BAR6_START, AFI_FPCI_BAR6, AFI_AXI_BAR6_SZ}, /* BAR 6 */
{AFI_AXI_BAR7_START, AFI_FPCI_BAR7, AFI_AXI_BAR7_SZ}, /* BAR 7 */
{AFI_AXI_BAR8_START, AFI_FPCI_BAR8, AFI_AXI_BAR8_SZ}, /* BAR 8 */
{AFI_MSI_AXI_BAR_ST, AFI_MSI_FPCI_BAR_ST, AFI_MSI_BAR_SZ}, /* MSI 9 */
};
struct pcie_soc {
char **regulator_names;
bool cml_clk;
bool pca_enable;
uint32_t pads_refclk_cfg0;
uint32_t pads_refclk_cfg1;
};
/* Tegra 124 config. */
static char *tegra124_reg_names[] = {
"avddio-pex-supply",
"dvddio-pex-supply",
"avdd-pex-pll-supply",
"hvdd-pex-supply",
"hvdd-pex-pll-e-supply",
"vddio-pex-ctl-supply",
"avdd-pll-erefe-supply",
NULL
};
static struct pcie_soc tegra124_soc = {
.regulator_names = tegra124_reg_names,
.cml_clk = true,
.pca_enable = false,
.pads_refclk_cfg0 = 0x44ac44ac,
};
/* Tegra 210 config. */
static char *tegra210_reg_names[] = {
"avdd-pll-uerefe-supply",
"hvddio-pex-supply",
"dvddio-pex-supply",
"dvdd-pex-pll-supply",
"hvdd-pex-pll-e-supply",
"vddio-pex-ctl-supply",
NULL
};
static struct pcie_soc tegra210_soc = {
.regulator_names = tegra210_reg_names,
.cml_clk = true,
.pca_enable = true,
.pads_refclk_cfg0 = 0x90b890b8,
};
/* Compatible devices. */
static struct ofw_compat_data compat_data[] = {
{"nvidia,tegra124-pcie", (uintptr_t)&tegra124_soc},
{"nvidia,tegra210-pcie", (uintptr_t)&tegra210_soc},
{NULL, 0},
};
#define TEGRA_FLAG_MSI_USED 0x0001
struct tegra_pcib_irqsrc {
struct intr_irqsrc isrc;
u_int irq;
u_int flags;
};
struct tegra_pcib_port {
int enabled;
int port_idx; /* chip port index */
int num_lanes; /* number of lanes */
bus_size_t afi_pex_ctrl; /* offset of afi_pex_ctrl */
phy_t phy; /* port phy */
/* Config space properties. */
bus_addr_t rp_base_addr; /* PA of config window */
bus_size_t rp_size; /* size of config window */
bus_space_handle_t cfg_handle; /* handle of config window */
};
#define TEGRA_PCIB_MAX_PORTS 3
#define TEGRA_PCIB_MAX_MSI (AFI_MSI_INTR_IN_REG * AFI_MSI_REGS)
struct tegra_pcib_softc {
struct ofw_pci_softc ofw_pci;
device_t dev;
struct pcie_soc *soc;
struct mtx mtx;
struct resource *pads_mem_res;
struct resource *afi_mem_res;
struct resource *cfg_mem_res;
struct resource *irq_res;
struct resource *msi_irq_res;
void *intr_cookie;
void *msi_intr_cookie;
struct ofw_pci_range mem_range;
struct ofw_pci_range pref_mem_range;
struct ofw_pci_range io_range;
clk_t clk_pex;
clk_t clk_afi;
clk_t clk_pll_e;
clk_t clk_cml;
hwreset_t hwreset_pex;
hwreset_t hwreset_afi;
hwreset_t hwreset_pcie_x;
regulator_t regulators[16]; /* Safe maximum */
vm_offset_t msi_page; /* VA of MSI page */
bus_addr_t cfg_base_addr; /* base address of config */
bus_size_t cfg_cur_offs; /* currently mapped window */
bus_space_handle_t cfg_handle; /* handle of config window */
bus_space_tag_t bus_tag; /* tag of config window */
int lanes_cfg;
int num_ports;
struct tegra_pcib_port *ports[TEGRA_PCIB_MAX_PORTS];
struct tegra_pcib_irqsrc *isrcs;
};
static int
tegra_pcib_maxslots(device_t dev)
{
return (16);
}
static int
tegra_pcib_route_interrupt(device_t bus, device_t dev, int pin)
{
struct tegra_pcib_softc *sc;
u_int irq;
sc = device_get_softc(bus);
irq = intr_map_clone_irq(rman_get_start(sc->irq_res));
device_printf(bus, "route pin %d for device %d.%d to %u\n",
pin, pci_get_slot(dev), pci_get_function(dev),
irq);
return (irq);
}
static int
tegra_pcbib_map_cfg(struct tegra_pcib_softc *sc, u_int bus, u_int slot,
u_int func, u_int reg)
{
bus_size_t offs;
int flags, rv;
offs = sc->cfg_base_addr;
offs |= PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) |
PCI_CFG_EXT_REG(reg);
if ((sc->cfg_handle != 0) && (sc->cfg_cur_offs == offs))
return (0);
if (sc->cfg_handle != 0)
bus_space_unmap(sc->bus_tag, sc->cfg_handle, 0x800);
#if defined(BUS_SPACE_MAP_NONPOSTED)
flags = BUS_SPACE_MAP_NONPOSTED;
#else
flags = 0;
#endif
rv = bus_space_map(sc->bus_tag, offs, 0x800, flags, &sc->cfg_handle);
if (rv != 0)
device_printf(sc->dev, "Cannot map config space\n");
else
sc->cfg_cur_offs = offs;
return (rv);
}
static uint32_t
tegra_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func,
u_int reg, int bytes)
{
struct tegra_pcib_softc *sc;
bus_space_handle_t hndl;
uint32_t off;
uint32_t val;
int rv, i;
sc = device_get_softc(dev);
if (bus == 0) {
if (func != 0)
return (0xFFFFFFFF);
for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) {
if ((sc->ports[i] != NULL) &&
(sc->ports[i]->port_idx == slot)) {
hndl = sc->ports[i]->cfg_handle;
off = reg & 0xFFF;
break;
}
}
if (i >= TEGRA_PCIB_MAX_PORTS)
return (0xFFFFFFFF);
} else {
rv = tegra_pcbib_map_cfg(sc, bus, slot, func, reg);
if (rv != 0)
return (0xFFFFFFFF);
hndl = sc->cfg_handle;
off = PCI_CFG_BASE_REG(reg);
}
val = bus_space_read_4(sc->bus_tag, hndl, off & ~3);
switch (bytes) {
case 4:
break;
case 2:
if (off & 3)
val >>= 16;
val &= 0xffff;
break;
case 1:
val >>= ((off & 3) << 3);
val &= 0xff;
break;
}
return (val);
}
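/*
 * Sub-word reads above always fetch the aligned 32-bit word and extract:
 * e.g. a 2-byte read at offset 0x06 reads the word at 0x04 and shifts
 * right by 16; a 1-byte read at offset 0x07 shifts right by 24.
 */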
static void
tegra_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func,
u_int reg, uint32_t val, int bytes)
{
struct tegra_pcib_softc *sc;
bus_space_handle_t hndl;
uint32_t off;
uint32_t val2;
int rv, i;
sc = device_get_softc(dev);
if (bus == 0) {
if (func != 0)
return;
for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) {
if ((sc->ports[i] != NULL) &&
(sc->ports[i]->port_idx == slot)) {
hndl = sc->ports[i]->cfg_handle;
off = reg & 0xFFF;
break;
}
}
if (i >= TEGRA_PCIB_MAX_PORTS)
return;
} else {
rv = tegra_pcbib_map_cfg(sc, bus, slot, func, reg);
if (rv != 0)
return;
hndl = sc->cfg_handle;
off = PCI_CFG_BASE_REG(reg);
}
switch (bytes) {
case 4:
bus_space_write_4(sc->bus_tag, hndl, off, val);
break;
case 2:
val2 = bus_space_read_4(sc->bus_tag, hndl, off & ~3);
val2 &= ~(0xffff << ((off & 3) << 3));
val2 |= ((val & 0xffff) << ((off & 3) << 3));
bus_space_write_4(sc->bus_tag, hndl, off & ~3, val2);
break;
case 1:
val2 = bus_space_read_4(sc->bus_tag, hndl, off & ~3);
val2 &= ~(0xff << ((off & 3) << 3));
val2 |= ((val & 0xff) << ((off & 3) << 3));
bus_space_write_4(sc->bus_tag, hndl, off & ~3, val2);
break;
}
}
static int
tegra_pci_intr(void *arg)
{
struct tegra_pcib_softc *sc = arg;
uint32_t code, signature;
code = bus_read_4(sc->afi_mem_res, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
signature = bus_read_4(sc->afi_mem_res, AFI_INTR_SIGNATURE);
bus_write_4(sc->afi_mem_res, AFI_INTR_CODE, 0);
if (code == AFI_INTR_CODE_INT_CODE_SM_MSG)
return (FILTER_STRAY);
printf("tegra_pci_intr: code %x sig %x\n", code, signature);
return (FILTER_HANDLED);
}
/* -----------------------------------------------------------------------
*
* PCI MSI interface
*/
static int
tegra_pcib_alloc_msi(device_t pci, device_t child, int count, int maxcount,
int *irqs)
{
phandle_t msi_parent;
/* XXXX ofw_bus_msimap() doesn't work for Tegra DT.
ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
NULL);
*/
msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
irqs));
}
static int
tegra_pcib_release_msi(device_t pci, device_t child, int count, int *irqs)
{
phandle_t msi_parent;
/* XXXX ofw_bus_msimap() doesn't work for Tegra DT.
ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
NULL);
*/
msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
return (intr_release_msi(pci, child, msi_parent, count, irqs));
}
static int
tegra_pcib_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
uint32_t *data)
{
phandle_t msi_parent;
/* XXXX ofw_bus_msimap() doesn't work for Tegra DT.
ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
NULL);
*/
msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
}
#ifdef TEGRA_PCIB_MSI_ENABLE
/* --------------------------------------------------------------------------
*
* Interrupts
*
*/
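/*
 * tegra_pcib_isrc_mask() enables (val != 0) or disables (val == 0) a
 * single MSI vector: it first acks any pending bit in AFI_MSI_VEC (the
 * same write-one-to-clear access tegra_pcib_msi_intr() uses as EOI) and
 * then sets or clears the matching bit in AFI_MSI_EN_VEC.
 */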
static inline void
tegra_pcib_isrc_mask(struct tegra_pcib_softc *sc,
struct tegra_pcib_irqsrc *tgi, uint32_t val)
{
uint32_t reg;
int offs, bit;
offs = tgi->irq / AFI_MSI_INTR_IN_REG;
bit = 1 << (tgi->irq % AFI_MSI_INTR_IN_REG);
if (val != 0)
AFI_WR4(sc, AFI_MSI_VEC(offs), bit);
reg = AFI_RD4(sc, AFI_MSI_EN_VEC(offs));
if (val != 0)
reg |= bit;
else
reg &= ~bit;
AFI_WR4(sc, AFI_MSI_EN_VEC(offs), reg);
}
static int
tegra_pcib_msi_intr(void *arg)
{
u_int irq, i, bit, reg;
struct tegra_pcib_softc *sc;
struct trapframe *tf;
struct tegra_pcib_irqsrc *tgi;
sc = (struct tegra_pcib_softc *)arg;
tf = curthread->td_intr_frame;
for (i = 0; i < AFI_MSI_REGS; i++) {
reg = AFI_RD4(sc, AFI_MSI_VEC(i));
/* Handle one vector. */
while (reg != 0) {
bit = ffs(reg) - 1;
/* Send EOI */
AFI_WR4(sc, AFI_MSI_VEC(i), 1 << bit);
irq = i * AFI_MSI_INTR_IN_REG + bit;
tgi = &sc->isrcs[irq];
if (intr_isrc_dispatch(&tgi->isrc, tf) != 0) {
/* Disable stray. */
tegra_pcib_isrc_mask(sc, tgi, 0);
device_printf(sc->dev,
"Stray irq %u disabled\n", irq);
}
reg = AFI_RD4(sc, AFI_MSI_VEC(i));
}
}
return (FILTER_HANDLED);
}
static int
tegra_pcib_msi_attach(struct tegra_pcib_softc *sc)
{
int error;
uint32_t irq;
const char *name;
sc->isrcs = malloc(sizeof(*sc->isrcs) * TEGRA_PCIB_MAX_MSI, M_DEVBUF,
M_WAITOK | M_ZERO);
name = device_get_nameunit(sc->dev);
for (irq = 0; irq < TEGRA_PCIB_MAX_MSI; irq++) {
sc->isrcs[irq].irq = irq;
error = intr_isrc_register(&sc->isrcs[irq].isrc,
sc->dev, 0, "%s,%u", name, irq);
if (error != 0)
return (error); /* XXX deregister ISRCs */
}
if (intr_msi_register(sc->dev,
OF_xref_from_node(ofw_bus_get_node(sc->dev))) != 0)
return (ENXIO);
return (0);
}
static int
tegra_pcib_msi_detach(struct tegra_pcib_softc *sc)
{
/*
* No procedure has been established yet for correctly
* detaching a PIC from a running system.
*/
device_printf(sc->dev, "%s: not implemented yet\n", __func__);
return (EBUSY);
}
static void
tegra_pcib_msi_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
struct tegra_pcib_softc *sc;
struct tegra_pcib_irqsrc *tgi;
sc = device_get_softc(dev);
tgi = (struct tegra_pcib_irqsrc *)isrc;
tegra_pcib_isrc_mask(sc, tgi, 0);
}
static void
tegra_pcib_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
struct tegra_pcib_softc *sc;
struct tegra_pcib_irqsrc *tgi;
sc = device_get_softc(dev);
tgi = (struct tegra_pcib_irqsrc *)isrc;
tegra_pcib_isrc_mask(sc, tgi, 1);
}
/* MSI interrupts are edge triggered -> do nothing */
static void
tegra_pcib_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
}
static void
tegra_pcib_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{
}
static void
tegra_pcib_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
}
static int
tegra_pcib_msi_setup_intr(device_t dev, struct intr_irqsrc *isrc,
struct resource *res, struct intr_map_data *data)
{
if (data == NULL || data->type != INTR_MAP_DATA_MSI)
return (ENOTSUP);
if (isrc->isrc_handlers == 0)
tegra_pcib_msi_enable_intr(dev, isrc);
return (0);
}
static int
tegra_pcib_msi_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
struct resource *res, struct intr_map_data *data)
{
struct tegra_pcib_softc *sc;
struct tegra_pcib_irqsrc *tgi;
sc = device_get_softc(dev);
tgi = (struct tegra_pcib_irqsrc *)isrc;
if (isrc->isrc_handlers == 0)
tegra_pcib_isrc_mask(sc, tgi, 0);
return (0);
}
static int
tegra_pcib_msi_alloc_msi(device_t dev, device_t child, int count, int maxcount,
device_t *pic, struct intr_irqsrc **srcs)
{
struct tegra_pcib_softc *sc;
int i, irq, end_irq;
bool found;
KASSERT(powerof2(count), ("%s: bad count", __func__));
KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
sc = device_get_softc(dev);
mtx_lock(&sc->mtx);
found = false;
for (irq = 0; (irq + count - 1) < TEGRA_PCIB_MAX_MSI; irq++) {
/* Start on an aligned interrupt */
if ((irq & (maxcount - 1)) != 0)
continue;
/* Assume we found a valid range until shown otherwise */
found = true;
/* Check this range is valid */
for (end_irq = irq; end_irq < irq + count; end_irq++) {
/* This is already used */
if ((sc->isrcs[end_irq].flags & TEGRA_FLAG_MSI_USED) ==
TEGRA_FLAG_MSI_USED) {
found = false;
break;
}
}
if (found)
break;
}
/* Not enough interrupts were found */
if (!found || irq == (TEGRA_PCIB_MAX_MSI - 1)) {
mtx_unlock(&sc->mtx);
return (ENXIO);
}
for (i = 0; i < count; i++) {
/* Mark the interrupt as used */
sc->isrcs[irq + i].flags |= TEGRA_FLAG_MSI_USED;
}
mtx_unlock(&sc->mtx);
for (i = 0; i < count; i++)
srcs[i] = (struct intr_irqsrc *)&sc->isrcs[irq + i];
*pic = device_get_parent(dev);
return (0);
}
static int
tegra_pcib_msi_release_msi(device_t dev, device_t child, int count,
struct intr_irqsrc **isrc)
{
struct tegra_pcib_softc *sc;
struct tegra_pcib_irqsrc *ti;
int i;
sc = device_get_softc(dev);
mtx_lock(&sc->mtx);
for (i = 0; i < count; i++) {
ti = (struct tegra_pcib_irqsrc *)isrc[i];
KASSERT((ti->flags & TEGRA_FLAG_MSI_USED) == TEGRA_FLAG_MSI_USED,
("%s: Trying to release an unused MSI-X interrupt",
__func__));
ti->flags &= ~TEGRA_FLAG_MSI_USED;
}
mtx_unlock(&sc->mtx);
return (0);
}
static int
tegra_pcib_msi_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
uint64_t *addr, uint32_t *data)
{
struct tegra_pcib_softc *sc = device_get_softc(dev);
struct tegra_pcib_irqsrc *ti = (struct tegra_pcib_irqsrc *)isrc;
*addr = vtophys(sc->msi_page);
*data = ti->irq;
return (0);
}
#endif
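/*
 * MSI delivery model used above: tegra_pcib_msi_map_msi() hands a device
 * the physical address of msi_page as the MSI target and the vector
 * number as the data word; the AFI matches inbound writes to that page
 * (programmed as BAR 9 in tegra_pcib_attach_msi()) and latches the data
 * into AFI_MSI_VEC, from which tegra_pcib_msi_intr() dispatches.
 */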
/* ------------------------------------------------------------------- */
static bus_size_t
tegra_pcib_pex_ctrl(struct tegra_pcib_softc *sc, int port)
{
switch (port) {
case 0:
return (AFI_PEX0_CTRL);
case 1:
return (AFI_PEX1_CTRL);
case 2:
return (AFI_PEX2_CTRL);
default:
panic("invalid port number: %d\n", port);
}
}
static int
tegra_pcib_enable_fdt_resources(struct tegra_pcib_softc *sc)
{
int i, rv;
rv = hwreset_assert(sc->hwreset_pcie_x);
if (rv != 0) {
device_printf(sc->dev, "Cannot assert 'pcie_x' reset\n");
return (rv);
}
rv = hwreset_assert(sc->hwreset_afi);
if (rv != 0) {
device_printf(sc->dev, "Cannot assert 'afi' reset\n");
return (rv);
}
rv = hwreset_assert(sc->hwreset_pex);
if (rv != 0) {
device_printf(sc->dev, "Cannot assert 'pex' reset\n");
return (rv);
}
tegra_powergate_power_off(TEGRA_POWERGATE_PCX);
/* Regulators. */
for (i = 0; i < nitems(sc->regulators); i++) {
if (sc->regulators[i] == NULL)
continue;
rv = regulator_enable(sc->regulators[i]);
if (rv != 0) {
device_printf(sc->dev,
"Cannot enable '%s' regulator\n",
sc->soc->regulator_names[i]);
return (rv);
}
}
rv = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCX,
sc->clk_pex, sc->hwreset_pex);
if (rv != 0) {
device_printf(sc->dev, "Cannot enable 'PCX' powergate\n");
return (rv);
}
rv = hwreset_deassert(sc->hwreset_afi);
if (rv != 0) {
device_printf(sc->dev, "Cannot unreset 'afi' reset\n");
return (rv);
}
rv = clk_enable(sc->clk_afi);
if (rv != 0) {
device_printf(sc->dev, "Cannot enable 'afi' clock\n");
return (rv);
}
if (sc->soc->cml_clk) {
rv = clk_enable(sc->clk_cml);
if (rv != 0) {
device_printf(sc->dev, "Cannot enable 'cml' clock\n");
return (rv);
}
}
rv = clk_enable(sc->clk_pll_e);
if (rv != 0) {
device_printf(sc->dev, "Cannot enable 'pll_e' clock\n");
return (rv);
}
return (0);
}
static struct tegra_pcib_port *
tegra_pcib_parse_port(struct tegra_pcib_softc *sc, phandle_t node)
{
struct tegra_pcib_port *port;
uint32_t tmp[5];
char tmpstr[6];
int rv;
port = malloc(sizeof(struct tegra_pcib_port), M_DEVBUF, M_WAITOK);
rv = OF_getprop(node, "status", tmpstr, sizeof(tmpstr));
if (rv <= 0 || strcmp(tmpstr, "okay") == 0 ||
strcmp(tmpstr, "ok") == 0)
port->enabled = 1;
else
port->enabled = 0;
rv = OF_getencprop(node, "assigned-addresses", tmp, sizeof(tmp));
if (rv != sizeof(tmp)) {
device_printf(sc->dev, "Cannot parse assigned-address: %d\n",
rv);
goto fail;
}
port->rp_base_addr = tmp[2];
port->rp_size = tmp[4];
port->port_idx = OFW_PCI_PHYS_HI_DEVICE(tmp[0]) - 1;
if (port->port_idx >= TEGRA_PCIB_MAX_PORTS) {
device_printf(sc->dev, "Invalid port index: %d\n",
port->port_idx);
goto fail;
}
/* XXX - TODO:
* Implement proper function for parsing pci "reg" property:
* - it has PCI bus format
* - it is relative to the matching "assigned-addresses"
*/
rv = OF_getencprop(node, "reg", tmp, sizeof(tmp));
if (rv != sizeof(tmp)) {
device_printf(sc->dev, "Cannot parse reg: %d\n", rv);
goto fail;
}
port->rp_base_addr += tmp[2];
rv = OF_getencprop(node, "nvidia,num-lanes", &port->num_lanes,
sizeof(port->num_lanes));
if (rv != sizeof(port->num_lanes)) {
device_printf(sc->dev, "Cannot parse nvidia,num-lanes: %d\n",
rv);
goto fail;
}
if (port->num_lanes > 4) {
device_printf(sc->dev, "Invalid nvidia,num-lanes: %d\n",
port->num_lanes);
goto fail;
}
port->afi_pex_ctrl = tegra_pcib_pex_ctrl(sc, port->port_idx);
sc->lanes_cfg |= port->num_lanes << (4 * port->port_idx);
/* Phy. */
rv = phy_get_by_ofw_name(sc->dev, node, "pcie-0", &port->phy);
if (rv != 0) {
device_printf(sc->dev,
"Cannot get 'pcie-0' phy for port %d\n",
port->port_idx);
goto fail;
}
return (port);
fail:
free(port, M_DEVBUF);
return (NULL);
}
static int
tegra_pcib_parse_fdt_resources(struct tegra_pcib_softc *sc, phandle_t node)
{
phandle_t child;
struct tegra_pcib_port *port;
int i, rv;
/* Regulators. */
for (i = 0; sc->soc->regulator_names[i] != NULL; i++) {
if (i >= nitems(sc->regulators)) {
device_printf(sc->dev,
"Too many regulators present in DT.\n");
return (EOVERFLOW);
}
rv = regulator_get_by_ofw_property(sc->dev, 0,
sc->soc->regulator_names[i], sc->regulators + i);
if (rv != 0) {
device_printf(sc->dev,
"Cannot get '%s' regulator\n",
sc->soc->regulator_names[i]);
return (ENXIO);
}
}
/* Resets. */
rv = hwreset_get_by_ofw_name(sc->dev, 0, "pex", &sc->hwreset_pex);
if (rv != 0) {
device_printf(sc->dev, "Cannot get 'pex' reset\n");
return (ENXIO);
}
rv = hwreset_get_by_ofw_name(sc->dev, 0, "afi", &sc->hwreset_afi);
if (rv != 0) {
device_printf(sc->dev, "Cannot get 'afi' reset\n");
return (ENXIO);
}
rv = hwreset_get_by_ofw_name(sc->dev, 0, "pcie_x", &sc->hwreset_pcie_x);
if (rv != 0) {
device_printf(sc->dev, "Cannot get 'pcie_x' reset\n");
return (ENXIO);
}
/* Clocks. */
rv = clk_get_by_ofw_name(sc->dev, 0, "pex", &sc->clk_pex);
if (rv != 0) {
device_printf(sc->dev, "Cannot get 'pex' clock\n");
return (ENXIO);
}
rv = clk_get_by_ofw_name(sc->dev, 0, "afi", &sc->clk_afi);
if (rv != 0) {
device_printf(sc->dev, "Cannot get 'afi' clock\n");
return (ENXIO);
}
rv = clk_get_by_ofw_name(sc->dev, 0, "pll_e", &sc->clk_pll_e);
if (rv != 0) {
device_printf(sc->dev, "Cannot get 'pll_e' clock\n");
return (ENXIO);
}
if (sc->soc->cml_clk) {
rv = clk_get_by_ofw_name(sc->dev, 0, "cml", &sc->clk_cml);
if (rv != 0) {
device_printf(sc->dev, "Cannot get 'cml' clock\n");
return (ENXIO);
}
}
/* Ports */
sc->num_ports = 0;
for (child = OF_child(node); child != 0; child = OF_peer(child)) {
port = tegra_pcib_parse_port(sc, child);
if (port == NULL) {
device_printf(sc->dev, "Cannot parse PCIe port node\n");
return (ENXIO);
}
sc->ports[sc->num_ports++] = port;
}
return (0);
}
static int
tegra_pcib_decode_ranges(struct tegra_pcib_softc *sc,
struct ofw_pci_range *ranges, int nranges)
{
int i;
for (i = 2; i < nranges; i++) {
if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
OFW_PCI_PHYS_HI_SPACE_IO) {
if (sc->io_range.size != 0) {
device_printf(sc->dev,
"Duplicated IO range found in DT\n");
return (ENXIO);
}
sc->io_range = ranges[i];
}
if (((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
OFW_PCI_PHYS_HI_SPACE_MEM32)) {
if (ranges[i].pci_hi & OFW_PCI_PHYS_HI_PREFETCHABLE) {
if (sc->pref_mem_range.size != 0) {
device_printf(sc->dev,
"Duplicated memory range found "
"in DT\n");
return (ENXIO);
}
sc->pref_mem_range = ranges[i];
} else {
if (sc->mem_range.size != 0) {
device_printf(sc->dev,
"Duplicated memory range found "
"in DT\n");
return (ENXIO);
}
sc->mem_range = ranges[i];
}
}
}
if ((sc->io_range.size == 0) || (sc->mem_range.size == 0)
|| (sc->pref_mem_range.size == 0)) {
device_printf(sc->dev,
" Not all required ranges are found in DT\n");
return (ENXIO);
}
return (0);
}
/*
* Hardware config.
*/
static int
tegra_pcib_wait_for_link(struct tegra_pcib_softc *sc,
struct tegra_pcib_port *port)
{
uint32_t reg;
int i;
/* Setup link detection. */
reg = tegra_pcib_read_config(sc->dev, 0, port->port_idx, 0,
RP_PRIV_MISC, 4);
reg &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
reg |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
tegra_pcib_write_config(sc->dev, 0, port->port_idx, 0,
RP_PRIV_MISC, reg, 4);
for (i = TEGRA_PCIE_LINKUP_TIMEOUT; i > 0; i--) {
reg = tegra_pcib_read_config(sc->dev, 0, port->port_idx, 0,
RP_VEND_XP, 4);
if (reg & RP_VEND_XP_DL_UP)
break;
DELAY(1);
}
if (i <= 0)
return (ETIMEDOUT);
for (i = TEGRA_PCIE_LINKUP_TIMEOUT; i > 0; i--) {
reg = tegra_pcib_read_config(sc->dev, 0, port->port_idx, 0,
RP_LINK_CONTROL_STATUS, 4);
if (reg & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
break;
DELAY(1);
}
if (i <= 0)
return (ETIMEDOUT);
return (0);
}
static void
tegra_pcib_port_enable(struct tegra_pcib_softc *sc, int port_num)
{
struct tegra_pcib_port *port;
uint32_t reg;
int rv;
port = sc->ports[port_num];
/* Put port to reset. */
reg = AFI_RD4(sc, port->afi_pex_ctrl);
reg &= ~AFI_PEX_CTRL_RST_L;
AFI_WR4(sc, port->afi_pex_ctrl, reg);
AFI_RD4(sc, port->afi_pex_ctrl);
DELAY(10);
/* Enable clocks. */
reg |= AFI_PEX_CTRL_REFCLK_EN;
reg |= AFI_PEX_CTRL_CLKREQ_EN;
reg |= AFI_PEX_CTRL_OVERRIDE_EN;
AFI_WR4(sc, port->afi_pex_ctrl, reg);
AFI_RD4(sc, port->afi_pex_ctrl);
DELAY(100);
/* Release reset. */
reg |= AFI_PEX_CTRL_RST_L;
AFI_WR4(sc, port->afi_pex_ctrl, reg);
if (sc->soc->pca_enable) {
reg = tegra_pcib_read_config(sc->dev, 0, port->port_idx, 0,
RP_VEND_CTL2, 4);
reg |= RP_VEND_CTL2_PCA_ENABLE;
tegra_pcib_write_config(sc->dev, 0, port->port_idx, 0,
RP_VEND_CTL2, reg, 4);
}
rv = tegra_pcib_wait_for_link(sc, port);
if (bootverbose)
device_printf(sc->dev, " port %d (%d lane%s): Link is %s\n",
port->port_idx, port->num_lanes,
port->num_lanes > 1 ? "s": "",
rv == 0 ? "up": "down");
}
static void
tegra_pcib_port_disable(struct tegra_pcib_softc *sc, uint32_t port_num)
{
struct tegra_pcib_port *port;
uint32_t reg;
port = sc->ports[port_num];
/* Put port to reset. */
reg = AFI_RD4(sc, port->afi_pex_ctrl);
reg &= ~AFI_PEX_CTRL_RST_L;
AFI_WR4(sc, port->afi_pex_ctrl, reg);
AFI_RD4(sc, port->afi_pex_ctrl);
DELAY(10);
/* Disable clocks. */
reg &= ~AFI_PEX_CTRL_CLKREQ_EN;
reg &= ~AFI_PEX_CTRL_REFCLK_EN;
AFI_WR4(sc, port->afi_pex_ctrl, reg);
if (bootverbose)
device_printf(sc->dev, " port %d (%d lane%s): Disabled\n",
port->port_idx, port->num_lanes,
port->num_lanes > 1 ? "s": "");
}
static void
tegra_pcib_set_bar(struct tegra_pcib_softc *sc, int bar, uint32_t axi,
uint64_t fpci, uint32_t size, int is_memory)
{
uint32_t fpci_reg;
uint32_t axi_reg;
uint32_t size_reg;
axi_reg = axi & ~0xFFF;
size_reg = size >> 12;
fpci_reg = (uint32_t)(fpci >> 8) & ~0xF;
fpci_reg |= is_memory ? 0x1 : 0x0;
AFI_WR4(sc, bars[bar].axi_start, axi_reg);
AFI_WR4(sc, bars[bar].size, size_reg);
AFI_WR4(sc, bars[bar].fpci_start, fpci_reg);
}
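/*
 * Worked example for the packing above (made-up window): an AXI window at
 * 0x12000000, size 0x10000, mapped to FPCI 0xFDFC000000 as I/O writes
 * 0x12000000 to the START register, 0x10 (size >> 12) to the SZ register,
 * and 0xFDFC0000 to the FPCI register (the low nibble carries the memory
 * flag, 0 here).
 */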
static int
tegra_pcib_enable(struct tegra_pcib_softc *sc)
{
int rv;
int i;
uint32_t reg;
rv = tegra_pcib_enable_fdt_resources(sc);
if (rv != 0) {
device_printf(sc->dev, "Cannot enable FDT resources\n");
return (rv);
}
/* Enable PLLE control. */
reg = AFI_RD4(sc, AFI_PLLE_CONTROL);
reg &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
reg |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
AFI_WR4(sc, AFI_PLLE_CONTROL, reg);
/* Set bias pad. */
AFI_WR4(sc, AFI_PEXBIAS_CTRL, 0);
/* Configure mode and ports. */
reg = AFI_RD4(sc, AFI_PCIE_CONFIG);
reg &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
if (sc->lanes_cfg == 0x14) {
if (bootverbose)
device_printf(sc->dev,
"Using x1,x4 configuration\n");
reg |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_XBAR4_1;
} else if (sc->lanes_cfg == 0x12) {
if (bootverbose)
device_printf(sc->dev,
"Using x1,x2 configuration\n");
reg |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_XBAR2_1;
} else {
device_printf(sc->dev,
"Unsupported lanes configuration: 0x%X\n", sc->lanes_cfg);
}
reg |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL;
for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) {
if ((sc->ports[i] != NULL))
reg &=
~AFI_PCIE_CONFIG_PCIE_DISABLE(sc->ports[i]->port_idx);
}
AFI_WR4(sc, AFI_PCIE_CONFIG, reg);
/* Enable Gen2 support. */
reg = AFI_RD4(sc, AFI_FUSE);
reg &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
AFI_WR4(sc, AFI_FUSE, reg);
for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) {
if (sc->ports[i] != NULL) {
rv = phy_enable(sc->ports[i]->phy);
if (rv != 0) {
device_printf(sc->dev,
"Cannot enable phy for port %d\n",
sc->ports[i]->port_idx);
return (rv);
}
}
}
/* Configure PCIe reference clock */
PADS_WR4(sc, PADS_REFCLK_CFG0, sc->soc->pads_refclk_cfg0);
if (sc->num_ports > 2)
PADS_WR4(sc, PADS_REFCLK_CFG1, sc->soc->pads_refclk_cfg1);
rv = hwreset_deassert(sc->hwreset_pcie_x);
if (rv != 0) {
device_printf(sc->dev, "Cannot unreset 'pci_x' reset\n");
return (rv);
}
/* Enable config space. */
reg = AFI_RD4(sc, AFI_CONFIGURATION);
reg |= AFI_CONFIGURATION_EN_FPCI;
AFI_WR4(sc, AFI_CONFIGURATION, reg);
/* Enable AFI errors. */
reg = 0;
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_INI_SLVERR);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_INI_DECERR);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_TGT_SLVERR);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_TGT_DECERR);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_TGT_WRERR);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_SM_MSG);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_DFPCI_DECERR);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_AXI_DECERR);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_FPCI_TIMEOUT);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_PE_PRSNT_SENSE);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_PE_CLKREQ_SENSE);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_CLKCLAMP_SENSE);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_RDY4PD_SENSE);
reg |= AFI_AFI_INTR_ENABLE_CODE(AFI_INTR_CODE_INT_CODE_P2P_ERROR);
AFI_WR4(sc, AFI_AFI_INTR_ENABLE, reg);
AFI_WR4(sc, AFI_SM_INTR_ENABLE, 0xffffffff);
/* Enable INT, disable MSI. */
AFI_WR4(sc, AFI_INTR_MASK, AFI_INTR_MASK_INT_MASK);
/* Mask all FPCI errors. */
AFI_WR4(sc, AFI_FPCI_ERROR_MASKS, 0);
/* Setup AFI translation windows. */
/* BAR 0 - type 1 extended configuration. */
tegra_pcib_set_bar(sc, 0, rman_get_start(sc->cfg_mem_res),
FPCI_MAP_EXT_TYPE1_CONFIG, rman_get_size(sc->cfg_mem_res), 0);
/* BAR 1 - downstream I/O. */
tegra_pcib_set_bar(sc, 1, sc->io_range.host, FPCI_MAP_IO,
sc->io_range.size, 0);
/* BAR 2 - downstream prefetchable memory 1:1. */
tegra_pcib_set_bar(sc, 2, sc->pref_mem_range.host,
sc->pref_mem_range.host, sc->pref_mem_range.size, 1);
/* BAR 3 - downstream non-prefetchable memory 1:1. */
tegra_pcib_set_bar(sc, 3, sc->mem_range.host,
sc->mem_range.host, sc->mem_range.size, 1);
/* BARs 4-8 - clear. */
tegra_pcib_set_bar(sc, 4, 0, 0, 0, 0);
tegra_pcib_set_bar(sc, 5, 0, 0, 0, 0);
tegra_pcib_set_bar(sc, 6, 0, 0, 0, 0);
tegra_pcib_set_bar(sc, 7, 0, 0, 0, 0);
tegra_pcib_set_bar(sc, 8, 0, 0, 0, 0);
/* MSI BAR - clear. */
tegra_pcib_set_bar(sc, 9, 0, 0, 0, 0);
return (0);
}
#ifdef TEGRA_PCIB_MSI_ENABLE
static int
tegra_pcib_attach_msi(device_t dev)
{
struct tegra_pcib_softc *sc;
uint32_t reg;
int i, rv;
sc = device_get_softc(dev);
sc->msi_page = (uintptr_t)kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
/* MSI BAR */
tegra_pcib_set_bar(sc, 9, vtophys(sc->msi_page), vtophys(sc->msi_page),
PAGE_SIZE, 0);
/* Disable and clear all interrupts. */
for (i = 0; i < AFI_MSI_REGS; i++) {
AFI_WR4(sc, AFI_MSI_EN_VEC(i), 0);
AFI_WR4(sc, AFI_MSI_VEC(i), 0xFFFFFFFF);
}
rv = bus_setup_intr(dev, sc->msi_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
tegra_pcib_msi_intr, NULL, sc, &sc->msi_intr_cookie);
if (rv != 0) {
device_printf(dev, "cannot setup MSI interrupt handler\n");
rv = ENXIO;
goto out;
}
if (tegra_pcib_msi_attach(sc) != 0) {
device_printf(dev, "WARNING: unable to attach PIC\n");
tegra_pcib_msi_detach(sc);
goto out;
}
/* Unmask MSI interrupt. */
reg = AFI_RD4(sc, AFI_INTR_MASK);
reg |= AFI_INTR_MASK_MSI_MASK;
AFI_WR4(sc, AFI_INTR_MASK, reg);
out:
return (rv);
}
#endif
static int
tegra_pcib_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) {
device_set_desc(dev, "Nvidia Integrated PCI/PCI-E Controller");
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
}
static int
tegra_pcib_attach(device_t dev)
{
struct tegra_pcib_softc *sc;
phandle_t node;
int rv;
int rid;
struct tegra_pcib_port *port;
int i;
sc = device_get_softc(dev);
sc->dev = dev;
mtx_init(&sc->mtx, "msi_mtx", NULL, MTX_DEF);
node = ofw_bus_get_node(dev);
sc->soc = (struct pcie_soc *)ofw_bus_search_compatible(dev,
compat_data)->ocd_data;
rv = tegra_pcib_parse_fdt_resources(sc, node);
if (rv != 0) {
device_printf(dev, "Cannot get FDT resources\n");
return (rv);
}
/* Allocate bus_space resources. */
rid = 0;
sc->pads_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->pads_mem_res == NULL) {
device_printf(dev, "Cannot allocate PADS register\n");
rv = ENXIO;
goto out;
}
/*
* XXX - FIXME
* tag for config space is not filled when RF_ALLOCATED flag is used.
*/
sc->bus_tag = rman_get_bustag(sc->pads_mem_res);
rid = 1;
sc->afi_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->afi_mem_res == NULL) {
device_printf(dev, "Cannot allocate AFI register\n");
rv = ENXIO;
goto out;
}
rid = 2;
sc->cfg_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ALLOCATED);
if (sc->cfg_mem_res == NULL) {
device_printf(dev, "Cannot allocate config space memory\n");
rv = ENXIO;
goto out;
}
sc->cfg_base_addr = rman_get_start(sc->cfg_mem_res);
/* Map RP slots */
for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) {
if (sc->ports[i] == NULL)
continue;
port = sc->ports[i];
rv = bus_space_map(sc->bus_tag, port->rp_base_addr,
port->rp_size, 0, &port->cfg_handle);
if (rv != 0) {
device_printf(sc->dev, "Cannot allocate memory for "
"port: %d\n", i);
rv = ENXIO;
goto out;
}
}
/*
* Get PCI interrupt
*/
rid = 0;
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE | RF_SHAREABLE);
if (sc->irq_res == NULL) {
device_printf(dev, "Cannot allocate IRQ resources\n");
rv = ENXIO;
goto out;
}
rid = 1;
sc->msi_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
if (sc->msi_irq_res == NULL) {
device_printf(dev, "Cannot allocate MSI IRQ resources\n");
rv = ENXIO;
goto out;
}
sc->ofw_pci.sc_range_mask = 0x3;
rv = ofw_pcib_init(dev);
if (rv != 0)
goto out;
rv = tegra_pcib_decode_ranges(sc, sc->ofw_pci.sc_range,
sc->ofw_pci.sc_nrange);
if (rv != 0)
goto out;
if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
tegra_pci_intr, NULL, sc, &sc->intr_cookie)) {
device_printf(dev, "cannot setup interrupt handler\n");
rv = ENXIO;
goto out;
}
/*
* Enable PCIE device.
*/
rv = tegra_pcib_enable(sc);
if (rv != 0)
goto out;
for (i = 0; i < TEGRA_PCIB_MAX_PORTS; i++) {
if (sc->ports[i] == NULL)
continue;
if (sc->ports[i]->enabled)
tegra_pcib_port_enable(sc, i);
else
tegra_pcib_port_disable(sc, i);
}
#ifdef TEGRA_PCIB_MSI_ENABLE
rv = tegra_pcib_attach_msi(dev);
if (rv != 0)
goto out;
#endif
device_add_child(dev, "pci", DEVICE_UNIT_ANY);
bus_attach_children(dev);
return (0);
out:
return (rv);
}
static device_method_t tegra_pcib_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, tegra_pcib_probe),
DEVMETHOD(device_attach, tegra_pcib_attach),
/* Bus interface */
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
/* pcib interface */
DEVMETHOD(pcib_maxslots, tegra_pcib_maxslots),
DEVMETHOD(pcib_read_config, tegra_pcib_read_config),
DEVMETHOD(pcib_write_config, tegra_pcib_write_config),
DEVMETHOD(pcib_route_interrupt, tegra_pcib_route_interrupt),
DEVMETHOD(pcib_alloc_msi, tegra_pcib_alloc_msi),
DEVMETHOD(pcib_release_msi, tegra_pcib_release_msi),
DEVMETHOD(pcib_map_msi, tegra_pcib_map_msi),
DEVMETHOD(pcib_request_feature, pcib_request_feature_allow),
#ifdef TEGRA_PCIB_MSI_ENABLE
/* MSI/MSI-X */
DEVMETHOD(msi_alloc_msi, tegra_pcib_msi_alloc_msi),
DEVMETHOD(msi_release_msi, tegra_pcib_msi_release_msi),
DEVMETHOD(msi_map_msi, tegra_pcib_msi_map_msi),
/* Interrupt controller interface */
DEVMETHOD(pic_disable_intr, tegra_pcib_msi_disable_intr),
DEVMETHOD(pic_enable_intr, tegra_pcib_msi_enable_intr),
DEVMETHOD(pic_setup_intr, tegra_pcib_msi_setup_intr),
DEVMETHOD(pic_teardown_intr, tegra_pcib_msi_teardown_intr),
DEVMETHOD(pic_post_filter, tegra_pcib_msi_post_filter),
DEVMETHOD(pic_post_ithread, tegra_pcib_msi_post_ithread),
DEVMETHOD(pic_pre_ithread, tegra_pcib_msi_pre_ithread),
#endif
/* OFW bus interface */
DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),
DEVMETHOD_END
};
DEFINE_CLASS_1(pcib, tegra_pcib_driver, tegra_pcib_methods,
sizeof(struct tegra_pcib_softc), ofw_pcib_driver);
DRIVER_MODULE(tegra_pcib, simplebus, tegra_pcib_driver, NULL, NULL);
diff --git a/sys/arm/qualcomm/ipq4018_machdep.c b/sys/arm/qualcomm/ipq4018_machdep.c
index 49dd2ff0193a..c0131d08dee6 100644
--- a/sys/arm/qualcomm/ipq4018_machdep.c
+++ b/sys/arm/qualcomm/ipq4018_machdep.c
@@ -1,233 +1,232 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021 Adrian Chadd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_platform.h"
#include
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "platform_if.h"
static int
ipq4018_attach(platform_t plat)
{
return (0);
}
static void
ipq4018_late_init(platform_t plat)
{
/*
* XXX FIXME This is needed because we're not parsing
* the fdt reserved memory regions in a consistent way
* between arm/arm64. Once the reserved region parsing
* is fixed up this will become unnecessary.
*
* These cover the SRAM/TZ regions that are not fully
* accessible from the OS; they're described in the
* ipq4018.dtsi tree. Without excluding them here the
* system fails to boot.
*/
physmem_exclude_region(IPQ4018_MEM_SMEM_START,
IPQ4018_MEM_SMEM_SIZE,
EXFLAG_NODUMP | EXFLAG_NOALLOC);
physmem_exclude_region(IPQ4018_MEM_TZ_START,
IPQ4018_MEM_TZ_SIZE,
EXFLAG_NODUMP | EXFLAG_NOALLOC);
}
static int
ipq4018_devmap_init(platform_t plat)
{
/*
* This covers the boot UART. Without it we can't boot successfully:
* there's an uninitialised-mutex panic in subr_vmem.c that occurs when
* pmap_mapdev() is called while the bus space code is doing its thing.
*/
devmap_add_entry(IPQ4018_MEM_UART1_START, IPQ4018_MEM_UART1_SIZE);
/*
* This covers a bunch of the reset block, which includes the PS-HOLD
* register for dropping power.
*/
devmap_add_entry(IPQ4018_MEM_PSHOLD_START, IPQ4018_MEM_PSHOLD_SIZE);
return (0);
}
/*
* This toggles the PS-HOLD register which on most IPQ devices will toggle
* the power control block and reset the SoC.
*
* However, there are apparently some units out there where this is not
* appropriate and instead the watchdog needs to be used.
*
* For now, since only one or two boards are initially supported, this
* will be fine. But if a board doesn't reboot cleanly, now you know.
*/
static void
ipq4018_cpu_reset_pshold(void)
{
bus_space_handle_t pshold;
printf("%s: called\n", __func__);
bus_space_map(fdtbus_bs_tag, IPQ4018_MEM_PSHOLD_START,
IPQ4018_MEM_PSHOLD_SIZE, 0, &pshold);
bus_space_write_4(fdtbus_bs_tag, pshold, 0, 0);
bus_space_barrier(fdtbus_bs_tag, pshold, 0, 0x4,
BUS_SPACE_BARRIER_WRITE);
}
static void
ipq4018_cpu_reset(platform_t plat)
{
spinlock_enter();
dsb();
ipq4018_cpu_reset_pshold();
/* Spin */
printf("%s: spinning\n", __func__);
while (1)
;
}
/*
* Early putc routine for EARLY_PRINTF support. To use, add to kernel config:
* options SOCDEV_PA=0x07800000
* options SOCDEV_VA=0x07800000
* options EARLY_PRINTF
* Resist the temptation to change the #if 0 to #ifdef EARLY_PRINTF here. It
* makes sense now, but if multiple SOCs do that it will make early_putc another
* duplicate symbol to be eliminated on the path to a generic kernel.
*/
#if 0
void
qca_msm_early_putc(int c)
{
static int is_init = 0;
int limit;
/*
* This must match what's put into SOCDEV_VA. You have to change them
* both together.
*
* XXX TODO I should really go and just make UART_BASE here depend upon
* SOCDEV_VA so they move together.
*/
#define UART_BASE IPQ4018_MEM_UART1_START
volatile uint32_t * UART_DM_TF0 = (uint32_t *)(UART_BASE + 0x70);
volatile uint32_t * UART_DM_SR = (uint32_t *)(UART_BASE + 0x08);
#define UART_DM_SR_TXEMT (1 << 3)
#define UART_DM_SR_TXRDY (1 << 2)
volatile uint32_t * UART_DM_ISR = (uint32_t *)(UART_BASE + 0x14);
volatile uint32_t * UART_DM_CR = (uint32_t *)(UART_BASE + 0x10);
#define UART_DM_TX_READY (1 << 7)
#define UART_DM_CLEAR_TX_READY 0x300
volatile uint32_t * UART_DM_NO_CHARS_FOR_TX = (uint32_t *)(UART_BASE + 0x40);
volatile uint32_t * UART_DM_TFWR = (uint32_t *)(UART_BASE + 0x1c);
#define UART_DM_TFW_VALUE 0
volatile uint32_t * UART_DM_IPR = (uint32_t *)(UART_BASE + 0x18);
#define UART_DM_STALE_TIMEOUT_LSB 0xf
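/* One-time init: program the TX FIFO watermark (TFWR) and stale timeout (IPR). */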
if (is_init == 0) {
is_init = 1;
*UART_DM_TFWR = UART_DM_TFW_VALUE;
wmb();
*UART_DM_IPR = UART_DM_STALE_TIMEOUT_LSB;
wmb();
}
/* Wait until TXFIFO is empty via ISR */
limit = 100000;
if ((*UART_DM_SR & UART_DM_SR_TXEMT) == 0) {
while (((*UART_DM_ISR & UART_DM_TX_READY) == 0) && --limit) {
/* Note - can't use DELAY here yet, too early */
rmb();
}
*UART_DM_CR = UART_DM_CLEAR_TX_READY;
wmb();
}
/* FIFO is ready. Say we're going to write one byte */
*UART_DM_NO_CHARS_FOR_TX = 1;
wmb();
limit = 100000;
while (((*UART_DM_SR & UART_DM_SR_TXRDY) == 0) && --limit) {
/* Note - can't use DELAY here yet, too early */
rmb();
}
/* Put character in first fifo slot */
*UART_DM_TF0 = c;
wmb();
}
early_putc_t *early_putc = qca_msm_early_putc;
#endif
static platform_method_t ipq4018_methods[] = {
PLATFORMMETHOD(platform_attach, ipq4018_attach),
PLATFORMMETHOD(platform_devmap_init, ipq4018_devmap_init),
PLATFORMMETHOD(platform_late_init, ipq4018_late_init),
PLATFORMMETHOD(platform_cpu_reset, ipq4018_cpu_reset),
#ifdef SMP
PLATFORMMETHOD(platform_mp_start_ap, ipq4018_mp_start_ap),
PLATFORMMETHOD(platform_mp_setmaxid, ipq4018_mp_setmaxid),
#endif
PLATFORMMETHOD_END,
};
FDT_PLATFORM_DEF2(ipq4018, ipq4018_ac58u, "ASUS RT-AC58U", 0,
"asus,rt-ac58u", 80);
diff --git a/sys/arm/qualcomm/ipq4018_mp.c b/sys/arm/qualcomm/ipq4018_mp.c
index cdcc0dfb9550..ab7d297738c4 100644
--- a/sys/arm/qualcomm/ipq4018_mp.c
+++ b/sys/arm/qualcomm/ipq4018_mp.c
@@ -1,113 +1,112 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021 Adrian Chadd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_platform.h"
#include
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "platform_if.h"
void
ipq4018_mp_setmaxid(platform_t plat)
{
int ncpu;
/* If we've already set the global vars don't bother to do it again. */
if (mp_ncpus != 0)
return;
/* Read the core count from the CP15 L2 Control Register */
ncpu = cp15_l2ctlr_get();
ncpu = CPUV7_L2CTLR_NPROC(ncpu);
mp_ncpus = ncpu;
mp_maxid = ncpu - 1;
printf("SMP: ncpu=%d\n", ncpu);
}
static bool
ipq4018_start_ap(u_int id, phandle_t node, u_int addr_cells, pcell_t *arg)
{
/*
* For the IPQ401x we assume the enable method is
* "qcom,kpss-acc-v2". If this path gets turned into
* something more generic for other 32-bit Qualcomm
* SoCs then we'll likely want to turn this into a
* switch based on "enable-method".
*/
return (qcom_cpu_kpssv2_regulator_start(id, node));
}
void
ipq4018_mp_start_ap(platform_t plat)
{
int ret;
/*
* First step - SCM call to set the cold boot address to mpentry, so
* CPUs hopefully start in the MP path.
*/
ret = qcom_scm_legacy_mp_set_cold_boot_address((vm_offset_t) mpentry);
if (ret != 0)
panic("%s: Couldn't set cold boot address via SCM "
"(error 0x%08x)", __func__, ret);
/*
* Next step - loop over the CPU nodes and do the per-CPU setup
* required to power on the CPUs themselves.
*/
ofw_cpu_early_foreach(ipq4018_start_ap, true);
/*
* The next set of IPIs to the CPUs will wake them up and enter
* mpentry.
*/
}
diff --git a/sys/arm/qualcomm/qcom_cpu_kpssv2.c b/sys/arm/qualcomm/qcom_cpu_kpssv2.c
index c265f0f897e2..d4f2cff5ab7e 100644
--- a/sys/arm/qualcomm/qcom_cpu_kpssv2.c
+++ b/sys/arm/qualcomm/qcom_cpu_kpssv2.c
@@ -1,208 +1,207 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021 Adrian Chadd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_platform.h"
#include
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "platform_if.h"
/*
* Since DELAY() hangs this early in boot, we need some
* other way to let things settle.
*/
static inline void
loop_delay(int usec)
{
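/* Uncalibrated busy-wait; the multiplier is a rough guess, not a measured cycle count. */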
int lcount = usec * 100000;
for (volatile int i = 0; i < lcount; i++)
;
}
/*
* This is the KPSSv2 (eg IPQ4018) regulator path for CPU
* and shared L2 cache power-on.
*/
bool
qcom_cpu_kpssv2_regulator_start(u_int id, phandle_t node)
{
phandle_t acc_phandle, l2_phandle, saw_phandle;
bus_space_tag_t acc_tag, saw_tag;
bus_space_handle_t acc_handle, saw_handle;
bus_size_t acc_sz, saw_sz;
ssize_t sret;
int ret;
uint32_t reg_val;
/*
* Don't run this on CPU 0! It would power the boot CPU
* down first and ... then everything hangs.
*/
if (id == 0)
return (true);
/*
* Walk the qcom,acc and next-level-cache entries to find their
* child phandles and thus regulators.
*
* The qcom,acc is a phandle to a node.
*
* The next-level-cache actually is a phandle through to a qcom,saw
* entry.
*/
sret = OF_getencprop(node, "qcom,acc", (void *) &acc_phandle,
sizeof(acc_phandle));
if (sret != sizeof(acc_phandle))
panic("***couldn't get phandle for qcom,acc");
acc_phandle = OF_node_from_xref(acc_phandle);
sret = OF_getencprop(node, "next-level-cache", (void *) &l2_phandle,
sizeof(l2_phandle));
if (sret != sizeof(l2_phandle))
panic("***couldn't get phandle for next-level-cache");
l2_phandle = OF_node_from_xref(l2_phandle);
sret = OF_getencprop(l2_phandle, "qcom,saw", (void *) &saw_phandle,
sizeof(saw_phandle));
if (sret != sizeof(saw_phandle))
panic("***couldn't get phandle for qcom,saw");
saw_phandle = OF_node_from_xref(saw_phandle);
/*
* Now that we have the phandles referencing the correct locations,
* do some KVA mappings so we can go access the registers.
*/
ret = OF_decode_addr(acc_phandle, 0, &acc_tag, &acc_handle, &acc_sz);
if (ret != 0)
panic("*** couldn't map qcom,acc space (%d)", ret);
ret = OF_decode_addr(saw_phandle, 0, &saw_tag, &saw_handle, &saw_sz);
if (ret != 0)
panic("*** couldn't map next-level-cache -> "
"qcom,saw space (%d)", ret);
/*
* Power sequencing to ensure the cores are off, then power them on
* and bring them out of reset.
*/
/*
* BHS: off
* LDO: bypassed, powered off
*/
reg_val = (64 << QCOM_APC_PWR_GATE_CTL_BHS_CNT_SHIFT)
| (0x3f << QCOM_APC_PWR_GATE_CTL_LDO_PWR_DWN_SHIFT)
| QCOM_APC_PWR_GATE_CTL_BHS_EN;
bus_space_write_4(acc_tag, acc_handle, QCOM_APC_PWR_GATE_CTL, reg_val);
mb();
/* Settle time */
loop_delay(1);
/*
* Start up BHS segments.
*/
reg_val |= 0x3f << QCOM_APC_PWR_GATE_CTL_BHS_SEG_SHIFT;
bus_space_write_4(acc_tag, acc_handle, QCOM_APC_PWR_GATE_CTL, reg_val);
mb();
/* Settle time */
loop_delay(1);
/*
* Switch on the LDO bypass; BHS will now supply power.
*/
reg_val |= 0x3f << QCOM_APC_PWR_GATE_CTL_LDO_BYP_SHIFT;
bus_space_write_4(acc_tag, acc_handle, QCOM_APC_PWR_GATE_CTL, reg_val);
/*
* Shared L2 regulator control.
*/
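/* The 0x10003 VCTL value apparently mirrors the Linux kpss-acc-v2 bring-up sequence. */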
bus_space_write_4(saw_tag, saw_handle, QCOM_APCS_SAW2_2_VCTL, 0x10003);
mb();
/* Settle time */
loop_delay(50);
/*
* Put the core in reset.
*/
reg_val = QCOM_APCS_CPU_PWR_CTL_COREPOR_RST
| QCOM_APCS_CPU_PWR_CTL_CLAMP;
bus_space_write_4(acc_tag, acc_handle, QCOM_APCS_CPU_PWR_CTL, reg_val);
mb();
loop_delay(2);
/*
* Remove power-down clamp.
*/
reg_val &= ~QCOM_APCS_CPU_PWR_CTL_CLAMP;
bus_space_write_4(acc_tag, acc_handle, QCOM_APCS_CPU_PWR_CTL, reg_val);
mb();
loop_delay(2);
/*
* Clear core power reset.
*/
reg_val &= ~QCOM_APCS_CPU_PWR_CTL_COREPOR_RST;
bus_space_write_4(acc_tag, acc_handle, QCOM_APCS_CPU_PWR_CTL, reg_val);
mb();
/*
* The power is ready, the core is out of reset, signal the core
* to power up.
*/
reg_val |= QCOM_APCS_CPU_PWR_CTL_CORE_PWRD_UP;
bus_space_write_4(acc_tag, acc_handle, QCOM_APCS_CPU_PWR_CTL, reg_val);
mb();
/*
* Finished with these KVA mappings, so release them.
*/
bus_space_unmap(acc_tag, acc_handle, acc_sz);
bus_space_unmap(saw_tag, saw_handle, saw_sz);
return (true);
}
diff --git a/sys/arm/qualcomm/qcom_scm_legacy.c b/sys/arm/qualcomm/qcom_scm_legacy.c
index b614843b1ce8..42d5685259ad 100644
--- a/sys/arm/qualcomm/qcom_scm_legacy.c
+++ b/sys/arm/qualcomm/qcom_scm_legacy.c
@@ -1,85 +1,84 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021 Adrian Chadd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_platform.h"
#include
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/*
* Set the cold boot address for a mask of CPUs (which are started later).
*
* Don't set it for CPU0; that CPU is the boot CPU and is already alive.
*
* For now it sets it on CPU1..3.
*
* This works on the IPQ4019 as tested; the retval is 0x0.
*/
uint32_t
qcom_scm_legacy_mp_set_cold_boot_address(vm_offset_t mp_entry_func)
{
struct arm_smccc_res res;
int ret;
int context_id;
uint32_t scm_arg0 = QCOM_SCM_LEGACY_ATOMIC_ID(QCOM_SCM_SVC_BOOT,
QCOM_SCM_BOOT_SET_ADDR, 2);
uint32_t scm_arg1 = QCOM_SCM_FLAG_COLDBOOT_CPU1
| QCOM_SCM_FLAG_COLDBOOT_CPU2
| QCOM_SCM_FLAG_COLDBOOT_CPU3;
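/* The SCM firmware call wants the physical address of the entry point. */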
uint32_t scm_arg2 = pmap_kextract((vm_offset_t)mp_entry_func);
ret = arm_smccc_invoke_smc(scm_arg0, (uint32_t) &context_id, scm_arg1,
scm_arg2, &res);
if (ret == 0 && res.a0 == 0)
return (0);
printf("%s: called; error; ret=0x%08x; retval[0]=0x%08x\n",
__func__, ret, res.a0);
return (0);
}
diff --git a/sys/arm/ti/ti_machdep.c b/sys/arm/ti/ti_machdep.c
index 29afcb7d9b59..ee4f416eeded 100644
--- a/sys/arm/ti/ti_machdep.c
+++ b/sys/arm/ti/ti_machdep.c
@@ -1,116 +1,115 @@
/*-
* SPDX-License-Identifier: BSD-4-Clause
*
* Copyright (c) 1994-1998 Mark Brinicombe.
* Copyright (c) 1994 Brini.
* All rights reserved.
*
* This code is derived from software written for Brini by Mark Brinicombe
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Brini.
* 4. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45
*/
#include "opt_platform.h"
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include "platform_if.h"
#if defined(SOC_TI_AM335X)
static platform_attach_t ti_am335x_attach;
static platform_devmap_init_t ti_am335x_devmap_init;
#endif
static platform_cpu_reset_t ti_plat_cpu_reset;
void (*ti_cpu_reset)(void) = NULL;
int _ti_chip = -1;
#if defined(SOC_TI_AM335X)
static int
ti_am335x_attach(platform_t plat)
{
_ti_chip = CHIP_AM335X;
return (0);
}
#endif
/*
* Construct static devmap entries to map out the most frequently used
* peripherals using 1mb section mappings.
*/
#if defined(SOC_TI_AM335X)
static int
ti_am335x_devmap_init(platform_t plat)
{
devmap_add_entry(0x44C00000, 0x00400000); /* 4mb L4_WKUP devices*/
devmap_add_entry(0x47400000, 0x00100000); /* 1mb USB */
devmap_add_entry(0x47800000, 0x00100000); /* 1mb mmchs2 */
devmap_add_entry(0x48000000, 0x01000000); /*16mb L4_PER devices */
devmap_add_entry(0x49000000, 0x00100000); /* 1mb edma3 */
devmap_add_entry(0x49800000, 0x00300000); /* 3mb edma3 */
devmap_add_entry(0x4A000000, 0x01000000); /*16mb L4_FAST devices*/
return (0);
}
#endif
static void
ti_plat_cpu_reset(platform_t plat)
{
if (ti_cpu_reset)
(*ti_cpu_reset)();
else
printf("no cpu_reset implementation\n");
}
#if defined(SOC_TI_AM335X)
static platform_method_t am335x_methods[] = {
PLATFORMMETHOD(platform_attach, ti_am335x_attach),
PLATFORMMETHOD(platform_devmap_init, ti_am335x_devmap_init),
PLATFORMMETHOD(platform_cpu_reset, ti_plat_cpu_reset),
PLATFORMMETHOD_END,
};
FDT_PLATFORM_DEF(am335x, "am335x", 0, "ti,am33xx", 200);
#endif
diff --git a/sys/arm64/qoriq/qoriq_dw_pci.c b/sys/arm64/qoriq/qoriq_dw_pci.c
index 2492130e4b42..b6eecba48c39 100644
--- a/sys/arm64/qoriq/qoriq_dw_pci.c
+++ b/sys/arm64/qoriq/qoriq_dw_pci.c
@@ -1,263 +1,262 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright 2020 Michal Meloun
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
/* Layerscape DesignWare PCIe driver */
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pcib_if.h"
#include "pci_dw_if.h"
#define PCIE_ABSERR 0x8D0
struct qoriq_dw_pci_cfg {
uint32_t pex_pf0_dgb; /* offset of PEX_PF0_DBG register */
uint32_t ltssm_bit; /* LSB bit of LTSSM state field */
};
struct qorif_dw_pci_softc {
struct pci_dw_softc dw_sc;
device_t dev;
phandle_t node;
struct resource *irq_res;
void *intr_cookie;
struct qoriq_dw_pci_cfg *soc_cfg;
};
static struct qoriq_dw_pci_cfg ls1043_cfg = {
.pex_pf0_dgb = 0x10000 + 0x7FC,
.ltssm_bit = 24,
};
static struct qoriq_dw_pci_cfg ls1012_cfg = {
.pex_pf0_dgb = 0x80000 + 0x407FC,
.ltssm_bit = 24,
};
static struct qoriq_dw_pci_cfg ls2080_cfg = {
.pex_pf0_dgb = 0x80000 + 0x7FC,
.ltssm_bit = 0,
};
static struct qoriq_dw_pci_cfg ls2028_cfg = {
.pex_pf0_dgb = 0x80000 + 0x407FC,
.ltssm_bit = 0,
};
/* Compatible devices. */
static struct ofw_compat_data compat_data[] = {
{"fsl,ls1012a-pcie", (uintptr_t)&ls1012_cfg},
{"fsl,ls1028a-pcie", (uintptr_t)&ls2028_cfg},
{"fsl,ls1043a-pcie", (uintptr_t)&ls1043_cfg},
{"fsl,ls1046a-pcie", (uintptr_t)&ls1012_cfg},
{"fsl,ls2080a-pcie", (uintptr_t)&ls2080_cfg},
{"fsl,ls2085a-pcie", (uintptr_t)&ls2080_cfg},
{"fsl,ls2088a-pcie", (uintptr_t)&ls2028_cfg},
{"fsl,ls1088a-pcie", (uintptr_t)&ls2028_cfg},
{NULL, 0},
};
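/* Enable or disable writes to the read-only DBI configuration registers. */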
static void
qorif_dw_pci_dbi_protect(struct qorif_dw_pci_softc *sc, bool protect)
{
uint32_t reg;
reg = pci_dw_dbi_rd4(sc->dev, DW_MISC_CONTROL_1);
if (protect)
reg &= ~DBI_RO_WR_EN;
else
reg |= DBI_RO_WR_EN;
pci_dw_dbi_wr4(sc->dev, DW_MISC_CONTROL_1, reg);
}
static int qorif_dw_pci_intr(void *arg)
{
#if 0
struct qorif_dw_pci_softc *sc = arg;
uint32_t cause1, cause2;
/* Ack all interrupts */
cause1 = pci_dw_dbi_rd4(sc->dev, MV_INT_CAUSE1);
cause2 = pci_dw_dbi_rd4(sc->dev, MV_INT_CAUSE2);
pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE1, cause1);
pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE2, cause2);
#endif
return (FILTER_HANDLED);
}
static int
qorif_dw_pci_get_link(device_t dev, bool *status)
{
struct qorif_dw_pci_softc *sc;
uint32_t reg;
sc = device_get_softc(dev);
reg = pci_dw_dbi_rd4(sc->dev, sc->soc_cfg->pex_pf0_dgb);
reg >>= sc->soc_cfg->ltssm_bit;
reg &= 0x3F;
*status = (reg == 0x11);
return (0);
}
static void
qorif_dw_pci_init(struct qorif_dw_pci_softc *sc)
{
// ls_pcie_disable_outbound_atus(pcie);
/* Forward error response */
pci_dw_dbi_wr4(sc->dev, PCIE_ABSERR, 0x9401);
qorif_dw_pci_dbi_protect(sc, true);
pci_dw_dbi_wr1(sc->dev, PCIR_HDRTYPE, 1);
qorif_dw_pci_dbi_protect(sc, false);
// ls_pcie_drop_msg_tlp(pcie);
}
static int
qorif_dw_pci_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
return (ENXIO);
device_set_desc(dev, "NPX Layaerscape PCI-E Controller");
return (BUS_PROBE_DEFAULT);
}
static int
qorif_dw_pci_attach(device_t dev)
{
struct resource_map_request req;
struct resource_map map;
struct qorif_dw_pci_softc *sc;
phandle_t node;
int rv;
int rid;
sc = device_get_softc(dev);
node = ofw_bus_get_node(dev);
sc->dev = dev;
sc->node = node;
sc->soc_cfg = (struct qoriq_dw_pci_cfg *)
ofw_bus_search_compatible(dev, compat_data)->ocd_data;
rid = 0;
sc->dw_sc.dbi_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE | RF_UNMAPPED);
if (sc->dw_sc.dbi_res == NULL) {
device_printf(dev, "Cannot allocate DBI memory\n");
rv = ENXIO;
goto out;
}
resource_init_map_request(&req);
req.memattr = VM_MEMATTR_DEVICE_NP;
rv = bus_map_resource(dev, SYS_RES_MEMORY, sc->dw_sc.dbi_res, &req,
&map);
if (rv != 0) {
device_printf(dev, "could not map memory.\n");
return (rv);
}
rman_set_mapping(sc->dw_sc.dbi_res, &map);
/* PCI interrupt */
rid = 0;
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE | RF_SHAREABLE);
if (sc->irq_res == NULL) {
device_printf(dev, "Cannot allocate IRQ resources\n");
rv = ENXIO;
goto out;
}
rv = pci_dw_init(dev);
if (rv != 0)
goto out;
qorif_dw_pci_init(sc);
/* Setup interrupt */
if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
qorif_dw_pci_intr, NULL, sc, &sc->intr_cookie)) {
device_printf(dev, "cannot setup interrupt handler\n");
rv = ENXIO;
goto out;
}
bus_attach_children(dev);
return (0);
out:
/* XXX Cleanup */
return (rv);
}
static device_method_t qorif_dw_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, qorif_dw_pci_probe),
DEVMETHOD(device_attach, qorif_dw_pci_attach),
DEVMETHOD(pci_dw_get_link, qorif_dw_pci_get_link),
DEVMETHOD_END
};
DEFINE_CLASS_1(pcib, qorif_dw_pci_driver, qorif_dw_pci_methods,
sizeof(struct qorif_dw_pci_softc), pci_dw_driver);
DRIVER_MODULE(qorif_dw_pci, simplebus, qorif_dw_pci_driver, NULL, NULL);
diff --git a/sys/dev/pci/pci_dw.c b/sys/dev/pci/pci_dw.c
index cf67d1ab06cf..dcc9c0c5e369 100644
--- a/sys/dev/pci/pci_dw.c
+++ b/sys/dev/pci/pci_dw.c
@@ -1,860 +1,859 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Michal Meloun
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
/* Base class for all Synopsys DesignWare PCI/PCIe drivers */
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pcib_if.h"
#include "pci_dw_if.h"
#ifdef DEBUG
#define debugf(fmt, args...) do { printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif
#define DBI_WR1(sc, reg, val) pci_dw_dbi_wr1((sc)->dev, reg, val)
#define DBI_WR2(sc, reg, val) pci_dw_dbi_wr2((sc)->dev, reg, val)
#define DBI_WR4(sc, reg, val) pci_dw_dbi_wr4((sc)->dev, reg, val)
#define DBI_RD1(sc, reg) pci_dw_dbi_rd1((sc)->dev, reg)
#define DBI_RD2(sc, reg) pci_dw_dbi_rd2((sc)->dev, reg)
#define DBI_RD4(sc, reg) pci_dw_dbi_rd4((sc)->dev, reg)
#define IATU_UR_WR4(sc, reg, val) \
bus_write_4((sc)->iatu_ur_res, (sc)->iatu_ur_offset + (reg), (val))
#define IATU_UR_RD4(sc, reg) \
bus_read_4((sc)->iatu_ur_res, (sc)->iatu_ur_offset + (reg))
#define PCI_BUS_SHIFT 20
#define PCI_SLOT_SHIFT 15
#define PCI_FUNC_SHIFT 12
#define PCI_BUS_MASK 0xFF
#define PCI_SLOT_MASK 0x1F
#define PCI_FUNC_MASK 0x07
#define PCI_REG_MASK 0xFFF
#define IATU_CFG_BUS(bus) ((uint64_t)((bus) & 0xff) << 24)
#define IATU_CFG_SLOT(slot) ((uint64_t)((slot) & 0x1f) << 19)
#define IATU_CFG_FUNC(func) ((uint64_t)((func) & 0x07) << 16)
static uint32_t
pci_dw_dbi_read(device_t dev, u_int reg, int width)
{
struct pci_dw_softc *sc;
sc = device_get_softc(dev);
MPASS(sc->dbi_res != NULL);
switch (width) {
case 4:
return (bus_read_4(sc->dbi_res, reg));
case 2:
return (bus_read_2(sc->dbi_res, reg));
case 1:
return (bus_read_1(sc->dbi_res, reg));
default:
device_printf(sc->dev, "Unsupported width: %d\n", width);
return (0xFFFFFFFF);
}
}
static void
pci_dw_dbi_write(device_t dev, u_int reg, uint32_t val, int width)
{
struct pci_dw_softc *sc;
sc = device_get_softc(dev);
MPASS(sc->dbi_res != NULL);
switch (width) {
case 4:
bus_write_4(sc->dbi_res, reg, val);
break;
case 2:
bus_write_2(sc->dbi_res, reg, val);
break;
case 1:
bus_write_1(sc->dbi_res, reg, val);
break;
default:
device_printf(sc->dev, "Unsupported width: %d\n", width);
break;
}
}
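/* Enable or disable writes to the read-only DBI configuration registers. */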
static void
pci_dw_dbi_protect(struct pci_dw_softc *sc, bool protect)
{
uint32_t reg;
reg = DBI_RD4(sc, DW_MISC_CONTROL_1);
if (protect)
reg &= ~DBI_RO_WR_EN;
else
reg |= DBI_RO_WR_EN;
DBI_WR4(sc, DW_MISC_CONTROL_1, reg);
}
static bool
pci_dw_check_dev(struct pci_dw_softc *sc, u_int bus, u_int slot, u_int func,
u_int reg)
{
bool status;
int rv;
if (bus < sc->bus_start || bus > sc->bus_end || slot > PCI_SLOTMAX ||
func > PCI_FUNCMAX || reg > PCIE_REGMAX)
return (false);
/* Link is needed for access to all non-root buses */
if (bus != sc->root_bus) {
rv = PCI_DW_GET_LINK(sc->dev, &status);
if (rv != 0 || !status)
return (false);
return (true);
}
/* The root bus has only one device: a single-function root port */
if (slot > 0 || func > 0)
return (false);
return (true);
}
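/*
* Newer DesignWare cores drop the iATU viewport and instead expose the
* iATU registers "unrolled" in a separate window; reading all-ones from
* the viewport register indicates unroll mode.
*/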
static bool
pci_dw_detect_atu_unroll(struct pci_dw_softc *sc)
{
return (DBI_RD4(sc, DW_IATU_VIEWPORT) == 0xFFFFFFFFU);
}
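/*
* Count the usable outbound iATU regions by testing whether a dummy
* target address sticks in each unrolled register window.
*/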
static int
pci_dw_detect_out_atu_regions_unroll(struct pci_dw_softc *sc)
{
int num_regions, i;
uint32_t reg;
num_regions = sc->iatu_ur_size / DW_IATU_UR_STEP;
for (i = 0; i < num_regions; ++i) {
IATU_UR_WR4(sc, DW_IATU_UR_REG(i, LWR_TARGET_ADDR),
0x12340000);
reg = IATU_UR_RD4(sc, DW_IATU_UR_REG(i, LWR_TARGET_ADDR));
if (reg != 0x12340000)
break;
}
sc->num_out_regions = i;
return (0);
}
static int
pci_dw_detect_out_atu_regions_legacy(struct pci_dw_softc *sc)
{
int num_viewports, i;
uint32_t reg;
/* Find out how many viewports there are in total */
DBI_WR4(sc, DW_IATU_VIEWPORT, IATU_REGION_INDEX(~0U));
reg = DBI_RD4(sc, DW_IATU_VIEWPORT);
if (reg > IATU_REGION_INDEX(~0U)) {
device_printf(sc->dev,
"Cannot detect number of output iATU regions; read %#x\n",
reg);
return (ENXIO);
}
num_viewports = reg + 1;
/*
* Find out how many of them are outbound by seeing whether a dummy
* page-aligned address sticks.
*/
for (i = 0; i < num_viewports; ++i) {
DBI_WR4(sc, DW_IATU_VIEWPORT, IATU_REGION_INDEX(i));
DBI_WR4(sc, DW_IATU_LWR_TARGET_ADDR, 0x12340000);
reg = DBI_RD4(sc, DW_IATU_LWR_TARGET_ADDR);
if (reg != 0x12340000)
break;
}
sc->num_out_regions = i;
return (0);
}
static int
pci_dw_detect_out_atu_regions(struct pci_dw_softc *sc)
{
if (sc->iatu_ur_res)
return (pci_dw_detect_out_atu_regions_unroll(sc));
else
return (pci_dw_detect_out_atu_regions_legacy(sc));
}
static int
pci_dw_map_out_atu_unroll(struct pci_dw_softc *sc, int idx, int type,
uint64_t pa, uint64_t pci_addr, uint32_t size)
{
uint32_t reg;
int i;
if (size == 0)
return (0);
IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, LWR_BASE_ADDR),
pa & 0xFFFFFFFF);
IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, UPPER_BASE_ADDR),
(pa >> 32) & 0xFFFFFFFF);
IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, LIMIT_ADDR),
(pa + size - 1) & 0xFFFFFFFF);
IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, LWR_TARGET_ADDR),
pci_addr & 0xFFFFFFFF);
IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, UPPER_TARGET_ADDR),
(pci_addr >> 32) & 0xFFFFFFFF);
IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, CTRL1),
IATU_CTRL1_TYPE(type));
IATU_UR_WR4(sc, DW_IATU_UR_REG(idx, CTRL2),
IATU_CTRL2_REGION_EN);
/* Wait until setup becomes valid */
for (i = 10; i > 0; i--) {
reg = IATU_UR_RD4(sc, DW_IATU_UR_REG(idx, CTRL2));
if (reg & IATU_CTRL2_REGION_EN)
return (0);
DELAY(5);
}
device_printf(sc->dev,
"Cannot map outbound region %d in unroll mode iATU\n", idx);
return (ETIMEDOUT);
}
static int
pci_dw_map_out_atu_legacy(struct pci_dw_softc *sc, int idx, int type,
uint64_t pa, uint64_t pci_addr, uint32_t size)
{
uint32_t reg;
int i;
if (size == 0)
return (0);
DBI_WR4(sc, DW_IATU_VIEWPORT, IATU_REGION_INDEX(idx));
DBI_WR4(sc, DW_IATU_LWR_BASE_ADDR, pa & 0xFFFFFFFF);
DBI_WR4(sc, DW_IATU_UPPER_BASE_ADDR, (pa >> 32) & 0xFFFFFFFF);
DBI_WR4(sc, DW_IATU_LIMIT_ADDR, (pa + size - 1) & 0xFFFFFFFF);
DBI_WR4(sc, DW_IATU_LWR_TARGET_ADDR, pci_addr & 0xFFFFFFFF);
DBI_WR4(sc, DW_IATU_UPPER_TARGET_ADDR, (pci_addr >> 32) & 0xFFFFFFFF);
DBI_WR4(sc, DW_IATU_CTRL1, IATU_CTRL1_TYPE(type));
DBI_WR4(sc, DW_IATU_CTRL2, IATU_CTRL2_REGION_EN);
/* Wait until setup becomes valid */
for (i = 10; i > 0; i--) {
reg = DBI_RD4(sc, DW_IATU_CTRL2);
if (reg & IATU_CTRL2_REGION_EN)
return (0);
DELAY(5);
}
device_printf(sc->dev,
"Cannot map outbound region %d in legacy mode iATU\n", idx);
return (ETIMEDOUT);
}
/* Map one outbound ATU region */
static int
pci_dw_map_out_atu(struct pci_dw_softc *sc, int idx, int type,
uint64_t pa, uint64_t pci_addr, uint32_t size)
{
if (sc->iatu_ur_res)
return (pci_dw_map_out_atu_unroll(sc, idx, type, pa,
pci_addr, size));
else
return (pci_dw_map_out_atu_legacy(sc, idx, type, pa,
pci_addr, size));
}
static int
pci_dw_setup_hw(struct pci_dw_softc *sc)
{
uint32_t reg;
int rv, i;
pci_dw_dbi_protect(sc, false);
/* Setup config registers */
DBI_WR1(sc, PCIR_CLASS, PCIC_BRIDGE);
DBI_WR1(sc, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);
DBI_WR4(sc, PCIR_BAR(0), 4);
DBI_WR4(sc, PCIR_BAR(1), 0);
DBI_WR1(sc, PCIR_INTPIN, 1);
DBI_WR1(sc, PCIR_PRIBUS_1, sc->root_bus);
DBI_WR1(sc, PCIR_SECBUS_1, sc->sub_bus);
DBI_WR1(sc, PCIR_SUBBUS_1, sc->bus_end);
DBI_WR2(sc, PCIR_COMMAND,
PCIM_CMD_PORTEN | PCIM_CMD_MEMEN |
PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN);
pci_dw_dbi_protect(sc, true);
/*
* Setup outbound memory windows. iATU region 0 stays reserved
* for config space accesses, so the data windows start at index 1.
*/
for (i = 0; i < min(sc->num_mem_ranges, sc->num_out_regions - 1); ++i) {
rv = pci_dw_map_out_atu(sc, i + 1, IATU_CTRL1_TYPE_MEM,
sc->mem_ranges[i].host, sc->mem_ranges[i].pci,
sc->mem_ranges[i].size);
if (rv != 0)
return (rv);
}
/* If we have enough regions ... */
if (sc->num_mem_ranges + 1 < sc->num_out_regions &&
sc->io_range.size != 0) {
/* Setup outbound I/O window */
rv = pci_dw_map_out_atu(sc, sc->num_mem_ranges + 1,
IATU_CTRL1_TYPE_IO, sc->io_range.host, sc->io_range.pci,
sc->io_range.size);
if (rv != 0)
return (rv);
}
/* Adjust number of lanes */
reg = DBI_RD4(sc, DW_PORT_LINK_CTRL);
reg &= ~PORT_LINK_CAPABLE(~0);
switch (sc->num_lanes) {
case 1:
reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_1);
break;
case 2:
reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_2);
break;
case 4:
reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_4);
break;
case 8:
reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_8);
break;
case 16:
reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_16);
break;
case 32:
reg |= PORT_LINK_CAPABLE(PORT_LINK_CAPABLE_32);
break;
default:
device_printf(sc->dev,
"'num-lanes' property have invalid value: %d\n",
sc->num_lanes);
return (EINVAL);
}
DBI_WR4(sc, DW_PORT_LINK_CTRL, reg);
/* And link width */
reg = DBI_RD4(sc, DW_GEN2_CTRL);
reg &= ~GEN2_CTRL_NUM_OF_LANES(~0);
switch (sc->num_lanes) {
case 1:
reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_1);
break;
case 2:
reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_2);
break;
case 4:
reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_4);
break;
case 8:
reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_8);
break;
case 16:
reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_16);
break;
case 32:
reg |= GEN2_CTRL_NUM_OF_LANES(GEN2_CTRL_NUM_OF_LANES_32);
break;
}
DBI_WR4(sc, DW_GEN2_CTRL, reg);
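/* Request a direct speed change so the link retrains to the highest supported rate. */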
reg = DBI_RD4(sc, DW_GEN2_CTRL);
reg |= DIRECT_SPEED_CHANGE;
DBI_WR4(sc, DW_GEN2_CTRL, reg);
return (0);
}
static int
pci_dw_decode_ranges(struct pci_dw_softc *sc, struct ofw_pci_range *ranges,
int nranges)
{
int i, nmem, rv;
nmem = 0;
for (i = 0; i < nranges; i++) {
if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
OFW_PCI_PHYS_HI_SPACE_MEM32)
++nmem;
}
sc->mem_ranges = malloc(nmem * sizeof(*sc->mem_ranges), M_DEVBUF,
M_WAITOK);
sc->num_mem_ranges = nmem;
nmem = 0;
for (i = 0; i < nranges; i++) {
if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
OFW_PCI_PHYS_HI_SPACE_IO) {
if (sc->io_range.size != 0) {
device_printf(sc->dev,
"Duplicated IO range found in DT\n");
rv = ENXIO;
goto out;
}
sc->io_range = ranges[i];
if (sc->io_range.size > UINT32_MAX) {
device_printf(sc->dev,
"ATU IO window size is too large. "
"Up to 4GB windows are supported, "
"trimming window size to 4GB\n");
sc->io_range.size = UINT32_MAX;
}
}
if ((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) ==
OFW_PCI_PHYS_HI_SPACE_MEM32) {
MPASS(nmem < sc->num_mem_ranges);
sc->mem_ranges[nmem] = ranges[i];
if (sc->mem_ranges[nmem].size > UINT32_MAX) {
device_printf(sc->dev,
"ATU MEM window size is too large. "
"Up to 4GB windows are supported, "
"trimming window size to 4GB\n");
sc->mem_ranges[nmem].size = UINT32_MAX;
}
++nmem;
}
}
MPASS(nmem == sc->num_mem_ranges);
if (nmem == 0) {
device_printf(sc->dev,
"Missing required memory range in DT\n");
return (ENXIO);
}
return (0);
out:
free(sc->mem_ranges, M_DEVBUF);
return (rv);
}
/*-----------------------------------------------------------------------------
*
* P C I B I N T E R F A C E
*/
static uint32_t
pci_dw_read_config(device_t dev, u_int bus, u_int slot,
u_int func, u_int reg, int bytes)
{
struct pci_dw_softc *sc;
struct resource *res;
uint32_t data;
uint64_t addr;
int type, rv;
sc = device_get_softc(dev);
if (!pci_dw_check_dev(sc, bus, slot, func, reg))
return (0xFFFFFFFFU);
if (bus == sc->root_bus) {
res = sc->dbi_res;
} else {
addr = IATU_CFG_BUS(bus) | IATU_CFG_SLOT(slot) |
IATU_CFG_FUNC(func);
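/*
* Use a type 0 config cycle for the directly attached bus and
* type 1 for buses further out.
*/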
if (bus == sc->sub_bus)
type = IATU_CTRL1_TYPE_CFG0;
else
type = IATU_CTRL1_TYPE_CFG1;
rv = pci_dw_map_out_atu(sc, 0, type,
sc->cfg_pa, addr, sc->cfg_size);
if (rv != 0)
return (0xFFFFFFFFU);
res = sc->cfg_res;
}
switch (bytes) {
case 1:
data = bus_read_1(res, reg);
break;
case 2:
data = bus_read_2(res, reg);
break;
case 4:
data = bus_read_4(res, reg);
break;
default:
data = 0xFFFFFFFFU;
}
return (data);
}
static void
pci_dw_write_config(device_t dev, u_int bus, u_int slot,
u_int func, u_int reg, uint32_t val, int bytes)
{
struct pci_dw_softc *sc;
struct resource *res;
uint64_t addr;
int type, rv;
sc = device_get_softc(dev);
if (!pci_dw_check_dev(sc, bus, slot, func, reg))
return;
if (bus == sc->root_bus) {
res = sc->dbi_res;
} else {
addr = IATU_CFG_BUS(bus) | IATU_CFG_SLOT(slot) |
IATU_CFG_FUNC(func);
if (bus == sc->sub_bus)
type = IATU_CTRL1_TYPE_CFG0;
else
type = IATU_CTRL1_TYPE_CFG1;
rv = pci_dw_map_out_atu(sc, 0, type,
sc->cfg_pa, addr, sc->cfg_size);
if (rv != 0)
return;
res = sc->cfg_res;
}
switch (bytes) {
case 1:
bus_write_1(res, reg, val);
break;
case 2:
bus_write_2(res, reg, val);
break;
case 4:
bus_write_4(res, reg, val);
break;
default:
break;
}
}
static int
pci_dw_alloc_msi(device_t pci, device_t child, int count,
int maxcount, int *irqs)
{
phandle_t msi_parent;
int rv;
rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
&msi_parent, NULL);
if (rv != 0)
return (rv);
return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
irqs));
}
static int
pci_dw_release_msi(device_t pci, device_t child, int count, int *irqs)
{
phandle_t msi_parent;
int rv;
rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
&msi_parent, NULL);
if (rv != 0)
return (rv);
return (intr_release_msi(pci, child, msi_parent, count, irqs));
}
static int
pci_dw_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
uint32_t *data)
{
phandle_t msi_parent;
int rv;
rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
&msi_parent, NULL);
if (rv != 0)
return (rv);
return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
}
static int
pci_dw_alloc_msix(device_t pci, device_t child, int *irq)
{
phandle_t msi_parent;
int rv;
rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
&msi_parent, NULL);
if (rv != 0)
return (rv);
return (intr_alloc_msix(pci, child, msi_parent, irq));
}
static int
pci_dw_release_msix(device_t pci, device_t child, int irq)
{
phandle_t msi_parent;
int rv;
rv = ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child),
&msi_parent, NULL);
if (rv != 0)
return (rv);
return (intr_release_msix(pci, child, msi_parent, irq));
}
static int
pci_dw_get_id(device_t pci, device_t child, enum pci_id_type type,
uintptr_t *id)
{
phandle_t node;
int rv;
uint32_t rid;
uint16_t pci_rid;
if (type != PCI_ID_MSI)
return (pcib_get_id(pci, child, type, id));
node = ofw_bus_get_node(pci);
pci_rid = pci_get_rid(child);
rv = ofw_bus_msimap(node, pci_rid, NULL, &rid);
if (rv != 0)
return (rv);
*id = rid;
return (0);
}
/*-----------------------------------------------------------------------------
*
* B U S / D E V I C E I N T E R F A C E
*/
static bus_dma_tag_t
pci_dw_get_dma_tag(device_t dev, device_t child)
{
struct pci_dw_softc *sc;
sc = device_get_softc(dev);
return (sc->dmat);
}
int
pci_dw_init(device_t dev)
{
struct pci_dw_softc *sc;
int rv, rid;
bool unroll_mode;
sc = device_get_softc(dev);
sc->dev = dev;
sc->node = ofw_bus_get_node(dev);
mtx_init(&sc->mtx, "pci_dw_mtx", NULL, MTX_DEF);
/* XXX Shouldn't this be configurable? */
sc->bus_start = 0;
sc->bus_end = 255;
sc->root_bus = 0;
sc->sub_bus = 1;
/* Read FDT properties */
if (!sc->coherent)
sc->coherent = OF_hasprop(sc->node, "dma-coherent");
rv = OF_getencprop(sc->node, "num-lanes", &sc->num_lanes,
sizeof(sc->num_lanes));
if (rv != sizeof(sc->num_lanes))
sc->num_lanes = 1;
if (sc->num_lanes != 1 && sc->num_lanes != 2 &&
sc->num_lanes != 4 && sc->num_lanes != 8) {
device_printf(dev,
"invalid number of lanes: %d\n",sc->num_lanes);
sc->num_lanes = 0;
rv = ENXIO;
goto out;
}
rid = 0;
rv = ofw_bus_find_string_index(sc->node, "reg-names", "config", &rid);
if (rv != 0) {
device_printf(dev, "Cannot get config space memory\n");
rv = ENXIO;
goto out;
}
sc->cfg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->cfg_res == NULL) {
device_printf(dev, "Cannot allocate config space(rid: %d)\n",
rid);
rv = ENXIO;
goto out;
}
/* Fill up config region related variables */
sc->cfg_size = rman_get_size(sc->cfg_res);
sc->cfg_pa = rman_get_start(sc->cfg_res);
if (bootverbose)
device_printf(dev, "Bus is%s cache-coherent\n",
sc->coherent ? "" : " not");
rv = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE, /* maxsize */
BUS_SPACE_UNRESTRICTED, /* nsegments */
BUS_SPACE_MAXSIZE, /* maxsegsize */
sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->dmat);
if (rv != 0)
goto out;
rv = ofw_pcib_init(dev);
if (rv != 0)
goto out;
rv = pci_dw_decode_ranges(sc, sc->ofw_pci.sc_range,
sc->ofw_pci.sc_nrange);
if (rv != 0)
goto out;
unroll_mode = pci_dw_detect_atu_unroll(sc);
if (bootverbose)
device_printf(dev, "Using iATU %s mode\n",
unroll_mode ? "unroll" : "legacy");
if (unroll_mode) {
rid = 0;
rv = ofw_bus_find_string_index(sc->node, "reg-names", "atu", &rid);
if (rv == 0) {
sc->iatu_ur_res = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (sc->iatu_ur_res == NULL) {
device_printf(dev,
"Cannot allocate iATU space (rid: %d)\n",
rid);
rv = ENXIO;
goto out;
}
sc->iatu_ur_offset = 0;
sc->iatu_ur_size = rman_get_size(sc->iatu_ur_res);
} else if (rv == ENOENT) {
sc->iatu_ur_res = sc->dbi_res;
sc->iatu_ur_offset = DW_DEFAULT_IATU_UR_DBI_OFFSET;
sc->iatu_ur_size = DW_DEFAULT_IATU_UR_DBI_SIZE;
} else {
device_printf(dev, "Cannot get iATU space memory\n");
rv = ENXIO;
goto out;
}
}
rv = pci_dw_detect_out_atu_regions(sc);
if (rv != 0)
goto out;
if (bootverbose)
device_printf(sc->dev, "Detected outbound iATU regions: %d\n",
sc->num_out_regions);
rv = pci_dw_setup_hw(sc);
if (rv != 0)
goto out;
device_add_child(dev, "pci", DEVICE_UNIT_ANY);
return (0);
out:
/* XXX Cleanup */
return (rv);
}
static device_method_t pci_dw_methods[] = {
/* Bus interface */
DEVMETHOD(bus_get_dma_tag, pci_dw_get_dma_tag),
/* pcib interface */
DEVMETHOD(pcib_read_config, pci_dw_read_config),
DEVMETHOD(pcib_write_config, pci_dw_write_config),
DEVMETHOD(pcib_alloc_msi, pci_dw_alloc_msi),
DEVMETHOD(pcib_release_msi, pci_dw_release_msi),
DEVMETHOD(pcib_alloc_msix, pci_dw_alloc_msix),
DEVMETHOD(pcib_release_msix, pci_dw_release_msix),
DEVMETHOD(pcib_map_msi, pci_dw_map_msi),
DEVMETHOD(pcib_get_id, pci_dw_get_id),
/* OFW bus interface */
DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),
/* PCI DW interface */
DEVMETHOD(pci_dw_dbi_read, pci_dw_dbi_read),
DEVMETHOD(pci_dw_dbi_write, pci_dw_dbi_write),
DEVMETHOD_END
};
DEFINE_CLASS_1(pcib, pci_dw_driver, pci_dw_methods,
sizeof(struct pci_dw_softc), ofw_pcib_driver);
diff --git a/sys/dev/pci/pci_dw_mv.c b/sys/dev/pci/pci_dw_mv.c
index d7eada39a6d6..b67356fc0e1d 100644
--- a/sys/dev/pci/pci_dw_mv.c
+++ b/sys/dev/pci/pci_dw_mv.c
@@ -1,328 +1,327 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Michal Meloun
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
/* Armada 8k DesignWare PCIe driver */
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pcib_if.h"
#include "pci_dw_if.h"
#define MV_GLOBAL_CONTROL_REG 0x8000
#define PCIE_APP_LTSSM_EN (1 << 2)
#define MV_GLOBAL_STATUS_REG 0x8008
#define MV_STATUS_RDLH_LINK_UP (1 << 1)
#define MV_STATUS_PHY_LINK_UP (1 << 9)
#define MV_INT_CAUSE1 0x801C
#define MV_INT_MASK1 0x8020
#define INT_A_ASSERT_MASK (1 << 9)
#define INT_B_ASSERT_MASK (1 << 10)
#define INT_C_ASSERT_MASK (1 << 11)
#define INT_D_ASSERT_MASK (1 << 12)
#define MV_INT_CAUSE2 0x8024
#define MV_INT_MASK2 0x8028
#define MV_ERR_INT_CAUSE 0x802C
#define MV_ERR_INT_MASK 0x8030
#define MV_ARCACHE_TRC_REG 0x8050
#define MV_AWCACHE_TRC_REG 0x8054
#define MV_ARUSER_REG 0x805C
#define MV_AWUSER_REG 0x8060
#define MV_MAX_LANES 8
struct pci_mv_softc {
struct pci_dw_softc dw_sc;
device_t dev;
phandle_t node;
struct resource *irq_res;
void *intr_cookie;
phy_t phy[MV_MAX_LANES];
clk_t clk_core;
clk_t clk_reg;
};
/* Compatible devices. */
static struct ofw_compat_data compat_data[] = {
{"marvell,armada8k-pcie", 1},
{NULL, 0},
};
static int
pci_mv_phy_init(struct pci_mv_softc *sc)
{
int i, rv;
for (i = 0; i < MV_MAX_LANES; i++) {
rv = phy_get_by_ofw_idx(sc->dev, sc->node, i, &(sc->phy[i]));
if (rv != 0 && rv != ENOENT) {
device_printf(sc->dev, "Cannot get phy[%d]\n", i);
/* XXX revert when phy driver will be implemented */
#if 0
goto fail;
#else
continue;
#endif
}
if (sc->phy[i] == NULL)
continue;
rv = phy_enable(sc->phy[i]);
if (rv != 0) {
device_printf(sc->dev, "Cannot enable phy[%d]\n", i);
goto fail;
}
}
return (0);
fail:
for (i = 0; i < MV_MAX_LANES; i++) {
if (sc->phy[i] == NULL)
continue;
phy_release(sc->phy[i]);
}
return (rv);
}
static void
pci_mv_init(struct pci_mv_softc *sc)
{
uint32_t reg;
/* Set device configuration to RC */
reg = pci_dw_dbi_rd4(sc->dev, MV_GLOBAL_CONTROL_REG);
reg &= ~0x000000F0;
reg |= 0x000000040;
pci_dw_dbi_wr4(sc->dev, MV_GLOBAL_CONTROL_REG, reg);
/* AxCache master transaction attributes */
pci_dw_dbi_wr4(sc->dev, MV_ARCACHE_TRC_REG, 0x3511);
pci_dw_dbi_wr4(sc->dev, MV_AWCACHE_TRC_REG, 0x5311);
/* AxDomain master transaction attributes */
pci_dw_dbi_wr4(sc->dev, MV_ARUSER_REG, 0x0002);
pci_dw_dbi_wr4(sc->dev, MV_AWUSER_REG, 0x0002);
/* Enable all INTx (virtual) interrupt pins */
reg = pci_dw_dbi_rd4(sc->dev, MV_INT_MASK1);
reg |= INT_A_ASSERT_MASK | INT_B_ASSERT_MASK |
INT_C_ASSERT_MASK | INT_D_ASSERT_MASK;
pci_dw_dbi_wr4(sc->dev, MV_INT_MASK1, reg);
/* Enable local interrupts */
pci_dw_dbi_wr4(sc->dev, DW_MSI_INTR0_MASK, 0xFFFFFFFF);
pci_dw_dbi_wr4(sc->dev, MV_INT_MASK1, 0x0001FE00);
pci_dw_dbi_wr4(sc->dev, MV_INT_MASK2, 0x00000000);
pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE1, 0xFFFFFFFF);
pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE2, 0xFFFFFFFF);
/* Errors have their own interrupt, not yet populated in the DT */
pci_dw_dbi_wr4(sc->dev, MV_ERR_INT_MASK, 0);
}
static int pci_mv_intr(void *arg)
{
struct pci_mv_softc *sc = arg;
uint32_t cause1, cause2;
/* Ack all interrupts */
cause1 = pci_dw_dbi_rd4(sc->dev, MV_INT_CAUSE1);
cause2 = pci_dw_dbi_rd4(sc->dev, MV_INT_CAUSE2);
pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE1, cause1);
pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE2, cause2);
return (FILTER_HANDLED);
}
static int
pci_mv_get_link(device_t dev, bool *status)
{
uint32_t reg;
reg = pci_dw_dbi_rd4(dev, MV_GLOBAL_STATUS_REG);
if ((reg & (MV_STATUS_RDLH_LINK_UP | MV_STATUS_PHY_LINK_UP)) ==
(MV_STATUS_RDLH_LINK_UP | MV_STATUS_PHY_LINK_UP))
*status = true;
else
*status = false;
return (0);
}
static int
pci_mv_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
return (ENXIO);
device_set_desc(dev, "Marvell Armada8K PCI-E Controller");
return (BUS_PROBE_DEFAULT);
}
static int
pci_mv_attach(device_t dev)
{
struct resource_map_request req;
struct resource_map map;
struct pci_mv_softc *sc;
phandle_t node;
int rv;
int rid;
sc = device_get_softc(dev);
node = ofw_bus_get_node(dev);
sc->dev = dev;
sc->node = node;
rid = 0;
sc->dw_sc.dbi_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE | RF_UNMAPPED);
if (sc->dw_sc.dbi_res == NULL) {
device_printf(dev, "Cannot allocate DBI memory\n");
rv = ENXIO;
goto out;
}
resource_init_map_request(&req);
req.memattr = VM_MEMATTR_DEVICE_NP;
rv = bus_map_resource(dev, SYS_RES_MEMORY, sc->dw_sc.dbi_res, &req,
&map);
if (rv != 0) {
device_printf(dev, "could not map memory.\n");
return (rv);
}
rman_set_mapping(sc->dw_sc.dbi_res, &map);
/* PCI interrupt */
rid = 0;
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE | RF_SHAREABLE);
if (sc->irq_res == NULL) {
device_printf(dev, "Cannot allocate IRQ resources\n");
rv = ENXIO;
goto out;
}
/* Clocks */
rv = clk_get_by_ofw_name(sc->dev, 0, "core", &sc->clk_core);
if (rv != 0) {
device_printf(sc->dev, "Cannot get 'core' clock\n");
rv = ENXIO;
goto out;
}
rv = clk_get_by_ofw_name(sc->dev, 0, "reg", &sc->clk_reg);
if (rv != 0) {
device_printf(sc->dev, "Cannot get 'reg' clock\n");
rv = ENXIO;
goto out;
}
rv = clk_enable(sc->clk_core);
if (rv != 0) {
device_printf(sc->dev, "Cannot enable 'core' clock\n");
rv = ENXIO;
goto out;
}
rv = clk_enable(sc->clk_reg);
if (rv != 0) {
device_printf(sc->dev, "Cannot enable 'reg' clock\n");
rv = ENXIO;
goto out;
}
rv = pci_mv_phy_init(sc);
if (rv)
goto out;
rv = pci_dw_init(dev);
if (rv != 0)
goto out;
pci_mv_init(sc);
/* Setup interrupt */
if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
pci_mv_intr, NULL, sc, &sc->intr_cookie)) {
device_printf(dev, "cannot setup interrupt handler\n");
rv = ENXIO;
goto out;
}
bus_attach_children(dev);
return (0);
out:
/* XXX Cleanup */
return (rv);
}
static device_method_t pci_mv_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, pci_mv_probe),
DEVMETHOD(device_attach, pci_mv_attach),
DEVMETHOD(pci_dw_get_link, pci_mv_get_link),
DEVMETHOD_END
};
DEFINE_CLASS_1(pcib, pci_mv_driver, pci_mv_methods,
sizeof(struct pci_mv_softc), pci_dw_driver);
DRIVER_MODULE(pci_mv, simplebus, pci_mv_driver, NULL, NULL);