diff --git a/sys/arm/annapurna/alpine/alpine_machdep_mp.c b/sys/arm/annapurna/alpine/alpine_machdep_mp.c
index 7e6ea3479570..2a341540104d 100644
--- a/sys/arm/annapurna/alpine/alpine_machdep_mp.c
+++ b/sys/arm/annapurna/alpine/alpine_machdep_mp.c
@@ -1,252 +1,252 @@
/*-
* Copyright (c) 2013 Ruslan Bukin
* Copyright (c) 2015 Semihalf
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define AL_CPU_RESUME_WATERMARK_REG 0x00
#define AL_CPU_RESUME_FLAGS_REG 0x04
#define AL_CPU_RESUME_PCPU_RADDR_REG(cpu) (0x08 + 0x04 + 8*(cpu))
#define AL_CPU_RESUME_PCPU_FLAGS(cpu) (0x08 + 8*(cpu))
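/*
 * Layout implied by the macros above: a watermark word at 0x00, global
 * flags at 0x04, then one 8-byte record per CPU, with the per-CPU flags
 * at 0x08 + 8*cpu and the 32-bit resume address at 0x0c + 8*cpu.
 */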
/* Per-CPU flags */
#define AL_CPU_RESUME_FLG_PERCPU_DONT_RESUME (1 << 2)
/* The expected magic number for validating the resume addresses */
#define AL_CPU_RESUME_MAGIC_NUM 0xf0e1d200
#define AL_CPU_RESUME_MAGIC_NUM_MASK 0xffffff00
/* The expected minimal version number for validating the capabilities */
#define AL_CPU_RESUME_MIN_VER 0x000000c3
#define AL_CPU_RESUME_MIN_VER_MASK 0x000000ff
/* Field controlling the boot-up of companion cores */
#define AL_NB_INIT_CONTROL (0x8)
#define AL_NB_CONFIG_STATUS_PWR_CTRL(cpu) (0x2020 + (cpu)*0x100)
extern bus_addr_t al_devmap_pa;
extern bus_addr_t al_devmap_size;
extern void mpentry(void);
static int platform_mp_get_core_cnt(void);
static int alpine_get_cpu_resume_base(u_long *pbase, u_long *psize);
static int alpine_get_nb_base(u_long *pbase, u_long *psize);
-static boolean_t alpine_validate_cpu(u_int, phandle_t, u_int, pcell_t *);
+static bool alpine_validate_cpu(u_int, phandle_t, u_int, pcell_t *);
-static boolean_t
+static bool
alpine_validate_cpu(u_int id, phandle_t child, u_int addr_cell, pcell_t *reg)
{
return ofw_bus_node_is_compatible(child, "arm,cortex-a15");
}
static int
platform_mp_get_core_cnt(void)
{
static int ncores = 0;
int nchilds;
uint32_t reg;
/* Calculate ncores value only once */
if (ncores)
return (ncores);
reg = cp15_l2ctlr_get();
ncores = CPUV7_L2CTLR_NPROC(reg);
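/*
 * L2CTLR[25:24] encodes the number of cores minus one; the
 * CPUV7_L2CTLR_NPROC() macro converts that field back to a core count.
 */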
nchilds = ofw_cpu_early_foreach(alpine_validate_cpu, false);
/* Limit CPUs if DTS has configured less than available */
if ((nchilds > 0) && (nchilds < ncores)) {
printf("SMP: limiting number of active CPUs to %d out of %d\n",
nchilds, ncores);
ncores = nchilds;
}
return (ncores);
}
void
alpine_mp_setmaxid(platform_t plat)
{
mp_ncpus = platform_mp_get_core_cnt();
mp_maxid = mp_ncpus - 1;
}
static int
alpine_get_cpu_resume_base(u_long *pbase, u_long *psize)
{
phandle_t node;
u_long base = 0;
u_long size = 0;
if (pbase == NULL || psize == NULL)
return (EINVAL);
if ((node = OF_finddevice("/")) == -1)
return (EFAULT);
if ((node =
ofw_bus_find_compatible(node, "annapurna-labs,al-cpu-resume")) == 0)
return (EFAULT);
if (fdt_regsize(node, &base, &size))
return (EFAULT);
*pbase = base;
*psize = size;
return (0);
}
static int
alpine_get_nb_base(u_long *pbase, u_long *psize)
{
phandle_t node;
u_long base = 0;
u_long size = 0;
if (pbase == NULL || psize == NULL)
return (EINVAL);
if ((node = OF_finddevice("/")) == -1)
return (EFAULT);
if ((node =
ofw_bus_find_compatible(node, "annapurna-labs,al-nb-service")) == 0)
return (EFAULT);
if (fdt_regsize(node, &base, &size))
return (EFAULT);
*pbase = base;
*psize = size;
return (0);
}
void
alpine_mp_start_ap(platform_t plat)
{
uint32_t physaddr;
vm_offset_t vaddr;
uint32_t val;
uint32_t start_mask;
u_long cpu_resume_base;
u_long nb_base;
u_long cpu_resume_size;
u_long nb_size;
bus_addr_t cpu_resume_baddr;
bus_addr_t nb_baddr;
int a;
if (alpine_get_cpu_resume_base(&cpu_resume_base, &cpu_resume_size))
panic("Couldn't resolve cpu_resume_base address\n");
if (alpine_get_nb_base(&nb_base, &nb_size))
panic("Couldn't resolve_nb_base address\n");
/* Proceed with start addresses for additional CPUs */
if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + cpu_resume_base,
cpu_resume_size, 0, &cpu_resume_baddr))
panic("Couldn't map CPU-resume area");
if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + nb_base,
nb_size, 0, &nb_baddr))
panic("Couldn't map NB-service area");
/* Validate the CPU-resume unit: check its magic number and version */
val = bus_space_read_4(fdtbus_bs_tag, cpu_resume_baddr,
AL_CPU_RESUME_WATERMARK_REG);
if (((val & AL_CPU_RESUME_MAGIC_NUM_MASK) != AL_CPU_RESUME_MAGIC_NUM) ||
((val & AL_CPU_RESUME_MIN_VER_MASK) < AL_CPU_RESUME_MIN_VER)) {
panic("CPU-resume device is not compatible");
}
vaddr = (vm_offset_t)mpentry;
physaddr = pmap_kextract(vaddr);
for (a = 1; a < platform_mp_get_core_cnt(); a++) {
/* Power up the core */
bus_space_write_4(fdtbus_bs_tag, nb_baddr,
AL_NB_CONFIG_STATUS_PWR_CTRL(a), 0);
mb();
/* Enable resume */
val = bus_space_read_4(fdtbus_bs_tag, cpu_resume_baddr,
AL_CPU_RESUME_PCPU_FLAGS(a));
val &= ~AL_CPU_RESUME_FLG_PERCPU_DONT_RESUME;
bus_space_write_4(fdtbus_bs_tag, cpu_resume_baddr,
AL_CPU_RESUME_PCPU_FLAGS(a), val);
mb();
/* Set resume physical address */
bus_space_write_4(fdtbus_bs_tag, cpu_resume_baddr,
AL_CPU_RESUME_PCPU_RADDR_REG(a), physaddr);
mb();
}
/*
 * Release cores from reset; the mask covers all cores, e.g. a
 * 4-core part yields 0xf. The NB-service area is still mapped
 * from above.
 */
start_mask = (1 << platform_mp_get_core_cnt()) - 1;
val = bus_space_read_4(fdtbus_bs_tag, nb_baddr, AL_NB_INIT_CONTROL);
val |= start_mask;
bus_space_write_4(fdtbus_bs_tag, nb_baddr, AL_NB_INIT_CONTROL, val);
dsb();
bus_space_unmap(fdtbus_bs_tag, nb_baddr, nb_size);
bus_space_unmap(fdtbus_bs_tag, cpu_resume_baddr, cpu_resume_size);
}
diff --git a/sys/arm/qemu/virt_mp.c b/sys/arm/qemu/virt_mp.c
index 12a957caa897..9b7c1c0bd23d 100644
--- a/sys/arm/qemu/virt_mp.c
+++ b/sys/arm/qemu/virt_mp.c
@@ -1,74 +1,74 @@
/*-
* Copyright (c) 2015 Andrew Turner
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
static int running_cpus;
-static boolean_t
+static bool
virt_start_ap(u_int id, phandle_t node, u_int addr_cells, pcell_t *reg)
{
int err;
if (running_cpus >= mp_ncpus)
return (false);
running_cpus++;
err = psci_cpu_on(*reg, pmap_kextract((vm_offset_t)mpentry), id);
if (err != PSCI_RETVAL_SUCCESS)
return (false);
return (true);
}
void
virt_mp_start_ap(platform_t plat)
{
ofw_cpu_early_foreach(virt_start_ap, true);
}
diff --git a/sys/arm/qualcomm/ipq4018_mp.c b/sys/arm/qualcomm/ipq4018_mp.c
index a7ebb7d7d6c8..f8de5d6a9ccd 100644
--- a/sys/arm/qualcomm/ipq4018_mp.c
+++ b/sys/arm/qualcomm/ipq4018_mp.c
@@ -1,116 +1,116 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2021 Adrian Chadd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "platform_if.h"
void
ipq4018_mp_setmaxid(platform_t plat)
{
int ncpu;
/* If we've already set the global vars don't bother to do it again. */
if (mp_ncpus != 0)
return;
/* Read current CP15 Cache Size ID Register */
ncpu = cp15_l2ctlr_get();
ncpu = CPUV7_L2CTLR_NPROC(ncpu);
mp_ncpus = ncpu;
mp_maxid = ncpu - 1;
printf("SMP: ncpu=%d\n", ncpu);
}
-static boolean_t
+static bool
ipq4018_start_ap(u_int id, phandle_t node, u_int addr_cells, pcell_t *arg)
{
/*
* For the IPQ401x we assume the enable method is
* "qcom,kpss-acc-v2". If this path gets turned into
* something more generic for other 32 bit qualcomm
* SoCs then we'll likely want to turn this into a
* switch based on "enable-method".
*/
return qcom_cpu_kpssv2_regulator_start(id, node);
}
void
ipq4018_mp_start_ap(platform_t plat)
{
int ret;
/*
* First step - SCM call to set the cold boot address to mpentry, so
* CPUs hopefully start in the MP path.
*/
ret = qcom_scm_legacy_mp_set_cold_boot_address((vm_offset_t) mpentry);
if (ret != 0)
panic("%s: Couldn't set cold boot address via SCM "
"(error 0x%08x)", __func__, ret);
/*
* Next step - loop over the CPU nodes and do the per-CPU setup
* required to power on the CPUs themselves.
*/
ofw_cpu_early_foreach(ipq4018_start_ap, true);
/*
* The next set of IPIs to the CPUs will wake them up and enter
* mpentry.
*/
}
diff --git a/sys/arm/qualcomm/qcom_cpu_kpssv2.h b/sys/arm/qualcomm/qcom_cpu_kpssv2.h
index 18e4affc208e..d84158088120 100644
--- a/sys/arm/qualcomm/qcom_cpu_kpssv2.h
+++ b/sys/arm/qualcomm/qcom_cpu_kpssv2.h
@@ -1,35 +1,35 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2021 Adrian Chadd
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __QCOM_CPU_KPSSV2_H__
#define __QCOM_CPU_KPSSV2_H__
-extern boolean_t qcom_cpu_kpssv2_regulator_start(u_int id, phandle_t node);
+extern bool qcom_cpu_kpssv2_regulator_start(u_int id, phandle_t node);
#endif /* __QCOM_CPU_KPSSV2_H__ */
diff --git a/sys/arm/rockchip/rk32xx_mp.c b/sys/arm/rockchip/rk32xx_mp.c
index 089db173a0e9..fdb70cb0a6cd 100644
--- a/sys/arm/rockchip/rk32xx_mp.c
+++ b/sys/arm/rockchip/rk32xx_mp.c
@@ -1,174 +1,174 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2019 Michal Meloun
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define IMEM_PHYSBASE 0xFF700000
#define IMEM_SIZE 0x00018000
#define PMU_PHYSBASE 0xFF730000
#define PMU_SIZE 0x00010000
#define PMU_PWRDN_CON 0x08
static int running_cpus;
static uint32_t psci_mask, pmu_mask;
void
rk32xx_mp_setmaxid(platform_t plat)
{
int ncpu;
/* If we've already set the global vars don't bother to do it again. */
if (mp_ncpus != 0)
return;
/* Read current CP15 Cache Size ID Register */
ncpu = cp15_l2ctlr_get();
ncpu = CPUV7_L2CTLR_NPROC(ncpu);
mp_ncpus = ncpu;
mp_maxid = ncpu - 1;
}
static void
rk32xx_mp_start_pmu(uint32_t mask)
{
bus_space_handle_t imem;
bus_space_handle_t pmu;
uint32_t val;
int i, rv;
rv = bus_space_map(fdtbus_bs_tag, IMEM_PHYSBASE, IMEM_SIZE, 0, &imem);
if (rv != 0)
panic("Couldn't map the IMEM\n");
rv = bus_space_map(fdtbus_bs_tag, PMU_PHYSBASE, PMU_SIZE, 0, &pmu);
if (rv != 0)
panic("Couldn't map the PMU\n");
/* Power off all secondary cores first */
val = bus_space_read_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON);
for (i = 1; i < mp_ncpus; i++)
val |= 1 << i;
bus_space_write_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON, val);
DELAY(5000);
/* Power up all secondary cores */
val = bus_space_read_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON);
for (i = 1; i < mp_ncpus; i++)
val &= ~(1 << i);
bus_space_write_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON, val);
DELAY(5000);
/* Copy mpentry address then magic to sram */
val = pmap_kextract((vm_offset_t)mpentry);
bus_space_write_4(fdtbus_bs_tag, imem, 8, val);
dsb();
bus_space_write_4(fdtbus_bs_tag, imem, 4, 0xDEADBEAF);
dsb();
sev();
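/*
 * The ordering above is deliberate: the entry address is committed
 * (dsb) before the magic word, so a core that observes the magic can
 * never read a stale mpentry address; sev then wakes cores presumed
 * to be waiting in wfe.
 */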
bus_space_unmap(fdtbus_bs_tag, imem, IMEM_SIZE);
bus_space_unmap(fdtbus_bs_tag, pmu, PMU_SIZE);
}
-static boolean_t
+static bool
rk32xx_start_ap(u_int id, phandle_t node, u_int addr_cells, pcell_t *reg)
{
int rv;
char method[16];
uint32_t mask;
if (!ofw_bus_node_status_okay(node))
return (false);
/* Skip boot CPU. */
if (id == 0)
return (true);
if (running_cpus >= mp_ncpus)
return (false);
running_cpus++;
mask = 1 << (*reg & 0x0f);
#ifdef INVARIANTS
if ((mask & pmu_mask) || (mask & psci_mask))
printf("CPU: Duplicated register value: 0x%X for CPU(%d)\n",
*reg, id);
#endif
rv = OF_getprop(node, "enable-method", method, sizeof(method));
if (rv > 0 && strcmp(method, "psci") == 0) {
psci_mask |= mask;
rv = psci_cpu_on(*reg, pmap_kextract((vm_offset_t)mpentry), id);
if (rv != PSCI_RETVAL_SUCCESS) {
printf("Failed to start CPU(%d)\n", id);
return (false);
}
return (true);
}
pmu_mask |= mask;
return (true);
}
void
rk32xx_mp_start_ap(platform_t plat)
{
ofw_cpu_early_foreach(rk32xx_start_ap, true);
if (pmu_mask != 0 && psci_mask != 0) {
printf("Inconsistent CPUs startup methods detected.\n");
printf("Only PSCI enabled cores will be started.\n");
return;
}
if (pmu_mask != 0)
rk32xx_mp_start_pmu(pmu_mask);
}
diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
index 5e4c035586a5..9a4997358f8a 100644
--- a/sys/arm64/arm64/mp_machdep.c
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -1,1034 +1,1034 @@
/*-
* Copyright (c) 2015-2016 The FreeBSD Foundation
*
* This software was developed by Andrew Turner under
* sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef VFP
#include
#endif
#ifdef DEV_ACPI
#include
#include
#endif
#ifdef FDT
#include
#include
#include
#include
#endif
#include
#include "pic_if.h"
#define MP_BOOTSTACK_SIZE (kstack_pages * PAGE_SIZE)
#define MP_QUIRK_CPULIST 0x01 /* The list of cpus may be wrong, */
/* don't panic if one fails to start */
static uint32_t mp_quirks;
#ifdef FDT
static struct {
const char *compat;
uint32_t quirks;
} fdt_quirks[] = {
{ "arm,foundation-aarch64", MP_QUIRK_CPULIST },
{ "arm,fvp-base", MP_QUIRK_CPULIST },
/* This is incorrect in some DTS files */
{ "arm,vfp-base", MP_QUIRK_CPULIST },
{ NULL, 0 },
};
#endif
typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);
#define INTR_IPI_NAMELEN (MAXCOMLEN + 1)
struct intr_ipi {
intr_ipi_handler_t * ii_handler;
void * ii_handler_arg;
intr_ipi_send_t * ii_send;
void * ii_send_arg;
char ii_name[INTR_IPI_NAMELEN];
u_long * ii_count;
};
static struct intr_ipi ipi_sources[INTR_IPI_COUNT];
static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
void *);
static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);
struct pcb stoppcbs[MAXCPU];
#ifdef FDT
static u_int fdt_cpuid;
#endif
void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);
/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;
/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];
/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;
/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;
/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];
static bool
is_boot_cpu(uint64_t target_cpu)
{
return (PCPU_GET_MPIDR(cpuid_to_pcpu[0]) == (target_cpu & CPU_AFF_MASK));
}
static void
release_aps(void *dummy __unused)
{
int i, started;
/* Only release CPUs if they exist */
if (mp_ncpus == 1)
return;
intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);
atomic_store_rel_int(&aps_ready, 1);
/* Wake up the other CPUs */
__asm __volatile(
"dsb ishst \n"
"sev \n"
::: "memory");
printf("Release APs...");
started = 0;
for (i = 0; i < 2000; i++) {
if (atomic_load_acq_int(&smp_started) != 0) {
printf("done\n");
return;
}
/*
* Don't time out while we are making progress. Some large
* systems can take a while to start all CPUs.
*/
if (smp_cpus > started) {
i = 0;
started = smp_cpus;
}
DELAY(1000);
}
printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
void
init_secondary(uint64_t cpu)
{
struct pcpu *pcpup;
pmap_t pmap0;
uint64_t mpidr;
ptrauth_mp_start(cpu);
/*
* Verify that the value passed in 'cpu' argument (aka context_id) is
* valid. Some older U-Boot based PSCI implementations are buggy,
* they can pass random value in it.
*/
mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
if (cpu >= MAXCPU || cpuid_to_pcpu[cpu] == NULL ||
PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) != mpidr) {
for (cpu = 0; cpu < mp_maxid; cpu++)
if (cpuid_to_pcpu[cpu] != NULL &&
PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) == mpidr)
break;
if (cpu >= MAXCPU)
panic("MPIDR for this CPU is not in pcpu table");
}
pcpup = cpuid_to_pcpu[cpu];
/*
* Set the pcpu pointer with a backup in tpidr_el1 to be
* loaded when entering the kernel from userland.
*/
__asm __volatile(
"mov x18, %0 \n"
"msr tpidr_el1, %0" :: "r"(pcpup));
/*
* Identify current CPU. This is necessary to setup
* affinity registers and to provide support for
* runtime chip identification.
*
* We need this before signalling the CPU is ready to
* let the boot CPU use the results.
*/
pcpup->pc_midr = get_midr();
identify_cpu(cpu);
/* Ensure the stores in identify_cpu have completed */
atomic_thread_fence_acq_rel();
/* Signal the BSP and spin until it has released all APs. */
atomic_add_int(&aps_started, 1);
while (!atomic_load_int(&aps_ready))
__asm __volatile("wfe");
/* Initialize curthread */
KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
pcpup->pc_curthread = pcpup->pc_idlethread;
schedinit_ap();
/* Initialize curpmap to match TTBR0's current setting. */
pmap0 = vmspace_pmap(&vmspace0);
KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
("pmap0 doesn't match cpu %ld's ttbr0", cpu));
pcpup->pc_curpmap = pmap0;
install_cpu_errata();
intr_pic_init_secondary();
/* Start per-CPU event timers. */
cpu_initclocks_ap();
#ifdef VFP
vfp_init();
#endif
dbg_init();
pan_enable();
mtx_lock_spin(&ap_boot_mtx);
atomic_add_rel_32(&smp_cpus, 1);
if (smp_cpus == mp_ncpus) {
/* enable IPI's, tlb shootdown, freezes etc */
atomic_store_rel_int(&smp_started, 1);
}
mtx_unlock_spin(&ap_boot_mtx);
kcsan_cpu_init(cpu);
/* Enter the scheduler */
sched_ap_entry();
panic("scheduler returned us to init_secondary");
/* NOTREACHED */
}
static void
smp_after_idle_runnable(void *arg __unused)
{
int cpu;
if (mp_ncpus == 1)
return;
KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));
/*
* Wait for all APs to handle an interrupt. After that, we know that
* the APs have entered the scheduler at least once, so the boot stacks
* are safe to free.
*/
smp_rendezvous(smp_no_rendezvous_barrier, NULL,
smp_no_rendezvous_barrier, NULL);
for (cpu = 1; cpu < mp_ncpus; cpu++) {
if (bootstacks[cpu] != NULL)
kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE);
}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
smp_after_idle_runnable, NULL);
/*
* Send IPI thru interrupt controller.
*/
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{
KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
/*
* Ensure that this CPU's stores will be visible to IPI
* recipients before starting to send the interrupts.
*/
dsb(ishst);
PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}
/*
* Setup IPI handler on interrupt controller.
*
* Not SMP coherent.
*/
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
void *arg)
{
struct intr_irqsrc *isrc;
struct intr_ipi *ii;
int error;
KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));
error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
if (error != 0)
return;
isrc->isrc_handlers++;
ii = intr_ipi_lookup(ipi);
KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));
ii->ii_handler = hand;
ii->ii_handler_arg = arg;
ii->ii_send = pic_ipi_send;
ii->ii_send_arg = isrc;
strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
ii->ii_count = intr_ipi_setup_counters(name);
PIC_ENABLE_INTR(intr_irq_root_dev, isrc);
}
static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
struct intr_ipi *ii;
ii = intr_ipi_lookup(ipi);
if (ii->ii_count == NULL)
panic("%s: not setup IPI %u", __func__, ipi);
ii->ii_send(ii->ii_send_arg, cpus, ipi);
}
static void
ipi_ast(void *dummy __unused)
{
CTR0(KTR_SMP, "IPI_AST");
}
static void
ipi_hardclock(void *dummy __unused)
{
CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
hardclockintr();
}
static void
ipi_preempt(void *dummy __unused)
{
CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
sched_preempt(curthread);
}
static void
ipi_rendezvous(void *dummy __unused)
{
CTR0(KTR_SMP, "IPI_RENDEZVOUS");
smp_rendezvous_action();
}
static void
ipi_stop(void *dummy __unused)
{
u_int cpu;
CTR0(KTR_SMP, "IPI_STOP");
cpu = PCPU_GET(cpuid);
savectx(&stoppcbs[cpu]);
/* Indicate we are stopped */
CPU_SET_ATOMIC(cpu, &stopped_cpus);
/* Wait for restart */
while (!CPU_ISSET(cpu, &started_cpus))
cpu_spinwait();
#ifdef DDB
dbg_register_sync(NULL);
#endif
CPU_CLR_ATOMIC(cpu, &started_cpus);
CPU_CLR_ATOMIC(cpu, &stopped_cpus);
CTR0(KTR_SMP, "IPI_STOP (restart)");
}
struct cpu_group *
cpu_topo(void)
{
struct cpu_group *dom, *root;
int i;
root = smp_topo_alloc(1);
dom = smp_topo_alloc(vm_ndomains);
root->cg_parent = NULL;
root->cg_child = dom;
CPU_COPY(&all_cpus, &root->cg_mask);
root->cg_count = mp_ncpus;
root->cg_children = vm_ndomains;
root->cg_level = CG_SHARE_NONE;
root->cg_flags = 0;
/*
* Redundant layers will be collapsed by the caller so we don't need a
* special case for a single domain.
*/
for (i = 0; i < vm_ndomains; i++, dom++) {
dom->cg_parent = root;
dom->cg_child = NULL;
CPU_COPY(&cpuset_domain[i], &dom->cg_mask);
dom->cg_count = CPU_COUNT(&dom->cg_mask);
dom->cg_children = 0;
dom->cg_level = CG_SHARE_L3;
dom->cg_flags = 0;
}
return (root);
}
/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{
/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
return (1);
}
static int
enable_cpu_psci(uint64_t target_cpu, vm_paddr_t entry, u_int cpuid)
{
int err;
err = psci_cpu_on(target_cpu, entry, cpuid);
if (err != PSCI_RETVAL_SUCCESS) {
/*
* Panic here if INVARIANTS are enabled and PSCI failed to
* start the requested CPU. psci_cpu_on() returns PSCI_MISSING
* to indicate we are unable to use it to start the given CPU.
*/
KASSERT(err == PSCI_MISSING ||
(mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
("Failed to start CPU %u (%lx), error %d\n",
cpuid, target_cpu, err));
return (EINVAL);
}
return (0);
}
static int
enable_cpu_spin(uint64_t cpu, vm_paddr_t entry, vm_paddr_t release_paddr)
{
vm_paddr_t *release_addr;
release_addr = pmap_mapdev(release_paddr, sizeof(*release_addr));
if (release_addr == NULL)
return (ENOMEM);
*release_addr = entry;
pmap_unmapdev(release_addr, sizeof(*release_addr));
__asm __volatile(
"dsb sy \n"
"sev \n"
::: "memory");
return (0);
}
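/*
 * This is the release half of the spin-table protocol: firmware parks
 * each AP polling its release address (typically in a wfe loop), and
 * publishing the kernel entry point there, followed by dsb/sev, lets
 * the AP drop out of that loop and branch to mpentry.
 */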
/*
* Starts a given CPU. If the CPU is already running, i.e. it is the boot CPU,
* do nothing. Returns true if the CPU is present and running.
*/
static bool
start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr)
{
struct pcpu *pcpup;
vm_size_t size;
vm_paddr_t pa;
int err, naps;
/* Check we are able to start this cpu */
if (cpuid > mp_maxid)
return (false);
/* Skip boot CPU */
if (is_boot_cpu(target_cpu))
return (true);
KASSERT(cpuid < MAXCPU, ("Too many CPUs"));
size = round_page(sizeof(*pcpup) + DPCPU_SIZE);
pcpup = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
M_WAITOK | M_ZERO);
pmap_disable_promotion((vm_offset_t)pcpup, size);
pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;
dpcpu[cpuid - 1] = (void *)(pcpup + 1);
dpcpu_init(dpcpu[cpuid - 1], cpuid);
bootstacks[cpuid] = kmem_malloc_domainset(DOMAINSET_PREF(domain),
MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);
naps = atomic_load_int(&aps_started);
bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;
printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
/*
* A limited set of hardware we support can only do spintables and
* remain useful, due to lack of EL3. Thus, we'll usually fall into the
* PSCI branch here.
*/
MPASS(release_addr == 0 || !psci_present);
if (release_addr != 0)
err = enable_cpu_spin(target_cpu, pa, release_addr);
else
err = enable_cpu_psci(target_cpu, pa, cpuid);
if (err != 0) {
pcpu_destroy(pcpup);
dpcpu[cpuid - 1] = NULL;
kmem_free(bootstacks[cpuid], MP_BOOTSTACK_SIZE);
kmem_free(pcpup, size);
bootstacks[cpuid] = NULL;
mp_ncpus--;
return (false);
}
/* Wait for the AP to switch to its boot stack. */
while (atomic_load_int(&aps_started) < naps + 1)
cpu_spinwait();
CPU_SET(cpuid, &all_cpus);
return (true);
}
#ifdef DEV_ACPI
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
ACPI_MADT_GENERIC_INTERRUPT *intr;
u_int *cpuid;
u_int id;
int domain;
switch(entry->Type) {
case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
cpuid = arg;
if (is_boot_cpu(intr->ArmMpidr))
id = 0;
else
id = *cpuid;
domain = 0;
#ifdef NUMA
if (vm_ndomains > 1)
domain = acpi_pxm_get_cpu_locality(intr->Uid);
#endif
if (start_cpu(id, intr->ArmMpidr, domain, 0)) {
MPASS(cpuid_to_pcpu[id] != NULL);
cpuid_to_pcpu[id]->pc_acpi_id = intr->Uid;
/*
* Don't increment for the boot CPU, its CPU ID is
* reserved.
*/
if (!is_boot_cpu(intr->ArmMpidr))
(*cpuid)++;
}
break;
default:
break;
}
}
static void
cpu_init_acpi(void)
{
ACPI_TABLE_MADT *madt;
vm_paddr_t physaddr;
u_int cpuid;
physaddr = acpi_find_table(ACPI_SIG_MADT);
if (physaddr == 0)
return;
madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
if (madt == NULL) {
printf("Unable to map the MADT, not starting APs\n");
return;
}
/* Boot CPU is always 0 */
cpuid = 1;
acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
madt_handler, &cpuid);
acpi_unmap_table(madt);
#if MAXMEMDOM > 1
acpi_pxm_set_cpu_locality();
#endif
}
#endif
#ifdef FDT
/*
* Failure is indicated by failing to populate *release_addr.
*/
static void
populate_release_addr(phandle_t node, vm_paddr_t *release_addr)
{
pcell_t buf[2];
if (OF_getencprop(node, "cpu-release-addr", buf, sizeof(buf)) !=
sizeof(buf))
return;
*release_addr = (((uintptr_t)buf[0] << 32) | buf[1]);
}
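/*
 * "cpu-release-addr" is handled here as a 64-bit physical address
 * encoded as two 32-bit cells, high word first, hence the
 * shift-and-or above.
 */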
-static boolean_t
+static bool
start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
uint64_t target_cpu;
vm_paddr_t release_addr;
char *enable_method;
int domain;
int cpuid;
target_cpu = reg[0];
if (addr_size == 2) {
target_cpu <<= 32;
target_cpu |= reg[1];
}
if (is_boot_cpu(target_cpu))
cpuid = 0;
else
cpuid = fdt_cpuid;
/*
* If PSCI is present, we'll always use that -- the cpu_on method is
* mandated in both v0.1 and v0.2. We'll check the enable-method if
* we don't have PSCI and use spin table if it's provided.
*/
release_addr = 0;
if (!psci_present && cpuid != 0) {
if (OF_getprop_alloc(node, "enable-method",
(void **)&enable_method) <= 0)
- return (FALSE);
+ return (false);
if (strcmp(enable_method, "spin-table") != 0) {
OF_prop_free(enable_method);
- return (FALSE);
+ return (false);
}
OF_prop_free(enable_method);
populate_release_addr(node, &release_addr);
if (release_addr == 0) {
printf("Failed to fetch release address for CPU %u",
cpuid);
- return (FALSE);
+ return (false);
}
}
if (!start_cpu(cpuid, target_cpu, 0, release_addr))
- return (FALSE);
+ return (false);
/*
* Don't increment for the boot CPU, its CPU ID is reserved.
*/
if (!is_boot_cpu(target_cpu))
fdt_cpuid++;
/* Try to read the numa node of this cpu */
if (vm_ndomains == 1 ||
OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
domain = 0;
cpuid_to_pcpu[cpuid]->pc_domain = domain;
if (domain < MAXMEMDOM)
CPU_SET(cpuid, &cpuset_domain[domain]);
- return (TRUE);
+ return (true);
}
static void
cpu_init_fdt(void)
{
phandle_t node;
int i;
node = OF_peer(0);
for (i = 0; fdt_quirks[i].compat != NULL; i++) {
if (ofw_bus_node_is_compatible(node,
fdt_quirks[i].compat) != 0) {
mp_quirks = fdt_quirks[i].quirks;
}
}
fdt_cpuid = 1;
ofw_cpu_early_foreach(start_cpu_fdt, true);
}
#endif
/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
uint64_t mpidr;
mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
/* CPU 0 is always boot CPU. */
CPU_SET(0, &all_cpus);
mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
cpuid_to_pcpu[0]->pc_mpidr = mpidr;
switch(arm64_bus_method) {
#ifdef DEV_ACPI
case ARM64_BUS_ACPI:
mp_quirks = MP_QUIRK_CPULIST;
cpu_init_acpi();
break;
#endif
#ifdef FDT
case ARM64_BUS_FDT:
cpu_init_fdt();
break;
#endif
default:
break;
}
}
/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{
}
#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
u_int *cores = arg;
switch(entry->Type) {
case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
(*cores)++;
break;
default:
break;
}
}
static u_int
cpu_count_acpi(void)
{
ACPI_TABLE_MADT *madt;
vm_paddr_t physaddr;
u_int cores;
physaddr = acpi_find_table(ACPI_SIG_MADT);
if (physaddr == 0)
return (0);
madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
if (madt == NULL) {
printf("Unable to map the MADT, not starting APs\n");
return (0);
}
cores = 0;
acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
cpu_count_acpi_handler, &cores);
acpi_unmap_table(madt);
return (cores);
}
#endif
void
cpu_mp_setmaxid(void)
{
int cores;
mp_ncpus = 1;
mp_maxid = 0;
switch(arm64_bus_method) {
#ifdef DEV_ACPI
case ARM64_BUS_ACPI:
cores = cpu_count_acpi();
if (cores > 0) {
cores = MIN(cores, MAXCPU);
if (bootverbose)
printf("Found %d CPUs in the ACPI tables\n",
cores);
mp_ncpus = cores;
mp_maxid = cores - 1;
}
break;
#endif
#ifdef FDT
case ARM64_BUS_FDT:
cores = ofw_cpu_early_foreach(NULL, false);
if (cores > 0) {
cores = MIN(cores, MAXCPU);
if (bootverbose)
printf("Found %d CPUs in the device tree\n",
cores);
mp_ncpus = cores;
mp_maxid = cores - 1;
}
break;
#endif
default:
if (bootverbose)
printf("No CPU data, limiting to 1 core\n");
break;
}
if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
if (cores > 0 && cores < mp_ncpus) {
mp_ncpus = cores;
mp_maxid = cores - 1;
}
}
}
/*
* Lookup IPI source.
*/
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{
if (ipi >= INTR_IPI_COUNT)
panic("%s: no such IPI %u", __func__, ipi);
return (&ipi_sources[ipi]);
}
/*
 * Interrupt controller dispatch function for IPIs. It should
 * be called straight from the interrupt controller, when the
 * associated interrupt source is learned. Or from anybody who has
 * an interrupt source mapped.
 */
void
intr_ipi_dispatch(u_int ipi)
{
struct intr_ipi *ii;
ii = intr_ipi_lookup(ipi);
if (ii->ii_count == NULL)
panic("%s: not setup IPI %u", __func__, ipi);
intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));
ii->ii_handler(ii->ii_handler_arg);
}
#ifdef notyet
/*
* Map IPI into interrupt controller.
*
* Not SMP coherent.
*/
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
boolean_t is_percpu;
int error;
if (ipi >= INTR_IPI_COUNT)
panic("%s: no such IPI %u", __func__, ipi);
KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
isrc->isrc_type = INTR_ISRCT_NAMESPACE;
isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
isrc->isrc_nspc_num = ipi_next_num;
error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
if (error == 0) {
isrc->isrc_dev = intr_irq_root_dev;
ipi_next_num++;
}
return (error);
}
/*
* Setup IPI handler to interrupt source.
*
* Note that there could be more ways how to send and receive IPIs
* on a platform like fast interrupts for example. In that case,
* one can call this function with ASIF_NOALLOC flag set and then
* call intr_ipi_dispatch() when appropriate.
*
* Not SMP coherent.
*/
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
void *arg, u_int flags)
{
struct intr_irqsrc *isrc;
int error;
if (filter == NULL)
return (EINVAL);
isrc = intr_ipi_lookup(ipi);
if (isrc->isrc_ipifilter != NULL)
return (EEXIST);
if ((flags & AISHF_NOALLOC) == 0) {
error = ipi_map(isrc, ipi);
if (error != 0)
return (error);
}
isrc->isrc_ipifilter = filter;
isrc->isrc_arg = arg;
isrc->isrc_handlers = 1;
isrc->isrc_count = intr_ipi_setup_counters(name);
isrc->isrc_index = 0; /* it should not be used in IPI case */
if (isrc->isrc_dev != NULL) {
PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
}
return (0);
}
#endif
/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
cpuset_t cpus;
cpus = all_cpus;
CPU_CLR(PCPU_GET(cpuid), &cpus);
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
intr_ipi_send(cpus, ipi);
}
void
ipi_cpu(int cpu, u_int ipi)
{
cpuset_t cpus;
CPU_ZERO(&cpus);
CPU_SET(cpu, &cpus);
CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
intr_ipi_send(cpus, ipi);
}
void
ipi_selected(cpuset_t cpus, u_int ipi)
{
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
intr_ipi_send(cpus, ipi);
}
diff --git a/sys/dev/fdt/fdt_arm_platform.c b/sys/dev/fdt/fdt_arm_platform.c
index bc761f6c231a..0b36a10138bf 100644
--- a/sys/dev/fdt/fdt_arm_platform.c
+++ b/sys/dev/fdt/fdt_arm_platform.c
@@ -1,107 +1,107 @@
/*-
* Copyright (c) 2013 Andrew Turner
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include "platform_if.h"
#define FDT_PLATFORM(plat) \
((fdt_platform_def_t *)(plat)->cls->baseclasses[0])
#if defined(SMP)
static platform_mp_setmaxid_t fdt_platform_mp_setmaxid;
#endif
static int
fdt_platform_probe(platform_t plat)
{
const char *compat;
phandle_t root;
/*
* TODO: Make these KASSERTs, we should only be here if we
* are using the FDT platform magic.
*/
if (plat->cls == NULL || FDT_PLATFORM(plat) == NULL)
return 1;
/* Is the device compatible? */
root = OF_finddevice("/");
compat = FDT_PLATFORM(plat)->fdt_compatible;
if (ofw_bus_node_is_compatible(root, compat) != 0)
return 0;
/* Not compatible, return an error */
return 1;
}
#if defined(SMP)
-static boolean_t
+static bool
fdt_platform_maxid(u_int id, phandle_t node, u_int addr_cells, pcell_t *reg)
{
if (mp_maxid < id)
mp_maxid = id;
return (true);
}
static void
fdt_platform_mp_setmaxid(platform_t plat)
{
mp_maxid = PCPU_GET(cpuid);
mp_ncpus = ofw_cpu_early_foreach(fdt_platform_maxid, true);
if (mp_ncpus < 1)
mp_ncpus = 1;
mp_ncpus = MIN(mp_ncpus, MAXCPU);
}
#endif
platform_method_t fdt_platform_methods[] = {
PLATFORMMETHOD(platform_probe, fdt_platform_probe),
#if defined(SMP)
PLATFORMMETHOD(platform_mp_setmaxid, fdt_platform_mp_setmaxid),
#endif
PLATFORMMETHOD_END
};
diff --git a/sys/dev/ofw/ofw_cpu.h b/sys/dev/ofw/ofw_cpu.h
index d96991946237..1c6d7e766040 100644
--- a/sys/dev/ofw/ofw_cpu.h
+++ b/sys/dev/ofw/ofw_cpu.h
@@ -1,37 +1,37 @@
/*-
* Copyright (c) 2015 The FreeBSD Foundation
*
* This software was developed by Andrew Turner under
* sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _DEV_OFW_OFW_CPU_H_
#define _DEV_OFW_OFW_CPU_H_
-typedef boolean_t (*ofw_cpu_foreach_cb)(u_int, phandle_t, u_int, pcell_t *);
+typedef bool (*ofw_cpu_foreach_cb)(u_int, phandle_t, u_int, pcell_t *);
int ofw_cpu_early_foreach(ofw_cpu_foreach_cb, boolean_t);
#endif /* _DEV_OFW_OFW_CPU_H_ */
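As a sketch of the contract this typedef now expresses (the names below
are illustrative only, not part of the tree): judging from its callers
in this patch, ofw_cpu_early_foreach() invokes the callback once per
cpu node and returns the number of nodes for which it returned true.

	static bool
	example_cpu_cb(u_int id, phandle_t node, u_int addr_cells, pcell_t *reg)
	{
		/* Inspect or start the CPU; true counts it, false skips it. */
		return (true);
	}

	/* Early in boot, before device attach: */
	ncpus = ofw_cpu_early_foreach(example_cpu_cb, true);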
diff --git a/sys/riscv/riscv/mp_machdep.c b/sys/riscv/riscv/mp_machdep.c
index e7e862d3772b..799f07036c00 100644
--- a/sys/riscv/riscv/mp_machdep.c
+++ b/sys/riscv/riscv/mp_machdep.c
@@ -1,566 +1,566 @@
/*-
* Copyright (c) 2015 The FreeBSD Foundation
* Copyright (c) 2016 Ruslan Bukin
* All rights reserved.
*
* Portions of this software were developed by Andrew Turner under
* sponsorship from the FreeBSD Foundation.
*
* Portions of this software were developed by SRI International and the
* University of Cambridge Computer Laboratory under DARPA/AFRL contract
* FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Portions of this software were developed by the University of Cambridge
* Computer Laboratory as part of the CTSRD Project, with support from the
* UK Higher Education Innovation Fund (HEIF).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef FDT
#include
#include
#endif
#define MP_BOOTSTACK_SIZE (kstack_pages * PAGE_SIZE)
uint32_t __riscv_boot_ap[MAXCPU];
static enum {
CPUS_UNKNOWN,
#ifdef FDT
CPUS_FDT,
#endif
} cpu_enum_method;
static device_identify_t riscv64_cpu_identify;
static device_probe_t riscv64_cpu_probe;
static device_attach_t riscv64_cpu_attach;
static int ipi_handler(void *);
struct pcb stoppcbs[MAXCPU];
extern uint32_t boot_hart;
extern cpuset_t all_harts;
#ifdef INVARIANTS
static uint32_t cpu_reg[MAXCPU][2];
#endif
static device_t cpu_list[MAXCPU];
void mpentry(u_long hartid);
void init_secondary(uint64_t);
static struct mtx ap_boot_mtx;
/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];
/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;
/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;
/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];
static device_method_t riscv64_cpu_methods[] = {
/* Device interface */
DEVMETHOD(device_identify, riscv64_cpu_identify),
DEVMETHOD(device_probe, riscv64_cpu_probe),
DEVMETHOD(device_attach, riscv64_cpu_attach),
DEVMETHOD_END
};
static driver_t riscv64_cpu_driver = {
"riscv64_cpu",
riscv64_cpu_methods,
0
};
DRIVER_MODULE(riscv64_cpu, cpu, riscv64_cpu_driver, 0, 0);
static void
riscv64_cpu_identify(driver_t *driver, device_t parent)
{
if (device_find_child(parent, "riscv64_cpu", -1) != NULL)
return;
if (BUS_ADD_CHILD(parent, 0, "riscv64_cpu", -1) == NULL)
device_printf(parent, "add child failed\n");
}
static int
riscv64_cpu_probe(device_t dev)
{
u_int cpuid;
cpuid = device_get_unit(dev);
if (cpuid >= MAXCPU || cpuid > mp_maxid)
return (EINVAL);
device_quiet(dev);
return (0);
}
static int
riscv64_cpu_attach(device_t dev)
{
const uint32_t *reg;
size_t reg_size;
u_int cpuid;
int i;
cpuid = device_get_unit(dev);
if (cpuid >= MAXCPU || cpuid > mp_maxid)
return (EINVAL);
KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid));
reg = cpu_get_cpuid(dev, &reg_size);
if (reg == NULL)
return (EINVAL);
if (bootverbose) {
device_printf(dev, "register <");
for (i = 0; i < reg_size; i++)
printf("%s%x", (i == 0) ? "" : " ", reg[i]);
printf(">\n");
}
/* Set the device to start it later */
cpu_list[cpuid] = dev;
return (0);
}
static void
release_aps(void *dummy __unused)
{
cpuset_t mask;
int i;
if (mp_ncpus == 1)
return;
/* Setup the IPI handler */
riscv_setup_ipihandler(ipi_handler);
atomic_store_rel_int(&aps_ready, 1);
/* Wake up the other CPUs */
mask = all_harts;
CPU_CLR(boot_hart, &mask);
printf("Release APs\n");
sbi_send_ipi(mask.__bits);
for (i = 0; i < 2000; i++) {
if (atomic_load_acq_int(&smp_started))
return;
DELAY(1000);
}
printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
void
init_secondary(uint64_t hart)
{
struct pcpu *pcpup;
u_int cpuid;
/* Renumber this cpu */
cpuid = hart;
if (cpuid < boot_hart)
cpuid += mp_maxid + 1;
cpuid -= boot_hart;
/* Setup the pcpu pointer */
pcpup = &__pcpu[cpuid];
__asm __volatile("mv tp, %0" :: "r"(pcpup));
/* Workaround: make sure wfi doesn't halt the hart */
csr_set(sie, SIE_SSIE);
csr_set(sip, SIE_SSIE);
/* Signal the BSP and spin until it has released all APs. */
atomic_add_int(&aps_started, 1);
while (!atomic_load_int(&aps_ready))
__asm __volatile("wfi");
/* Initialize curthread */
KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
pcpup->pc_curthread = pcpup->pc_idlethread;
schedinit_ap();
/*
* Identify current CPU. This is necessary to setup
* affinity registers and to provide support for
* runtime chip identification.
*/
identify_cpu();
/* Enable software interrupts */
riscv_unmask_ipi();
#ifndef EARLY_AP_STARTUP
/* Start per-CPU event timers. */
cpu_initclocks_ap();
#endif
/* Enable external (PLIC) interrupts */
csr_set(sie, SIE_SEIE);
/* Activate this hart in the kernel pmap. */
CPU_SET_ATOMIC(hart, &kernel_pmap->pm_active);
/* Activate process 0's pmap. */
pmap_activate_boot(vmspace_pmap(proc0.p_vmspace));
mtx_lock_spin(&ap_boot_mtx);
atomic_add_rel_32(&smp_cpus, 1);
if (smp_cpus == mp_ncpus) {
/* enable IPI's, tlb shootdown, freezes etc */
atomic_store_rel_int(&smp_started, 1);
}
mtx_unlock_spin(&ap_boot_mtx);
/* Enter the scheduler */
sched_ap_entry();
panic("scheduler returned us to init_secondary");
/* NOTREACHED */
}
static void
smp_after_idle_runnable(void *arg __unused)
{
int cpu;
if (mp_ncpus == 1)
return;
KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));
/*
* Wait for all APs to handle an interrupt. After that, we know that
* the APs have entered the scheduler at least once, so the boot stacks
* are safe to free.
*/
smp_rendezvous(smp_no_rendezvous_barrier, NULL,
smp_no_rendezvous_barrier, NULL);
for (cpu = 1; cpu <= mp_maxid; cpu++) {
if (bootstacks[cpu] != NULL)
kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE);
}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
smp_after_idle_runnable, NULL);
static int
ipi_handler(void *arg)
{
u_int ipi_bitmap;
u_int cpu, ipi;
int bit;
csr_clear(sip, SIP_SSIP);
cpu = PCPU_GET(cpuid);
mb();
ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
if (ipi_bitmap == 0)
return (FILTER_HANDLED);
while ((bit = ffs(ipi_bitmap))) {
bit = (bit - 1);
ipi = (1 << bit);
ipi_bitmap &= ~ipi;
mb();
switch (ipi) {
case IPI_AST:
CTR0(KTR_SMP, "IPI_AST");
break;
case IPI_PREEMPT:
CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
sched_preempt(curthread);
break;
case IPI_RENDEZVOUS:
CTR0(KTR_SMP, "IPI_RENDEZVOUS");
smp_rendezvous_action();
break;
case IPI_STOP:
case IPI_STOP_HARD:
CTR0(KTR_SMP, (ipi == IPI_STOP) ? "IPI_STOP" : "IPI_STOP_HARD");
savectx(&stoppcbs[cpu]);
/* Indicate we are stopped */
CPU_SET_ATOMIC(cpu, &stopped_cpus);
/* Wait for restart */
while (!CPU_ISSET(cpu, &started_cpus))
cpu_spinwait();
CPU_CLR_ATOMIC(cpu, &started_cpus);
CPU_CLR_ATOMIC(cpu, &stopped_cpus);
CTR0(KTR_SMP, "IPI_STOP (restart)");
/*
* The kernel debugger might have set a breakpoint,
* so flush the instruction cache.
*/
fence_i();
break;
case IPI_HARDCLOCK:
CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
hardclockintr();
break;
default:
panic("Unknown IPI %#0x on cpu %d", ipi, curcpu);
}
}
return (FILTER_HANDLED);
}
struct cpu_group *
cpu_topo(void)
{
return (smp_topo_none());
}
/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{
return (mp_ncpus > 1);
}
#ifdef FDT
-static boolean_t
+static bool
cpu_check_mmu(u_int id __unused, phandle_t node, u_int addr_size __unused,
pcell_t *reg __unused)
{
char type[32];
/* Check if this hart supports MMU. */
if (OF_getprop(node, "mmu-type", (void *)type, sizeof(type)) == -1 ||
strncmp(type, "riscv,none", 10) == 0)
- return (0);
+ return (false);
- return (1);
+ return (true);
}
-static boolean_t
+static bool
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
struct pcpu *pcpup;
vm_paddr_t start_addr;
uint64_t hart;
u_int cpuid;
int naps;
int error;
if (!cpu_check_mmu(id, node, addr_size, reg))
- return (0);
+ return (false);
KASSERT(id < MAXCPU, ("Too many CPUs"));
KASSERT(addr_size == 1 || addr_size == 2, ("Invalid register size"));
#ifdef INVARIANTS
cpu_reg[id][0] = reg[0];
if (addr_size == 2)
cpu_reg[id][1] = reg[1];
#endif
hart = reg[0];
if (addr_size == 2) {
hart <<= 32;
hart |= reg[1];
}
KASSERT(hart < MAXCPU, ("Too many harts."));
/* We are already running on this cpu */
if (hart == boot_hart)
- return (1);
+ return (true);
/*
* Rotate the CPU IDs to put the boot CPU as CPU 0.
* We keep the other CPUs ordered.
*/
cpuid = hart;
if (cpuid < boot_hart)
cpuid += mp_maxid + 1;
cpuid -= boot_hart;
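/*
 * Worked example of the rotation: with boot_hart == 2 and
 * mp_maxid == 3, harts 0, 1 and 3 become cpuids 2, 3 and 1
 * respectively; the boot hart itself is always cpu 0.
 */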
/* Check if we are able to start this cpu */
if (cpuid > mp_maxid)
- return (0);
+ return (false);
/*
* Depending on the SBI implementation, APs are waiting either in
* locore.S or to be activated explicitly, via SBI call.
*/
if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0) {
start_addr = pmap_kextract((vm_offset_t)mpentry);
error = sbi_hsm_hart_start(hart, start_addr, 0);
if (error != 0) {
mp_ncpus--;
/* Send a warning to the user and continue. */
printf("AP %u (hart %lu) failed to start, error %d\n",
cpuid, hart, error);
- return (0);
+ return (false);
}
}
pcpup = &__pcpu[cpuid];
pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
pcpup->pc_hart = hart;
dpcpu[cpuid - 1] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
dpcpu_init(dpcpu[cpuid - 1], cpuid);
bootstacks[cpuid] = kmem_malloc(MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);
naps = atomic_load_int(&aps_started);
bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;
printf("Starting CPU %u (hart %lx)\n", cpuid, hart);
atomic_store_32(&__riscv_boot_ap[hart], 1);
/* Wait for the AP to switch to its boot stack. */
while (atomic_load_int(&aps_started) < naps + 1)
cpu_spinwait();
CPU_SET(cpuid, &all_cpus);
CPU_SET(hart, &all_harts);
- return (1);
+ return (true);
}
#endif
/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
CPU_SET(0, &all_cpus);
CPU_SET(boot_hart, &all_harts);
switch(cpu_enum_method) {
#ifdef FDT
case CPUS_FDT:
ofw_cpu_early_foreach(cpu_init_fdt, true);
break;
#endif
case CPUS_UNKNOWN:
break;
}
}
/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{
}
void
cpu_mp_setmaxid(void)
{
int cores;
#ifdef FDT
cores = ofw_cpu_early_foreach(cpu_check_mmu, true);
if (cores > 0) {
cores = MIN(cores, MAXCPU);
if (bootverbose)
printf("Found %d CPUs in the device tree\n", cores);
mp_ncpus = cores;
mp_maxid = cores - 1;
cpu_enum_method = CPUS_FDT;
} else
#endif
{
if (bootverbose)
printf("No CPU data, limiting to 1 core\n");
mp_ncpus = 1;
mp_maxid = 0;
}
if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
if (cores > 0 && cores < mp_ncpus) {
mp_ncpus = cores;
mp_maxid = cores - 1;
}
}
}