diff --git a/sys/arm/arm/pmu_fdt.c b/sys/arm/arm/pmu_fdt.c
index 2e03fc98bfe0..6590e0659d82 100644
--- a/sys/arm/arm/pmu_fdt.c
+++ b/sys/arm/arm/pmu_fdt.c
@@ -1,239 +1,239 @@
/*-
* Copyright (c) 2015 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pmu.h"
static struct ofw_compat_data compat_data[] = {
{"arm,armv8-pmuv3", 1},
{"arm,cortex-a77-pmu", 1},
{"arm,cortex-a76-pmu", 1},
{"arm,cortex-a75-pmu", 1},
{"arm,cortex-a73-pmu", 1},
{"arm,cortex-a72-pmu", 1},
{"arm,cortex-a65-pmu", 1},
{"arm,cortex-a57-pmu", 1},
{"arm,cortex-a55-pmu", 1},
{"arm,cortex-a53-pmu", 1},
{"arm,cortex-a34-pmu", 1},
{"arm,cortex-a17-pmu", 1},
{"arm,cortex-a15-pmu", 1},
{"arm,cortex-a12-pmu", 1},
{"arm,cortex-a9-pmu", 1},
{"arm,cortex-a8-pmu", 1},
{"arm,cortex-a7-pmu", 1},
{"arm,cortex-a5-pmu", 1},
{"arm,arm11mpcore-pmu", 1},
{"arm,arm1176-pmu", 1},
{"arm,arm1136-pmu", 1},
{"qcom,krait-pmu", 1},
{NULL, 0}
};
static int
pmu_fdt_probe(device_t dev)
{
if (!ofw_bus_status_okay(dev))
return (ENXIO);
if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) {
device_set_desc(dev, "Performance Monitoring Unit");
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
}
static int
pmu_parse_affinity(device_t dev, struct pmu_softc *sc, struct pmu_intr *irq,
phandle_t xref, uint32_t mpidr)
{
struct pcpu *pcpu;
int i, err;
if (xref != 0) {
err = OF_getencprop(OF_node_from_xref(xref), "reg", &mpidr,
sizeof(mpidr));
if (err < 0) {
device_printf(dev, "missing 'reg' property\n");
return (ENXIO);
}
}
for (i = 0; i < MAXCPU; i++) {
pcpu = pcpu_find(i);
- if (pcpu != NULL && pcpu->pc_mpidr == mpidr) {
+ if (pcpu != NULL && PCPU_GET_MPIDR(pcpu) == mpidr) {
irq->cpuid = i;
return (0);
}
}
device_printf(dev, "Cannot find CPU with MPIDR: 0x%08X\n", mpidr);
return (ENXIO);
}
static int
pmu_parse_intr(device_t dev, struct pmu_softc *sc)
{
bool has_affinity;
phandle_t node, *cpus;
int rid, err, ncpus, i;
node = ofw_bus_get_node(dev);
has_affinity = OF_hasprop(node, "interrupt-affinity");
for (i = 0; i < MAX_RLEN; i++)
sc->irq[i].cpuid = -1;
cpus = NULL;
if (has_affinity) {
ncpus = OF_getencprop_alloc_multi(node, "interrupt-affinity",
sizeof(*cpus), (void **)&cpus);
if (ncpus < 0) {
device_printf(dev,
"Cannot read interrupt affinity property\n");
return (ENXIO);
}
}
/* Process first interrupt */
rid = 0;
sc->irq[0].res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE | RF_SHAREABLE);
if (sc->irq[0].res == NULL) {
device_printf(dev, "Cannot get interrupt\n");
err = ENXIO;
goto done;
}
/* Check if the PMU has one per-CPU interrupt */
if (intr_is_per_cpu(sc->irq[0].res)) {
if (has_affinity) {
device_printf(dev,
"Per CPU interupt have declared affinity\n");
err = ENXIO;
goto done;
}
return (0);
}
/*
* PMU with a set of generic interrupts (one per core).
* Each one must be bound to its exact core.
*/
err = pmu_parse_affinity(dev, sc, sc->irq + 0,
has_affinity ? cpus[0] : 0, 0);
if (err != 0) {
device_printf(dev, "Cannot parse affinity for CPUid: 0\n");
goto done;
}
for (i = 1; i < MAX_RLEN; i++) {
rid = i;
sc->irq[i].res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
&rid, RF_ACTIVE | RF_SHAREABLE);
if (sc->irq[i].res == NULL)
break;
if (intr_is_per_cpu(sc->irq[i].res)) {
device_printf(dev, "Unexpected per CPU interupt\n");
err = ENXIO;
goto done;
}
if (has_affinity && i >= ncpus) {
device_printf(dev, "Missing value in interrupt "
"affinity property\n");
err = ENXIO;
goto done;
}
err = pmu_parse_affinity(dev, sc, sc->irq + i,
has_affinity ? cpus[i] : 0, i);
if (err != 0) {
device_printf(dev,
"Cannot parse affinity for CPUid: %d.\n", i);
goto done;
}
}
err = 0;
done:
OF_prop_free(cpus);
return (err);
}
static int
pmu_fdt_attach(device_t dev)
{
struct pmu_softc *sc;
int err;
sc = device_get_softc(dev);
err = pmu_parse_intr(dev, sc);
if (err != 0)
return (err);
return (pmu_attach(dev));
}
static device_method_t pmu_fdt_methods[] = {
DEVMETHOD(device_probe, pmu_fdt_probe),
DEVMETHOD(device_attach, pmu_fdt_attach),
{ 0, 0 }
};
static driver_t pmu_fdt_driver = {
"pmu",
pmu_fdt_methods,
sizeof(struct pmu_softc),
};
static devclass_t pmu_fdt_devclass;
DRIVER_MODULE(pmu, simplebus, pmu_fdt_driver, pmu_fdt_devclass, 0, 0);
diff --git a/sys/arm/include/pcpu.h b/sys/arm/include/pcpu.h
index 4d609b10bf73..a3a6900f692d 100644
--- a/sys/arm/include/pcpu.h
+++ b/sys/arm/include/pcpu.h
@@ -1,146 +1,148 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 1999 Luoqi Chen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: FreeBSD: src/sys/i386/include/globaldata.h,v 1.27 2001/04/27
* $FreeBSD$
*/
#ifndef _MACHINE_PCPU_H_
#define _MACHINE_PCPU_H_
#ifdef _KERNEL
#include
#include
#define ALT_STACK_SIZE 128
struct vmspace;
#endif /* _KERNEL */
/* Branch predictor hardening method */
#define PCPU_BP_HARDEN_KIND_NONE 0
#define PCPU_BP_HARDEN_KIND_BPIALL 1
#define PCPU_BP_HARDEN_KIND_ICIALLU 2
#define PCPU_MD_FIELDS \
unsigned int pc_vfpsid; \
unsigned int pc_vfpmvfr0; \
unsigned int pc_vfpmvfr1; \
struct pmap *pc_curpmap; \
struct mtx pc_cmap_lock; \
void *pc_cmap1_pte2p; \
void *pc_cmap2_pte2p; \
caddr_t pc_cmap1_addr; \
caddr_t pc_cmap2_addr; \
vm_offset_t pc_qmap_addr; \
void *pc_qmap_pte2p; \
unsigned int pc_dbreg[32]; \
int pc_dbreg_cmd; \
int pc_bp_harden_kind; \
uint32_t pc_original_actlr; \
uint64_t pc_clock; \
uint32_t pc_mpidr; \
char __pad[135]
#ifdef _KERNEL
#define PC_DBREG_CMD_NONE 0
#define PC_DBREG_CMD_LOAD 1
struct pcb;
struct pcpu;
extern struct pcpu *pcpup;
#define CPU_MASK (0xf)
#ifndef SMP
#define get_pcpu() (pcpup)
#else
#define get_pcpu() __extension__ ({ \
int id; \
__asm __volatile("mrc p15, 0, %0, c0, c0, 5" : "=r" (id)); \
(pcpup + (id & CPU_MASK)); \
})
#endif
static inline struct thread *
get_curthread(void)
{
void *ret;
__asm __volatile("mrc p15, 0, %0, c13, c0, 4" : "=r" (ret));
return (ret);
}
static inline void
set_curthread(struct thread *td)
{
__asm __volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (td));
}
static inline void *
get_tls(void)
{
void *tls;
/* TPIDRURW contains the authoritative value. */
__asm __volatile("mrc p15, 0, %0, c13, c0, 2" : "=r" (tls));
return (tls);
}
static inline void
set_tls(void *tls)
{
/*
* Update both TPIDRURW and TPIDRURO. TPIDRURW needs to be written
* first to ensure that a context switch between the two writes will
* still give the desired result of updating both.
*/
__asm __volatile(
"mcr p15, 0, %0, c13, c0, 2\n"
"mcr p15, 0, %0, c13, c0, 3\n"
: : "r" (tls));
}
#define curthread get_curthread()
#define PCPU_GET(member) (get_pcpu()->pc_ ## member)
#define PCPU_ADD(member, value) (get_pcpu()->pc_ ## member += (value))
#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&get_pcpu()->pc_ ## member)
#define PCPU_SET(member,value) (get_pcpu()->pc_ ## member = (value))
+#define PCPU_GET_MPIDR(pc) ((pc)->pc_mpidr)
+
void pcpu0_init(void);
#endif /* _KERNEL */
#endif /* !_MACHINE_PCPU_H_ */
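With both pcpu.h headers providing PCPU_GET_MPIDR(), code shared between 32-bit
and 64-bit ARM (such as pmu_fdt.c above) no longer has to touch pc_mpidr
directly. A minimal sketch of the lookup pattern this enables, using a
hypothetical helper name that is not part of this change:

/*
 * Resolve an affinity-masked MPIDR value to a FreeBSD CPU id by scanning the
 * per-CPU data, the same pattern pmu_parse_affinity() uses above.
 */
static int
mpidr_to_cpuid(uint64_t mpidr)
{
	struct pcpu *pc;
	int i;

	for (i = 0; i < MAXCPU; i++) {
		pc = pcpu_find(i);
		if (pc != NULL && PCPU_GET_MPIDR(pc) == mpidr)
			return (i);
	}
	return (-1);		/* no CPU with this MPIDR */
}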
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index 14d0dcbf6c2f..6e2ec7d39ed2 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -1,1007 +1,1008 @@
/*-
* Copyright (c) 2014 Andrew Turner
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include "opt_acpi.h"
#include "opt_platform.h"
#include "opt_ddb.h"
#include
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef VFP
#include
#endif
#ifdef DEV_ACPI
#include
#include
#endif
#ifdef FDT
#include
#include
#endif
enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;
/*
* XXX: The .bss is assumed to be in the boot CPU NUMA domain. If not, we
* could relocate this, but will need to keep the same virtual address as
* it's referenced by the EARLY_COUNTER macro.
*/
struct pcpu pcpu0;
#if defined(PERTHREAD_SSP)
/*
* The boot SSP canary. Will be replaced with a per-thread canary when
* scheduling has started.
*/
uintptr_t boot_canary = 0x49a2d892bc05a0b1ul;
#endif
static struct trapframe proc0_tf;
int early_boot = 1;
int cold = 1;
static int boot_el;
struct kva_md_info kmi;
int64_t dczva_line_size; /* The size of cache line the dc zva zeroes */
int has_pan;
/*
* Physical address of the EFI System Table. Stashed from the metadata hints
* passed into the kernel and used by the EFI code to call runtime services.
*/
vm_paddr_t efi_systbl_phys;
static struct efi_map_header *efihdr;
/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);
/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;
int (*apei_nmi)(void);
#if defined(PERTHREAD_SSP_WARNING)
static void
print_ssp_warning(void *data __unused)
{
printf("WARNING: Per-thread SSP is enabled but the compiler is too old to support it\n");
}
SYSINIT(ssp_warn, SI_SUB_COPYRIGHT, SI_ORDER_ANY, print_ssp_warning, NULL);
SYSINIT(ssp_warn2, SI_SUB_LAST, SI_ORDER_ANY, print_ssp_warning, NULL);
#endif
static void
pan_setup(void)
{
uint64_t id_aa64mfr1;
id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
has_pan = 1;
}
void
pan_enable(void)
{
/*
* The LLVM integrated assembler doesn't understand the PAN
* PSTATE field. Because of this we need to manually create
* the instruction in an asm block. This is equivalent to:
* msr pan, #1
*
* This sets the PAN bit, stopping the kernel from accessing
* memory when userspace can also access it unless the kernel
* uses the userspace load/store instructions.
*/
if (has_pan) {
WRITE_SPECIALREG(sctlr_el1,
READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
}
}
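For reference, the encoding arithmetic behind the hand-assembled instruction
above works out as follows (an illustrative sketch of the A64 MSR-immediate
form; MSR_PAN_IMM is a hypothetical macro, not part of this change):

/*
 * MSR (immediate) carries the PSTATE immediate in CRm (bits 11:8); for the
 * PAN field op1 = 0 and op2 = 0b100, so 0xd500409f is the CRm = 0 base
 * opcode and OR-ing in (imm << 8) fills the immediate.
 */
#define	MSR_PAN_IMM(imm)	(0xd500409fU | ((uint32_t)(imm) << 8))
/* MSR_PAN_IMM(1) == 0xd500419f, i.e. "msr pan, #1" as emitted above. */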
bool
has_hyp(void)
{
return (boot_el == 2);
}
static void
cpu_startup(void *dummy)
{
vm_paddr_t size;
int i;
printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
ptoa((uintmax_t)realmem) / 1024 / 1024);
if (bootverbose) {
printf("Physical memory chunk(s):\n");
for (i = 0; phys_avail[i + 1] != 0; i += 2) {
size = phys_avail[i + 1] - phys_avail[i];
printf("%#016jx - %#016jx, %ju bytes (%ju pages)\n",
(uintmax_t)phys_avail[i],
(uintmax_t)phys_avail[i + 1] - 1,
(uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
}
}
printf("avail memory = %ju (%ju MB)\n",
ptoa((uintmax_t)vm_free_count()),
ptoa((uintmax_t)vm_free_count()) / 1024 / 1024);
undef_init();
install_cpu_errata();
vm_ksubmap_init(&kmi);
bufinit();
vm_pager_bufferinit();
}
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
static void
late_ifunc_resolve(void *dummy __unused)
{
link_elf_late_ireloc();
}
SYSINIT(late_ifunc_resolve, SI_SUB_CPU, SI_ORDER_ANY, late_ifunc_resolve, NULL);
int
cpu_idle_wakeup(int cpu)
{
return (0);
}
void
cpu_idle(int busy)
{
spinlock_enter();
if (!busy)
cpu_idleclock();
if (!sched_runnable())
__asm __volatile(
"dsb sy \n"
"wfi \n");
if (!busy)
cpu_activeclock();
spinlock_exit();
}
void
cpu_halt(void)
{
/* We should have shut down by now; if not, enter a low-power sleep */
intr_disable();
while (1) {
__asm __volatile("wfi");
}
}
/*
* Flush the D-cache for non-DMA I/O so that the I-cache can
* be made coherent later.
*/
void
cpu_flush_dcache(void *ptr, size_t len)
{
/* ARM64TODO TBD */
}
/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
struct pcpu *pc;
pc = pcpu_find(cpu_id);
if (pc == NULL || rate == NULL)
return (EINVAL);
if (pc->pc_clock == 0)
return (EOPNOTSUPP);
*rate = pc->pc_clock;
return (0);
}
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
pcpu->pc_acpi_id = 0xffffffff;
- pcpu->pc_mpidr = 0xffffffff;
+ pcpu->pc_mpidr_low = 0xffffffff;
+ pcpu->pc_mpidr_high = 0xffffffff;
}
void
spinlock_enter(void)
{
struct thread *td;
register_t daif;
td = curthread;
if (td->td_md.md_spinlock_count == 0) {
daif = intr_disable();
td->td_md.md_spinlock_count = 1;
td->td_md.md_saved_daif = daif;
critical_enter();
} else
td->td_md.md_spinlock_count++;
}
void
spinlock_exit(void)
{
struct thread *td;
register_t daif;
td = curthread;
daif = td->td_md.md_saved_daif;
td->td_md.md_spinlock_count--;
if (td->td_md.md_spinlock_count == 0) {
critical_exit();
intr_restore(daif);
}
}
/*
* Construct a PCB from a trapframe. This is called from kdb_trap() where
* we want to start a backtrace from the function that caused us to enter
* the debugger. We have the context in the trapframe, but base the trace
* on the PCB. The PCB doesn't have to be perfect, as long as it contains
* enough for a backtrace.
*/
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
int i;
for (i = 0; i < nitems(pcb->pcb_x); i++)
pcb->pcb_x[i] = tf->tf_x[i];
/* NB: pcb_lr is the PC, see PC_REGS() in db_machdep.h */
pcb->pcb_lr = tf->tf_elr;
pcb->pcb_sp = tf->tf_sp;
}
static void
init_proc0(vm_offset_t kstack)
{
struct pcpu *pcpup;
pcpup = cpuid_to_pcpu[0];
MPASS(pcpup != NULL);
proc_linkup0(&proc0, &thread0);
thread0.td_kstack = kstack;
thread0.td_kstack_pages = KSTACK_PAGES;
#if defined(PERTHREAD_SSP)
thread0.td_md.md_canary = boot_canary;
#endif
thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
thread0.td_kstack_pages * PAGE_SIZE) - 1;
thread0.td_pcb->pcb_fpflags = 0;
thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
thread0.td_frame = &proc0_tf;
pcpup->pc_curpcb = thread0.td_pcb;
/*
* Unmask SError exceptions. They are used to signal a RAS failure,
* or other hardware error.
*/
serror_enable();
}
/*
* Get an address to be used to write to kernel data that may be mapped
* read-only, e.g. to patch kernel code.
*/
bool
arm64_get_writable_addr(vm_offset_t addr, vm_offset_t *out)
{
vm_paddr_t pa;
/* Check if the page is writable */
if (PAR_SUCCESS(arm64_address_translate_s1e1w(addr))) {
*out = addr;
return (true);
}
/*
* Find the physical address of the given page.
*/
if (!pmap_klookup(addr, &pa)) {
return (false);
}
/*
* If it is within the DMAP region and is writable use that.
*/
if (PHYS_IN_DMAP(pa)) {
addr = PHYS_TO_DMAP(pa);
if (PAR_SUCCESS(arm64_address_translate_s1e1w(addr))) {
*out = addr;
return (true);
}
}
return (false);
}
typedef struct {
uint32_t type;
uint64_t phys_start;
uint64_t virt_start;
uint64_t num_pages;
uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;
typedef void (*efi_map_entry_cb)(struct efi_md *);
static void
foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb)
{
struct efi_md *map, *p;
size_t efisz;
int ndesc, i;
/*
* Memory map data provided by UEFI via the GetMemoryMap
* Boot Services API.
*/
efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
map = (struct efi_md *)((uint8_t *)efihdr + efisz);
if (efihdr->descriptor_size == 0)
return;
ndesc = efihdr->memory_size / efihdr->descriptor_size;
for (i = 0, p = map; i < ndesc; i++,
p = efi_next_descriptor(p, efihdr->descriptor_size)) {
cb(p);
}
}
static void
exclude_efi_map_entry(struct efi_md *p)
{
switch (p->md_type) {
case EFI_MD_TYPE_CODE:
case EFI_MD_TYPE_DATA:
case EFI_MD_TYPE_BS_CODE:
case EFI_MD_TYPE_BS_DATA:
case EFI_MD_TYPE_FREE:
/*
* We're allowed to use any entry with these types.
*/
break;
default:
physmem_exclude_region(p->md_phys, p->md_pages * EFI_PAGE_SIZE,
EXFLAG_NOALLOC);
}
}
static void
exclude_efi_map_entries(struct efi_map_header *efihdr)
{
foreach_efi_map_entry(efihdr, exclude_efi_map_entry);
}
static void
add_efi_map_entry(struct efi_md *p)
{
switch (p->md_type) {
case EFI_MD_TYPE_RECLAIM:
/*
* The recommended location for ACPI tables. Map into the
* DMAP so we can access them from userspace via /dev/mem.
*/
case EFI_MD_TYPE_RT_CODE:
/*
* Some UEFI implementations put the system table in the
* runtime code section. Include it in the DMAP; it will
* be excluded from phys_avail later.
*/
case EFI_MD_TYPE_RT_DATA:
/*
* Runtime data will be excluded after the DMAP
* region is created to stop it from being added
* to phys_avail.
*/
case EFI_MD_TYPE_CODE:
case EFI_MD_TYPE_DATA:
case EFI_MD_TYPE_BS_CODE:
case EFI_MD_TYPE_BS_DATA:
case EFI_MD_TYPE_FREE:
/*
* We're allowed to use any entry with these types.
*/
physmem_hardware_region(p->md_phys,
p->md_pages * EFI_PAGE_SIZE);
break;
}
}
static void
add_efi_map_entries(struct efi_map_header *efihdr)
{
foreach_efi_map_entry(efihdr, add_efi_map_entry);
}
static void
print_efi_map_entry(struct efi_md *p)
{
const char *type;
static const char *types[] = {
"Reserved",
"LoaderCode",
"LoaderData",
"BootServicesCode",
"BootServicesData",
"RuntimeServicesCode",
"RuntimeServicesData",
"ConventionalMemory",
"UnusableMemory",
"ACPIReclaimMemory",
"ACPIMemoryNVS",
"MemoryMappedIO",
"MemoryMappedIOPortSpace",
"PalCode",
"PersistentMemory"
};
if (p->md_type < nitems(types))
type = types[p->md_type];
else
type = "";
printf("%23s %012lx %12p %08lx ", type, p->md_phys,
p->md_virt, p->md_pages);
if (p->md_attr & EFI_MD_ATTR_UC)
printf("UC ");
if (p->md_attr & EFI_MD_ATTR_WC)
printf("WC ");
if (p->md_attr & EFI_MD_ATTR_WT)
printf("WT ");
if (p->md_attr & EFI_MD_ATTR_WB)
printf("WB ");
if (p->md_attr & EFI_MD_ATTR_UCE)
printf("UCE ");
if (p->md_attr & EFI_MD_ATTR_WP)
printf("WP ");
if (p->md_attr & EFI_MD_ATTR_RP)
printf("RP ");
if (p->md_attr & EFI_MD_ATTR_XP)
printf("XP ");
if (p->md_attr & EFI_MD_ATTR_NV)
printf("NV ");
if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
printf("MORE_RELIABLE ");
if (p->md_attr & EFI_MD_ATTR_RO)
printf("RO ");
if (p->md_attr & EFI_MD_ATTR_RT)
printf("RUNTIME");
printf("\n");
}
static void
print_efi_map_entries(struct efi_map_header *efihdr)
{
printf("%23s %12s %12s %8s %4s\n",
"Type", "Physical", "Virtual", "#Pages", "Attr");
foreach_efi_map_entry(efihdr, print_efi_map_entry);
}
#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
vm_offset_t dtbp;
dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
/*
* In case the device tree blob was not retrieved (from metadata) try
* to use the statically embedded one.
*/
if (dtbp == 0)
dtbp = (vm_offset_t)&fdt_static_dtb;
#endif
if (dtbp == (vm_offset_t)NULL) {
#ifndef TSLOG
printf("ERROR loading DTB\n");
#endif
return;
}
if (OF_install(OFW_FDT, 0) == FALSE)
panic("Cannot install FDT");
if (OF_init((void *)dtbp) != 0)
panic("OF_init failed with the found device tree");
parse_fdt_bootargs();
}
#endif
static bool
bus_probe(void)
{
bool has_acpi, has_fdt;
char *order, *env;
has_acpi = has_fdt = false;
#ifdef FDT
has_fdt = (OF_peer(0) != 0);
#endif
#ifdef DEV_ACPI
has_acpi = (AcpiOsGetRootPointer() != 0);
#endif
env = kern_getenv("kern.cfg.order");
if (env != NULL) {
order = env;
while (order != NULL) {
if (has_acpi &&
strncmp(order, "acpi", 4) == 0 &&
(order[4] == ',' || order[4] == '\0')) {
arm64_bus_method = ARM64_BUS_ACPI;
break;
}
if (has_fdt &&
strncmp(order, "fdt", 3) == 0 &&
(order[3] == ',' || order[3] == '\0')) {
arm64_bus_method = ARM64_BUS_FDT;
break;
}
order = strchr(order, ',');
}
freeenv(env);
/* If we set the bus method it is valid */
if (arm64_bus_method != ARM64_BUS_NONE)
return (true);
}
/* If no order or an invalid order was set use the default */
if (arm64_bus_method == ARM64_BUS_NONE) {
if (has_fdt)
arm64_bus_method = ARM64_BUS_FDT;
else if (has_acpi)
arm64_bus_method = ARM64_BUS_ACPI;
}
/*
* If no option was set the default is valid, otherwise we are
* setting one to get cninit() working, then calling panic to tell
* the user about the invalid bus setup.
*/
return (env == NULL);
}
static void
cache_setup(void)
{
int dczva_line_shift;
uint32_t dczid_el0;
identify_cache(READ_SPECIALREG(ctr_el0));
dczid_el0 = READ_SPECIALREG(dczid_el0);
/* Check if dc zva is not prohibited */
if (dczid_el0 & DCZID_DZP)
dczva_line_size = 0;
else {
/* Same as with above calculations */
dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
dczva_line_size = sizeof(int) << dczva_line_shift;
/* Change pagezero function */
pagezero = pagezero_cache;
}
}
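A brief worked example of the shift above, assuming the common case of
DCZID_EL0.BS == 4 (an assumption, not something this file guarantees): BS
encodes log2 of the "dc zva" block size in 4-byte words, so the computed
dczva_line_size is sizeof(int) << 4 == 64 bytes.

_Static_assert((sizeof(int) << 4) == 64,
    "an (assumed) BS of 4 corresponds to a 64-byte dc zva block");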
int
memory_mapping_mode(vm_paddr_t pa)
{
struct efi_md *map, *p;
size_t efisz;
int ndesc, i;
if (efihdr == NULL)
return (VM_MEMATTR_WRITE_BACK);
/*
* Memory map data provided by UEFI via the GetMemoryMap
* Boot Services API.
*/
efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
map = (struct efi_md *)((uint8_t *)efihdr + efisz);
if (efihdr->descriptor_size == 0)
return (VM_MEMATTR_WRITE_BACK);
ndesc = efihdr->memory_size / efihdr->descriptor_size;
for (i = 0, p = map; i < ndesc; i++,
p = efi_next_descriptor(p, efihdr->descriptor_size)) {
if (pa < p->md_phys ||
pa >= p->md_phys + p->md_pages * EFI_PAGE_SIZE)
continue;
if (p->md_type == EFI_MD_TYPE_IOMEM ||
p->md_type == EFI_MD_TYPE_IOPORT)
return (VM_MEMATTR_DEVICE);
else if ((p->md_attr & EFI_MD_ATTR_WB) != 0 ||
p->md_type == EFI_MD_TYPE_RECLAIM)
return (VM_MEMATTR_WRITE_BACK);
else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
return (VM_MEMATTR_WRITE_THROUGH);
else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
return (VM_MEMATTR_WRITE_COMBINING);
break;
}
return (VM_MEMATTR_DEVICE);
}
void
initarm(struct arm64_bootparams *abp)
{
struct efi_fb *efifb;
struct pcpu *pcpup;
char *env;
#ifdef FDT
struct mem_region mem_regions[FDT_MEM_REGIONS];
int mem_regions_sz;
phandle_t root;
char dts_version[255];
#endif
vm_offset_t lastaddr;
caddr_t kmdp;
bool valid;
TSRAW(&thread0, TS_ENTER, __func__, NULL);
boot_el = abp->boot_el;
/* Parse loader or FDT boot parameters. Determine the last used address. */
lastaddr = parse_boot_param(abp);
/* Find the kernel address */
kmdp = preload_search_by_type("elf kernel");
if (kmdp == NULL)
kmdp = preload_search_by_type("elf64 kernel");
identify_cpu(0);
update_special_regs(0);
link_elf_ireloc(kmdp);
try_load_dtb(kmdp);
efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);
/* Load the physical memory ranges */
efihdr = (struct efi_map_header *)preload_search_info(kmdp,
MODINFO_METADATA | MODINFOMD_EFI_MAP);
if (efihdr != NULL)
add_efi_map_entries(efihdr);
#ifdef FDT
else {
/* Grab physical memory regions information from device tree. */
if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
NULL) != 0)
panic("Cannot get physical memory regions");
physmem_hardware_regions(mem_regions, mem_regions_sz);
}
if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
physmem_exclude_regions(mem_regions, mem_regions_sz,
EXFLAG_NODUMP | EXFLAG_NOALLOC);
#endif
/* Exclude the EFI framebuffer from our view of physical memory. */
efifb = (struct efi_fb *)preload_search_info(kmdp,
MODINFO_METADATA | MODINFOMD_EFI_FB);
if (efifb != NULL)
physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
EXFLAG_NOALLOC);
/* Set the pcpu data, this is needed by pmap_bootstrap */
pcpup = &pcpu0;
pcpu_init(pcpup, 0, sizeof(struct pcpu));
/*
* Set the pcpu pointer with a backup in tpidr_el1 to be
* loaded when entering the kernel from userland.
*/
__asm __volatile(
"mov x18, %0 \n"
"msr tpidr_el1, %0" :: "r"(pcpup));
/* locore.S sets sp_el0 to &thread0 so no need to set it here. */
PCPU_SET(curthread, &thread0);
PCPU_SET(midr, get_midr());
/* Do basic tuning, hz etc */
init_param1();
cache_setup();
pan_setup();
/* Bootstrap enough of pmap to enter the kernel proper */
pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
/* Exclude entries needed in the DMAP region, but not in phys_avail */
if (efihdr != NULL)
exclude_efi_map_entries(efihdr);
physmem_init_kernel_globals();
devmap_bootstrap(0, NULL);
valid = bus_probe();
cninit();
set_ttbr0(abp->kern_ttbr0);
cpu_tlb_flushID();
if (!valid)
panic("Invalid bus configuration: %s",
kern_getenv("kern.cfg.order"));
/*
* Dump the boot metadata. We have to wait for cninit() since console
* output is required. If it's grossly incorrect the kernel will never
* make it this far.
*/
if (getenv_is_true("debug.dump_modinfo_at_boot"))
preload_dump();
init_proc0(abp->kern_stack);
msgbufinit(msgbufp, msgbufsize);
mutex_init();
init_param2(physmem);
dbg_init();
kdb_init();
#ifdef KDB
if ((boothowto & RB_KDB) != 0)
kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
pan_enable();
kcsan_cpu_init(0);
env = kern_getenv("kernelname");
if (env != NULL)
strlcpy(kernelname, env, sizeof(kernelname));
#ifdef FDT
if (arm64_bus_method == ARM64_BUS_FDT) {
root = OF_finddevice("/");
if (OF_getprop(root, "freebsd,dts-version", dts_version, sizeof(dts_version)) > 0) {
if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
printf("WARNING: DTB version is %s while kernel expects %s, "
"please update the DTB in the ESP\n",
dts_version,
LINUX_DTS_VERSION);
} else {
printf("WARNING: Cannot find freebsd,dts-version property, "
"cannot check DTB compliance\n");
}
}
#endif
if (boothowto & RB_VERBOSE) {
if (efihdr != NULL)
print_efi_map_entries(efihdr);
physmem_print_tables();
}
early_boot = 0;
TSEXIT();
}
void
dbg_init(void)
{
/* Clear OS lock */
WRITE_SPECIALREG(oslar_el1, 0);
/* This permits DDB to use debug registers for watchpoints. */
dbg_monitor_init();
/* TODO: Eventually will need to initialize debug registers here. */
}
#ifdef DDB
#include
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define PRINT_REG(reg) \
db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))
PRINT_REG(actlr_el1);
PRINT_REG(afsr0_el1);
PRINT_REG(afsr1_el1);
PRINT_REG(aidr_el1);
PRINT_REG(amair_el1);
PRINT_REG(ccsidr_el1);
PRINT_REG(clidr_el1);
PRINT_REG(contextidr_el1);
PRINT_REG(cpacr_el1);
PRINT_REG(csselr_el1);
PRINT_REG(ctr_el0);
PRINT_REG(currentel);
PRINT_REG(daif);
PRINT_REG(dczid_el0);
PRINT_REG(elr_el1);
PRINT_REG(esr_el1);
PRINT_REG(far_el1);
#if 0
/* ARM64TODO: Enable VFP before reading floating-point registers */
PRINT_REG(fpcr);
PRINT_REG(fpsr);
#endif
PRINT_REG(id_aa64afr0_el1);
PRINT_REG(id_aa64afr1_el1);
PRINT_REG(id_aa64dfr0_el1);
PRINT_REG(id_aa64dfr1_el1);
PRINT_REG(id_aa64isar0_el1);
PRINT_REG(id_aa64isar1_el1);
PRINT_REG(id_aa64pfr0_el1);
PRINT_REG(id_aa64pfr1_el1);
PRINT_REG(id_afr0_el1);
PRINT_REG(id_dfr0_el1);
PRINT_REG(id_isar0_el1);
PRINT_REG(id_isar1_el1);
PRINT_REG(id_isar2_el1);
PRINT_REG(id_isar3_el1);
PRINT_REG(id_isar4_el1);
PRINT_REG(id_isar5_el1);
PRINT_REG(id_mmfr0_el1);
PRINT_REG(id_mmfr1_el1);
PRINT_REG(id_mmfr2_el1);
PRINT_REG(id_mmfr3_el1);
#if 0
/* Missing from llvm */
PRINT_REG(id_mmfr4_el1);
#endif
PRINT_REG(id_pfr0_el1);
PRINT_REG(id_pfr1_el1);
PRINT_REG(isr_el1);
PRINT_REG(mair_el1);
PRINT_REG(midr_el1);
PRINT_REG(mpidr_el1);
PRINT_REG(mvfr0_el1);
PRINT_REG(mvfr1_el1);
PRINT_REG(mvfr2_el1);
PRINT_REG(revidr_el1);
PRINT_REG(sctlr_el1);
PRINT_REG(sp_el0);
PRINT_REG(spsel);
PRINT_REG(spsr_el1);
PRINT_REG(tcr_el1);
PRINT_REG(tpidr_el0);
PRINT_REG(tpidr_el1);
PRINT_REG(tpidrro_el0);
PRINT_REG(ttbr0_el1);
PRINT_REG(ttbr1_el1);
PRINT_REG(vbar_el1);
#undef PRINT_REG
}
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
uint64_t phys;
if (have_addr) {
phys = arm64_address_translate_s1e1r(addr);
db_printf("EL1 physical address reg (read): 0x%016lx\n", phys);
phys = arm64_address_translate_s1e1w(addr);
db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
phys = arm64_address_translate_s1e0r(addr);
db_printf("EL0 physical address reg (read): 0x%016lx\n", phys);
phys = arm64_address_translate_s1e0w(addr);
db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
} else
db_printf("show vtop \n");
}
#endif
diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
index 8443c766923b..4d3f9f4963f5 100644
--- a/sys/arm64/arm64/mp_machdep.c
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -1,955 +1,960 @@
/*-
* Copyright (c) 2015-2016 The FreeBSD Foundation
*
* This software was developed by Andrew Turner under
* sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include
__FBSDID("$FreeBSD$");
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef VFP
#include
#endif
#ifdef DEV_ACPI
#include
#include
#endif
#ifdef FDT
#include
#include
#include
#include
#endif
#include
#include "pic_if.h"
#define MP_BOOTSTACK_SIZE (kstack_pages * PAGE_SIZE)
#define MP_QUIRK_CPULIST 0x01 /* The list of cpus may be wrong, */
/* don't panic if one fails to start */
static uint32_t mp_quirks;
#ifdef FDT
static struct {
const char *compat;
uint32_t quirks;
} fdt_quirks[] = {
{ "arm,foundation-aarch64", MP_QUIRK_CPULIST },
{ "arm,fvp-base", MP_QUIRK_CPULIST },
/* This is incorrect in some DTS files */
{ "arm,vfp-base", MP_QUIRK_CPULIST },
{ NULL, 0 },
};
#endif
typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);
#define INTR_IPI_NAMELEN (MAXCOMLEN + 1)
struct intr_ipi {
intr_ipi_handler_t * ii_handler;
void * ii_handler_arg;
intr_ipi_send_t * ii_send;
void * ii_send_arg;
char ii_name[INTR_IPI_NAMELEN];
u_long * ii_count;
};
static struct intr_ipi ipi_sources[INTR_IPI_COUNT];
static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
void *);
static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);
struct pcb stoppcbs[MAXCPU];
#ifdef FDT
static u_int fdt_cpuid;
#endif
void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);
/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;
/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];
/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;
/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;
/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];
static bool
is_boot_cpu(uint64_t target_cpu)
{
- return (cpuid_to_pcpu[0]->pc_mpidr == (target_cpu & CPU_AFF_MASK));
+ return (PCPU_GET_MPIDR(cpuid_to_pcpu[0]) == (target_cpu & CPU_AFF_MASK));
}
static void
release_aps(void *dummy __unused)
{
int i, started;
/* Only release CPUs if they exist */
if (mp_ncpus == 1)
return;
intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);
atomic_store_rel_int(&aps_ready, 1);
/* Wake up the other CPUs */
__asm __volatile(
"dsb ishst \n"
"sev \n"
::: "memory");
printf("Release APs...");
started = 0;
for (i = 0; i < 2000; i++) {
if (atomic_load_acq_int(&smp_started) != 0) {
printf("done\n");
return;
}
/*
* Don't time out while we are making progress. Some large
* systems can take a while to start all CPUs.
*/
if (smp_cpus > started) {
i = 0;
started = smp_cpus;
}
DELAY(1000);
}
printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
void
init_secondary(uint64_t cpu)
{
struct pcpu *pcpup;
pmap_t pmap0;
- u_int mpidr;
+ uint64_t mpidr;
/*
* Verify that the value passed in 'cpu' argument (aka context_id) is
* valid. Some older U-Boot based PSCI implementations are buggy;
* they can pass a random value in it.
*/
mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
if (cpu >= MAXCPU || cpuid_to_pcpu[cpu] == NULL ||
- cpuid_to_pcpu[cpu]->pc_mpidr != mpidr) {
+ PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) != mpidr) {
for (cpu = 0; cpu < mp_maxid; cpu++)
if (cpuid_to_pcpu[cpu] != NULL &&
- cpuid_to_pcpu[cpu]->pc_mpidr == mpidr)
+ PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) == mpidr)
break;
if (cpu >= MAXCPU)
panic("MPIDR for this CPU is not in pcpu table");
}
pcpup = cpuid_to_pcpu[cpu];
/*
* Set the pcpu pointer with a backup in tpidr_el1 to be
* loaded when entering the kernel from userland.
*/
__asm __volatile(
"mov x18, %0 \n"
"msr tpidr_el1, %0" :: "r"(pcpup));
/*
* Identify the current CPU. This is necessary to set up
* affinity registers and to provide support for
* runtime chip identification.
*
* We need this before signalling the CPU is ready to
* let the boot CPU use the results.
*/
pcpup->pc_midr = get_midr();
identify_cpu(cpu);
/* Ensure the stores in identify_cpu have completed */
atomic_thread_fence_acq_rel();
/* Signal the BSP and spin until it has released all APs. */
atomic_add_int(&aps_started, 1);
while (!atomic_load_int(&aps_ready))
__asm __volatile("wfe");
/* Initialize curthread */
KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
pcpup->pc_curthread = pcpup->pc_idlethread;
schedinit_ap();
/* Initialize curpmap to match TTBR0's current setting. */
pmap0 = vmspace_pmap(&vmspace0);
KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
("pmap0 doesn't match cpu %ld's ttbr0", cpu));
pcpup->pc_curpmap = pmap0;
install_cpu_errata();
intr_pic_init_secondary();
/* Start per-CPU event timers. */
cpu_initclocks_ap();
#ifdef VFP
vfp_init();
#endif
dbg_init();
pan_enable();
mtx_lock_spin(&ap_boot_mtx);
atomic_add_rel_32(&smp_cpus, 1);
if (smp_cpus == mp_ncpus) {
/* enable IPI's, tlb shootdown, freezes etc */
atomic_store_rel_int(&smp_started, 1);
}
mtx_unlock_spin(&ap_boot_mtx);
kcsan_cpu_init(cpu);
/* Enter the scheduler */
sched_throw(NULL);
panic("scheduler returned us to init_secondary");
/* NOTREACHED */
}
static void
smp_after_idle_runnable(void *arg __unused)
{
int cpu;
if (mp_ncpus == 1)
return;
KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));
/*
* Wait for all APs to handle an interrupt. After that, we know that
* the APs have entered the scheduler at least once, so the boot stacks
* are safe to free.
*/
smp_rendezvous(smp_no_rendezvous_barrier, NULL,
smp_no_rendezvous_barrier, NULL);
for (cpu = 1; cpu < mp_ncpus; cpu++) {
if (bootstacks[cpu] != NULL)
kmem_free((vm_offset_t)bootstacks[cpu],
MP_BOOTSTACK_SIZE);
}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
smp_after_idle_runnable, NULL);
/*
* Send IPI thru interrupt controller.
*/
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{
KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
/*
* Ensure that this CPU's stores will be visible to IPI
* recipients before starting to send the interrupts.
*/
dsb(ishst);
PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}
/*
* Setup IPI handler on interrupt controller.
*
* Not SMP coherent.
*/
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
void *arg)
{
struct intr_irqsrc *isrc;
struct intr_ipi *ii;
int error;
KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));
error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
if (error != 0)
return;
isrc->isrc_handlers++;
ii = intr_ipi_lookup(ipi);
KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));
ii->ii_handler = hand;
ii->ii_handler_arg = arg;
ii->ii_send = pic_ipi_send;
ii->ii_send_arg = isrc;
strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
ii->ii_count = intr_ipi_setup_counters(name);
PIC_ENABLE_INTR(intr_irq_root_dev, isrc);
}
static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
struct intr_ipi *ii;
ii = intr_ipi_lookup(ipi);
if (ii->ii_count == NULL)
panic("%s: not setup IPI %u", __func__, ipi);
ii->ii_send(ii->ii_send_arg, cpus, ipi);
}
static void
ipi_ast(void *dummy __unused)
{
CTR0(KTR_SMP, "IPI_AST");
}
static void
ipi_hardclock(void *dummy __unused)
{
CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
hardclockintr();
}
static void
ipi_preempt(void *dummy __unused)
{
CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
sched_preempt(curthread);
}
static void
ipi_rendezvous(void *dummy __unused)
{
CTR0(KTR_SMP, "IPI_RENDEZVOUS");
smp_rendezvous_action();
}
static void
ipi_stop(void *dummy __unused)
{
u_int cpu;
CTR0(KTR_SMP, "IPI_STOP");
cpu = PCPU_GET(cpuid);
savectx(&stoppcbs[cpu]);
/* Indicate we are stopped */
CPU_SET_ATOMIC(cpu, &stopped_cpus);
/* Wait for restart */
while (!CPU_ISSET(cpu, &started_cpus))
cpu_spinwait();
#ifdef DDB
dbg_register_sync(NULL);
#endif
CPU_CLR_ATOMIC(cpu, &started_cpus);
CPU_CLR_ATOMIC(cpu, &stopped_cpus);
CTR0(KTR_SMP, "IPI_STOP (restart)");
}
struct cpu_group *
cpu_topo(void)
{
struct cpu_group *dom, *root;
int i;
root = smp_topo_alloc(1);
dom = smp_topo_alloc(vm_ndomains);
root->cg_parent = NULL;
root->cg_child = dom;
CPU_COPY(&all_cpus, &root->cg_mask);
root->cg_count = mp_ncpus;
root->cg_children = vm_ndomains;
root->cg_level = CG_SHARE_NONE;
root->cg_flags = 0;
/*
* Redundant layers will be collapsed by the caller so we don't need a
* special case for a single domain.
*/
for (i = 0; i < vm_ndomains; i++, dom++) {
dom->cg_parent = root;
dom->cg_child = NULL;
CPU_COPY(&cpuset_domain[i], &dom->cg_mask);
dom->cg_count = CPU_COUNT(&dom->cg_mask);
dom->cg_children = 0;
dom->cg_level = CG_SHARE_L3;
dom->cg_flags = 0;
}
return (root);
}
/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{
/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
return (1);
}
/*
* Starts a given CPU. If the CPU is already running, i.e. it is the boot CPU,
* do nothing. Returns true if the CPU is present and running.
*/
static bool
start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
{
struct pcpu *pcpup;
vm_offset_t pcpu_mem;
vm_size_t size;
vm_paddr_t pa;
int err, naps;
/* Check we are able to start this cpu */
if (cpuid > mp_maxid)
return (false);
/* Skip boot CPU */
if (is_boot_cpu(target_cpu))
return (true);
KASSERT(cpuid < MAXCPU, ("Too many CPUs"));
size = round_page(sizeof(*pcpup) + DPCPU_SIZE);
pcpu_mem = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
M_WAITOK | M_ZERO);
pmap_disable_promotion(pcpu_mem, size);
pcpup = (struct pcpu *)pcpu_mem;
pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
- pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;
+ pcpup->pc_mpidr_low = target_cpu & CPU_AFF_MASK;
+ pcpup->pc_mpidr_high = (target_cpu & CPU_AFF_MASK) >> 32;
dpcpu[cpuid - 1] = (void *)(pcpup + 1);
dpcpu_init(dpcpu[cpuid - 1], cpuid);
bootstacks[cpuid] = (void *)kmem_malloc_domainset(
DOMAINSET_PREF(domain), MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);
naps = atomic_load_int(&aps_started);
bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;
printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
err = psci_cpu_on(target_cpu, pa, cpuid);
if (err != PSCI_RETVAL_SUCCESS) {
/*
* Panic here if INVARIANTS are enabled and PSCI failed to
* start the requested CPU. psci_cpu_on() returns PSCI_MISSING
* to indicate we are unable to use it to start the given CPU.
*/
KASSERT(err == PSCI_MISSING ||
(mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
("Failed to start CPU %u (%lx), error %d\n",
cpuid, target_cpu, err));
pcpu_destroy(pcpup);
dpcpu[cpuid - 1] = NULL;
kmem_free((vm_offset_t)bootstacks[cpuid], MP_BOOTSTACK_SIZE);
kmem_free(pcpu_mem, size);
bootstacks[cpuid] = NULL;
mp_ncpus--;
return (false);
}
/* Wait for the AP to switch to its boot stack. */
while (atomic_load_int(&aps_started) < naps + 1)
cpu_spinwait();
CPU_SET(cpuid, &all_cpus);
return (true);
}
#ifdef DEV_ACPI
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
ACPI_MADT_GENERIC_INTERRUPT *intr;
u_int *cpuid;
u_int id;
int domain;
switch(entry->Type) {
case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
cpuid = arg;
if (is_boot_cpu(intr->ArmMpidr))
id = 0;
else
id = *cpuid;
domain = 0;
#ifdef NUMA
if (vm_ndomains > 1)
domain = acpi_pxm_get_cpu_locality(intr->Uid);
#endif
if (start_cpu(id, intr->ArmMpidr, domain)) {
MPASS(cpuid_to_pcpu[id] != NULL);
cpuid_to_pcpu[id]->pc_acpi_id = intr->Uid;
/*
* Don't increment for the boot CPU; its CPU ID is
* reserved.
*/
if (!is_boot_cpu(intr->ArmMpidr))
(*cpuid)++;
}
break;
default:
break;
}
}
static void
cpu_init_acpi(void)
{
ACPI_TABLE_MADT *madt;
vm_paddr_t physaddr;
u_int cpuid;
physaddr = acpi_find_table(ACPI_SIG_MADT);
if (physaddr == 0)
return;
madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
if (madt == NULL) {
printf("Unable to map the MADT, not starting APs\n");
return;
}
/* Boot CPU is always 0 */
cpuid = 1;
acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
madt_handler, &cpuid);
acpi_unmap_table(madt);
#if MAXMEMDOM > 1
acpi_pxm_set_cpu_locality();
#endif
}
#endif
#ifdef FDT
static boolean_t
start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
uint64_t target_cpu;
int domain;
int cpuid;
target_cpu = reg[0];
if (addr_size == 2) {
target_cpu <<= 32;
target_cpu |= reg[1];
}
if (is_boot_cpu(target_cpu))
cpuid = 0;
else
cpuid = fdt_cpuid;
if (!start_cpu(cpuid, target_cpu, 0))
return (FALSE);
/*
* Don't increment for the boot CPU; its CPU ID is reserved.
*/
if (!is_boot_cpu(target_cpu))
fdt_cpuid++;
/* Try to read the NUMA node of this CPU */
if (vm_ndomains == 1 ||
OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
domain = 0;
cpuid_to_pcpu[cpuid]->pc_domain = domain;
if (domain < MAXMEMDOM)
CPU_SET(cpuid, &cpuset_domain[domain]);
return (TRUE);
}
static void
cpu_init_fdt(void)
{
phandle_t node;
int i;
node = OF_peer(0);
for (i = 0; fdt_quirks[i].compat != NULL; i++) {
if (ofw_bus_node_is_compatible(node,
fdt_quirks[i].compat) != 0) {
mp_quirks = fdt_quirks[i].quirks;
}
}
fdt_cpuid = 1;
ofw_cpu_early_foreach(start_cpu_fdt, true);
}
#endif
/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
+ uint64_t mpidr;
+
mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
/* CPU 0 is always boot CPU. */
CPU_SET(0, &all_cpus);
- cpuid_to_pcpu[0]->pc_mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
+ mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
+ cpuid_to_pcpu[0]->pc_mpidr_low = mpidr;
+ cpuid_to_pcpu[0]->pc_mpidr_high = mpidr >> 32;
switch(arm64_bus_method) {
#ifdef DEV_ACPI
case ARM64_BUS_ACPI:
mp_quirks = MP_QUIRK_CPULIST;
cpu_init_acpi();
break;
#endif
#ifdef FDT
case ARM64_BUS_FDT:
cpu_init_fdt();
break;
#endif
default:
break;
}
}
/* Introduce the rest of the cores to the world */
void
cpu_mp_announce(void)
{
}
#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
ACPI_MADT_GENERIC_INTERRUPT *intr;
u_int *cores = arg;
switch(entry->Type) {
case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
(*cores)++;
break;
default:
break;
}
}
static u_int
cpu_count_acpi(void)
{
ACPI_TABLE_MADT *madt;
vm_paddr_t physaddr;
u_int cores;
physaddr = acpi_find_table(ACPI_SIG_MADT);
if (physaddr == 0)
return (0);
madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
if (madt == NULL) {
printf("Unable to map the MADT, not starting APs\n");
return (0);
}
cores = 0;
acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
cpu_count_acpi_handler, &cores);
acpi_unmap_table(madt);
return (cores);
}
#endif
void
cpu_mp_setmaxid(void)
{
int cores;
mp_ncpus = 1;
mp_maxid = 0;
switch(arm64_bus_method) {
#ifdef DEV_ACPI
case ARM64_BUS_ACPI:
cores = cpu_count_acpi();
if (cores > 0) {
cores = MIN(cores, MAXCPU);
if (bootverbose)
printf("Found %d CPUs in the ACPI tables\n",
cores);
mp_ncpus = cores;
mp_maxid = cores - 1;
}
break;
#endif
#ifdef FDT
case ARM64_BUS_FDT:
cores = ofw_cpu_early_foreach(NULL, false);
if (cores > 0) {
cores = MIN(cores, MAXCPU);
if (bootverbose)
printf("Found %d CPUs in the device tree\n",
cores);
mp_ncpus = cores;
mp_maxid = cores - 1;
}
break;
#endif
default:
if (bootverbose)
printf("No CPU data, limiting to 1 core\n");
break;
}
if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
if (cores > 0 && cores < mp_ncpus) {
mp_ncpus = cores;
mp_maxid = cores - 1;
}
}
}
/*
* Lookup IPI source.
*/
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{
if (ipi >= INTR_IPI_COUNT)
panic("%s: no such IPI %u", __func__, ipi);
return (&ipi_sources[ipi]);
}
/*
* interrupt controller dispatch function for IPIs. It should
* be called straight from the interrupt controller, when the associated
* interrupt source is learned, or from anybody who has an interrupt
* source mapped.
*/
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
void *arg;
struct intr_ipi *ii;
ii = intr_ipi_lookup(ipi);
if (ii->ii_count == NULL)
panic("%s: not setup IPI %u", __func__, ipi);
intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));
/*
* Supply ipi filter with trapframe argument
* if none is registered.
*/
arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
ii->ii_handler(arg);
}
#ifdef notyet
/*
* Map IPI into interrupt controller.
*
* Not SMP coherent.
*/
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
boolean_t is_percpu;
int error;
if (ipi >= INTR_IPI_COUNT)
panic("%s: no such IPI %u", __func__, ipi);
KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
isrc->isrc_type = INTR_ISRCT_NAMESPACE;
isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
isrc->isrc_nspc_num = ipi_next_num;
error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
if (error == 0) {
isrc->isrc_dev = intr_irq_root_dev;
ipi_next_num++;
}
return (error);
}
/*
* Set up an IPI handler on an interrupt source.
*
* Note that there could be more ways to send and receive IPIs
* on a platform, fast interrupts for example. In that case,
* one can call this function with the AISHF_NOALLOC flag set and then
* call intr_ipi_dispatch() when appropriate.
*
* Not SMP coherent.
*/
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
void *arg, u_int flags)
{
struct intr_irqsrc *isrc;
int error;
if (filter == NULL)
return(EINVAL);
isrc = intr_ipi_lookup(ipi);
if (isrc->isrc_ipifilter != NULL)
return (EEXIST);
if ((flags & AISHF_NOALLOC) == 0) {
error = ipi_map(isrc, ipi);
if (error != 0)
return (error);
}
isrc->isrc_ipifilter = filter;
isrc->isrc_arg = arg;
isrc->isrc_handlers = 1;
isrc->isrc_count = intr_ipi_setup_counters(name);
isrc->isrc_index = 0; /* it should not be used in IPI case */
if (isrc->isrc_dev != NULL) {
PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
}
return (0);
}
#endif
/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
cpuset_t cpus;
cpus = all_cpus;
CPU_CLR(PCPU_GET(cpuid), &cpus);
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
intr_ipi_send(cpus, ipi);
}
void
ipi_cpu(int cpu, u_int ipi)
{
cpuset_t cpus;
CPU_ZERO(&cpus);
CPU_SET(cpu, &cpus);
CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
intr_ipi_send(cpus, ipi);
}
void
ipi_selected(cpuset_t cpus, u_int ipi)
{
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
intr_ipi_send(cpus, ipi);
}
diff --git a/sys/arm64/include/pcpu.h b/sys/arm64/include/pcpu.h
index d83c9a656634..8ec4a33b0d67 100644
--- a/sys/arm64/include/pcpu.h
+++ b/sys/arm64/include/pcpu.h
@@ -1,89 +1,94 @@
/*-
* Copyright (c) 1999 Luoqi Chen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: FreeBSD: src/sys/i386/include/globaldata.h,v 1.27 2001/04/27
* $FreeBSD$
*/
#ifndef _MACHINE_PCPU_H_
#define _MACHINE_PCPU_H_
#include
#include
#define ALT_STACK_SIZE 128
typedef int (*pcpu_bp_harden)(void);
typedef int (*pcpu_ssbd)(int);
struct debug_monitor_state;
#define PCPU_MD_FIELDS \
u_int pc_acpi_id; /* ACPI CPU id */ \
u_int pc_midr; /* stored MIDR value */ \
uint64_t pc_clock; \
pcpu_bp_harden pc_bp_harden; \
pcpu_ssbd pc_ssbd; \
struct pmap *pc_curpmap; \
struct pmap *pc_curvmpmap; \
u_int pc_bcast_tlbi_workaround; \
- u_int pc_mpidr; /* stored MPIDR value */ \
- char __pad[201]
+ /* Store as two u_int values to preserve KBI */ \
+ u_int pc_mpidr_low; /* lower MPIDR 32 bits */ \
+ u_int pc_mpidr_high; /* upper MPIDR 32 bits */ \
+ char __pad[197]
#ifdef _KERNEL
struct pcb;
struct pcpu;
register struct pcpu *pcpup __asm ("x18");
static inline struct pcpu *
get_pcpu(void)
{
struct pcpu *pcpu;
__asm __volatile("mov %0, x18" : "=&r"(pcpu));
return (pcpu);
}
static inline struct thread *
get_curthread(void)
{
struct thread *td;
__asm __volatile("ldr %0, [x18]" : "=&r"(td));
return (td);
}
#define curthread get_curthread()
#define PCPU_GET(member) (pcpup->pc_ ## member)
#define PCPU_ADD(member, value) (pcpup->pc_ ## member += (value))
#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
+#define PCPU_GET_MPIDR(pc) \
+ ((((uint64_t)((pc)->pc_mpidr_high)) << 32) | ((pc)->pc_mpidr_low))
+
#endif /* _KERNEL */
#endif /* !_MACHINE_PCPU_H_ */
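To illustrate the low/high split introduced above, here is a minimal sketch
(not part of the change) of the round trip between the affinity-masked 64-bit
MPIDR_EL1 value and the two KBI-preserving 32-bit fields; store_mpidr is a
hypothetical helper name, and CPU_AFF_MASK is the mask used throughout
mp_machdep.c:

/* Store a masked MPIDR into the two u_int halves, as cpu_mp_start() and
 * start_cpu() do above. */
static void
store_mpidr(struct pcpu *pc, uint64_t mpidr_el1)
{
	uint64_t mpidr = mpidr_el1 & CPU_AFF_MASK;

	pc->pc_mpidr_low = (u_int)mpidr;		/* bits 31:0 */
	pc->pc_mpidr_high = (u_int)(mpidr >> 32);	/* bits 63:32 */
}

/*
 * PCPU_GET_MPIDR(pc) then recovers the original value,
 *   ((uint64_t)pc->pc_mpidr_high << 32) | pc->pc_mpidr_low == mpidr,
 * so 64-bit comparisons such as
 *   PCPU_GET_MPIDR(cpuid_to_pcpu[0]) == (target_cpu & CPU_AFF_MASK)
 * behave exactly as the old single pc_mpidr field did, while struct pcpu
 * keeps its existing size and layout (KBI).
 */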