diff --git a/sys/amd64/include/kdb.h b/sys/amd64/include/kdb.h index e2a16ccca0eb..6612bfce6361 100644 --- a/sys/amd64/include/kdb.h +++ b/sys/amd64/include/kdb.h @@ -1,64 +1,62 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2004 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_KDB_H_ #define _MACHINE_KDB_H_ #include #include -#define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid] - int kdb_cpu_set_watchpoint(vm_offset_t addr, vm_size_t size, int access); int kdb_cpu_clr_watchpoint(vm_offset_t addr, vm_size_t size); static __inline void kdb_cpu_clear_singlestep(void) { kdb_frame->tf_rflags &= ~PSL_T; } static __inline void kdb_cpu_set_singlestep(void) { kdb_frame->tf_rflags |= PSL_T; } static __inline void kdb_cpu_sync_icache(unsigned char *addr, size_t size) { } static __inline void kdb_cpu_trap(int type, int code) { } #endif /* _MACHINE_KDB_H_ */ diff --git a/sys/arm/arm/mp_machdep.c b/sys/arm/arm/mp_machdep.c index 36a38291dd3a..b823270eb1c9 100644 --- a/sys/arm/arm/mp_machdep.c +++ b/sys/arm/arm/mp_machdep.c @@ -1,373 +1,372 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ddb.h"

#include
__FBSDID("$FreeBSD$");

#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include

#ifdef VFP
#include
#endif

#ifdef CPU_MV_PJ4B
#include
#endif

/* used to hold the APs until we are ready to release them */
struct mtx ap_boot_mtx;

-struct pcb stoppcbs[MAXCPU];

/* # of application processors */
volatile int mp_naps;

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

void set_stackptrs(int cpu);

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	KASSERT(mp_ncpus != 0, ("cpu_mp_probe: mp_ncpus is unset"));

	CPU_SETOF(0, &all_cpus);

	return (mp_ncpus > 1);
}

/* Start Application Processor via platform specific function */
static int
check_ap(void)
{
	uint32_t ms;

	for (ms = 0; ms < 2000; ++ms) {
		if ((mp_naps + 1) == mp_ncpus)
			return (0);	/* success */
		else
			DELAY(1000);
	}

	return (-2);
}

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
	int error, i;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* Reserve memory for application processors */
	for (i = 0; i < (mp_ncpus - 1); i++)
		dpcpu[i] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);

	dcache_wbinv_poc_all();

	/* Initialize boot code and start up processors */
	platform_mp_start_ap();

	/* Check if APs started properly */
	error = check_ap();
	if (error)
		printf("WARNING: Some APs failed to start\n");
	else
		for (i = 1; i < mp_ncpus; i++)
			CPU_SET(i, &all_cpus);
}

/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{

}
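/*
 * Editorial sketch (not part of this change): the AP bring-up above is a
 * two-sided handshake.  Each AP advertises itself by bumping mp_naps with
 * release semantics, while the BSP polls the counter with a bounded
 * timeout (check_ap()) and later opens the gate the APs spin on.  A
 * minimal restatement of the pattern, using hypothetical names:
 */
static volatile int sketch_naps;	/* APs that have checked in */
static volatile int sketch_go;		/* gate, opened by the BSP */

static void
sketch_ap_side(void)
{

	atomic_add_rel_32(&sketch_naps, 1);	/* "I am alive" */
	while (!atomic_load_acq_int(&sketch_go))
		cpu_spinwait();			/* wait for the release */
}

static int
sketch_bsp_side(int naps_wanted)
{
	int ms;

	for (ms = 0; ms < 2000; ms++) {
		if (sketch_naps >= naps_wanted) {
			atomic_store_rel_int(&sketch_go, 1);
			return (0);
		}
		DELAY(1000);		/* poll for up to two seconds */
	}
	return (ETIMEDOUT);		/* some APs never checked in */
}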
void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;

	pmap_set_tex();
	cpuinfo_reinit_mmu(pmap_kern_ttb);
	cpu_setup();

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	enable_interrupts(PSR_A);
	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates queue, so it should not be executed in parallel
	 * on several cores
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
	dpcpu_init(dpcpu[cpu - 1], cpu);
#if defined(DDB)
	dbg_monitor_init_secondary();
#endif
	/* Signal our startup to BSP */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs */
	while (!atomic_load_acq_int(&aps_ready)) {
#if __ARM_ARCH >= 7
		__asm __volatile("wfe");
#endif
	}

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
	schedinit_ap();
#ifdef VFP
	vfp_init();
#endif

	/* Configure the interrupt controller */
	intr_pic_init_secondary();

	/* Apply possible BP hardening */
	cpuinfo_init_bp_hardening();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still waiting for smp_started");
	}

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");

	/* Enter the scheduler */
	sched_ap_entry();

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	/*
	 * IPI_STOP_HARD is mapped to IPI_STOP.
	 */
	CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/*
	 * CPUs are stopped when entering the debugger and at
	 * system shutdown, both events which can precede a
	 * panic dump.  For the dump to be correct, all caches
	 * must be flushed and invalidated, but on ARM there's
	 * no way to broadcast a wbinv_all to other cores.
	 * Instead, we have each core do the local wbinv_all as
	 * part of stopping the core.  The core requesting the
	 * stop will do the l2 cache flush after all other cores
	 * have done their l1 flushes and stopped.
	 */
	dcache_wbinv_poc_all();

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
#ifdef DDB
	dbg_resume_dbreg();
#endif
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}
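/*
 * Sketch (hypothetical helper, not part of this file) of the requester-
 * side ordering implied by the comment in ipi_stop() above: park every
 * other core first -- each flushes its own L1 on the way down -- and only
 * then deal with the shared outer cache.
 */
static void
sketch_stop_all_for_dump(void)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	stop_cpus(other_cpus);		/* each AP wbinv's its L1 and parks */
	dcache_wbinv_poc_all();		/* flush our own L1 as well */
	/* An L2 flush would go here, once every L1 is known clean. */
}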
static void
ipi_preempt(void *arg)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_hardclock(void *arg)
{
	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
release_aps(void *dummy __unused)
{
	uint32_t loop_counter;

	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);

	/* Wake the other threads up */
	dsb();
	sev();

	printf("Release APs\n");

	for (loop_counter = 0; loop_counter < 2000; loop_counter++) {
		if (smp_started)
			return;
		DELAY(1000);
	}
	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0));
}

void
cpu_mp_setmaxid(void)
{

	platform_mp_setmaxid();
}

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(other_cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

diff --git a/sys/arm/include/kdb.h b/sys/arm/include/kdb.h
index fde37d148fb6..3fed40807761 100644
--- a/sys/arm/include/kdb.h
+++ b/sys/arm/include/kdb.h
@@ -1,59 +1,57 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * $FreeBSD$ */ #ifndef _MACHINE_KDB_H_ #define _MACHINE_KDB_H_ #include #include #include #include -#define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid] - extern void kdb_cpu_clear_singlestep(void); extern void kdb_cpu_set_singlestep(void); boolean_t kdb_cpu_pc_is_singlestep(db_addr_t); int kdb_cpu_set_watchpoint(vm_offset_t addr, size_t size, int access); int kdb_cpu_clr_watchpoint(vm_offset_t addr, size_t size); static __inline void kdb_cpu_sync_icache(unsigned char *addr, size_t size) { icache_sync((vm_offset_t)addr, size); } static __inline void kdb_cpu_trap(int type, int code) { } #endif /* _MACHINE_KDB_H_ */ diff --git a/sys/arm/include/smp.h b/sys/arm/include/smp.h index 4f1545872a4e..fbb769151de6 100644 --- a/sys/arm/include/smp.h +++ b/sys/arm/include/smp.h @@ -1,35 +1,32 @@ /* $FreeBSD$ */ #ifndef _MACHINE_SMP_H_ #define _MACHINE_SMP_H_ #include #include enum { IPI_AST, IPI_PREEMPT, IPI_RENDEZVOUS, IPI_STOP, IPI_STOP_HARD = IPI_STOP, /* These are synonyms on arm. */ IPI_HARDCLOCK, IPI_TLB, /* Not used now, but keep it reserved. */ IPI_CACHE, /* Not used now, but keep it reserved. */ INTR_IPI_COUNT }; void init_secondary(int cpu); void mpentry(void); void ipi_all_but_self(u_int ipi); void ipi_cpu(int cpu, u_int ipi); void ipi_selected(cpuset_t cpus, u_int ipi); /* Platform interface */ void platform_mp_setmaxid(void); void platform_mp_start_ap(void); -/* global data in mp_machdep.c */ -extern struct pcb stoppcbs[]; - #endif /* !_MACHINE_SMP_H_ */ diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c index 9a4997358f8a..9dd1182e5721 100644 --- a/sys/arm64/arm64/mp_machdep.c +++ b/sys/arm64/arm64/mp_machdep.c @@ -1,1034 +1,1032 @@ /*- * Copyright (c) 2015-2016 The FreeBSD Foundation * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include "opt_acpi.h" #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #include #include #endif #include #include "pic_if.h" #define MP_BOOTSTACK_SIZE (kstack_pages * PAGE_SIZE) #define MP_QUIRK_CPULIST 0x01 /* The list of cpus may be wrong, */ /* don't panic if one fails to start */ static uint32_t mp_quirks; #ifdef FDT static struct { const char *compat; uint32_t quirks; } fdt_quirks[] = { { "arm,foundation-aarch64", MP_QUIRK_CPULIST }, { "arm,fvp-base", MP_QUIRK_CPULIST }, /* This is incorrect in some DTS files */ { "arm,vfp-base", MP_QUIRK_CPULIST }, { NULL, 0 }, }; #endif typedef void intr_ipi_send_t(void *, cpuset_t, u_int); typedef void intr_ipi_handler_t(void *); #define INTR_IPI_NAMELEN (MAXCOMLEN + 1) struct intr_ipi { intr_ipi_handler_t * ii_handler; void * ii_handler_arg; intr_ipi_send_t * ii_send; void * ii_send_arg; char ii_name[INTR_IPI_NAMELEN]; u_long * ii_count; }; static struct intr_ipi ipi_sources[INTR_IPI_COUNT]; static struct intr_ipi *intr_ipi_lookup(u_int); static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *); static void ipi_ast(void *); static void ipi_hardclock(void *); static void ipi_preempt(void *); static void ipi_rendezvous(void *); static void ipi_stop(void *); -struct pcb stoppcbs[MAXCPU]; - #ifdef FDT static u_int fdt_cpuid; #endif void mpentry(unsigned long cpuid); void init_secondary(uint64_t); /* Synchronize AP startup. */ static struct mtx ap_boot_mtx; /* Stacks for AP initialization, discarded once idle threads are started. */ void *bootstack; static void *bootstacks[MAXCPU]; /* Count of started APs, used to synchronize access to bootstack. */ static volatile int aps_started; /* Set to 1 once we're ready to let the APs out of the pen. */ static volatile int aps_ready; /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; static bool is_boot_cpu(uint64_t target_cpu) { return (PCPU_GET_MPIDR(cpuid_to_pcpu[0]) == (target_cpu & CPU_AFF_MASK)); } static void release_aps(void *dummy __unused) { int i, started; /* Only release CPUs if they exist */ if (mp_ncpus == 1) return; intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL); intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL); intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL); intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL); intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL); intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL); atomic_store_rel_int(&aps_ready, 1); /* Wake up the other CPUs */ __asm __volatile( "dsb ishst \n" "sev \n" ::: "memory"); printf("Release APs..."); started = 0; for (i = 0; i < 2000; i++) { if (atomic_load_acq_int(&smp_started) != 0) { printf("done\n"); return; } /* * Don't time out while we are making progress. Some large * systems can take a while to start all CPUs. 
*/ if (smp_cpus > started) { i = 0; started = smp_cpus; } DELAY(1000); } printf("APs not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); void init_secondary(uint64_t cpu) { struct pcpu *pcpup; pmap_t pmap0; uint64_t mpidr; ptrauth_mp_start(cpu); /* * Verify that the value passed in 'cpu' argument (aka context_id) is * valid. Some older U-Boot based PSCI implementations are buggy, * they can pass random value in it. */ mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK; if (cpu >= MAXCPU || cpuid_to_pcpu[cpu] == NULL || PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) != mpidr) { for (cpu = 0; cpu < mp_maxid; cpu++) if (cpuid_to_pcpu[cpu] != NULL && PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) == mpidr) break; if ( cpu >= MAXCPU) panic("MPIDR for this CPU is not in pcpu table"); } pcpup = cpuid_to_pcpu[cpu]; /* * Set the pcpu pointer with a backup in tpidr_el1 to be * loaded when entering the kernel from userland. */ __asm __volatile( "mov x18, %0 \n" "msr tpidr_el1, %0" :: "r"(pcpup)); /* * Identify current CPU. This is necessary to setup * affinity registers and to provide support for * runtime chip identification. * * We need this before signalling the CPU is ready to * let the boot CPU use the results. */ pcpup->pc_midr = get_midr(); identify_cpu(cpu); /* Ensure the stores in identify_cpu have completed */ atomic_thread_fence_acq_rel(); /* Signal the BSP and spin until it has released all APs. */ atomic_add_int(&aps_started, 1); while (!atomic_load_int(&aps_ready)) __asm __volatile("wfe"); /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pcpup->pc_curthread = pcpup->pc_idlethread; schedinit_ap(); /* Initialize curpmap to match TTBR0's current setting. */ pmap0 = vmspace_pmap(&vmspace0); KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1), ("pmap0 doesn't match cpu %ld's ttbr0", cpu)); pcpup->pc_curpmap = pmap0; install_cpu_errata(); intr_pic_init_secondary(); /* Start per-CPU event timers. */ cpu_initclocks_ap(); #ifdef VFP vfp_init(); #endif dbg_init(); pan_enable(); mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPI's, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); kcsan_cpu_init(cpu); /* Enter the scheduler */ sched_ap_entry(); panic("scheduler returned us to init_secondary"); /* NOTREACHED */ } static void smp_after_idle_runnable(void *arg __unused) { int cpu; if (mp_ncpus == 1) return; KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__)); /* * Wait for all APs to handle an interrupt. After that, we know that * the APs have entered the scheduler at least once, so the boot stacks * are safe to free. */ smp_rendezvous(smp_no_rendezvous_barrier, NULL, smp_no_rendezvous_barrier, NULL); for (cpu = 1; cpu < mp_ncpus; cpu++) { if (bootstacks[cpu] != NULL) kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE); } } SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY, smp_after_idle_runnable, NULL); /* * Send IPI thru interrupt controller. */ static void pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi) { KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); /* * Ensure that this CPU's stores will be visible to IPI * recipients before starting to send the interrupts. */ dsb(ishst); PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi); } /* * Setup IPI handler on interrupt controller. * * Not SMP coherent. 
*/ static void intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, void *arg) { struct intr_irqsrc *isrc; struct intr_ipi *ii; int error; KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi)); error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc); if (error != 0) return; isrc->isrc_handlers++; ii = intr_ipi_lookup(ipi); KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi)); ii->ii_handler = hand; ii->ii_handler_arg = arg; ii->ii_send = pic_ipi_send; ii->ii_send_arg = isrc; strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN); ii->ii_count = intr_ipi_setup_counters(name); PIC_ENABLE_INTR(intr_irq_root_dev, isrc); } static void intr_ipi_send(cpuset_t cpus, u_int ipi) { struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); ii->ii_send(ii->ii_send_arg, cpus, ipi); } static void ipi_ast(void *dummy __unused) { CTR0(KTR_SMP, "IPI_AST"); } static void ipi_hardclock(void *dummy __unused) { CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); } static void ipi_preempt(void *dummy __unused) { CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); } static void ipi_rendezvous(void *dummy __unused) { CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); } static void ipi_stop(void *dummy __unused) { u_int cpu; CTR0(KTR_SMP, "IPI_STOP"); cpu = PCPU_GET(cpuid); savectx(&stoppcbs[cpu]); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); #ifdef DDB dbg_register_sync(NULL); #endif CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); CTR0(KTR_SMP, "IPI_STOP (restart)"); } struct cpu_group * cpu_topo(void) { struct cpu_group *dom, *root; int i; root = smp_topo_alloc(1); dom = smp_topo_alloc(vm_ndomains); root->cg_parent = NULL; root->cg_child = dom; CPU_COPY(&all_cpus, &root->cg_mask); root->cg_count = mp_ncpus; root->cg_children = vm_ndomains; root->cg_level = CG_SHARE_NONE; root->cg_flags = 0; /* * Redundant layers will be collapsed by the caller so we don't need a * special case for a single domain. */ for (i = 0; i < vm_ndomains; i++, dom++) { dom->cg_parent = root; dom->cg_child = NULL; CPU_COPY(&cpuset_domain[i], &dom->cg_mask); dom->cg_count = CPU_COUNT(&dom->cg_mask); dom->cg_children = 0; dom->cg_level = CG_SHARE_L3; dom->cg_flags = 0; } return (root); } /* Determine if we running MP machine */ int cpu_mp_probe(void) { /* ARM64TODO: Read the u bit of mpidr_el1 to determine this */ return (1); } static int enable_cpu_psci(uint64_t target_cpu, vm_paddr_t entry, u_int cpuid) { int err; err = psci_cpu_on(target_cpu, entry, cpuid); if (err != PSCI_RETVAL_SUCCESS) { /* * Panic here if INVARIANTS are enabled and PSCI failed to * start the requested CPU. psci_cpu_on() returns PSCI_MISSING * to indicate we are unable to use it to start the given CPU. 
*/ KASSERT(err == PSCI_MISSING || (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST, ("Failed to start CPU %u (%lx), error %d\n", cpuid, target_cpu, err)); return (EINVAL); } return (0); } static int enable_cpu_spin(uint64_t cpu, vm_paddr_t entry, vm_paddr_t release_paddr) { vm_paddr_t *release_addr; release_addr = pmap_mapdev(release_paddr, sizeof(*release_addr)); if (release_addr == NULL) return (ENOMEM); *release_addr = entry; pmap_unmapdev(release_addr, sizeof(*release_addr)); __asm __volatile( "dsb sy \n" "sev \n" ::: "memory"); return (0); } /* * Starts a given CPU. If the CPU is already running, i.e. it is the boot CPU, * do nothing. Returns true if the CPU is present and running. */ static bool start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr) { struct pcpu *pcpup; vm_size_t size; vm_paddr_t pa; int err, naps; /* Check we are able to start this cpu */ if (cpuid > mp_maxid) return (false); /* Skip boot CPU */ if (is_boot_cpu(target_cpu)) return (true); KASSERT(cpuid < MAXCPU, ("Too many CPUs")); size = round_page(sizeof(*pcpup) + DPCPU_SIZE); pcpup = kmem_malloc_domainset(DOMAINSET_PREF(domain), size, M_WAITOK | M_ZERO); pmap_disable_promotion((vm_offset_t)pcpup, size); pcpu_init(pcpup, cpuid, sizeof(struct pcpu)); pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK; dpcpu[cpuid - 1] = (void *)(pcpup + 1); dpcpu_init(dpcpu[cpuid - 1], cpuid); bootstacks[cpuid] = kmem_malloc_domainset(DOMAINSET_PREF(domain), MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO); naps = atomic_load_int(&aps_started); bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE; printf("Starting CPU %u (%lx)\n", cpuid, target_cpu); pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry); /* * A limited set of hardware we support can only do spintables and * remain useful, due to lack of EL3. Thus, we'll usually fall into the * PSCI branch here. */ MPASS(release_addr == 0 || !psci_present); if (release_addr != 0) err = enable_cpu_spin(target_cpu, pa, release_addr); else err = enable_cpu_psci(target_cpu, pa, cpuid); if (err != 0) { pcpu_destroy(pcpup); dpcpu[cpuid - 1] = NULL; kmem_free(bootstacks[cpuid], MP_BOOTSTACK_SIZE); kmem_free(pcpup, size); bootstacks[cpuid] = NULL; mp_ncpus--; return (false); } /* Wait for the AP to switch to its boot stack. */ while (atomic_load_int(&aps_started) < naps + 1) cpu_spinwait(); CPU_SET(cpuid, &all_cpus); return (true); } #ifdef DEV_ACPI static void madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg) { ACPI_MADT_GENERIC_INTERRUPT *intr; u_int *cpuid; u_int id; int domain; switch(entry->Type) { case ACPI_MADT_TYPE_GENERIC_INTERRUPT: intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry; cpuid = arg; if (is_boot_cpu(intr->ArmMpidr)) id = 0; else id = *cpuid; domain = 0; #ifdef NUMA if (vm_ndomains > 1) domain = acpi_pxm_get_cpu_locality(intr->Uid); #endif if (start_cpu(id, intr->ArmMpidr, domain, 0)) { MPASS(cpuid_to_pcpu[id] != NULL); cpuid_to_pcpu[id]->pc_acpi_id = intr->Uid; /* * Don't increment for the boot CPU, its CPU ID is * reserved. 
*/ if (!is_boot_cpu(intr->ArmMpidr)) (*cpuid)++; } break; default: break; } } static void cpu_init_acpi(void) { ACPI_TABLE_MADT *madt; vm_paddr_t physaddr; u_int cpuid; physaddr = acpi_find_table(ACPI_SIG_MADT); if (physaddr == 0) return; madt = acpi_map_table(physaddr, ACPI_SIG_MADT); if (madt == NULL) { printf("Unable to map the MADT, not starting APs\n"); return; } /* Boot CPU is always 0 */ cpuid = 1; acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length, madt_handler, &cpuid); acpi_unmap_table(madt); #if MAXMEMDOM > 1 acpi_pxm_set_cpu_locality(); #endif } #endif #ifdef FDT /* * Failure is indicated by failing to populate *release_addr. */ static void populate_release_addr(phandle_t node, vm_paddr_t *release_addr) { pcell_t buf[2]; if (OF_getencprop(node, "cpu-release-addr", buf, sizeof(buf)) != sizeof(buf)) return; *release_addr = (((uintptr_t)buf[0] << 32) | buf[1]); } static bool start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { uint64_t target_cpu; vm_paddr_t release_addr; char *enable_method; int domain; int cpuid; target_cpu = reg[0]; if (addr_size == 2) { target_cpu <<= 32; target_cpu |= reg[1]; } if (is_boot_cpu(target_cpu)) cpuid = 0; else cpuid = fdt_cpuid; /* * If PSCI is present, we'll always use that -- the cpu_on method is * mandated in both v0.1 and v0.2. We'll check the enable-method if * we don't have PSCI and use spin table if it's provided. */ release_addr = 0; if (!psci_present && cpuid != 0) { if (OF_getprop_alloc(node, "enable-method", (void **)&enable_method) <= 0) return (false); if (strcmp(enable_method, "spin-table") != 0) { OF_prop_free(enable_method); return (false); } OF_prop_free(enable_method); populate_release_addr(node, &release_addr); if (release_addr == 0) { printf("Failed to fetch release address for CPU %u", cpuid); return (false); } } if (!start_cpu(cpuid, target_cpu, 0, release_addr)) return (false); /* * Don't increment for the boot CPU, its CPU ID is reserved. */ if (!is_boot_cpu(target_cpu)) fdt_cpuid++; /* Try to read the numa node of this cpu */ if (vm_ndomains == 1 || OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0) domain = 0; cpuid_to_pcpu[cpuid]->pc_domain = domain; if (domain < MAXMEMDOM) CPU_SET(cpuid, &cpuset_domain[domain]); return (true); } static void cpu_init_fdt(void) { phandle_t node; int i; node = OF_peer(0); for (i = 0; fdt_quirks[i].compat != NULL; i++) { if (ofw_bus_node_is_compatible(node, fdt_quirks[i].compat) != 0) { mp_quirks = fdt_quirks[i].quirks; } } fdt_cpuid = 1; ofw_cpu_early_foreach(start_cpu_fdt, true); } #endif /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { uint64_t mpidr; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); /* CPU 0 is always boot CPU. 
*/ CPU_SET(0, &all_cpus); mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK; cpuid_to_pcpu[0]->pc_mpidr = mpidr; switch(arm64_bus_method) { #ifdef DEV_ACPI case ARM64_BUS_ACPI: mp_quirks = MP_QUIRK_CPULIST; cpu_init_acpi(); break; #endif #ifdef FDT case ARM64_BUS_FDT: cpu_init_fdt(); break; #endif default: break; } } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { } #ifdef DEV_ACPI static void cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg) { u_int *cores = arg; switch(entry->Type) { case ACPI_MADT_TYPE_GENERIC_INTERRUPT: (*cores)++; break; default: break; } } static u_int cpu_count_acpi(void) { ACPI_TABLE_MADT *madt; vm_paddr_t physaddr; u_int cores; physaddr = acpi_find_table(ACPI_SIG_MADT); if (physaddr == 0) return (0); madt = acpi_map_table(physaddr, ACPI_SIG_MADT); if (madt == NULL) { printf("Unable to map the MADT, not starting APs\n"); return (0); } cores = 0; acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length, cpu_count_acpi_handler, &cores); acpi_unmap_table(madt); return (cores); } #endif void cpu_mp_setmaxid(void) { int cores; mp_ncpus = 1; mp_maxid = 0; switch(arm64_bus_method) { #ifdef DEV_ACPI case ARM64_BUS_ACPI: cores = cpu_count_acpi(); if (cores > 0) { cores = MIN(cores, MAXCPU); if (bootverbose) printf("Found %d CPUs in the ACPI tables\n", cores); mp_ncpus = cores; mp_maxid = cores - 1; } break; #endif #ifdef FDT case ARM64_BUS_FDT: cores = ofw_cpu_early_foreach(NULL, false); if (cores > 0) { cores = MIN(cores, MAXCPU); if (bootverbose) printf("Found %d CPUs in the device tree\n", cores); mp_ncpus = cores; mp_maxid = cores - 1; } break; #endif default: if (bootverbose) printf("No CPU data, limiting to 1 core\n"); break; } if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) { if (cores > 0 && cores < mp_ncpus) { mp_ncpus = cores; mp_maxid = cores - 1; } } } /* * Lookup IPI source. */ static struct intr_ipi * intr_ipi_lookup(u_int ipi) { if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); return (&ipi_sources[ipi]); } /* * interrupt controller dispatch function for IPIs. It should * be called straight from the interrupt controller, when associated * interrupt source is learned. Or from anybody who has an interrupt * source mapped. */ void intr_ipi_dispatch(u_int ipi) { struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid)); ii->ii_handler(ii->ii_handler_arg); } #ifdef notyet /* * Map IPI into interrupt controller. * * Not SMP coherent. */ static int ipi_map(struct intr_irqsrc *isrc, u_int ipi) { boolean_t is_percpu; int error; if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); isrc->isrc_type = INTR_ISRCT_NAMESPACE; isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI; isrc->isrc_nspc_num = ipi_next_num; error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu); if (error == 0) { isrc->isrc_dev = intr_irq_root_dev; ipi_next_num++; } return (error); } /* * Setup IPI handler to interrupt source. * * Note that there could be more ways how to send and receive IPIs * on a platform like fast interrupts for example. In that case, * one can call this function with ASIF_NOALLOC flag set and then * call intr_ipi_dispatch() when appropriate. * * Not SMP coherent. 
*/ int intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter, void *arg, u_int flags) { struct intr_irqsrc *isrc; int error; if (filter == NULL) return(EINVAL); isrc = intr_ipi_lookup(ipi); if (isrc->isrc_ipifilter != NULL) return (EEXIST); if ((flags & AISHF_NOALLOC) == 0) { error = ipi_map(isrc, ipi); if (error != 0) return (error); } isrc->isrc_ipifilter = filter; isrc->isrc_arg = arg; isrc->isrc_handlers = 1; isrc->isrc_count = intr_ipi_setup_counters(name); isrc->isrc_index = 0; /* it should not be used in IPI case */ if (isrc->isrc_dev != NULL) { PIC_ENABLE_INTR(isrc->isrc_dev, isrc); PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc); } return (0); } #endif /* Sending IPI */ void ipi_all_but_self(u_int ipi) { cpuset_t cpus; cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &cpus); CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(cpus, ipi); } void ipi_cpu(int cpu, u_int ipi) { cpuset_t cpus; CPU_ZERO(&cpus); CPU_SET(cpu, &cpus); CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi); intr_ipi_send(cpus, ipi); } void ipi_selected(cpuset_t cpus, u_int ipi) { CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(cpus, ipi); } diff --git a/sys/arm64/include/smp.h b/sys/arm64/include/smp.h index 538981a954f0..1404a72f13cc 100644 --- a/sys/arm64/include/smp.h +++ b/sys/arm64/include/smp.h @@ -1,55 +1,52 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_SMP_H_ #define _MACHINE_SMP_H_ #include enum { IPI_AST, IPI_PREEMPT, IPI_RENDEZVOUS, IPI_STOP, IPI_STOP_HARD, IPI_HARDCLOCK, INTR_IPI_COUNT, }; void ipi_all_but_self(u_int ipi); void ipi_cpu(int cpu, u_int ipi); void ipi_selected(cpuset_t cpus, u_int ipi); -/* global data in mp_machdep.c */ -extern struct pcb stoppcbs[]; - #endif /* !_MACHINE_SMP_H_ */ diff --git a/sys/i386/include/kdb.h b/sys/i386/include/kdb.h index 53fbe6cbe908..47a8ff86fd44 100644 --- a/sys/i386/include/kdb.h +++ b/sys/i386/include/kdb.h @@ -1,64 +1,62 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2004 Marcel Moolenaar * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_KDB_H_ #define _MACHINE_KDB_H_ #include #include -#define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid] - int kdb_cpu_set_watchpoint(vm_offset_t addr, vm_size_t size, int access); int kdb_cpu_clr_watchpoint(vm_offset_t addr, vm_size_t size); static __inline void kdb_cpu_clear_singlestep(void) { kdb_frame->tf_eflags &= ~PSL_T; } static __inline void kdb_cpu_set_singlestep(void) { kdb_frame->tf_eflags |= PSL_T; } static __inline void kdb_cpu_sync_icache(unsigned char *addr, size_t size) { } static __inline void kdb_cpu_trap(int type, int code) { } #endif /* _MACHINE_KDB_H_ */ diff --git a/sys/kern/subr_kdb.c b/sys/kern/subr_kdb.c index 79b61f67b1e0..f8b37c2319b0 100644 --- a/sys/kern/subr_kdb.c +++ b/sys/kern/subr_kdb.c @@ -1,805 +1,805 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2004 The FreeBSD Project * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_kdb.h" #include "opt_stack.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #include u_char __read_frequently kdb_active = 0; static void *kdb_jmpbufp = NULL; struct kdb_dbbe *kdb_dbbe = NULL; static struct pcb kdb_pcb; struct pcb *kdb_thrctx = NULL; struct thread *kdb_thread = NULL; struct trapframe *kdb_frame = NULL; #ifdef BREAK_TO_DEBUGGER #define KDB_BREAK_TO_DEBUGGER 1 #else #define KDB_BREAK_TO_DEBUGGER 0 #endif #ifdef ALT_BREAK_TO_DEBUGGER #define KDB_ALT_BREAK_TO_DEBUGGER 1 #else #define KDB_ALT_BREAK_TO_DEBUGGER 0 #endif static int kdb_break_to_debugger = KDB_BREAK_TO_DEBUGGER; static int kdb_alt_break_to_debugger = KDB_ALT_BREAK_TO_DEBUGGER; static int kdb_enter_securelevel = 0; KDB_BACKEND(null, NULL, NULL, NULL, NULL); static int kdb_sysctl_available(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_current(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_enter(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_panic(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_panic_str(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_trap(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS); static int kdb_sysctl_stack_overflow(SYSCTL_HANDLER_ARGS); static SYSCTL_NODE(_debug, OID_AUTO, kdb, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "KDB nodes"); SYSCTL_PROC(_debug_kdb, OID_AUTO, available, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, kdb_sysctl_available, "A", "list of available KDB backends"); SYSCTL_PROC(_debug_kdb, OID_AUTO, current, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, kdb_sysctl_current, "A", "currently selected KDB backend"); SYSCTL_PROC(_debug_kdb, OID_AUTO, enter, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, kdb_sysctl_enter, "I", "set to enter the debugger"); SYSCTL_PROC(_debug_kdb, OID_AUTO, panic, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE | CTLFLAG_MPSAFE, NULL, 0, kdb_sysctl_panic, "I", "set to panic the kernel"); SYSCTL_PROC(_debug_kdb, OID_AUTO, panic_str, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_SECURE | CTLFLAG_MPSAFE, NULL, 0, kdb_sysctl_panic_str, "A", "trigger a kernel panic, using the provided string as the panic message"); SYSCTL_PROC(_debug_kdb, OID_AUTO, trap, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE | CTLFLAG_MPSAFE, NULL, 0, kdb_sysctl_trap, "I", "set to cause a page fault via data access"); SYSCTL_PROC(_debug_kdb, OID_AUTO, trap_code, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE | CTLFLAG_MPSAFE, NULL, 0, kdb_sysctl_trap_code, "I", "set to cause a page fault via code access"); SYSCTL_PROC(_debug_kdb, OID_AUTO, stack_overflow, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE | CTLFLAG_MPSAFE, NULL, 0, kdb_sysctl_stack_overflow, "I", "set to cause a stack overflow"); SYSCTL_INT(_debug_kdb, OID_AUTO, break_to_debugger, CTLFLAG_RWTUN, &kdb_break_to_debugger, 0, "Enable break to debugger"); SYSCTL_INT(_debug_kdb, OID_AUTO, alt_break_to_debugger, CTLFLAG_RWTUN, &kdb_alt_break_to_debugger, 0, "Enable alternative break to debugger"); SYSCTL_INT(_debug_kdb, OID_AUTO, enter_securelevel, CTLFLAG_RWTUN | CTLFLAG_SECURE, &kdb_enter_securelevel, 0, "Maximum securelevel to enter a KDB backend"); /* * Flag to indicate to debuggers why the debugger was entered. 
 */
const char * volatile kdb_why = KDB_WHY_UNSET;

static int
kdb_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	struct kdb_dbbe **iter;
	struct sbuf sbuf;
	int error;

	sbuf_new_for_sysctl(&sbuf, NULL, 64, req);
	SET_FOREACH(iter, kdb_dbbe_set) {
		if ((*iter)->dbbe_active == 0)
			sbuf_printf(&sbuf, "%s ", (*iter)->dbbe_name);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

static int
kdb_sysctl_current(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;

	if (kdb_dbbe != NULL)
		strlcpy(buf, kdb_dbbe->dbbe_name, sizeof(buf));
	else
		*buf = '\0';
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (kdb_active)
		return (EBUSY);
	return (kdb_dbbe_select(buf));
}

static int
kdb_sysctl_enter(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (kdb_active)
		return (EBUSY);
	kdb_enter(KDB_WHY_SYSCTL, "sysctl debug.kdb.enter");
	return (0);
}

static int
kdb_sysctl_panic(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	panic("kdb_sysctl_panic");
	return (0);
}

static int
kdb_sysctl_panic_str(SYSCTL_HANDLER_ARGS)
{
	int error;
	static char buf[256]; /* static buffer to limit mallocs when panicking */

	*buf = '\0';
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	panic("kdb_sysctl_panic: %s", buf);
	return (0);
}

static int
kdb_sysctl_trap(SYSCTL_HANDLER_ARGS)
{
	int error, i;
	int *addr = (int *)0x10;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (*addr);
}

static int
kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS)
{
	int error, i;
	void (*fp)(u_int, u_int, u_int) = (void *)0xdeadc0de;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	(*fp)(0x11111111, 0x22222222, 0x33333333);
	return (0);
}

static void kdb_stack_overflow(volatile int *x) __noinline;
static void
kdb_stack_overflow(volatile int *x)
{

	if (*x > 10000000)
		return;
	kdb_stack_overflow(x);
	*x += PCPU_GET(cpuid) / 1000000;
}

static int
kdb_sysctl_stack_overflow(SYSCTL_HANDLER_ARGS)
{
	int error, i;
	volatile int x;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	x = 0;
	kdb_stack_overflow(&x);
	return (0);
}

void
kdb_panic(const char *msg)
{

	kdb_why = KDB_WHY_PANIC;
	printf("KDB: panic\n");
	panic("%s", msg);
}

void
kdb_reboot(void)
{

	kdb_why = KDB_WHY_REBOOT;
	printf("KDB: reboot requested\n");
	shutdown_nice(0);
}
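/*
 * The handlers above are reachable from userland via sysctl(8); for
 * example (illustrative invocations, not part of this file):
 *
 *	sysctl debug.kdb.available	# list compiled-in backends
 *	sysctl debug.kdb.current=ddb	# select a backend
 *	sysctl debug.kdb.enter=1	# drop into the debugger
 */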
/*
 * Solaris implements a new BREAK which is initiated by a character sequence
 * CR ~ ^b which is similar to a familiar pattern used on Sun servers by the
 * Remote Console.
 *
 * Note that this function may be called from almost anywhere, with interrupts
 * disabled and with unknown locks held, so it must not access data other than
 * its arguments.  It's up to the caller to ensure that the state variable is
 * consistent.
 */
#define	KEY_CR		13	/* CR '\r' */
#define	KEY_TILDE	126	/* ~ */
#define	KEY_CRTLB	2	/* ^B */
#define	KEY_CRTLP	16	/* ^P */
#define	KEY_CRTLR	18	/* ^R */

/* States of the KDB "alternate break sequence" detecting state machine. */
enum {
	KDB_ALT_BREAK_SEEN_NONE,
	KDB_ALT_BREAK_SEEN_CR,
	KDB_ALT_BREAK_SEEN_CR_TILDE,
};

int
kdb_break(void)
{

	if (!kdb_break_to_debugger)
		return (0);
	kdb_enter(KDB_WHY_BREAK, "Break to debugger");
	return (KDB_REQ_DEBUGGER);
}

static int
kdb_alt_break_state(int key, int *state)
{
	int brk;

	/* All states transition to KDB_ALT_BREAK_SEEN_CR on a CR. */
	if (key == KEY_CR) {
		*state = KDB_ALT_BREAK_SEEN_CR;
		return (0);
	}

	brk = 0;
	switch (*state) {
	case KDB_ALT_BREAK_SEEN_CR:
		*state = KDB_ALT_BREAK_SEEN_NONE;
		if (key == KEY_TILDE)
			*state = KDB_ALT_BREAK_SEEN_CR_TILDE;
		break;
	case KDB_ALT_BREAK_SEEN_CR_TILDE:
		*state = KDB_ALT_BREAK_SEEN_NONE;
		if (key == KEY_CRTLB)
			brk = KDB_REQ_DEBUGGER;
		else if (key == KEY_CRTLP)
			brk = KDB_REQ_PANIC;
		else if (key == KEY_CRTLR)
			brk = KDB_REQ_REBOOT;
		break;
	case KDB_ALT_BREAK_SEEN_NONE:
	default:
		*state = KDB_ALT_BREAK_SEEN_NONE;
		break;
	}
	return (brk);
}

static int
kdb_alt_break_internal(int key, int *state, int force_gdb)
{
	int brk;

	if (!kdb_alt_break_to_debugger)
		return (0);
	brk = kdb_alt_break_state(key, state);
	switch (brk) {
	case KDB_REQ_DEBUGGER:
		if (force_gdb)
			kdb_dbbe_select("gdb");
		kdb_enter(KDB_WHY_BREAK, "Break to debugger");
		break;
	case KDB_REQ_PANIC:
		if (force_gdb)
			kdb_dbbe_select("gdb");
		kdb_panic("Panic sequence on console");
		break;
	case KDB_REQ_REBOOT:
		kdb_reboot();
		break;
	}
	return (0);
}

int
kdb_alt_break(int key, int *state)
{

	return (kdb_alt_break_internal(key, state, 0));
}

/*
 * This variation on kdb_alt_break() is used only by dcons, which has its own
 * configuration flag to force GDB use regardless of the global KDB
 * configuration.
 */
int
kdb_alt_break_gdb(int key, int *state)
{

	return (kdb_alt_break_internal(key, state, 1));
}

/*
 * Print a backtrace of the calling thread.  The backtrace is generated by
 * the selected debugger, provided it supports backtraces.  If no debugger
 * is selected or the current debugger does not support backtraces, this
 * function silently returns.
 */
void
kdb_backtrace(void)
{

	if (kdb_dbbe != NULL && kdb_dbbe->dbbe_trace != NULL) {
		printf("KDB: stack backtrace:\n");
		kdb_dbbe->dbbe_trace();
	}
#ifdef STACK
	else {
		struct stack st;

		printf("KDB: stack backtrace:\n");
		stack_save(&st);
		stack_print_ddb(&st);
	}
#endif
}

/*
 * Similar to kdb_backtrace() except that it prints a backtrace of an
 * arbitrary thread rather than the calling thread.
 */
void
kdb_backtrace_thread(struct thread *td)
{

	if (kdb_dbbe != NULL && kdb_dbbe->dbbe_trace_thread != NULL) {
		printf("KDB: stack backtrace of thread %d:\n", td->td_tid);
		kdb_dbbe->dbbe_trace_thread(td);
	}
#ifdef STACK
	else {
		struct stack st;

		printf("KDB: stack backtrace of thread %d:\n", td->td_tid);
		if (stack_save_td(&st, td) == 0)
			stack_print_ddb(&st);
	}
#endif
}
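/*
 * A sketch of how a console driver might feed the alternate break state
 * machine above (hypothetical function and state variable; real drivers
 * keep the state per port).  The recognized sequences are <CR> ~ ^B
 * (enter the debugger), <CR> ~ ^P (panic) and <CR> ~ ^R (reboot);
 * kdb_alt_break() acts on them as a side effect, and the state word only
 * has to start out as 0.
 */
static int sketch_altbrk;

static void
sketch_console_rx(int c)
{

	kdb_alt_break(c, &sketch_altbrk);
	/* ... then hand 'c' to the tty layer as usual ... */
}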
/*
 * Set/change the current backend.
 */
int
kdb_dbbe_select(const char *name)
{
	struct kdb_dbbe *be, **iter;
	int error;

	error = priv_check(curthread, PRIV_KDB_SET_BACKEND);
	if (error)
		return (error);

	SET_FOREACH(iter, kdb_dbbe_set) {
		be = *iter;
		if (be->dbbe_active == 0 && strcmp(be->dbbe_name, name) == 0) {
			kdb_dbbe = be;
			return (0);
		}
	}
	return (EINVAL);
}

static bool
kdb_backend_permitted(struct kdb_dbbe *be, struct ucred *cred)
{
	int error;

	error = securelevel_gt(cred, kdb_enter_securelevel);
#ifdef MAC
	/*
	 * Give MAC a chance to weigh in on the policy: if the securelevel is
	 * not raised, then MAC may veto the backend, otherwise MAC may
	 * explicitly grant access.
	 */
	if (error == 0) {
		error = mac_kdb_check_backend(be);
		if (error != 0) {
			printf("MAC prevented execution of KDB backend: %s\n",
			    be->dbbe_name);
			return (false);
		}
	} else if (mac_kdb_grant_backend(be) == 0) {
		error = 0;
	}
#endif
	if (error != 0)
		printf("refusing to enter KDB with elevated securelevel\n");
	return (error == 0);
}

/*
 * Enter the currently selected debugger. If a message has been provided,
 * it is printed first. If the debugger does not support the enter method,
 * it is entered by using breakpoint(), which enters the debugger through
 * kdb_trap().  The 'why' argument will contain a more mechanically usable
 * string than 'msg', and is relied upon by DDB scripting to identify the
 * reason for entering the debugger so that the right script can be run.
 */
void
kdb_enter(const char *why, const char *msg)
{

	if (kdb_dbbe != NULL && kdb_active == 0) {
		kdb_why = why;
		if (msg != NULL)
			printf("KDB: enter: %s\n", msg);
		breakpoint();
		kdb_why = KDB_WHY_UNSET;
	}
}

/*
 * Initialize the kernel debugger interface.
 */
void
kdb_init(void)
{
	struct kdb_dbbe *be, **iter;
	int cur_pri, pri;

	kdb_active = 0;
	kdb_dbbe = NULL;
	cur_pri = -1;
	SET_FOREACH(iter, kdb_dbbe_set) {
		be = *iter;
		pri = (be->dbbe_init != NULL) ? be->dbbe_init() : -1;
		be->dbbe_active = (pri >= 0) ? 0 : -1;
		if (pri > cur_pri) {
			cur_pri = pri;
			kdb_dbbe = be;
		}
	}
	if (kdb_dbbe != NULL) {
		printf("KDB: debugger backends:");
		SET_FOREACH(iter, kdb_dbbe_set) {
			be = *iter;
			if (be->dbbe_active == 0)
				printf(" %s", be->dbbe_name);
		}
		printf("\n");
		printf("KDB: current backend: %s\n",
		    kdb_dbbe->dbbe_name);
	}
}

/*
 * Handle contexts.
 */
void *
kdb_jmpbuf(jmp_buf new)
{
	void *old;

	old = kdb_jmpbufp;
	kdb_jmpbufp = new;
	return (old);
}

void
kdb_reenter(void)
{

	if (!kdb_active || kdb_jmpbufp == NULL)
		return;

	printf("KDB: reentering\n");
	kdb_backtrace();
	longjmp(kdb_jmpbufp, 1);
	/* NOTREACHED */
}

void
kdb_reenter_silent(void)
{

	if (!kdb_active || kdb_jmpbufp == NULL)
		return;

	longjmp(kdb_jmpbufp, 1);
	/* NOTREACHED */
}

/*
 * Thread-related support functions.
 */
struct pcb *
kdb_thr_ctx(struct thread *thr)
{
-#if defined(SMP) && defined(KDB_STOPPEDPCB)
+#ifdef SMP
	struct pcpu *pc;
#endif

	if (thr == curthread)
		return (&kdb_pcb);

-#if defined(SMP) && defined(KDB_STOPPEDPCB)
+#ifdef SMP
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc->pc_curthread == thr &&
		    CPU_ISSET(pc->pc_cpuid, &stopped_cpus))
-			return (KDB_STOPPEDPCB(pc));
+			return (&stoppcbs[pc->pc_cpuid]);
	}
#endif
	return (thr->td_pcb);
}

struct thread *
kdb_thr_first(void)
{
	struct proc *p;
	struct thread *thr;
	u_int i;

	/* This function may be called early.
*/ if (pidhashtbl == NULL) return (&thread0); for (i = 0; i <= pidhash; i++) { LIST_FOREACH(p, &pidhashtbl[i], p_hash) { thr = FIRST_THREAD_IN_PROC(p); if (thr != NULL) return (thr); } } return (NULL); } struct thread * kdb_thr_from_pid(pid_t pid) { struct proc *p; LIST_FOREACH(p, PIDHASH(pid), p_hash) { if (p->p_pid == pid) return (FIRST_THREAD_IN_PROC(p)); } return (NULL); } struct thread * kdb_thr_lookup(lwpid_t tid) { struct thread *thr; thr = kdb_thr_first(); while (thr != NULL && thr->td_tid != tid) thr = kdb_thr_next(thr); return (thr); } struct thread * kdb_thr_next(struct thread *thr) { struct proc *p; u_int hash; p = thr->td_proc; thr = TAILQ_NEXT(thr, td_plist); if (thr != NULL) return (thr); if (pidhashtbl == NULL) return (NULL); hash = p->p_pid & pidhash; for (;;) { p = LIST_NEXT(p, p_hash); while (p == NULL) { if (++hash > pidhash) return (NULL); p = LIST_FIRST(&pidhashtbl[hash]); } thr = FIRST_THREAD_IN_PROC(p); if (thr != NULL) return (thr); } } int kdb_thr_select(struct thread *thr) { if (thr == NULL) return (EINVAL); kdb_thread = thr; kdb_thrctx = kdb_thr_ctx(thr); return (0); } /* * Enter the debugger due to a trap. */ int kdb_trap(int type, int code, struct trapframe *tf) { #ifdef SMP cpuset_t other_cpus; #endif struct kdb_dbbe *be; register_t intr; int handled; int did_stop_cpus; be = kdb_dbbe; if (be == NULL || be->dbbe_trap == NULL) return (0); /* We reenter the debugger through kdb_reenter(). */ if (kdb_active) return (0); intr = intr_disable(); if (!SCHEDULER_STOPPED()) { #ifdef SMP other_cpus = all_cpus; CPU_ANDNOT(&other_cpus, &other_cpus, &stopped_cpus); CPU_CLR(PCPU_GET(cpuid), &other_cpus); stop_cpus_hard(other_cpus); #endif curthread->td_stopsched = 1; did_stop_cpus = 1; } else did_stop_cpus = 0; kdb_active++; kdb_frame = tf; /* Let MD code do its thing first... */ kdb_cpu_trap(type, code); makectx(tf, &kdb_pcb); kdb_thr_select(curthread); cngrab(); for (;;) { if (!kdb_backend_permitted(be, curthread->td_ucred)) { /* Unhandled breakpoint traps are fatal. */ handled = 1; break; } handled = be->dbbe_trap(type, code); if (be == kdb_dbbe) break; be = kdb_dbbe; if (be == NULL || be->dbbe_trap == NULL) break; printf("Switching to %s back-end\n", be->dbbe_name); } cnungrab(); kdb_active--; if (did_stop_cpus) { curthread->td_stopsched = 0; #ifdef SMP CPU_AND(&other_cpus, &other_cpus, &stopped_cpus); restart_cpus(other_cpus); #endif } intr_restore(intr); return (handled); } diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c index 00e2396d5774..cdee6fbebb3c 100644 --- a/sys/kern/subr_smp.c +++ b/sys/kern/subr_smp.c @@ -1,1356 +1,1363 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001, John Baldwin . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This module holds the global variables and machine independent functions * used for the kernel SMP support. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include "opt_sched.h" #ifdef SMP MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data"); volatile cpuset_t stopped_cpus; volatile cpuset_t started_cpus; volatile cpuset_t suspended_cpus; cpuset_t hlt_cpus_mask; cpuset_t logical_cpus_mask; void (*cpustop_restartfunc)(void); #endif static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS); /* This is used in modules that need to work in both SMP and UP. */ cpuset_t all_cpus; int mp_ncpus; /* export this for libkvm consumers. */ int mp_maxcpus = MAXCPU; volatile int smp_started; u_int mp_maxid; +/* Array of CPU contexts saved during a panic. */ +struct pcb *stoppcbs; + static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, NULL, "Kernel SMP"); SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0, "Max CPU ID."); SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus, 0, "Max number of CPUs that the system was compiled for."); SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD|CTLTYPE_INT|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_smp_active, "I", "Indicates system is running in SMP mode"); int smp_disabled = 0; /* has smp been disabled? */ SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD, &smp_disabled, 0, "SMP has been disabled from the loader"); int smp_cpus = 1; /* how many cpu's running */ SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0, "Number of CPUs online"); int smp_threads_per_core = 1; /* how many SMT threads are running per core */ SYSCTL_INT(_kern_smp, OID_AUTO, threads_per_core, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_threads_per_core, 0, "Number of SMT threads online per core"); int mp_ncores = -1; /* how many physical cores running */ SYSCTL_INT(_kern_smp, OID_AUTO, cores, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_ncores, 0, "Number of physical cores online"); int smp_topology = 0; /* Which topology we're using. */ SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0, "Topology override setting; 0 is default provided by hardware."); #ifdef SMP /* Enable forwarding of a signal to a process running on a different CPU */ static int forward_signal_enabled = 1; SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW, &forward_signal_enabled, 0, "Forwarding of a signal to a process on a different CPU"); /* Variables needed for SMP rendezvous. 
*/ static volatile int smp_rv_ncpus; static void (*volatile smp_rv_setup_func)(void *arg); static void (*volatile smp_rv_action_func)(void *arg); static void (*volatile smp_rv_teardown_func)(void *arg); static void *volatile smp_rv_func_arg; static volatile int smp_rv_waiters[4]; /* * Shared mutex to restrict busywaits between smp_rendezvous() and * smp(_targeted)_tlb_shootdown(). A deadlock occurs if both of these * functions trigger at once and cause multiple CPUs to busywait with * interrupts disabled. */ struct mtx smp_ipi_mtx; /* * Let the MD SMP code initialize mp_maxid very early if it can. */ static void mp_setmaxid(void *dummy) { cpu_mp_setmaxid(); KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__)); KASSERT(mp_ncpus > 1 || mp_maxid == 0, ("%s: one CPU but mp_maxid is not zero", __func__)); KASSERT(mp_maxid >= mp_ncpus - 1, ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid, mp_ncpus)); cpusetsizemin = howmany(mp_maxid + 1, NBBY); } SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL); /* * Call the MD SMP initialization code. */ static void mp_start(void *dummy) { mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN); /* Probe for MP hardware. */ if (smp_disabled != 0 || cpu_mp_probe() == 0) { mp_ncores = 1; mp_ncpus = 1; CPU_SETOF(PCPU_GET(cpuid), &all_cpus); return; } cpu_mp_start(); printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n", mp_ncpus); /* Provide a default for most architectures that don't have SMT/HTT. */ if (mp_ncores < 0) mp_ncores = mp_ncpus; + stoppcbs = mallocarray(mp_maxid + 1, sizeof(struct pcb), M_DEVBUF, + M_WAITOK | M_ZERO); + cpu_mp_announce(); } SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL); void forward_signal(struct thread *td) { int id; /* * signotify() has already set TDA_AST and TDA_SIG on td_ast for * this thread, so all we need to do is poke it if it is currently * executing so that it executes ast(). */ THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(TD_IS_RUNNING(td), ("forward_signal: thread is not TDS_RUNNING")); CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc); if (!smp_started || cold || KERNEL_PANICKED()) return; if (!forward_signal_enabled) return; /* No need to IPI ourself. */ if (td == curthread) return; id = td->td_oncpu; if (id == NOCPU) return; ipi_cpu(id, IPI_AST); } /* * When called the executing CPU will send an IPI to all other CPUs * requesting that they halt execution. * * Usually (but not necessarily) called with 'other_cpus' as its arg. * * - Signals all CPUs in map to stop. * - Waits for each to stop. * * Returns: * -1: error * 0: NA * 1: ok * */ #if defined(__amd64__) || defined(__i386__) #define X86 1 #else #define X86 0 #endif static int generic_stop_cpus(cpuset_t map, u_int type) { #ifdef KTR char cpusetbuf[CPUSETBUFSIZ]; #endif static volatile u_int stopping_cpu = NOCPU; int i; volatile cpuset_t *cpus; KASSERT( type == IPI_STOP || type == IPI_STOP_HARD #if X86 || type == IPI_SUSPEND #endif , ("%s: invalid stop type", __func__)); if (!smp_started) return (0); CTR2(KTR_SMP, "stop_cpus(%s) with %u type", cpusetobj_strprint(cpusetbuf, &map), type); #if X86 /* * When suspending, ensure there are are no IPIs in progress. * IPIs that have been issued, but not yet delivered (e.g. * not pending on a vCPU when running under virtualization) * will be lost, violating FreeBSD's assumption of reliable * IPI delivery. 
*/ if (type == IPI_SUSPEND) mtx_lock_spin(&smp_ipi_mtx); #endif #if X86 if (!nmi_is_broadcast || nmi_kdb_lock == 0) { #endif if (stopping_cpu != PCPU_GET(cpuid)) while (atomic_cmpset_int(&stopping_cpu, NOCPU, PCPU_GET(cpuid)) == 0) while (stopping_cpu != NOCPU) cpu_spinwait(); /* spin */ /* send the stop IPI to all CPUs in map */ ipi_selected(map, type); #if X86 } #endif #if X86 if (type == IPI_SUSPEND) cpus = &suspended_cpus; else #endif cpus = &stopped_cpus; i = 0; while (!CPU_SUBSET(cpus, &map)) { /* spin */ cpu_spinwait(); i++; if (i == 100000000) { printf("timeout stopping cpus\n"); break; } } #if X86 if (type == IPI_SUSPEND) mtx_unlock_spin(&smp_ipi_mtx); #endif stopping_cpu = NOCPU; return (1); } int stop_cpus(cpuset_t map) { return (generic_stop_cpus(map, IPI_STOP)); } int stop_cpus_hard(cpuset_t map) { return (generic_stop_cpus(map, IPI_STOP_HARD)); } #if X86 int suspend_cpus(cpuset_t map) { return (generic_stop_cpus(map, IPI_SUSPEND)); } #endif /* * Called by a CPU to restart stopped CPUs. * * Usually (but not necessarily) called with 'stopped_cpus' as its arg. * * - Signals all CPUs in map to restart. * - Waits for each to restart. * * Returns: * -1: error * 0: NA * 1: ok */ static int generic_restart_cpus(cpuset_t map, u_int type) { #ifdef KTR char cpusetbuf[CPUSETBUFSIZ]; #endif volatile cpuset_t *cpus; #if X86 KASSERT(type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND, ("%s: invalid stop type", __func__)); if (!smp_started) return (0); CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map)); if (type == IPI_SUSPEND) cpus = &resuming_cpus; else cpus = &stopped_cpus; /* signal other cpus to restart */ if (type == IPI_SUSPEND) CPU_COPY_STORE_REL(&map, &toresume_cpus); else CPU_COPY_STORE_REL(&map, &started_cpus); /* * Wake up any CPUs stopped with MWAIT. From MI code we can't tell if * MONITOR/MWAIT is enabled, but the potentially redundant writes are * relatively inexpensive. */ if (type == IPI_STOP) { struct monitorbuf *mb; u_int id; CPU_FOREACH(id) { if (!CPU_ISSET(id, &map)) continue; mb = &pcpu_find(id)->pc_monitorbuf; atomic_store_int(&mb->stop_state, MONITOR_STOPSTATE_RUNNING); } } if (!nmi_is_broadcast || nmi_kdb_lock == 0) { /* wait for each to clear its bit */ while (CPU_OVERLAP(cpus, &map)) cpu_spinwait(); } #else /* !X86 */ KASSERT(type == IPI_STOP || type == IPI_STOP_HARD, ("%s: invalid stop type", __func__)); if (!smp_started) return (0); CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map)); cpus = &stopped_cpus; /* signal other cpus to restart */ CPU_COPY_STORE_REL(&map, &started_cpus); /* wait for each to clear its bit */ while (CPU_OVERLAP(cpus, &map)) cpu_spinwait(); #endif return (1); } int restart_cpus(cpuset_t map) { return (generic_restart_cpus(map, IPI_STOP)); } #if X86 int resume_cpus(cpuset_t map) { return (generic_restart_cpus(map, IPI_SUSPEND)); } #endif #undef X86 /* * All-CPU rendezvous. CPUs are signalled, all execute the setup function * (if specified), rendezvous, execute the action function (if specified), * rendezvous again, execute the teardown function (if specified), and then * resume. * * Note that the supplied external functions _must_ be reentrant and aware * that they are running in parallel and in an unknown lock context. */ void smp_rendezvous_action(void) { struct thread *td; void *local_func_arg; void (*local_setup_func)(void*); void (*local_action_func)(void*); void (*local_teardown_func)(void*); #ifdef INVARIANTS int owepreempt; #endif /* Ensure we have up-to-date values. 
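 *
 * The atomic_add_acq_int() on smp_rv_waiters[0] below pairs with the
 * atomic_store_rel_int() that zeroes the counter in
 * smp_rendezvous_cpus(): the initiator publishes the smp_rv_*
 * parameters before the release store, so a CPU that increments the
 * counter with acquire semantics is guaranteed to read the new
 * parameters rather than those of an earlier rendezvous.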
*/ atomic_add_acq_int(&smp_rv_waiters[0], 1); while (smp_rv_waiters[0] < smp_rv_ncpus) cpu_spinwait(); /* Fetch rendezvous parameters after acquire barrier. */ local_func_arg = smp_rv_func_arg; local_setup_func = smp_rv_setup_func; local_action_func = smp_rv_action_func; local_teardown_func = smp_rv_teardown_func; /* * Use a nested critical section to prevent any preemptions * from occurring during a rendezvous action routine. * Specifically, if a rendezvous handler is invoked via an IPI * and the interrupted thread was in the critical_exit() * function after setting td_critnest to 0 but before * performing a deferred preemption, this routine can be * invoked with td_critnest set to 0 and td_owepreempt true. * In that case, a critical_exit() during the rendezvous * action would trigger a preemption which is not permitted in * a rendezvous action. To fix this, wrap all of the * rendezvous action handlers in a critical section. We * cannot use a regular critical section however as having * critical_exit() preempt from this routine would also be * problematic (the preemption must not occur before the IPI * has been acknowledged via an EOI). Instead, we * intentionally ignore td_owepreempt when leaving the * critical section. This should be harmless because we do * not permit rendezvous action routines to schedule threads, * and thus td_owepreempt should never transition from 0 to 1 * during this routine. */ td = curthread; td->td_critnest++; #ifdef INVARIANTS owepreempt = td->td_owepreempt; #endif /* * If requested, run a setup function before the main action * function. Ensure all CPUs have completed the setup * function before moving on to the action function. */ if (local_setup_func != smp_no_rendezvous_barrier) { if (local_setup_func != NULL) local_setup_func(local_func_arg); atomic_add_int(&smp_rv_waiters[1], 1); while (smp_rv_waiters[1] < smp_rv_ncpus) cpu_spinwait(); } if (local_action_func != NULL) local_action_func(local_func_arg); if (local_teardown_func != smp_no_rendezvous_barrier) { /* * Signal that the main action has been completed. If a * full exit rendezvous is requested, then all CPUs will * wait here until all CPUs have finished the main action. */ atomic_add_int(&smp_rv_waiters[2], 1); while (smp_rv_waiters[2] < smp_rv_ncpus) cpu_spinwait(); if (local_teardown_func != NULL) local_teardown_func(local_func_arg); } /* * Signal that the rendezvous is fully completed by this CPU. * This means that no member of smp_rv_* pseudo-structure will be * accessed by this target CPU after this point; in particular, * memory pointed by smp_rv_func_arg. * * The release semantic ensures that all accesses performed by * the current CPU are visible when smp_rendezvous_cpus() * returns, by synchronizing with the * atomic_load_acq_int(&smp_rv_waiters[3]). */ atomic_add_rel_int(&smp_rv_waiters[3], 1); td->td_critnest--; KASSERT(owepreempt == td->td_owepreempt, ("rendezvous action changed td_owepreempt")); } void smp_rendezvous_cpus(cpuset_t map, void (* setup_func)(void *), void (* action_func)(void *), void (* teardown_func)(void *), void *arg) { int curcpumap, i, ncpus = 0; /* See comments in the !SMP case. */ if (!smp_started) { spinlock_enter(); if (setup_func != NULL) setup_func(arg); if (action_func != NULL) action_func(arg); if (teardown_func != NULL) teardown_func(arg); spinlock_exit(); return; } /* * Make sure we come here with interrupts enabled. Otherwise we * livelock if smp_ipi_mtx is owned by a thread which sent us an IPI. 
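 *
 * For illustration, a caller-side sketch (names hypothetical): to
 * run a handler on a set of CPUs with no setup or teardown stage,
 *
 *      static void
 *      bump_counter(void *arg)
 *      {
 *              atomic_add_int((int *)arg, 1);
 *      }
 *
 *      smp_rendezvous_cpus(map, smp_no_rendezvous_barrier,
 *          bump_counter, smp_no_rendezvous_barrier, &counter);
 *
 * Passing smp_no_rendezvous_barrier skips the corresponding barrier
 * entirely, whereas a NULL function pointer still imposes it.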
*/ MPASS(curthread->td_md.md_spinlock_count == 0); CPU_FOREACH(i) { if (CPU_ISSET(i, &map)) ncpus++; } if (ncpus == 0) panic("ncpus is 0 with non-zero map"); mtx_lock_spin(&smp_ipi_mtx); /* Pass rendezvous parameters via global variables. */ smp_rv_ncpus = ncpus; smp_rv_setup_func = setup_func; smp_rv_action_func = action_func; smp_rv_teardown_func = teardown_func; smp_rv_func_arg = arg; smp_rv_waiters[1] = 0; smp_rv_waiters[2] = 0; smp_rv_waiters[3] = 0; atomic_store_rel_int(&smp_rv_waiters[0], 0); /* * Signal other processors, which will enter the IPI with * interrupts off. */ curcpumap = CPU_ISSET(curcpu, &map); CPU_CLR(curcpu, &map); ipi_selected(map, IPI_RENDEZVOUS); /* Check if the current CPU is in the map */ if (curcpumap != 0) smp_rendezvous_action(); /* * Ensure that the master CPU waits for all the other * CPUs to finish the rendezvous, so that smp_rv_* * pseudo-structure and the arg are guaranteed to not * be in use. * * Load acquire synchronizes with the release add in * smp_rendezvous_action(), which ensures that our caller sees * all memory actions done by the called functions on other * CPUs. */ while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus) cpu_spinwait(); mtx_unlock_spin(&smp_ipi_mtx); } void smp_rendezvous(void (* setup_func)(void *), void (* action_func)(void *), void (* teardown_func)(void *), void *arg) { smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg); } static void smp_topo_fill(struct cpu_group *cg) { int c; for (c = 0; c < cg->cg_children; c++) smp_topo_fill(&cg->cg_child[c]); cg->cg_first = CPU_FFS(&cg->cg_mask) - 1; cg->cg_last = CPU_FLS(&cg->cg_mask) - 1; } struct cpu_group * smp_topo(void) { char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ]; static struct cpu_group *top = NULL; /* * The first call to smp_topo() is guaranteed to occur * during the kernel boot while we are still single-threaded. */ if (top != NULL) return (top); /* * Check for a fake topology request for debugging purposes. */ switch (smp_topology) { case 1: /* Dual core with no sharing. */ top = smp_topo_1level(CG_SHARE_NONE, 2, 0); break; case 2: /* No topology, all cpus are equal. */ top = smp_topo_none(); break; case 3: /* Dual core with shared L2. */ top = smp_topo_1level(CG_SHARE_L2, 2, 0); break; case 4: /* quad core, shared l3 among each package, private l2. */ top = smp_topo_1level(CG_SHARE_L3, 4, 0); break; case 5: /* quad core, 2 dualcore parts on each package share l2. */ top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0); break; case 6: /* Single-core 2xHTT */ top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT); break; case 7: /* quad core with a shared l3, 8 threads sharing L2. */ top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8, CG_FLAG_SMT); break; default: /* Default, ask the system what it wants. */ top = cpu_topo(); break; } /* * Verify the returned topology. */ if (top->cg_count != mp_ncpus) panic("Built bad topology at %p. CPU count %d != %d", top, top->cg_count, mp_ncpus); if (CPU_CMP(&top->cg_mask, &all_cpus)) panic("Built bad topology at %p. CPU mask (%s) != (%s)", top, cpusetobj_strprint(cpusetbuf, &top->cg_mask), cpusetobj_strprint(cpusetbuf2, &all_cpus)); /* * Collapse nonsense levels that may be created out of convenience by * the MD layers. They cause extra work in the search functions. 
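 *
 * For example, an MD layer may return a root whose only child is a
 * single package spanning every CPU; the loop below promotes that
 * package to the root so searches never walk a level that cannot
 * discriminate between CPUs.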
*/ while (top->cg_children == 1) { top = &top->cg_child[0]; top->cg_parent = NULL; } smp_topo_fill(top); return (top); } struct cpu_group * smp_topo_alloc(u_int count) { static struct cpu_group *group = NULL; static u_int index; u_int curr; if (group == NULL) { group = mallocarray((mp_maxid + 1) * MAX_CACHE_LEVELS + 1, sizeof(*group), M_DEVBUF, M_WAITOK | M_ZERO); } curr = index; index += count; return (&group[curr]); } struct cpu_group * smp_topo_none(void) { struct cpu_group *top; top = smp_topo_alloc(1); top->cg_parent = NULL; top->cg_child = NULL; top->cg_mask = all_cpus; top->cg_count = mp_ncpus; top->cg_children = 0; top->cg_level = CG_SHARE_NONE; top->cg_flags = 0; return (top); } static int smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share, int count, int flags, int start) { char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ]; cpuset_t mask; int i; CPU_ZERO(&mask); for (i = 0; i < count; i++, start++) CPU_SET(start, &mask); child->cg_parent = parent; child->cg_child = NULL; child->cg_children = 0; child->cg_level = share; child->cg_count = count; child->cg_flags = flags; child->cg_mask = mask; parent->cg_children++; for (; parent != NULL; parent = parent->cg_parent) { if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask)) panic("Duplicate children in %p. mask (%s) child (%s)", parent, cpusetobj_strprint(cpusetbuf, &parent->cg_mask), cpusetobj_strprint(cpusetbuf2, &child->cg_mask)); CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask); parent->cg_count += child->cg_count; } return (start); } struct cpu_group * smp_topo_1level(int share, int count, int flags) { struct cpu_group *child; struct cpu_group *top; int packages; int cpu; int i; cpu = 0; packages = mp_ncpus / count; top = smp_topo_alloc(1 + packages); top->cg_child = child = top + 1; top->cg_level = CG_SHARE_NONE; for (i = 0; i < packages; i++, child++) cpu = smp_topo_addleaf(top, child, share, count, flags, cpu); return (top); } struct cpu_group * smp_topo_2level(int l2share, int l2count, int l1share, int l1count, int l1flags) { struct cpu_group *top; struct cpu_group *l1g; struct cpu_group *l2g; int cpu; int i; int j; cpu = 0; top = smp_topo_alloc(1 + mp_ncpus / (l2count * l1count) + mp_ncpus / l1count); l2g = top + 1; top->cg_child = l2g; top->cg_level = CG_SHARE_NONE; top->cg_children = mp_ncpus / (l2count * l1count); l1g = l2g + top->cg_children; for (i = 0; i < top->cg_children; i++, l2g++) { l2g->cg_parent = top; l2g->cg_child = l1g; l2g->cg_level = l2share; for (j = 0; j < l2count; j++, l1g++) cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count, l1flags, cpu); } return (top); } struct cpu_group * smp_topo_find(struct cpu_group *top, int cpu) { struct cpu_group *cg; cpuset_t mask; int children; int i; CPU_SETOF(cpu, &mask); cg = top; for (;;) { if (!CPU_OVERLAP(&cg->cg_mask, &mask)) return (NULL); if (cg->cg_children == 0) return (cg); children = cg->cg_children; for (i = 0, cg = cg->cg_child; i < children; cg++, i++) if (CPU_OVERLAP(&cg->cg_mask, &mask)) break; } return (NULL); } #else /* !SMP */ void smp_rendezvous_cpus(cpuset_t map, void (*setup_func)(void *), void (*action_func)(void *), void (*teardown_func)(void *), void *arg) { /* * In the !SMP case we just need to ensure the same initial conditions * as the SMP case. 
*/ spinlock_enter(); if (setup_func != NULL) setup_func(arg); if (action_func != NULL) action_func(arg); if (teardown_func != NULL) teardown_func(arg); spinlock_exit(); } void smp_rendezvous(void (*setup_func)(void *), void (*action_func)(void *), void (*teardown_func)(void *), void *arg) { smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg); } /* * Provide dummy SMP support for UP kernels. Modules that need to use SMP * APIs will still work using this dummy support. */ static void mp_setvariables_for_up(void *dummy) { mp_ncpus = 1; mp_ncores = 1; mp_maxid = PCPU_GET(cpuid); CPU_SETOF(mp_maxid, &all_cpus); KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero")); } SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setvariables_for_up, NULL); #endif /* SMP */ void smp_no_rendezvous_barrier(void *dummy) { #ifdef SMP KASSERT((!smp_started),("smp_no_rendezvous called and smp is started")); #endif } void smp_rendezvous_cpus_retry(cpuset_t map, void (* setup_func)(void *), void (* action_func)(void *), void (* teardown_func)(void *), void (* wait_func)(void *, int), struct smp_rendezvous_cpus_retry_arg *arg) { int cpu; CPU_COPY(&map, &arg->cpus); /* * Only one CPU to execute on. */ if (!smp_started) { spinlock_enter(); if (setup_func != NULL) setup_func(arg); if (action_func != NULL) action_func(arg); if (teardown_func != NULL) teardown_func(arg); spinlock_exit(); return; } /* * Execute an action on all specified CPUs while retrying until they * all acknowledge completion. */ for (;;) { smp_rendezvous_cpus( arg->cpus, setup_func, action_func, teardown_func, arg); if (CPU_EMPTY(&arg->cpus)) break; CPU_FOREACH(cpu) { if (!CPU_ISSET(cpu, &arg->cpus)) continue; wait_func(arg, cpu); } } } void smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *arg) { CPU_CLR_ATOMIC(curcpu, &arg->cpus); } /* * If (prio & PDROP) == 0: * Wait for specified idle threads to switch once. This ensures that even * preempted threads have cycled through the switch function once, * exiting their codepaths. This allows us to change global pointers * with no other synchronization. * If (prio & PDROP) != 0: * Force the specified CPUs to switch context at least once. */ int quiesce_cpus(cpuset_t map, const char *wmesg, int prio) { struct pcpu *pcpu; u_int *gen; int error; int cpu; error = 0; if ((prio & PDROP) == 0) { gen = mallocarray(sizeof(u_int), mp_maxid + 1, M_TEMP, M_WAITOK); for (cpu = 0; cpu <= mp_maxid; cpu++) { if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu)) continue; pcpu = pcpu_find(cpu); gen[cpu] = pcpu->pc_idlethread->td_generation; } } for (cpu = 0; cpu <= mp_maxid; cpu++) { if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu)) continue; pcpu = pcpu_find(cpu); thread_lock(curthread); sched_bind(curthread, cpu); thread_unlock(curthread); if ((prio & PDROP) != 0) continue; while (gen[cpu] == pcpu->pc_idlethread->td_generation) { error = tsleep(quiesce_cpus, prio & ~PDROP, wmesg, 1); if (error != EWOULDBLOCK) goto out; error = 0; } } out: thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); if ((prio & PDROP) == 0) free(gen, M_TEMP); return (error); } int quiesce_all_cpus(const char *wmesg, int prio) { return quiesce_cpus(all_cpus, wmesg, prio); } /* * Observe all CPUs not executing in critical section. * We are not in one so the check for us is safe. If the found * thread changes to something else we know the section was * exited as well. 
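 *
 * A sketch of the intended pattern (names hypothetical): publish a
 * new value for a global pointer whose readers dereference it only
 * inside a critical section, then reclaim the old value:
 *
 *      old = global_ptr;
 *      atomic_store_rel_ptr((uintptr_t *)&global_ptr, (uintptr_t)new);
 *      quiesce_all_critical();
 *      free(old, M_TEMP);
 *
 * Once quiesce_all_critical() returns, no CPU is still inside a
 * critical section it entered before the store.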
*/ void quiesce_all_critical(void) { struct thread *td, *newtd; struct pcpu *pcpu; int cpu; MPASS(curthread->td_critnest == 0); CPU_FOREACH(cpu) { pcpu = cpuid_to_pcpu[cpu]; td = pcpu->pc_curthread; for (;;) { if (td->td_critnest == 0) break; cpu_spinwait(); newtd = (struct thread *) atomic_load_acq_ptr((void *)pcpu->pc_curthread); if (td != newtd) break; } } } static void cpus_fence_seq_cst_issue(void *arg __unused) { atomic_thread_fence_seq_cst(); } /* * Send an IPI forcing a sequentially consistent fence. * * Allows replacement of an explicitly fence with a compiler barrier. * Trades speed up during normal execution for a significant slowdown when * the barrier is needed. */ void cpus_fence_seq_cst(void) { #ifdef SMP smp_rendezvous( smp_no_rendezvous_barrier, cpus_fence_seq_cst_issue, smp_no_rendezvous_barrier, NULL ); #else cpus_fence_seq_cst_issue(NULL); #endif } /* Extra care is taken with this sysctl because the data type is volatile */ static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS) { int error, active; active = smp_started; error = SYSCTL_OUT(req, &active, sizeof(active)); return (error); } #ifdef SMP void topo_init_node(struct topo_node *node) { bzero(node, sizeof(*node)); TAILQ_INIT(&node->children); } void topo_init_root(struct topo_node *root) { topo_init_node(root); root->type = TOPO_TYPE_SYSTEM; } /* * Add a child node with the given ID under the given parent. * Do nothing if there is already a child with that ID. */ struct topo_node * topo_add_node_by_hwid(struct topo_node *parent, int hwid, topo_node_type type, uintptr_t subtype) { struct topo_node *node; TAILQ_FOREACH_REVERSE(node, &parent->children, topo_children, siblings) { if (node->hwid == hwid && node->type == type && node->subtype == subtype) { return (node); } } node = malloc(sizeof(*node), M_TOPO, M_WAITOK); topo_init_node(node); node->parent = parent; node->hwid = hwid; node->type = type; node->subtype = subtype; TAILQ_INSERT_TAIL(&parent->children, node, siblings); parent->nchildren++; return (node); } /* * Find a child node with the given ID under the given parent. */ struct topo_node * topo_find_node_by_hwid(struct topo_node *parent, int hwid, topo_node_type type, uintptr_t subtype) { struct topo_node *node; TAILQ_FOREACH(node, &parent->children, siblings) { if (node->hwid == hwid && node->type == type && node->subtype == subtype) { return (node); } } return (NULL); } /* * Given a node change the order of its parent's child nodes such * that the node becomes the firt child while preserving the cyclic * order of the children. In other words, the given node is promoted * by rotation. */ void topo_promote_child(struct topo_node *child) { struct topo_node *next; struct topo_node *node; struct topo_node *parent; parent = child->parent; next = TAILQ_NEXT(child, siblings); TAILQ_REMOVE(&parent->children, child, siblings); TAILQ_INSERT_HEAD(&parent->children, child, siblings); while (next != NULL) { node = next; next = TAILQ_NEXT(node, siblings); TAILQ_REMOVE(&parent->children, node, siblings); TAILQ_INSERT_AFTER(&parent->children, child, node, siblings); child = node; } } /* * Iterate to the next node in the depth-first search (traversal) of * the topology tree. 
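 *
 * For illustration, counting the logical processors in a subtree
 * reduces to a walk like:
 *
 *      count = 0;
 *      for (node = topo_next_node(root, root); node != NULL;
 *          node = topo_next_node(root, node))
 *              if (node->type == TOPO_TYPE_PU)
 *                      count++;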
*/ struct topo_node * topo_next_node(struct topo_node *top, struct topo_node *node) { struct topo_node *next; if ((next = TAILQ_FIRST(&node->children)) != NULL) return (next); if ((next = TAILQ_NEXT(node, siblings)) != NULL) return (next); while (node != top && (node = node->parent) != top) if ((next = TAILQ_NEXT(node, siblings)) != NULL) return (next); return (NULL); } /* * Iterate to the next node in the depth-first search of the topology tree, * but without descending below the current node. */ struct topo_node * topo_next_nonchild_node(struct topo_node *top, struct topo_node *node) { struct topo_node *next; if ((next = TAILQ_NEXT(node, siblings)) != NULL) return (next); while (node != top && (node = node->parent) != top) if ((next = TAILQ_NEXT(node, siblings)) != NULL) return (next); return (NULL); } /* * Assign the given ID to the given topology node that represents a logical * processor. */ void topo_set_pu_id(struct topo_node *node, cpuid_t id) { KASSERT(node->type == TOPO_TYPE_PU, ("topo_set_pu_id: wrong node type: %u", node->type)); KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0, ("topo_set_pu_id: cpuset already not empty")); node->id = id; CPU_SET(id, &node->cpuset); node->cpu_count = 1; node->subtype = 1; while ((node = node->parent) != NULL) { KASSERT(!CPU_ISSET(id, &node->cpuset), ("logical ID %u is already set in node %p", id, node)); CPU_SET(id, &node->cpuset); node->cpu_count++; } } static struct topology_spec { topo_node_type type; bool match_subtype; uintptr_t subtype; } topology_level_table[TOPO_LEVEL_COUNT] = { [TOPO_LEVEL_PKG] = { .type = TOPO_TYPE_PKG, }, [TOPO_LEVEL_GROUP] = { .type = TOPO_TYPE_GROUP, }, [TOPO_LEVEL_CACHEGROUP] = { .type = TOPO_TYPE_CACHE, .match_subtype = true, .subtype = CG_SHARE_L3, }, [TOPO_LEVEL_CORE] = { .type = TOPO_TYPE_CORE, }, [TOPO_LEVEL_THREAD] = { .type = TOPO_TYPE_PU, }, }; static bool topo_analyze_table(struct topo_node *root, int all, enum topo_level level, struct topo_analysis *results) { struct topology_spec *spec; struct topo_node *node; int count; if (level >= TOPO_LEVEL_COUNT) return (true); spec = &topology_level_table[level]; count = 0; node = topo_next_node(root, root); while (node != NULL) { if (node->type != spec->type || (spec->match_subtype && node->subtype != spec->subtype)) { node = topo_next_node(root, node); continue; } if (!all && CPU_EMPTY(&node->cpuset)) { node = topo_next_nonchild_node(root, node); continue; } count++; if (!topo_analyze_table(node, all, level + 1, results)) return (false); node = topo_next_nonchild_node(root, node); } /* No explicit subgroups is essentially one subgroup. */ if (count == 0) { count = 1; if (!topo_analyze_table(root, all, level + 1, results)) return (false); } if (results->entities[level] == -1) results->entities[level] = count; else if (results->entities[level] != count) return (false); return (true); } /* * Check if the topology is uniform, that is, each package has the same number * of cores in it and each core has the same number of threads (logical * processors) in it. If so, calculate the number of packages, the number of * groups per package, the number of cachegroups per group, and the number of * logical processors per cachegroup. 'all' parameter tells whether to include * administratively disabled logical processors into the analysis. 
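 *
 * For example, on a uniform machine with two packages, four cores
 * per package, two SMT threads per core, and no distinct group or
 * cachegroup structure, a successful analysis reports:
 *
 *   entities[TOPO_LEVEL_PKG] = 2
 *   entities[TOPO_LEVEL_GROUP] = 1
 *   entities[TOPO_LEVEL_CACHEGROUP] = 1
 *   entities[TOPO_LEVEL_CORE] = 4
 *   entities[TOPO_LEVEL_THREAD] = 2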
*/ int topo_analyze(struct topo_node *topo_root, int all, struct topo_analysis *results) { results->entities[TOPO_LEVEL_PKG] = -1; results->entities[TOPO_LEVEL_CORE] = -1; results->entities[TOPO_LEVEL_THREAD] = -1; results->entities[TOPO_LEVEL_GROUP] = -1; results->entities[TOPO_LEVEL_CACHEGROUP] = -1; if (!topo_analyze_table(topo_root, all, TOPO_LEVEL_PKG, results)) return (0); KASSERT(results->entities[TOPO_LEVEL_PKG] > 0, ("bug in topology or analysis")); return (1); } #endif /* SMP */ diff --git a/sys/powerpc/include/kdb.h b/sys/powerpc/include/kdb.h index f399b7523143..6594acb77fd1 100644 --- a/sys/powerpc/include/kdb.h +++ b/sys/powerpc/include/kdb.h @@ -1,71 +1,69 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2004 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_KDB_H_ #define _MACHINE_KDB_H_ #include #include #include #include #include void kdb_cpu_clear_singlestep(void); void kdb_cpu_set_singlestep(void); -#define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid] - static __inline void kdb_cpu_sync_icache(unsigned char *addr, size_t size) { __syncicache(addr, size); } static __inline void kdb_cpu_trap(int vector, int _) { } static __inline int kdb_cpu_set_watchpoint(vm_offset_t addr, vm_size_t size, int access) { return (ENXIO); } static __inline int kdb_cpu_clr_watchpoint(vm_offset_t addr, vm_size_t size) { return (0); } #endif /* _MACHINE_KDB_H_ */ diff --git a/sys/powerpc/include/smp.h b/sys/powerpc/include/smp.h index 67e9d51f0bc4..6fcff18ffa4b 100644 --- a/sys/powerpc/include/smp.h +++ b/sys/powerpc/include/smp.h @@ -1,68 +1,66 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2008 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_SMP_H_ #define _MACHINE_SMP_H_ #ifdef _KERNEL #define IPI_AST 0 #define IPI_PREEMPT 1 #define IPI_RENDEZVOUS 2 #define IPI_STOP 3 #define IPI_STOP_HARD 3 #define IPI_HARDCLOCK 4 #ifndef LOCORE #include #include void ipi_all_but_self(int ipi); void ipi_cpu(int cpu, u_int ipi); void ipi_selected(cpuset_t cpus, int ipi); struct cpuref { uintptr_t cr_hwref; u_int cr_cpuid; u_int cr_domain; }; void pmap_cpu_bootstrap(int); void cpudep_ap_early_bootstrap(void); uintptr_t cpudep_ap_bootstrap(void); void cpudep_ap_setup(void); void machdep_ap_bootstrap(void); -extern struct pcb stoppcbs[]; - #endif /* !LOCORE */ #endif /* _KERNEL */ #endif /* !_MACHINE_SMP_H */ diff --git a/sys/powerpc/powerpc/mp_machdep.c b/sys/powerpc/powerpc/mp_machdep.c index df602912b234..59a000e8e24d 100644 --- a/sys/powerpc/powerpc/mp_machdep.c +++ b/sys/powerpc/powerpc/mp_machdep.c @@ -1,388 +1,387 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2008 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pic_if.h" volatile static int ap_awake; volatile static u_int ap_letgo; volatile static u_quad_t ap_timebase; static struct mtx ap_boot_mtx; -struct pcb stoppcbs[MAXCPU]; void machdep_ap_bootstrap(void) { PCPU_SET(awake, 1); __asm __volatile("msync; isync"); while (ap_letgo == 0) nop_prio_vlow(); nop_prio_medium(); /* * Set timebase as soon as possible to meet an implicit rendezvous * from cpu_mp_unleash(), which sets ap_letgo and then immediately * sets timebase. * * Note that this is instrinsically racy and is only relevant on * platforms that do not support better mechanisms. */ platform_smp_timebase_sync(ap_timebase, 1); /* Give platform code a chance to do anything else necessary */ platform_smp_ap_init(); /* Initialize decrementer */ decr_ap_init(); /* Serialize console output and AP count increment */ mtx_lock_spin(&ap_boot_mtx); ap_awake++; if (bootverbose) printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid)); else printf("%s%d%s", ap_awake == 2 ? "Launching APs: " : "", PCPU_GET(cpuid), ap_awake == mp_ncpus ? "\n" : " "); mtx_unlock_spin(&ap_boot_mtx); while(smp_started == 0) ; /* Start per-CPU event timers. */ cpu_initclocks_ap(); /* Announce ourselves awake, and enter the scheduler */ sched_ap_entry(); } void cpu_mp_setmaxid(void) { struct cpuref cpuref; int error; mp_ncpus = 0; mp_maxid = 0; error = platform_smp_first_cpu(&cpuref); while (!error) { mp_ncpus++; mp_maxid = max(cpuref.cr_cpuid, mp_maxid); error = platform_smp_next_cpu(&cpuref); } /* Sanity. */ if (mp_ncpus == 0) mp_ncpus = 1; } int cpu_mp_probe(void) { /* * We're not going to enable SMP if there's only 1 processor. 
*/ return (mp_ncpus > 1); } void cpu_mp_start(void) { struct cpuref bsp, cpu; struct pcpu *pc; int domain, error; error = platform_smp_get_bsp(&bsp); KASSERT(error == 0, ("Don't know BSP")); error = platform_smp_first_cpu(&cpu); while (!error) { if (cpu.cr_cpuid >= MAXCPU) { printf("SMP: cpu%d: skipped -- ID out of range\n", cpu.cr_cpuid); goto next; } if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) { printf("SMP: cpu%d: skipped - duplicate ID\n", cpu.cr_cpuid); goto next; } if (vm_ndomains > 1) domain = cpu.cr_domain; else domain = 0; if (cpu.cr_cpuid != bsp.cr_cpuid) { void *dpcpu; pc = &__pcpu[cpu.cr_cpuid]; dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain), DPCPU_SIZE, M_WAITOK | M_ZERO); pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc)); dpcpu_init(dpcpu, cpu.cr_cpuid); } else { pc = pcpup; pc->pc_cpuid = bsp.cr_cpuid; pc->pc_bsp = 1; } pc->pc_domain = domain; pc->pc_hwref = cpu.cr_hwref; CPU_SET(pc->pc_cpuid, &cpuset_domain[pc->pc_domain]); KASSERT(pc->pc_domain < MAXMEMDOM, ("bad domain value %d\n", pc->pc_domain)); CPU_SET(pc->pc_cpuid, &all_cpus); next: error = platform_smp_next_cpu(&cpu); } #ifdef SMP platform_smp_probe_threads(); #endif } void cpu_mp_announce(void) { struct pcpu *pc; int i; if (!bootverbose) return; CPU_FOREACH(i) { pc = pcpu_find(i); if (pc == NULL) continue; printf("cpu%d: dev=%x domain=%d ", i, (int)pc->pc_hwref, pc->pc_domain); if (pc->pc_bsp) printf(" (BSP)"); printf("\n"); } } static void cpu_mp_unleash(void *dummy) { struct pcpu *pc; int cpus, timeout; int ret; if (mp_ncpus <= 1) return; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); cpus = 0; smp_cpus = 0; #ifdef BOOKE tlb1_ap_prep(); #endif STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { cpus++; if (!pc->pc_bsp) { if (bootverbose) printf("Waking up CPU %d (dev=%x)\n", pc->pc_cpuid, (int)pc->pc_hwref); pc->pc_flags = PCPU_GET(flags); /* Copy cached CPU flags */ ret = platform_smp_start_cpu(pc); if (ret == 0) { timeout = 2000; /* wait 2sec for the AP */ while (!pc->pc_awake && --timeout > 0) DELAY(1000); } } else { pc->pc_awake = 1; } if (pc->pc_awake) { if (bootverbose) printf("Adding CPU %d, hwref=%jx, awake=%x\n", pc->pc_cpuid, (uintmax_t)pc->pc_hwref, pc->pc_awake); smp_cpus++; } else CPU_SET(pc->pc_cpuid, &stopped_cpus); } ap_awake = 1; /* Provide our current DEC and TB values for APs */ ap_timebase = mftb() + 10; __asm __volatile("msync; isync"); /* Let APs continue */ atomic_store_rel_int(&ap_letgo, 1); platform_smp_timebase_sync(ap_timebase, 0); while (ap_awake < smp_cpus) ; if (smp_cpus != cpus || cpus != mp_ncpus) { printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n", mp_ncpus, cpus, smp_cpus); } if (smp_cpus > 1) atomic_store_rel_int(&smp_started, 1); /* Let the APs get into the scheduler */ DELAY(10000); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL); int powerpc_ipi_handler(void *arg) { u_int cpuid; uint32_t ipimask; CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr()); ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask)); if (ipimask == 0) return (FILTER_STRAY); if (ipimask & (1 << IPI_AST)) { CTR1(KTR_SMP, "%s: IPI_AST", __func__); } if (ipimask & (1 << IPI_PREEMPT)) { CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); } if (ipimask & (1 << IPI_RENDEZVOUS)) { CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__); smp_rendezvous_action(); } if (ipimask & (1 << IPI_STOP)) { /* * IPI_STOP_HARD is mapped to IPI_STOP so it is not * necessary to add such case. 
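 *
 * The savectx() below records this CPU's register state in
 * stoppcbs[cpuid]. With stoppcbs now allocated at boot for
 * mp_maxid + 1 CPUs instead of being statically sized to MAXCPU,
 * the debugger can still find the saved context of any stopped CPU
 * without the kernel carrying MAXCPU pcbs it may never use.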
*/ CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)", __func__); cpuid = PCPU_GET(cpuid); savectx(&stoppcbs[cpuid]); CPU_SET_ATOMIC(cpuid, &stopped_cpus); while (!CPU_ISSET(cpuid, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpuid, &stopped_cpus); CPU_CLR_ATOMIC(cpuid, &started_cpus); CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__); } if (ipimask & (1 << IPI_HARDCLOCK)) { CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); } return (FILTER_HANDLED); } static void ipi_send(struct pcpu *pc, int ipi) { CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__, pc, pc->pc_cpuid, ipi); atomic_set_32(&pc->pc_ipimask, (1 << ipi)); powerpc_sync(); PIC_IPI(root_pic, pc->pc_cpuid); CTR1(KTR_SMP, "%s: sent", __func__); } /* Send an IPI to a set of cpus. */ void ipi_selected(cpuset_t cpus, int ipi) { struct pcpu *pc; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (CPU_ISSET(pc->pc_cpuid, &cpus)) ipi_send(pc, ipi); } } /* Send an IPI to a specific CPU. */ void ipi_cpu(int cpu, u_int ipi) { ipi_send(cpuid_to_pcpu[cpu], ipi); } /* Send an IPI to all CPUs EXCEPT myself. */ void ipi_all_but_self(int ipi) { struct pcpu *pc; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (pc != pcpup) ipi_send(pc, ipi); } } diff --git a/sys/riscv/include/kdb.h b/sys/riscv/include/kdb.h index 0fdff26bb432..24c6145853c3 100644 --- a/sys/riscv/include/kdb.h +++ b/sys/riscv/include/kdb.h @@ -1,76 +1,74 @@ /*- * Copyright (c) 2004 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_KDB_H_ #define _MACHINE_KDB_H_ #include -#define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid] - static __inline void kdb_cpu_clear_singlestep(void) { } static __inline void kdb_cpu_set_singlestep(void) { } static __inline void kdb_cpu_sync_icache(unsigned char *addr, size_t size) { /* * Other CPUs flush their instruction cache when resuming from * IPI_STOP. 
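 *
 * Hence only the local hart needs an explicit FENCE.I here, to make
 * a freshly written breakpoint visible to its own instruction
 * fetches; the remote harts issue theirs on the IPI_STOP restart
 * path in mp_machdep.c.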
*/ fence_i(); } static __inline void kdb_cpu_trap(int type, int code) { } static __inline int kdb_cpu_set_watchpoint(vm_offset_t addr, vm_size_t size, int access) { return (ENXIO); } static __inline int kdb_cpu_clr_watchpoint(vm_offset_t addr, vm_size_t size) { return (0); } #endif /* _MACHINE_KDB_H_ */ diff --git a/sys/riscv/include/smp.h b/sys/riscv/include/smp.h index a1d588f31567..1760647903cb 100644 --- a/sys/riscv/include/smp.h +++ b/sys/riscv/include/smp.h @@ -1,57 +1,55 @@ /*- * Copyright (c) 2016 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_SMP_H_ #define _MACHINE_SMP_H_ #include #define IPI_AST (1 << 0) #define IPI_PREEMPT (1 << 1) #define IPI_RENDEZVOUS (1 << 2) #define IPI_STOP (1 << 3) #define IPI_STOP_HARD (1 << 4) #define IPI_HARDCLOCK (1 << 5) #define INTR_IPI_COUNT 1 void ipi_all_but_self(u_int ipi); void ipi_cpu(int cpu, u_int ipi); void ipi_selected(cpuset_t cpus, u_int ipi); -extern struct pcb stoppcbs[]; - #endif /* !_MACHINE_SMP_H_ */ diff --git a/sys/riscv/riscv/mp_machdep.c b/sys/riscv/riscv/mp_machdep.c index cd75f2dbbb73..018d7c653304 100644 --- a/sys/riscv/riscv/mp_machdep.c +++ b/sys/riscv/riscv/mp_machdep.c @@ -1,581 +1,579 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * Copyright (c) 2016 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_kstack_pages.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #define MP_BOOTSTACK_SIZE (kstack_pages * PAGE_SIZE) uint32_t __riscv_boot_ap[MAXCPU]; static enum { CPUS_UNKNOWN, #ifdef FDT CPUS_FDT, #endif } cpu_enum_method; static device_identify_t riscv64_cpu_identify; static device_probe_t riscv64_cpu_probe; static device_attach_t riscv64_cpu_attach; static int ipi_handler(void *); -struct pcb stoppcbs[MAXCPU]; - extern uint32_t boot_hart; extern cpuset_t all_harts; #ifdef INVARIANTS static uint32_t cpu_reg[MAXCPU][2]; #endif static device_t cpu_list[MAXCPU]; void mpentry(u_long hartid); void init_secondary(uint64_t); static struct mtx ap_boot_mtx; /* Stacks for AP initialization, discarded once idle threads are started. */ void *bootstack; static void *bootstacks[MAXCPU]; /* Count of started APs, used to synchronize access to bootstack. */ static volatile int aps_started; /* Set to 1 once we're ready to let the APs out of the pen. 
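 *
 * The boot handshake in outline: each AP increments aps_started once
 * it is running on its boot stack, then spins in wfi until
 * release_aps() on the BSP performs the release store to aps_ready;
 * the BSP in turn waits for aps_started to advance before repointing
 * the shared bootstack at the next AP's stack.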
*/ static volatile int aps_ready; /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; static device_method_t riscv64_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_identify, riscv64_cpu_identify), DEVMETHOD(device_probe, riscv64_cpu_probe), DEVMETHOD(device_attach, riscv64_cpu_attach), DEVMETHOD_END }; static driver_t riscv64_cpu_driver = { "riscv64_cpu", riscv64_cpu_methods, 0 }; DRIVER_MODULE(riscv64_cpu, cpu, riscv64_cpu_driver, 0, 0); static void riscv64_cpu_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "riscv64_cpu", -1) != NULL) return; if (BUS_ADD_CHILD(parent, 0, "riscv64_cpu", -1) == NULL) device_printf(parent, "add child failed\n"); } static int riscv64_cpu_probe(device_t dev) { u_int cpuid; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); device_quiet(dev); return (0); } static int riscv64_cpu_attach(device_t dev) { const uint32_t *reg; size_t reg_size; u_int cpuid; int i; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid)); reg = cpu_get_cpuid(dev, ®_size); if (reg == NULL) return (EINVAL); if (bootverbose) { device_printf(dev, "register <"); for (i = 0; i < reg_size; i++) printf("%s%x", (i == 0) ? "" : " ", reg[i]); printf(">\n"); } /* Set the device to start it later */ cpu_list[cpuid] = dev; return (0); } static void release_aps(void *dummy __unused) { cpuset_t mask; int i; if (mp_ncpus == 1) return; /* Setup the IPI handler */ riscv_setup_ipihandler(ipi_handler); atomic_store_rel_int(&aps_ready, 1); /* Wake up the other CPUs */ mask = all_harts; CPU_CLR(boot_hart, &mask); printf("Release APs\n"); sbi_send_ipi(mask.__bits); for (i = 0; i < 2000; i++) { if (atomic_load_acq_int(&smp_started)) return; DELAY(1000); } printf("APs not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); void init_secondary(uint64_t hart) { struct pcpu *pcpup; u_int cpuid; /* Renumber this cpu */ cpuid = hart; if (cpuid < boot_hart) cpuid += mp_maxid + 1; cpuid -= boot_hart; /* Setup the pcpu pointer */ pcpup = &__pcpu[cpuid]; __asm __volatile("mv tp, %0" :: "r"(pcpup)); /* Workaround: make sure wfi doesn't halt the hart */ csr_set(sie, SIE_SSIE); csr_set(sip, SIE_SSIE); /* Signal the BSP and spin until it has released all APs. */ atomic_add_int(&aps_started, 1); while (!atomic_load_int(&aps_ready)) __asm __volatile("wfi"); /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pcpup->pc_curthread = pcpup->pc_idlethread; schedinit_ap(); /* Enable software interrupts */ riscv_unmask_ipi(); #ifndef EARLY_AP_STARTUP /* Start per-CPU event timers. */ cpu_initclocks_ap(); #endif /* Enable external (PLIC) interrupts */ csr_set(sie, SIE_SEIE); /* Activate this hart in the kernel pmap. */ CPU_SET_ATOMIC(hart, &kernel_pmap->pm_active); /* Activate process 0's pmap. 
*/ pmap_activate_boot(vmspace_pmap(proc0.p_vmspace)); mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPI's, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); if (bootverbose) printf("Secondary CPU %u fully online\n", cpuid); /* Enter the scheduler */ sched_ap_entry(); panic("scheduler returned us to init_secondary"); /* NOTREACHED */ } static void smp_after_idle_runnable(void *arg __unused) { int cpu; if (mp_ncpus == 1) return; KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__)); /* * Wait for all APs to handle an interrupt. After that, we know that * the APs have entered the scheduler at least once, so the boot stacks * are safe to free. */ smp_rendezvous(smp_no_rendezvous_barrier, NULL, smp_no_rendezvous_barrier, NULL); for (cpu = 1; cpu <= mp_maxid; cpu++) { if (bootstacks[cpu] != NULL) kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE); } } SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY, smp_after_idle_runnable, NULL); static int ipi_handler(void *arg) { u_int ipi_bitmap; u_int cpu, ipi; int bit; csr_clear(sip, SIP_SSIP); cpu = PCPU_GET(cpuid); mb(); ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis)); if (ipi_bitmap == 0) return (FILTER_HANDLED); while ((bit = ffs(ipi_bitmap))) { bit = (bit - 1); ipi = (1 << bit); ipi_bitmap &= ~ipi; mb(); switch (ipi) { case IPI_AST: CTR0(KTR_SMP, "IPI_AST"); break; case IPI_PREEMPT: CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); break; case IPI_RENDEZVOUS: CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); break; case IPI_STOP: case IPI_STOP_HARD: CTR0(KTR_SMP, (ipi == IPI_STOP) ? "IPI_STOP" : "IPI_STOP_HARD"); savectx(&stoppcbs[cpu]); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); CTR0(KTR_SMP, "IPI_STOP (restart)"); /* * The kernel debugger might have set a breakpoint, * so flush the instruction cache. */ fence_i(); break; case IPI_HARDCLOCK: CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); break; default: panic("Unknown IPI %#0x on cpu %d", ipi, curcpu); } } return (FILTER_HANDLED); } struct cpu_group * cpu_topo(void) { return (smp_topo_none()); } /* Determine if we running MP machine */ int cpu_mp_probe(void) { return (mp_ncpus > 1); } #ifdef FDT static bool cpu_check_mmu(u_int id __unused, phandle_t node, u_int addr_size __unused, pcell_t *reg __unused) { char type[32]; /* Check if this hart supports MMU. */ if (OF_getprop(node, "mmu-type", (void *)type, sizeof(type)) == -1 || strncmp(type, "riscv,none", 10) == 0) return (false); return (true); } static bool cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { struct pcpu *pcpup; vm_paddr_t start_addr; uint64_t hart; u_int cpuid; int naps; int error; if (!cpu_check_mmu(id, node, addr_size, reg)) return (false); KASSERT(id < MAXCPU, ("Too many CPUs")); KASSERT(addr_size == 1 || addr_size == 2, ("Invalid register size")); #ifdef INVARIANTS cpu_reg[id][0] = reg[0]; if (addr_size == 2) cpu_reg[id][1] = reg[1]; #endif hart = reg[0]; if (addr_size == 2) { hart <<= 32; hart |= reg[1]; } KASSERT(hart < MAXCPU, ("Too many harts.")); /* We are already running on this cpu */ if (hart == boot_hart) return (true); /* * Rotate the CPU IDs to put the boot CPU as CPU 0. * We keep the other CPUs ordered. 
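 *
 * A worked example: with four harts and boot_hart == 2 (so mp_maxid
 * == 3), the arithmetic below maps hart 2 -> cpuid 0, hart 3 ->
 * cpuid 1, hart 0 -> cpuid 2, and hart 1 -> cpuid 3: the BSP becomes
 * CPU 0 and the other harts keep their cyclic order.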
*/ cpuid = hart; if (cpuid < boot_hart) cpuid += mp_maxid + 1; cpuid -= boot_hart; /* Check if we are able to start this cpu */ if (cpuid > mp_maxid) return (false); /* * Depending on the SBI implementation, APs are waiting either in * locore.S or to be activated explicitly, via SBI call. */ if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0) { start_addr = pmap_kextract((vm_offset_t)mpentry); error = sbi_hsm_hart_start(hart, start_addr, 0); if (error != 0) { mp_ncpus--; /* Send a warning to the user and continue. */ printf("AP %u (hart %lu) failed to start, error %d\n", cpuid, hart, error); return (false); } } pcpup = &__pcpu[cpuid]; pcpu_init(pcpup, cpuid, sizeof(struct pcpu)); pcpup->pc_hart = hart; dpcpu[cpuid - 1] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO); dpcpu_init(dpcpu[cpuid - 1], cpuid); bootstacks[cpuid] = kmem_malloc(MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO); naps = atomic_load_int(&aps_started); bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE; if (bootverbose) printf("Starting CPU %u (hart %lx)\n", cpuid, hart); atomic_store_32(&__riscv_boot_ap[hart], 1); /* Wait for the AP to switch to its boot stack. */ while (atomic_load_int(&aps_started) < naps + 1) cpu_spinwait(); CPU_SET(cpuid, &all_cpus); CPU_SET(hart, &all_harts); return (true); } #endif /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { u_int cpu; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); CPU_SET(0, &all_cpus); CPU_SET(boot_hart, &all_harts); switch(cpu_enum_method) { #ifdef FDT case CPUS_FDT: ofw_cpu_early_foreach(cpu_init_fdt, true); break; #endif case CPUS_UNKNOWN: break; } CPU_FOREACH(cpu) { /* Already identified. */ if (cpu == 0) continue; identify_cpu(cpu); } } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { u_int cpu; CPU_FOREACH(cpu) { /* Already announced. */ if (cpu == 0) continue; printcpuinfo(cpu); } } void cpu_mp_setmaxid(void) { int cores; #ifdef FDT cores = ofw_cpu_early_foreach(cpu_check_mmu, true); if (cores > 0) { cores = MIN(cores, MAXCPU); if (bootverbose) printf("Found %d CPUs in the device tree\n", cores); mp_ncpus = cores; mp_maxid = cores - 1; cpu_enum_method = CPUS_FDT; } else #endif { if (bootverbose) printf("No CPU data, limiting to 1 core\n"); mp_ncpus = 1; mp_maxid = 0; } if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) { if (cores > 0 && cores < mp_ncpus) { mp_ncpus = cores; mp_maxid = cores - 1; } } } diff --git a/sys/sys/param.h b/sys/sys/param.h index 25fac3efde1d..5bdc3e2ebee4 100644 --- a/sys/sys/param.h +++ b/sys/sys/param.h @@ -1,392 +1,392 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)param.h 8.3 (Berkeley) 4/4/95 * $FreeBSD$ */ #ifndef _SYS_PARAM_H_ #define _SYS_PARAM_H_ #include #define BSD 199506 /* System version (year & month). */ #define BSD4_3 1 #define BSD4_4 1 /* * __FreeBSD_version numbers are documented in the Porter's Handbook. * If you bump the version for any reason, you should update the documentation * there. * Currently this lives here in the doc/ repository: * * documentation/content/en/books/porters-handbook/versions/_index.adoc * * Encoding: <major><two digit minor>Rxx * 'R' is in the range 0 to 4 if this is a release branch or * X.0-CURRENT before releng/X.0 is created, otherwise 'R' is * in the range 5 to 9. * Shorthand: MMmmXXX * * __FreeBSD_version is bumped every time there's a change in the base system * that's noteworthy. A noteworthy change is any change which changes the * kernel's KBI in -CURRENT, one that changes some detail about the system that * external software (or the ports system) would want to know about, one that * adds a system call, one that adds or deletes a shipped library, a security * fix, or similar change not specifically noted here. Bumps should be limited * to one per day / a couple per week except for security fixes. * * The approved way to obtain this from a shell script is: * awk '/^\#define[[:space:]]*__FreeBSD_version/ {print $3}' * Other methods to parse this file may work, but are not guaranteed against * future changes. The above script works back to FreeBSD 3.x when this macro * was introduced. This number is propagated to other places needing it that * cannot include sys/param.h and should only be updated here. */ #undef __FreeBSD_version -#define __FreeBSD_version 1400088 +#define __FreeBSD_version 1400089 /* * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD, * which by definition is always true on FreeBSD. This macro is also defined * on other systems that use the kernel of FreeBSD, such as GNU/kFreeBSD. * * It is tempting to use this macro in userland code when we want to enable * kernel-specific routines, and in fact it's fine to do this in code that * is part of FreeBSD itself. However, be aware that as the presence of this * macro is still not widespread (e.g. older FreeBSD versions, 3rd party * compilers, etc), it is STRONGLY DISCOURAGED to check for this macro in * external applications without also checking for __FreeBSD__ as an * alternative.
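The MMmmXXX shorthand above decodes mechanically. As an illustration (editor's addition, not part of the change): only P_OSREL_MAJOR(), defined a few lines further down, is a real macro from this header; the mm/XXX splits below are informal readings of the shorthand.

#include <stdio.h>

#define P_OSREL_MAJOR(x)    ((x) / 100000)

int
main(void)
{
    int v = 1400089;                        /* the value set by this bump */

    printf("MM  = %d\n", P_OSREL_MAJOR(v)); /* 14 */
    printf("mm  = %d\n", (v / 1000) % 100); /* 0 */
    printf("XXX = %d\n", v % 1000);         /* 89 */
    return (0);
}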
*/ #undef __FreeBSD_kernel__ #define __FreeBSD_kernel__ #if defined(_KERNEL) || defined(_WANT_P_OSREL) #define P_OSREL_SIGWAIT 700000 #define P_OSREL_SIGSEGV 700004 #define P_OSREL_MAP_ANON 800104 #define P_OSREL_MAP_FSTRICT 1100036 #define P_OSREL_SHUTDOWN_ENOTCONN 1100077 #define P_OSREL_MAP_GUARD 1200035 #define P_OSREL_WRFSBASE 1200041 #define P_OSREL_CK_CYLGRP 1200046 #define P_OSREL_VMTOTAL64 1200054 #define P_OSREL_CK_SUPERBLOCK 1300000 #define P_OSREL_CK_INODE 1300005 #define P_OSREL_POWERPC_NEW_AUX_ARGS 1300070 #define P_OSREL_TIDPID 1400079 #define P_OSREL_ARM64_SPSR 1400084 #define P_OSREL_MAJOR(x) ((x) / 100000) #endif #ifndef LOCORE #include #endif /* * Machine-independent constants (some used in following include files). * Redefined constants are from POSIX 1003.1 limits file. * * MAXCOMLEN should be >= sizeof(ac_comm) (see ) */ #include #define MAXCOMLEN 19 /* max command name remembered */ #define MAXINTERP PATH_MAX /* max interpreter file name length */ #define MAXLOGNAME 33 /* max login name length (incl. NUL) */ #define MAXUPRC CHILD_MAX /* max simultaneous processes */ #define NCARGS ARG_MAX /* max bytes for an exec function */ #define NGROUPS (NGROUPS_MAX+1) /* max number groups */ #define NOFILE OPEN_MAX /* max open files per process */ #define NOGROUP 65535 /* marker for empty group set member */ #define MAXHOSTNAMELEN 256 /* max hostname size */ #define SPECNAMELEN 255 /* max length of devicename */ /* More types and definitions used throughout the kernel. */ #ifdef _KERNEL #include #include #ifndef LOCORE #include #include #endif #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #endif #ifndef _KERNEL #ifndef LOCORE /* Signals. */ #include #endif #endif /* Machine type dependent parameters. */ #include #ifndef _KERNEL #include #endif #ifndef DEV_BSHIFT #define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ #endif #define DEV_BSIZE (1<>PAGE_SHIFT) #endif /* * btodb() is messy and perhaps slow because `bytes' may be an off_t. We * want to shift an unsigned type to avoid sign extension and we don't * want to widen `bytes' unnecessarily. Assume that the result fits in * a daddr_t. */ #ifndef btodb #define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ (sizeof (bytes) > sizeof(long) \ ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \ : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT)) #endif #ifndef dbtob #define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ ((off_t)(db) << DEV_BSHIFT) #endif #define PRIMASK 0x0ff #define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */ #define PDROP 0x200 /* OR'd with pri to stop re-entry of interlock mutex */ #define PNOLOCK 0x400 /* OR'd with pri to allow sleeping w/o a lock */ #define PRILASTFLAG 0x400 /* Last flag defined above */ #define NZERO 0 /* default "nice" */ #define NBBY 8 /* number of bits in a byte */ #define NBPW sizeof(int) /* number of bytes per word (integer) */ #define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */ #define NODEV (dev_t)(-1) /* non-existent device */ /* * File system parameters and macros. * * MAXBSIZE - Filesystems are made out of blocks of at most MAXBSIZE bytes * per block. MAXBSIZE may be made larger without affecting * any existing filesystems as long as it does not exceed MAXPHYS, * and may be made smaller at the risk of not being able to use * filesystems which require a block size exceeding MAXBSIZE. * * MAXBCACHEBUF - Maximum size of a buffer in the buffer cache.
This must * be >= MAXBSIZE and can be set differently for different * architectures by defining it in . * Making this larger allows NFS to do larger reads/writes. * * BKVASIZE - Nominal buffer space per buffer, in bytes. BKVASIZE is the * minimum KVM memory reservation the kernel is willing to make. * Filesystems can of course request smaller chunks. Actual * backing memory uses a chunk size of a page (PAGE_SIZE). * The default value here can be overridden on a per-architecture * basis by defining it in . * * If you make BKVASIZE too small you risk seriously fragmenting * the buffer KVM map which may slow things down a bit. If you * make it too big the kernel will not be able to optimally use * the KVM memory reserved for the buffer cache and will wind * up with too-few buffers. * * The default is 16384, roughly 2x the block size used by a * normal UFS filesystem. */ #define MAXBSIZE 65536 /* must be power of 2 */ #ifndef MAXBCACHEBUF #define MAXBCACHEBUF MAXBSIZE /* must be a power of 2 >= MAXBSIZE */ #endif #ifndef BKVASIZE #define BKVASIZE 16384 /* must be power of 2 */ #endif #define BKVAMASK (BKVASIZE-1) /* * MAXPATHLEN defines the longest permissible path length after expanding * symbolic links. It is used to allocate a temporary buffer from the buffer * pool in which to do the name expansion, hence should be a power of two, * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the * maximum number of symbolic links that may be expanded in a path name. * It should be set high enough to allow all legitimate uses, but halt * infinite loops reasonably quickly. */ #define MAXPATHLEN PATH_MAX #define MAXSYMLINKS 32 /* Bit map related macros. */ #define setbit(a,i) (((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) #define clrbit(a,i) (((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) #define isset(a,i) \ (((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) #define isclr(a,i) \ ((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) /* Macros for counting and rounding. */ #ifndef howmany #define howmany(x, y) (((x)+((y)-1))/(y)) #endif #define nitems(x) (sizeof((x)) / sizeof((x)[0])) #define rounddown(x, y) (((x)/(y))*(y)) #define rounddown2(x, y) __align_down(x, y) /* if y is power of two */ #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */ #define roundup2(x, y) __align_up(x, y) /* if y is powers of two */ #define powerof2(x) ((((x)-1)&(x))==0) /* Macros for min/max. */ #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #ifdef _KERNEL /* * Basic byte order function prototypes for non-inline functions. */ #ifndef LOCORE #ifndef _BYTEORDER_PROTOTYPED #define _BYTEORDER_PROTOTYPED __BEGIN_DECLS __uint32_t htonl(__uint32_t); __uint16_t htons(__uint16_t); __uint32_t ntohl(__uint32_t); __uint16_t ntohs(__uint16_t); __END_DECLS #endif #endif #ifndef _BYTEORDER_FUNC_DEFINED #define _BYTEORDER_FUNC_DEFINED #define htonl(x) __htonl(x) #define htons(x) __htons(x) #define ntohl(x) __ntohl(x) #define ntohs(x) __ntohs(x) #endif /* !_BYTEORDER_FUNC_DEFINED */ #endif /* _KERNEL */ /* * Scale factor for scaled integers used to count %cpu time and load avgs. * * The number of CPU `tick's that map to a unique `%age' can be expressed * by the formula (1 / (2 ^ (FSHIFT - 11))). Since the intermediate * calculation is done with 64-bit precision, the maximum load average that can * be calculated is approximately 2^32 / FSCALE. * * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age', * FSHIFT must be at least 11. 
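The counting and rounding macros above are dense, so here is a quick userland check of what they evaluate to (editor's sketch, not part of the change). In the header, roundup2()/rounddown2() defer to __align_up()/__align_down() from sys/cdefs.h; the power-of-two form is written out here so the sketch stays self-contained.

#include <stdio.h>

#define howmany(x, y)   (((x) + ((y) - 1)) / (y))
#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))   /* any y */
#define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))    /* y a power of 2 */
#define powerof2(x)     ((((x) - 1) & (x)) == 0)

int
main(void)
{
    printf("%d\n", howmany(10, 4));     /* 3: units of 4 needed to hold 10 */
    printf("%d\n", roundup(10, 6));     /* 12 */
    printf("%d\n", roundup2(10, 8));    /* 16 */
    printf("%d\n", powerof2(64));       /* 1 */
    return (0);
}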
This gives a maximum load avg of 2 million. */ #define FSHIFT 11 /* bits to right of fixed binary point */ #define FSCALE (1<<FSHIFT) #define dbtoc(db) /* calculates devblks to pages */ \ ((db + (ctodb(1) - 1)) >> (PAGE_SHIFT - DEV_BSHIFT)) #define ctodb(db) /* calculates pages to devblks */ \ ((db) << (PAGE_SHIFT - DEV_BSHIFT)) /* * Old spelling of __containerof(). */ #define member2struct(s, m, x) \ ((struct s *)(void *)((char *)(x) - offsetof(struct s, m))) /* * Access a variable length array that has been declared as a fixed * length array. */ #define __PAST_END(array, offset) (((__typeof__(*(array)) *)(array))[offset]) #endif /* _SYS_PARAM_H_ */ diff --git a/sys/sys/smp.h b/sys/sys/smp.h index 4583dc4350c9..7c3ce7f4ff8d 100644 --- a/sys/sys/smp.h +++ b/sys/sys/smp.h @@ -1,297 +1,300 @@ /*- * SPDX-License-Identifier: Beerware * * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp * ---------------------------------------------------------------------------- * * $FreeBSD$ */ #ifndef _SYS_SMP_H_ #define _SYS_SMP_H_ #ifdef _KERNEL #ifndef LOCORE #include #include /* * Types of nodes in the topological tree. */ typedef enum { /* No node has this type; can be used in topo API calls. */ TOPO_TYPE_DUMMY, /* Processing unit aka computing unit aka logical CPU. */ TOPO_TYPE_PU, /* Physical subdivision of a package. */ TOPO_TYPE_CORE, /* CPU L1/L2/L3 cache. */ TOPO_TYPE_CACHE, /* Package aka chip, equivalent to socket. */ TOPO_TYPE_PKG, /* NUMA node. */ TOPO_TYPE_NODE, /* Other logical or physical grouping of PUs. */ /* E.g. PUs on the same die, or PUs sharing an FPU. */ TOPO_TYPE_GROUP, /* The whole system. */ TOPO_TYPE_SYSTEM } topo_node_type; /* Hardware identifier of a topology component. */ typedef unsigned int hwid_t; /* Logical CPU identifier. */ typedef int cpuid_t; /* A node in the topology. */ struct topo_node { struct topo_node *parent; TAILQ_HEAD(topo_children, topo_node) children; TAILQ_ENTRY(topo_node) siblings; cpuset_t cpuset; topo_node_type type; uintptr_t subtype; hwid_t hwid; cpuid_t id; int nchildren; int cpu_count; }; /* * Scheduling topology of a NUMA or SMP system. * * The top level topology is an array of pointers to groups. Each group * contains a bitmask of cpus in its group or subgroups. It may also * contain a pointer to an array of child groups. * * The bitmasks at non leaf groups may be used by consumers who support * a smaller depth than the hardware provides. * * The topology may be omitted by systems where all CPUs are equal. */ struct cpu_group { struct cpu_group *cg_parent; /* Our parent group. */ struct cpu_group *cg_child; /* Optional children groups. */ cpuset_t cg_mask; /* Mask of cpus in this group. */ int32_t cg_count; /* Count of cpus in this group. */ int32_t cg_first; /* First cpu in this group. */ int32_t cg_last; /* Last cpu in this group. */ int16_t cg_children; /* Number of children groups. */ int8_t cg_level; /* Shared cache level. */ int8_t cg_flags; /* Traversal modifiers. */ }; typedef struct cpu_group *cpu_group_t; /* * Defines common resources for CPUs in the group. The highest level * resource should be used when multiple are shared. */ #define CG_SHARE_NONE 0 #define CG_SHARE_L1 1 #define CG_SHARE_L2 2 #define CG_SHARE_L3 3 #define MAX_CACHE_LEVELS CG_SHARE_L3 /* * Behavior modifiers for load balancing and affinity.
*/ #define CG_FLAG_HTT 0x01 /* Schedule the alternate core last. */ #define CG_FLAG_SMT 0x02 /* New age htt, less crippled. */ #define CG_FLAG_THREAD (CG_FLAG_HTT | CG_FLAG_SMT) /* Any threading. */ #define CG_FLAG_NODE 0x04 /* NUMA node. */ /* * Convenience routines for building and traversing topologies. */ #ifdef SMP void topo_init_node(struct topo_node *node); void topo_init_root(struct topo_node *root); struct topo_node * topo_add_node_by_hwid(struct topo_node *parent, int hwid, topo_node_type type, uintptr_t subtype); struct topo_node * topo_find_node_by_hwid(struct topo_node *parent, int hwid, topo_node_type type, uintptr_t subtype); void topo_promote_child(struct topo_node *child); struct topo_node * topo_next_node(struct topo_node *top, struct topo_node *node); struct topo_node * topo_next_nonchild_node(struct topo_node *top, struct topo_node *node); void topo_set_pu_id(struct topo_node *node, cpuid_t id); enum topo_level { TOPO_LEVEL_PKG = 0, /* * Some systems have useful sub-package core organizations. On these, * a package has one or more subgroups. Each subgroup contains one or * more cache groups (cores that share a last level cache). */ TOPO_LEVEL_GROUP, TOPO_LEVEL_CACHEGROUP, TOPO_LEVEL_CORE, TOPO_LEVEL_THREAD, TOPO_LEVEL_COUNT /* Must be last */ }; struct topo_analysis { int entities[TOPO_LEVEL_COUNT]; }; int topo_analyze(struct topo_node *topo_root, int all, struct topo_analysis *results); #define TOPO_FOREACH(i, root) \ for (i = root; i != NULL; i = topo_next_node(root, i)) struct cpu_group *smp_topo(void); struct cpu_group *smp_topo_alloc(u_int count); struct cpu_group *smp_topo_none(void); struct cpu_group *smp_topo_1level(int l1share, int l1count, int l1flags); struct cpu_group *smp_topo_2level(int l2share, int l2count, int l1share, int l1count, int l1flags); struct cpu_group *smp_topo_find(struct cpu_group *top, int cpu); extern void (*cpustop_restartfunc)(void); /* The suspend/resume cpusets are x86 only, but minimize ifdefs. */ extern volatile cpuset_t resuming_cpus; /* woken up cpus in suspend pen */ extern volatile cpuset_t started_cpus; /* cpus to let out of stop pen */ extern volatile cpuset_t stopped_cpus; /* cpus in stop pen */ extern volatile cpuset_t suspended_cpus; /* cpus [near] sleeping in susp pen */ extern volatile cpuset_t toresume_cpus; /* cpus to let out of suspend pen */ extern cpuset_t hlt_cpus_mask; /* XXX 'mask' is detail in old impl */ extern cpuset_t logical_cpus_mask; #endif /* SMP */ extern u_int mp_maxid; extern int mp_maxcpus; extern int mp_ncores; extern int mp_ncpus; extern int smp_cpus; extern volatile int smp_started; extern int smp_threads_per_core; extern cpuset_t all_cpus; extern cpuset_t cpuset_domain[MAXMEMDOM]; /* CPUs in each NUMA domain. */ +struct pcb; +extern struct pcb *stoppcbs; + /* * Macro allowing us to determine whether a CPU is absent at any given * time, thus permitting us to configure sparse maps of cpuid-dependent * (per-CPU) structures. */ #define CPU_ABSENT(x_cpu) (!CPU_ISSET(x_cpu, &all_cpus)) /* * Macros to iterate over non-absent CPUs. CPU_FOREACH() takes an * integer iterator and iterates over the available set of CPUs. * CPU_FIRST() returns the id of the first non-absent CPU. CPU_NEXT() * returns the id of the next non-absent CPU. It will wrap back to * CPU_FIRST() once the end of the list is reached. The iterators are * currently implemented via inline functions. 
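The iterator semantics described above, with CPU_NEXT() wrapping back to the first CPU and skipping absent IDs, are defined by the cpu_first()/cpu_next() inlines just below. A userland mock of the wrap-around search (editor's sketch; the bitmask stands in for all_cpus and CPU_ABSENT(), which are assumptions here, not kernel calls):

#include <stdio.h>

static unsigned int mock_cpus = 0x0d;   /* CPUs 0, 2 and 3 present */
static unsigned int mp_maxid = 3;

static int
cpu_absent(int c)
{
    return ((mock_cpus & (1u << c)) == 0);
}

static int
cpu_next(int i)
{
    /* Same shape as the kernel inline: advance, wrap, skip absent. */
    for (;;) {
        i++;
        if ((unsigned int)i > mp_maxid)
            i = 0;
        if (!cpu_absent(i))
            return (i);
    }
}

int
main(void)
{
    printf("%d %d %d\n", cpu_next(0), cpu_next(3), cpu_next(1));
    /* prints "2 0 2": CPU 1 is absent, and 3 wraps back to CPU 0 */
    return (0);
}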
*/ #define CPU_FOREACH(i) \ for ((i) = 0; (i) <= mp_maxid; (i)++) \ if (!CPU_ABSENT((i))) static __inline int cpu_first(void) { int i; for (i = 0;; i++) if (!CPU_ABSENT(i)) return (i); } static __inline int cpu_next(int i) { for (;;) { i++; if ((u_int)i > mp_maxid) i = 0; if (!CPU_ABSENT(i)) return (i); } } #define CPU_FIRST() cpu_first() #define CPU_NEXT(i) cpu_next((i)) #ifdef SMP /* * Machine dependent functions used to initialize MP support. * * The cpu_mp_probe() should check to see if MP support is present and return * zero if it is not or non-zero if it is. If MP support is present, then * cpu_mp_start() will be called so that MP can be enabled. This function * should do things such as startup secondary processors. It should also * setup mp_ncpus, all_cpus, and smp_cpus. It should also ensure that * smp_started is initialized at the appropriate time. * Once cpu_mp_start() returns, machine independent MP startup code will be * executed and a simple message will be output to the console. Finally, * cpu_mp_announce() will be called so that machine dependent messages about * the MP support may be output to the console if desired. * * The cpu_setmaxid() function is called very early during the boot process * so that the MD code may set mp_maxid to provide an upper bound on CPU IDs * that other subsystems may use. If a platform is not able to determine * the exact maximum ID that early, then it may set mp_maxid to MAXCPU - 1. */ struct thread; struct cpu_group *cpu_topo(void); void cpu_mp_announce(void); int cpu_mp_probe(void); void cpu_mp_setmaxid(void); void cpu_mp_start(void); void forward_signal(struct thread *); int restart_cpus(cpuset_t); int stop_cpus(cpuset_t); int stop_cpus_hard(cpuset_t); #if defined(__amd64__) || defined(__i386__) int suspend_cpus(cpuset_t); int resume_cpus(cpuset_t); #endif void smp_rendezvous_action(void); extern struct mtx smp_ipi_mtx; #endif /* SMP */ int quiesce_all_cpus(const char *, int); int quiesce_cpus(cpuset_t, const char *, int); void quiesce_all_critical(void); void cpus_fence_seq_cst(void); void smp_no_rendezvous_barrier(void *); void smp_rendezvous(void (*)(void *), void (*)(void *), void (*)(void *), void *arg); void smp_rendezvous_cpus(cpuset_t, void (*)(void *), void (*)(void *), void (*)(void *), void *arg); struct smp_rendezvous_cpus_retry_arg { cpuset_t cpus; }; void smp_rendezvous_cpus_retry(cpuset_t, void (*)(void *), void (*)(void *), void (*)(void *), void (*)(void *, int), struct smp_rendezvous_cpus_retry_arg *); void smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *); #endif /* !LOCORE */ #endif /* _KERNEL */ #endif /* _SYS_SMP_H_ */ diff --git a/sys/x86/include/x86_smp.h b/sys/x86/include/x86_smp.h index 9d8e1c4ebaec..356d8b881962 100644 --- a/sys/x86/include/x86_smp.h +++ b/sys/x86/include/x86_smp.h @@ -1,129 +1,128 @@ /*- * SPDX-License-Identifier: Beerware * * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp * ---------------------------------------------------------------------------- * * $FreeBSD$ * */ #ifndef _X86_X86_SMP_H_ #define _X86_X86_SMP_H_ #include #include #include #include #include struct pmap; #ifdef __i386__ extern unsigned int boot_address; #endif /* global data in mp_x86.c */ extern int mp_naps; extern int boot_cpu_id; -extern struct pcb stoppcbs[]; extern int cpu_apic_ids[]; extern int bootAP; extern void *dpcpu; extern char *bootSTK; extern void *bootstacks[]; extern unsigned int bootMP_size; extern volatile int aps_ready; extern struct mtx ap_boot_mtx; extern int cpu_logical; extern int cpu_cores; extern volatile uint32_t smp_tlb_generation; extern struct pmap *smp_tlb_pmap; extern vm_offset_t smp_tlb_addr1, smp_tlb_addr2; extern u_int xhits_gbl[]; extern u_int xhits_pg[]; extern u_int xhits_rng[]; extern u_int ipi_global; extern u_int ipi_page; extern u_int ipi_range; extern u_int ipi_range_size; extern int nmi_kdb_lock; extern int nmi_is_broadcast; struct cpu_info { bool cpu_present:1; bool cpu_bsp:1; bool cpu_disabled:1; bool cpu_hyperthread:1; }; extern struct cpu_info *cpu_info; /* * Set if MWAIT does not reliably wake when the MONITORed address is written. */ extern bool mwait_cpustop_broken; #ifdef COUNT_IPIS extern u_long *ipi_invltlb_counts[MAXCPU]; extern u_long *ipi_invlrng_counts[MAXCPU]; extern u_long *ipi_invlpg_counts[MAXCPU]; extern u_long *ipi_invlcache_counts[MAXCPU]; extern u_long *ipi_rendezvous_counts[MAXCPU]; #endif /* IPI handlers */ inthand_t IDTVEC(ipi_intr_bitmap_handler), /* Bitmap based IPIs */ IDTVEC(ipi_swi), /* Runs delayed SWI */ IDTVEC(cpustop), /* CPU stops & waits to be restarted */ IDTVEC(cpususpend), /* CPU suspends & waits to be resumed */ IDTVEC(rendezvous); /* handle CPU rendezvous */ typedef void (*smp_invl_cb_t)(struct pmap *, vm_offset_t addr1, vm_offset_t addr2); #ifdef __i386__ void alloc_ap_trampoline(vm_paddr_t *physmap, unsigned int *physmap_idx); #endif /* functions in x86_mp.c */ void assign_cpu_ids(void); void cpu_add(u_int apic_id, char boot_cpu); void cpustop_handler(void); void cpususpend_handler(void); void init_secondary_tail(void); void init_secondary(void); void ipi_startup(int apic_id, int vector); void ipi_all_but_self(u_int ipi); void ipi_bitmap_handler(struct trapframe frame); void ipi_cpu(int cpu, u_int ipi); int ipi_nmi_handler(void); void ipi_swi_handler(struct trapframe frame); void ipi_selected(cpuset_t cpus, u_int ipi); void ipi_self_from_nmi(u_int vector); void set_interrupt_apic_ids(void); void mem_range_AP_init(void); void topo_probe(void); /* functions in mp_machdep.c */ void smp_cache_flush(smp_invl_cb_t curcpu_cb); #ifdef __i386__ void smp_masked_invlpg(cpuset_t mask, vm_offset_t addr, struct pmap *pmap, smp_invl_cb_t curcpu_cb); void smp_masked_invlpg_range(cpuset_t mask, vm_offset_t startva, vm_offset_t endva, struct pmap *pmap, smp_invl_cb_t curcpu_cb); void smp_masked_invltlb(cpuset_t mask, struct pmap *pmap, smp_invl_cb_t curcpu_cb); #else void smp_masked_invlpg(vm_offset_t addr, struct pmap *pmap, smp_invl_cb_t curcpu_cb); void smp_masked_invlpg_range(vm_offset_t startva, vm_offset_t endva, struct pmap *pmap, smp_invl_cb_t curcpu_cb); void smp_masked_invltlb(struct pmap *pmap, smp_invl_cb_t curcpu_cb); #endif #endif diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c index 34c54b27eeff..581ec0ac518a 100644 --- a/sys/x86/x86/mp_x86.c +++ b/sys/x86/x86/mp_x86.c @@ -1,1716 +1,1715 @@ /*- * Copyright (c) 1996, by Steve Passe * Copyright (c) 2003, by Peter Wemm * All rights 
reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. The name of the developer may NOT be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #ifdef __i386__ #include "opt_apic.h" #endif #include "opt_cpu.h" #include "opt_ddb.h" #include "opt_gdb.h" #include "opt_kstack_pages.h" #include "opt_pmap.h" #include "opt_sched.h" #include "opt_smp.h" #include "opt_stack.h" #include #include #include #include #include /* cngetc() */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ACPI #include #include #endif static MALLOC_DEFINE(M_CPUS, "cpus", "CPU items"); int mp_naps; /* # of Applications processors */ int boot_cpu_id = -1; /* designated BSP */ /* AP uses this during bootstrap. Do not staticize. */ char *bootSTK; int bootAP; /* Free these after use */ void *bootstacks[MAXCPU]; void *dpcpu; -struct pcb stoppcbs[MAXCPU]; struct susppcb **susppcbs; #ifdef COUNT_IPIS /* Interrupt counts. */ static u_long *ipi_preempt_counts[MAXCPU]; static u_long *ipi_ast_counts[MAXCPU]; u_long *ipi_invltlb_counts[MAXCPU]; u_long *ipi_invlrng_counts[MAXCPU]; u_long *ipi_invlpg_counts[MAXCPU]; u_long *ipi_invlcache_counts[MAXCPU]; u_long *ipi_rendezvous_counts[MAXCPU]; static u_long *ipi_hardclock_counts[MAXCPU]; #endif /* Default cpu_ops implementation. */ struct cpu_ops cpu_ops; /* * Local data and functions. */ static volatile cpuset_t ipi_stop_nmi_pending; volatile cpuset_t resuming_cpus; volatile cpuset_t toresume_cpus; /* used to hold the AP's until we are ready to release them */ struct mtx ap_boot_mtx; /* Set to 1 once we're ready to let the APs out of the pen. */ volatile int aps_ready = 0; /* * Store data from cpu_add() until later in the boot when we actually setup * the APs. 
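This removal, together with the matching removals in the per-architecture files above, leaves the new MI declaration from sys/sys/smp.h (extern struct pcb *stoppcbs) as the only one, so a single definition now has to be sized at run time instead of being a MAXCPU array per architecture. The allocation site is not part of this excerpt; one plausible shape for it is sketched below. The SYSINIT placement, malloc type, and function name are the editor's assumptions, not taken from the change.

/* Editor's sketch of a possible MI allocation site; not from this diff. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/smp.h>
#include <machine/pcb.h>

static MALLOC_DEFINE(M_STOPPCBS, "stoppcbs", "per-CPU stop PCBs");

struct pcb *stoppcbs;

static void
stoppcbs_alloc(void *arg __unused)
{
    /* mp_maxid is settled early in boot, so size for mp_maxid + 1 PCBs. */
    stoppcbs = mallocarray(mp_maxid + 1, sizeof(struct pcb), M_STOPPCBS,
        M_WAITOK | M_ZERO);
}
SYSINIT(stoppcbs_alloc, SI_SUB_CPU, SI_ORDER_ANY, stoppcbs_alloc, NULL);

Sizing by mp_maxid + 1 rather than MAXCPU is the point of the change: kernels built with a large MAXCPU no longer pay for stop PCBs on CPUs that do not exist.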
*/ struct cpu_info *cpu_info; int *apic_cpuids; int cpu_apic_ids[MAXCPU]; _Static_assert(MAXCPU <= MAX_APIC_ID, "MAXCPU cannot be larger than MAX_APIC_ID"); _Static_assert(xAPIC_MAX_APIC_ID <= MAX_APIC_ID, "xAPIC_MAX_APIC_ID cannot be larger than MAX_APIC_ID"); static void release_aps(void *dummy); static void cpustop_handler_post(u_int cpu); static int hyperthreading_allowed = 1; SYSCTL_INT(_machdep, OID_AUTO, hyperthreading_allowed, CTLFLAG_RDTUN, &hyperthreading_allowed, 0, "Use Intel HTT logical CPUs"); static int hyperthreading_intr_allowed = 0; SYSCTL_INT(_machdep, OID_AUTO, hyperthreading_intr_allowed, CTLFLAG_RDTUN, &hyperthreading_intr_allowed, 0, "Allow interrupts on HTT logical CPUs"); static struct topo_node topo_root; static int pkg_id_shift; static int node_id_shift; static int core_id_shift; static int disabled_cpus; struct cache_info { int id_shift; int present; } static caches[MAX_CACHE_LEVELS]; static bool stop_mwait = false; SYSCTL_BOOL(_machdep, OID_AUTO, stop_mwait, CTLFLAG_RWTUN, &stop_mwait, 0, "Use MONITOR/MWAIT when stopping CPU, if available"); void mem_range_AP_init(void) { if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP) mem_range_softc.mr_op->initAP(&mem_range_softc); } /* * Round up to the next power of two, if necessary, and then * take log2. * Returns -1 if argument is zero. */ static __inline int mask_width(u_int x) { return (fls(x << (1 - powerof2(x))) - 1); } /* * Add a cache level to the cache topology description. */ static int add_deterministic_cache(int type, int level, int share_count) { if (type == 0) return (0); if (type > 3) { printf("unexpected cache type %d\n", type); return (1); } if (type == 2) /* ignore instruction cache */ return (1); if (level == 0 || level > MAX_CACHE_LEVELS) { printf("unexpected cache level %d\n", level); return (1); } if (caches[level - 1].present) { printf("WARNING: multiple entries for L%u data cache\n", level); printf("%u => %u\n", caches[level - 1].id_shift, mask_width(share_count)); } caches[level - 1].id_shift = mask_width(share_count); caches[level - 1].present = 1; if (caches[level - 1].id_shift > pkg_id_shift) { printf("WARNING: L%u data cache covers more " "APIC IDs than a package (%u > %u)\n", level, caches[level - 1].id_shift, pkg_id_shift); caches[level - 1].id_shift = pkg_id_shift; } if (caches[level - 1].id_shift < core_id_shift) { printf("WARNING: L%u data cache covers fewer " "APIC IDs than a core (%u < %u)\n", level, caches[level - 1].id_shift, core_id_shift); caches[level - 1].id_shift = core_id_shift; } return (1); } /* * Determine topology of processing units and caches for AMD CPUs. * See: * - AMD CPUID Specification (Publication # 25481) * - BKDG for AMD NPT Family 0Fh Processors (Publication # 32559) * - BKDG For AMD Family 10h Processors (Publication # 31116) * - BKDG For AMD Family 15h Models 00h-0Fh Processors (Publication # 42301) * - BKDG For AMD Family 16h Models 00h-0Fh Processors (Publication # 48751) * - PPR For AMD Family 17h Models 00h-0Fh Processors (Publication # 54945) */ static void topo_probe_amd(void) { u_int p[4]; uint64_t v; int level; int nodes_per_socket; int share_count; int type; int i; /* No multi-core capability. */ if ((amd_feature2 & AMDID2_CMP) == 0) return; /* For families 10h and newer. */ pkg_id_shift = (cpu_procinfo2 & AMDID_COREID_SIZE) >> AMDID_COREID_SIZE_SHIFT; /* For 0Fh family.
*/ if (pkg_id_shift == 0) pkg_id_shift = mask_width((cpu_procinfo2 & AMDID_CMP_CORES) + 1); /* * Families prior to 16h define the following value as * cores per compute unit and we don't really care about the AMD * compute units at the moment. Perhaps we should treat them as * cores and cores within the compute units as hardware threads, * but that's up for debate. * Later families define the value as threads per compute unit, * so we are following AMD's nomenclature here. */ if ((amd_feature2 & AMDID2_TOPOLOGY) != 0 && CPUID_TO_FAMILY(cpu_id) >= 0x16) { cpuid_count(0x8000001e, 0, p); share_count = ((p[1] >> 8) & 0xff) + 1; core_id_shift = mask_width(share_count); /* * For Zen (17h), gather Nodes per Processor. Each node is a * Zeppelin die; TR and EPYC CPUs will have multiple dies per * package. Communication latency between dies is higher than * within them. */ nodes_per_socket = ((p[2] >> 8) & 0x7) + 1; node_id_shift = pkg_id_shift - mask_width(nodes_per_socket); } if ((amd_feature2 & AMDID2_TOPOLOGY) != 0) { for (i = 0; ; i++) { cpuid_count(0x8000001d, i, p); type = p[0] & 0x1f; level = (p[0] >> 5) & 0x7; share_count = 1 + ((p[0] >> 14) & 0xfff); if (!add_deterministic_cache(type, level, share_count)) break; } } else { if (cpu_exthigh >= 0x80000005) { cpuid_count(0x80000005, 0, p); if (((p[2] >> 24) & 0xff) != 0) { caches[0].id_shift = 0; caches[0].present = 1; } } if (cpu_exthigh >= 0x80000006) { cpuid_count(0x80000006, 0, p); if (((p[2] >> 16) & 0xffff) != 0) { caches[1].id_shift = 0; caches[1].present = 1; } if (((p[3] >> 18) & 0x3fff) != 0) { nodes_per_socket = 1; if ((amd_feature2 & AMDID2_NODE_ID) != 0) { /* * Handle multi-node processors that * have multiple chips, each with its * own L3 cache, on the same die. */ v = rdmsr(0xc001100c); nodes_per_socket = 1 + ((v >> 3) & 0x7); } caches[2].id_shift = pkg_id_shift - mask_width(nodes_per_socket); caches[2].present = 1; } } } } /* * Determine topology of processing units for Intel CPUs * using CPUID Leaf 1 and Leaf 4, if supported. * See: * - Intel 64 Architecture Processor Topology Enumeration * - Intel 64 and IA-32 Architectures Software Developer’s Manual, * Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS * FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS */ static void topo_probe_intel_0x4(void) { u_int p[4]; int max_cores; int max_logical; /* Both zero and one here mean one logical processor per package. */ max_logical = (cpu_feature & CPUID_HTT) != 0 ? (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1; if (max_logical <= 1) return; if (cpu_high >= 0x4) { cpuid_count(0x04, 0, p); max_cores = ((p[0] >> 26) & 0x3f) + 1; } else max_cores = 1; core_id_shift = mask_width(max_logical/max_cores); KASSERT(core_id_shift >= 0, ("intel topo: max_cores > max_logical\n")); pkg_id_shift = core_id_shift + mask_width(max_cores); } /* * Determine topology of processing units for Intel CPUs * using CPUID Leaf 1Fh or 0Bh, if supported. * See: * - Intel 64 Architecture Processor Topology Enumeration * - Intel 64 and IA-32 Architectures Software Developer’s Manual, * Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS * FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS */ static void topo_probe_intel_0xb(void) { u_int leaf; u_int p[4] = { 0 }; int bits; int type; int i; /* Prefer leaf 1Fh (V2 Extended Topology Enumeration). */ if (cpu_high >= 0x1f) { leaf = 0x1f; cpuid_count(leaf, 0, p); } /* Fall back to leaf 0Bh (Extended Topology Enumeration).
*/ if (p[1] == 0) { leaf = 0x0b; cpuid_count(leaf, 0, p); } /* Fall back to leaf 04h (Deterministic Cache Parameters). */ if (p[1] == 0) { topo_probe_intel_0x4(); return; } /* We only support three levels for now. */ for (i = 0; ; i++) { cpuid_count(leaf, i, p); bits = p[0] & 0x1f; type = (p[2] >> 8) & 0xff; if (type == 0) break; if (type == CPUID_TYPE_SMT) core_id_shift = bits; else if (type == CPUID_TYPE_CORE) pkg_id_shift = bits; else if (bootverbose) printf("Topology level type %d shift: %d\n", type, bits); } if (pkg_id_shift < core_id_shift) { printf("WARNING: core covers more APIC IDs than a package\n"); core_id_shift = pkg_id_shift; } } /* * Determine topology of caches for Intel CPUs. * See: * - Intel 64 Architecture Processor Topology Enumeration * - Intel 64 and IA-32 Architectures Software Developer’s Manual * Volume 2A: Instruction Set Reference, A-M, * CPUID instruction */ static void topo_probe_intel_caches(void) { u_int p[4]; int level; int share_count; int type; int i; if (cpu_high < 0x4) { /* * Available cache level and sizes can be determined * via CPUID leaf 2, but that requires a huge table of hardcoded * values, so for now just assume L1 and L2 caches potentially * shared only by HTT processing units, if HTT is present. */ caches[0].id_shift = pkg_id_shift; caches[0].present = 1; caches[1].id_shift = pkg_id_shift; caches[1].present = 1; return; } for (i = 0; ; i++) { cpuid_count(0x4, i, p); type = p[0] & 0x1f; level = (p[0] >> 5) & 0x7; share_count = 1 + ((p[0] >> 14) & 0xfff); if (!add_deterministic_cache(type, level, share_count)) break; } } /* * Determine topology of processing units and caches for Intel CPUs. * See: * - Intel 64 Architecture Processor Topology Enumeration */ static void topo_probe_intel(void) { /* * Note that 0x1 <= cpu_high < 4 case should be * compatible with topo_probe_intel_0x4() logic when * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1) * or it should trigger the fallback otherwise. */ if (cpu_high >= 0xb) topo_probe_intel_0xb(); else if (cpu_high >= 0x1) topo_probe_intel_0x4(); topo_probe_intel_caches(); } /* * Topology information is queried only on BSP, on which this * code runs and for which it can query CPUID information. * Then topology is extrapolated on all packages using an * assumption that APIC ID to hardware component ID mapping is * homogeneous. * That doesn't necessarily imply that the topology is uniform.
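mask_width() and the id-shift layering drive everything topo_probe() does next. The userland sketch below is an editor's addition that mirrors the topo_probe_intel_0x4() arithmetic for an assumed package of 6 cores with 2 threads each, then splits an APIC ID with the resulting shifts; fls_() is a portable stand-in for the kernel's fls().

#include <stdio.h>

#define powerof2(x) ((((x) - 1) & (x)) == 0)

static int
fls_(unsigned int x)        /* index of highest bit set, 1-based; 0 for 0 */
{
    int b = 0;

    while (x != 0) {
        b++;
        x >>= 1;
    }
    return (b);
}

static int
mask_width(unsigned int x)  /* ceil(log2(x)); -1 for 0 */
{
    return (fls_(x << (1 - powerof2(x))) - 1);
}

int
main(void)
{
    int core_id_shift = mask_width(2);                  /* 1 */
    int pkg_id_shift = core_id_shift + mask_width(6);   /* 1 + 3 = 4 */
    unsigned int apic_id = 0x0b;

    printf("pkg %u core %u thread %u\n",
        apic_id >> pkg_id_shift,
        (apic_id >> core_id_shift) &
            ((1u << (pkg_id_shift - core_id_shift)) - 1),
        apic_id & ((1u << core_id_shift) - 1));
    /* APIC ID 0x0b decodes to package 0, core 5, thread 1. */
    return (0);
}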
*/ void topo_probe(void) { static int cpu_topo_probed = 0; struct x86_topo_layer { int type; int subtype; int id_shift; } topo_layers[MAX_CACHE_LEVELS + 5]; struct topo_node *parent; struct topo_node *node; int layer; int nlayers; int node_id; int i; #if defined(DEV_ACPI) && MAXMEMDOM > 1 int d, domain; #endif if (cpu_topo_probed) return; CPU_ZERO(&logical_cpus_mask); if (mp_ncpus <= 1) ; /* nothing */ else if (cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_HYGON) topo_probe_amd(); else if (cpu_vendor_id == CPU_VENDOR_INTEL) topo_probe_intel(); KASSERT(pkg_id_shift >= core_id_shift, ("bug in APIC topology discovery")); nlayers = 0; bzero(topo_layers, sizeof(topo_layers)); topo_layers[nlayers].type = TOPO_TYPE_PKG; topo_layers[nlayers].id_shift = pkg_id_shift; if (bootverbose) printf("Package ID shift: %u\n", topo_layers[nlayers].id_shift); nlayers++; if (pkg_id_shift > node_id_shift && node_id_shift != 0) { topo_layers[nlayers].type = TOPO_TYPE_GROUP; topo_layers[nlayers].id_shift = node_id_shift; if (bootverbose) printf("Node ID shift: %u\n", topo_layers[nlayers].id_shift); nlayers++; } /* * Consider all caches to be within a package/chip * and "in front" of all sub-components like * cores and hardware threads. */ for (i = MAX_CACHE_LEVELS - 1; i >= 0; --i) { if (caches[i].present) { if (node_id_shift != 0) KASSERT(caches[i].id_shift <= node_id_shift, ("bug in APIC topology discovery")); KASSERT(caches[i].id_shift <= pkg_id_shift, ("bug in APIC topology discovery")); KASSERT(caches[i].id_shift >= core_id_shift, ("bug in APIC topology discovery")); topo_layers[nlayers].type = TOPO_TYPE_CACHE; topo_layers[nlayers].subtype = i + 1; topo_layers[nlayers].id_shift = caches[i].id_shift; if (bootverbose) printf("L%u cache ID shift: %u\n", topo_layers[nlayers].subtype, topo_layers[nlayers].id_shift); nlayers++; } } if (pkg_id_shift > core_id_shift) { topo_layers[nlayers].type = TOPO_TYPE_CORE; topo_layers[nlayers].id_shift = core_id_shift; if (bootverbose) printf("Core ID shift: %u\n", topo_layers[nlayers].id_shift); nlayers++; } topo_layers[nlayers].type = TOPO_TYPE_PU; topo_layers[nlayers].id_shift = 0; nlayers++; #if defined(DEV_ACPI) && MAXMEMDOM > 1 if (vm_ndomains > 1) { for (layer = 0; layer < nlayers; ++layer) { for (i = 0; i <= max_apic_id; ++i) { if ((i & ((1 << topo_layers[layer].id_shift) - 1)) == 0) domain = -1; if (!cpu_info[i].cpu_present) continue; d = acpi_pxm_get_cpu_locality(i); if (domain >= 0 && domain != d) break; domain = d; } if (i > max_apic_id) break; } KASSERT(layer < nlayers, ("NUMA domain smaller than PU")); memmove(&topo_layers[layer+1], &topo_layers[layer], sizeof(*topo_layers) * (nlayers - layer)); topo_layers[layer].type = TOPO_TYPE_NODE; topo_layers[layer].subtype = CG_SHARE_NONE; nlayers++; } #endif topo_init_root(&topo_root); for (i = 0; i <= max_apic_id; ++i) { if (!cpu_info[i].cpu_present) continue; parent = &topo_root; for (layer = 0; layer < nlayers; ++layer) { #if defined(DEV_ACPI) && MAXMEMDOM > 1 if (topo_layers[layer].type == TOPO_TYPE_NODE) { node_id = acpi_pxm_get_cpu_locality(i); } else #endif node_id = i >> topo_layers[layer].id_shift; parent = topo_add_node_by_hwid(parent, node_id, topo_layers[layer].type, topo_layers[layer].subtype); } } parent = &topo_root; for (layer = 0; layer < nlayers; ++layer) { #if defined(DEV_ACPI) && MAXMEMDOM > 1 if (topo_layers[layer].type == TOPO_TYPE_NODE) node_id = acpi_pxm_get_cpu_locality(boot_cpu_id); else #endif node_id = boot_cpu_id >> topo_layers[layer].id_shift; node = 
topo_find_node_by_hwid(parent, node_id, topo_layers[layer].type, topo_layers[layer].subtype); topo_promote_child(node); parent = node; } cpu_topo_probed = 1; } /* * Assign logical CPU IDs to local APICs. */ void assign_cpu_ids(void) { struct topo_node *node; u_int smt_mask; int nhyper; smt_mask = (1u << core_id_shift) - 1; /* * Assign CPU IDs to local APIC IDs and disable any CPUs * beyond MAXCPU. CPU 0 is always assigned to the BSP. */ mp_ncpus = 0; nhyper = 0; TOPO_FOREACH(node, &topo_root) { if (node->type != TOPO_TYPE_PU) continue; if ((node->hwid & smt_mask) != (boot_cpu_id & smt_mask)) cpu_info[node->hwid].cpu_hyperthread = 1; if (resource_disabled("lapic", node->hwid)) { if (node->hwid != boot_cpu_id) cpu_info[node->hwid].cpu_disabled = 1; else printf("Cannot disable BSP, APIC ID = %d\n", node->hwid); } if (!hyperthreading_allowed && cpu_info[node->hwid].cpu_hyperthread) cpu_info[node->hwid].cpu_disabled = 1; if (mp_ncpus >= MAXCPU) cpu_info[node->hwid].cpu_disabled = 1; if (cpu_info[node->hwid].cpu_disabled) { disabled_cpus++; continue; } if (cpu_info[node->hwid].cpu_hyperthread) nhyper++; cpu_apic_ids[mp_ncpus] = node->hwid; apic_cpuids[node->hwid] = mp_ncpus; topo_set_pu_id(node, mp_ncpus); mp_ncpus++; } KASSERT(mp_maxid >= mp_ncpus - 1, ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid, mp_ncpus)); mp_ncores = mp_ncpus - nhyper; smp_threads_per_core = mp_ncpus / mp_ncores; } /* * Print various information about the SMP system hardware and setup. */ void cpu_mp_announce(void) { struct topo_node *node; const char *hyperthread; struct topo_analysis topology; printf("FreeBSD/SMP: "); if (topo_analyze(&topo_root, 1, &topology)) { printf("%d package(s)", topology.entities[TOPO_LEVEL_PKG]); if (topology.entities[TOPO_LEVEL_GROUP] > 1) printf(" x %d groups", topology.entities[TOPO_LEVEL_GROUP]); if (topology.entities[TOPO_LEVEL_CACHEGROUP] > 1) printf(" x %d cache groups", topology.entities[TOPO_LEVEL_CACHEGROUP]); if (topology.entities[TOPO_LEVEL_CORE] > 0) printf(" x %d core(s)", topology.entities[TOPO_LEVEL_CORE]); if (topology.entities[TOPO_LEVEL_THREAD] > 1) printf(" x %d hardware threads", topology.entities[TOPO_LEVEL_THREAD]); } else { printf("Non-uniform topology"); } printf("\n"); if (disabled_cpus) { printf("FreeBSD/SMP Online: "); if (topo_analyze(&topo_root, 0, &topology)) { printf("%d package(s)", topology.entities[TOPO_LEVEL_PKG]); if (topology.entities[TOPO_LEVEL_GROUP] > 1) printf(" x %d groups", topology.entities[TOPO_LEVEL_GROUP]); if (topology.entities[TOPO_LEVEL_CACHEGROUP] > 1) printf(" x %d cache groups", topology.entities[TOPO_LEVEL_CACHEGROUP]); if (topology.entities[TOPO_LEVEL_CORE] > 0) printf(" x %d core(s)", topology.entities[TOPO_LEVEL_CORE]); if (topology.entities[TOPO_LEVEL_THREAD] > 1) printf(" x %d hardware threads", topology.entities[TOPO_LEVEL_THREAD]); } else { printf("Non-uniform topology"); } printf("\n"); } if (!bootverbose) return; TOPO_FOREACH(node, &topo_root) { switch (node->type) { case TOPO_TYPE_PKG: printf("Package HW ID = %u\n", node->hwid); break; case TOPO_TYPE_CORE: printf("\tCore HW ID = %u\n", node->hwid); break; case TOPO_TYPE_PU: if (cpu_info[node->hwid].cpu_hyperthread) hyperthread = "/HT"; else hyperthread = ""; if (node->subtype == 0) printf("\t\tCPU (AP%s): APIC ID: %u" "(disabled)\n", hyperthread, node->hwid); else if (node->id == 0) printf("\t\tCPU0 (BSP): APIC ID: %u\n", node->hwid); else printf("\t\tCPU%u (AP%s): APIC ID: %u\n", node->id, hyperthread, node->hwid); break; default: /* ignored */ break; } } } /* 
* Add a scheduling group, a group of logical processors sharing * a particular cache (and thus having an affinity), to the scheduling * topology. * This function recursively works on lower level caches. */ static void x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root) { struct topo_node *node; int nchildren; int ncores; int i; KASSERT(root->type == TOPO_TYPE_SYSTEM || root->type == TOPO_TYPE_CACHE || root->type == TOPO_TYPE_NODE || root->type == TOPO_TYPE_GROUP, ("x86topo_add_sched_group: bad type: %u", root->type)); CPU_COPY(&root->cpuset, &cg_root->cg_mask); cg_root->cg_count = root->cpu_count; if (root->type == TOPO_TYPE_CACHE) cg_root->cg_level = root->subtype; else cg_root->cg_level = CG_SHARE_NONE; if (root->type == TOPO_TYPE_NODE) cg_root->cg_flags = CG_FLAG_NODE; else cg_root->cg_flags = 0; /* * Check how many core nodes we have under the given root node. * If we have multiple logical processors, but not multiple * cores, then those processors must be hardware threads. */ ncores = 0; node = root; while (node != NULL) { if (node->type != TOPO_TYPE_CORE) { node = topo_next_node(root, node); continue; } ncores++; node = topo_next_nonchild_node(root, node); } if (cg_root->cg_level != CG_SHARE_NONE && root->cpu_count > 1 && ncores < 2) cg_root->cg_flags |= CG_FLAG_SMT; /* * Find out how many cache nodes we have under the given root node. * We ignore cache nodes that cover all the same processors as the * root node. Also, we do not descend below found cache nodes. * That is, we count top-level "non-redundant" caches under the root * node. */ nchildren = 0; node = root; while (node != NULL) { /* * When some APICs are disabled by tunables, nodes can end up * with an empty cpuset. Nodes with an empty cpuset will be * translated into cpu groups with empty cpusets. smp_topo_fill * will then set cg_first and cg_last to -1. This isn't * correctly handled in all functions. E.g. when * cpu_search_lowest and cpu_search_highest loop through all * cpus, they call CPU_ISSET on cpu -1 which ends up in a * general protection fault. * * We could fix the scheduler to handle empty cpu groups * correctly. Nevertheless, empty cpu groups are causing * overhead for no value. So, it makes more sense to just not * create them. */ if (CPU_EMPTY(&node->cpuset)) { node = topo_next_node(root, node); continue; } if (CPU_CMP(&node->cpuset, &root->cpuset) == 0) { if (node->type == TOPO_TYPE_CACHE && cg_root->cg_level < node->subtype) cg_root->cg_level = node->subtype; if (node->type == TOPO_TYPE_NODE) cg_root->cg_flags |= CG_FLAG_NODE; node = topo_next_node(root, node); continue; } if (node->type != TOPO_TYPE_GROUP && node->type != TOPO_TYPE_NODE && node->type != TOPO_TYPE_CACHE) { node = topo_next_node(root, node); continue; } nchildren++; node = topo_next_nonchild_node(root, node); } /* * We are not interested in nodes including only one CPU each. */ if (nchildren == root->cpu_count) return; /* * We are not interested in nodes without children. */ cg_root->cg_children = nchildren; if (nchildren == 0) return; cg_root->cg_child = smp_topo_alloc(nchildren); /* * Now find again the same cache nodes as above and recursively * build scheduling topologies for them.
*/ node = root; i = 0; while (node != NULL) { if ((node->type != TOPO_TYPE_GROUP && node->type != TOPO_TYPE_NODE && node->type != TOPO_TYPE_CACHE) || CPU_CMP(&node->cpuset, &root->cpuset) == 0 || CPU_EMPTY(&node->cpuset)) { node = topo_next_node(root, node); continue; } cg_root->cg_child[i].cg_parent = cg_root; x86topo_add_sched_group(node, &cg_root->cg_child[i]); i++; node = topo_next_nonchild_node(root, node); } } /* * Build the MI scheduling topology from the discovered hardware topology. */ struct cpu_group * cpu_topo(void) { struct cpu_group *cg_root; if (mp_ncpus <= 1) return (smp_topo_none()); cg_root = smp_topo_alloc(1); x86topo_add_sched_group(&topo_root, cg_root); return (cg_root); } static void cpu_alloc(void *dummy __unused) { /* * Dynamically allocate the arrays that depend on the * maximum APIC ID. */ cpu_info = malloc(sizeof(*cpu_info) * (max_apic_id + 1), M_CPUS, M_WAITOK | M_ZERO); apic_cpuids = malloc(sizeof(*apic_cpuids) * (max_apic_id + 1), M_CPUS, M_WAITOK | M_ZERO); } SYSINIT(cpu_alloc, SI_SUB_CPU, SI_ORDER_FIRST, cpu_alloc, NULL); /* * Add a logical CPU to the topology. */ void cpu_add(u_int apic_id, char boot_cpu) { if (apic_id > max_apic_id) panic("SMP: APIC ID %d too high", apic_id); KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %u added twice", apic_id)); cpu_info[apic_id].cpu_present = 1; if (boot_cpu) { KASSERT(boot_cpu_id == -1, ("CPU %u claims to be BSP, but CPU %u already is", apic_id, boot_cpu_id)); boot_cpu_id = apic_id; cpu_info[apic_id].cpu_bsp = 1; } if (bootverbose) printf("SMP: Added CPU %u (%s)\n", apic_id, boot_cpu ? "BSP" : "AP"); } void cpu_mp_setmaxid(void) { /* * mp_ncpus and mp_maxid should be already set by calls to cpu_add(). * If there were no calls to cpu_add() assume this is a UP system. */ if (mp_ncpus == 0) mp_ncpus = 1; } int cpu_mp_probe(void) { /* * Always record BSP in CPU map so that the mbuf init code works * correctly. */ CPU_SETOF(0, &all_cpus); return (mp_ncpus > 1); } /* * AP CPU's call this to initialize themselves. */ void init_secondary_tail(void) { u_int cpuid; pmap_activate_boot(vmspace_pmap(proc0.p_vmspace)); /* * On real hardware, switch to x2apic mode if possible. Do it * after aps_ready was signalled, to avoid manipulating the * mode while BSP might still want to send some IPI to us * (second startup IPI is ignored on modern hardware etc). */ lapic_xapic_mode(); /* Initialize the PAT MSR. */ pmap_init_pat(); /* set up CPU registers and state */ cpu_setregs(); /* set up SSE/NX */ initializecpu(); /* set up FPU state on the AP */ #ifdef __amd64__ fpuinit(); #else npxinit(false); #endif if (cpu_ops.cpu_init) cpu_ops.cpu_init(); /* A quick check from sanity claus */ cpuid = PCPU_GET(cpuid); if (PCPU_GET(apic_id) != lapic_id()) { printf("SMP: cpuid = %d\n", cpuid); printf("SMP: actual apic_id = %d\n", lapic_id()); printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id)); panic("cpuid mismatch! boom!!"); } /* Initialize curthread. */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); PCPU_SET(curthread, PCPU_GET(idlethread)); schedinit_ap(); mtx_lock_spin(&ap_boot_mtx); mca_init(); /* Init local apic for irq's */ lapic_setup(1); /* Set memory range attributes for this CPU to match the BSP */ mem_range_AP_init(); smp_cpus++; CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid); if (bootverbose) printf("SMP: AP CPU #%d Launched!\n", cpuid); else printf("%s%d%s", smp_cpus == 2 ? "Launching APs: " : "", cpuid, smp_cpus == mp_ncpus ? "\n" : " "); /* Determine if we are a logical CPU. 
*/ if (cpu_info[PCPU_GET(apic_id)].cpu_hyperthread) CPU_SET(cpuid, &logical_cpus_mask); if (bootverbose) lapic_dump("AP"); if (smp_cpus == mp_ncpus) { /* enable IPI's, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } #ifdef __amd64__ if (pmap_pcid_enabled) load_cr4(rcr4() | CR4_PCIDE); load_ds(_udatasel); load_es(_udatasel); load_fs(_ufssel); #endif mtx_unlock_spin(&ap_boot_mtx); /* Wait until all the AP's are up. */ while (atomic_load_acq_int(&smp_started) == 0) ia32_pause(); #ifndef EARLY_AP_STARTUP /* Start per-CPU event timers. */ cpu_initclocks_ap(); #endif kcsan_cpu_init(cpuid); sched_ap_entry(); panic("scheduler returned us to %s", __func__); /* NOTREACHED */ } static void smp_after_idle_runnable(void *arg __unused) { int cpu; if (mp_ncpus == 1) return; KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__)); /* * Wait for all APs to handle an interrupt. After that, we know that * the APs have entered the scheduler at least once, so the boot stacks * are safe to free. */ smp_rendezvous(smp_no_rendezvous_barrier, NULL, smp_no_rendezvous_barrier, NULL); for (cpu = 1; cpu < mp_ncpus; cpu++) { kmem_free(bootstacks[cpu], kstack_pages * PAGE_SIZE); } } SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY, smp_after_idle_runnable, NULL); /* * We tell the I/O APIC code about all the CPUs we want to receive * interrupts. If we don't want certain CPUs to receive IRQs we * can simply not tell the I/O APIC code about them in this function. * We also do not tell it about the BSP since it tells itself about * the BSP internally to work with UP kernels and on UP machines. */ void set_interrupt_apic_ids(void) { u_int i, apic_id; for (i = 0; i < MAXCPU; i++) { apic_id = cpu_apic_ids[i]; if (apic_id == -1) continue; if (cpu_info[apic_id].cpu_bsp) continue; if (cpu_info[apic_id].cpu_disabled) continue; /* Don't let hyperthreads service interrupts. */ if (cpu_info[apic_id].cpu_hyperthread && !hyperthreading_intr_allowed) continue; intr_add_cpu(i); } } #ifdef COUNT_XINVLTLB_HITS u_int xhits_gbl[MAXCPU]; u_int xhits_pg[MAXCPU]; u_int xhits_rng[MAXCPU]; static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, ""); SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl, sizeof(xhits_gbl), "IU", ""); SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg, sizeof(xhits_pg), "IU", ""); SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng, sizeof(xhits_rng), "IU", ""); u_int ipi_global; u_int ipi_page; u_int ipi_range; u_int ipi_range_size; SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, ""); SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, ""); SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, ""); SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size, 0, ""); #endif /* COUNT_XINVLTLB_HITS */ /* * Init and startup IPI. */ void ipi_startup(int apic_id, int vector) { /* * This attempts to follow the algorithm described in the * Intel Multiprocessor Specification v1.4 in section B.4. * For each IPI, we allow the local APIC ~20us to deliver the * IPI. If that times out, we panic. */ /* * first we do an INIT IPI: this INIT IPI might be run, resetting * and running the target CPU. OR this INIT IPI might be latched (P5 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be * ignored. 
*/ lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL | APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id); lapic_ipi_wait(100); /* Explicitly deassert the INIT IPI. */ lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL | APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id); DELAY(10000); /* wait ~10mS */ /* * next we do a STARTUP IPI: the previous INIT IPI might still be * latched, (P5 bug) this 1st STARTUP would then terminate * immediately, and the previously started INIT IPI would continue. OR * the previous INIT IPI has already run. and this STARTUP IPI will * run. OR the previous INIT IPI was ignored. and this STARTUP IPI * will run. */ lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP | vector, apic_id); if (!lapic_ipi_wait(100)) panic("Failed to deliver first STARTUP IPI to APIC %d", apic_id); DELAY(200); /* wait ~200uS */ /* * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is * recognized after hardware RESET or INIT IPI. */ lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP | vector, apic_id); if (!lapic_ipi_wait(100)) panic("Failed to deliver second STARTUP IPI to APIC %d", apic_id); DELAY(200); /* wait ~200uS */ } static bool ipi_bitmap_set(int cpu, u_int ipi) { u_int bitmap, old, new; u_int *cpu_bitmap; bitmap = 1 << ipi; cpu_bitmap = &cpuid_to_pcpu[cpu]->pc_ipi_bitmap; old = *cpu_bitmap; for (;;) { if ((old & bitmap) != 0) break; new = old | bitmap; if (atomic_fcmpset_int(cpu_bitmap, &old, new)) break; } return (old != 0); } /* * Send an IPI to specified CPU handling the bitmap logic. */ static void ipi_send_cpu(int cpu, u_int ipi) { KASSERT((u_int)cpu < MAXCPU && cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu)); if (IPI_IS_BITMAPED(ipi)) { if (ipi_bitmap_set(cpu, ipi)) return; ipi = IPI_BITMAP_VECTOR; } lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]); } void ipi_bitmap_handler(struct trapframe frame) { struct trapframe *oldframe; struct thread *td; int cpu = PCPU_GET(cpuid); u_int ipi_bitmap; kasan_mark(&frame, sizeof(frame), sizeof(frame), 0); td = curthread; ipi_bitmap = atomic_readandclear_int(&cpuid_to_pcpu[cpu]-> pc_ipi_bitmap); /* * sched_preempt() must be called to clear the pending preempt * IPI to enable delivery of further preempts. However, the * critical section will cause extra scheduler lock thrashing * when used unconditionally. Only critical_enter() if * hardclock must also run, which requires the section entry. */ if (ipi_bitmap & (1 << IPI_HARDCLOCK)) critical_enter(); td->td_intr_nesting_level++; oldframe = td->td_intr_frame; td->td_intr_frame = &frame; #if defined(STACK) || defined(DDB) if (ipi_bitmap & (1 << IPI_TRACE)) stack_capture_intr(); #endif if (ipi_bitmap & (1 << IPI_PREEMPT)) { #ifdef COUNT_IPIS (*ipi_preempt_counts[cpu])++; #endif sched_preempt(td); } if (ipi_bitmap & (1 << IPI_AST)) { #ifdef COUNT_IPIS (*ipi_ast_counts[cpu])++; #endif /* Nothing to do for AST */ } if (ipi_bitmap & (1 << IPI_HARDCLOCK)) { #ifdef COUNT_IPIS (*ipi_hardclock_counts[cpu])++; #endif hardclockintr(); } td->td_intr_frame = oldframe; td->td_intr_nesting_level--; if (ipi_bitmap & (1 << IPI_HARDCLOCK)) critical_exit(); } /* * send an IPI to a set of cpus. 
/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(cpuset_t cpus, u_int ipi)
{
	int cpu;

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &cpus);

	CPU_FOREACH_ISSET(cpu, &cpus) {
		CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
		ipi_send_cpu(cpu, ipi);
	}
}

/*
 * send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_SET_ATOMIC(cpu, &ipi_stop_nmi_pending);

	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
	ipi_send_cpu(cpu, ipi);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;
	int cpu, c;

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD) {
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
		CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &other_cpus);
	}

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	if (IPI_IS_BITMAPED(ipi)) {
		cpu = PCPU_GET(cpuid);
		CPU_FOREACH(c) {
			if (c != cpu)
				ipi_bitmap_set(c, ipi);
		}
		ipi = IPI_BITMAP_VECTOR;
	}
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

void
ipi_self_from_nmi(u_int vector)
{

	lapic_ipi_vectored(vector, APIC_IPI_DEST_SELF);

	/* Wait for IPI to finish. */
	if (!lapic_ipi_wait(50000)) {
		if (KERNEL_PANICKED())
			return;
		else
			panic("APIC: IPI is stuck");
	}
}

int
ipi_nmi_handler(void)
{
	u_int cpuid;

	/*
	 * As long as there is not a simple way to know about a NMI's
	 * source, if the bitmask for the current CPU is present in
	 * the global pending bitword an IPI_STOP_HARD has been issued
	 * and should be handled.
	 */
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &ipi_stop_nmi_pending))
		return (1);

	CPU_CLR_ATOMIC(cpuid, &ipi_stop_nmi_pending);
	cpustop_handler();
	return (0);
}

int nmi_kdb_lock;

void
nmi_call_kdb_smp(u_int type, struct trapframe *frame)
{
	int cpu;
	bool call_post;

	cpu = PCPU_GET(cpuid);

	if (atomic_cmpset_acq_int(&nmi_kdb_lock, 0, 1)) {
		nmi_call_kdb(cpu, type, frame);
		call_post = false;
	} else {
		savectx(&stoppcbs[cpu]);
		CPU_SET_ATOMIC(cpu, &stopped_cpus);
		while (!atomic_cmpset_acq_int(&nmi_kdb_lock, 0, 1))
			ia32_pause();
		call_post = true;
	}
	atomic_store_rel_int(&nmi_kdb_lock, 0);
	if (call_post)
		cpustop_handler_post(cpu);
}
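/*
 * Aside: nmi_kdb_lock above is a bare spinlock built from
 * atomic_cmpset_acq_int() / atomic_store_rel_int().  The following is an
 * illustrative userland analogue of that acquire/release pattern using C11
 * atomics and pthreads, not part of this change; all names are hypothetical.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int lock;			/* 0 == free, 1 == held */
static int shared_counter;

static void
spin_lock(void)
{
	int expected;

	/* Acquire semantics order the critical section after the CAS. */
	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_weak_explicit(&lock, &expected, 1,
		    memory_order_acquire, memory_order_relaxed))
			break;
		/* The kernel code spins with ia32_pause() here. */
	}
}

static void
spin_unlock(void)
{
	/* Release semantics publish the critical section's stores. */
	atomic_store_explicit(&lock, 0, memory_order_release);
}

static void *
worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		spin_lock();
		shared_counter++;
		spin_unlock();
	}
	return (NULL);
}

int
main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("%d\n", shared_counter);	/* expect 200000 */
	return (0);
}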
/*
 * Handle an IPI_STOP by saving our current context and spinning (or mwaiting,
 * if available) until we are resumed.
 */
void
cpustop_handler(void)
{
	struct monitorbuf *mb;
	u_int cpu;
	bool use_mwait;

	cpu = PCPU_GET(cpuid);

	savectx(&stoppcbs[cpu]);

	use_mwait = (stop_mwait && (cpu_feature2 & CPUID2_MON) != 0 &&
	    !mwait_cpustop_broken);
	if (use_mwait) {
		mb = PCPU_PTR(monitorbuf);
		atomic_store_int(&mb->stop_state,
		    MONITOR_STOPSTATE_STOPPED);
	}

	/* Indicate that we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus)) {
		if (use_mwait) {
			cpu_monitor(mb, 0, 0);
			if (atomic_load_int(&mb->stop_state) ==
			    MONITOR_STOPSTATE_STOPPED)
				cpu_mwait(0, MWAIT_C1);
			continue;
		}

		ia32_pause();

		/*
		 * Halt non-BSP CPUs on panic -- we're never going to need them
		 * again, and might as well save power / release resources
		 * (e.g., overprovisioned VM infrastructure).
		 */
		while (__predict_false(!IS_BSP() && KERNEL_PANICKED()))
			halt();
	}

	cpustop_handler_post(cpu);
}

static void
cpustop_handler_post(u_int cpu)
{

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);

	/*
	 * We don't broadcast TLB invalidations to other CPUs when they are
	 * stopped.  Hence, we clear the TLB before resuming.
	 */
	invltlb_glob();

#if defined(__amd64__) && (defined(DDB) || defined(GDB))
	amd64_db_resume_dbreg();
#endif

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * Handle an IPI_SUSPEND by saving our current context and spinning until we
 * are resumed.
 */
void
cpususpend_handler(void)
{
	u_int cpu;

	mtx_assert(&smp_ipi_mtx, MA_NOTOWNED);

	cpu = PCPU_GET(cpuid);

	if (savectx(&susppcbs[cpu]->sp_pcb)) {
#ifdef __amd64__
		fpususpend(susppcbs[cpu]->sp_fpususpend);
#else
		npxsuspend(susppcbs[cpu]->sp_fpususpend);
#endif
		/*
		 * suspended_cpus is cleared shortly after each AP is restarted
		 * by a Startup IPI, so that the BSP can proceed to restarting
		 * the next AP.
		 *
		 * resuming_cpus gets cleared when the AP completes
		 * initialization after having been released by the BSP.
		 * resuming_cpus is probably not the best name for the
		 * variable, because it is actually a set of processors that
		 * haven't resumed yet and haven't necessarily started resuming.
		 *
		 * Note that suspended_cpus is meaningful only for ACPI suspend
		 * as it's not really used for Xen suspend since the APs are
		 * automatically restored to the running state and the correct
		 * context.  For the same reason resumectx is never called in
		 * that case.
		 */
		CPU_SET_ATOMIC(cpu, &suspended_cpus);
		CPU_SET_ATOMIC(cpu, &resuming_cpus);

		/*
		 * Invalidate the cache after setting the global status bits.
		 * The last AP to set its bit may end up being an Owner of the
		 * corresponding cache line in MOESI protocol.  The AP may be
		 * stopped before the cache line is written to the main memory.
		 */
		wbinvd();
	} else {
#ifdef __amd64__
		fpuresume(susppcbs[cpu]->sp_fpususpend);
#else
		npxresume(susppcbs[cpu]->sp_fpususpend);
#endif
		pmap_init_pat();
		initializecpu();
		PCPU_SET(switchtime, 0);
		PCPU_SET(switchticks, ticks);

		/* Indicate that we have restarted and restored the context. */
		CPU_CLR_ATOMIC(cpu, &suspended_cpus);
	}

	/* Wait for resume directive */
	while (!CPU_ISSET(cpu, &toresume_cpus))
		ia32_pause();

	/* Re-apply microcode updates. */
	ucode_reload();

#ifdef __i386__
	/* Finish removing the identity mapping of low memory for this AP. */
	invltlb_glob();
#endif

	if (cpu_ops.cpu_resume)
		cpu_ops.cpu_resume();
#ifdef __amd64__
	if (vmm_resume_p)
		vmm_resume_p();
#endif

	/* Resume MCA and local APIC */
	lapic_xapic_mode();
	mca_resume();
	lapic_setup(0);

	/* Indicate that we are resumed */
	CPU_CLR_ATOMIC(cpu, &resuming_cpus);
	CPU_CLR_ATOMIC(cpu, &suspended_cpus);
	CPU_CLR_ATOMIC(cpu, &toresume_cpus);
}

/*
 * Handle an IPI_SWI by waking delayed SWI thread.
 */
void
ipi_swi_handler(struct trapframe frame)
{

	intr_event_handle(clk_intr_event, &frame);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
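/*
 * Aside: cpususpend_handler() above hinges on savectx() "returning twice":
 * the branch taken when the context is first saved runs the suspend path,
 * and the branch taken when resumectx() later restores that context runs the
 * resume path.  The control flow resembles setjmp(3)/longjmp(3) in userland
 * (note setjmp uses the opposite return convention, 0 on the initial call).
 * An illustrative sketch, not part of this change:
 */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf ctx;

int
main(void)
{
	if (setjmp(ctx) == 0) {
		/* "Suspend" side: context just saved, first return. */
		printf("context saved, suspending\n");
		longjmp(ctx, 1);	/* stands in for resumectx() */
	} else {
		/* "Resume" side: came back through the saved context. */
		printf("context restored, resuming\n");
	}
	return (0);
}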
#ifdef COUNT_IPIS
/*
 * Setup interrupt counters for IPI handlers.
 */
static void
mp_ipi_intrcnt(void *dummy)
{
	char buf[64];
	int i;

	CPU_FOREACH(i) {
		snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
		intrcnt_add(buf, &ipi_invltlb_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
		intrcnt_add(buf, &ipi_invlrng_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
		intrcnt_add(buf, &ipi_invlpg_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlcache", i);
		intrcnt_add(buf, &ipi_invlcache_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
		intrcnt_add(buf, &ipi_preempt_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:ast", i);
		intrcnt_add(buf, &ipi_ast_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
		intrcnt_add(buf, &ipi_hardclock_counts[i]);
	}
}
SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
#endif