Index: sys/amd64/conf/FIRECRACKER
===================================================================
--- sys/amd64/conf/FIRECRACKER
+++ sys/amd64/conf/FIRECRACKER
@@ -115,7 +115,6 @@
 # Make an SMP-capable kernel by default
 options 	SMP			# Symmetric MultiProcessor Kernel
-options 	EARLY_AP_STARTUP
 
 # Pseudo devices.
 device		crypto		# core crypto support
Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC
+++ sys/amd64/conf/GENERIC
@@ -124,7 +124,6 @@
 # Make an SMP-capable kernel by default
 options 	SMP			# Symmetric MultiProcessor Kernel
-options 	EARLY_AP_STARTUP
 
 # CPU frequency control
 device		cpufreq
Index: sys/amd64/conf/MINIMAL
===================================================================
--- sys/amd64/conf/MINIMAL
+++ sys/amd64/conf/MINIMAL
@@ -80,7 +80,6 @@
 # Make an SMP-capable kernel by default
 options 	SMP			# Symmetric MultiProcessor Kernel
-options 	EARLY_AP_STARTUP
 
 # CPU frequency control
 device		cpufreq
Index: sys/cddl/dev/dtrace/amd64/dtrace_subr.c
===================================================================
--- sys/cddl/dev/dtrace/amd64/dtrace_subr.c
+++ sys/cddl/dev/dtrace/amd64/dtrace_subr.c
@@ -285,7 +285,6 @@
 	hst_cpu_tsc = rdtsc();
 }
 
-#ifdef EARLY_AP_STARTUP
 static void
 dtrace_gethrtime_init(void *arg)
 {
@@ -293,16 +292,6 @@
 	uint64_t tsc_f;
 	cpuset_t map;
 	int i;
-#else
-/*
- * Get the frequency and scale factor as early as possible so that they can be
- * used for boot-time tracing.
- */
-static void
-dtrace_gethrtime_init_early(void *arg)
-{
-	uint64_t tsc_f;
-#endif
 
 	/*
 	 * Get TSC frequency known at this moment.
@@ -331,18 +320,6 @@
 	 * (terahertz) values;
 	 */
 	nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;
-#ifndef EARLY_AP_STARTUP
-}
-SYSINIT(dtrace_gethrtime_init_early, SI_SUB_CPU, SI_ORDER_ANY,
-    dtrace_gethrtime_init_early, NULL);
-
-static void
-dtrace_gethrtime_init(void *arg)
-{
-	struct pcpu *pc;
-	cpuset_t map;
-	int i;
-#endif
 
 	if (vm_guest != VM_GUEST_NO)
 		return;
@@ -366,13 +343,8 @@
 	}
 	sched_unpin();
 }
-#ifdef EARLY_AP_STARTUP
 SYSINIT(dtrace_gethrtime_init, SI_SUB_DTRACE, SI_ORDER_ANY,
     dtrace_gethrtime_init, NULL);
-#else
-SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init,
-    NULL);
-#endif
 
 /*
  * DTrace needs a high resolution time function which can
Index: sys/cddl/dev/dtrace/dtrace_load.c
===================================================================
--- sys/cddl/dev/dtrace/dtrace_load.c
+++ sys/cddl/dev/dtrace/dtrace_load.c
@@ -22,35 +22,11 @@
  *
  */
 
-#ifndef EARLY_AP_STARTUP
-static void
-dtrace_ap_start(void *dummy)
-{
-	int i;
-
-	mutex_enter(&cpu_lock);
-
-	/* Setup the rest of the CPUs. */
-	CPU_FOREACH(i) {
-		if (i == 0)
-			continue;
-
-		(void) dtrace_cpu_setup(CPU_CONFIG, i);
-	}
-
-	mutex_exit(&cpu_lock);
-}
-
-SYSINIT(dtrace_ap_start, SI_SUB_SMP, SI_ORDER_ANY, dtrace_ap_start, NULL);
-#endif
-
 static void
 dtrace_load(void *dummy)
 {
 	dtrace_provider_id_t id;
-#ifdef EARLY_AP_STARTUP
 	int i;
-#endif
 
 #ifndef illumos
 	/*
@@ -152,14 +128,9 @@
 	mutex_exit(&dtrace_lock);
 	mutex_exit(&dtrace_provider_lock);
 
-#ifdef EARLY_AP_STARTUP
 	CPU_FOREACH(i) {
 		(void) dtrace_cpu_setup(CPU_CONFIG, i);
 	}
-#else
-	/* Setup the boot CPU */
-	(void) dtrace_cpu_setup(CPU_CONFIG, 0);
-#endif
 
 	mutex_exit(&cpu_lock);
Index: sys/cddl/dev/dtrace/i386/dtrace_subr.c
===================================================================
--- sys/cddl/dev/dtrace/i386/dtrace_subr.c
+++ sys/cddl/dev/dtrace/i386/dtrace_subr.c
@@ -285,7 +285,6 @@
 	hst_cpu_tsc = rdtsc();
 }
 
-#ifdef EARLY_AP_STARTUP
 static void
 dtrace_gethrtime_init(void *arg)
 {
@@ -293,16 +292,6 @@
 	uint64_t tsc_f;
 	cpuset_t map;
 	int i;
-#else
-/*
- * Get the frequency and scale factor as early as possible so that they can be
- * used for boot-time tracing.
- */
-static void
-dtrace_gethrtime_init_early(void *arg)
-{
-	uint64_t tsc_f;
-#endif
 
 	/*
 	 * Get TSC frequency known at this moment.
@@ -331,18 +320,6 @@
 	 * (terahertz) values;
 	 */
 	nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;
-#ifndef EARLY_AP_STARTUP
-}
-SYSINIT(dtrace_gethrtime_init_early, SI_SUB_CPU, SI_ORDER_ANY,
-    dtrace_gethrtime_init_early, NULL);
-
-static void
-dtrace_gethrtime_init(void *arg)
-{
-	cpuset_t map;
-	struct pcpu *pc;
-	int i;
-#endif
 
 	if (vm_guest != VM_GUEST_NO)
 		return;
@@ -366,13 +343,8 @@
 	}
 	sched_unpin();
 }
-#ifdef EARLY_AP_STARTUP
 SYSINIT(dtrace_gethrtime_init, SI_SUB_DTRACE, SI_ORDER_ANY,
     dtrace_gethrtime_init, NULL);
-#else
-SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init,
-    NULL);
-#endif
 
 /*
  * DTrace needs a high resolution time function which can
Index: sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
===================================================================
--- sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
+++ sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
@@ -220,13 +220,8 @@
 	}
 	sched_unpin();
 }
-#ifdef EARLY_AP_STARTUP
 SYSINIT(dtrace_gethrtime_init, SI_SUB_DTRACE, SI_ORDER_ANY,
     dtrace_gethrtime_init, NULL);
-#else
-SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init,
-    NULL);
-#endif
 
 /*
  * DTrace needs a high resolution time function which can
Index: sys/conf/NOTES
===================================================================
--- sys/conf/NOTES
+++ sys/conf/NOTES
@@ -223,12 +223,6 @@
 # Mandatory:
 options 	SMP			# Symmetric MultiProcessor Kernel
 
-# EARLY_AP_STARTUP releases the Application Processors earlier in the
-# kernel startup process (before devices are probed) rather than at the
-# end. This is a temporary option for use during the transition from
-# late to early AP startup.
-options 	EARLY_AP_STARTUP
-
 # MAXCPU defines the maximum number of CPUs that can boot in the system.
 # A default value should be already present, for every architecture.
 options 	MAXCPU=32
Index: sys/conf/options
===================================================================
--- sys/conf/options
+++ sys/conf/options
@@ -634,7 +634,6 @@
 DEBUG_REDZONE	opt_vm.h
 
 # Standard SMP options
-EARLY_AP_STARTUP	opt_global.h
 SMP		opt_global.h
 NUMA		opt_global.h
Index: sys/dev/acpica/acpi.c
===================================================================
--- sys/dev/acpica/acpi.c
+++ sys/dev/acpica/acpi.c
@@ -3397,18 +3397,10 @@
 	suspend_all_fs();
 	EVENTHANDLER_INVOKE(power_suspend);
 
-#ifdef EARLY_AP_STARTUP
 	MPASS(mp_ncpus == 1 || smp_started);
 	thread_lock(curthread);
 	sched_bind(curthread, 0);
 	thread_unlock(curthread);
-#else
-	if (smp_started) {
-		thread_lock(curthread);
-		sched_bind(curthread, 0);
-		thread_unlock(curthread);
-	}
-#endif
 
 	/*
 	 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME
@@ -3541,17 +3533,9 @@
 
 	bus_topo_unlock();
 
-#ifdef EARLY_AP_STARTUP
 	thread_lock(curthread);
 	sched_unbind(curthread);
 	thread_unlock(curthread);
-#else
-	if (smp_started) {
-		thread_lock(curthread);
-		sched_unbind(curthread);
-		thread_unlock(curthread);
-	}
-#endif
 
 	resume_all_fs();
 	resume_all_proc();
Index: sys/dev/acpica/acpi_cpu.c
===================================================================
--- sys/dev/acpica/acpi_cpu.c
+++ sys/dev/acpica/acpi_cpu.c
@@ -479,12 +479,7 @@
 	bus_topo_unlock();
 
 	if (attached) {
-#ifdef EARLY_AP_STARTUP
 		acpi_cpu_startup(NULL);
-#else
-		/* Queue post cpu-probing task handler */
-		AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
-#endif
 	}
 }
Index: sys/dev/hwpmc/hwpmc_mod.c
===================================================================
--- sys/dev/hwpmc/hwpmc_mod.c
+++ sys/dev/hwpmc/hwpmc_mod.c
@@ -435,11 +435,7 @@
 	.priv = &pmc_syscall_mod,
 };
 
-#ifdef EARLY_AP_STARTUP
 DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SYSCALLS, SI_ORDER_ANY);
-#else
-DECLARE_MODULE(pmc, pmc_mod, SI_SUB_SMP, SI_ORDER_ANY);
-#endif
 MODULE_VERSION(pmc, PMC_VERSION);
 
 #ifdef HWPMC_DEBUG
Index: sys/dev/hyperv/vmbus/vmbus.c
===================================================================
--- sys/dev/hyperv/vmbus/vmbus.c
+++ sys/dev/hyperv/vmbus/vmbus.c
@@ -114,9 +114,7 @@
 			    device_t dev, int cpu);
 static struct taskqueue	*vmbus_get_eventtq_method(device_t, device_t,
 			    int);
-#if defined(EARLY_AP_STARTUP)
 static void		vmbus_intrhook(void *);
-#endif
 
 static int		vmbus_init(struct vmbus_softc *);
 static int		vmbus_connect(struct vmbus_softc *, uint32_t);
@@ -1482,8 +1480,6 @@
 {
 }
 
-#if defined(EARLY_AP_STARTUP)
-
 static void
 vmbus_intrhook(void *xsc)
 {
@@ -1495,8 +1491,6 @@
 	config_intrhook_disestablish(&sc->vmbus_intrhook);
 }
 
-#endif	/* EARLY_AP_STARTUP */
-
 static int
 vmbus_attach(device_t dev)
 {
@@ -1511,14 +1505,12 @@
 	 */
 	vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;
 
-#if defined(EARLY_AP_STARTUP)
 	/*
 	 * Defer the real attach until the pause(9) works as expected.
 	 */
 	vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook;
 	vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc;
 	config_intrhook_establish(&vmbus_sc->vmbus_intrhook);
-#endif	/* EARLY_AP_STARTUP and aarch64 */
 
 	return (0);
 }
@@ -1562,23 +1554,3 @@
 #endif
 	return (0);
 }
-
-#if !defined(EARLY_AP_STARTUP)
-
-static void
-vmbus_sysinit(void *arg __unused)
-{
-	struct vmbus_softc *sc = vmbus_get_softc();
-
-	if (vm_guest != VM_GUEST_HV || sc == NULL)
-		return;
-
-	vmbus_doattach(sc);
-}
-/*
- * NOTE:
- * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
- * initialized.
- */
-SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);
-#endif	/* !EARLY_AP_STARTUP */
Index: sys/dev/kvm_clock/kvm_clock.c
===================================================================
--- sys/dev/kvm_clock/kvm_clock.c
+++ sys/dev/kvm_clock/kvm_clock.c
@@ -71,9 +71,6 @@
 	struct pvclock_vcpu_time_info *timeinfos;
 	u_int		 msr_tc;
 	u_int		 msr_wc;
-#ifndef EARLY_AP_STARTUP
-	int		 firstcpu;
-#endif
 };
 
 static struct pvclock_wall_clock *kvm_clock_get_wallclock(void *arg);
@@ -109,32 +106,6 @@
 	wrmsr(sc->msr_tc, vtophys(&(sc->timeinfos)[curcpu]) | 1);
 }
 
-#ifndef EARLY_AP_STARTUP
-static void
-kvm_clock_init_smp(void *arg __unused)
-{
-	devclass_t kvm_clock_devclass;
-	cpuset_t cpus;
-	struct kvm_clock_softc *sc;
-
-	kvm_clock_devclass = devclass_find(KVM_CLOCK_DEVNAME);
-	sc = devclass_get_softc(kvm_clock_devclass, 0);
-	if (sc == NULL || mp_ncpus == 1)
-		return;
-
-	/*
-	 * Register with the hypervisor on all CPUs except the one that
-	 * registered in kvm_clock_attach().
-	 */
-	cpus = all_cpus;
-	KASSERT(CPU_ISSET(sc->firstcpu, &cpus),
-	    ("%s: invalid first CPU %d", __func__, sc->firstcpu));
-	CPU_CLR(sc->firstcpu, &cpus);
-	kvm_clock_system_time_enable(sc, &cpus);
-}
-SYSINIT(kvm_clock, SI_SUB_SMP, SI_ORDER_ANY, kvm_clock_init_smp, NULL);
-#endif
-
 static void
 kvm_clock_identify(driver_t *driver, device_t parent)
 {
@@ -181,12 +152,7 @@
 	/* Set up 'struct pvclock_vcpu_time_info' page(s): */
 	sc->timeinfos = kmem_malloc(mp_ncpus *
 	    sizeof(struct pvclock_vcpu_time_info), M_WAITOK | M_ZERO);
-#ifdef EARLY_AP_STARTUP
 	kvm_clock_system_time_enable(sc, &all_cpus);
-#else
-	sc->firstcpu = curcpu;
-	kvm_clock_system_time_enable_pcpu(sc);
-#endif
 
 	/*
 	 * Init pvclock; register KVM clock wall clock, register KVM clock
Index: sys/dev/ntb/ntb_hw/ntb_hw_intel.c
===================================================================
--- sys/dev/ntb/ntb_hw/ntb_hw_intel.c
+++ sys/dev/ntb/ntb_hw/ntb_hw_intel.c
@@ -2976,19 +2976,6 @@
 	return (uidx);
 }
 
-#ifndef EARLY_AP_STARTUP
-static int msix_ready;
-
-static void
-intel_ntb_msix_ready(void *arg __unused)
-{
-
-	msix_ready = 1;
-}
-SYSINIT(intel_ntb_msix_ready, SI_SUB_SMP, SI_ORDER_ANY,
-    intel_ntb_msix_ready, NULL);
-#endif
-
 static void
 intel_ntb_exchange_msix(void *ctx)
 {
@@ -3003,12 +2990,6 @@
 	if (ntb->peer_msix_done)
 		goto msix_done;
 
-#ifndef EARLY_AP_STARTUP
-	/* Block MSIX negotiation until SMP started and IRQ reshuffled. */
-	if (!msix_ready)
-		goto reschedule;
-#endif
-
 	intel_ntb_get_msix_info(ntb);
 	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
 		intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i,
Index: sys/dev/usb/usb_process.c
===================================================================
--- sys/dev/usb/usb_process.c
+++ sys/dev/usb/usb_process.c
@@ -435,15 +435,6 @@
 			up->up_csleep = 0;
 			cv_signal(&up->up_cv);
 		}
-#ifndef EARLY_AP_STARTUP
-		/* Check if we are still cold booted */
-		if (cold) {
-			USB_THREAD_SUSPEND(up->up_ptr);
-			printf("WARNING: A USB process has "
-			    "been left suspended\n");
-			break;
-		}
-#endif
 		cv_wait(&up->up_cv, up->up_mtx);
 	}
 	/* Check if someone is waiting - should not happen */
Index: sys/dev/xen/control/control.c
===================================================================
--- sys/dev/xen/control/control.c
+++ sys/dev/xen/control/control.c
@@ -217,18 +217,10 @@
 	suspend_all_fs();
 	EVENTHANDLER_INVOKE(power_suspend);
 
-#ifdef EARLY_AP_STARTUP
 	MPASS(mp_ncpus == 1 || smp_started);
 	thread_lock(curthread);
 	sched_bind(curthread, 0);
 	thread_unlock(curthread);
-#else
-	if (smp_started) {
-		thread_lock(curthread);
-		sched_bind(curthread, 0);
-		thread_unlock(curthread);
-	}
-#endif
 	KASSERT((PCPU_GET(cpuid) == 0), ("Not running on CPU#0"));
 
 	/*
@@ -242,7 +234,6 @@
 	}
 
 #ifdef SMP
-#ifdef EARLY_AP_STARTUP
 	/*
 	 * Suspend other CPUs. This prevents IPIs while we
 	 * are resuming, and will allow us to reset per-cpu
@@ -252,20 +243,6 @@
 	CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map);
 	if (!CPU_EMPTY(&cpu_suspend_map))
 		suspend_cpus(cpu_suspend_map);
-#else
-	CPU_ZERO(&cpu_suspend_map);	/* silence gcc */
-	if (smp_started) {
-		/*
-		 * Suspend other CPUs. This prevents IPIs while we
-		 * are resuming, and will allow us to reset per-cpu
-		 * vcpu_info on resume.
-		 */
-		cpu_suspend_map = all_cpus;
-		CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map);
-		if (!CPU_EMPTY(&cpu_suspend_map))
-			suspend_cpus(cpu_suspend_map);
-	}
-#endif
 #endif
 
 	/*
@@ -317,17 +294,9 @@
 		timecounter->tc_get_timecount(timecounter);
 	inittodr(time_second);
 
-#ifdef EARLY_AP_STARTUP
 	thread_lock(curthread);
 	sched_unbind(curthread);
 	thread_unlock(curthread);
-#else
-	if (smp_started) {
-		thread_lock(curthread);
-		sched_unbind(curthread);
-		thread_unlock(curthread);
-	}
-#endif
 
 	resume_all_fs();
 	resume_all_proc();
Index: sys/geom/eli/g_eli.c
===================================================================
--- sys/geom/eli/g_eli.c
+++ sys/geom/eli/g_eli.c
@@ -665,15 +665,7 @@
 	wr = arg;
 	sc = wr->w_softc;
-#ifdef EARLY_AP_STARTUP
 	MPASS(!sc->sc_cpubind || smp_started);
-#elif defined(SMP)
-	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
-	if (sc->sc_cpubind) {
-		while (!smp_started)
-			tsleep(wr, 0, "geli:smp", hz / 4);
-	}
-#endif
 	thread_lock(curthread);
 	sched_prio(curthread, PUSER);
 	if (sc->sc_cpubind)
Index: sys/i386/conf/GENERIC
===================================================================
--- sys/i386/conf/GENERIC
+++ sys/i386/conf/GENERIC
@@ -109,7 +109,6 @@
 # To make an SMP kernel, the next two lines are needed
 options 	SMP			# Symmetric MultiProcessor Kernel
 device		apic			# I/O APIC
-options 	EARLY_AP_STARTUP
 
 # CPU frequency control
 device		cpufreq
Index: sys/i386/conf/MINIMAL
===================================================================
--- sys/i386/conf/MINIMAL
+++ sys/i386/conf/MINIMAL
@@ -88,7 +88,6 @@
 # Make an SMP-capable kernel by default
 options 	SMP			# Symmetric MultiProcessor Kernel
-options 	EARLY_AP_STARTUP
 device		apic
 
 # CPU frequency control
Index: sys/kern/kern_clocksource.c
===================================================================
--- sys/kern/kern_clocksource.c
+++ sys/kern/kern_clocksource.c
@@ -325,16 +325,10 @@
 		    (int)(now >> 32), (u_int)(now & 0xffffffff));
 
 #ifdef SMP
-#ifdef EARLY_AP_STARTUP
 	MPASS(mp_ncpus == 1 || smp_started);
-#endif
 	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
 	bcast = 0;
-#ifdef EARLY_AP_STARTUP
 	if ((et->et_flags & ET_FLAGS_PERCPU) == 0) {
-#else
-	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
-#endif
 		CPU_FOREACH(cpu) {
 			state = DPCPU_ID_PTR(cpu, timerstate);
 			ET_HW_LOCK(state);
@@ -495,18 +489,11 @@
 		nexttick = next;
 	else
 		nexttick = -1;
-#ifdef EARLY_AP_STARTUP
 	MPASS(mp_ncpus == 1 || smp_started);
-#endif
 	CPU_FOREACH(cpu) {
 		state = DPCPU_ID_PTR(cpu, timerstate);
 		state->now = now;
-#ifndef EARLY_AP_STARTUP
-		if (!smp_started && cpu != CPU_FIRST())
-			state->nextevent = SBT_MAX;
-		else
-#endif
-			state->nextevent = next;
+		state->nextevent = next;
 		if (periodic)
 			state->nexttick = next;
 		else
@@ -528,13 +515,8 @@
 	}
 	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
 #ifdef SMP
-#ifdef EARLY_AP_STARTUP
 	/* If timer is global we are done. */
 	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
-#else
-	/* If timer is global or there is no other CPUs yet - we are done. */
-	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
-#endif
 		critical_exit();
 		return;
 	}
Index: sys/kern/kern_cpu.c
===================================================================
--- sys/kern/kern_cpu.c
+++ sys/kern/kern_cpu.c
@@ -255,22 +255,7 @@
 
 	CF_MTX_LOCK(&sc->lock);
 #ifdef SMP
-#ifdef EARLY_AP_STARTUP
 	MPASS(mp_ncpus == 1 || smp_started);
-#else
-	/*
-	 * If still booting and secondary CPUs not started yet, don't allow
-	 * changing the frequency until they're online. This is because we
-	 * can't switch to them using sched_bind() and thus we'd only be
-	 * switching the main CPU. XXXTODO: Need to think more about how to
-	 * handle having different CPUs at different frequencies.
-	 */
-	if (mp_ncpus > 1 && !smp_started) {
-		device_printf(dev, "rejecting change, SMP not started yet\n");
-		error = ENXIO;
-		goto out;
-	}
-#endif
 #endif /* SMP */
 
 	/*
Index: sys/kern/subr_epoch.c
===================================================================
--- sys/kern/subr_epoch.c
+++ sys/kern/subr_epoch.c
@@ -330,15 +330,6 @@
 }
 SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL);
 
-#if !defined(EARLY_AP_STARTUP)
-static void
-epoch_init_smp(void *dummy __unused)
-{
-	inited = 2;
-}
-SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
-#endif
-
 static void
 epoch_ctor(epoch_t epoch)
 {
@@ -788,10 +779,6 @@
 	/* too early in boot to have epoch set up */
 	if (__predict_false(epoch == NULL))
 		goto boottime;
-#if !defined(EARLY_AP_STARTUP)
-	if (__predict_false(inited < 2))
-		goto boottime;
-#endif
 
 	critical_enter();
 	*DPCPU_PTR(epoch_cb_count) += 1;
@@ -972,10 +959,6 @@
 	/* too early in boot to have epoch set up */
 	if (__predict_false(epoch == NULL))
 		return;
-#if !defined(EARLY_AP_STARTUP)
-	if (__predict_false(inited < 2))
-		return;
-#endif
 
 	DROP_GIANT();
 	sx_xlock(&epoch->e_drain_sx);
Index: sys/kern/subr_intr.c
===================================================================
--- sys/kern/subr_intr.c
+++ sys/kern/subr_intr.c
@@ -138,11 +138,7 @@
 static u_int irq_next_free;
 
 #ifdef SMP
-#ifdef EARLY_AP_STARTUP
 static bool irq_assign_cpu = true;
-#else
-static bool irq_assign_cpu = false;
-#endif
 #endif
 
 u_int intr_nirq = NIRQ;
@@ -1265,49 +1261,6 @@
 	return (last_cpu);
 }
 
-#ifndef EARLY_AP_STARTUP
-/*
- * Distribute all the interrupt sources among the available
- * CPUs once the AP's have been launched.
- */
-static void
-intr_irq_shuffle(void *arg __unused)
-{
-	struct intr_irqsrc *isrc;
-	u_int i;
-
-	if (mp_ncpus == 1)
-		return;
-
-	mtx_lock(&isrc_table_lock);
-	irq_assign_cpu = true;
-	for (i = 0; i < intr_nirq; i++) {
-		isrc = irq_sources[i];
-		if (isrc == NULL || isrc->isrc_handlers == 0 ||
-		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
-			continue;
-
-		if (isrc->isrc_event != NULL &&
-		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
-		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
-			panic("%s: CPU inconsistency", __func__);
-
-		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
-			CPU_ZERO(&isrc->isrc_cpu); /* start again */
-
-		/*
-		 * We are in wicked position here if the following call fails
-		 * for bound ISRC. The best thing we can do is to clear
-		 * isrc_cpu so inconsistency with ie_cpu will be detectable.
-		 */
-		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
-			CPU_ZERO(&isrc->isrc_cpu);
-	}
-	mtx_unlock(&isrc_table_lock);
-}
-SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
-#endif /* !EARLY_AP_STARTUP */
-
 #else
 u_int
 intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
Index: sys/net/netisr.c
===================================================================
--- sys/net/netisr.c
+++ sys/net/netisr.c
@@ -1333,41 +1333,14 @@
 	}
 #endif
 
-#ifdef EARLY_AP_STARTUP
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 		if (nws_count >= netisr_maxthreads)
 			break;
 		netisr_start_swi(pc->pc_cpuid, pc);
 	}
-#else
-	pc = get_pcpu();
-	netisr_start_swi(pc->pc_cpuid, pc);
-#endif
 }
 SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
 
-#ifndef EARLY_AP_STARTUP
-/*
- * Start worker threads for additional CPUs. No attempt to gracefully handle
- * work reassignment, we don't yet support dynamic reconfiguration.
- */
-static void
-netisr_start(void *arg)
-{
-	struct pcpu *pc;
-
-	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
-		if (nws_count >= netisr_maxthreads)
-			break;
-		/* Worker will already be present for boot CPU. */
-		if (pc->pc_netisr != NULL)
-			continue;
-		netisr_start_swi(pc->pc_cpuid, pc);
-	}
-}
-SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
-#endif
-
 /*
  * Sysctl monitoring for netisr: query a list of registered protocols.
  */
Index: sys/riscv/riscv/clock.c
===================================================================
--- sys/riscv/riscv/clock.c
+++ sys/riscv/riscv/clock.c
@@ -46,7 +46,6 @@
 void
 cpu_initclocks(void)
 {
-#ifdef EARLY_AP_STARTUP
 	struct thread *td;
 	int i;
 
@@ -64,7 +63,4 @@
 	if (sched_is_bound(td))
 		sched_unbind(td);
 	thread_unlock(td);
-#else
-	cpu_initclocks_bsp();
-#endif
 }
Index: sys/riscv/riscv/mp_machdep.c
===================================================================
--- sys/riscv/riscv/mp_machdep.c
+++ sys/riscv/riscv/mp_machdep.c
@@ -250,11 +250,6 @@
 	/* Enable software interrupts */
 	riscv_unmask_ipi();
 
-#ifndef EARLY_AP_STARTUP
-	/* Start per-CPU event timers. */
-	cpu_initclocks_ap();
-#endif
-
 	/* Enable external (PLIC) interrupts */
 	csr_set(sie, SIE_SEIE);
Index: sys/sys/kernel.h
===================================================================
--- sys/sys/kernel.h
+++ sys/sys/kernel.h
@@ -127,9 +127,7 @@
 	SI_SUB_INTR		= 0x2800000,	/* interrupt threads */
 	SI_SUB_TASKQ		= 0x2880000,	/* task queues */
 	SI_SUB_EPOCH		= 0x2888000,	/* epoch subsystem */
-#ifdef EARLY_AP_STARTUP
 	SI_SUB_SMP		= 0x2900000,	/* start the APs*/
-#endif
 	SI_SUB_SOFTINTR		= 0x2A00000,	/* start soft interrupt thread */
 	SI_SUB_DEVFS		= 0x2F00000,	/* devfs ready for devices */
 	SI_SUB_INIT_IF		= 0x3000000,	/* prep for net interfaces */
@@ -169,9 +167,6 @@
 	SI_SUB_KTHREAD_BUF	= 0xea00000,	/* buffer daemon*/
 	SI_SUB_KTHREAD_UPDATE	= 0xec00000,	/* update daemon*/
 	SI_SUB_KTHREAD_IDLE	= 0xee00000,	/* idle procs*/
-#ifndef EARLY_AP_STARTUP
-	SI_SUB_SMP		= 0xf000000,	/* start the APs*/
-#endif
 	SI_SUB_RACCTD		= 0xf100000,	/* start racctd*/
 	SI_SUB_LAST		= 0xfffffff	/* final initialization */
 };
Index: sys/x86/isa/clock.c
===================================================================
--- sys/x86/isa/clock.c
+++ sys/x86/isa/clock.c
@@ -413,7 +413,6 @@
 void
 cpu_initclocks(void)
 {
-#ifdef EARLY_AP_STARTUP
 	struct thread *td;
 	int i;
 
@@ -436,13 +435,6 @@
 	if (sched_is_bound(td))
 		sched_unbind(td);
 	thread_unlock(td);
-#else
-	tsc_calibrate();
-#ifdef DEV_APIC
-	lapic_calibrate_timer();
-#endif
-	cpu_initclocks_bsp();
-#endif
 }
 
 static int
Index: sys/x86/x86/intr_machdep.c
===================================================================
--- sys/x86/x86/intr_machdep.c
+++ sys/x86/x86/intr_machdep.c
@@ -93,10 +93,6 @@
 static TAILQ_HEAD(pics_head, pic) pics;
 u_int num_io_irqs;
 
-#if defined(SMP) && !defined(EARLY_AP_STARTUP)
-static int assign_cpu;
-#endif
-
 u_long *intrcnt;
 char *intrnames;
 size_t sintrcnt = sizeof(intrcnt);
@@ -402,18 +398,10 @@
 	struct intsrc *isrc;
 	int error;
 
-#ifdef EARLY_AP_STARTUP
 	MPASS(mp_ncpus == 1 || smp_started);
 
 	/* Nothing to do if there is only a single CPU. */
 	if (mp_ncpus > 1 && cpu != NOCPU) {
-#else
-	/*
-	 * Don't do anything during early boot. We will pick up the
-	 * assignment once the APs are started.
-	 */
-	if (assign_cpu && cpu != NOCPU) {
-#endif
 		isrc = arg;
 		sx_xlock(&intrsrc_lock);
 		error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
@@ -608,15 +596,9 @@
 {
 	u_int apic_id;
 
-#ifdef EARLY_AP_STARTUP
 	MPASS(mp_ncpus == 1 || smp_started);
 	if (mp_ncpus == 1)
 		return (PCPU_GET(apic_id));
-#else
-	/* Leave all interrupts on the BSP during boot. */
-	if (!assign_cpu)
-		return (PCPU_GET(apic_id));
-#endif
 
 	mtx_lock_spin(&icu_lock);
 	apic_id = cpu_apic_ids[current_cpu[domain]];
@@ -647,7 +629,6 @@
 		CPU_SET(cpu, &intr_cpus);
 }
 
-#ifdef EARLY_AP_STARTUP
 static void
 intr_smp_startup(void *arg __unused)
 {
@@ -658,52 +639,6 @@
 }
 SYSINIT(intr_smp_startup, SI_SUB_SMP, SI_ORDER_SECOND, intr_smp_startup,
     NULL);
 
-#else
-/*
- * Distribute all the interrupt sources among the available CPUs once the
- * AP's have been launched.
- */
-static void
-intr_shuffle_irqs(void *arg __unused)
-{
-	struct intsrc *isrc;
-	u_int cpu, i;
-
-	intr_init_cpus();
-	/* Don't bother on UP. */
-	if (mp_ncpus == 1)
-		return;
-
-	/* Round-robin assign a CPU to each enabled source. */
-	sx_xlock(&intrsrc_lock);
-	assign_cpu = 1;
-	for (i = 0; i < num_io_irqs; i++) {
-		isrc = interrupt_sources[i];
-		if (isrc != NULL && isrc->is_handlers > 0) {
-			/*
-			 * If this event is already bound to a CPU,
-			 * then assign the source to that CPU instead
-			 * of picking one via round-robin. Note that
-			 * this is careful to only advance the
-			 * round-robin if the CPU assignment succeeds.
-			 */
-			cpu = isrc->is_event->ie_cpu;
-			if (cpu == NOCPU)
-				cpu = current_cpu[isrc->is_domain];
-			if (isrc->is_pic->pic_assign_cpu(isrc,
-			    cpu_apic_ids[cpu]) == 0) {
-				isrc->is_cpu = cpu;
-				if (isrc->is_event->ie_cpu == NOCPU)
-					intr_next_cpu(isrc->is_domain);
-			}
-		}
-	}
-	sx_xunlock(&intrsrc_lock);
-}
-SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
-    NULL);
-#endif
-
 /*
  * TODO: Export this information in a non-MD fashion, integrate with vmstat -i.
 */
Index: sys/x86/x86/local_apic.c
===================================================================
--- sys/x86/x86/local_apic.c
+++ sys/x86/x86/local_apic.c
@@ -878,22 +878,8 @@
 
 	lvts[APIC_LVT_PMC].lvt_masked = 0;
 
-#ifdef EARLY_AP_STARTUP
 	MPASS(mp_ncpus == 1 || smp_started);
 	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
-#else
-#ifdef SMP
-	/*
-	 * If hwpmc was loaded at boot time then the APs may not be
-	 * started yet. In that case, don't forward the request to
-	 * them as they will program the lvt when they start.
-	 */
-	if (smp_started)
-		smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
-	else
-#endif
-		lapic_update_pmc(NULL);
-#endif
 	return (1);
 #else
 	return (0);
Index: sys/x86/x86/mca.c
===================================================================
--- sys/x86/x86/mca.c
+++ sys/x86/x86/mca.c
@@ -1079,11 +1079,7 @@
 	taskqueue_enqueue_timeout_sbt(mca_tq, &mca_scan_task,
 	    mca_ticks * SBT_1S, 0, C_PREL(1));
 }
-#ifdef EARLY_AP_STARTUP
 SYSINIT(mca_startup, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, mca_startup, NULL);
-#else
-SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);
-#endif
 
 #ifdef DEV_APIC
 static void
Index: sys/x86/x86/mp_x86.c
===================================================================
--- sys/x86/x86/mp_x86.c
+++ sys/x86/x86/mp_x86.c
@@ -1114,11 +1114,6 @@
 	while (atomic_load_acq_int(&smp_started) == 0)
 		ia32_pause();
 
-#ifndef EARLY_AP_STARTUP
-	/* Start per-CPU event timers. */
-	cpu_initclocks_ap();
-#endif
-
 	kcsan_cpu_init(cpuid);
 
 	sched_ap_entry();