diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
--- a/sys/arm64/arm64/mp_machdep.c
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -183,7 +183,7 @@
 
 	started = 0;
 	for (i = 0; i < 2000; i++) {
-		if (smp_started) {
+		if (atomic_load_acq_int(&smp_started) != 0) {
 			printf("done\n");
 			return;
 		}
@@ -290,11 +290,6 @@
 
 	kcsan_cpu_init(cpu);
 
-	/*
-	 * Assert that smp_after_idle_runnable condition is reasonable.
-	 */
-	MPASS(PCPU_GET(curpcb) == NULL);
-
 	/* Enter the scheduler */
 	sched_ap_entry();
 
@@ -305,16 +300,24 @@
 static void
 smp_after_idle_runnable(void *arg __unused)
 {
-	struct pcpu *pc;
 	int cpu;
 
+	if (mp_ncpus == 1)
+		return;
+
+	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));
+
+	/*
+	 * Wait for all APs to handle an interrupt.  After that, we know that
+	 * the APs have entered the scheduler at least once, so the boot stacks
+	 * are safe to free.
+	 */
+	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
+	    smp_no_rendezvous_barrier, NULL);
+
 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
-		if (bootstacks[cpu] != NULL) {
-			pc = pcpu_find(cpu);
-			while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
-				cpu_spinwait();
+		if (bootstacks[cpu] != NULL)
 			kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
-		}
 	}
 }
 SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
diff --git a/sys/riscv/riscv/mp_machdep.c b/sys/riscv/riscv/mp_machdep.c
--- a/sys/riscv/riscv/mp_machdep.c
+++ b/sys/riscv/riscv/mp_machdep.c
@@ -210,7 +210,7 @@
 	sbi_send_ipi(mask.__bits);
 
 	for (i = 0; i < 2000; i++) {
-		if (smp_started)
+		if (atomic_load_acq_int(&smp_started))
 			return;
 		DELAY(1000);
 	}
@@ -284,11 +284,6 @@
 
 	mtx_unlock_spin(&ap_boot_mtx);
 
-	/*
-	 * Assert that smp_after_idle_runnable condition is reasonable.
-	 */
-	MPASS(PCPU_GET(curpcb) == NULL);
-
 	/* Enter the scheduler */
 	sched_ap_entry();
 
@@ -299,16 +294,24 @@
 static void
 smp_after_idle_runnable(void *arg __unused)
 {
-	struct pcpu *pc;
 	int cpu;
 
+	if (mp_ncpus == 1)
+		return;
+
+	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));
+
+	/*
+	 * Wait for all APs to handle an interrupt.  After that, we know that
+	 * the APs have entered the scheduler at least once, so the boot stacks
+	 * are safe to free.
+	 */
+	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
+	    smp_no_rendezvous_barrier, NULL);
+
 	for (cpu = 1; cpu <= mp_maxid; cpu++) {
-		if (bootstacks[cpu] != NULL) {
-			pc = pcpu_find(cpu);
-			while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
-				cpu_spinwait();
+		if (bootstacks[cpu] != NULL)
 			kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
-		}
 	}
 }
 SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
diff --git a/sys/x86/x86/mp_x86.c b/sys/x86/x86/mp_x86.c
--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c
@@ -1129,11 +1129,6 @@
 
 	kcsan_cpu_init(cpuid);
 
-	/*
-	 * Assert that smp_after_idle_runnable condition is reasonable.
-	 */
-	MPASS(PCPU_GET(curpcb) == NULL);
-
 	sched_ap_entry();
 
 	panic("scheduler returned us to %s", __func__);
@@ -1143,13 +1138,22 @@
 static void
 smp_after_idle_runnable(void *arg __unused)
 {
-	struct pcpu *pc;
 	int cpu;
 
+	if (mp_ncpus == 1)
+		return;
+
+	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));
+
+	/*
+	 * Wait for all APs to handle an interrupt.  After that, we know that
+	 * the APs have entered the scheduler at least once, so the boot stacks
+	 * are safe to free.
+	 */
+	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
+	    smp_no_rendezvous_barrier, NULL);
+
 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
-		pc = pcpu_find(cpu);
-		while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
-			cpu_spinwait();
 		kmem_free((vm_offset_t)bootstacks[cpu], kstack_pages *
 		    PAGE_SIZE);
 	}
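Note on the pattern: the new smp_after_idle_runnable() uses a single empty rendezvous as a barrier. Once every AP has serviced the rendezvous IPI it must have entered the scheduler and left its boot stack, so the stacks can be freed without polling pc_curpcb on each CPU. Below is a rough userland analogue of that idea, not kernel code and not part of this patch: a pthread barrier stands in for smp_rendezvous(), temporary per-worker bootstrap buffers stand in for the boot stacks, and the names (NWORKERS, struct bootinfo, boot_barrier) are invented for illustration. It compiles with cc -pthread; error handling is omitted for brevity.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	NWORKERS	4

static pthread_barrier_t boot_barrier;

struct bootinfo {
	int	id;		/* consumed only during startup */
	char	greeting[32];
};

static void *
worker(void *arg)
{
	struct bootinfo *bi = arg;
	int id = bi->id;	/* copy out what we need from the buffer... */

	printf("%s from worker %d\n", bi->greeting, id);

	/* ...then promise never to touch the bootstrap buffer again. */
	(void)pthread_barrier_wait(&boot_barrier);

	/* Steady-state work would happen here, without referencing *bi. */
	return (NULL);
}

int
main(void)
{
	pthread_t tid[NWORKERS];
	struct bootinfo *bootbufs[NWORKERS];
	int i;

	/* Main thread participates too, hence NWORKERS + 1. */
	pthread_barrier_init(&boot_barrier, NULL, NWORKERS + 1);

	for (i = 0; i < NWORKERS; i++) {
		bootbufs[i] = malloc(sizeof(*bootbufs[i]));
		bootbufs[i]->id = i;
		strcpy(bootbufs[i]->greeting, "hello");
		pthread_create(&tid[i], NULL, worker, bootbufs[i]);
	}

	/*
	 * Rendezvous: once this returns, every worker has reached the barrier
	 * and no longer references its bootstrap buffer, so freeing is safe
	 * even though the workers are still running.
	 */
	(void)pthread_barrier_wait(&boot_barrier);
	for (i = 0; i < NWORKERS; i++)
		free(bootbufs[i]);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	pthread_barrier_destroy(&boot_barrier);
	return (0);
}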