diff --git a/usr.sbin/bhyve/bhyverun.h b/usr.sbin/bhyve/bhyverun.h
--- a/usr.sbin/bhyve/bhyverun.h
+++ b/usr.sbin/bhyve/bhyverun.h
@@ -44,7 +44,6 @@
 #endif
 
 void fbsdrun_set_capabilities(struct vmctx *ctx, int cpu);
-void fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip);
 int fbsdrun_virtio_msix(void);
 
 #endif
diff --git a/usr.sbin/bhyve/bhyverun.c b/usr.sbin/bhyve/bhyverun.c
--- a/usr.sbin/bhyve/bhyverun.c
+++ b/usr.sbin/bhyve/bhyverun.c
@@ -546,13 +546,11 @@
 	return (NULL);
 }
 
-void
-fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip)
+static void
+fbsdrun_addcpu(struct vmctx *ctx, int newcpu, uint64_t rip, bool suspend)
 {
 	int error;
 
-	assert(fromcpu == BSP);
-
 	/*
 	 * The 'newcpu' must be activated in the context of 'fromcpu'. If
 	 * vm_activate_cpu() is delayed until newcpu's pthread starts running
@@ -565,6 +563,9 @@
 
 	CPU_SET_ATOMIC(newcpu, &cpumask);
 
+	if (suspend)
+		vm_suspend_cpu(ctx, newcpu);
+
 	/*
 	 * Set up the vmexit struct to allow execution to start
 	 * at the given RIP
@@ -687,8 +688,7 @@
 vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
 {
 
-	(void)spinup_ap(ctx, *pvcpu,
-	    vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
+	(void)spinup_ap(ctx, vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
 
 	return (VMEXIT_CONTINUE);
 }
@@ -1138,8 +1138,8 @@
 	return (ctx);
 }
 
-void
-spinup_vcpu(struct vmctx *ctx, int vcpu)
+static void
+spinup_vcpu(struct vmctx *ctx, int vcpu, bool suspend)
 {
 	int error;
 	uint64_t rip;
@@ -1151,7 +1151,7 @@
 	error = vm_set_capability(ctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
 	assert(error == 0);
 
-	fbsdrun_addcpu(ctx, BSP, vcpu, rip);
+	fbsdrun_addcpu(ctx, vcpu, rip, suspend);
 }
 
 static bool
@@ -1584,25 +1584,16 @@
 	mt_vmm_info = calloc(guest_ncpus, sizeof(*mt_vmm_info));
 
 	/*
-	 * Add CPU 0
+	 * Add all vCPUs.
 	 */
-	fbsdrun_addcpu(ctx, BSP, BSP, rip);
-
+	for (int vcpu = 0; vcpu < guest_ncpus; vcpu++) {
+		bool suspend = (vcpu != BSP);
 #ifdef BHYVE_SNAPSHOT
-	/*
-	 * If we restore a VM, start all vCPUs now (including APs), otherwise,
-	 * let the guest OS to spin them up later via vmexits.
-	 */
-	if (restore_file != NULL) {
-		for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
-			if (vcpu == BSP)
-				continue;
-
-			fprintf(stdout, "spinning up vcpu no %d...\r\n", vcpu);
-			spinup_vcpu(ctx, vcpu);
-		}
-	}
+		if (restore_file != NULL)
+			suspend = false;
 #endif
+		spinup_vcpu(ctx, vcpu, suspend);
+	}
 
 	/*
 	 * Head off to the main event dispatch loop
diff --git a/usr.sbin/bhyve/spinup_ap.h b/usr.sbin/bhyve/spinup_ap.h
--- a/usr.sbin/bhyve/spinup_ap.h
+++ b/usr.sbin/bhyve/spinup_ap.h
@@ -31,6 +31,6 @@
 #ifndef _SPINUP_AP_H_
 #define _SPINUP_AP_H_
 
-int spinup_ap(struct vmctx *ctx, int vcpu, int newcpu, uint64_t rip);
+int spinup_ap(struct vmctx *ctx, int newcpu, uint64_t rip);
 
 #endif
diff --git a/usr.sbin/bhyve/spinup_ap.c b/usr.sbin/bhyve/spinup_ap.c
--- a/usr.sbin/bhyve/spinup_ap.c
+++ b/usr.sbin/bhyve/spinup_ap.c
@@ -77,7 +77,7 @@
 }
 
 int
-spinup_ap(struct vmctx *ctx, int vcpu, int newcpu, uint64_t rip)
+spinup_ap(struct vmctx *ctx, int newcpu, uint64_t rip)
 {
 	int error;
 
@@ -100,7 +100,7 @@
 
 	spinup_ap_realmode(ctx, newcpu, &rip);
 
-	fbsdrun_addcpu(ctx, vcpu, newcpu, rip);
+	vm_resume_cpu(ctx, newcpu);
 
 	return (newcpu);
 }
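
Reviewer note on the control flow this change introduces: every vCPU thread is now created at startup, APs are parked via vm_suspend_cpu(), and a guest INIT/SIPI later unparks one AP through vm_resume_cpu() in spinup_ap(). The following standalone sketch models that suspend/resume handshake with plain pthreads. It is an illustration only, not bhyve code; the names vcpu_loop, toy_resume_cpu, NCPUS, and the mutex/condvar gate are invented for this example.

/*
 * Toy model of the revised startup flow: all "vCPU" threads are created
 * up front, APs start suspended, and the "BSP" later resumes them,
 * mirroring fbsdrun_addcpu(..., suspend) followed by vm_resume_cpu()
 * in spinup_ap().  Build with: cc -o toy toy.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define	NCPUS	4
#define	BSP	0

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resumed = PTHREAD_COND_INITIALIZER;
static bool suspended[NCPUS];

static void *
vcpu_loop(void *arg)
{
	int vcpu = (int)(long)arg;

	/* Stand-in for the vm_suspend_cpu() gate: block until resumed. */
	pthread_mutex_lock(&lock);
	while (suspended[vcpu])
		pthread_cond_wait(&resumed, &lock);
	pthread_mutex_unlock(&lock);

	printf("vcpu %d running\n", vcpu);
	return (NULL);
}

/* Stand-in for vm_resume_cpu(): clear the gate and wake the thread. */
static void
toy_resume_cpu(int vcpu)
{
	pthread_mutex_lock(&lock);
	suspended[vcpu] = false;
	pthread_cond_broadcast(&resumed);
	pthread_mutex_unlock(&lock);
}

int
main(void)
{
	pthread_t tid[NCPUS];

	/* As in the new loop in main(): create all vCPUs, APs suspended. */
	for (long vcpu = 0; vcpu < NCPUS; vcpu++) {
		suspended[vcpu] = (vcpu != BSP);
		pthread_create(&tid[vcpu], NULL, vcpu_loop, (void *)vcpu);
	}

	/* Model of spinup_ap(): the guest's INIT/SIPI resumes each AP. */
	for (int vcpu = 1; vcpu < NCPUS; vcpu++)
		toy_resume_cpu(vcpu);

	for (int vcpu = 0; vcpu < NCPUS; vcpu++)
		pthread_join(tid[vcpu], NULL);
	return (0);
}

One consequence of this design worth noting for review: because the AP threads already exist and are merely suspended, resuming an AP on SIPI no longer has to create a thread or activate the vCPU from the BSP's context, which is what made the old fromcpu/assert(fromcpu == BSP) plumbing necessary.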