diff --git a/usr.sbin/bhyve/bhyverun.h b/usr.sbin/bhyve/bhyverun.h
--- a/usr.sbin/bhyve/bhyverun.h
+++ b/usr.sbin/bhyve/bhyverun.h
@@ -44,7 +44,7 @@
 #endif
 
 void fbsdrun_set_capabilities(struct vmctx *ctx, int cpu);
-void fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip);
+void fbsdrun_addcpu(struct vmctx *ctx, int newcpu, uint64_t rip, int suspend);
 int  fbsdrun_virtio_msix(void);
 
 #endif
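
The header change above drops the 'fromcpu' argument and adds a 'suspend'
flag: once every vCPU is added from the main thread at startup, the old
"fromcpu == BSP" assertion carries no information, and the flag lets a vCPU
be parked immediately after it is activated. A minimal sketch of the intended
calling pattern follows (start_guest_cpus() and its bsp_rip argument are
illustrative, not part of the patch; the patch's spinup_vcpu() reads each
vCPU's current %rip rather than passing a constant):

#include <vmmapi.h>

#include "bhyverun.h"

/* Illustrative only: the BSP starts runnable, APs start parked. */
static void
start_guest_cpus(struct vmctx *ctx, int ncpus, uint64_t bsp_rip)
{
	for (int vcpu = 0; vcpu < ncpus; vcpu++)
		fbsdrun_addcpu(ctx, vcpu, vcpu == BSP ? bsp_rip : 0,
		    /* suspend */ vcpu != BSP);
}
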
diff --git a/usr.sbin/bhyve/bhyverun.c b/usr.sbin/bhyve/bhyverun.c
--- a/usr.sbin/bhyve/bhyverun.c
+++ b/usr.sbin/bhyve/bhyverun.c
@@ -548,12 +548,10 @@
 }
 
 void
-fbsdrun_addcpu(struct vmctx *ctx, int fromcpu, int newcpu, uint64_t rip)
+fbsdrun_addcpu(struct vmctx *ctx, int newcpu, uint64_t rip, int suspend)
 {
 	int error;
 
-	assert(fromcpu == BSP);
-
 	/*
-	 * The 'newcpu' must be activated in the context of 'fromcpu'. If
+	 * The 'newcpu' must be activated before it starts running. If
 	 * vm_activate_cpu() is delayed until newcpu's pthread starts running
@@ -566,6 +564,10 @@
 
 	CPU_SET_ATOMIC(newcpu, &cpumask);
 
+	/* Park the vCPU until the guest starts it with a startup IPI. */
+	if (suspend)
+		vm_suspend_cpu(ctx, newcpu);
+
 	/*
 	 * Set up the vmexit struct to allow execution to start
 	 * at the given RIP
@@ -688,8 +689,7 @@
 vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
 {
 
-	(void)spinup_ap(ctx, *pvcpu,
-		    vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
+	(void)spinup_ap(ctx, vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
 
 	return (VMEXIT_CONTINUE);
 }
@@ -1135,8 +1135,8 @@
 	return (ctx);
 }
 
-void
-spinup_vcpu(struct vmctx *ctx, int vcpu)
+static void
+spinup_vcpu(struct vmctx *ctx, int vcpu, int suspend)
 {
 	int error;
 	uint64_t rip;
@@ -1148,7 +1148,7 @@
 	error = vm_set_capability(ctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
 	assert(error == 0);
 
-	fbsdrun_addcpu(ctx, BSP, vcpu, rip);
+	fbsdrun_addcpu(ctx, vcpu, rip, suspend);
 }
 
 static bool
@@ -1588,25 +1588,20 @@
 #endif
 
 	/*
-	 * Add CPU 0
+	 * Add all vCPUs.
 	 */
-	fbsdrun_addcpu(ctx, BSP, BSP, rip);
-
+	for (int vcpu = 0; vcpu < guest_ncpus; vcpu++) {
+		int suspend = true;
+		if (vcpu == BSP) {
+			suspend = false;
+		}
 #ifdef BHYVE_SNAPSHOT
-	/*
-	 * If we restore a VM, start all vCPUs now (including APs), otherwise,
-	 * let the guest OS to spin them up later via vmexits.
-	 */
-	if (restore_file != NULL) {
-		for (vcpu = 0; vcpu < guest_ncpus; vcpu++) {
-			if (vcpu == BSP)
-				continue;
-
-			fprintf(stdout, "spinning up vcpu no %d...\r\n", vcpu);
-			spinup_vcpu(ctx, vcpu);
+		if (restore_file != NULL) {
+			suspend = false;
 		}
-	}
 #endif
+		spinup_vcpu(ctx, vcpu, suspend);
+	}
 
 	/*
 	 * Head off to the main event dispatch loop
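
The rewritten startup loop above creates a thread for every vCPU once: the
BSP runs immediately, APs are activated but left suspended in vmm.ko, and a
snapshot restore resumes all of them. The park/unpark primitives are stock
libvmmapi calls; a self-contained sketch of that lifecycle, with park_ap()
and unpark_ap() as illustrative names and error handling reduced to bhyve's
usual assert style:

#include <assert.h>

#include <vmmapi.h>

static void
park_ap(struct vmctx *ctx, int vcpu)
{
	int error;

	/* Make the vCPU known to vmm.ko... */
	error = vm_activate_cpu(ctx, vcpu);
	assert(error == 0);

	/* ...but keep it off-CPU until the guest starts it. */
	error = vm_suspend_cpu(ctx, vcpu);
	assert(error == 0);
}

static void
unpark_ap(struct vmctx *ctx, int vcpu)
{
	int error;

	/* Safe once the AP's startup CS:IP has been written. */
	error = vm_resume_cpu(ctx, vcpu);
	assert(error == 0);
}
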
diff --git a/usr.sbin/bhyve/spinup_ap.h b/usr.sbin/bhyve/spinup_ap.h
--- a/usr.sbin/bhyve/spinup_ap.h
+++ b/usr.sbin/bhyve/spinup_ap.h
@@ -31,6 +31,6 @@
 #ifndef	_SPINUP_AP_H_
 #define	_SPINUP_AP_H_
 
-int spinup_ap(struct vmctx *ctx, int vcpu, int newcpu, uint64_t rip);
+int spinup_ap(struct vmctx *ctx, int newcpu, uint64_t rip);
 
 #endif
diff --git a/usr.sbin/bhyve/spinup_ap.c b/usr.sbin/bhyve/spinup_ap.c
--- a/usr.sbin/bhyve/spinup_ap.c
+++ b/usr.sbin/bhyve/spinup_ap.c
@@ -77,7 +77,7 @@
 }
 
 int
-spinup_ap(struct vmctx *ctx, int vcpu, int newcpu, uint64_t rip)
+spinup_ap(struct vmctx *ctx, int newcpu, uint64_t rip)
 {
 	int error;
 
@@ -100,7 +100,8 @@
 
 	spinup_ap_realmode(ctx, newcpu, &rip);
 
-	fbsdrun_addcpu(ctx, vcpu, newcpu, rip);
+	/* The vCPU thread was created at startup; just let it run. */
+	vm_resume_cpu(ctx, newcpu);
 
 	return (newcpu);
 }
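
With fbsdrun_addcpu() gone from this path, spinup_ap() only translates the
guest's startup IPI into initial register state and wakes the parked vCPU. A
SIPI with vector V makes an AP begin real-mode execution at physical address
V << 12, i.e. CS selector V << 8 with %rip 0; spinup_ap_realmode() establishes
that state before vm_resume_cpu() lets the waiting thread run. A hedged sketch
of the translation (set_sipi_start() is an illustrative name; the real
spinup_ap_realmode() also rewrites the hidden CS descriptor base via
vm_set_desc()):

#include <assert.h>
#include <stdint.h>

#include <machine/vmm.h>
#include <vmmapi.h>

/* Illustrative: point a parked AP at its SIPI startup vector. */
static void
set_sipi_start(struct vmctx *ctx, int vcpu, int vector)
{
	uint64_t base = (uint64_t)vector << 12;	/* page-aligned entry */
	int error;

	/* Real mode: %rip 0 relative to a CS based at 'base'. */
	error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, 0);
	assert(error == 0);

	error = vm_set_register(ctx, vcpu, VM_REG_GUEST_CS, base >> 4);
	assert(error == 0);
}

After that state is in place, the vm_resume_cpu() call in the hunk above is
all that is needed for the AP to start running.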