diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -334,10 +334,16 @@
 #endif	/* _SYS__CPUSET_H_ */
 
 static __inline int
-vcpu_rendezvous_pending(struct vm_eventinfo *info)
+vcpu_rendezvous_pending(struct vcpu *vcpu, struct vm_eventinfo *info)
 {
-
-	return (*((uintptr_t *)(info->rptr)) != 0);
+	/*
+	 * This check isn't done with atomic operations or under a lock because
+	 * there's no need to.  If the vcpuid bit is set, the vcpu is part of a
+	 * rendezvous and the bit won't be cleared until the vcpu enters the
+	 * rendezvous.  On rendezvous exit, the cpuset is cleared and the vcpu
+	 * will see an empty cpuset.  So, there can't be a race.
+	 */
+	return (CPU_ISSET(vcpu_vcpuid(vcpu), (cpuset_t *)info->rptr));
 }
 
 static __inline int
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -2053,7 +2053,7 @@
 			break;
 		}
 
-		if (vcpu_rendezvous_pending(evinfo)) {
+		if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
 			enable_gintr();
 			vm_exit_rendezvous(vcpu->vcpu, state->rip);
 			break;
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -3071,7 +3071,7 @@
 			break;
 		}
 
-		if (vcpu_rendezvous_pending(evinfo)) {
+		if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
 			enable_intr();
 			vm_exit_rendezvous(vcpu->vcpu, rip);
 			break;
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1433,6 +1433,7 @@
 		if (CPU_CMP(&vm->rendezvous_req_cpus,
 		    &vm->rendezvous_done_cpus) == 0) {
 			VMM_CTR0(vcpu, "Rendezvous completed");
+			CPU_ZERO(&vm->rendezvous_req_cpus);
 			vm->rendezvous_func = NULL;
 			wakeup(&vm->rendezvous_func);
 			break;
@@ -1853,7 +1854,7 @@
 
 	pmap = vmspace_pmap(vm->vmspace);
 	vme = &vcpu->exitinfo;
-	evinfo.rptr = &vm->rendezvous_func;
+	evinfo.rptr = &vm->rendezvous_req_cpus;
 	evinfo.sptr = &vm->suspend;
 	evinfo.iptr = &vcpu->reqidle;
 restart:
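
Not part of the patch: a minimal standalone sketch contrasting the old pointer-based predicate with the new per-vCPU cpuset check that this diff introduces. It assumes a plain uint64_t bitmask as a stand-in for the kernel's cpuset_t/CPU_ISSET(), and struct mock_vm, pending_old(), and pending_new() are hypothetical names invented here purely for illustration.

/*
 * Sketch only -- not kernel code.  Models the semantic difference:
 * the old check reports a pending rendezvous to every vcpu whenever
 * rendezvous_func is non-NULL, the new one only to vcpus whose bit is
 * set in the request set.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_vm {
	void		(*rendezvous_func)(int);	/* non-NULL while a rendezvous is active */
	uint64_t	rendezvous_req_cpus;		/* bit i set => vcpu i must participate */
};

/* Old behavior: any active rendezvous looks pending to every vcpu. */
static bool
pending_old(const struct mock_vm *vm, int vcpuid)
{
	(void)vcpuid;
	return (vm->rendezvous_func != NULL);
}

/* New behavior: pending only if this vcpu's bit is set in the request set. */
static bool
pending_new(const struct mock_vm *vm, int vcpuid)
{
	return ((vm->rendezvous_req_cpus & (UINT64_C(1) << vcpuid)) != 0);
}

static void
dummy_func(int vcpuid)
{
	(void)vcpuid;
}

int
main(void)
{
	/* A rendezvous targeting vcpu 0 only. */
	struct mock_vm vm = {
		.rendezvous_func = dummy_func,
		.rendezvous_req_cpus = UINT64_C(1) << 0,
	};

	for (int vcpuid = 0; vcpuid < 2; vcpuid++)
		printf("vcpu %d: old=%d new=%d\n", vcpuid,
		    pending_old(&vm, vcpuid), pending_new(&vm, vcpuid));
	/*
	 * Prints:
	 *   vcpu 0: old=1 new=1
	 *   vcpu 1: old=1 new=0   <- vcpu 1 no longer reports a pending rendezvous
	 */
	return (0);
}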