Changeset View
Standalone View
sys/amd64/vmm/vmm.c
Show First 20 Lines • Show All 1,432 Lines • ▼ Show 20 Lines | if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) && | ||||
!CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { | !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) { | ||||
VMM_CTR0(vcpu, "Calling rendezvous func"); | VMM_CTR0(vcpu, "Calling rendezvous func"); | ||||
(*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); | (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg); | ||||
CPU_SET(vcpuid, &vm->rendezvous_done_cpus); | CPU_SET(vcpuid, &vm->rendezvous_done_cpus); | ||||
} | } | ||||
if (CPU_CMP(&vm->rendezvous_req_cpus, | if (CPU_CMP(&vm->rendezvous_req_cpus, | ||||
&vm->rendezvous_done_cpus) == 0) { | &vm->rendezvous_done_cpus) == 0) { | ||||
VMM_CTR0(vcpu, "Rendezvous completed"); | VMM_CTR0(vcpu, "Rendezvous completed"); | ||||
CPU_ZERO(&vm->rendezvous_req_cpus); | |||||
vm->rendezvous_func = NULL; | vm->rendezvous_func = NULL; | ||||
wakeup(&vm->rendezvous_func); | wakeup(&vm->rendezvous_func); | ||||
jhb: I don't know that we need quite this long a comment. We didn't have one before for clearing… | |||||
Not Done Inline ActionsThe first intention of this patch was to avoid spurious rendezvous completely (and thus fix the issue of bz). This comment explains some details of how it's done. As we are going to remove the rendezvous assertion, it's not required any more to avoid spurious rendezvous completely. So, this comment isn't necessary. corvink: The first intention of this patch was to avoid spurious rendezvous completely (and those fixing… | |||||
break; | break; | ||||
} | } | ||||
VMM_CTR0(vcpu, "Wait for rendezvous completion"); | VMM_CTR0(vcpu, "Wait for rendezvous completion"); | ||||
mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, | mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, | ||||
Done Inline ActionsThis is a "point" of rendezvous. All vCPU threads are sitting here until the last is done. vcpu_rendezvous_pending() will never have a side effect on an old rendezvous_req_cpus value, and therefore the added OR/AND logic looks excessive. gusev.vitaliy_gmail.com: This is a "point" of rendezvous. All vCPU threads are sitting here until the last is done. | |||||
Done Inline ActionsThere is one minor mistake in your comment which causes the bug of bz. It's only guaranteed that a subset of all vCPU threads (those specified by rendezvous_req_cpus) are sitting here. Yeah, I thought about it and you're right. Adding atomic isn't required here. corvink: There one minor mistake in your comment which causes the bug of bz. It's only guaranteed that a… | |||||
"vmrndv", hz); | "vmrndv", hz); | ||||
if (td_ast_pending(td, TDA_SUSPEND)) { | if (td_ast_pending(td, TDA_SUSPEND)) { | ||||
mtx_unlock(&vm->rendezvous_mtx); | mtx_unlock(&vm->rendezvous_mtx); | ||||
error = thread_check_susp(td, true); | error = thread_check_susp(td, true); | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
mtx_lock(&vm->rendezvous_mtx); | mtx_lock(&vm->rendezvous_mtx); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 398 Lines • ▼ Show 20 Lines | vm_run(struct vcpu *vcpu, struct vm_exit *vme_user) | ||||
if (!CPU_ISSET(vcpuid, &vm->active_cpus)) | if (!CPU_ISSET(vcpuid, &vm->active_cpus)) | ||||
return (EINVAL); | return (EINVAL); | ||||
if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) | if (CPU_ISSET(vcpuid, &vm->suspended_cpus)) | ||||
return (EINVAL); | return (EINVAL); | ||||
pmap = vmspace_pmap(vm->vmspace); | pmap = vmspace_pmap(vm->vmspace); | ||||
vme = &vcpu->exitinfo; | vme = &vcpu->exitinfo; | ||||
evinfo.rptr = &vm->rendezvous_func; | evinfo.rptr = &vm->rendezvous_req_cpus; | ||||
Not Done Inline ActionsOh, this is why the previous code worked (I had thought it was always pointing to the cpuset) jhb: Oh, this is why the previous code worked (I had thought it was always pointing to the cpuset) | |||||
evinfo.sptr = &vm->suspend; | evinfo.sptr = &vm->suspend; | ||||
evinfo.iptr = &vcpu->reqidle; | evinfo.iptr = &vcpu->reqidle; | ||||
restart: | restart: | ||||
critical_enter(); | critical_enter(); | ||||
KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), | KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active), | ||||
("vm_run: absurd pm_active")); | ("vm_run: absurd pm_active")); | ||||
▲ Show 20 Lines • Show All 771 Lines • ▼ Show 20 Lines | if (vm->rendezvous_func != NULL) { | ||||
if (error != 0) | if (error != 0) | ||||
return (error); | return (error); | ||||
goto restart; | goto restart; | ||||
} | } | ||||
KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " | KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous " | ||||
"rendezvous is still in progress")); | "rendezvous is still in progress")); | ||||
VMM_CTR0(vcpu, "Initiating rendezvous"); | VMM_CTR0(vcpu, "Initiating rendezvous"); | ||||
vm->rendezvous_req_cpus = dest; | vm->rendezvous_req_cpus = dest; | ||||
Not Done Inline ActionsIn general it should work. But in terms of atomicity, you can see a little window between setting rendezvous_req_cpus and then setting vm->rendezvous_func = func. The setting order is wrong. A reader can check rendezvous_req_cpus, see the bit set, and then see that rendezvous_func is NULL. When all code is protected by a mutex or spinlock, all changes in that section are seen as atomic (consistent). But if we change data under mutex protection, yet read (access) the data without mutex protection, it can be inconsistent. As a solution you can move the ATOMIC operation after the 'vm->rendezvous_func = func' assignment. That would protect the ordering of the data writes. Another alternative to the atomic is to use memory barriers. Also, CPU_OR_ATOMIC can be replaced by CPU_SET_ATOMIC - adding AND/OR is excessive because it is a rendezvous. gusev.vitaliy_gmail.com: In general it should work. But in terms of atomicity, you can see a little window between… | |||||
Not Done Inline ActionsYou're right but I don't understand why it should be required to use a memory barrier after CPU_ISSET() in vcpu_rendezvous_pending(). Btw: CPU_SET_ATOMIC wouldn't work properly for VMs with many vCPUs. It only sets a single long value, so you can only set 64 vCPUs. Look at its implementation: #define __BIT_SET_ATOMIC(_s, n, p) \ atomic_set_long(&(p)->__bits[__bitset_word(_s, n)], \ __bitset_mask((_s), n)) corvink: You're right but I don't understand why it should be required to use a memory barrier after… | |||||
CPU_ZERO(&vm->rendezvous_done_cpus); | CPU_ZERO(&vm->rendezvous_done_cpus); | ||||
vm->rendezvous_arg = arg; | vm->rendezvous_arg = arg; | ||||
vm->rendezvous_func = func; | vm->rendezvous_func = func; | ||||
mtx_unlock(&vm->rendezvous_mtx); | mtx_unlock(&vm->rendezvous_mtx); | ||||
/* | /* | ||||
* Wake up any sleeping vcpus and trigger a VM-exit in any running | * Wake up any sleeping vcpus and trigger a VM-exit in any running | ||||
* vcpus so they handle the rendezvous as soon as possible. | * vcpus so they handle the rendezvous as soon as possible. | ||||
*/ | */ | ||||
for (i = 0; i < vm->maxcpus; i++) { | for (i = 0; i < vm->maxcpus; i++) { | ||||
Not Done Inline ActionsWe generally use atomic_* now instead of these. Also, what you want here instead is probably atomic_thread_fence_rel or you want to use atomic_store_ptr_rel for setting vm->rendezvous_func. OTOH, I don't know that you need this barrier at all since you've removed the assertion. The fact that the rendezvous handler grabs the rendezvous_mtx before checking any of these members means that we don't actually depend on any real ordering here. It probably is better to set rendezvous_req_cpus earlier as the existing code does now so that any vCPUs doing a VM enter concurrent with this have chance to see it and get ready for rendezvous without needing an IPI to force an exit. jhb: We generally use `atomic_*` now instead of these. Also, what you want here instead is probably… | |||||
Not Done Inline ActionsThanks for the explanation how to use barriers. You're right as we are going to remove the rendezvous assertion. corvink: Thanks for the explanation how to use barriers.
You're right as we are going to remove the… | |||||
if (CPU_ISSET(i, &dest)) | if (CPU_ISSET(i, &dest)) | ||||
vcpu_notify_event(vm_vcpu(vm, i), false); | vcpu_notify_event(vm_vcpu(vm, i), false); | ||||
} | } | ||||
return (vm_handle_rendezvous(vcpu)); | return (vm_handle_rendezvous(vcpu)); | ||||
} | } | ||||
struct vatpic * | struct vatpic * | ||||
▲ Show 20 Lines • Show All 324 Lines • Show Last 20 Lines |
I don't know that we need quite this long a comment. We didn't have one before for clearing rendezvous_func above.