diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -26,7 +26,6 @@
  * SUCH DAMAGE.
  */
 
-#include 
 #include "opt_bhyve_snapshot.h"
 
 #include 
@@ -163,14 +162,14 @@
 }
 
 static void
-vcpu_unlock_one(struct vmmdev_softc *sc, int vcpuid, struct vcpu *vcpu)
+vcpu_unlock_one(struct vcpu *vcpu)
 {
 	enum vcpu_state state;
 
 	state = vcpu_get_state(vcpu, NULL);
 	if (state != VCPU_FROZEN) {
-		panic("vcpu %s(%d) has invalid state %d", vm_name(sc->vm),
-		    vcpuid, state);
+		panic("vcpu %s(%d) has invalid state %d",
+		    vm_name(vcpu_vm(vcpu)), vcpu_vcpuid(vcpu), state);
 	}
 
 	vcpu_set_state(vcpu, VCPU_IDLE, false);
@@ -200,7 +199,7 @@
 		vcpu = vm_vcpu(sc->vm, j);
 		if (vcpu == NULL)
 			continue;
-		vcpu_unlock_one(sc, j, vcpu);
+		vcpu_unlock_one(vcpu);
 	}
 	vm_unlock_vcpus(sc->vm);
 }
@@ -219,7 +218,7 @@
 		vcpu = vm_vcpu(sc->vm, i);
 		if (vcpu == NULL)
 			continue;
-		vcpu_unlock_one(sc, i, vcpu);
+		vcpu_unlock_one(vcpu);
 	}
 	vm_unlock_vcpus(sc->vm);
 }
@@ -1086,7 +1085,7 @@
 
 done:
 	if (vcpus_locked == SINGLE)
-		vcpu_unlock_one(sc, vcpuid, vcpu);
+		vcpu_unlock_one(vcpu);
 	else if (vcpus_locked == ALL)
 		vcpu_unlock_all(sc);
 	if (memsegs_locked)
@@ -1231,7 +1230,7 @@
 	 * is scheduled for destruction.
 	 */
 	cdev = sc->cdev;
-	sc->cdev = NULL;
+	sc->cdev = NULL;
 	mtx_unlock(&vmmdev_mtx);
 
 	/*
diff --git a/sys/arm64/vmm/vmm_dev.c b/sys/arm64/vmm/vmm_dev.c
--- a/sys/arm64/vmm/vmm_dev.c
+++ b/sys/arm64/vmm/vmm_dev.c
@@ -102,10 +102,7 @@
 static int
 vcpu_lock_one(struct vcpu *vcpu)
 {
-	int error;
-
-	error = vcpu_set_state(vcpu, VCPU_FROZEN, true);
-	return (error);
+	return (vcpu_set_state(vcpu, VCPU_FROZEN, true));
 }
 
 static void
@@ -252,8 +249,10 @@
 	return (error);
 }
 
+CTASSERT(sizeof(((struct vm_memseg *)0)->name) >= VM_MAX_SUFFIXLEN + 1);
+
 static int
-get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg)
+get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len)
 {
 	struct devmem_softc *dsc;
 	int error;
@@ -270,17 +269,16 @@
 		}
 		KASSERT(dsc != NULL, ("%s: devmem segment %d not found",
 		    __func__, mseg->segid));
-		error = copystr(dsc->name, mseg->name, sizeof(mseg->name),
-		    NULL);
+		error = copystr(dsc->name, mseg->name, len, NULL);
 	} else {
-		bzero(mseg->name, sizeof(mseg->name));
+		bzero(mseg->name, len);
 	}
 
 	return (error);
 }
 
 static int
-alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg)
+alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len)
 {
 	char *name;
 	int error;
@@ -296,8 +294,8 @@
 	 */
 	if (VM_MEMSEG_NAME(mseg)) {
 		sysmem = false;
-		name = malloc(sizeof(mseg->name), M_VMMDEV, M_WAITOK);
-		error = copystr(mseg->name, name, sizeof(mseg->name), NULL);
+		name = malloc(len, M_VMMDEV, M_WAITOK);
+		error = copystr(mseg->name, name, len, NULL);
 		if (error)
 			goto done;
 	}
@@ -545,10 +543,12 @@
 		error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len);
 		break;
 	case VM_ALLOC_MEMSEG:
-		error = alloc_memseg(sc, (struct vm_memseg *)data);
+		error = alloc_memseg(sc, (struct vm_memseg *)data,
+		    sizeof(((struct vm_memseg *)0)->name));
 		break;
 	case VM_GET_MEMSEG:
-		error = get_memseg(sc, (struct vm_memseg *)data);
+		error = get_memseg(sc, (struct vm_memseg *)data,
+		    sizeof(((struct vm_memseg *)0)->name));
 		break;
 	case VM_GET_REGISTER:
 		vmreg = (struct vm_register *)data;
@@ -994,7 +994,8 @@
 	if (seglen >= last)
 		vm_object_reference(*objp);
 	else
-		error = 0;
+		error = EINVAL;
+	vm_unlock_memsegs(dsc->sc->vm);
 
 	return (error);
 }