Changeset View
Changeset View
Standalone View
Standalone View
sys/amd64/vmm/vmm_dev.c
Show First 20 Lines • Show All 93 Lines • ▼ Show 20 Lines | |||||
static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev"); | static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev"); | ||||
SYSCTL_DECL(_hw_vmm); | SYSCTL_DECL(_hw_vmm); | ||||
static int vmm_priv_check(struct ucred *ucred); | static int vmm_priv_check(struct ucred *ucred); | ||||
static int devmem_create_cdev(const char *vmname, int id, char *devmem); | static int devmem_create_cdev(const char *vmname, int id, char *devmem); | ||||
static void devmem_destroy(void *arg); | static void devmem_destroy(void *arg); | ||||
static void vmmdev_destroy(void *arg); | |||||
static int | static int | ||||
vmm_priv_check(struct ucred *ucred) | vmm_priv_check(struct ucred *ucred) | ||||
{ | { | ||||
if (jailed(ucred) && | if (jailed(ucred) && | ||||
!(ucred->cr_prison->pr_allow & pr_allow_flag)) | !(ucred->cr_prison->pr_allow & pr_allow_flag)) | ||||
return (EPERM); | return (EPERM); | ||||
▲ Show 20 Lines • Show All 239 Lines • ▼ Show 20 Lines | for (i = 0; i < count; i++) { | ||||
error = vm_set_register(vm, vcpu, regnum[i], regval[i]); | error = vm_set_register(vm, vcpu, regnum[i], regval[i]); | ||||
if (error) | if (error) | ||||
break; | break; | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
vmm_destroy(struct vmmdev_softc *sc) | |||||
{ | |||||
struct devmem_softc *dsc; | |||||
struct cdev *cdev; | |||||
int error; | |||||
mtx_lock(&vmmdev_mtx); | |||||
if (sc == NULL || sc->cdev == NULL) { | |||||
mtx_unlock(&vmmdev_mtx); | |||||
error = EINVAL; | |||||
goto out; | |||||
} | |||||
/* | |||||
* The 'cdev' will be destroyed asynchronously when 'si_threadcount' | |||||
* goes down to 0 so we should not do it again in the callback. | |||||
* | |||||
* Setting 'sc->cdev' to NULL is also used to indicate that the VM | |||||
* is scheduled for destruction. | |||||
*/ | |||||
cdev = sc->cdev; | |||||
sc->cdev = NULL; | |||||
mtx_unlock(&vmmdev_mtx); | |||||
/* | |||||
* Schedule all cdevs to be destroyed: | |||||
* | |||||
* - any new operations on the 'cdev' will return an error (ENXIO). | |||||
* | |||||
* - when the 'si_threadcount' dwindles down to zero the 'cdev' will | |||||
* be destroyed and the callback will be invoked in a taskqueue | |||||
* context. | |||||
* | |||||
* - the 'devmem' cdevs are destroyed before the virtual machine 'cdev' | |||||
*/ | |||||
SLIST_FOREACH(dsc, &sc->devmem, link) { | |||||
KASSERT(dsc->cdev != NULL, ("devmem cdev already destroyed")); | |||||
destroy_dev_sched_cb(dsc->cdev, devmem_destroy, dsc); | |||||
} | |||||
destroy_dev_sched_cb(cdev, vmmdev_destroy, sc); | |||||
error = 0; | |||||
out: | |||||
return (error); | |||||
} | |||||
static int | |||||
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, | vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, | ||||
struct thread *td) | struct thread *td) | ||||
{ | { | ||||
int error, vcpu, state_changed, size; | int error, vcpu, state_changed, size; | ||||
cpuset_t *cpuset; | cpuset_t *cpuset; | ||||
struct vmmdev_softc *sc; | struct vmmdev_softc *sc; | ||||
struct vm_register *vmreg; | struct vm_register *vmreg; | ||||
struct vm_seg_desc *vmsegdesc; | struct vm_seg_desc *vmsegdesc; | ||||
▲ Show 20 Lines • Show All 509 Lines • ▼ Show 20 Lines | #ifdef BHYVE_SNAPSHOT | ||||
case VM_SNAPSHOT_REQ: | case VM_SNAPSHOT_REQ: | ||||
snapshot_meta = (struct vm_snapshot_meta *)data; | snapshot_meta = (struct vm_snapshot_meta *)data; | ||||
error = vm_snapshot_req(sc->vm, snapshot_meta); | error = vm_snapshot_req(sc->vm, snapshot_meta); | ||||
break; | break; | ||||
case VM_RESTORE_TIME: | case VM_RESTORE_TIME: | ||||
error = vm_restore_time(sc->vm); | error = vm_restore_time(sc->vm); | ||||
break; | break; | ||||
#endif | #endif | ||||
case VM_DESTROY: | |||||
error = vmm_destroy(sc); | |||||
break; | |||||
default: | default: | ||||
error = ENOTTY; | error = ENOTTY; | ||||
break; | break; | ||||
} | } | ||||
if (state_changed == 1) | if (state_changed == 1) | ||||
vcpu_unlock_one(sc, vcpu); | vcpu_unlock_one(sc, vcpu); | ||||
else if (state_changed == 2) | else if (state_changed == 2) | ||||
▲ Show 20 Lines • Show All 106 Lines • ▼ Show 20 Lines | vmmdev_destroy(void *arg) | ||||
} | } | ||||
free(sc, M_VMMDEV); | free(sc, M_VMMDEV); | ||||
} | } | ||||
/*
 * Handler for the hw.vmm.destroy sysctl: a string written to the node names
 * the virtual machine to destroy.  Reads return the placeholder "beavis".
 */
static int
sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
{
	struct vmmdev_softc *sc;
	char *buf;
	int error, buflen;

	/* Jailed callers need the vmm allow-flag; see vmm_priv_check(). */
	error = vmm_priv_check(req->td->td_ucred);
	if (error)
		return (error);

	buflen = VM_MAX_NAMELEN + 1;
	buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
	strlcpy(buf, "beavis", buflen);
	error = sysctl_handle_string(oidp, buf, buflen, req);
	/* A read (no new value) or a copy error means nothing to destroy. */
	if (error != 0 || req->newptr == NULL)
		goto out;

	mtx_lock(&vmmdev_mtx);
	sc = vmmdev_lookup(buf);
	mtx_unlock(&vmmdev_mtx);
	/*
	 * NOTE(review): 'sc' is used after vmmdev_mtx is dropped.
	 * vmm_destroy() re-acquires the lock and tolerates a NULL 'sc', but
	 * in the window between this unlock and that re-lock a concurrent
	 * destroy could invalidate 'sc' — confirm the softc's lifetime
	 * guarantees across this gap.
	 */
	error = vmm_destroy(sc);
out:
	free(buf, M_VMMDEV);
	return (error);
}
/*
 * hw.vmm.destroy: read/write string sysctl (jail-aware, MPSAFE) serviced by
 * sysctl_vmm_destroy(); writing a VM name destroys that virtual machine.
 */
SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_vmm_destroy, "A",
    NULL);
▲ Show 20 Lines • Show All 201 Lines • Show Last 20 Lines |