	case VM_GET_REGISTER:
	case VM_SET_REGISTER:
	case VM_GET_SEGMENT_DESCRIPTOR:
	case VM_SET_SEGMENT_DESCRIPTOR:
	case VM_GET_REGISTER_SET:
	case VM_SET_REGISTER_SET:
	case VM_INJECT_EXCEPTION:
	case VM_GET_CAPABILITY:
	case VM_SET_CAPABILITY:
	case VM_PPTDEV_MSI:
	case VM_PPTDEV_MSIX:
	case VM_SET_X2APIC_STATE:
	case VM_GLA2GPA:
	case VM_GLA2GPA_NOFAULT:
	case VM_ACTIVATE_CPU:
	case VM_SET_INTINFO:
	case VM_GET_INTINFO:
	case VM_RESTART_INSTRUCTION:
	case VM_SET_KERNEMU_DEV:
	case VM_GET_KERNEMU_DEV:
		/*
		 * Copy in the ID of the vCPU chosen for this operation.
		 * Since a nefarious caller could update their struct between
		 * this locking and when the rest of the ioctl data is copied
		 * in, it is _critical_ that this local 'vcpu' variable be used
		 * rather than the in-struct one when performing the ioctl.
		 */
		if (ddi_copyin(datap, &vcpu, sizeof (vcpu), md)) {
			return (EFAULT);
		}
		if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vmm_vm)) {
			return (EINVAL);
		}
		vcpu_lock_one(sc, vcpu);
		lock_type = LOCK_VCPU;
		break;

	case VM_REINIT:
	case VM_BIND_PPTDEV:
	case VM_UNBIND_PPTDEV:

	/* ... */

			break;
		}

		error = 0;
		for (uint_t i = 0; i < vrs.count && error == 0; i++) {
			/*
			 * Setting registers in a set is not atomic, since a
			 * failure in the middle of the set will cause a
			 * bail-out and inconsistent register state. Callers
			 * should be wary of this.
			 */
			if (regnums[i] < 0) {
				error = EINVAL;
				break;
			}
			error = vm_set_register(sc->vmm_vm, vcpu, regnums[i],
			    regvals[i]);
		}
		break;
	}

	case VM_SET_KERNEMU_DEV:
	case VM_GET_KERNEMU_DEV: {
		struct vm_readwrite_kernemu_device kemu;
		size_t size = 0;

		if (ddi_copyin(datap, &kemu, sizeof (kemu), md)) {
			error = EFAULT;
			break;
		}

		if (kemu.access_width > 3) {
			error = EINVAL;
			break;
		}
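		/*
		 * access_width holds the log2 of the access size, so widths
		 * 0-3 correspond to 1-, 2-, 4-, and 8-byte accesses.
		 */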
		size = (1 << kemu.access_width);
		ASSERT(size >= 1 && size <= 8);

		if (cmd == VM_SET_KERNEMU_DEV) {
			error = vm_service_mmio_write(sc->vmm_vm, vcpu,
|
	case VM_GET_REGISTER:
	case VM_SET_REGISTER:
	case VM_GET_SEGMENT_DESCRIPTOR:
	case VM_SET_SEGMENT_DESCRIPTOR:
	case VM_GET_REGISTER_SET:
	case VM_SET_REGISTER_SET:
	case VM_INJECT_EXCEPTION:
	case VM_GET_CAPABILITY:
	case VM_SET_CAPABILITY:
	case VM_PPTDEV_MSI:
	case VM_PPTDEV_MSIX:
	case VM_SET_X2APIC_STATE:
	case VM_GLA2GPA:
	case VM_GLA2GPA_NOFAULT:
	case VM_ACTIVATE_CPU:
	case VM_SET_INTINFO:
	case VM_GET_INTINFO:
	case VM_RESTART_INSTRUCTION:
	case VM_SET_KERNEMU_DEV:
	case VM_GET_KERNEMU_DEV:
	case VM_RESET_CPU:
	case VM_GET_RUN_STATE:
	case VM_SET_RUN_STATE:
		/*
		 * Copy in the ID of the vCPU chosen for this operation.
		 * Since a nefarious caller could update their struct between
		 * this locking and when the rest of the ioctl data is copied
		 * in, it is _critical_ that this local 'vcpu' variable be used
		 * rather than the in-struct one when performing the ioctl.
		 */
		if (ddi_copyin(datap, &vcpu, sizeof (vcpu), md)) {
			return (EFAULT);
		}
		if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vmm_vm)) {
			return (EINVAL);
		}
		vcpu_lock_one(sc, vcpu);
		lock_type = LOCK_VCPU;
		break;

	case VM_REINIT:
	case VM_BIND_PPTDEV:
	case VM_UNBIND_PPTDEV:

	/* ... */

			break;
		}

		error = 0;
		for (uint_t i = 0; i < vrs.count && error == 0; i++) {
			/*
			 * Setting registers in a set is not atomic, since a
			 * failure in the middle of the set will cause a
			 * bail-out and inconsistent register state. Callers
			 * should be wary of this.
			 */
			if (regnums[i] < 0) {
				error = EINVAL;
				break;
			}
			error = vm_set_register(sc->vmm_vm, vcpu, regnums[i],
			    regvals[i]);
		}
		break;
	}
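	/* Reset the vCPU, either fully (VRK_RESET) or INIT-style (VRK_INIT). */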
	case VM_RESET_CPU: {
		struct vm_vcpu_reset vvr;

		if (ddi_copyin(datap, &vvr, sizeof (vvr), md)) {
			error = EFAULT;
			break;
		}
		if (vvr.kind != VRK_RESET && vvr.kind != VRK_INIT) {
			error = EINVAL;
			break;
		}

		error = vcpu_arch_reset(sc->vmm_vm, vcpu, vvr.kind == VRK_INIT);
		break;
	}
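	/* Report the vCPU's run state and pending SIPI vector to the caller. */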
	case VM_GET_RUN_STATE: {
		struct vm_run_state vrs;

		bzero(&vrs, sizeof (vrs));
		error = vm_get_run_state(sc->vmm_vm, vcpu, &vrs.state,
		    &vrs.sipi_vector);
		if (error == 0) {
			if (ddi_copyout(&vrs, datap, sizeof (vrs), md)) {
				error = EFAULT;
				break;
			}
		}
		break;
	}
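	/* Update the vCPU's run state and SIPI vector from caller input. */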
	case VM_SET_RUN_STATE: {
		struct vm_run_state vrs;

		if (ddi_copyin(datap, &vrs, sizeof (vrs), md)) {
			error = EFAULT;
			break;
		}
		error = vm_set_run_state(sc->vmm_vm, vcpu, vrs.state,
		    vrs.sipi_vector);
		break;
	}

	case VM_SET_KERNEMU_DEV:
	case VM_GET_KERNEMU_DEV: {
		struct vm_readwrite_kernemu_device kemu;
		size_t size = 0;

		if (ddi_copyin(datap, &kemu, sizeof (kemu), md)) {
			error = EFAULT;
			break;
		}

		if (kemu.access_width > 3) {
			error = EINVAL;
			break;
		}
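		/*
		 * access_width holds the log2 of the access size, so widths
		 * 0-3 correspond to 1-, 2-, 4-, and 8-byte accesses.
		 */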
		size = (1 << kemu.access_width);
		ASSERT(size >= 1 && size <= 8);

		if (cmd == VM_SET_KERNEMU_DEV) {
			error = vm_service_mmio_write(sc->vmm_vm, vcpu,
|