jcs's openbsd hax
openbsd

Remove vmd(8) send & receive functionality.

It has atrophied, is currently broken, and is now impeding some progress
on improving existing device emulation. We can revisit this
functionality in the future if need (and developer support) arises.

This removes the vmctl(8) command support as well.

ok mlarkin@

dv a49e2d7a 6b7165bf

+34 -889
+1 -44
usr.sbin/vmctl/main.c
··· 1 - /* $OpenBSD: main.c,v 1.86 2025/05/31 00:38:56 dv Exp $ */ 1 + /* $OpenBSD: main.c,v 1.87 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2015 Reyk Floeter <reyk@openbsd.org> ··· 63 63 int ctl_waitfor(struct parse_result *, int, char *[]); 64 64 int ctl_pause(struct parse_result *, int, char *[]); 65 65 int ctl_unpause(struct parse_result *, int, char *[]); 66 - int ctl_send(struct parse_result *, int, char *[]); 67 - int ctl_receive(struct parse_result *, int, char *[]); 68 66 69 67 struct ctl_command ctl_commands[] = { 70 68 { "console", CMD_CONSOLE, ctl_console, "id" }, ··· 73 71 { "load", CMD_LOAD, ctl_load, "filename" }, 74 72 { "log", CMD_LOG, ctl_log, "[brief | verbose]" }, 75 73 { "pause", CMD_PAUSE, ctl_pause, "id" }, 76 - { "receive", CMD_RECEIVE, ctl_receive, "name" , 1}, 77 74 { "reload", CMD_RELOAD, ctl_reload, "" }, 78 75 { "reset", CMD_RESET, ctl_reset, "[all | switches | vms]" }, 79 - { "send", CMD_SEND, ctl_send, "id", 1}, 80 76 { "show", CMD_STATUS, ctl_status, "[id]" }, 81 77 { "start", CMD_START, ctl_start, 82 78 "[-cL] [-B device] [-b path] [-d disk] [-i count]\n" ··· 257 253 case CMD_UNPAUSE: 258 254 unpause_vm(res->id, res->name); 259 255 break; 260 - case CMD_SEND: 261 - send_vm(res->id, res->name); 262 - done = 1; 263 - ret = 0; 264 - break; 265 - case CMD_RECEIVE: 266 - vm_receive(res->id, res->name); 267 - break; 268 256 case CMD_CREATE: 269 257 case NONE: 270 258 /* The action is not expected here */ ··· 324 312 break; 325 313 case CMD_PAUSE: 326 314 done = pause_vm_complete(&imsg, &ret); 327 - break; 328 - case CMD_RECEIVE: 329 - done = vm_start_complete(&imsg, &ret, 0); 330 315 break; 331 316 case CMD_UNPAUSE: 332 317 done = unpause_vm_complete(&imsg, &ret); ··· 1005 990 { 1006 991 if (argc == 2) { 1007 992 if (parse_vmid(res, argv[1], 0) == -1) 1008 - errx(1, "invalid id: %s", argv[1]); 1009 - } else if (argc != 2) 1010 - ctl_usage(res->ctl); 1011 - 1012 - return (vmmaction(res)); 1013 - } 1014 - 1015 - int 1016 
- ctl_send(struct parse_result *res, int argc, char *argv[]) 1017 - { 1018 - if (pledge("stdio unix sendfd unveil", NULL) == -1) 1019 - err(1, "pledge"); 1020 - if (argc == 2) { 1021 - if (parse_vmid(res, argv[1], 0) == -1) 1022 - errx(1, "invalid id: %s", argv[1]); 1023 - } else if (argc != 2) 1024 - ctl_usage(res->ctl); 1025 - 1026 - return (vmmaction(res)); 1027 - } 1028 - 1029 - int 1030 - ctl_receive(struct parse_result *res, int argc, char *argv[]) 1031 - { 1032 - if (pledge("stdio unix sendfd unveil", NULL) == -1) 1033 - err(1, "pledge"); 1034 - if (argc == 2) { 1035 - if (parse_vmid(res, argv[1], 1) == -1) 1036 993 errx(1, "invalid id: %s", argv[1]); 1037 994 } else if (argc != 2) 1038 995 ctl_usage(res->ctl);
+2 -16
usr.sbin/vmctl/vmctl.8
··· 1 - .\" $OpenBSD: vmctl.8,v 1.78 2024/05/04 07:51:21 jmc Exp $ 1 + .\" $OpenBSD: vmctl.8,v 1.79 2025/06/09 18:43:01 dv Exp $ 2 2 .\" 3 3 .\" Copyright (c) 2015-2024 Mike Larkin <mlarkin@openbsd.org> 4 4 .\" ··· 14 14 .\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 15 .\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 16 .\" 17 - .Dd $Mdocdate: May 4 2024 $ 17 + .Dd $Mdocdate: June 9 2025 $ 18 18 .Dt VMCTL 8 19 19 .Os 20 20 .Sh NAME ··· 117 117 .It Cm pause Ar id 118 118 Pause a VM with the specified 119 119 .Ar id . 120 - .It Cm receive Ar name 121 - Receive a VM from standard input and start it with the specified 122 - .Ar name . 123 120 .It Cm reload 124 121 Remove all stopped VMs and reload the configuration from the default 125 122 configuration file. ··· 132 129 .Cm switches , 133 130 or reset and terminate all 134 131 .Cm vms . 135 - .It Cm send Ar id 136 - Send a VM with the specified 137 - .Ar id 138 - to standard output and terminate it. 139 - The VM is paused during send processing. 140 - Data sent to standard output contains the VM parameters and its memory, 141 - not the disk image. 142 - .Pp 143 - In order to move a VM from one host to another, disk files must be 144 - synced between the send and the receive processes and must be located 145 - under the same path. 146 132 .It Cm show Oo Fl r Oc Op Ar id 147 133 An alias for the 148 134 .Cm status
+1 -79
usr.sbin/vmctl/vmctl.c
··· 1 - /* $OpenBSD: vmctl.c,v 1.93 2025/05/31 00:38:56 dv Exp $ */ 1 + /* $OpenBSD: vmctl.c,v 1.94 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org> ··· 288 288 } 289 289 290 290 return (1); 291 - } 292 - 293 - void 294 - send_vm(uint32_t id, const char *name) 295 - { 296 - struct vmop_id vid; 297 - int fds[2], readn, writen; 298 - long pagesz; 299 - char *buf; 300 - 301 - pagesz = getpagesize(); 302 - buf = malloc(pagesz); 303 - if (buf == NULL) 304 - errx(1, "%s: memory allocation failure", __func__); 305 - 306 - memset(&vid, 0, sizeof(vid)); 307 - vid.vid_id = id; 308 - if (name != NULL) 309 - strlcpy(vid.vid_name, name, sizeof(vid.vid_name)); 310 - if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, fds) == -1) { 311 - warnx("%s: socketpair creation failed", __func__); 312 - } else { 313 - imsg_compose(ibuf, IMSG_VMDOP_SEND_VM_REQUEST, 0, 0, fds[0], 314 - &vid, sizeof(vid)); 315 - imsgbuf_flush(ibuf); 316 - while (1) { 317 - readn = atomicio(read, fds[1], buf, pagesz); 318 - if (!readn) 319 - break; 320 - writen = atomicio(vwrite, STDOUT_FILENO, buf, 321 - readn); 322 - if (writen != readn) 323 - break; 324 - } 325 - if (vid.vid_id) 326 - warnx("sent vm %d successfully", vid.vid_id); 327 - else 328 - warnx("sent vm %s successfully", vid.vid_name); 329 - } 330 - 331 - free(buf); 332 - } 333 - 334 - void 335 - vm_receive(uint32_t id, const char *name) 336 - { 337 - struct vmop_id vid; 338 - int fds[2], readn, writen; 339 - long pagesz; 340 - char *buf; 341 - 342 - pagesz = getpagesize(); 343 - buf = malloc(pagesz); 344 - if (buf == NULL) 345 - errx(1, "%s: memory allocation failure", __func__); 346 - 347 - memset(&vid, 0, sizeof(vid)); 348 - if (name != NULL) 349 - strlcpy(vid.vid_name, name, sizeof(vid.vid_name)); 350 - if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, fds) == -1) { 351 - warnx("%s: socketpair creation failed", __func__); 352 - } else { 353 - imsg_compose(ibuf, IMSG_VMDOP_RECEIVE_VM_REQUEST, 0, 0, 
fds[0], 354 - &vid, sizeof(vid)); 355 - imsgbuf_flush(ibuf); 356 - while (1) { 357 - readn = atomicio(read, STDIN_FILENO, buf, pagesz); 358 - if (!readn) { 359 - close(fds[1]); 360 - break; 361 - } 362 - writen = atomicio(vwrite, fds[1], buf, readn); 363 - if (writen != readn) 364 - break; 365 - } 366 - } 367 - 368 - free(buf); 369 291 } 370 292 371 293 void
+1 -3
usr.sbin/vmctl/vmctl.h
··· 1 - /* $OpenBSD: vmctl.h,v 1.41 2025/05/31 00:38:56 dv Exp $ */ 1 + /* $OpenBSD: vmctl.h,v 1.42 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2015 Reyk Floeter <reyk@openbsd.org> ··· 36 36 CMD_WAITFOR, 37 37 CMD_PAUSE, 38 38 CMD_UNPAUSE, 39 - CMD_SEND, 40 - CMD_RECEIVE, 41 39 }; 42 40 43 41 struct ctl_command;
+1 -25
usr.sbin/vmd/arm64_vm.c
··· 1 - /* $OpenBSD: arm64_vm.c,v 1.5 2025/06/04 12:47:59 tb Exp $ */ 1 + /* $OpenBSD: arm64_vm.c,v 1.6 2025/06/09 18:43:01 dv Exp $ */ 2 2 /* 3 3 * Copyright (c) 2024 Dave Voutila <dv@openbsd.org> 4 4 * ··· 43 43 } 44 44 45 45 void 46 - restore_emulated_hw(struct vm_create_params *vcp, int fd, int *child_taps, 47 - int child_disks[][VM_MAX_BASE_PER_DISK], int child_cdrom) 48 - { 49 - fatalx("%s: unimplemented", __func__); 50 - /* NOTREACHED */ 51 - } 52 - 53 - void 54 46 pause_vm_md(struct vmd_vm *vm) 55 47 { 56 48 fatalx("%s: unimplemented", __func__); ··· 62 54 { 63 55 fatalx("%s: unimplemented", __func__); 64 56 /* NOTREACHED */ 65 - } 66 - 67 - int 68 - dump_devs(int fd) 69 - { 70 - fatalx("%s: unimplemented", __func__); 71 - /* NOTREACHED */ 72 - return (-1); 73 - } 74 - 75 - int 76 - dump_send_header(int fd) 77 - { 78 - fatalx("%s: unimplemented", __func__); 79 - /* NOTREACHED */ 80 - return (-1); 81 57 } 82 58 83 59 void *
+6 -13
usr.sbin/vmd/config.c
··· 1 - /* $OpenBSD: config.c,v 1.77 2025/05/12 17:17:42 dv Exp $ */ 1 + /* $OpenBSD: config.c,v 1.78 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2015 Reyk Floeter <reyk@openbsd.org> ··· 258 258 /* 259 259 * From here onward, all failures need cleanup and use goto fail 260 260 */ 261 - if (!(vm->vm_state & VM_STATE_RECEIVED) && vm->vm_kernel == -1) { 261 + if (vm->vm_kernel == -1) { 262 262 if (vm->vm_kernel_path != NULL) { 263 263 /* Open external kernel for child */ 264 264 kernfd = open(vm->vm_kernel_path, O_RDONLY | O_CLOEXEC); ··· 455 455 456 456 /* Send VM information */ 457 457 /* XXX check proc_compose_imsg return values */ 458 - if (vm->vm_state & VM_STATE_RECEIVED) 459 - proc_compose_imsg(ps, PROC_VMM, -1, 460 - IMSG_VMDOP_RECEIVE_VM_REQUEST, vm->vm_vmid, fd, vmc, 461 - sizeof(struct vmop_create_params)); 462 - else 463 - proc_compose_imsg(ps, PROC_VMM, -1, 464 - IMSG_VMDOP_START_VM_REQUEST, vm->vm_vmid, vm->vm_kernel, 465 - vmc, sizeof(*vmc)); 458 + proc_compose_imsg(ps, PROC_VMM, -1, IMSG_VMDOP_START_VM_REQUEST, 459 + vm->vm_vmid, vm->vm_kernel, vmc, sizeof(*vmc)); 466 460 467 461 if (strlen(vmc->vmc_cdrom)) 468 462 proc_compose_imsg(ps, PROC_VMM, -1, ··· 490 484 vm->vm_vmid, dup(tapfds[i]), &var, sizeof(var)); 491 485 } 492 486 493 - if (!(vm->vm_state & VM_STATE_RECEIVED)) 494 - proc_compose_imsg(ps, PROC_VMM, -1, 495 - IMSG_VMDOP_START_VM_END, vm->vm_vmid, fd, NULL, 0); 487 + proc_compose_imsg(ps, PROC_VMM, -1, IMSG_VMDOP_START_VM_END, 488 + vm->vm_vmid, fd, NULL, 0); 496 489 497 490 free(tapfds); 498 491
+1 -4
usr.sbin/vmd/control.c
··· 1 - /* $OpenBSD: control.c,v 1.50 2025/05/12 17:17:42 dv Exp $ */ 1 + /* $OpenBSD: control.c,v 1.51 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2010-2015 Reyk Floeter <reyk@openbsd.org> ··· 93 93 switch (type) { 94 94 case IMSG_VMDOP_START_VM_RESPONSE: 95 95 case IMSG_VMDOP_PAUSE_VM_RESPONSE: 96 - case IMSG_VMDOP_SEND_VM_RESPONSE: 97 96 case IMSG_VMDOP_UNPAUSE_VM_RESPONSE: 98 97 case IMSG_VMDOP_GET_INFO_VM_DATA: 99 98 case IMSG_VMDOP_GET_INFO_VM_END_DATA: ··· 438 437 peer_id, -1, &v, sizeof(v))) 439 438 goto fail; 440 439 break; 441 - case IMSG_VMDOP_RECEIVE_VM_REQUEST: 442 - case IMSG_VMDOP_SEND_VM_REQUEST: 443 440 case IMSG_CTL_RESET: 444 441 case IMSG_VMDOP_LOAD: 445 442 case IMSG_VMDOP_RELOAD:
+1 -13
usr.sbin/vmd/vioblk.c
··· 1 - /* $OpenBSD: vioblk.c,v 1.22 2025/05/12 17:17:42 dv Exp $ */ 1 + /* $OpenBSD: vioblk.c,v 1.23 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2023 Dave Voutila <dv@openbsd.org> ··· 154 154 vioblk->capacity = szp / 512; 155 155 log_debug("%s: initialized vioblk%d with %s image (capacity=%lld)", 156 156 __func__, vioblk->idx, disk_type(type), vioblk->capacity); 157 - 158 - /* If we're restoring hardware, reinitialize the virtqueue hva. */ 159 - if (vm.vm_state & VM_STATE_RECEIVED) 160 - vioblk_update_qa(vioblk); 161 157 162 158 /* Initialize libevent so we can start wiring event handlers. */ 163 159 event_init(); ··· 540 536 imsg_free(&imsg); 541 537 542 538 switch (msg.type) { 543 - case VIODEV_MSG_DUMP: 544 - /* Dump device */ 545 - n = atomicio(vwrite, dev->sync_fd, dev, sizeof(*dev)); 546 - if (n != sizeof(*dev)) { 547 - log_warnx("%s: failed to dump vioblk device", 548 - __func__); 549 - break; 550 - } 551 539 case VIODEV_MSG_IO_READ: 552 540 /* Read IO: make sure to send a reply */ 553 541 msg.data = handle_io_read(&msg, dev, &intr);
+1 -37
usr.sbin/vmd/vionet.c
··· 1 - /* $OpenBSD: vionet.c,v 1.23 2025/05/12 17:17:42 dv Exp $ */ 1 + /* $OpenBSD: vionet.c,v 1.24 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2023 Dave Voutila <dv@openbsd.org> ··· 158 158 close_fd(fd_vmm); 159 159 if (pledge("stdio", NULL) == -1) 160 160 fatal("pledge2"); 161 - 162 - /* If we're restoring hardware, re-initialize virtqueue hva's. */ 163 - if (vm.vm_state & VM_STATE_RECEIVED) { 164 - struct virtio_vq_info *vq_info; 165 - void *hva = NULL; 166 - 167 - vq_info = &dev.vionet.vq[TXQ]; 168 - if (vq_info->q_gpa != 0) { 169 - log_debug("%s: restoring TX virtqueue for gpa 0x%llx", 170 - __func__, vq_info->q_gpa); 171 - hva = hvaddr_mem(vq_info->q_gpa, 172 - vring_size(VIONET_QUEUE_SIZE)); 173 - if (hva == NULL) 174 - fatalx("%s: hva == NULL", __func__); 175 - vq_info->q_hva = hva; 176 - } 177 - 178 - vq_info = &dev.vionet.vq[RXQ]; 179 - if (vq_info->q_gpa != 0) { 180 - log_debug("%s: restoring RX virtqueue for gpa 0x%llx", 181 - __func__, vq_info->q_gpa); 182 - hva = hvaddr_mem(vq_info->q_gpa, 183 - vring_size(VIONET_QUEUE_SIZE)); 184 - if (hva == NULL) 185 - fatalx("%s: hva == NULL", __func__); 186 - vq_info->q_hva = hva; 187 - } 188 - } 189 161 190 162 /* Initialize our packet injection pipe. */ 191 163 if (pipe2(pipe_inject, O_NONBLOCK) == -1) { ··· 1015 987 imsg_free(&imsg); 1016 988 1017 989 switch (msg.type) { 1018 - case VIODEV_MSG_DUMP: 1019 - /* Dump device */ 1020 - n = atomicio(vwrite, dev->sync_fd, dev, sizeof(*dev)); 1021 - if (n != sizeof(*dev)) { 1022 - log_warnx("%s: failed to dump vionet device", 1023 - __func__); 1024 - break; 1025 - } 1026 990 case VIODEV_MSG_IO_READ: 1027 991 /* Read IO: make sure to send a reply */ 1028 992 msg.data = handle_io_read(&msg, dev, &intr);
+9 -239
usr.sbin/vmd/vm.c
··· 1 - /* $OpenBSD: vm.c,v 1.113 2025/06/04 08:21:29 bluhm Exp $ */ 1 + /* $OpenBSD: vm.c,v 1.114 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org> ··· 49 49 static void *event_thread(void *); 50 50 static void *vcpu_run_loop(void *); 51 51 static int vmm_create_vm(struct vmd_vm *); 52 - static int send_vm(int, struct vmd_vm *); 53 - static int dump_vmr(int , struct vm_mem_range *); 54 - static int dump_mem(int, struct vmd_vm *); 55 - static void restore_vmr(int, struct vm_mem_range *); 56 - static void restore_mem(int, struct vm_create_params *); 57 - static int restore_vm_params(int, struct vm_create_params *); 58 52 static void pause_vm(struct vmd_vm *); 59 53 static void unpause_vm(struct vmd_vm *); 60 54 static int start_vm(struct vmd_vm *, int); ··· 143 137 * We need, at minimum, a vm_kernel fd to boot a vm. This is either a 144 138 * kernel or a BIOS image. 145 139 */ 146 - if (!(vm.vm_state & VM_STATE_RECEIVED)) { 147 - if (vm.vm_kernel == -1) { 148 - log_warnx("%s: failed to receive boot fd", 149 - vcp->vcp_name); 150 - _exit(EINVAL); 151 - } 140 + if (vm.vm_kernel == -1) { 141 + log_warnx("%s: failed to receive boot fd", vcp->vcp_name); 142 + _exit(EINVAL); 152 143 } 153 144 154 145 if (vcp->vcp_sev && env->vmd_psp_fd < 0) { ··· 191 182 int nicfds[VM_MAX_NICS_PER_VM]; 192 183 int ret; 193 184 size_t i; 194 - struct vm_rwregs_params vrp; 195 185 196 186 /* 197 187 * We first try to initialize and allocate memory before bothering 198 188 * vmm(4) with a request to create a new vm. 199 189 */ 200 - if (!(vm->vm_state & VM_STATE_RECEIVED)) 201 - create_memory_map(vcp); 190 + create_memory_map(vcp); 202 191 203 192 /* Create the vm in vmm(4). */ 204 193 ret = vmm_create_vm(vm); ··· 247 236 return (1); 248 237 } 249 238 250 - /* Prepare either our boot image or receive an existing vm to launch. 
*/ 251 - if (vm->vm_state & VM_STATE_RECEIVED) { 252 - ret = atomicio(read, vm->vm_receive_fd, &vrp, sizeof(vrp)); 253 - if (ret != sizeof(vrp)) 254 - fatal("received incomplete vrp - exiting"); 255 - vrs = vrp.vrwp_regs; 256 - } else if (load_firmware(vm, &vrs)) 239 + /* Prepare our boot image. */ 240 + if (load_firmware(vm, &vrs)) 257 241 fatalx("failed to load kernel or firmware image"); 258 242 259 243 if (vm->vm_kernel != -1) ··· 291 275 fatal("setup vm pipe"); 292 276 293 277 /* 294 - * Initialize or restore our emulated hardware. 278 + * Initialize our emulated hardware. 295 279 */ 296 280 for (i = 0; i < VMM_MAX_NICS_PER_VM; i++) 297 281 nicfds[i] = vm->vm_ifs[i].vif_fd; 298 - 299 - if (vm->vm_state & VM_STATE_RECEIVED) { 300 - restore_mem(vm->vm_receive_fd, vcp); 301 - restore_emulated_hw(vcp, vm->vm_receive_fd, nicfds, 302 - vm->vm_disks, vm->vm_cdrom); 303 - if (restore_vm_params(vm->vm_receive_fd, vcp)) 304 - fatal("restore vm params failed"); 305 - unpause_vm(vm); 306 - } else 307 - init_emulated_hw(vmc, vm->vm_cdrom, vm->vm_disks, nicfds); 282 + init_emulated_hw(vmc, vm->vm_cdrom, vm->vm_disks, nicfds); 308 283 309 284 /* Drop privleges further before starting the vcpu run loop(s). 
*/ 310 285 if (pledge("stdio vmm recvfd", NULL) == -1) ··· 404 379 IMSG_VMDOP_UNPAUSE_VM_RESPONSE, id, pid, -1, &vmr, 405 380 sizeof(vmr)); 406 381 break; 407 - case IMSG_VMDOP_SEND_VM_REQUEST: 408 - vmr.vmr_id = vm->vm_vmid; 409 - vmr.vmr_result = send_vm(imsg_get_fd(&imsg), vm); 410 - imsg_compose_event(&vm->vm_iev, 411 - IMSG_VMDOP_SEND_VM_RESPONSE, id, pid, -1, &vmr, 412 - sizeof(vmr)); 413 - if (!vmr.vmr_result) { 414 - imsgbuf_flush(&current_vm->vm_iev.ibuf); 415 - _exit(0); 416 - } 417 - break; 418 382 case IMSG_VMDOP_PRIV_GET_ADDR_RESPONSE: 419 383 vmop_addr_result_read(&imsg, &var); 420 384 log_debug("%s: received tap addr %s for nic %d", ··· 461 425 _exit(0); 462 426 } 463 427 464 - int 465 - send_vm(int fd, struct vmd_vm *vm) 466 - { 467 - struct vm_rwregs_params vrp; 468 - struct vm_rwvmparams_params vpp; 469 - struct vmop_create_params *vmc; 470 - struct vm_terminate_params vtp; 471 - unsigned int flags = 0; 472 - unsigned int i; 473 - int ret = 0; 474 - size_t sz; 475 - 476 - if (dump_send_header(fd)) { 477 - log_warnx("%s: failed to send vm dump header", __func__); 478 - goto err; 479 - } 480 - 481 - pause_vm(vm); 482 - 483 - vmc = calloc(1, sizeof(struct vmop_create_params)); 484 - if (vmc == NULL) { 485 - log_warn("%s: calloc error getting vmc", __func__); 486 - ret = -1; 487 - goto err; 488 - } 489 - 490 - flags |= VMOP_CREATE_MEMORY; 491 - memcpy(&vmc->vmc_params, &current_vm->vm_params, sizeof(struct 492 - vmop_create_params)); 493 - vmc->vmc_flags = flags; 494 - vrp.vrwp_vm_id = vm->vm_params.vmc_params.vcp_id; 495 - vrp.vrwp_mask = VM_RWREGS_ALL; 496 - vpp.vpp_mask = VM_RWVMPARAMS_ALL; 497 - vpp.vpp_vm_id = vm->vm_params.vmc_params.vcp_id; 498 - 499 - sz = atomicio(vwrite, fd, vmc, sizeof(struct vmop_create_params)); 500 - if (sz != sizeof(struct vmop_create_params)) { 501 - ret = -1; 502 - goto err; 503 - } 504 - 505 - for (i = 0; i < vm->vm_params.vmc_params.vcp_ncpus; i++) { 506 - vrp.vrwp_vcpu_id = i; 507 - if ((ret = ioctl(env->vmd_fd, 
VMM_IOC_READREGS, &vrp))) { 508 - log_warn("%s: readregs failed", __func__); 509 - goto err; 510 - } 511 - 512 - sz = atomicio(vwrite, fd, &vrp, 513 - sizeof(struct vm_rwregs_params)); 514 - if (sz != sizeof(struct vm_rwregs_params)) { 515 - log_warn("%s: dumping registers failed", __func__); 516 - ret = -1; 517 - goto err; 518 - } 519 - } 520 - 521 - /* Dump memory before devices to aid in restoration. */ 522 - if ((ret = dump_mem(fd, vm))) 523 - goto err; 524 - if ((ret = dump_devs(fd))) 525 - goto err; 526 - if ((ret = pci_dump(fd))) 527 - goto err; 528 - if ((ret = virtio_dump(fd))) 529 - goto err; 530 - 531 - for (i = 0; i < vm->vm_params.vmc_params.vcp_ncpus; i++) { 532 - vpp.vpp_vcpu_id = i; 533 - if ((ret = ioctl(env->vmd_fd, VMM_IOC_READVMPARAMS, &vpp))) { 534 - log_warn("%s: readvmparams failed", __func__); 535 - goto err; 536 - } 537 - 538 - sz = atomicio(vwrite, fd, &vpp, 539 - sizeof(struct vm_rwvmparams_params)); 540 - if (sz != sizeof(struct vm_rwvmparams_params)) { 541 - log_warn("%s: dumping vm params failed", __func__); 542 - ret = -1; 543 - goto err; 544 - } 545 - } 546 - 547 - vtp.vtp_vm_id = vm->vm_params.vmc_params.vcp_id; 548 - if (ioctl(env->vmd_fd, VMM_IOC_TERM, &vtp) == -1) { 549 - log_warnx("%s: term IOC error: %d, %d", __func__, 550 - errno, ENOENT); 551 - } 552 - err: 553 - close(fd); 554 - if (ret) 555 - unpause_vm(vm); 556 - return ret; 557 - } 558 - 559 - int 560 - dump_mem(int fd, struct vmd_vm *vm) 561 - { 562 - unsigned int i; 563 - int ret; 564 - struct vm_mem_range *vmr; 565 - 566 - for (i = 0; i < vm->vm_params.vmc_params.vcp_nmemranges; i++) { 567 - vmr = &vm->vm_params.vmc_params.vcp_memranges[i]; 568 - ret = dump_vmr(fd, vmr); 569 - if (ret) 570 - return ret; 571 - } 572 - return (0); 573 - } 574 - 575 - int 576 - restore_vm_params(int fd, struct vm_create_params *vcp) { 577 - unsigned int i; 578 - struct vm_rwvmparams_params vpp; 579 - 580 - for (i = 0; i < vcp->vcp_ncpus; i++) { 581 - if (atomicio(read, fd, &vpp, 
sizeof(vpp)) != sizeof(vpp)) { 582 - log_warn("%s: error restoring vm params", __func__); 583 - return (-1); 584 - } 585 - vpp.vpp_vm_id = vcp->vcp_id; 586 - vpp.vpp_vcpu_id = i; 587 - if (ioctl(env->vmd_fd, VMM_IOC_WRITEVMPARAMS, &vpp) < 0) { 588 - log_debug("%s: writing vm params failed", __func__); 589 - return (-1); 590 - } 591 - } 592 - return (0); 593 - } 594 - 595 - void 596 - restore_mem(int fd, struct vm_create_params *vcp) 597 - { 598 - unsigned int i; 599 - struct vm_mem_range *vmr; 600 - 601 - for (i = 0; i < vcp->vcp_nmemranges; i++) { 602 - vmr = &vcp->vcp_memranges[i]; 603 - restore_vmr(fd, vmr); 604 - } 605 - } 606 - 607 - int 608 - dump_vmr(int fd, struct vm_mem_range *vmr) 609 - { 610 - size_t rem = vmr->vmr_size, read=0; 611 - char buf[PAGE_SIZE]; 612 - 613 - while (rem > 0) { 614 - if (read_mem(vmr->vmr_gpa + read, buf, PAGE_SIZE)) { 615 - log_warn("failed to read vmr"); 616 - return (-1); 617 - } 618 - if (atomicio(vwrite, fd, buf, sizeof(buf)) != sizeof(buf)) { 619 - log_warn("failed to dump vmr"); 620 - return (-1); 621 - } 622 - rem = rem - PAGE_SIZE; 623 - read = read + PAGE_SIZE; 624 - } 625 - return (0); 626 - } 627 - 628 - void 629 - restore_vmr(int fd, struct vm_mem_range *vmr) 630 - { 631 - size_t rem = vmr->vmr_size, wrote=0; 632 - char buf[PAGE_SIZE]; 633 - 634 - while (rem > 0) { 635 - if (atomicio(read, fd, buf, sizeof(buf)) != sizeof(buf)) 636 - fatal("failed to restore vmr"); 637 - if (write_mem(vmr->vmr_gpa + wrote, buf, PAGE_SIZE)) 638 - fatal("failed to write vmr"); 639 - rem = rem - PAGE_SIZE; 640 - wrote = wrote + PAGE_SIZE; 641 - } 642 - } 643 - 644 428 static void 645 429 pause_vm(struct vmd_vm *vm) 646 430 { ··· 813 597 run_vm(struct vmop_create_params *vmc, struct vcpu_reg_state *vrs) 814 598 { 815 599 struct vm_create_params *vcp = &vmc->vmc_params; 816 - struct vm_rwregs_params vregsp; 817 600 uint8_t evdone = 0; 818 601 size_t i; 819 602 int ret; ··· 880 663 log_warnx("%s: memory encryption failed for VCPU " 881 664 
"%zu failed - exiting.", __progname, i); 882 665 return (EIO); 883 - } 884 - 885 - /* once more because reset_cpu changes regs */ 886 - if (current_vm->vm_state & VM_STATE_RECEIVED) { 887 - vregsp.vrwp_vm_id = vcp->vcp_id; 888 - vregsp.vrwp_vcpu_id = i; 889 - vregsp.vrwp_regs = *vrs; 890 - vregsp.vrwp_mask = VM_RWREGS_ALL; 891 - if ((ret = ioctl(env->vmd_fd, VMM_IOC_WRITEREGS, 892 - &vregsp)) == -1) { 893 - log_warn("%s: writeregs failed", __func__); 894 - return (ret); 895 - } 896 666 } 897 667 898 668 if (sev_encrypt_state(current_vm, i)) {
+3 -119
usr.sbin/vmd/vmd.c
··· 1 - /* $OpenBSD: vmd.c,v 1.166 2025/06/04 12:47:59 tb Exp $ */ 1 + /* $OpenBSD: vmd.c,v 1.167 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2015 Reyk Floeter <reyk@openbsd.org> ··· 54 54 int vmd_dispatch_vmm(int, struct privsep_proc *, struct imsg *); 55 55 int vmd_dispatch_agentx(int, struct privsep_proc *, struct imsg *); 56 56 int vmd_dispatch_priv(int, struct privsep_proc *, struct imsg *); 57 - int vmd_check_vmh(struct vm_dump_header *); 58 57 59 58 int vm_instance(struct privsep *, struct vmd_vm **, 60 59 struct vmop_create_params *, uid_t); ··· 92 91 vmd_dispatch_control(int fd, struct privsep_proc *p, struct imsg *imsg) 93 92 { 94 93 struct privsep *ps = p->p_ps; 95 - int res = 0, ret = 0, cmd = 0, verbose; 96 - int ifd; 94 + int res = 0, cmd = 0, verbose; 97 95 unsigned int v = 0, flags; 98 96 struct vmop_create_params vmc; 99 97 struct vmop_id vid; 100 98 struct vmop_result vmr; 101 - struct vm_dump_header vmh; 102 99 struct vmd_vm *vm = NULL; 103 100 char *str = NULL; 104 101 uint32_t peer_id, type, vm_id = 0; 105 102 struct control_sock *rcs; 106 - size_t i; 107 103 108 104 peer_id = imsg_get_id(imsg); 109 105 type = imsg_get_type(imsg); ··· 251 247 proc_compose_imsg(ps, PROC_VMM, -1, type, vm->vm_peerid, -1, 252 248 &vid, sizeof(vid)); 253 249 break; 254 - case IMSG_VMDOP_SEND_VM_REQUEST: 255 - vmop_id_read(imsg, &vid); 256 - vm_id = vid.vid_id; 257 - ifd = imsg_get_fd(imsg); 258 - if (vid.vid_id == 0) { 259 - if ((vm = vm_getbyname(vid.vid_name)) == NULL) { 260 - res = ENOENT; 261 - cmd = IMSG_VMDOP_SEND_VM_RESPONSE; 262 - close(ifd); 263 - break; 264 - } else { 265 - vid.vid_id = vm->vm_vmid; 266 - } 267 - } else if ((vm = vm_getbyvmid(vid.vid_id)) == NULL) { 268 - res = ENOENT; 269 - cmd = IMSG_VMDOP_SEND_VM_RESPONSE; 270 - close(ifd); 271 - break; 272 - } 273 - vmr.vmr_id = vid.vid_id; 274 - log_debug("%s: sending fd to vmm", __func__); 275 - proc_compose_imsg(ps, PROC_VMM, -1, type, peer_id, ifd, &vid, 276 - sizeof(vid)); 277 
- break; 278 - case IMSG_VMDOP_RECEIVE_VM_REQUEST: 279 - vmop_id_read(imsg, &vid); 280 - ifd = imsg_get_fd(imsg); 281 - if (ifd == -1) { 282 - log_warnx("%s: invalid fd", __func__); 283 - return (-1); 284 - } 285 - if (atomicio(read, ifd, &vmh, sizeof(vmh)) != sizeof(vmh)) { 286 - log_warnx("%s: error reading vmh from received vm", 287 - __func__); 288 - res = EIO; 289 - close(ifd); 290 - cmd = IMSG_VMDOP_START_VM_RESPONSE; 291 - break; 292 - } 293 - 294 - if (vmd_check_vmh(&vmh)) { 295 - res = ENOENT; 296 - close(ifd); 297 - cmd = IMSG_VMDOP_START_VM_RESPONSE; 298 - break; 299 - } 300 - if (atomicio(read, ifd, &vmc, sizeof(vmc)) != sizeof(vmc)) { 301 - log_warnx("%s: error reading vmc from received vm", 302 - __func__); 303 - res = EIO; 304 - close(ifd); 305 - cmd = IMSG_VMDOP_START_VM_RESPONSE; 306 - break; 307 - } 308 - 309 - /* vm_create_params was read from an untrusted source. Scrub. */ 310 - vmc.vmc_params.vcp_name[sizeof(vmc.vmc_params.vcp_name) - 1] = 311 - '\0'; 312 - for (i = 0; i < nitems(vmc.vmc_disks); i++) 313 - vmc.vmc_disks[i][sizeof(vmc.vmc_disks[i]) - 1] = '\0'; 314 - for (i = 0; i < nitems(vmc.vmc_ifnames); i++) 315 - vmc.vmc_ifnames[i][sizeof(vmc.vmc_ifnames[i]) - 1] 316 - = '\0'; 317 - for (i = 0; i < nitems(vmc.vmc_ifswitch); i++) 318 - vmc.vmc_ifswitch[i][sizeof(vmc.vmc_ifswitch[i]) - 1] 319 - = '\0'; 320 - for (i = 0; i < nitems(vmc.vmc_ifgroup); i++) 321 - vmc.vmc_ifgroup[i][sizeof(vmc.vmc_ifgroup[i]) - 1] 322 - = '\0'; 323 - vmc.vmc_instance[sizeof(vmc.vmc_instance) - 1] = '\0'; 324 - 325 - strlcpy(vmc.vmc_params.vcp_name, vid.vid_name, 326 - sizeof(vmc.vmc_params.vcp_name)); 327 - vmc.vmc_params.vcp_id = 0; 328 - 329 - ret = vm_register(ps, &vmc, &vm, 0, vmc.vmc_owner.uid); 330 - if (ret != 0) { 331 - res = errno; 332 - cmd = IMSG_VMDOP_START_VM_RESPONSE; 333 - close(ifd); 334 - } else { 335 - vm->vm_state |= VM_STATE_RECEIVED; 336 - config_setvm(ps, vm, peer_id, vmc.vmc_owner.uid); 337 - log_debug("%s: sending fd to vmm", __func__); 338 
- proc_compose_imsg(ps, PROC_VMM, -1, 339 - IMSG_VMDOP_RECEIVE_VM_END, vm->vm_vmid, ifd, 340 - NULL, 0); 341 - } 342 - break; 343 250 case IMSG_VMDOP_DONE: 344 251 control_reset(&ps->ps_csock); 345 252 TAILQ_FOREACH(rcs, &ps->ps_rcsocks, cs_entry) ··· 461 368 break; 462 369 /* Mark VM as shutting down */ 463 370 vm->vm_state |= VM_STATE_SHUTDOWN; 464 - } 465 - break; 466 - case IMSG_VMDOP_SEND_VM_RESPONSE: 467 - vmop_result_read(imsg, &vmr); 468 - if ((vm = vm_getbyvmid(vmr.vmr_id)) == NULL) 469 - break; 470 - if (!vmr.vmr_result) { 471 - log_info("%s: sent vm %d successfully.", 472 - vm->vm_params.vmc_params.vcp_name, 473 - vm->vm_vmid); 474 - vm_terminate(vm, __func__); 475 - } 476 - 477 - /* Send a response if a control client is waiting for it */ 478 - if (peer_id != (uint32_t)-1) { 479 - /* the error is meaningless for deferred responses */ 480 - vmr.vmr_result = 0; 481 - 482 - if (proc_compose_imsg(ps, PROC_CONTROL, -1, 483 - IMSG_VMDOP_SEND_VM_RESPONSE, peer_id, -1, &vmr, 484 - sizeof(vmr)) == -1) 485 - return (-1); 486 371 } 487 372 break; 488 373 case IMSG_VMDOP_TERMINATE_VM_EVENT: ··· 1150 1035 __func__, ps->ps_title[privsep_process], caller, 1151 1036 vm->vm_vmid, keeptty ? ", keeping tty open" : ""); 1152 1037 1153 - vm->vm_state &= ~(VM_STATE_RECEIVED | VM_STATE_RUNNING 1154 - | VM_STATE_SHUTDOWN); 1038 + vm->vm_state &= ~(VM_STATE_RUNNING | VM_STATE_SHUTDOWN); 1155 1039 1156 1040 if (vm->vm_iev.ibuf.fd != -1) { 1157 1041 event_del(&vm->vm_iev.ev);
+1 -6
usr.sbin/vmd/vmd.h
··· 1 - /* $OpenBSD: vmd.h,v 1.135 2025/06/04 08:21:29 bluhm Exp $ */ 1 + /* $OpenBSD: vmd.h,v 1.136 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org> ··· 115 115 IMSG_VMDOP_PAUSE_VM_RESPONSE, 116 116 IMSG_VMDOP_UNPAUSE_VM, 117 117 IMSG_VMDOP_UNPAUSE_VM_RESPONSE, 118 - IMSG_VMDOP_SEND_VM_REQUEST, 119 - IMSG_VMDOP_SEND_VM_RESPONSE, 120 - IMSG_VMDOP_RECEIVE_VM_REQUEST, 121 - IMSG_VMDOP_RECEIVE_VM_END, 122 118 IMSG_VMDOP_WAIT_VM_REQUEST, 123 119 IMSG_VMDOP_TERMINATE_VM_REQUEST, 124 120 IMSG_VMDOP_TERMINATE_VM_RESPONSE, ··· 322 318 #define VM_STATE_DISABLED 0x02 323 319 /* When set, VM is marked to be shut down */ 324 320 #define VM_STATE_SHUTDOWN 0x04 325 - #define VM_STATE_RECEIVED 0x08 326 321 #define VM_STATE_PAUSED 0x10 327 322 #define VM_STATE_WAITING 0x20 328 323
+5 -45
usr.sbin/vmd/vmm.c
··· 1 - /* $OpenBSD: vmm.c,v 1.131 2025/05/12 17:17:42 dv Exp $ */ 1 + /* $OpenBSD: vmm.c,v 1.132 2025/06/09 18:43:01 dv Exp $ */ 2 2 3 3 /* 4 4 * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org> ··· 100 100 struct vm_terminate_params vtp; 101 101 struct vmop_id vid; 102 102 struct vmop_result vmr; 103 - struct vmop_create_params vmc; 104 103 struct vmop_addr_result var; 105 104 uint32_t id = 0, vm_id, type; 106 105 pid_t pid, vm_pid = 0; ··· 250 249 imsg_compose_event(&vm->vm_iev, type, -1, pid, 251 250 imsg_get_fd(imsg), &vid, sizeof(vid)); 252 251 break; 253 - case IMSG_VMDOP_SEND_VM_REQUEST: 254 - vmop_id_read(imsg, &vid); 255 - id = vid.vid_id; 256 - if ((vm = vm_getbyvmid(id)) == NULL) { 257 - res = ENOENT; 258 - close(imsg_get_fd(imsg)); /* XXX */ 259 - cmd = IMSG_VMDOP_START_VM_RESPONSE; 260 - break; 261 - } 262 - imsg_compose_event(&vm->vm_iev, type, -1, pid, 263 - imsg_get_fd(imsg), &vid, sizeof(vid)); 264 - break; 265 - case IMSG_VMDOP_RECEIVE_VM_REQUEST: 266 - vmop_create_params_read(imsg, &vmc); 267 - if (vm_register(ps, &vmc, &vm, vm_id, vmc.vmc_owner.uid) != 0) { 268 - res = errno; 269 - cmd = IMSG_VMDOP_START_VM_RESPONSE; 270 - break; 271 - } 272 - vm->vm_tty = imsg_get_fd(imsg); 273 - vm->vm_state |= VM_STATE_RECEIVED; 274 - vm->vm_state |= VM_STATE_PAUSED; 275 - break; 276 - case IMSG_VMDOP_RECEIVE_VM_END: 277 - if ((vm = vm_getbyvmid(vm_id)) == NULL) { 278 - res = ENOENT; 279 - close(imsg_get_fd(imsg)); /* XXX */ 280 - cmd = IMSG_VMDOP_START_VM_RESPONSE; 281 - break; 282 - } 283 - vm->vm_receive_fd = imsg_get_fd(imsg); 284 - res = vmm_start_vm(imsg, &id, &pid); 285 - /* Check if the ID can be mapped correctly */ 286 - if ((id = vm_id2vmid(id, NULL)) == 0) 287 - res = ENOENT; 288 - cmd = IMSG_VMDOP_START_VM_RESPONSE; 289 - break; 290 252 case IMSG_VMDOP_PRIV_GET_ADDR_RESPONSE: 291 253 vmop_addr_result_read(imsg, &var); 292 254 if ((vm = vm_getbyvmid(var.var_vmid)) == NULL) { ··· 523 485 case IMSG_VMDOP_VM_REBOOT: 524 486 vm->vm_state &= 
~VM_STATE_SHUTDOWN; 525 487 break; 526 - case IMSG_VMDOP_SEND_VM_RESPONSE: 527 488 case IMSG_VMDOP_PAUSE_VM_RESPONSE: 528 489 case IMSG_VMDOP_UNPAUSE_VM_RESPONSE: 529 490 for (i = 0; i < nitems(procs); i++) { ··· 644 605 } 645 606 vcp = &vm->vm_params.vmc_params; 646 607 647 - if (!(vm->vm_state & VM_STATE_RECEIVED)) { 648 - if ((vm->vm_tty = imsg_get_fd(imsg)) == -1) { 649 - log_warnx("%s: can't get tty", __func__); 650 - goto err; 651 - } 608 + if ((vm->vm_tty = imsg_get_fd(imsg)) == -1) { 609 + log_warnx("%s: can't get tty", __func__); 610 + goto err; 652 611 } 612 + 653 613 654 614 if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, PF_UNSPEC, fds) 655 615 == -1)
+1 -246
usr.sbin/vmd/x86_vm.c
··· 1 - /* $OpenBSD: x86_vm.c,v 1.5 2024/10/02 17:05:56 dv Exp $ */ 1 + /* $OpenBSD: x86_vm.c,v 1.6 2025/06/09 18:43:01 dv Exp $ */ 2 2 /* 3 3 * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org> 4 4 * ··· 402 402 ioports_map[FW_CFG_IO_DMA_ADDR_LOW] = vcpu_exit_fw_cfg_dma; 403 403 } 404 404 405 - /* 406 - * restore_emulated_hw 407 - * 408 - * Restores the userspace hardware emulation from fd 409 - */ 410 - void 411 - restore_emulated_hw(struct vm_create_params *vcp, int fd, 412 - int *child_taps, int child_disks[][VM_MAX_BASE_PER_DISK], int child_cdrom) 413 - { 414 - /* struct vm_create_params *vcp = &vmc->vmc_params; */ 415 - int i; 416 - memset(&ioports_map, 0, sizeof(io_fn_t) * MAX_PORTS); 417 - 418 - /* Init i8253 PIT */ 419 - i8253_restore(fd, vcp->vcp_id); 420 - ioports_map[TIMER_CTRL] = vcpu_exit_i8253; 421 - ioports_map[TIMER_BASE + TIMER_CNTR0] = vcpu_exit_i8253; 422 - ioports_map[TIMER_BASE + TIMER_CNTR1] = vcpu_exit_i8253; 423 - ioports_map[TIMER_BASE + TIMER_CNTR2] = vcpu_exit_i8253; 424 - 425 - /* Init master and slave PICs */ 426 - i8259_restore(fd); 427 - ioports_map[IO_ICU1] = vcpu_exit_i8259; 428 - ioports_map[IO_ICU1 + 1] = vcpu_exit_i8259; 429 - ioports_map[IO_ICU2] = vcpu_exit_i8259; 430 - ioports_map[IO_ICU2 + 1] = vcpu_exit_i8259; 431 - 432 - /* Init ns8250 UART */ 433 - ns8250_restore(fd, con_fd, vcp->vcp_id); 434 - for (i = COM1_DATA; i <= COM1_SCR; i++) 435 - ioports_map[i] = vcpu_exit_com; 436 - 437 - /* Init mc146818 RTC */ 438 - mc146818_restore(fd, vcp->vcp_id); 439 - ioports_map[IO_RTC] = vcpu_exit_mc146818; 440 - ioports_map[IO_RTC + 1] = vcpu_exit_mc146818; 441 - 442 - /* Init QEMU fw_cfg interface */ 443 - fw_cfg_restore(fd); 444 - ioports_map[FW_CFG_IO_SELECT] = vcpu_exit_fw_cfg; 445 - ioports_map[FW_CFG_IO_DATA] = vcpu_exit_fw_cfg; 446 - ioports_map[FW_CFG_IO_DMA_ADDR_HIGH] = vcpu_exit_fw_cfg_dma; 447 - ioports_map[FW_CFG_IO_DMA_ADDR_LOW] = vcpu_exit_fw_cfg_dma; 448 - 449 - /* Initialize PCI */ 450 - for (i = VM_PCI_IO_BAR_BASE; 
i <= VM_PCI_IO_BAR_END; i++) 451 - ioports_map[i] = vcpu_exit_pci; 452 - 453 - ioports_map[PCI_MODE1_ADDRESS_REG] = vcpu_exit_pci; 454 - ioports_map[PCI_MODE1_DATA_REG] = vcpu_exit_pci; 455 - ioports_map[PCI_MODE1_DATA_REG + 1] = vcpu_exit_pci; 456 - ioports_map[PCI_MODE1_DATA_REG + 2] = vcpu_exit_pci; 457 - ioports_map[PCI_MODE1_DATA_REG + 3] = vcpu_exit_pci; 458 - pci_restore(fd); 459 - virtio_restore(fd, current_vm, child_cdrom, child_disks, child_taps); 460 - } 461 - 462 405 void 463 406 pause_vm_md(struct vmd_vm *vm) 464 407 { ··· 476 419 ns8250_start(); 477 420 virtio_start(vm); 478 421 } 479 - 480 - int 481 - dump_devs(int fd) 482 - { 483 - int ret = 0; 484 - 485 - if ((ret = i8253_dump(fd))) 486 - return ret; 487 - if ((ret = i8259_dump(fd))) 488 - return ret; 489 - if ((ret = ns8250_dump(fd))) 490 - return ret; 491 - if ((ret = mc146818_dump(fd))) 492 - return ret; 493 - ret = fw_cfg_dump(fd); 494 - 495 - return ret; 496 - } 497 - 498 - int 499 - dump_send_header(int fd) { 500 - struct vm_dump_header vmh; 501 - int i; 502 - 503 - memcpy(&vmh.vmh_signature, VM_DUMP_SIGNATURE, 504 - sizeof(vmh.vmh_signature)); 505 - 506 - vmh.vmh_cpuids[0].code = 0x00; 507 - vmh.vmh_cpuids[0].leaf = 0x00; 508 - 509 - vmh.vmh_cpuids[1].code = 0x01; 510 - vmh.vmh_cpuids[1].leaf = 0x00; 511 - 512 - vmh.vmh_cpuids[2].code = 0x07; 513 - vmh.vmh_cpuids[2].leaf = 0x00; 514 - 515 - vmh.vmh_cpuids[3].code = 0x0d; 516 - vmh.vmh_cpuids[3].leaf = 0x00; 517 - 518 - vmh.vmh_cpuids[4].code = 0x80000001; 519 - vmh.vmh_cpuids[4].leaf = 0x00; 520 - 521 - vmh.vmh_version = VM_DUMP_VERSION; 522 - 523 - for (i=0; i < VM_DUMP_HEADER_CPUID_COUNT; i++) { 524 - CPUID_LEAF(vmh.vmh_cpuids[i].code, 525 - vmh.vmh_cpuids[i].leaf, 526 - vmh.vmh_cpuids[i].a, 527 - vmh.vmh_cpuids[i].b, 528 - vmh.vmh_cpuids[i].c, 529 - vmh.vmh_cpuids[i].d); 530 - } 531 - 532 - if (atomicio(vwrite, fd, &vmh, sizeof(vmh)) != sizeof(vmh)) 533 - return (-1); 534 - 535 - return (0); 536 - } 537 - 538 422 539 423 /* 540 424 * 
vcpu_exit_inout ··· 1238 1122 /* XXX select active interrupt controller */ 1239 1123 pic_set_elcr(irq, val); 1240 1124 } 1241 - 1242 - int 1243 - vmd_check_vmh(struct vm_dump_header *vmh) 1244 - { 1245 - int i; 1246 - unsigned int code, leaf; 1247 - unsigned int a, b, c, d; 1248 - 1249 - if (strncmp(vmh->vmh_signature, VM_DUMP_SIGNATURE, strlen(VM_DUMP_SIGNATURE)) != 0) { 1250 - log_warnx("%s: incompatible dump signature", __func__); 1251 - return (-1); 1252 - } 1253 - 1254 - if (vmh->vmh_version != VM_DUMP_VERSION) { 1255 - log_warnx("%s: incompatible dump version", __func__); 1256 - return (-1); 1257 - } 1258 - 1259 - for (i = 0; i < VM_DUMP_HEADER_CPUID_COUNT; i++) { 1260 - code = vmh->vmh_cpuids[i].code; 1261 - leaf = vmh->vmh_cpuids[i].leaf; 1262 - if (leaf != 0x00) { 1263 - log_debug("%s: invalid leaf 0x%x for code 0x%x", 1264 - __func__, leaf, code); 1265 - return (-1); 1266 - } 1267 - 1268 - switch (code) { 1269 - case 0x00: 1270 - CPUID_LEAF(code, leaf, a, b, c, d); 1271 - if (vmh->vmh_cpuids[i].a > a) { 1272 - log_debug("%s: incompatible cpuid level", 1273 - __func__); 1274 - return (-1); 1275 - } 1276 - if (!(vmh->vmh_cpuids[i].b == b && 1277 - vmh->vmh_cpuids[i].c == c && 1278 - vmh->vmh_cpuids[i].d == d)) { 1279 - log_debug("%s: incompatible cpu brand", 1280 - __func__); 1281 - return (-1); 1282 - } 1283 - break; 1284 - 1285 - case 0x01: 1286 - CPUID_LEAF(code, leaf, a, b, c, d); 1287 - if ((vmh->vmh_cpuids[i].c & c & VMM_CPUIDECX_MASK) != 1288 - (vmh->vmh_cpuids[i].c & VMM_CPUIDECX_MASK)) { 1289 - log_debug("%s: incompatible cpu features " 1290 - "code: 0x%x leaf: 0x%x reg: c", __func__, 1291 - code, leaf); 1292 - return (-1); 1293 - } 1294 - if ((vmh->vmh_cpuids[i].d & d & VMM_CPUIDEDX_MASK) != 1295 - (vmh->vmh_cpuids[i].d & VMM_CPUIDEDX_MASK)) { 1296 - log_debug("%s: incompatible cpu features " 1297 - "code: 0x%x leaf: 0x%x reg: d", __func__, 1298 - code, leaf); 1299 - return (-1); 1300 - } 1301 - break; 1302 - 1303 - case 0x07: 1304 - 
CPUID_LEAF(code, leaf, a, b, c, d); 1305 - if ((vmh->vmh_cpuids[i].b & b & VMM_SEFF0EBX_MASK) != 1306 - (vmh->vmh_cpuids[i].b & VMM_SEFF0EBX_MASK)) { 1307 - log_debug("%s: incompatible cpu features " 1308 - "code: 0x%x leaf: 0x%x reg: c", __func__, 1309 - code, leaf); 1310 - return (-1); 1311 - } 1312 - if ((vmh->vmh_cpuids[i].c & c & VMM_SEFF0ECX_MASK) != 1313 - (vmh->vmh_cpuids[i].c & VMM_SEFF0ECX_MASK)) { 1314 - log_debug("%s: incompatible cpu features " 1315 - "code: 0x%x leaf: 0x%x reg: d", __func__, 1316 - code, leaf); 1317 - return (-1); 1318 - } 1319 - break; 1320 - 1321 - case 0x0d: 1322 - CPUID_LEAF(code, leaf, a, b, c, d); 1323 - if (vmh->vmh_cpuids[i].b > b) { 1324 - log_debug("%s: incompatible cpu: insufficient " 1325 - "max save area for enabled XCR0 features", 1326 - __func__); 1327 - return (-1); 1328 - } 1329 - if (vmh->vmh_cpuids[i].c > c) { 1330 - log_debug("%s: incompatible cpu: insufficient " 1331 - "max save area for supported XCR0 features", 1332 - __func__); 1333 - return (-1); 1334 - } 1335 - break; 1336 - 1337 - case 0x80000001: 1338 - CPUID_LEAF(code, leaf, a, b, c, d); 1339 - if ((vmh->vmh_cpuids[i].a & a) != 1340 - vmh->vmh_cpuids[i].a) { 1341 - log_debug("%s: incompatible cpu features " 1342 - "code: 0x%x leaf: 0x%x reg: a", __func__, 1343 - code, leaf); 1344 - return (-1); 1345 - } 1346 - if ((vmh->vmh_cpuids[i].c & c) != 1347 - vmh->vmh_cpuids[i].c) { 1348 - log_debug("%s: incompatible cpu features " 1349 - "code: 0x%x leaf: 0x%x reg: c", __func__, 1350 - code, leaf); 1351 - return (-1); 1352 - } 1353 - if ((vmh->vmh_cpuids[i].d & d) != 1354 - vmh->vmh_cpuids[i].d) { 1355 - log_debug("%s: incompatible cpu features " 1356 - "code: 0x%x leaf: 0x%x reg: d", __func__, 1357 - code, leaf); 1358 - return (-1); 1359 - } 1360 - break; 1361 - 1362 - default: 1363 - log_debug("%s: unknown code 0x%x", __func__, code); 1364 - return (-1); 1365 - } 1366 - } 1367 - 1368 - return (0); 1369 - }