Bug Summary

File: src/usr.sbin/vmd/vmm.c
Warning: line 747, column 2
Value stored to 'ret' is never read
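
The dead store flagged here is the "ret = 0;" initialization near the top of
get_info_vm(): every subsequent path either returns a constant or an errno
value directly, or reassigns 'ret' (from errno or terminate_vm()) before
reading it, so the stored value is never consumed. A minimal sketch of the
likely cleanup, assuming the initialization serves no other purpose, is to
drop the redundant assignment (illustrative only, not part of the analyzed
source):

    vip.vip_size = 0;
    info = NULL;
    memset(&vir, 0, sizeof(vir));

    /* 'ret' is assigned later, only where its value is actually read */
    if (ioctl(env->vmd_fd, VMM_IOC_INFO, &vip) == -1)
        return (errno);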

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name vmm.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/usr.sbin/vmd/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/usr.sbin/vmd -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -fdebug-compilation-dir=/usr/src/usr.sbin/vmd/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/usr.sbin/vmd/vmm.c
1/* $OpenBSD: vmm.c,v 1.103 2022/01/04 15:25:05 claudio Exp $ */
2
3/*
4 * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/types.h>
20#include <sys/ioctl.h>
21#include <sys/queue.h>
22#include <sys/wait.h>
23#include <sys/uio.h>
24#include <sys/socket.h>
25#include <sys/time.h>
26#include <sys/mman.h>
27
28#include <dev/ic/i8253reg.h>
29#include <dev/isa/isareg.h>
30#include <dev/pci/pcireg.h>
31
32#include <machine/psl.h>
33#include <machine/specialreg.h>
34#include <machine/vmmvar.h>
35
36#include <net/if.h>
37
38#include <errno.h>
39#include <event.h>
40#include <fcntl.h>
41#include <imsg.h>
42#include <limits.h>
43#include <poll.h>
44#include <pthread.h>
45#include <stddef.h>
46#include <stdio.h>
47#include <stdlib.h>
48#include <string.h>
49#include <unistd.h>
50#include <util.h>
51
52#include "vmd.h"
53#include "vmm.h"
54
55void vmm_sighdlr(int, short, void *);
56int vmm_start_vm(struct imsg *, uint32_t *, pid_t *);
57int vmm_dispatch_parent(int, struct privsep_proc *, struct imsg *);
58void vmm_run(struct privsep *, struct privsep_proc *, void *);
59void vmm_dispatch_vm(int, short, void *);
60int terminate_vm(struct vm_terminate_params *);
61int get_info_vm(struct privsep *, struct imsg *, int);
62int opentap(char *);
63
64extern struct vmd *env;
65
66static struct privsep_proc procs[] = {
67 { "parent", PROC_PARENT, vmm_dispatch_parent },
68};
69
70void
71vmm(struct privsep *ps, struct privsep_proc *p)
72{
73 proc_run(ps, p, procs, nitems(procs), vmm_run, NULL);
74}
75
76void
77vmm_run(struct privsep *ps, struct privsep_proc *p, void *arg)
78{
79 if (config_init(ps->ps_env) == -1)
80 fatal("failed to initialize configuration");
81
82 signal_del(&ps->ps_evsigchld);
83 signal_set(&ps->ps_evsigchld, SIGCHLD, vmm_sighdlr, ps);
84 signal_add(&ps->ps_evsigchld, NULL);
85
86 /*
87 * pledge in the vmm process:
88 * stdio - for malloc and basic I/O including events.
89 * vmm - for the vmm ioctls and operations.
90 * proc - for forking and maintaining vms.
91 * send - for sending send/recv fds to vm proc.
92 * recvfd - for disks, interfaces and other fds.
93 */
94 if (pledge("stdio vmm sendfd recvfd proc", NULL) == -1)
95 fatal("pledge");
96
97 /* Get and terminate all running VMs */
98 get_info_vm(ps, NULL, 1);
99}
100
101int
102vmm_dispatch_parent(int fd, struct privsep_proc *p, struct imsg *imsg)
103{
104 struct privsep *ps = p->p_ps;
105 int res = 0, cmd = 0, verbose;
106 struct vmd_vm *vm = NULL;
107 struct vm_terminate_params vtp;
108 struct vmop_id vid;
109 struct vmop_result vmr;
110 struct vmop_create_params vmc;
111 struct vmop_addr_result var;
112 uint32_t id = 0, peerid = imsg->hdr.peerid;
113 pid_t pid = 0;
114 unsigned int mode, flags;
115
116 switch (imsg->hdr.type) {
117 case IMSG_VMDOP_START_VM_REQUEST:
118 res = config_getvm(ps, imsg);
119 if (res == -1) {
120 res = errno;
121 cmd = IMSG_VMDOP_START_VM_RESPONSE;
122 }
123 break;
124 case IMSG_VMDOP_START_VM_CDROM:
125 res = config_getcdrom(ps, imsg);
126 if (res == -1) {
127 res = errno;
128 cmd = IMSG_VMDOP_START_VM_RESPONSE;
129 }
130 break;
131 case IMSG_VMDOP_START_VM_DISK:
132 res = config_getdisk(ps, imsg);
133 if (res == -1) {
134 res = errno;
135 cmd = IMSG_VMDOP_START_VM_RESPONSE;
136 }
137 break;
138 case IMSG_VMDOP_START_VM_IF:
139 res = config_getif(ps, imsg);
140 if (res == -1) {
141 res = errno;
142 cmd = IMSG_VMDOP_START_VM_RESPONSE;
143 }
144 break;
145 case IMSG_VMDOP_START_VM_END:
146 res = vmm_start_vm(imsg, &id, &pid);
147 /* Check if the ID can be mapped correctly */
148 if ((id = vm_id2vmid(id, NULL)) == 0)
149 res = ENOENT;
150 cmd = IMSG_VMDOP_START_VM_RESPONSE;
151 break;
152 case IMSG_VMDOP_TERMINATE_VM_REQUEST:
153 IMSG_SIZE_CHECK(imsg, &vid);
154 memcpy(&vid, imsg->data, sizeof(vid));
155 id = vid.vid_id;
156 flags = vid.vid_flags;
157
158 DPRINTF("%s: recv'ed TERMINATE_VM for %d", __func__, id);
159
160 cmd = IMSG_VMDOP_TERMINATE_VM_RESPONSE;
161
162 if (id == 0) {
163 res = ENOENT;
164 } else if ((vm = vm_getbyvmid(id)) != NULL) {
165 if (flags & VMOP_FORCE) {
166 vtp.vtp_vm_id = vm_vmid2id(vm->vm_vmid, vm);
167 vm->vm_state |= VM_STATE_SHUTDOWN;
168 (void)terminate_vm(&vtp);
169 res = 0;
170 } else if (!(vm->vm_state & VM_STATE_SHUTDOWN)) {
171 log_debug("%s: sending shutdown request"
172 " to vm %d", __func__, id);
173
174 /*
175 * Request reboot but mark the VM as shutting
176 * down. This way we can terminate the VM after
177 * the triple fault instead of reboot and
178 * avoid being stuck in the ACPI-less powerdown
179 * ("press any key to reboot") of the VM.
180 */
181 vm->vm_state |= VM_STATE_SHUTDOWN;
182 if (imsg_compose_event(&vm->vm_iev,
183 IMSG_VMDOP_VM_REBOOT,
184 0, 0, -1, NULL, 0) == -1)
185 res = errno;
186 else
187 res = 0;
188 } else {
189 /*
190 * VM is currently being shutdown.
191 * Check to see if the VM process is still
192 * active. If not, return VMD_VM_STOP_INVALID.
193 */
194 if (vm_vmid2id(vm->vm_vmid, vm) == 0) {
195 log_debug("%s: no vm running anymore",
196 __func__);
197 res = VMD_VM_STOP_INVALID;
198 }
199 }
200 } else {
201 /* VM doesn't exist, cannot stop vm */
202 log_debug("%s: cannot stop vm that is not running",
203 __func__);
204 res = VMD_VM_STOP_INVALID;
205 }
206 break;
207 case IMSG_VMDOP_GET_INFO_VM_REQUEST:
208 res = get_info_vm(ps, imsg, 0);
209 cmd = IMSG_VMDOP_GET_INFO_VM_END_DATA;
210 break;
211 case IMSG_VMDOP_CONFIG:
212 config_getconfig(env, imsg);
213 break;
214 case IMSG_CTL_RESET:
215 IMSG_SIZE_CHECK(imsg, &mode);
216 memcpy(&mode, imsg->data, sizeof(mode));
217
218 if (mode & CONFIG_VMS) {
219 /* Terminate and remove all VMs */
220 vmm_shutdown();
221 mode &= ~CONFIG_VMS;
222 }
223
224 config_getreset(env, imsg);
225 break;
226 case IMSG_CTL_VERBOSE:
227 IMSG_SIZE_CHECK(imsg, &verbose);
228 memcpy(&verbose, imsg->data, sizeof(verbose));
229 log_setverbose(verbose);
230
231 /* Forward message to each VM process */
232 TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
233 imsg_compose_event(&vm->vm_iev,
234 imsg->hdr.type, imsg->hdr.peerid, imsg->hdr.pid,
235 -1, &verbose, sizeof(verbose));
236 }
237 break;
238 case IMSG_VMDOP_PAUSE_VM:
239 IMSG_SIZE_CHECK(imsg, &vid);
240 memcpy(&vid, imsg->data, sizeof(vid));
241 id = vid.vid_id;
242 if ((vm = vm_getbyvmid(id)) == NULL) {
243 res = ENOENT;
244 cmd = IMSG_VMDOP_PAUSE_VM_RESPONSE;
245 break;
246 }
247 imsg_compose_event(&vm->vm_iev,
248 imsg->hdr.type, imsg->hdr.peerid, imsg->hdr.pid,
249 imsg->fd, &vid, sizeof(vid));
250 break;
251 case IMSG_VMDOP_UNPAUSE_VM:
252 IMSG_SIZE_CHECK(imsg, &vid);
253 memcpy(&vid, imsg->data, sizeof(vid));
254 id = vid.vid_id;
255 if ((vm = vm_getbyvmid(id)) == NULL) {
256 res = ENOENT;
257 cmd = IMSG_VMDOP_UNPAUSE_VM_RESPONSE;
258 break;
259 }
260 imsg_compose_event(&vm->vm_iev,
261 imsg->hdr.type, imsg->hdr.peerid, imsg->hdr.pid,
262 imsg->fd, &vid, sizeof(vid));
263 break;
264 case IMSG_VMDOP_SEND_VM_REQUEST:
265 IMSG_SIZE_CHECK(imsg, &vid);
266 memcpy(&vid, imsg->data, sizeof(vid));
267 id = vid.vid_id;
268 if ((vm = vm_getbyvmid(id)) == NULL) {
269 res = ENOENT;
270 close(imsg->fd);
271 cmd = IMSG_VMDOP_START_VM_RESPONSE;
272 break;
273 }
274 imsg_compose_event(&vm->vm_iev,
275 imsg->hdr.type, imsg->hdr.peerid, imsg->hdr.pid,
276 imsg->fd, &vid, sizeof(vid));
277 break;
278 case IMSG_VMDOP_RECEIVE_VM_REQUEST:
279 IMSG_SIZE_CHECK(imsg, &vmc);
280 memcpy(&vmc, imsg->data, sizeof(vmc));
281 if (vm_register(ps, &vmc, &vm,
282 imsg->hdr.peerid, vmc.vmc_owner.uid) != 0) {
283 res = errno;
284 cmd = IMSG_VMDOP_START_VM_RESPONSE;
285 break;
286 }
287 vm->vm_tty = imsg->fd;
288 vm->vm_state |= VM_STATE_RECEIVED;
289 vm->vm_state |= VM_STATE_PAUSED;
290 break;
291 case IMSG_VMDOP_RECEIVE_VM_END:
292 if ((vm = vm_getbyvmid(imsg->hdr.peerid)) == NULL) {
293 res = ENOENT;
294 close(imsg->fd);
295 cmd = IMSG_VMDOP_START_VM_RESPONSE;
296 break;
297 }
298 vm->vm_receive_fd = imsg->fd;
299 res = vmm_start_vm(imsg, &id, &pid);
300 /* Check if the ID can be mapped correctly */
301 if ((id = vm_id2vmid(id, NULL)) == 0)
302 res = ENOENT;
303 cmd = IMSG_VMDOP_START_VM_RESPONSE;
304 break;
305 case IMSG_VMDOP_PRIV_GET_ADDR_RESPONSE:
306 IMSG_SIZE_CHECK(imsg, &var);
307 memcpy(&var, imsg->data, sizeof(var));
308 if ((vm = vm_getbyvmid(var.var_vmid)) == NULL) {
309 res = ENOENT;
310 break;
311 }
312 /* Forward hardware address details to the guest vm */
313 imsg_compose_event(&vm->vm_iev,
314 imsg->hdr.type, imsg->hdr.peerid, imsg->hdr.pid,
315 imsg->fd, &var, sizeof(var));
316 break;
317 default:
318 return (-1);
319 }
320
321 switch (cmd) {
322 case 0:
323 break;
324 case IMSG_VMDOP_START_VM_RESPONSE:
325 if (res != 0) {
326 /* Remove local reference if it exists */
327 if ((vm = vm_getbyvmid(imsg->hdr.peerid)) != NULL) {
328 log_debug("%s: removing vm, START_VM_RESPONSE",
329 __func__);
330 vm_remove(vm, __func__);
331 }
332 }
333 if (id == 0)
334 id = imsg->hdr.peerid;
335 /* FALLTHROUGH */
336 case IMSG_VMDOP_PAUSE_VM_RESPONSE:
337 case IMSG_VMDOP_UNPAUSE_VM_RESPONSE:
338 case IMSG_VMDOP_TERMINATE_VM_RESPONSE:
339 memset(&vmr, 0, sizeof(vmr));
340 vmr.vmr_result = res;
341 vmr.vmr_id = id;
342 vmr.vmr_pid = pid;
343 if (proc_compose_imsg(ps, PROC_PARENT, -1, cmd,
344 peerid, -1, &vmr, sizeof(vmr)) == -1)
345 return (-1);
346 break;
347 default:
348 if (proc_compose_imsg(ps, PROC_PARENT, -1, cmd,
349 peerid, -1, &res, sizeof(res)) == -1)
350 return (-1);
351 break;
352 }
353
354 return (0);
355}
356
357void
358vmm_sighdlr(int sig, short event, void *arg)
359{
360 struct privsep *ps = arg;
361 int status, ret = 0;
362 uint32_t vmid;
363 pid_t pid;
364 struct vmop_result vmr;
365 struct vmd_vm *vm;
366 struct vm_terminate_params vtp;
367
368 log_debug("%s: handling signal %d", __func__, sig);
369 switch (sig) {
370 case SIGCHLD:
371 do {
372 pid = waitpid(-1, &status, WNOHANG);
373 if (pid <= 0)
374 continue;
375
376 if (WIFEXITED(status) || WIFSIGNALED(status)) {
377 vm = vm_getbypid(pid);
378 if (vm == NULL) {
379 /*
380 * If the VM is gone already, it
381 * got terminated via a
382 * IMSG_VMDOP_TERMINATE_VM_REQUEST.
383 */
384 continue;
385 }
386
387 if (WIFEXITED(status))
388 ret = WEXITSTATUS(status);
389
390 /* Don't reboot on pending shutdown */
391 if (ret == EAGAIN &&
392 (vm->vm_state & VM_STATE_SHUTDOWN))
393 ret = 0;
394
395 vmid = vm->vm_params.vmc_params.vcp_id;
396 vtp.vtp_vm_id = vmid;
397
398 if (terminate_vm(&vtp) == 0)
399 log_debug("%s: terminated vm %s"
400 " (id %d)", __func__,
401 vm->vm_params.vmc_params.vcp_name,
402 vm->vm_vmid);
403
404 memset(&vmr, 0, sizeof(vmr));
405 vmr.vmr_result = ret;
406 vmr.vmr_id = vm_id2vmid(vmid, vm);
407 if (proc_compose_imsg(ps, PROC_PARENT,
408 -1, IMSG_VMDOP_TERMINATE_VM_EVENT,
409 vm->vm_peerid, -1,
410 &vmr, sizeof(vmr)) == -1)
411 log_warnx("could not signal "
412 "termination of VM %u to "
413 "parent", vm->vm_vmid);
414
415 vm_remove(vm, __func__);
416 } else
417 fatalx("unexpected cause of SIGCHLD");
418 } while (pid > 0 || (pid == -1 && errno == EINTR));
419 break;
420 default:
421 fatalx("unexpected signal");
422 }
423}
424
425/*
426 * vmm_shutdown
427 *
428 * Terminate VMs on shutdown to avoid "zombie VM" processes.
429 */
430void
431vmm_shutdown(void)
432{
433 struct vm_terminate_params vtp;
434 struct vmd_vm *vm, *vm_next;
435
436 TAILQ_FOREACH_SAFE(vm, env->vmd_vms, vm_entry, vm_next) {
437 vtp.vtp_vm_id = vm_vmid2id(vm->vm_vmid, vm);
438
439 /* XXX suspend or request graceful shutdown */
440 (void)terminate_vm(&vtp);
441 vm_remove(vm, __func__);
442 }
443}
444
445/*
446 * vmm_pipe
447 *
448 * Create a new imsg control channel between vmm parent and a VM
449 * (can be called on both sides).
450 */
451int
452vmm_pipe(struct vmd_vm *vm, int fd, void (*cb)(int, short, void *))
453{
454 struct imsgev *iev = &vm->vm_iev;
455
456 if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1) {
457 log_warn("failed to set nonblocking mode on vm pipe");
458 return (-1);
459 }
460
461 imsg_init(&iev->ibuf, fd);
462 iev->handler = cb;
463 iev->data = vm;
464 imsg_event_add(iev);
465
466 return (0);
467}
468
469/*
470 * vmm_dispatch_vm
471 *
472 * imsg callback for messages that are received from a VM child process.
473 */
474void
475vmm_dispatch_vm(int fd, short event, void *arg)
476{
477 struct vmd_vm *vm = arg;
478 struct vmop_result vmr;
479 struct imsgev *iev = &vm->vm_iev;
480 struct imsgbuf *ibuf = &iev->ibuf;
481 struct imsg imsg;
482 ssize_t n;
483 unsigned int i;
484
485 if (event & EV_READ) {
486 if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
487 fatal("%s: imsg_read", __func__);
488 if (n == 0) {
489 /* This pipe is dead, so remove the event handler */
490 event_del(&iev->ev);
491 return;
492 }
493 }
494
495 if (event & EV_WRITE) {
496 if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
497 fatal("%s: msgbuf_write fd %d", __func__, ibuf->fd);
498 if (n == 0) {
499 /* This pipe is dead, so remove the event handler */
500 event_del(&iev->ev);
501 return;
502 }
503 }
504
505 for (;;) {
506 if ((n = imsg_get(ibuf, &imsg)) == -1)
507 fatal("%s: imsg_get", __func__);
508 if (n == 0)
509 break;
510
511 DPRINTF("%s: got imsg %d from %s",
512 __func__, imsg.hdr.type,
513 vm->vm_params.vmc_params.vcp_name);
514
515 switch (imsg.hdr.type) {
516 case IMSG_VMDOP_VM_SHUTDOWN:
517 vm->vm_state |= VM_STATE_SHUTDOWN;
518 break;
519 case IMSG_VMDOP_VM_REBOOT:
520 vm->vm_state &= ~VM_STATE_SHUTDOWN;
521 break;
522 case IMSG_VMDOP_SEND_VM_RESPONSE:
523 IMSG_SIZE_CHECK(&imsg, &vmr);
524 case IMSG_VMDOP_PAUSE_VM_RESPONSE:
525 case IMSG_VMDOP_UNPAUSE_VM_RESPONSE:
526 for (i = 0; i < nitems(procs); i++) {
527 if (procs[i].p_id == PROC_PARENT) {
528 proc_forward_imsg(procs[i].p_ps,
529 &imsg, PROC_PARENT, -1);
530 break;
531 }
532 }
533 break;
534
535 default:
536 fatalx("%s: got invalid imsg %d from %s",
537 __func__, imsg.hdr.type,
538 vm->vm_params.vmc_params.vcp_name);
539 }
540 imsg_free(&imsg);
541 }
542 imsg_event_add(iev);
543}
544
545/*
546 * terminate_vm
547 *
548 * Requests vmm(4) to terminate the VM whose ID is provided in the
549 * supplied vm_terminate_params structure (vtp->vtp_vm_id)
550 *
551 * Parameters
552 * vtp: vm_terminate_params struct containing the ID of the VM to terminate
553 *
554 * Return values:
555 * 0: success
556 * !0: ioctl to vmm(4) failed (eg, ENOENT if the supplied VM is not valid)
557 */
558int
559terminate_vm(struct vm_terminate_params *vtp)
560{
561 if (ioctl(env->vmd_fd, VMM_IOC_TERM, vtp) == -1)
562 return (errno);
563
564 return (0);
565}
566
567/*
568 * opentap
569 *
570 * Opens the next available tap device, up to MAX_TAP.
571 *
572 * Parameters
573 * ifname: a buffer of at least IF_NAMESIZE bytes.
574 *
575 * Returns a file descriptor to the tap node opened, or -1 if no tap
576 * devices were available.
577 */
578int
579opentap(char *ifname)
580{
581 int i, fd;
582 char path[PATH_MAX];
583
584 for (i = 0; i < MAX_TAP; i++) {
585 snprintf(path, PATH_MAX, "/dev/tap%d", i);
586 fd = open(path, O_RDWR | O_NONBLOCK);
587 if (fd != -1) {
588 snprintf(ifname, IF_NAMESIZE, "tap%d", i);
589 return (fd);
590 }
591 }
592 strlcpy(ifname, "tap", IF_NAMESIZE);
593
594 return (-1);
595}
596
597/*
598 * vmm_start_vm
599 *
600 * Prepares and forks a new VM process.
601 *
602 * Parameters:
603 * imsg: The VM data structure that is including the VM create parameters.
604 * id: Returns the VM id as reported by the kernel and obtained from the VM.
605 * pid: Returns the VM pid to the parent.
606 *
607 * Return values:
608 * 0: success
609 * !0: failure - typically an errno indicating the source of the failure
610 */
611int
612vmm_start_vm(struct imsg *imsg, uint32_t *id, pid_t *pid)
613{
614 struct vm_create_params *vcp;
615 struct vmd_vm *vm;
616 int ret = EINVAL;
617 int fds[2];
618 size_t i, j;
619
620 if ((vm = vm_getbyvmid(imsg->hdr.peerid)) == NULL) {
621 log_warnx("%s: can't find vm", __func__);
622 ret = ENOENT;
623 goto err;
624 }
625 vcp = &vm->vm_params.vmc_params;
626
627 if (!(vm->vm_state & VM_STATE_RECEIVED)) {
628 if ((vm->vm_tty = imsg->fd) == -1) {
629 log_warnx("%s: can't get tty", __func__);
630 goto err;
631 }
632 }
633
634 if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, fds) == -1)
635 fatal("socketpair");
636
637 /* Start child vmd for this VM (fork, chroot, drop privs) */
638 ret = fork();
639
640 /* Start child failed? - cleanup and leave */
641 if (ret == -1) {
642 log_warnx("%s: start child failed", __func__);
643 ret = EIO;
644 goto err;
645 }
646
647 if (ret > 0) {
648 /* Parent */
649 vm->vm_pid = ret;
650 close(fds[1]);
651
652 for (i = 0 ; i < vcp->vcp_ndisks; i++) {
653 for (j = 0; j < VM_MAX_BASE_PER_DISK; j++) {
654 if (vm->vm_disks[i][j] != -1)
655 close(vm->vm_disks[i][j]);
656 vm->vm_disks[i][j] = -1;
657 }
658 }
659 for (i = 0 ; i < vcp->vcp_nnics; i++) {
660 close(vm->vm_ifs[i].vif_fd);
661 vm->vm_ifs[i].vif_fd = -1;
662 }
663 if (vm->vm_kernel != -1) {
664 close(vm->vm_kernel);
665 vm->vm_kernel = -1;
666 }
667 if (vm->vm_cdrom != -1) {
668 close(vm->vm_cdrom);
669 vm->vm_cdrom = -1;
670 }
671 if (vm->vm_tty != -1) {
672 close(vm->vm_tty);
673 vm->vm_tty = -1;
674 }
675
676 /* Read back the kernel-generated vm id from the child */
677 if (read(fds[0], &vcp->vcp_id, sizeof(vcp->vcp_id)) !=
678 sizeof(vcp->vcp_id))
679 fatal("read vcp id");
680
681 if (vcp->vcp_id == 0)
682 goto err;
683
684 *id = vcp->vcp_id;
685 *pid = vm->vm_pid;
686
687 if (vmm_pipe(vm, fds[0], vmm_dispatch_vm) == -1)
688 fatal("setup vm pipe");
689
690 return (0);
691 } else {
692 /* Child */
693 close(fds[0]);
694 close(PROC_PARENT_SOCK_FILENO);
695
696 ret = start_vm(vm, fds[1]);
697
698 _exit(ret);
699 }
700
701 return (0);
702
703 err:
704 vm_remove(vm, __func__);
705
706 return (ret);
707}
708
709/*
710 * get_info_vm
711 *
712 * Returns a list of VMs known to vmm(4).
713 *
714 * Parameters:
715 * ps: the privsep context.
716 * imsg: the received imsg including the peer id.
717 * terminate: terminate the listed vm.
718 *
719 * Return values:
720 * 0: success
721 * !0: failure (eg, ENOMEM, EIO or another error code from vmm(4) ioctl)
722 */
723int
724get_info_vm(struct privsep *ps, struct imsg *imsg, int terminate)
725{
726 int ret;
727 size_t ct, i;
728 struct vm_info_params vip;
729 struct vm_info_result *info;
730 struct vm_terminate_params vtp;
731 struct vmop_info_result vir;
732
733 /*
734 * We issue the VMM_IOC_INFO ioctl twice, once with an input
735 * buffer size of 0, which results in vmm(4) returning the
736 * number of bytes required back to us in vip.vip_size,
737 * and then we call it again after malloc'ing the required
738 * number of bytes.
739 *
740 * It is possible that we could fail a second time (e.g. if
741 * another VM was created in the instant between the two
742 * ioctls, but in that case the caller can just try again
743 * as vmm(4) will return a zero-sized list in that case.
744 */
745 vip.vip_size = 0;
746 info = NULL;
747 ret = 0;
Value stored to 'ret' is never read
748 memset(&vir, 0, sizeof(vir));
749
750 /* First ioctl to see how many bytes needed (vip.vip_size) */
751 if (ioctl(env->vmd_fd, VMM_IOC_INFO, &vip) == -1)
752 return (errno);
753
754 if (vip.vip_info_ct != 0)
755 return (EIO);
756
757 info = malloc(vip.vip_size);
758 if (info == NULL)
759 return (ENOMEM);
760
761 /* Second ioctl to get the actual list */
762 vip.vip_info = info;
763 if (ioctl(env->vmd_fd, VMM_IOC_INFO, &vip) == -1) {
764 ret = errno;
765 free(info);
766 return (ret);
767 }
768
769 /* Return info */
770 ct = vip.vip_size / sizeof(struct vm_info_result);
771 for (i = 0; i < ct; i++) {
772 if (terminate) {
773 vtp.vtp_vm_id = info[i].vir_id;
774 if ((ret = terminate_vm(&vtp)) != 0)
775 return (ret);
776 log_debug("%s: terminated vm %s (id %d)", __func__,
777 info[i].vir_name, info[i].vir_id);
778 continue;
779 }
780 memcpy(&vir.vir_info, &info[i], sizeof(vir.vir_info));
781 vir.vir_info.vir_id = vm_id2vmid(info[i].vir_id, NULL);
782 if (proc_compose_imsg(ps, PROC_PARENT, -1,
783 IMSG_VMDOP_GET_INFO_VM_DATA, imsg->hdr.peerid, -1,
784 &vir, sizeof(vir)) == -1)
785 return (EIO);
786 }
787 free(info);
788
789 return (0);
790}