// underhill_core/lib.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! This module implements the interactive control process and the entry point
5//! for the underhill environment.
6
7#![cfg(target_os = "linux")]
8#![expect(missing_docs)]
9#![forbid(unsafe_code)]
10
11mod diag;
12mod dispatch;
13mod emuplat;
14mod get_tracing;
15mod inspect_internal;
16mod inspect_proc;
17mod livedump;
18mod loader;
19mod nvme_manager;
20mod options;
21mod reference_time;
22mod servicing;
23mod threadpool_vm_task_backend;
24mod vmbus_relay_unit;
25mod vmgs_logger;
26mod vp;
27mod vpci;
28mod worker;
29mod wrapped_partition;
30
31// `pub` so that the missing_docs warning fires for options without
32// documentation.
33pub use options::Options;
34
35use crate::diag::DiagWorker;
36use crate::dispatch::UhVmRpc;
37use crate::worker::UnderhillEnvCfg;
38use crate::worker::UnderhillRemoteConsoleCfg;
39use crate::worker::UnderhillVmWorker;
40use crate::worker::UnderhillWorkerParameters;
41use anyhow::Context;
42use bootloader_fdt_parser::BootTimes;
43use cvm_tracing::CVM_ALLOWED;
44use framebuffer::FRAMEBUFFER_SIZE;
45use framebuffer::FramebufferAccess;
46use futures::StreamExt;
47use futures_concurrency::stream::Merge;
48use get_tracing::init_tracing;
49use get_tracing::init_tracing_backend;
50use inspect::Inspect;
51use inspect::SensitivityLevel;
52use mesh::CancelContext;
53use mesh::CancelReason;
54use mesh::MeshPayload;
55use mesh::error::RemoteError;
56use mesh::rpc::Rpc;
57use mesh::rpc::RpcSend;
58use mesh_process::Mesh;
59use mesh_process::ProcessConfig;
60use mesh_process::try_run_mesh_host;
61use mesh_tracing::RemoteTracer;
62use mesh_tracing::TracingBackend;
63use mesh_worker::RegisteredWorkers;
64use mesh_worker::WorkerEvent;
65use mesh_worker::WorkerHandle;
66use mesh_worker::WorkerHost;
67use mesh_worker::WorkerHostRunner;
68use mesh_worker::launch_local_worker;
69use mesh_worker::register_workers;
70use pal_async::DefaultDriver;
71use pal_async::DefaultPool;
72use pal_async::task::Spawn;
73#[cfg(feature = "profiler")]
74use profiler_worker::ProfilerWorker;
75#[cfg(feature = "profiler")]
76use profiler_worker::ProfilerWorkerParameters;
77use std::time::Duration;
78use vmsocket::VmAddress;
79use vmsocket::VmListener;
80use vnc_worker_defs::VncParameters;
81
82fn new_underhill_remote_console_cfg(
83    framebuffer_gpa_base: Option<u64>,
84) -> anyhow::Result<(UnderhillRemoteConsoleCfg, Option<FramebufferAccess>)> {
85    if let Some(framebuffer_gpa_base) = framebuffer_gpa_base {
86        // Underhill accesses the framebuffer by using /dev/mshv_vtl_low to read
87        // from a second mapping placed after the end of RAM at a static
88        // location specified by the host.
89        //
90        // Open the file directly rather than use the `hcl` crate to avoid
91        // leaking `hcl` stuff into this crate.
92        //
93        // FUTURE: use an approach that doesn't require double mapping the
94        // framebuffer from the host.
95        let gpa_fd = fs_err::OpenOptions::new()
96            .read(true)
97            .write(true)
98            .open("/dev/mshv_vtl_low")
99            .context("failed to open gpa device")?;
100
101        let vram = sparse_mmap::new_mappable_from_file(gpa_fd.file(), true, false)?;
102        let (fb, fba) = framebuffer::framebuffer(vram, FRAMEBUFFER_SIZE, framebuffer_gpa_base)
103            .context("allocating framebuffer")?;
104        tracing::debug!("framebuffer_gpa_base: {:#x}", framebuffer_gpa_base);
105
106        Ok((
107            UnderhillRemoteConsoleCfg {
108                synth_keyboard: true,
109                synth_mouse: true,
110                synth_video: true,
111                input: mesh::Receiver::new(),
112                framebuffer: Some(fb),
113            },
114            Some(fba),
115        ))
116    } else {
117        Ok((
118            UnderhillRemoteConsoleCfg {
119                synth_keyboard: false,
120                synth_mouse: false,
121                synth_video: false,
122                input: mesh::Receiver::new(),
123                framebuffer: None,
124            },
125            None,
126        ))
127    }
128}
129
130pub fn main() -> anyhow::Result<()> {
131    // Install a panic hook to prefix the current async task name before the
132    // standard panic output.
133    install_task_name_panic_hook();
134
135    if let Some(path) = std::env::var_os("OPENVMM_WRITE_SAVED_STATE_PROTO") {
136        if cfg!(debug_assertions) {
137            mesh::payload::protofile::DescriptorWriter::new(
138                vmcore::save_restore::saved_state_roots(),
139            )
140            .write_to_path(path)
141            .context("failed to write protobuf descriptors")?;
142            return Ok(());
143        } else {
144            // The generated code for this is too large for release builds.
145            anyhow::bail!(".proto output only supported in debug builds");
146        }
147    }
148
149    // FUTURE: create and use the affinitized threadpool here.
150    let (_, tracing_driver) = DefaultPool::spawn_on_thread("tracing");
151
152    // Try to run as a worker host, sending a remote tracer that will forward
153    // tracing events back to the initial process for logging to the host. See
154    // [`get_tracing`] doc comments for more details.
155    //
156    // On success the worker runs to completion and then exits the process (does
157    // not return). Any worker host setup errors are return and bubbled up.
158    try_run_mesh_host("underhill", {
159        let tracing_driver = tracing_driver.clone();
160        async |params: MeshHostParams| {
161            if let Some(remote_tracer) = params.tracer {
162                init_tracing(tracing_driver, remote_tracer).context("failed to init tracing")?;
163            }
164            params.runner.run(RegisteredWorkers).await;
165            Ok(())
166        }
167    })?;
168
169    // Initialize the tracing backend used by this and all subprocesses.
170    let mut tracing = init_tracing_backend(tracing_driver.clone())?;
171    // Initialize tracing from the backend.
172    init_tracing(tracing_driver, tracing.tracer()).context("failed to init tracing")?;
173    DefaultPool::run_with(|driver| do_main(driver, tracing))
174}
175
176fn install_task_name_panic_hook() {
177    use std::io::Write;
178
179    let panic_hook = std::panic::take_hook();
180    std::panic::set_hook(Box::new(move |info| {
181        pal_async::task::with_current_task_metadata(|metadata| {
182            if let Some(metadata) = metadata {
183                let _ = write!(std::io::stderr(), "task '{}', ", metadata.name());
184            }
185        });
186        // This will proceed with writing "thread ... panicked at ..."
187        panic_hook(info);
188    }));
189}
190
191async fn do_main(driver: DefaultDriver, mut tracing: TracingBackend) -> anyhow::Result<()> {
192    let opt = Options::parse(Vec::new(), Vec::new())?;
193
194    let crate_name = build_info::get().crate_name();
195    let crate_revision = build_info::get().scm_revision();
196    tracing::info!(CVM_ALLOWED, ?crate_name, ?crate_revision, "VMM process");
197    log_boot_times().context("failure logging boot times")?;
198
199    // Write the current pid to a file.
200    if let Some(pid_path) = &opt.pid {
201        std::fs::write(pid_path, std::process::id().to_string())
202            .with_context(|| format!("failed to write pid to {}", pid_path.display()))?;
203    }
204
205    let mesh = Mesh::new("underhill".to_string()).context("failed to create mesh")?;
206
207    let r = run_control(driver, &mesh, opt, &mut tracing).await;
208    if let Err(err) = &r {
209        tracing::error!(
210            CVM_ALLOWED,
211            error = err.as_ref() as &dyn std::error::Error,
212            "VM failure"
213        );
214    }
215
216    // Wait a few seconds for child processes to terminate and tracing to finish.
217    CancelContext::new()
218        .with_timeout(Duration::from_secs(10))
219        .until_cancelled(async {
220            mesh.shutdown().await;
221            tracing.shutdown().await;
222        })
223        .await
224        .ok();
225
226    r
227}
228
229fn log_boot_times() -> anyhow::Result<()> {
230    fn diff(start: Option<u64>, end: Option<u64>) -> Option<tracing::field::DebugValue<Duration>> {
231        use reference_time::ReferenceTime;
232        Some(tracing::field::debug(
233            ReferenceTime::new(end?).since(ReferenceTime::new(start?))?,
234        ))
235    }
236
237    // Read boot times provided by the bootloader.
238    let BootTimes {
239        start,
240        end,
241        sidecar_start,
242        sidecar_end,
243    } = BootTimes::new().context("failed to parse boot times")?;
244    tracing::info!(
245        CVM_ALLOWED,
246        start,
247        end,
248        sidecar_start,
249        sidecar_end,
250        elapsed = diff(start, end),
251        sidecar_elapsed = diff(sidecar_start, sidecar_end),
252        "boot loader times"
253    );
254    Ok(())
255}
256
/// State for the in-process diagnostics worker.
struct DiagState {
    // Held only to keep the worker alive; never accessed directly.
    _worker: WorkerHandle,
    // Receives diagnostics requests forwarded by the worker.
    request_recv: mesh::Receiver<diag_server::DiagRequest>,
}
261
262impl DiagState {
263    async fn new() -> anyhow::Result<Self> {
264        // Start the diagnostics worker immediately.
265        let (request_send, request_recv) = mesh::channel();
266        let worker = launch_local_worker::<DiagWorker>(diag::DiagWorkerParameters { request_send })
267            .await
268            .context("failed to launch diagnostics worker")?;
269        Ok(Self {
270            _worker: worker,
271            request_recv,
272        })
273    }
274}
275
/// Handles to the launched workers, plus the RPC channel to the VM worker.
#[derive(Inspect)]
struct Workers {
    /// The main VM worker.
    vm: WorkerHandle,
    /// Channel for sending control RPCs (pause, resume, save, ...) to the VM
    /// worker.
    #[inspect(skip)]
    vm_rpc: mesh::Sender<UhVmRpc>,
    /// The VNC worker, if a framebuffer was configured.
    vnc: Option<WorkerHandle>,
    /// The gdbstub worker, if debugging was enabled.
    #[cfg(feature = "gdb")]
    gdb: Option<WorkerHandle>,
}
285
/// Parameters sent to a newly launched mesh host process.
#[derive(MeshPayload)]
struct MeshHostParams {
    /// Remote tracer used to forward tracing events back to the initial
    /// process, if tracing is set up.
    tracer: Option<RemoteTracer>,
    /// Runner the host uses to service worker launch requests.
    runner: WorkerHostRunner,
}
291
292async fn launch_mesh_host(
293    mesh: &Mesh,
294    name: &str,
295    tracer: Option<RemoteTracer>,
296) -> anyhow::Result<WorkerHost> {
297    let (host, runner) = mesh_worker::worker_host();
298    mesh.launch_host(ProcessConfig::new(name), MeshHostParams { tracer, runner })
299        .await?;
300    Ok(host)
301}
302
/// Launches the workers that make up the VM: the VM worker itself and,
/// depending on configuration, the VNC and gdbstub workers, each in its own
/// mesh host process.
async fn launch_workers(
    mesh: &Mesh,
    tracing: &mut TracingBackend,
    control_send: mesh::Sender<ControlRequest>,
    opt: Options,
) -> anyhow::Result<Workers> {
    // Translate the command-line options into the VM worker's environment
    // configuration.
    let env_cfg = UnderhillEnvCfg {
        vmbus_max_version: opt.vmbus_max_version,
        vmbus_enable_mnf: opt.vmbus_enable_mnf,
        vmbus_force_confidential_external_memory: opt.vmbus_force_confidential_external_memory,
        // 0 means "disabled"; any other value is a delay in milliseconds.
        vmbus_channel_unstick_delay: (opt.vmbus_channel_unstick_delay_ms != 0)
            .then(|| Duration::from_millis(opt.vmbus_channel_unstick_delay_ms)),
        cmdline_append: opt.cmdline_append.clone(),
        reformat_vmgs: opt.reformat_vmgs,
        vtl0_starts_paused: opt.vtl0_starts_paused,
        emulated_serial_wait_for_rts: opt.serial_wait_for_rts,
        force_load_vtl0_image: opt.force_load_vtl0_image,
        nvme_vfio: opt.nvme_vfio,
        mcr: opt.mcr,
        halt_on_guest_halt: opt.halt_on_guest_halt,
        no_sidecar_hotplug: opt.no_sidecar_hotplug,
        gdbstub: opt.gdbstub,
        hide_isolation: opt.hide_isolation,
        nvme_keep_alive: opt.nvme_keep_alive,
        mana_keep_alive: opt.mana_keep_alive,
        nvme_always_flr: opt.nvme_always_flr,
        test_configuration: opt.test_configuration,
        disable_uefi_frontpage: opt.disable_uefi_frontpage,
        default_boot_always_attempt: opt.default_boot_always_attempt,
        guest_state_lifetime: opt.guest_state_lifetime,
        guest_state_encryption_policy: opt.guest_state_encryption_policy,
        strict_encryption_policy: opt.strict_encryption_policy,
        attempt_ak_cert_callback: opt.attempt_ak_cert_callback,
        enable_vpci_relay: opt.enable_vpci_relay,
        disable_proxy_redirect: opt.disable_proxy_redirect,
        disable_lower_vtl_timer_virt: opt.disable_lower_vtl_timer_virt,
        config_timeout_in_seconds: opt.config_timeout_in_seconds,
        servicing_timeout_dump_collection_in_ms: opt.servicing_timeout_dump_collection_in_ms,
    };

    let (mut remote_console_cfg, framebuffer_access) =
        new_underhill_remote_console_cfg(opt.framebuffer_gpa_base)?;

    // Launch the VNC worker only when a framebuffer is available.
    let mut vnc_worker = None;
    if let Some(framebuffer) = framebuffer_access {
        let listener = VmListener::bind(VmAddress::vsock_any(opt.vnc_port))
            .context("failed to bind socket")?;

        let input_send = remote_console_cfg.input.sender();

        let vnc_host = launch_mesh_host(mesh, "vnc", Some(tracing.tracer()))
            .await
            .context("spawning vnc process failed")?;

        vnc_worker = Some(
            vnc_host
                .launch_worker(
                    vnc_worker_defs::VNC_WORKER_VMSOCKET,
                    VncParameters {
                        listener,
                        framebuffer,
                        input_send,
                    },
                )
                .await?,
        )
    }

    #[cfg(feature = "gdb")]
    let mut gdbstub_worker = None;
    // `debugger_rpc` is only ever set when the gdb feature is enabled.
    #[cfg_attr(not(feature = "gdb"), expect(unused_mut))]
    let mut debugger_rpc = None;
    #[cfg(feature = "gdb")]
    if opt.gdbstub {
        let listener = VmListener::bind(VmAddress::vsock_any(opt.gdbstub_port))
            .context("failed to bind socket")?;

        let gdb_host = launch_mesh_host(mesh, "gdb", Some(tracing.tracer()))
            .await
            .context("failed to spawn gdb host process")?;

        // Get the VP count of this machine. It's too early to read it directly
        // from IGVM parameters, but the kernel already has the IGVM parsed VP
        // count via the boot loader anyways.
        let vp_count =
            pal::unix::affinity::max_present_cpu().context("failed to get max present cpu")? + 1;

        let (send, recv) = mesh::channel();
        debugger_rpc = Some(recv);
        gdbstub_worker = Some(
            gdb_host
                .launch_worker(
                    debug_worker_defs::DEBUGGER_VSOCK_WORKER,
                    debug_worker_defs::DebuggerParameters {
                        listener,
                        req_chan: send,
                        vp_count,
                        target_arch: if cfg!(guest_arch = "x86_64") {
                            debug_worker_defs::TargetArch::X86_64
                        } else {
                            debug_worker_defs::TargetArch::Aarch64
                        },
                    },
                )
                .await?,
        );
    }
    let (vm_rpc, vm_rpc_rx) = mesh::channel();

    // Spawn the worker in a separate process in case the diagnostics server (in
    // this process) is used to run gdbserver against it, or in case it needs to
    // be restarted.
    let host = launch_mesh_host(mesh, "vm", Some(tracing.tracer()))
        .await
        .context("failed to launch worker process")?;

    let vm_worker = host
        .start_worker(
            worker::UNDERHILL_WORKER,
            UnderhillWorkerParameters {
                env_cfg,
                remote_console_cfg,
                debugger_rpc,
                vm_rpc: vm_rpc_rx,
                control_send,
            },
        )
        .context("failed to launch worker")?;

    Ok(Workers {
        vm: vm_worker,
        vm_rpc,
        vnc: vnc_worker,
        #[cfg(feature = "gdb")]
        gdb: gdbstub_worker,
    })
}
440
/// State for inspect only.
#[derive(Inspect)]
enum ControlState {
    /// Workers have not been launched yet; waiting for a start request.
    WaitingForStart,
    /// Workers are being launched.
    Starting,
    /// The VM worker has reported that it started.
    Started,
    /// A worker restart is in flight.
    Restarting,
}
449
/// Requests sent from the workers back to the control process.
#[derive(MeshPayload)]
pub enum ControlRequest {
    /// Flush the tracing logs, subject to the provided cancel context.
    FlushLogs(Rpc<CancelContext, Result<(), CancelReason>>),
    /// Launch a new worker host process with the given name.
    MakeWorker(Rpc<String, Result<WorkerHost, RemoteError>>),
}
455
456async fn run_control(
457    driver: DefaultDriver,
458    mesh: &Mesh,
459    opt: Options,
460    mut tracing: &mut TracingBackend,
461) -> anyhow::Result<()> {
462    let (control_send, mut control_recv) = mesh::channel();
463    let mut control_send = Some(control_send);
464
465    if opt.signal_vtl0_started {
466        signal_vtl0_started(&driver)
467            .await
468            .context("failed to signal vtl0 started")?;
469    }
470
471    let mut diag = DiagState::new().await?;
472
473    let (diag_reinspect_send, mut diag_reinspect_recv) = mesh::channel();
474    #[cfg(feature = "profiler")]
475    let mut profiler_host = None;
476    let mut state;
477    let mut workers = if opt.wait_for_start {
478        state = ControlState::WaitingForStart;
479        None
480    } else {
481        state = ControlState::Starting;
482        let workers = launch_workers(mesh, tracing, control_send.take().unwrap(), opt)
483            .await
484            .context("failed to launch workers")?;
485        Some(workers)
486    };
487
488    enum Event {
489        Diag(diag_server::DiagRequest),
490        Worker(WorkerEvent),
491        Control(ControlRequest),
492    }
493
494    let mut restart_rpc = None;
495    #[cfg(feature = "mem-profile-tracing")]
496    let mut profiler = mem_profile_tracing::HeapProfiler::new();
497    loop {
498        let event = {
499            let mut stream = (
500                (&mut diag.request_recv).map(Event::Diag),
501                (&mut diag_reinspect_recv)
502                    .map(|req| Event::Diag(diag_server::DiagRequest::Inspect(req))),
503                (&mut control_recv).map(Event::Control),
504                futures::stream::select_all(workers.as_mut().map(|w| &mut w.vm)).map(Event::Worker),
505            )
506                .merge();
507
508            let Some(event) = stream.next().await else {
509                break;
510            };
511            event
512        };
513
514        match event {
515            Event::Diag(request) => {
516                match request {
517                    diag_server::DiagRequest::Start(rpc) => {
518                        rpc.handle_failable(async |params| {
519                            if workers.is_some() {
520                                Err(anyhow::anyhow!("workers have already been started"))?;
521                            }
522                            let new_opt = Options::parse(params.args, params.env)
523                                .context("failed to parse new options")?;
524
525                            workers = Some(
526                                launch_workers(
527                                    mesh,
528                                    tracing,
529                                    control_send.take().unwrap(),
530                                    new_opt,
531                                )
532                                .await?,
533                            );
534                            state = ControlState::Starting;
535                            anyhow::Ok(())
536                        })
537                        .await
538                    }
539                    diag_server::DiagRequest::Inspect(deferred) => deferred.respond(|resp| {
540                        resp.sensitivity_field("mesh", SensitivityLevel::Safe, mesh)
541                            .sensitivity_field_mut("trace", SensitivityLevel::Safe, &mut tracing)
542                            .sensitivity_field(
543                                "build_info",
544                                SensitivityLevel::Safe,
545                                build_info::get(),
546                            )
547                            .sensitivity_child(
548                                "proc",
549                                SensitivityLevel::Safe,
550                                inspect_proc::inspect_proc,
551                            )
552                            .sensitivity_field("control_state", SensitivityLevel::Safe, &state)
553                            // This node can not be renamed due to stability guarantees.
554                            // See the comment at the top of inspect_internal for more details.
555                            .sensitivity_child("uhdiag", SensitivityLevel::Safe, |req| {
556                                inspect_internal::inspect_internal_diagnostics(
557                                    req,
558                                    &diag_reinspect_send,
559                                    &driver,
560                                )
561                            });
562
563                        resp.merge(&workers);
564                    }),
565                    diag_server::DiagRequest::Crash(pid) => {
566                        mesh.crash(pid);
567                    }
568                    diag_server::DiagRequest::Restart(rpc) => {
569                        let Some(workers) = &mut workers else {
570                            rpc.complete(Err(RemoteError::new(anyhow::anyhow!(
571                                "worker has not been started yet"
572                            ))));
573                            continue;
574                        };
575
576                        let r = async {
577                            if restart_rpc.is_some() {
578                                anyhow::bail!("previous restart still in progress");
579                            }
580
581                            let host = launch_mesh_host(mesh, "vm", Some(tracing.tracer()))
582                                .await
583                                .context("failed to launch worker process")?;
584
585                            workers.vm.restart(&host);
586                            Ok(())
587                        }
588                        .await;
589
590                        if r.is_err() {
591                            rpc.complete(r.map_err(RemoteError::new));
592                        } else {
593                            state = ControlState::Restarting;
594                            restart_rpc = Some(rpc);
595                        }
596                    }
597                    diag_server::DiagRequest::Pause(rpc) => {
598                        let Some(workers) = &mut workers else {
599                            rpc.complete(Err(RemoteError::new(anyhow::anyhow!(
600                                "worker has not been started yet"
601                            ))));
602                            continue;
603                        };
604
605                        // create the req future output the spawn, so that
606                        // we don't need to clone + move vm_rpc.
607                        let req = workers.vm_rpc.call(UhVmRpc::Pause, ());
608
609                        // FUTURE: consider supporting cancellation
610                        driver
611                            .spawn("diag-pause", async move {
612                                let was_paused = req.await.expect("failed to pause VM");
613                                rpc.handle_failable_sync(|_| {
614                                    if !was_paused {
615                                        Err(anyhow::anyhow!("VM is already paused"))
616                                    } else {
617                                        Ok(())
618                                    }
619                                });
620                            })
621                            .detach();
622                    }
623                    diag_server::DiagRequest::PacketCapture(rpc) => {
624                        let Some(workers) = &mut workers else {
625                            rpc.complete(Err(RemoteError::new(anyhow::anyhow!(
626                                "worker has not been started yet"
627                            ))));
628                            continue;
629                        };
630
631                        workers.vm_rpc.send(UhVmRpc::PacketCapture(rpc));
632                    }
633                    #[cfg(feature = "mem-profile-tracing")]
634                    diag_server::DiagRequest::MemoryProfileTrace(rpc) => {
635                        rpc.handle_failable(async |pid| {
636                            if pid == std::process::id() as i32 {
637                                anyhow::Ok(profiler.capture_and_restart())
638                            } else {
639                                let Some(workers) = &mut workers else {
640                                    anyhow::bail!("workers have not been started yet");
641                                };
642
643                                let result = workers
644                                    .vm_rpc
645                                    .call(UhVmRpc::MemoryProfileTrace, pid)
646                                    .await
647                                    .context("failed to get memory profile from worker process")?;
648                                Ok(result?)
649                            }
650                        })
651                        .await
652                    }
653                    diag_server::DiagRequest::Resume(rpc) => {
654                        let Some(workers) = &mut workers else {
655                            rpc.complete(Err(RemoteError::new(anyhow::anyhow!(
656                                "worker has not been started yet"
657                            ))));
658                            continue;
659                        };
660
661                        let was_resumed = workers
662                            .vm_rpc
663                            .call(UhVmRpc::Resume, ())
664                            .await
665                            .context("failed to resumed VM")?;
666
667                        let was_halted = workers
668                            .vm_rpc
669                            .call(UhVmRpc::ClearHalt, ())
670                            .await
671                            .context("failed to clear halt from VPs")?;
672
673                        rpc.handle_sync(|_| {
674                            if was_resumed || was_halted {
675                                Ok(())
676                            } else {
677                                Err(RemoteError::new(anyhow::anyhow!("VM is currently running")))
678                            }
679                        });
680                    }
681                    diag_server::DiagRequest::Save(rpc) => {
682                        let Some(workers) = &mut workers else {
683                            rpc.complete(Err(RemoteError::new(anyhow::anyhow!(
684                                "worker has not been started yet"
685                            ))));
686                            continue;
687                        };
688
689                        workers.vm_rpc.send(UhVmRpc::Save(rpc));
690                    }
691                    #[cfg(feature = "profiler")]
692                    diag_server::DiagRequest::Profile(rpc) => {
693                        let (rpc_params, rpc_sender) = rpc.split();
694                        // Create profiler host if there is none created before
695                        if profiler_host.is_none() {
696                            match launch_mesh_host(mesh, "profiler", Some(tracing.tracer()))
697                                .await
698                                .context("failed to launch profiler host")
699                            {
700                                Ok(host) => {
701                                    profiler_host = Some(host);
702                                }
703                                Err(e) => {
704                                    rpc_sender.complete(Err(RemoteError::new(e)));
705                                    continue;
706                                }
707                            }
708                        }
709
710                        let profiling_duration = rpc_params.duration;
711                        let host = profiler_host.as_ref().unwrap();
712                        let mut profiler_worker;
713                        match host
714                            .launch_worker(
715                                profiler_worker::PROFILER_WORKER,
716                                ProfilerWorkerParameters {
717                                    profiler_request: rpc_params,
718                                },
719                            )
720                            .await
721                        {
722                            Ok(worker) => {
723                                profiler_worker = worker;
724                            }
725                            Err(e) => {
726                                rpc_sender.complete(Err(RemoteError::new(e)));
727                                continue;
728                            }
729                        }
730
731                        driver
732                            .spawn("profiler_worker", async move {
733                                let result = CancelContext::new()
734                                    .with_timeout(Duration::from_secs(profiling_duration + 30))
735                                    .until_cancelled(profiler_worker.join())
736                                    .await
737                                    .context("profiler worker cancelled")
738                                    .and_then(|result| result.context("profiler worker failed"))
739                                    .map_err(RemoteError::new);
740
741                                rpc_sender.complete(result);
742                            })
743                            .detach();
744                    }
745                }
746            }
747            Event::Worker(event) => match event {
748                WorkerEvent::Started => {
749                    if let Some(response) = restart_rpc.take() {
750                        tracing::info!(CVM_ALLOWED, "restart complete");
751                        response.complete(Ok(()));
752                    } else {
753                        tracing::info!(CVM_ALLOWED, "vm worker started");
754                    }
755                    state = ControlState::Started;
756                }
757                WorkerEvent::Stopped => {
758                    anyhow::bail!("worker unexpectedly stopped");
759                }
760                WorkerEvent::Failed(err) => {
761                    return Err(anyhow::Error::from(err)).context("vm worker failed");
762                }
763                WorkerEvent::RestartFailed(err) => {
764                    tracing::error!(
765                        CVM_ALLOWED,
766                        error = &err as &dyn std::error::Error,
767                        "restart failed"
768                    );
769                    restart_rpc.take().unwrap().complete(Err(err));
770                    state = ControlState::Started;
771                }
772            },
773            Event::Control(req) => match req {
774                ControlRequest::FlushLogs(rpc) => {
775                    rpc.handle(async |mut ctx| {
776                        tracing::info!(CVM_ALLOWED, "flushing logs");
777                        ctx.until_cancelled(tracing.flush()).await?;
778                        Ok(())
779                    })
780                    .await
781                }
782                ControlRequest::MakeWorker(rpc) => {
783                    rpc.handle_failable(async |name| {
784                        launch_mesh_host(mesh, &name, Some(tracing.tracer())).await
785                    })
786                    .await
787                }
788            },
789        }
790    }
791
792    Ok(())
793}
794
/// Signals the host that VTL0 has started, before the VM worker is launched.
///
/// Spawns a temporary GET (guest emulation transport) worker, sends the
/// start-complete notification, and then tears the GET down again.
async fn signal_vtl0_started(driver: &DefaultDriver) -> anyhow::Result<()> {
    tracing::info!(CVM_ALLOWED, "signaling vtl0 started early");
    let (client, task) = guest_emulation_transport::spawn_get_worker(driver.clone())
        .await
        .context("failed to spawn GET")?;
    client.complete_start_vtl0(None).await;
    // Disconnect the GET so that it can be reused.
    drop(client);
    task.await.unwrap();
    tracing::info!(CVM_ALLOWED, "signaled vtl0 start");
    Ok(())
}
807
// The "base" workers for Underhill. Other workers are defined in the
// `underhill_resources` crate.
//
// FUTURE: split these workers into separate crates and move them to
// `underhill_resources`, too.
register_workers! {
    UnderhillVmWorker,
    DiagWorker,
    #[cfg(feature = "profiler")]
    ProfilerWorker,
}