// petri/vm/openvmm/start.rs
use super::PetriVmConfigOpenVmm;
use super::PetriVmOpenVmm;
use super::PetriVmResourcesOpenVmm;
use crate::OpenvmmLogConfig;
use crate::PetriLogFile;
use crate::PetriVmRuntimeConfig;
use crate::worker::Worker;
use anyhow::Context;
use mesh_process::Mesh;
use mesh_process::ProcessConfig;
use mesh_worker::WorkerHost;
use openvmm_defs::config::DeviceVtl;
use pal_async::pipe::PolledPipe;
use pal_async::task::Spawn;
use petri_artifacts_common::tags::MachineArch;
use petri_artifacts_common::tags::OsFlavor;
use std::collections::BTreeMap;
use std::ffi::OsString;
use std::io::Write;
use std::sync::Arc;
use vm_resource::IntoResource;

28impl PetriVmConfigOpenVmm {
29 async fn run_core(self) -> anyhow::Result<(PetriVmOpenVmm, PetriVmRuntimeConfig)> {
30 let Self {
31 runtime_config,
32 arch,
33 host_log_levels,
34 mut config,
35
36 mesh,
37
38 mut resources,
39
40 openvmm_log_file,
41
42 memory_backing_file,
43
44 ged,
45 framebuffer_view,
46 } = self;
47
48 let has_pcie = !config.pcie_root_complexes.is_empty();
49
50 let supports_save_restore = !resources.properties.is_openhcl
56 && !resources.properties.is_pcat
57 && !matches!(arch, MachineArch::Aarch64)
58 && !resources.properties.using_vpci
59 && !has_pcie;
60
61 if let Some(mut ged) = ged {
63 ged.vtl2_settings = Some(prost::Message::encode_to_vec(
64 runtime_config.vtl2_settings.as_ref().unwrap(),
65 ));
66 config
67 .vmbus_devices
68 .push((DeviceVtl::Vtl2, ged.into_resource()));
69 }
70
71 tracing::debug!(?config, "OpenVMM config");
72
73 let log_env = match host_log_levels {
74 None | Some(OpenvmmLogConfig::TestDefault) => BTreeMap::<OsString, OsString>::from([
75 ("OPENVMM_LOG".into(), "debug".into()),
76 ("OPENVMM_SHOW_SPANS".into(), "true".into()),
77 ]),
78 Some(OpenvmmLogConfig::BuiltInDefault) => BTreeMap::new(),
79 Some(OpenvmmLogConfig::Custom(levels)) => levels
80 .iter()
81 .map(|(k, v)| (OsString::from(k), OsString::from(v)))
82 .collect::<BTreeMap<OsString, OsString>>(),
83 };
84
85 let (host, pid) = Self::openvmm_host(&mut resources, &mesh, openvmm_log_file, log_env)
86 .await
87 .context("failed to create host process")?;
88 let shared_memory = memory_backing_file
91 .as_ref()
92 .map(|mem_path| {
93 openvmm_helpers::shared_memory::open_memory_backing_file(
94 mem_path,
95 config.memory.mem_size,
96 )
97 })
98 .transpose()?;
99
100 let (worker, halt_notif) = Worker::launch(&host, config, shared_memory)
101 .await
102 .context("failed to launch vm worker")?;
103
104 let worker = Arc::new(worker);
105
106 let is_minimal = resources.properties.minimal_mode;
107
108 let mut vm = PetriVmOpenVmm::new(
109 super::runtime::PetriVmInner {
110 resources,
111 mesh,
112 worker,
113 framebuffer_view,
114 cidata_mounted: false,
115 pid,
116 },
117 halt_notif,
118 );
119
120 tracing::info!("Resuming VM");
121 vm.resume().await?;
122
123 if supports_save_restore && !is_minimal {
125 tracing::info!("Testing save/restore");
126 vm.verify_save_restore().await?;
127 }
128
129 tracing::info!("VM ready");
130 Ok((vm, runtime_config))
131 }
132
133 pub async fn run(mut self) -> anyhow::Result<(PetriVmOpenVmm, PetriVmRuntimeConfig)> {
136 if self.resources.properties.using_vtl0_pipette
138 && matches!(self.resources.properties.os_flavor, OsFlavor::Windows)
139 && !self.resources.properties.is_isolated
140 {
141 let mut imc_hive_file = tempfile::tempfile().context("failed to create temp file")?;
142 imc_hive_file
143 .write_all(include_bytes!("../../../guest-bootstrap/imc.hiv"))
144 .context("failed to write imc hive")?;
145
146 self.config.vmbus_devices.push((
147 DeviceVtl::Vtl0,
148 vmbfs_resources::VmbfsImcDeviceHandle {
149 file: imc_hive_file,
150 }
151 .into_resource(),
152 ));
153 }
154
155 let launch_via_serial = self.resources.linux_direct_serial_agent.is_some()
159 && self.resources.properties.using_vtl0_pipette;
160
161 let (mut vm, config) = self.run_core().await?;
163
164 if launch_via_serial {
165 vm.launch_linux_direct_pipette().await?;
166 }
167
168 Ok((vm, config))
169 }
170
171 async fn openvmm_host(
172 resources: &mut PetriVmResourcesOpenVmm,
173 mesh: &Mesh,
174 log_file: PetriLogFile,
175 vmm_env: BTreeMap<OsString, OsString>,
176 ) -> anyhow::Result<(WorkerHost, i32)> {
177 let (stderr_read, stderr_write) = pal::pipe_pair()?;
180 let task = resources.driver.spawn(
181 "serial log",
182 crate::log_task(
183 log_file,
184 PolledPipe::new(&resources.driver, stderr_read)
185 .context("failed to create polled pipe")?,
186 "openvmm stderr",
187 ),
188 );
189 resources.log_stream_tasks.push(task);
190
191 let (host, runner) = mesh_worker::worker_host();
192 let pid = mesh
193 .launch_host(
194 ProcessConfig::new("vmm")
195 .process_name(&resources.openvmm_path)
196 .stderr(Some(stderr_write))
197 .env(vmm_env.into_iter()),
198 openvmm_defs::entrypoint::MeshHostParams { runner },
199 )
200 .await?;
201 Ok((host, pid))
202 }
203}