petri/vm/openvmm/start.rs

use super::PetriVmConfigOpenVmm;
use super::PetriVmOpenVmm;
use super::PetriVmResourcesOpenVmm;
use crate::BootDeviceType;
use crate::Firmware;
use crate::PetriLogFile;
use crate::worker::Worker;
use anyhow::Context;
use disk_backend_resources::FileDiskHandle;
use guid::Guid;
use hvlite_defs::config::DeviceVtl;
use mesh_process::Mesh;
use mesh_process::ProcessConfig;
use mesh_worker::WorkerHost;
use pal_async::pipe::PolledPipe;
use pal_async::task::Spawn;
use petri_artifacts_common::tags::MachineArch;
use petri_artifacts_common::tags::OsFlavor;
use scsidisk_resources::SimpleScsiDiskHandle;
use std::io::Write;
use std::sync::Arc;
use storvsp_resources::ScsiControllerHandle;
use storvsp_resources::ScsiDeviceAndPath;
use storvsp_resources::ScsiPath;
use vm_resource::IntoResource;

impl PetriVmConfigOpenVmm {
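    /// Finishes assembling the VM configuration, launches the OpenVMM worker
    /// in a child process, and resumes the VM.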
    async fn run_core(self) -> anyhow::Result<PetriVmOpenVmm> {
        let Self {
            firmware,
            arch,
            mut config,
            boot_device_type,

            mut resources,

            openvmm_log_file,

            petri_vtl0_scsi,
            ged,
            framebuffer_view,
        } = self;

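        // For OpenHCL firmware, expose the OpenHCL agent image to VTL2 as a
        // read-only SCSI disk on a dedicated controller.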
        if firmware.is_openhcl() {
            const UH_CIDATA_SCSI_INSTANCE: Guid =
                guid::guid!("766e96f8-2ceb-437e-afe3-a93169e48a7c");

            if let Some(openhcl_agent_disk) = resources
                .openhcl_agent_image
                .as_ref()
                .unwrap()
                .build()
                .context("failed to build agent image")?
            {
                config.vmbus_devices.push((
                    DeviceVtl::Vtl2,
                    ScsiControllerHandle {
                        instance_id: UH_CIDATA_SCSI_INSTANCE,
                        max_sub_channel_count: 1,
                        io_queue_depth: None,
                        devices: vec![ScsiDeviceAndPath {
                            path: ScsiPath {
                                path: 0,
                                target: 0,
                                lun: crate::vm::PETRI_VTL0_SCSI_BOOT_LUN,
                            },
                            device: SimpleScsiDiskHandle {
                                read_only: true,
                                parameters: Default::default(),
                                disk: FileDiskHandle(openhcl_agent_disk.into_file())
                                    .into_resource(),
                            }
                            .into_resource(),
                        }],
                        requests: None,
                        poll_mode_queue_depth: None,
                    }
                    .into_resource(),
                ));
            }
        }

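        // Attach any petri-managed SCSI devices that were accumulated during
        // configuration to VTL0.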
        if !petri_vtl0_scsi.devices.is_empty() {
            config
                .vmbus_devices
                .push((DeviceVtl::Vtl0, petri_vtl0_scsi.into_resource()));
        }

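        // If a guest emulation device (GED) is configured, embed the
        // protobuf-encoded VTL2 settings and offer the device to VTL2.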
        if let Some(mut ged) = ged {
            ged.vtl2_settings = Some(prost::Message::encode_to_vec(
                resources.vtl2_settings.as_ref().unwrap(),
            ));
            config
                .vmbus_devices
                .push((DeviceVtl::Vtl2, ged.into_resource()));
        }

        tracing::debug!(?config, ?firmware, ?arch, "VM config");

        let has_pcie = !config.pcie_root_complexes.is_empty();

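        // Spawn a separate OpenVMM host process via mesh and launch the VM
        // worker in it.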
        let mesh = Mesh::new("petri_mesh".to_string())?;

        let host = Self::openvmm_host(&mut resources, &mesh, openvmm_log_file)
            .await
            .context("failed to create host process")?;
        let (worker, halt_notif) = Worker::launch(&host, config)
            .await
            .context("failed to launch vm worker")?;

        let worker = Arc::new(worker);

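        // Bundle the worker, mesh, and remaining resources into the runtime
        // VM object.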
        let mut vm = PetriVmOpenVmm::new(
            super::runtime::PetriVmInner {
                resources,
                mesh,
                worker,
                framebuffer_view,
            },
            halt_notif,
        );

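        // The worker creates the VM in a paused state; resume it to start
        // running the guest.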
        tracing::info!("Resuming VM");
        vm.resume().await?;

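        // Exercise save/restore, skipping OpenHCL, PCAT, aarch64, NVMe boot,
        // and PCIe configurations.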
        if !firmware.is_openhcl()
            && !matches!(firmware, Firmware::Pcat { .. })
            && !matches!(arch, MachineArch::Aarch64)
            && !matches!(boot_device_type, BootDeviceType::Nvme)
            && !has_pcie
        {
            tracing::info!("Testing save/restore");
            vm.verify_save_restore().await?;
        }

        tracing::info!("VM ready");
        Ok(vm)
    }

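    /// Builds and boots the VM, attaching the pipette agent disk and, for
    /// non-isolated Windows guests, the IMC hive; for Linux direct boots
    /// whose agent image contains pipette, launches the agent once the VM is
    /// running.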
    pub async fn run(mut self) -> anyhow::Result<PetriVmOpenVmm> {
        let launch_linux_direct_pipette = if let Some(agent_image) = &self.resources.agent_image {
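            // Expose the pipette agent image to the guest as a read-only SCSI
            // disk at the pipette LUN defined by petri.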
            if let Some(agent_disk) = agent_image.build().context("failed to build agent image")? {
                self.petri_vtl0_scsi.devices.push(ScsiDeviceAndPath {
                    path: ScsiPath {
                        path: 0,
                        target: 0,
                        lun: crate::vm::PETRI_VTL0_SCSI_PIPETTE_LUN,
                    },
                    device: SimpleScsiDiskHandle {
                        read_only: true,
                        parameters: Default::default(),
                        disk: FileDiskHandle(agent_disk.into_file()).into_resource(),
                    }
                    .into_resource(),
                });
            }

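            // For Windows guests without isolation, provide the bootstrap IMC
            // (Initial Machine Configuration) hive through the vmbfs IMC
            // device.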
            if matches!(self.firmware.os_flavor(), OsFlavor::Windows)
                && self.firmware.isolation().is_none()
            {
                let mut imc_hive_file =
                    tempfile::tempfile().context("failed to create temp file")?;
                imc_hive_file
                    .write_all(include_bytes!("../../../guest-bootstrap/imc.hiv"))
                    .context("failed to write imc hive")?;

                self.config.vmbus_devices.push((
                    DeviceVtl::Vtl0,
                    vmbfs_resources::VmbfsImcDeviceHandle {
                        file: imc_hive_file,
                    }
                    .into_resource(),
                ));
            }

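            // Pipette is launched explicitly only for Linux direct boots
            // whose agent image includes it.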
            self.firmware.is_linux_direct() && agent_image.contains_pipette()
        } else {
            false
        };

        let mut vm = self.run_core().await?;

        if launch_linux_direct_pipette {
            vm.launch_linux_direct_pipette().await?;
        }

        Ok(vm)
    }

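    /// Launches the OpenVMM host process in the mesh, forwarding its stderr
    /// to the petri log, and returns a [`WorkerHost`] for launching the VM
    /// worker.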
    async fn openvmm_host(
        resources: &mut PetriVmResourcesOpenVmm,
        mesh: &Mesh,
        log_file: PetriLogFile,
    ) -> anyhow::Result<WorkerHost> {
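        // Forward the child process's stderr to the petri log via a pipe
        // serviced by a background task.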
        let (stderr_read, stderr_write) = pal::pipe_pair()?;
        let task = resources.driver.spawn(
            "serial log",
            crate::log_task(
                log_file,
                PolledPipe::new(&resources.driver, stderr_read)
                    .context("failed to create polled pipe")?,
                "openvmm stderr",
            ),
        );
        resources.log_stream_tasks.push(task);

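        // Create a worker host and launch a mesh child process (named "vmm",
        // running the configured OpenVMM binary) to back it.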
        let (host, runner) = mesh_worker::worker_host();
        mesh.launch_host(
            ProcessConfig::new("vmm")
                .process_name(&resources.openvmm_path)
                .stderr(Some(stderr_write)),
            hvlite_defs::entrypoint::MeshHostParams { runner },
        )
        .await?;
        Ok(host)
    }
}