petri/vm/openvmm/start.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Methods to start a [`PetriVmConfigOpenVmm`] and produce a running [`PetriVmOpenVmm`].
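//!
//! A minimal sketch of the intended flow (assuming a `PetriVmConfigOpenVmm` has
//! already been built by the test framework; construction lives outside this
//! module):
//!
//! ```ignore
//! // Start the VM; pipette is configured to start automatically when it is
//! // included in the config.
//! let mut vm: PetriVmOpenVmm = config.run().await?;
//! // ... drive the test against the running VM, then tear it down ...
//! ```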

use super::PetriVmConfigOpenVmm;
use super::PetriVmOpenVmm;
use super::PetriVmResourcesOpenVmm;
use crate::BootDeviceType;
use crate::Firmware;
use crate::PetriLogFile;
use crate::worker::Worker;
use anyhow::Context;
use disk_backend_resources::FileDiskHandle;
use guid::Guid;
use hvlite_defs::config::DeviceVtl;
use mesh_process::Mesh;
use mesh_process::ProcessConfig;
use mesh_worker::WorkerHost;
use pal_async::pipe::PolledPipe;
use pal_async::task::Spawn;
use petri_artifacts_common::tags::MachineArch;
use petri_artifacts_common::tags::OsFlavor;
use scsidisk_resources::SimpleScsiDiskHandle;
use std::io::Write;
use std::sync::Arc;
use storvsp_resources::ScsiControllerHandle;
use storvsp_resources::ScsiDeviceAndPath;
use storvsp_resources::ScsiPath;
use vm_resource::IntoResource;

impl PetriVmConfigOpenVmm {
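    /// Shared startup path: attach the OpenHCL agent disk and GED (when
    /// configured), launch the OpenVMM worker process, resume the VM, and
    /// optionally exercise save/restore before returning the running VM.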
    async fn run_core(self) -> anyhow::Result<PetriVmOpenVmm> {
        let Self {
            firmware,
            arch,
            mut config,
            boot_device_type,

            mut resources,

            openvmm_log_file,

            ged,
            framebuffer_view,
        } = self;

        if firmware.is_openhcl() {
            // Add a pipette disk for VTL 2
            const UH_CIDATA_SCSI_INSTANCE: Guid =
                guid::guid!("766e96f8-2ceb-437e-afe3-a93169e48a7c");

            if let Some(openhcl_agent_disk) = resources
                .openhcl_agent_image
                .as_ref()
                .unwrap()
                .build()
                .context("failed to build agent image")?
            {
                config.vmbus_devices.push((
                    DeviceVtl::Vtl2,
                    ScsiControllerHandle {
                        instance_id: UH_CIDATA_SCSI_INSTANCE,
                        max_sub_channel_count: 1,
                        io_queue_depth: None,
                        devices: vec![ScsiDeviceAndPath {
                            path: ScsiPath {
                                path: 0,
                                target: 0,
                                lun: 0,
                            },
                            device: SimpleScsiDiskHandle {
                                read_only: true,
                                parameters: Default::default(),
                                disk: FileDiskHandle(openhcl_agent_disk.into_file())
                                    .into_resource(),
                            }
                            .into_resource(),
                        }],
                        requests: None,
                        poll_mode_queue_depth: None,
                    }
                    .into_resource(),
                ));
            }
        }

        // Add the GED and VTL 2 settings.
        if let Some(mut ged) = ged {
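            // The VTL 2 settings are delivered to OpenHCL as an encoded
            // protobuf blob carried by the GED.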
            ged.vtl2_settings = Some(prost::Message::encode_to_vec(
                resources.vtl2_settings.as_ref().unwrap(),
            ));
            config
                .vmbus_devices
                .push((DeviceVtl::Vtl2, ged.into_resource()));
        }

        tracing::debug!(?config, ?firmware, ?arch, "VM config");

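        // Capture this before `config` is moved into the worker; it decides
        // below whether the save/restore test can run.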
        let has_pcie = !config.pcie_root_complexes.is_empty();

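        // Spawn a separate OpenVMM host process on the mesh and launch the VM
        // worker inside it.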
        let mesh = Mesh::new("petri_mesh".to_string())?;

        let host = Self::openvmm_host(&mut resources, &mesh, openvmm_log_file)
            .await
            .context("failed to create host process")?;
        let (worker, halt_notif) = Worker::launch(&host, config)
            .await
            .context("failed to launch vm worker")?;

        let worker = Arc::new(worker);

        let mut vm = PetriVmOpenVmm::new(
            super::runtime::PetriVmInner {
                resources,
                mesh,
                worker,
                framebuffer_view,
            },
            halt_notif,
        );

        tracing::info!("Resuming VM");
        vm.resume().await?;

        // Run a basic save/restore test that should run on every VM.
        // TODO: OpenHCL needs virt_whp support
        // TODO: PCAT needs vga device support
        // TODO: arm64 is broken?
        // TODO: VPCI and NVMe don't support save/restore
        // TODO: PCIe emulators don't support save/restore yet
        if !firmware.is_openhcl()
            && !matches!(firmware, Firmware::Pcat { .. })
            && !matches!(arch, MachineArch::Aarch64)
            && !matches!(boot_device_type, BootDeviceType::Nvme)
            && !has_pcie
        {
            tracing::info!("Testing save/restore");
            vm.verify_save_restore().await?;
        }

        tracing::info!("VM ready");
        Ok(vm)
    }

    /// Run the VM, configuring pipette to start automatically if it is
    /// included in the config.
    pub async fn run(mut self) -> anyhow::Result<PetriVmOpenVmm> {
        let launch_linux_direct_pipette = if let Some(agent_image) = &self.resources.agent_image {
            const CIDATA_SCSI_INSTANCE: Guid = guid::guid!("766e96f8-2ceb-437e-afe3-a93169e48a7b");

            // Construct the agent disk.
            if let Some(agent_disk) = agent_image.build().context("failed to build agent image")? {
                // Add a SCSI controller to contain the agent disk. Don't reuse an
                // existing controller so that we can avoid interfering with
                // test-specific configuration.
                self.config.vmbus_devices.push((
                    DeviceVtl::Vtl0,
                    ScsiControllerHandle {
                        instance_id: CIDATA_SCSI_INSTANCE,
                        max_sub_channel_count: 1,
                        io_queue_depth: None,
                        devices: vec![ScsiDeviceAndPath {
                            path: ScsiPath {
                                path: 0,
                                target: 0,
                                lun: 0,
                            },
                            device: SimpleScsiDiskHandle {
                                read_only: true,
                                parameters: Default::default(),
                                disk: FileDiskHandle(agent_disk.into_file()).into_resource(),
                            }
                            .into_resource(),
                        }],
                        requests: None,
                        poll_mode_queue_depth: None,
                    }
                    .into_resource(),
                ));
            }

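            // Non-isolated Windows guests get an IMC hive so the guest can be
            // configured (e.g. to start the agent) at first boot.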
            if matches!(self.firmware.os_flavor(), OsFlavor::Windows)
                && self.firmware.isolation().is_none()
            {
                // Make a file for the IMC hive. It's not guaranteed to be at a fixed
                // location at runtime.
                let mut imc_hive_file =
                    tempfile::tempfile().context("failed to create temp file")?;
                imc_hive_file
                    .write_all(include_bytes!("../../../guest-bootstrap/imc.hiv"))
                    .context("failed to write imc hive")?;

                // Add the IMC device.
                self.config.vmbus_devices.push((
                    DeviceVtl::Vtl0,
                    vmbfs_resources::VmbfsImcDeviceHandle {
                        file: imc_hive_file,
                    }
                    .into_resource(),
                ));
            }

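            // Linux direct boots don't auto-start pipette, so record whether it
            // needs to be launched explicitly once the VM is running.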
            self.firmware.is_linux_direct() && agent_image.contains_pipette()
        } else {
            false
        };

        // Start the VM.
        let mut vm = self.run_core().await?;

        if launch_linux_direct_pipette {
            vm.launch_linux_direct_pipette().await?;
        }

        Ok(vm)
    }

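    /// Launch the OpenVMM host process on the mesh, wiring its stderr into the
    /// petri log stream, and return the [`WorkerHost`] used to launch workers.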
    async fn openvmm_host(
        resources: &mut PetriVmResourcesOpenVmm,
        mesh: &Mesh,
        log_file: PetriLogFile,
    ) -> anyhow::Result<WorkerHost> {
        // Forward the child's stderr into the petri log, since this process's
        // output is wrapped by the test harness.
        let (stderr_read, stderr_write) = pal::pipe_pair()?;
        let task = resources.driver.spawn(
            "serial log",
            crate::log_task(
                log_file,
                PolledPipe::new(&resources.driver, stderr_read)
                    .context("failed to create polled pipe")?,
                "openvmm stderr",
            ),
        );
        resources.log_stream_tasks.push(task);

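        // Create a worker host and back it with a new OpenVMM process launched
        // on the mesh, with the write end of the pipe as its stderr.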
        let (host, runner) = mesh_worker::worker_host();
        mesh.launch_host(
            ProcessConfig::new("vmm")
                .process_name(&resources.openvmm_path)
                .stderr(Some(stderr_write)),
            hvlite_defs::entrypoint::MeshHostParams { runner },
        )
        .await?;
        Ok(host)
    }
}