openhcl_boot/host_params/shim_params.rs

use crate::arch::get_isolation_type;
use core::slice;
use loader_defs::paravisor::ImportedRegionDescriptor;
use loader_defs::paravisor::ParavisorCommandLine;
use loader_defs::shim::ShimParamsRaw;
use memory_range::MemoryRange;

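/// The isolation type of the partition.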
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum IsolationType {
    None,
    Vbs,
    #[cfg_attr(target_arch = "aarch64", expect(dead_code))]
    Snp,
    Tdx,
}

impl IsolationType {
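    /// Returns true if the isolation type is hardware-backed (SNP or TDX),
    /// as opposed to software-based (VBS) or no isolation.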
    pub fn is_hardware_isolated(&self) -> bool {
        match self {
            IsolationType::None => false,
            IsolationType::Vbs => false,
            IsolationType::Snp => true,
            IsolationType::Tdx => true,
        }
    }
}

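/// Iterator over the imported region descriptors in the measured config page.
/// Yields each region as a [`MemoryRange`] together with a flag indicating
/// whether the region's pages were accepted at launch.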
pub struct ImportedRegionIter<'a> {
    imported_regions: &'a [ImportedRegionDescriptor],
}

impl Iterator for ImportedRegionIter<'_> {
    type Item = (MemoryRange, bool);

    fn next(&mut self) -> Option<Self::Item> {
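        // An entry whose `pages()` returns `None` marks the end of the valid
        // descriptors, so iteration stops and the remaining slice is dropped.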
        if self.imported_regions.is_empty() {
            None
        } else {
            let element = self.imported_regions[0]
                .pages()
                .map(|(base_page, count, accepted)| {
                    let base_address = base_page * hvdef::HV_PAGE_SIZE;
                    let end_address = base_address + (count * hvdef::HV_PAGE_SIZE);
                    (
                        MemoryRange::try_new(base_address..end_address).expect(
                            "page number conversion into addresses results in a valid address range",
                        ),
                        accepted,
                    )
                });

            if element.is_some() {
                self.imported_regions = &self.imported_regions[1..];
            } else {
                self.imported_regions = &[];
            }

            element
        }
    }
}

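/// Parameters provided to the boot shim by the loader, with loader-relative
/// offsets already resolved to absolute addresses.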
#[derive(Debug)]
pub struct ShimParams {
    /// The address of the kernel entry point.
    pub kernel_entry_address: u64,
    /// The address of the [`ParavisorCommandLine`] structure.
    pub cmdline_base: u64,
    /// The address of the initrd.
    pub initrd_base: u64,
    /// The size of the initrd, in bytes.
    pub initrd_size: u64,
    /// The crc32 of the initrd.
    pub initrd_crc: u32,
    /// The start address of VTL2 memory.
    pub memory_start_address: u64,
    /// The size of VTL2 memory, in bytes.
    pub memory_size: u64,
    /// The start address of the parameter region.
    pub parameter_region_start: u64,
    /// The size of the parameter region, in bytes.
    pub parameter_region_size: u64,
    /// The start address of the VTL2 reserved region.
    pub vtl2_reserved_region_start: u64,
    /// The size of the VTL2 reserved region, in bytes.
    pub vtl2_reserved_region_size: u64,
    /// The isolation type of the partition.
    pub isolation_type: IsolationType,
    /// The address of the sidecar entry point.
    pub sidecar_entry_address: u64,
    /// The base address of the sidecar image.
    pub sidecar_base: u64,
    /// The size of the sidecar image, in bytes.
    pub sidecar_size: u64,
    /// The memory range used by the shim itself.
    pub used: MemoryRange,
    /// The bounce buffer range, if one was allocated.
    pub bounce_buffer: Option<MemoryRange>,
    /// The page table range, if one was allocated.
    pub page_tables: Option<MemoryRange>,
    /// The memory range used for the log buffer.
    pub log_buffer: MemoryRange,
    /// The memory range used for the shim heap.
    pub heap: MemoryRange,
    /// The memory range reserved for persisted state.
    pub persisted_state: MemoryRange,
}

impl ShimParams {
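    /// Creates a new `ShimParams` from the raw values provided by the loader,
    /// resolving each signed offset in `raw` against `shim_base_address`.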
    pub fn new(shim_base_address: u64, raw: &ShimParamsRaw) -> Self {
        let &ShimParamsRaw {
            kernel_entry_offset,
            cmdline_offset,
            initrd_offset,
            initrd_size,
            initrd_crc,
            supported_isolation_type,
            memory_start_offset,
            memory_size,
            parameter_region_offset,
            parameter_region_size,
            vtl2_reserved_region_offset,
            vtl2_reserved_region_size,
            sidecar_offset,
            sidecar_size,
            sidecar_entry_offset,
            used_start,
            used_end,
            bounce_buffer_start,
            bounce_buffer_size,
            page_tables_start,
            page_tables_size,
            log_buffer_start,
            log_buffer_size,
            heap_start_offset,
            heap_size,
            persisted_state_region_offset,
            persisted_state_region_size,
        } = raw;

        let isolation_type = get_isolation_type(supported_isolation_type);

        let bounce_buffer = if bounce_buffer_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(bounce_buffer_start);
            Some(MemoryRange::new(base..base + bounce_buffer_size))
        };

        let page_tables = if page_tables_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(page_tables_start);
            Some(MemoryRange::new(base..base + page_tables_size))
        };

        let log_buffer = {
            let base = shim_base_address.wrapping_add_signed(log_buffer_start);
            MemoryRange::new(base..base + log_buffer_size)
        };

        let heap = {
            let base = shim_base_address.wrapping_add_signed(heap_start_offset);
            MemoryRange::new(base..base + heap_size)
        };

        let persisted_state = {
            let base = shim_base_address.wrapping_add_signed(persisted_state_region_offset);
            MemoryRange::new(base..base + persisted_state_region_size)
        };

        Self {
            kernel_entry_address: shim_base_address.wrapping_add_signed(kernel_entry_offset),
            cmdline_base: shim_base_address.wrapping_add_signed(cmdline_offset),
            initrd_base: shim_base_address.wrapping_add_signed(initrd_offset),
            initrd_size,
            initrd_crc,
            memory_start_address: shim_base_address.wrapping_add_signed(memory_start_offset),
            memory_size,
            parameter_region_start: shim_base_address.wrapping_add_signed(parameter_region_offset),
            parameter_region_size,
            vtl2_reserved_region_start: shim_base_address
                .wrapping_add_signed(vtl2_reserved_region_offset),
            vtl2_reserved_region_size,
            isolation_type,
            sidecar_entry_address: shim_base_address.wrapping_add_signed(sidecar_entry_offset),
            sidecar_base: shim_base_address.wrapping_add_signed(sidecar_offset),
            sidecar_size,
            used: MemoryRange::new(
                shim_base_address.wrapping_add_signed(used_start)
                    ..shim_base_address.wrapping_add_signed(used_end),
            ),
            bounce_buffer,
            page_tables,
            log_buffer,
            heap,
            persisted_state,
        }
    }

208 #[cfg(target_arch = "x86_64")]
    pub fn secrets_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_SECRETS_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

216 #[cfg(target_arch = "x86_64")]
    pub fn cpuid_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_CPUID_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

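    /// Returns the address of the device tree within the parameter region.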
    pub fn dt_start(&self) -> u64 {
        self.parameter_region_start
            + loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_PAGE_INDEX * hvdef::HV_PAGE_SIZE
    }

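    /// Returns the size of the device tree region, in bytes.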
    pub fn dt_size(&self) -> u64 {
        loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_SIZE_PAGES * hvdef::HV_PAGE_SIZE
    }

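    /// Returns the initrd as a byte slice.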
    pub fn initrd(&self) -> &'static [u8] {
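        // SAFETY: the loader is trusted to provide a valid initrd base and
        // size describing memory that is mapped and not mutated for the
        // lifetime of the shim.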
        unsafe { slice::from_raw_parts(self.initrd_base as *const u8, self.initrd_size as usize) }
    }

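    /// Returns a reference to the [`ParavisorCommandLine`] structure.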
    pub fn command_line(&self) -> &'static ParavisorCommandLine {
        unsafe {
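            // SAFETY: the loader is trusted to have placed a valid
            // ParavisorCommandLine structure at cmdline_base.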
            (self.cmdline_base as *const ParavisorCommandLine)
                .as_ref()
                .expect("should always be non null")
        }
    }

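    /// Returns the device tree blob as a byte slice.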
    pub fn device_tree(&self) -> &'static [u8] {
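        // SAFETY: dt_start() and dt_size() describe the device tree pages
        // within the parameter region, which the loader guarantees are mapped
        // and valid.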
        unsafe { slice::from_raw_parts(self.dt_start() as *const u8, self.dt_size() as usize) }
    }

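    /// Returns an iterator over the imported regions described in the
    /// measured config page.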
    pub fn imported_regions(&self) -> ImportedRegionIter<'_> {
        use loader_defs::paravisor::ImportedRegionsPageHeader;

        let imported_region_page_address = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        assert!(
            imported_region_page_address + hvdef::HV_PAGE_SIZE
                <= self.parameter_region_start + self.parameter_region_size
        );

        let imported_region_start =
            imported_region_page_address + size_of::<ImportedRegionsPageHeader>() as u64;

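        // SAFETY: the loader is trusted to have written valid imported region
        // descriptors after the page header; the assert above ensures the
        // page lies within the parameter region.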
        unsafe {
            ImportedRegionIter {
                imported_regions: slice::from_raw_parts(
                    imported_region_start as *const ImportedRegionDescriptor,
                    (hvdef::HV_PAGE_SIZE as usize - size_of::<ImportedRegionsPageHeader>())
                        / size_of::<ImportedRegionDescriptor>(),
                ),
            }
        }
    }

    #[cfg(target_arch = "x86_64")]
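    /// Returns the SHA-384 hash of the imported regions, read from the
    /// header of the accepted memory page.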
    pub fn imported_regions_hash(&self) -> &'static [u8] {
        let header_start = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        unsafe {
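            // SAFETY: the loader is trusted to have placed a valid
            // ImportedRegionsPageHeader at the start of the accepted memory
            // page within the parameter region.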
            let header =
                &*(header_start as *const loader_defs::paravisor::ImportedRegionsPageHeader);
            &header.sha384_hash
        }
    }
}