// openhcl_boot/host_params/shim_params.rs

use crate::arch::get_isolation_type;
7use core::slice;
8use loader_defs::paravisor::ImportedRegionDescriptor;
9use loader_defs::paravisor::ParavisorCommandLine;
10use loader_defs::shim::ShimParamsRaw;
11use memory_range::MemoryRange;
12
/// The isolation type the shim is running under, decoded from the raw
/// value supplied by the loader (see `get_isolation_type`).
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum IsolationType {
    /// No isolation.
    None,
    /// Virtualization-based security (software) isolation.
    Vbs,
    /// AMD SEV-SNP hardware isolation.
    /// Not constructed on aarch64, hence the dead_code expectation there.
    #[cfg_attr(target_arch = "aarch64", expect(dead_code))]
    Snp,
    /// Intel TDX hardware isolation.
    Tdx,
}
22
23impl IsolationType {
24 pub fn is_hardware_isolated(&self) -> bool {
25 match self {
26 IsolationType::None => false,
27 IsolationType::Vbs => false,
28 IsolationType::Snp => true,
29 IsolationType::Tdx => true,
30 }
31 }
32}
33
/// Iterator over the imported-region descriptors stored in the measured
/// configuration page; yields each region's address range and whether it
/// was already accepted at launch.
pub struct ImportedRegionIter<'a> {
    // Remaining descriptors; truncated to empty when an end marker
    // (a descriptor whose pages() is None) is reached.
    imported_regions: &'a [ImportedRegionDescriptor],
}
39
40impl Iterator for ImportedRegionIter<'_> {
41 type Item = (MemoryRange, bool);
42
43 fn next(&mut self) -> Option<Self::Item> {
44 if self.imported_regions.is_empty() {
45 None
46 } else {
47 let element = self.imported_regions[0]
48 .pages()
49 .map(|(base_page, count, accepted)| {
50 let base_address = base_page * hvdef::HV_PAGE_SIZE;
51 let end_address = base_address + (count * hvdef::HV_PAGE_SIZE);
52 (MemoryRange::try_new(base_address..end_address).expect(
53 "page number conversion into addresses results in a valid address range",
54 ), accepted)
55 });
56
57 if element.is_some() {
58 self.imported_regions = &self.imported_regions[1..];
59 } else {
60 self.imported_regions = &[];
61 }
62
63 element
64 }
65 }
66}
67
68#[derive(Debug)]
/// Fully resolved shim parameters: the signed offsets from `ShimParamsRaw`
/// relocated into absolute addresses against the shim's load address.
#[derive(Debug)]
pub struct ShimParams {
    /// Absolute entry address of the kernel.
    pub kernel_entry_address: u64,
    /// Address of the `ParavisorCommandLine` structure.
    pub cmdline_base: u64,
    /// Absolute address of the initrd image.
    pub initrd_base: u64,
    /// Size of the initrd, in bytes.
    pub initrd_size: u64,
    /// CRC of the initrd image — presumably for integrity checking by the
    /// caller; TODO confirm where it is verified.
    pub initrd_crc: u32,
    /// Start of the VTL2 memory region.
    pub memory_start_address: u64,
    /// Size of the VTL2 memory region, in bytes.
    pub memory_size: u64,
    /// Start of the parameter region (holds the device tree and the
    /// imported-regions page, per the accessors below).
    pub parameter_region_start: u64,
    /// Size of the parameter region, in bytes.
    pub parameter_region_size: u64,
    /// Start of the VTL2 reserved region (holds the SNP secrets and
    /// CPUID pages on x86_64).
    pub vtl2_reserved_region_start: u64,
    /// Size of the VTL2 reserved region, in bytes.
    pub vtl2_reserved_region_size: u64,
    /// The isolation type decoded from the raw parameters.
    pub isolation_type: IsolationType,
    /// Absolute entry address of the sidecar image.
    pub sidecar_entry_address: u64,
    /// Absolute base address of the sidecar image.
    pub sidecar_base: u64,
    /// Size of the sidecar image, in bytes.
    pub sidecar_size: u64,
    /// The range of memory used by the shim itself.
    pub used: MemoryRange,
    /// Optional pre-reserved bounce buffer range; `None` when the loader
    /// reserved no such buffer (size 0).
    pub bounce_buffer: Option<MemoryRange>,
    /// Optional page-table range; `None` when the raw size is 0.
    pub page_tables: Option<MemoryRange>,
    /// Range reserved for the log buffer.
    pub log_buffer: MemoryRange,
    /// Range reserved for the shim's heap.
    pub heap: MemoryRange,
}
111
impl ShimParams {
    /// Build a `ShimParams` from the raw, offset-based parameters and the
    /// address the shim image was loaded at. Every `*_offset`/`*_start`
    /// field of `ShimParamsRaw` is a signed distance from
    /// `shim_base_address`, hence the `wrapping_add_signed` relocations.
    pub fn new(shim_base_address: u64, raw: &ShimParamsRaw) -> Self {
        // Exhaustive destructure: adding a field to ShimParamsRaw becomes
        // a compile error here rather than a silently ignored value.
        let &ShimParamsRaw {
            kernel_entry_offset,
            cmdline_offset,
            initrd_offset,
            initrd_size,
            initrd_crc,
            supported_isolation_type,
            memory_start_offset,
            memory_size,
            parameter_region_offset,
            parameter_region_size,
            vtl2_reserved_region_offset,
            vtl2_reserved_region_size,
            sidecar_offset,
            sidecar_size,
            sidecar_entry_offset,
            used_start,
            used_end,
            bounce_buffer_start,
            bounce_buffer_size,
            page_tables_start,
            page_tables_size,
            log_buffer_start,
            log_buffer_size,
            heap_start_offset,
            heap_size,
        } = raw;

        let isolation_type = get_isolation_type(supported_isolation_type);

        // A zero size means the loader reserved no bounce buffer.
        let bounce_buffer = if bounce_buffer_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(bounce_buffer_start);
            Some(MemoryRange::new(base..base + bounce_buffer_size))
        };

        // Likewise, zero size means no page-table region was provided.
        let page_tables = if page_tables_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(page_tables_start);
            Some(MemoryRange::new(base..base + page_tables_size))
        };

        let log_buffer = {
            let base = shim_base_address.wrapping_add_signed(log_buffer_start);
            MemoryRange::new(base..base + log_buffer_size)
        };

        let heap = {
            let base = shim_base_address.wrapping_add_signed(heap_start_offset);
            MemoryRange::new(base..base + heap_size)
        };

        Self {
            kernel_entry_address: shim_base_address.wrapping_add_signed(kernel_entry_offset),
            cmdline_base: shim_base_address.wrapping_add_signed(cmdline_offset),
            initrd_base: shim_base_address.wrapping_add_signed(initrd_offset),
            initrd_size,
            initrd_crc,
            memory_start_address: shim_base_address.wrapping_add_signed(memory_start_offset),
            memory_size,
            parameter_region_start: shim_base_address.wrapping_add_signed(parameter_region_offset),
            parameter_region_size,
            vtl2_reserved_region_start: shim_base_address
                .wrapping_add_signed(vtl2_reserved_region_offset),
            vtl2_reserved_region_size,
            isolation_type,
            sidecar_entry_address: shim_base_address.wrapping_add_signed(sidecar_entry_offset),
            sidecar_base: shim_base_address.wrapping_add_signed(sidecar_offset),
            sidecar_size,
            used: MemoryRange::new(
                shim_base_address.wrapping_add_signed(used_start)
                    ..shim_base_address.wrapping_add_signed(used_end),
            ),
            bounce_buffer,
            page_tables,
            log_buffer,
            heap,
        }
    }

    /// Address of the SNP secrets page inside the VTL2 reserved region
    /// (x86_64 only; presumably meaningful only under SNP isolation —
    /// NOTE(review): callers should gate on the isolation type).
    #[cfg(target_arch = "x86_64")]
    pub fn secrets_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_SECRETS_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

    /// Address of the SNP CPUID page inside the VTL2 reserved region
    /// (x86_64 only; same isolation caveat as `secrets_start`).
    #[cfg(target_arch = "x86_64")]
    pub fn cpuid_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_CPUID_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

    /// Address of the device tree within the parameter region.
    pub fn dt_start(&self) -> u64 {
        self.parameter_region_start
            + loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_PAGE_INDEX * hvdef::HV_PAGE_SIZE
    }

    /// Maximum size reserved for the device tree, in bytes.
    pub fn dt_size(&self) -> u64 {
        loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_SIZE_PAGES * hvdef::HV_PAGE_SIZE
    }

    /// The initrd image as a byte slice.
    pub fn initrd(&self) -> &'static [u8] {
        // SAFETY: initrd_base/initrd_size are derived from the loader's
        // measured layout, which is assumed to describe memory that is
        // mapped and immutable for the shim's lifetime ('static).
        unsafe { slice::from_raw_parts(self.initrd_base as *const u8, self.initrd_size as usize) }
    }

    /// The command-line structure placed by the loader.
    ///
    /// Panics if `cmdline_base` is null.
    pub fn command_line(&self) -> &'static ParavisorCommandLine {
        // SAFETY: cmdline_base is assumed to point at a valid, properly
        // aligned ParavisorCommandLine written by the loader and mapped
        // for the shim's lifetime — TODO confirm alignment is guaranteed
        // by the loader layout.
        unsafe {
            (self.cmdline_base as *const ParavisorCommandLine)
                .as_ref()
                .expect("should always be non null")
        }
    }

    /// The host-provided device tree as a byte slice spanning the full
    /// reserved device-tree area.
    pub fn device_tree(&self) -> &'static [u8] {
        // SAFETY: dt_start()/dt_size() address the device-tree pages of
        // the parameter region, assumed mapped and valid for 'static.
        unsafe { slice::from_raw_parts(self.dt_start() as *const u8, self.dt_size() as usize) }
    }

    /// Iterator over the imported-region descriptors stored in the
    /// accepted-memory page of the parameter region.
    pub fn imported_regions(&self) -> ImportedRegionIter<'_> {
        use loader_defs::paravisor::ImportedRegionsPageHeader;

        let imported_region_page_address = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        // The whole descriptor page must fall inside the parameter region.
        assert!(
            imported_region_page_address + hvdef::HV_PAGE_SIZE
                <= self.parameter_region_start + self.parameter_region_size
        );

        // Descriptors start immediately after the page header.
        let imported_region_start =
            imported_region_page_address + size_of::<ImportedRegionsPageHeader>() as u64;

        // SAFETY: the assertion above confines the page to the parameter
        // region, which is assumed mapped and valid for the shim's
        // lifetime; the slice length covers exactly the descriptors that
        // fit in the remainder of the page.
        unsafe {
            ImportedRegionIter {
                imported_regions: slice::from_raw_parts(
                    imported_region_start as *const ImportedRegionDescriptor,
                    (hvdef::HV_PAGE_SIZE as usize - size_of::<ImportedRegionsPageHeader>())
                        / size_of::<ImportedRegionDescriptor>(),
                ),
            }
        }
    }

    /// The SHA-384 hash stored in the imported-regions page header
    /// (x86_64 only).
    #[cfg(target_arch = "x86_64")]
    pub fn imported_regions_hash(&self) -> &'static [u8] {
        let header_start = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        // SAFETY: header_start addresses the accepted-memory page of the
        // parameter region, assumed to hold a valid
        // ImportedRegionsPageHeader mapped for 'static. NOTE(review):
        // unlike imported_regions(), there is no bounds assertion here —
        // it relies on the same layout invariant.
        unsafe {
            let header =
                &*(header_start as *const loader_defs::paravisor::ImportedRegionsPageHeader);
            &header.sha384_hash
        }
    }
}