openhcl_boot/host_params/shim_params.rs

use crate::arch::get_isolation_type;
use core::slice;
use loader_defs::paravisor::ImportedRegionDescriptor;
use loader_defs::paravisor::ParavisorCommandLine;
use loader_defs::shim::ShimParamsRaw;
use memory_range::MemoryRange;
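
/// The isolation type of the partition.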
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum IsolationType {
    None,
    Vbs,
    #[cfg_attr(target_arch = "aarch64", expect(dead_code))]
    Snp,
    #[cfg_attr(target_arch = "aarch64", expect(dead_code))]
    Tdx,
}

impl IsolationType {
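    /// Returns true if the isolation type is hardware-backed (SNP or TDX).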
    pub fn is_hardware_isolated(&self) -> bool {
        match self {
            IsolationType::None => false,
            IsolationType::Vbs => false,
            IsolationType::Snp => true,
            IsolationType::Tdx => true,
        }
    }
}
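
/// Iterator over the imported memory regions described in the measured
/// configuration.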
pub struct ImportedRegionIter<'a> {
    /// The remaining imported region descriptors.
    imported_regions: &'a [ImportedRegionDescriptor],
}

impl Iterator for ImportedRegionIter<'_> {
    /// Each item pairs an imported memory range with whether its pages were
    /// already accepted.
    type Item = (MemoryRange, bool);

    fn next(&mut self) -> Option<Self::Item> {
        if self.imported_regions.is_empty() {
            None
        } else {
            let element = self.imported_regions[0]
                .pages()
                .map(|(base_page, count, accepted)| {
                    let base_address = base_page * hvdef::HV_PAGE_SIZE;
                    let end_address = base_address + (count * hvdef::HV_PAGE_SIZE);
                    (
                        MemoryRange::try_new(base_address..end_address).expect(
                            "page number conversion into addresses results in a valid address range",
                        ),
                        accepted,
                    )
                });

            // A descriptor that reports no pages terminates the list; clear
            // the slice so iteration ends.
            if element.is_some() {
                self.imported_regions = &self.imported_regions[1..];
            } else {
                self.imported_regions = &[];
            }

            element
        }
    }
}

#[derive(Debug)]
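/// Parsed shim parameters, with the offsets from `ShimParamsRaw` resolved to
/// absolute addresses.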
pub struct ShimParams {
    /// The kernel entry point address.
    pub kernel_entry_address: u64,
    /// The base address of the command line.
    pub cmdline_base: u64,
    /// The base address of the initrd.
    pub initrd_base: u64,
    /// The size of the initrd, in bytes.
    pub initrd_size: u64,
    /// The CRC32 of the initrd.
    pub initrd_crc: u32,
    /// The start address of VTL2 memory.
    pub memory_start_address: u64,
    /// The size of VTL2 memory, in bytes.
    pub memory_size: u64,
    /// The start address of the parameter region.
    pub parameter_region_start: u64,
    /// The size of the parameter region, in bytes.
    pub parameter_region_size: u64,
    /// The start address of the VTL2 reserved region.
    pub vtl2_reserved_region_start: u64,
    /// The size of the VTL2 reserved region, in bytes.
    pub vtl2_reserved_region_size: u64,
    /// The isolation type of the partition.
    pub isolation_type: IsolationType,
    /// The entry point address for the sidecar.
    pub sidecar_entry_address: u64,
    /// The base address of the sidecar image.
    pub sidecar_base: u64,
    /// The size of the sidecar image, in bytes.
    pub sidecar_size: u64,
    /// The range of memory used by the shim itself.
    pub used: MemoryRange,
    /// The optional bounce buffer range, if one was configured.
    pub bounce_buffer: Option<MemoryRange>,
    /// The range used for the log buffer.
    pub log_buffer: MemoryRange,
    /// The range used for the shim's heap.
    pub heap: MemoryRange,
    /// The range reserved for persisted state.
    pub persisted_state: MemoryRange,
}

impl ShimParams {
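    /// Create a new `ShimParams` from the raw offset-based representation,
    /// resolving each offset against `shim_base_address`.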
    pub fn new(shim_base_address: u64, raw: &ShimParamsRaw) -> Self {
        let &ShimParamsRaw {
            kernel_entry_offset,
            cmdline_offset,
            initrd_offset,
            initrd_size,
            initrd_crc,
            supported_isolation_type,
            memory_start_offset,
            memory_size,
            parameter_region_offset,
            parameter_region_size,
            vtl2_reserved_region_offset,
            vtl2_reserved_region_size,
            sidecar_offset,
            sidecar_size,
            sidecar_entry_offset,
            used_start,
            used_end,
            bounce_buffer_start,
            bounce_buffer_size,
            log_buffer_start,
            log_buffer_size,
            heap_start_offset,
            heap_size,
            persisted_state_region_offset,
            persisted_state_region_size,
        } = raw;

        let isolation_type = get_isolation_type(supported_isolation_type);

        // Offsets in the raw parameters are signed deltas from the shim's
        // base address, so resolve them with wrapping_add_signed.
        let bounce_buffer = if bounce_buffer_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(bounce_buffer_start);
            Some(MemoryRange::new(base..base + bounce_buffer_size))
        };

        let log_buffer = {
            let base = shim_base_address.wrapping_add_signed(log_buffer_start);
            MemoryRange::new(base..base + log_buffer_size)
        };

        let heap = {
            let base = shim_base_address.wrapping_add_signed(heap_start_offset);
            MemoryRange::new(base..base + heap_size)
        };

        let persisted_state = {
            let base = shim_base_address.wrapping_add_signed(persisted_state_region_offset);
            MemoryRange::new(base..base + persisted_state_region_size)
        };

        Self {
            kernel_entry_address: shim_base_address.wrapping_add_signed(kernel_entry_offset),
            cmdline_base: shim_base_address.wrapping_add_signed(cmdline_offset),
            initrd_base: shim_base_address.wrapping_add_signed(initrd_offset),
            initrd_size,
            initrd_crc,
            memory_start_address: shim_base_address.wrapping_add_signed(memory_start_offset),
            memory_size,
            parameter_region_start: shim_base_address.wrapping_add_signed(parameter_region_offset),
            parameter_region_size,
            vtl2_reserved_region_start: shim_base_address
                .wrapping_add_signed(vtl2_reserved_region_offset),
            vtl2_reserved_region_size,
            isolation_type,
            sidecar_entry_address: shim_base_address.wrapping_add_signed(sidecar_entry_offset),
            sidecar_base: shim_base_address.wrapping_add_signed(sidecar_offset),
            sidecar_size,
            used: MemoryRange::new(
                shim_base_address.wrapping_add_signed(used_start)
                    ..shim_base_address.wrapping_add_signed(used_end),
            ),
            bounce_buffer,
            log_buffer,
            heap,
            persisted_state,
        }
    }

    #[cfg(target_arch = "x86_64")]
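    /// Get the base address of the SNP secrets page in the VTL2 reserved
    /// region.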
    pub fn secrets_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_SECRETS_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

    #[cfg(target_arch = "x86_64")]
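    /// Get the base address of the SNP CPUID page in the VTL2 reserved
    /// region.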
    pub fn cpuid_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_CPUID_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }
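
    /// Get the base address of the device tree in the parameter region.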
    pub fn dt_start(&self) -> u64 {
        self.parameter_region_start
            + loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_PAGE_INDEX * hvdef::HV_PAGE_SIZE
    }
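
    /// The size of the device tree region, in bytes.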
    pub fn dt_size(&self) -> u64 {
        loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_SIZE_PAGES * hvdef::HV_PAGE_SIZE
    }
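
    /// Get the initrd as a byte slice.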
    pub fn initrd(&self) -> &'static [u8] {
        // SAFETY: initrd_base and initrd_size describe the initrd region set
        // up by the loader, which stays valid and unmodified for the lifetime
        // of the shim.
        unsafe { slice::from_raw_parts(self.initrd_base as *const u8, self.initrd_size as usize) }
    }
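
    /// Get the `ParavisorCommandLine` structure describing the command line.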
    pub fn command_line(&self) -> &'static ParavisorCommandLine {
        // SAFETY: cmdline_base points to the loader-provided command line
        // page, which is valid, aligned, and never modified.
        unsafe {
            (self.cmdline_base as *const ParavisorCommandLine)
                .as_ref()
                .expect("should always be non null")
        }
    }
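
    /// Get the device tree portion of the parameter region as a byte slice.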
    pub fn device_tree(&self) -> &'static [u8] {
        // SAFETY: dt_start() and dt_size() describe a fixed slice of the
        // parameter region, which is valid and never modified.
        unsafe { slice::from_raw_parts(self.dt_start() as *const u8, self.dt_size() as usize) }
    }
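
    /// Get an iterator over the imported region descriptors in the measured
    /// configuration. Illustrative use (assuming `params: &ShimParams`):
    ///
    /// ```ignore
    /// for (range, accepted) in params.imported_regions() {
    ///     // handle each imported range
    /// }
    /// ```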
    pub fn imported_regions(&self) -> ImportedRegionIter<'_> {
        use loader_defs::paravisor::ImportedRegionsPageHeader;

        let imported_region_page_address = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        assert!(
            imported_region_page_address + hvdef::HV_PAGE_SIZE
                <= self.parameter_region_start + self.parameter_region_size
        );

        let imported_region_start =
            imported_region_page_address + size_of::<ImportedRegionsPageHeader>() as u64;

        // SAFETY: the imported regions page lies within the parameter region
        // (checked by the assert above) and is never modified.
        unsafe {
            ImportedRegionIter {
                imported_regions: slice::from_raw_parts(
                    imported_region_start as *const ImportedRegionDescriptor,
                    (hvdef::HV_PAGE_SIZE as usize - size_of::<ImportedRegionsPageHeader>())
                        / size_of::<ImportedRegionDescriptor>(),
                ),
            }
        }
    }
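
    /// Get the SHA-384 hash recorded in the imported regions page header.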
    #[cfg(target_arch = "x86_64")]
    pub fn imported_regions_hash(&self) -> &'static [u8] {
        let header_start = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        // SAFETY: header_start points to the imported regions page header
        // within the parameter region, which is valid and never modified.
        unsafe {
            let header =
                &*(header_start as *const loader_defs::paravisor::ImportedRegionsPageHeader);
            &header.sha384_hash
        }
    }
}