// openhcl_boot/host_params/shim_params.rs

use crate::arch::get_isolation_type;
7use core::slice;
8use loader_defs::paravisor::ImportedRegionDescriptor;
9use loader_defs::paravisor::ParavisorCommandLine;
10use loader_defs::shim::ShimParamsRaw;
11use memory_range::MemoryRange;
12
/// Isolation type of the partition, derived from the loader-reported value
/// via `crate::arch::get_isolation_type`.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum IsolationType {
    /// No isolation.
    None,
    /// Software (VBS) isolation.
    Vbs,
    /// AMD SEV-SNP hardware isolation. Never constructed on aarch64, hence
    /// the dead_code expectation there.
    #[cfg_attr(target_arch = "aarch64", expect(dead_code))]
    Snp,
    /// Intel TDX hardware isolation.
    Tdx,
}
22
23impl IsolationType {
24 pub fn is_hardware_isolated(&self) -> bool {
25 match self {
26 IsolationType::None => false,
27 IsolationType::Vbs => false,
28 IsolationType::Snp => true,
29 IsolationType::Tdx => true,
30 }
31 }
32}
33
/// Iterator over the [`ImportedRegionDescriptor`] entries of the
/// accepted-memory page, yielding `(range, accepted)` pairs.
pub struct ImportedRegionIter<'a> {
    // Remaining descriptors; shrunk from the front as items are yielded.
    imported_regions: &'a [ImportedRegionDescriptor],
}
39
40impl Iterator for ImportedRegionIter<'_> {
41 type Item = (MemoryRange, bool);
42
43 fn next(&mut self) -> Option<Self::Item> {
44 if self.imported_regions.is_empty() {
45 None
46 } else {
47 let element = self.imported_regions[0]
48 .pages()
49 .map(|(base_page, count, accepted)| {
50 let base_address = base_page * hvdef::HV_PAGE_SIZE;
51 let end_address = base_address + (count * hvdef::HV_PAGE_SIZE);
52 (MemoryRange::try_new(base_address..end_address).expect(
53 "page number conversion into addresses results in a valid address range",
54 ), accepted)
55 });
56
57 if element.is_some() {
58 self.imported_regions = &self.imported_regions[1..];
59 } else {
60 self.imported_regions = &[];
61 }
62
63 element
64 }
65 }
66}
67
68#[derive(Debug)]
/// Shim parameters with every loader-provided offset already resolved to an
/// absolute address (see `ShimParams::new`).
#[derive(Debug)]
pub struct ShimParams {
    /// The kernel entry point address.
    pub kernel_entry_address: u64,
    /// Address of the [`ParavisorCommandLine`] structure.
    pub cmdline_base: u64,
    /// Address of the initrd image.
    pub initrd_base: u64,
    /// Size of the initrd image, in bytes.
    pub initrd_size: u64,
    /// CRC of the initrd image, as provided by the loader.
    pub initrd_crc: u32,
    /// Start address of the memory region given to the shim.
    pub memory_start_address: u64,
    /// Size of that memory region, in bytes.
    pub memory_size: u64,
    /// Start address of the parameter region.
    pub parameter_region_start: u64,
    /// Size of the parameter region, in bytes.
    pub parameter_region_size: u64,
    /// Start address of the VTL2 reserved region.
    pub vtl2_reserved_region_start: u64,
    /// Size of the VTL2 reserved region, in bytes.
    pub vtl2_reserved_region_size: u64,
    /// The isolation type of the partition.
    pub isolation_type: IsolationType,
    /// Entry point address of the sidecar image.
    pub sidecar_entry_address: u64,
    /// Base address of the sidecar image.
    pub sidecar_base: u64,
    /// Size of the sidecar image, in bytes.
    pub sidecar_size: u64,
    /// Memory range in use by the shim, from the loader's
    /// `used_start..used_end` offsets.
    pub used: MemoryRange,
    /// Bounce buffer range, if the loader allocated one (`None` when it
    /// reported a zero size).
    pub bounce_buffer: Option<MemoryRange>,
    /// Page table range, if the loader provided one (`None` when
    /// zero-sized).
    pub page_tables: Option<MemoryRange>,
}
107
impl ShimParams {
    /// Create a new `ShimParams` from the raw, offset-based representation
    /// handed over by the loader. `shim_base_address` is the address the
    /// shim image was loaded at; every offset in `raw` is a signed offset
    /// relative to that base.
    pub fn new(shim_base_address: u64, raw: &ShimParamsRaw) -> Self {
        // Destructure exhaustively so that adding a field to ShimParamsRaw
        // forces this constructor to be updated.
        let &ShimParamsRaw {
            kernel_entry_offset,
            cmdline_offset,
            initrd_offset,
            initrd_size,
            initrd_crc,
            supported_isolation_type,
            memory_start_offset,
            memory_size,
            parameter_region_offset,
            parameter_region_size,
            vtl2_reserved_region_offset,
            vtl2_reserved_region_size,
            sidecar_offset,
            sidecar_size,
            sidecar_entry_offset,
            used_start,
            used_end,
            bounce_buffer_start,
            bounce_buffer_size,
            page_tables_start,
            page_tables_size,
        } = raw;

        let isolation_type = get_isolation_type(supported_isolation_type);

        // A zero size means the loader did not allocate a bounce buffer.
        let bounce_buffer = if bounce_buffer_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(bounce_buffer_start);
            Some(MemoryRange::new(base..base + bounce_buffer_size))
        };

        // Likewise, a zero size means no page table region was provided.
        let page_tables = if page_tables_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(page_tables_start);
            Some(MemoryRange::new(base..base + page_tables_size))
        };

        Self {
            kernel_entry_address: shim_base_address.wrapping_add_signed(kernel_entry_offset),
            cmdline_base: shim_base_address.wrapping_add_signed(cmdline_offset),
            initrd_base: shim_base_address.wrapping_add_signed(initrd_offset),
            initrd_size,
            initrd_crc,
            memory_start_address: shim_base_address.wrapping_add_signed(memory_start_offset),
            memory_size,
            parameter_region_start: shim_base_address.wrapping_add_signed(parameter_region_offset),
            parameter_region_size,
            vtl2_reserved_region_start: shim_base_address
                .wrapping_add_signed(vtl2_reserved_region_offset),
            vtl2_reserved_region_size,
            isolation_type,
            sidecar_entry_address: shim_base_address.wrapping_add_signed(sidecar_entry_offset),
            sidecar_base: shim_base_address.wrapping_add_signed(sidecar_offset),
            sidecar_size,
            used: MemoryRange::new(
                shim_base_address.wrapping_add_signed(used_start)
                    ..shim_base_address.wrapping_add_signed(used_end),
            ),
            bounce_buffer,
            page_tables,
        }
    }

    /// Address of the SNP secrets page within the VTL2 reserved region.
    #[cfg(target_arch = "x86_64")]
    pub fn secrets_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_SECRETS_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

    /// Address of the SNP CPUID page within the VTL2 reserved region.
    #[cfg(target_arch = "x86_64")]
    pub fn cpuid_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_CPUID_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

    /// Address of the device tree within the parameter region.
    pub fn dt_start(&self) -> u64 {
        self.parameter_region_start
            + loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_PAGE_INDEX * hvdef::HV_PAGE_SIZE
    }

    /// Size of the device tree area, in bytes.
    pub fn dt_size(&self) -> u64 {
        loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_SIZE_PAGES * hvdef::HV_PAGE_SIZE
    }

    /// The initrd image as a byte slice.
    pub fn initrd(&self) -> &'static [u8] {
        // SAFETY: initrd_base and initrd_size are set by the loader and are
        // assumed to describe a valid, reserved range of memory that stays
        // mapped for the life of the shim — TODO confirm loader contract.
        unsafe { slice::from_raw_parts(self.initrd_base as *const u8, self.initrd_size as usize) }
    }

    /// The command line structure written by the loader at `cmdline_base`.
    pub fn command_line(&self) -> &'static ParavisorCommandLine {
        // SAFETY: cmdline_base is assumed to point to a valid, suitably
        // aligned ParavisorCommandLine written by the loader; non-null is
        // checked at runtime via the expect below.
        unsafe {
            (self.cmdline_base as *const ParavisorCommandLine)
                .as_ref()
                .expect("should always be non null")
        }
    }

    /// The device tree blob as a byte slice.
    pub fn device_tree(&self) -> &'static [u8] {
        // SAFETY: dt_start()/dt_size() address a portion of the parameter
        // region populated by the loader; assumed valid for 'static reads.
        unsafe { slice::from_raw_parts(self.dt_start() as *const u8, self.dt_size() as usize) }
    }

    /// Iterator over the imported memory regions described by the
    /// accepted-memory page of the parameter region.
    pub fn imported_regions(&self) -> ImportedRegionIter<'_> {
        use loader_defs::paravisor::ImportedRegionsPageHeader;

        let imported_region_page_address = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        // The whole descriptor page must lie inside the parameter region.
        assert!(
            imported_region_page_address + hvdef::HV_PAGE_SIZE
                <= self.parameter_region_start + self.parameter_region_size
        );

        // Descriptors start immediately after the page header.
        let imported_region_start =
            imported_region_page_address + size_of::<ImportedRegionsPageHeader>() as u64;

        // SAFETY: the remainder of the page after the header is assumed to
        // hold an array of ImportedRegionDescriptor entries written by the
        // loader; the computed element count never extends past the page.
        unsafe {
            ImportedRegionIter {
                imported_regions: slice::from_raw_parts(
                    imported_region_start as *const ImportedRegionDescriptor,
                    (hvdef::HV_PAGE_SIZE as usize - size_of::<ImportedRegionsPageHeader>())
                        / size_of::<ImportedRegionDescriptor>(),
                ),
            }
        }
    }

    /// The SHA-384 hash stored in the accepted-memory page header.
    #[cfg(target_arch = "x86_64")]
    pub fn imported_regions_hash(&self) -> &'static [u8] {
        let header_start = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        // SAFETY: header_start points at the ImportedRegionsPageHeader
        // written by the loader; assumed valid for 'static reads.
        unsafe {
            let header =
                &*(header_start as *const loader_defs::paravisor::ImportedRegionsPageHeader);
            &header.sha384_hash
        }
    }
}