openhcl_boot/host_params/
shim_params.rs1use crate::arch::get_isolation_type;
7use core::slice;
8use loader_defs::paravisor::ImportedRegionDescriptor;
9use loader_defs::paravisor::ParavisorCommandLine;
10use loader_defs::shim::ShimParamsRaw;
11use memory_range::MemoryRange;
12
/// The isolation type the paravisor is running under.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum IsolationType {
    /// No isolation.
    None,
    /// Software (VBS) isolation.
    Vbs,
    /// AMD SEV-SNP hardware isolation.
    #[cfg(target_arch = "x86_64")]
    Snp,
    /// Intel TDX hardware isolation.
    #[cfg(target_arch = "x86_64")]
    Tdx,
}

impl IsolationType {
    /// Returns `true` for the hardware-backed isolation types (SNP, TDX),
    /// `false` for no isolation or software isolation.
    pub fn is_hardware_isolated(&self) -> bool {
        match self {
            Self::None | Self::Vbs => false,
            #[cfg(target_arch = "x86_64")]
            Self::Snp | Self::Tdx => true,
        }
    }
}
39
/// Iterator over the imported-region descriptors provided by the loader.
/// Yields each region's address range together with a flag (presumably
/// "already accepted at launch" — confirm against the loader definitions).
pub struct ImportedRegionIter<'a> {
    // Remaining descriptors; the slice shrinks from the front as the
    // iterator advances and is emptied at the first invalid descriptor.
    imported_regions: &'a [ImportedRegionDescriptor],
}
45
46impl Iterator for ImportedRegionIter<'_> {
47 type Item = (MemoryRange, bool);
48
49 fn next(&mut self) -> Option<Self::Item> {
50 if self.imported_regions.is_empty() {
51 None
52 } else {
53 let element = self.imported_regions[0]
54 .pages()
55 .map(|(base_page, count, accepted)| {
56 let base_address = base_page * hvdef::HV_PAGE_SIZE;
57 let end_address = base_address + (count * hvdef::HV_PAGE_SIZE);
58 (MemoryRange::try_new(base_address..end_address).expect(
59 "page number conversion into addresses results in a valid address range",
60 ), accepted)
61 });
62
63 if element.is_some() {
64 self.imported_regions = &self.imported_regions[1..];
65 } else {
66 self.imported_regions = &[];
67 }
68
69 element
70 }
71 }
72}
73
/// Parameters delivered to the boot shim by the loader, with the loader's
/// image-relative offsets already resolved to absolute addresses (see
/// [`ShimParams::new`]).
#[derive(Debug)]
pub struct ShimParams {
    /// Address of the kernel entry point.
    pub kernel_entry_address: u64,
    /// Address of the [`ParavisorCommandLine`] structure.
    pub cmdline_base: u64,
    /// Address of the initrd image.
    pub initrd_base: u64,
    /// Size of the initrd image, in bytes.
    pub initrd_size: u64,
    /// CRC of the initrd — presumably computed at image-build time for
    /// integrity checking; confirm against the loader.
    pub initrd_crc: u32,
    /// Start address of the memory region described by the loader.
    pub memory_start_address: u64,
    /// Size of that memory region, in bytes.
    pub memory_size: u64,
    /// Start address of the parameter region (device tree, measured
    /// config pages — see the accessor methods below).
    pub parameter_region_start: u64,
    /// Size of the parameter region, in bytes.
    pub parameter_region_size: u64,
    /// Start address of the VTL2 reserved region (holds e.g. the SNP
    /// secrets and CPUID pages on x86-64).
    pub vtl2_reserved_region_start: u64,
    /// Size of the VTL2 reserved region, in bytes.
    pub vtl2_reserved_region_size: u64,
    /// The isolation type the shim is running under.
    pub isolation_type: IsolationType,
    /// Address of the sidecar entry point.
    pub sidecar_entry_address: u64,
    /// Base address of the sidecar image.
    pub sidecar_base: u64,
    /// Size of the sidecar image, in bytes.
    pub sidecar_size: u64,
    /// The memory range occupied by the shim itself.
    pub used: MemoryRange,
    /// Bounce buffer range, or `None` if the loader reported a zero size.
    pub bounce_buffer: Option<MemoryRange>,
    /// Page-table range, or `None` if the loader reported a zero size.
    pub page_tables: Option<MemoryRange>,
}
113
impl ShimParams {
    /// Build a [`ShimParams`] from the raw, offset-based [`ShimParamsRaw`]
    /// handed over by the loader. `shim_base_address` is the address the
    /// shim image was actually loaded at; every signed offset in `raw` is
    /// resolved against it to produce an absolute address.
    pub fn new(shim_base_address: u64, raw: &ShimParamsRaw) -> Self {
        // Destructure exhaustively so that adding a field to
        // `ShimParamsRaw` becomes a compile error here rather than being
        // silently ignored.
        let &ShimParamsRaw {
            kernel_entry_offset,
            cmdline_offset,
            initrd_offset,
            initrd_size,
            initrd_crc,
            supported_isolation_type,
            memory_start_offset,
            memory_size,
            parameter_region_offset,
            parameter_region_size,
            vtl2_reserved_region_offset,
            vtl2_reserved_region_size,
            sidecar_offset,
            sidecar_size,
            sidecar_entry_offset,
            used_start,
            used_end,
            bounce_buffer_start,
            bounce_buffer_size,
            page_tables_start,
            page_tables_size,
        } = raw;

        let isolation_type = get_isolation_type(supported_isolation_type);

        // A zero size means the loader did not provide this region.
        // Offsets are signed: a region may lie below the shim image.
        let bounce_buffer = if bounce_buffer_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(bounce_buffer_start);
            Some(MemoryRange::new(base..base + bounce_buffer_size))
        };

        let page_tables = if page_tables_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(page_tables_start);
            Some(MemoryRange::new(base..base + page_tables_size))
        };

        Self {
            kernel_entry_address: shim_base_address.wrapping_add_signed(kernel_entry_offset),
            cmdline_base: shim_base_address.wrapping_add_signed(cmdline_offset),
            initrd_base: shim_base_address.wrapping_add_signed(initrd_offset),
            initrd_size,
            initrd_crc,
            memory_start_address: shim_base_address.wrapping_add_signed(memory_start_offset),
            memory_size,
            parameter_region_start: shim_base_address.wrapping_add_signed(parameter_region_offset),
            parameter_region_size,
            vtl2_reserved_region_start: shim_base_address
                .wrapping_add_signed(vtl2_reserved_region_offset),
            vtl2_reserved_region_size,
            isolation_type,
            sidecar_entry_address: shim_base_address.wrapping_add_signed(sidecar_entry_offset),
            sidecar_base: shim_base_address.wrapping_add_signed(sidecar_offset),
            sidecar_size,
            used: MemoryRange::new(
                shim_base_address.wrapping_add_signed(used_start)
                    ..shim_base_address.wrapping_add_signed(used_end),
            ),
            bounce_buffer,
            page_tables,
        }
    }

    /// Address of the SEV-SNP secrets page within the VTL2 reserved region.
    #[cfg(target_arch = "x86_64")]
    pub fn secrets_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_SECRETS_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

    /// Address of the SEV-SNP CPUID page within the VTL2 reserved region.
    #[cfg(target_arch = "x86_64")]
    pub fn cpuid_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_CPUID_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

    /// Address of the device tree within the parameter region.
    pub fn dt_start(&self) -> u64 {
        self.parameter_region_start
            + loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_PAGE_INDEX * hvdef::HV_PAGE_SIZE
    }

    /// Maximum size reserved for the device tree, in bytes (a fixed
    /// constant; not the size of the actual device tree blob).
    pub fn dt_size(&self) -> u64 {
        loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_SIZE_PAGES * hvdef::HV_PAGE_SIZE
    }

    /// The initrd image as a byte slice.
    pub fn initrd(&self) -> &'static [u8] {
        // SAFETY: relies on the loader guarantee that `initrd_base` and
        // `initrd_size` describe a valid, identity-mapped region that
        // lives for the duration of boot — NOTE(review): that guarantee
        // is established by the loader, not visible in this file.
        unsafe { slice::from_raw_parts(self.initrd_base as *const u8, self.initrd_size as usize) }
    }

    /// The [`ParavisorCommandLine`] structure provided by the loader.
    pub fn command_line(&self) -> &'static ParavisorCommandLine {
        // SAFETY: relies on the loader placing a valid, suitably aligned
        // `ParavisorCommandLine` at `cmdline_base` — NOTE(review): loader
        // invariant, not checkable here. Non-null is asserted via expect.
        unsafe {
            (self.cmdline_base as *const ParavisorCommandLine)
                .as_ref()
                .expect("should always be non null")
        }
    }

    /// The device tree area of the parameter region as a byte slice
    /// (the full reserved area, see [`Self::dt_size`]).
    pub fn device_tree(&self) -> &'static [u8] {
        // SAFETY: relies on the loader guarantee that the device tree
        // pages at `dt_start()` are valid and identity-mapped for the
        // full `dt_size()` — NOTE(review): loader invariant.
        unsafe { slice::from_raw_parts(self.dt_start() as *const u8, self.dt_size() as usize) }
    }

    /// Returns an iterator over the imported-region descriptors stored
    /// in the measured config's accepted-memory page.
    pub fn imported_regions(&self) -> ImportedRegionIter<'_> {
        use loader_defs::paravisor::ImportedRegionsPageHeader;

        let imported_region_page_address = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        // The whole accepted-memory page must lie inside the parameter
        // region, or the slice below would read out of bounds.
        assert!(
            imported_region_page_address + hvdef::HV_PAGE_SIZE
                <= self.parameter_region_start + self.parameter_region_size
        );

        // Descriptors start immediately after the page header; the rest
        // of the page is an array of descriptors.
        let imported_region_start =
            imported_region_page_address + size_of::<ImportedRegionsPageHeader>() as u64;

        // SAFETY: bounded to one page by the assert above; relies on the
        // loader guarantee that this page holds valid
        // `ImportedRegionDescriptor` entries — NOTE(review): loader
        // invariant, not visible in this file.
        unsafe {
            ImportedRegionIter {
                imported_regions: slice::from_raw_parts(
                    imported_region_start as *const ImportedRegionDescriptor,
                    (hvdef::HV_PAGE_SIZE as usize - size_of::<ImportedRegionsPageHeader>())
                        / size_of::<ImportedRegionDescriptor>(),
                ),
            }
        }
    }

    /// The SHA-384 hash stored in the accepted-memory page header.
    #[cfg(target_arch = "x86_64")]
    pub fn imported_regions_hash(&self) -> &'static [u8] {
        let header_start = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        // SAFETY: relies on the loader placing a valid
        // `ImportedRegionsPageHeader` at the start of the accepted-memory
        // page — NOTE(review): loader invariant; unlike
        // `imported_regions`, no bounds assert guards this read here.
        unsafe {
            let header =
                &*(header_start as *const loader_defs::paravisor::ImportedRegionsPageHeader);
            &header.sha384_hash
        }
    }
}