1use crate::importer::BootPageAcceptance;
7use crate::importer::GuestArch;
8use crate::importer::ImageLoad;
9use crate::importer::SegmentRegister;
10use crate::importer::TableRegister;
11use crate::importer::X86Register;
12use hvdef::HV_PAGE_SIZE;
13use std::io::Read;
14use std::io::Seek;
15use thiserror::Error;
16use vm_topology::memory::MemoryLayout;
17use x86defs::GdtEntry;
18use x86defs::X64_DEFAULT_CODE_SEGMENT_ATTRIBUTES;
19use x86defs::X64_DEFAULT_DATA_SEGMENT_ATTRIBUTES;
20use zerocopy::FromZeros;
21use zerocopy::IntoBytes;
22
/// Convenience trait for file-like inputs that support both reading and
/// seeking, so they can be passed around as a single trait object
/// (e.g. `&mut dyn ReadSeek`).
pub trait ReadSeek: Read + Seek {}
/// Blanket impl: anything that is `Read + Seek` is a `ReadSeek`.
impl<T: Read + Seek> ReadSeek for T {}
26
// Number of entries in the default GDT built by `import_default_gdt`:
// a null entry, a 64-bit code entry, a data entry, and a trailing null entry.
const DEFAULT_GDT_COUNT: usize = 4;
/// Guest memory reserved for the default GDT (one page).
pub const DEFAULT_GDT_SIZE: u64 = HV_PAGE_SIZE;
30
31pub fn import_default_gdt(
35 importer: &mut dyn ImageLoad<X86Register>,
36 gdt_page_base: u64,
37) -> anyhow::Result<()> {
38 let default_data_attributes: u16 = X64_DEFAULT_DATA_SEGMENT_ATTRIBUTES.into();
42 let default_code_attributes: u16 = X64_DEFAULT_CODE_SEGMENT_ATTRIBUTES.into();
43 let gdt: [GdtEntry; DEFAULT_GDT_COUNT] = [
44 GdtEntry::new_zeroed(),
45 GdtEntry {
46 limit_low: 0xffff,
47 attr_low: default_code_attributes as u8,
48 attr_high: (default_code_attributes >> 8) as u8,
49 ..GdtEntry::new_zeroed()
50 },
51 GdtEntry {
52 limit_low: 0xffff,
53 attr_low: default_data_attributes as u8,
54 attr_high: (default_data_attributes >> 8) as u8,
55 ..GdtEntry::new_zeroed()
56 },
57 GdtEntry::new_zeroed(),
58 ];
59 let gdt_entry_size = size_of::<GdtEntry>();
60 let linear_selector_offset = 2 * gdt_entry_size;
61 let linear_code64_selector_offset = gdt_entry_size;
62
63 importer.import_pages(
65 gdt_page_base,
66 DEFAULT_GDT_SIZE / HV_PAGE_SIZE,
67 "default-gdt",
68 BootPageAcceptance::Exclusive,
69 gdt.as_bytes(),
70 )?;
71
72 let mut import_reg = |register| importer.import_vp_register(register);
74 import_reg(X86Register::Gdtr(TableRegister {
75 base: gdt_page_base * HV_PAGE_SIZE,
76 limit: (size_of::<GdtEntry>() * DEFAULT_GDT_COUNT - 1) as u16,
77 }))?;
78
79 let ds = SegmentRegister {
80 selector: linear_selector_offset as u16,
81 base: 0,
82 limit: 0xffffffff,
83 attributes: default_data_attributes,
84 };
85 import_reg(X86Register::Ds(ds))?;
86 import_reg(X86Register::Es(ds))?;
87 import_reg(X86Register::Fs(ds))?;
88 import_reg(X86Register::Gs(ds))?;
89 import_reg(X86Register::Ss(ds))?;
90
91 let cs = SegmentRegister {
92 selector: linear_code64_selector_offset as u16,
93 base: 0,
94 limit: 0xffffffff,
95 attributes: default_code_attributes,
96 };
97 import_reg(X86Register::Cs(cs))?;
98
99 Ok(())
100}
101
/// Error returned by [`compute_variable_mtrrs`] when the memory layout does
/// not describe exactly two MMIO gaps (low and high).
#[derive(Debug, Error)]
#[error("exactly two MMIO gaps are required")]
pub struct UnsupportedMmio;
106
107pub fn compute_variable_mtrrs(
113 memory: &MemoryLayout,
114 physical_address_width: u8,
115) -> Result<Vec<X86Register>, UnsupportedMmio> {
116 const WRITEBACK: u64 = 0x6;
117
118 let &[mmio_gap_low, mmio_gap_high] = memory.mmio().try_into().map_err(|_| UnsupportedMmio)?;
119
120 let gpa_space_size = physical_address_width.clamp(36, 52);
122
123 let mut result = Vec::with_capacity(8);
127
128 let pcat_mtrr_size = 128 * 1024 * 1024;
131
132 result.push(X86Register::MtrrPhysBase0(WRITEBACK));
133 result.push(X86Register::MtrrPhysMask0(mtrr_mask(
134 gpa_space_size,
135 pcat_mtrr_size - 1,
136 )));
137
138 if memory.end_of_ram() > pcat_mtrr_size {
141 result.push(X86Register::MtrrPhysBase1(pcat_mtrr_size | WRITEBACK));
142 result.push(X86Register::MtrrPhysMask1(mtrr_mask(
143 gpa_space_size,
144 mmio_gap_low.start() - 1,
145 )));
146 }
147
148 if memory.end_of_ram() > mmio_gap_low.end() {
151 result.push(X86Register::MtrrPhysBase2(mmio_gap_low.end() | WRITEBACK));
152 result.push(X86Register::MtrrPhysMask2(mtrr_mask(
153 gpa_space_size,
154 mmio_gap_high.start() - 1,
155 )));
156 }
157
158 if memory.end_of_ram() > mmio_gap_high.end() {
163 result.push(X86Register::MtrrPhysBase3(mmio_gap_high.end() | WRITEBACK));
164 result.push(X86Register::MtrrPhysMask3(mtrr_mask(
165 gpa_space_size,
166 (1 << std::cmp::min(gpa_space_size, 43)) - 1,
167 )));
168 if gpa_space_size > 43 {
169 result.push(X86Register::MtrrPhysBase4((1 << 43) | WRITEBACK));
170 result.push(X86Register::MtrrPhysMask4(mtrr_mask(
171 gpa_space_size,
172 (1 << gpa_space_size) - 1,
173 )));
174 }
175 }
176
177 Ok(result)
178}
179
/// Computes the value of a variable MTRR PhysMask MSR.
///
/// Bit 11 is the MTRR valid/enable bit. Each physical-address bit in
/// `12..gpa_space_size` is set in the mask exactly when
/// `maximum_address < (1 << bit)` — i.e. the low bits spanning the region
/// that still contains `maximum_address` are left clear.
fn mtrr_mask(gpa_space_size: u8, maximum_address: u64) -> u64 {
    // PhysMask valid bit.
    const ENABLED: u64 = 1 << 11;

    // Powers of two are monotonically increasing, so this single pass is
    // equivalent to setting all bits in the range and then clearing the
    // prefix for which `maximum_address >= 1 << bit`.
    (12..gpa_space_size)
        .map(|bit| 1u64 << bit)
        .filter(|&bit_value| maximum_address < bit_value)
        .fold(ENABLED, |mask, bit_value| mask | bit_value)
}
205
/// Errors that can occur while importing a file-backed region of guest
/// memory (see [`ChunkBuf::import_file_region`]).
#[derive(Debug, Error)]
pub enum ImportFileRegionError {
    /// The file data does not fit in the destination memory region.
    #[error("file length {file_length} exceeds memory length {memory_length}")]
    FileLengthExceedsMemoryLength {
        /// Number of bytes to be read from the file.
        file_length: u64,
        /// Size in bytes of the destination region.
        memory_length: u64,
    },
    /// Seeking to the requested file offset failed.
    #[error("failed to seek file")]
    Seek(#[source] std::io::Error),
    /// Reading file data failed (including unexpected EOF).
    #[error("failed to read file")]
    Read(#[source] std::io::Error),
    /// The importer rejected the pages.
    #[error("failed to import pages")]
    ImportPages(#[source] anyhow::Error),
    /// The page-span computation (`gpa` offset + `memory_length`)
    /// overflowed a `u64`.
    #[error("address computation overflowed")]
    Overflow,
}
230
/// Parameters for [`ChunkBuf::import_file_region`].
pub struct ImportFileRegion<'a, F: ?Sized> {
    /// Source file to read from.
    pub file: &'a mut F,
    /// Byte offset within `file` where the data to import begins.
    pub file_offset: u64,
    /// Number of bytes to read from `file`.
    pub file_length: u64,
    /// Guest physical address where the data is placed (need not be
    /// page-aligned).
    pub gpa: u64,
    /// Size in bytes of the destination region; must be at least
    /// `file_length`. Pages past the end of the file data are imported
    /// without file backing.
    pub memory_length: u64,
    /// Acceptance type applied to all imported pages.
    pub acceptance: BootPageAcceptance,
    /// Tag identifying this import range to the importer.
    pub tag: &'a str,
}
248
/// Reusable staging buffer, sized to a whole number of pages, for streaming
/// file contents into guest memory chunk by chunk.
pub struct ChunkBuf(Vec<u8>);
255
impl ChunkBuf {
    // Default staging-buffer size in bytes (64 KiB).
    const DEFAULT_SIZE: usize = 64 * 1024;

    /// Creates a buffer of the default size (64 KiB).
    pub fn new() -> Self {
        Self::with_size(Self::DEFAULT_SIZE)
    }

    /// Creates a buffer of `size` bytes, rounded down to a whole number of
    /// `HV_PAGE_SIZE` pages.
    ///
    /// # Panics
    ///
    /// Panics if `size` is smaller than one page.
    pub fn with_size(size: usize) -> Self {
        let page_count = size as u64 / HV_PAGE_SIZE;
        assert!(page_count > 0, "ChunkBuf must be at least one page");
        Self(vec![0u8; (page_count * HV_PAGE_SIZE) as usize])
    }

    /// Reads `params.file_length` bytes from `params.file` starting at
    /// `params.file_offset` and imports them into guest memory at
    /// `params.gpa`, streaming through this buffer one chunk at a time.
    ///
    /// The destination need not be page-aligned: the partial first page is
    /// zero-padded up to the in-page offset of `gpa`, and enough pages are
    /// imported to cover all `params.memory_length` bytes.
    ///
    /// # Errors
    ///
    /// Fails if the file data exceeds the memory region, if seeking or
    /// reading the file fails, if the importer rejects the pages, or if the
    /// page-span computation overflows.
    pub fn import_file_region<F, R: GuestArch>(
        &mut self,
        importer: &mut dyn ImageLoad<R>,
        params: ImportFileRegion<'_, F>,
    ) -> Result<(), ImportFileRegionError>
    where
        F: ReadSeek + ?Sized,
    {
        let ImportFileRegion {
            file,
            file_offset,
            file_length,
            gpa,
            memory_length,
            acceptance,
            tag,
        } = params;

        if file_length > memory_length {
            return Err(ImportFileRegionError::FileLengthExceedsMemoryLength {
                file_length,
                memory_length,
            });
        }

        // Nothing to import at all (file_length is necessarily 0 here too).
        if memory_length == 0 {
            return Ok(());
        }

        let buf = &mut self.0[..];
        let buf_pages = buf.len() as u64 / HV_PAGE_SIZE;

        // `gpa` may start mid-page: `leading_zero` is the in-page byte
        // offset, which the first chunk pads with zeroes.
        let page_mask = HV_PAGE_SIZE - 1;
        let leading_zero = gpa & page_mask;
        let page_base = gpa / HV_PAGE_SIZE;
        // Total pages spanned by the region, rounding the end up to a page
        // boundary; checked arithmetic guards against u64 overflow.
        let total_page_count = leading_zero
            .checked_add(memory_length)
            .and_then(|v| v.checked_add(page_mask))
            .ok_or(ImportFileRegionError::Overflow)?
            / HV_PAGE_SIZE;

        file.seek(std::io::SeekFrom::Start(file_offset))
            .map_err(ImportFileRegionError::Seek)?;

        let mut pages_done: u64 = 0;
        let mut file_remaining = file_length;

        while file_remaining > 0 {
            // Each iteration stages at most one buffer's worth of pages.
            let chunk_pages = (total_page_count - pages_done).min(buf_pages);
            let chunk_bytes = (chunk_pages * HV_PAGE_SIZE) as usize;
            let chunk_buf = &mut buf[..chunk_bytes];

            // Only the first chunk carries the unaligned-GPA zero prefix.
            let data_start = if pages_done == 0 {
                leading_zero as usize
            } else {
                0
            };
            let data_len = file_remaining.min((chunk_bytes - data_start) as u64) as usize;

            // Zero the prefix so stale buffer contents are never imported.
            chunk_buf[..data_start].fill(0);

            file.read_exact(&mut chunk_buf[data_start..data_start + data_len])
                .map_err(ImportFileRegionError::Read)?;

            file_remaining -= data_len as u64;

            // On the final chunk, extend the import to cover every remaining
            // page of the region even though only `data_start + data_len`
            // bytes of data are supplied; the importer is expected to handle
            // data shorter than the page span (it is also called with empty
            // data below) -- NOTE(review): presumably it zero-fills the
            // tail; confirm against the ImageLoad contract.
            let import_page_count = if file_remaining == 0 {
                total_page_count - pages_done
            } else {
                chunk_pages
            };

            importer
                .import_pages(
                    page_base + pages_done,
                    import_page_count,
                    tag,
                    acceptance,
                    &chunk_buf[..data_start + data_len],
                )
                .map_err(ImportFileRegionError::ImportPages)?;

            pages_done += import_page_count;
        }

        // With no file data, the loop never runs; import the whole region
        // with no backing data so the pages are still accepted.
        if file_length == 0 {
            importer
                .import_pages(page_base, total_page_count, tag, acceptance, &[])
                .map_err(ImportFileRegionError::ImportPages)?;
        }

        Ok(())
    }

    /// Computes the CRC32 of the first `len` bytes of `file`, reading
    /// through this buffer, then rewinds the file to the start.
    ///
    /// # Errors
    ///
    /// Returns any I/O error from seeking or reading, including an
    /// unexpected-EOF error if `file` is shorter than `len` bytes.
    pub fn crc32(&mut self, file: &mut dyn ReadSeek, len: u64) -> Result<u32, std::io::Error> {
        file.seek(std::io::SeekFrom::Start(0))?;
        let mut hasher = crc32fast::Hasher::new();
        let mut remaining = len;
        while remaining > 0 {
            let to_read = remaining.min(self.0.len() as u64) as usize;
            file.read_exact(&mut self.0[..to_read])?;
            hasher.update(&self.0[..to_read]);
            remaining -= to_read as u64;
        }
        file.rewind()?;
        Ok(hasher.finalize())
    }
}