use crate::common::import_default_gdt;
use crate::elf::load_static_elf;
use crate::importer::Aarch64Register;
use crate::importer::BootPageAcceptance;
use crate::importer::GuestArch;
use crate::importer::ImageLoad;
use crate::importer::X86Register;
use aarch64defs::Cpsr64;
use aarch64defs::IntermPhysAddrSize;
use aarch64defs::SctlrEl1;
use aarch64defs::TranslationBaseEl1;
use aarch64defs::TranslationControlEl1;
use aarch64defs::TranslationGranule0;
use aarch64defs::TranslationGranule1;
use bitfield_struct::bitfield;
use hvdef::HV_PAGE_SIZE;
use loader_defs::linux as defs;
use page_table::IdentityMapSize;
use page_table::x64::align_up_to_large_page_size;
use page_table::x64::align_up_to_page_size;
use page_table::x64::build_page_tables_64;
use std::ffi::CString;
use thiserror::Error;
use vm_topology::memory::MemoryLayout;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

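/// Builds the Linux "zero page" (`boot_params`) used for x86-64 direct boot,
/// describing the command line, initrd location, and the e820 memory map
/// derived from `mem_layout` and the ACPI table region.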
pub fn build_zero_page(
    mem_layout: &MemoryLayout,
    acpi_base: u64,
    acpi_len: usize,
    cmdline_config: &CommandLineConfig<'_>,
    initrd_base: u32,
    initrd_size: u32,
) -> defs::boot_params {
    let mut p = defs::boot_params {
        hdr: defs::setup_header {
            type_of_loader: 0xff,
            boot_flag: 0xaa55.into(),
            header: 0x53726448.into(),
            cmd_line_ptr: cmdline_config.address.try_into().expect("must fit in u32"),
            cmdline_size: (cmdline_config.cmdline.as_bytes().len() as u64)
                .try_into()
                .expect("must fit in u32"),
            ramdisk_image: initrd_base.into(),
            ramdisk_size: initrd_size.into(),
            kernel_alignment: 0x100000.into(),
            ..FromZeros::new_zeroed()
        },
        ..FromZeros::new_zeroed()
    };

    let mut ram = mem_layout.ram().iter().cloned();
    let range = ram.next().expect("at least one ram range");
    assert_eq!(range.range.start(), 0);
    assert!(range.range.end() >= 0x100000);
    assert_eq!(acpi_base, 0xe0000);
    // Split the first RAM range around the ACPI tables at 0xe0000: RAM below,
    // the (page-aligned) ACPI region, then the remaining RAM above it.
    p.e820_map[0] = defs::e820entry {
        addr: 0.into(),
        size: 0xe0000.into(),
        typ: defs::E820_RAM.into(),
    };
    let aligned_acpi_len = (acpi_len + 0xfff) & !0xfff;
    p.e820_map[1] = defs::e820entry {
        addr: 0xe0000.into(),
        size: (aligned_acpi_len as u64).into(),
        typ: defs::E820_ACPI.into(),
    };
    p.e820_map[2] = defs::e820entry {
        addr: (0xe0000 + aligned_acpi_len as u64).into(),
        size: (range.range.end() - 0xe0000 - aligned_acpi_len as u64).into(),
        typ: defs::E820_RAM.into(),
    };
    // Report any remaining RAM ranges verbatim.
    let mut n = 3;
    for range in ram {
        p.e820_map[n] = defs::e820entry {
            addr: range.range.start().into(),
            size: range.range.len().into(),
            typ: defs::E820_RAM.into(),
        };
        n += 1;
    }
    p.e820_entries = n as u8;

    p
}

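/// Errors that can occur while parsing and loading a flat (non-ELF) kernel
/// image.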
#[derive(Debug, Error)]
pub enum FlatLoaderError {
    #[error("unsupported ELF file byte order")]
    BigEndianElfOnLittle,
    #[error("bad kernel image magic number")]
    BadImageMagic,
    #[error("big-endian kernel image is not supported")]
    BigEndianKernelImage,
    #[error("only images with 4K pages are supported")]
    FourKibPageImageIsRequired,
    #[error("the kernel requires low memory, which is not supported")]
    LowMemoryKernel,
    #[error("failed to read kernel image")]
    ReadKernelImage,
    #[error("failed to seek to the file offset given by the ELF program header")]
    SeekKernelStart,
    #[error("failed to seek to offset of kernel image")]
    SeekKernelImage,
}

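/// Errors returned by the Linux loader.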
#[derive(Debug, Error)]
pub enum Error {
    #[error("elf loader error")]
    ElfLoader(#[source] crate::elf::Error),
    #[error("flat loader error")]
    FlatLoader(#[source] FlatLoaderError),
    #[error("address {0:#x} is not page aligned")]
    UnalignedAddress(u64),
    #[error("importer error")]
    Importer(#[source] anyhow::Error),
}

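/// Addresses and contents of the ACPI tables to import into the guest.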
pub struct AcpiConfig<'a> {
    pub rdsp_address: u64,
    pub rdsp: &'a [u8],
    pub tables_address: u64,
    pub tables: &'a [u8],
}

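/// Configuration for building and importing the x86 `boot_params` zero page.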
pub struct ZeroPageConfig<'a> {
    /// The guest physical address to load the zero page at.
    pub address: u64,
    /// The memory layout used to build the e820 map.
    pub mem_layout: &'a MemoryLayout,
    /// The base address of the ACPI tables.
    pub acpi_base_address: u64,
    /// The length of the ACPI tables, in bytes.
    pub acpi_len: usize,
}

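/// The kernel command line and the guest physical address to load it at.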
pub struct CommandLineConfig<'a> {
    pub address: u64,
    pub cmdline: &'a CString,
}

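/// Guest physical addresses of the boot-time GDT and identity-map page tables.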
pub struct RegisterConfig {
    pub gdt_address: u64,
    pub page_table_address: u64,
}

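/// Where to place the initrd in guest memory.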
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum InitrdAddressType {
    /// Place the initrd after the kernel, aligned up to the next large page.
    AfterKernel,
    /// Place the initrd at the specified guest physical address.
    Address(u64),
}

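/// The initrd contents and placement policy.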
pub struct InitrdConfig<'a> {
    pub initrd_address: InitrdAddressType,
    pub initrd: &'a [u8],
}

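/// Where the kernel was loaded in guest memory.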
#[derive(Debug, Default)]
pub struct KernelInfo {
    /// The guest physical address the kernel was loaded at.
    pub gpa: u64,
    /// The size of the loaded kernel, in bytes.
    pub size: u64,
    /// The guest physical address of the kernel entry point.
    pub entrypoint: u64,
}

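/// Where the initrd was loaded in guest memory.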
#[derive(Debug, Default)]
pub struct InitrdInfo {
    /// The guest physical address the initrd was loaded at.
    pub gpa: u64,
    /// The size of the initrd, in bytes.
    pub size: u64,
}

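/// Information about what was loaded into guest memory.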
#[derive(Debug, Default)]
pub struct LoadInfo {
    /// The kernel load information.
    pub kernel: KernelInfo,
    /// The initrd load information, if an initrd was provided.
    pub initrd: Option<InitrdInfo>,
    /// The guest physical address range of the device tree blob, if one was
    /// loaded.
    pub dtb: Option<std::ops::Range<u64>>,
}

fn check_address_alignment(address: u64) -> Result<(), Error> {
    if address % HV_PAGE_SIZE != 0 {
        Err(Error::UnalignedAddress(address))
    } else {
        Ok(())
    }
}

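/// Imports the initrd, if any, placing it either at the configured address or
/// after the kernel at `next_addr` rounded up to a large page boundary.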
fn import_initrd<R: GuestArch>(
    initrd: Option<InitrdConfig<'_>>,
    next_addr: u64,
    importer: &mut dyn ImageLoad<R>,
) -> Result<Option<InitrdInfo>, Error> {
    let initrd_info = match &initrd {
        Some(cfg) => {
            let initrd_address = match cfg.initrd_address {
                InitrdAddressType::AfterKernel => align_up_to_large_page_size(next_addr),
                InitrdAddressType::Address(addr) => addr,
            };

            tracing::trace!(initrd_address, "loading initrd");
            check_address_alignment(initrd_address)?;
            let initrd_size_pages = align_up_to_page_size(cfg.initrd.len() as u64) / HV_PAGE_SIZE;
            importer
                .import_pages(
                    initrd_address / HV_PAGE_SIZE,
                    initrd_size_pages,
                    "linux-initrd",
                    BootPageAcceptance::Exclusive,
                    cfg.initrd,
                )
                .map_err(Error::Importer)?;

            Some(InitrdInfo {
                gpa: initrd_address,
                size: cfg.initrd.len() as u64,
            })
        }
        None => None,
    };
    Ok(initrd_info)
}

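/// Loads a static x86-64 ELF kernel and optional initrd into guest memory,
/// starting no lower than `kernel_minimum_start_address`. Returns the load
/// addresses and the kernel entry point.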
pub fn load_kernel_and_initrd_x64<F>(
    importer: &mut dyn ImageLoad<X86Register>,
    kernel_image: &mut F,
    kernel_minimum_start_address: u64,
    initrd: Option<InitrdConfig<'_>>,
) -> Result<LoadInfo, Error>
where
    F: std::io::Read + std::io::Seek,
{
    tracing::trace!(kernel_minimum_start_address, "loading x86_64 kernel");
    let crate::elf::LoadInfo {
        minimum_address_used: min_addr,
        next_available_address: next_addr,
        entrypoint,
    } = load_static_elf(
        importer,
        kernel_image,
        kernel_minimum_start_address,
        0,
        false,
        BootPageAcceptance::Exclusive,
        "linux-kernel",
    )
    .map_err(Error::ElfLoader)?;
    tracing::trace!(min_addr, next_addr, entrypoint, "loaded kernel");

    let initrd_info = import_initrd(initrd, next_addr, importer)?;

    Ok(LoadInfo {
        kernel: KernelInfo {
            gpa: min_addr,
            size: next_addr - min_addr,
            entrypoint,
        },
        initrd: initrd_info,
        dtb: None,
    })
}

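/// Imports the boot configuration for an x86-64 direct boot: the command
/// line, GDT, identity-map page tables, ACPI tables, and zero page, and sets
/// the initial VP registers (long mode with paging enabled, RIP at the kernel
/// entry point, and RSI pointing at the zero page).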
pub fn load_config(
    importer: &mut impl ImageLoad<X86Register>,
    load_info: &LoadInfo,
    command_line: CommandLineConfig<'_>,
    zero_page: ZeroPageConfig<'_>,
    acpi: AcpiConfig<'_>,
    registers: RegisterConfig,
) -> Result<(), Error> {
    tracing::trace!(command_line.address);
    let raw_cmdline = command_line.cmdline.as_bytes_with_nul();
    // Only import the command line if it is non-empty (more than just the nul
    // terminator).
    if raw_cmdline.len() > 1 {
        check_address_alignment(command_line.address)?;
        let cmdline_size_pages = align_up_to_page_size(raw_cmdline.len() as u64) / HV_PAGE_SIZE;
        importer
            .import_pages(
                command_line.address / HV_PAGE_SIZE,
                cmdline_size_pages,
                "linux-commandline",
                BootPageAcceptance::Exclusive,
                raw_cmdline,
            )
            .map_err(Error::Importer)?;
    }

    check_address_alignment(registers.gdt_address)?;
    import_default_gdt(importer, registers.gdt_address / HV_PAGE_SIZE).map_err(Error::Importer)?;
    check_address_alignment(registers.page_table_address)?;
    let page_table = build_page_tables_64(
        registers.page_table_address,
        0,
        IdentityMapSize::Size4Gb,
        None,
    );
    assert!(page_table.len() as u64 % HV_PAGE_SIZE == 0);
    importer
        .import_pages(
            registers.page_table_address / HV_PAGE_SIZE,
            page_table.len() as u64 / HV_PAGE_SIZE,
            "linux-pagetables",
            BootPageAcceptance::Exclusive,
            &page_table,
        )
        .map_err(Error::Importer)?;

    check_address_alignment(acpi.rdsp_address)?;
    check_address_alignment(acpi.tables_address)?;
    let acpi_tables_size_pages = align_up_to_page_size(acpi.tables.len() as u64) / HV_PAGE_SIZE;
    importer
        .import_pages(
            acpi.rdsp_address / HV_PAGE_SIZE,
            1,
            "linux-rdsp",
            BootPageAcceptance::Exclusive,
            acpi.rdsp,
        )
        .map_err(Error::Importer)?;
    importer
        .import_pages(
            acpi.tables_address / HV_PAGE_SIZE,
            acpi_tables_size_pages,
            "linux-acpi-tables",
            BootPageAcceptance::Exclusive,
            acpi.tables,
        )
        .map_err(Error::Importer)?;

    check_address_alignment(zero_page.address)?;
    let boot_params = build_zero_page(
        zero_page.mem_layout,
        zero_page.acpi_base_address,
        zero_page.acpi_len,
        &command_line,
        load_info.initrd.as_ref().map(|info| info.gpa).unwrap_or(0) as u32,
        load_info.initrd.as_ref().map(|info| info.size).unwrap_or(0) as u32,
    );
    importer
        .import_pages(
            zero_page.address / HV_PAGE_SIZE,
            1,
            "linux-zeropage",
            BootPageAcceptance::Exclusive,
            boot_params.as_bytes(),
        )
        .map_err(Error::Importer)?;

    let mut import_reg = |register| {
        importer
            .import_vp_register(register)
            .map_err(Error::Importer)
    };

    // Enter long mode with paging enabled.
    import_reg(X86Register::Cr0(x86defs::X64_CR0_PG | x86defs::X64_CR0_PE))?;
    import_reg(X86Register::Cr3(registers.page_table_address))?;
    import_reg(X86Register::Cr4(x86defs::X64_CR4_PAE))?;
    import_reg(X86Register::Efer(
        x86defs::X64_EFER_SCE
            | x86defs::X64_EFER_LME
            | x86defs::X64_EFER_LMA
            | x86defs::X64_EFER_NXE,
    ))?;
    import_reg(X86Register::Pat(x86defs::X86X_MSR_DEFAULT_PAT))?;

    // Start at the kernel entry point with RSI pointing to the zero page, per
    // the Linux 64-bit boot protocol.
    import_reg(X86Register::Rip(load_info.kernel.entrypoint))?;
    import_reg(X86Register::Rsi(zero_page.address))?;

    // Enable MTRRs (default type UC) and mark low memory as writeback via the
    // fixed-range MTRRs.
    import_reg(X86Register::MtrrDefType(0xc00))?;
    import_reg(X86Register::MtrrFix64k00000(0x0606060606060606))?;
    import_reg(X86Register::MtrrFix16k80000(0x0606060606060606))?;

    Ok(())
}

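/// Loads an x86-64 Linux kernel and optional initrd, then imports the boot
/// configuration and initial register state for a direct boot.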
pub fn load_x86<F>(
    importer: &mut impl ImageLoad<X86Register>,
    kernel_image: &mut F,
    kernel_minimum_start_address: u64,
    initrd: Option<InitrdConfig<'_>>,
    command_line: CommandLineConfig<'_>,
    zero_page: ZeroPageConfig<'_>,
    acpi: AcpiConfig<'_>,
    registers: RegisterConfig,
) -> Result<LoadInfo, Error>
where
    F: std::io::Read + std::io::Seek,
{
    let load_info =
        load_kernel_and_initrd_x64(importer, kernel_image, kernel_minimum_start_address, initrd)?;

    load_config(
        importer,
        &load_info,
        command_line,
        zero_page,
        acpi,
        registers,
    )?;

    Ok(load_info)
}

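// The translation granule (page size) advertised in the Arm64 kernel image
// header flags.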
open_enum::open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum Aarch64ImagePageSize: u64 {
        UNSPECIFIED = 0,
        PAGE4_K = 1,
        PAGE16_K = 2,
        PAGE64_K = 3,
    }
}

// Conversions required by `bitfield_struct` for the `page_size` field of
// `Aarch64ImageFlags`.
impl Aarch64ImagePageSize {
    const fn into_bits(self) -> u64 {
        self.0
    }

    const fn from_bits(bits: u64) -> Self {
        Self(bits)
    }
}

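/// Flags from the Arm64 kernel image header (the `flags` field), as defined
/// by the Linux arm64 boot protocol.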
#[bitfield(u64)]
struct Aarch64ImageFlags {
    /// Bit 0: the kernel image is big-endian.
    #[bits(1)]
    pub big_endian: bool,
    /// Bits 1-2: the kernel page size.
    #[bits(2)]
    pub page_size: Aarch64ImagePageSize,
    /// Bit 3: the 2 MiB aligned kernel base may be placed anywhere in
    /// physical memory, not just near the base of RAM.
    #[bits(1)]
    pub any_start_address: bool,
    #[bits(60)]
    pub _padding: u64,
}

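/// The 64-byte header at the start of a flat Arm64 kernel image.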
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
#[repr(C)]
struct Aarch64ImageHeader {
    /// Executable code (branch to the kernel entry point).
    _code0: u32,
    /// Executable code.
    _code1: u32,
    /// Image load offset from the start of RAM, little-endian.
    text_offset: u64,
    /// Effective image size, little-endian.
    image_size: u64,
    /// Kernel flags, little-endian.
    flags: u64,
    /// Reserved.
    _res2: u64,
    /// Reserved.
    _res3: u64,
    /// Reserved.
    _res4: u64,
    /// Magic number: b"ARM\x64".
    magic: [u8; 4],
    /// Reserved.
    _res5: u32,
}

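/// The magic value ("ARM\x64") identifying an Arm64 kernel image.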
const AARCH64_MAGIC_NUMBER: &[u8] = b"ARM\x64";

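/// Loads a flat Arm64 kernel image (and optional initrd and device tree blob)
/// into guest memory at `kernel_minimum_start_address`, which must be 2 MiB
/// aligned. Returns the load addresses and the kernel entry point.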
pub fn load_kernel_and_initrd_arm64<F>(
    importer: &mut dyn ImageLoad<Aarch64Register>,
    kernel_image: &mut F,
    kernel_minimum_start_address: u64,
    initrd: Option<InitrdConfig<'_>>,
    device_tree_blob: Option<&[u8]>,
) -> Result<LoadInfo, Error>
where
    F: std::io::Read + std::io::Seek,
{
    tracing::trace!(kernel_minimum_start_address, "loading aarch64 kernel");

    assert_eq!(
        kernel_minimum_start_address & ((1 << 21) - 1),
        0,
        "start address must be 2 MiB aligned"
    );

    kernel_image
        .seek(std::io::SeekFrom::Start(0))
        .map_err(|_| Error::FlatLoader(FlatLoaderError::SeekKernelStart))?;

    let mut header = Aarch64ImageHeader::new_zeroed();
    kernel_image
        .read_exact(header.as_mut_bytes())
        .map_err(|_| Error::FlatLoader(FlatLoaderError::ReadKernelImage))?;

    tracing::debug!("aarch64 kernel header {header:x?}");

    if header.magic != AARCH64_MAGIC_NUMBER {
        return Err(Error::FlatLoader(FlatLoaderError::BadImageMagic));
    }

    let flags = Aarch64ImageFlags::from(header.flags);
    if flags.big_endian() {
        return Err(Error::FlatLoader(FlatLoaderError::BigEndianKernelImage));
    }
    if flags.page_size() != Aarch64ImagePageSize::PAGE4_K {
        return Err(Error::FlatLoader(
            FlatLoaderError::FourKibPageImageIsRequired,
        ));
    }
    if !flags.any_start_address() {
        return Err(Error::FlatLoader(FlatLoaderError::LowMemoryKernel));
    }

    // Seek back to the start and read the entire image (including the header)
    // so it can be imported as-is.
    kernel_image
        .seek(std::io::SeekFrom::Start(0))
        .map_err(|_| Error::FlatLoader(FlatLoaderError::SeekKernelStart))?;

    let mut image = Vec::new();
    kernel_image
        .read_to_end(&mut image)
        .map_err(|_| Error::FlatLoader(FlatLoaderError::ReadKernelImage))?;

    let kernel_load_offset = (kernel_minimum_start_address + header.text_offset) as usize;
    let kernel_size = if header.image_size != 0 {
        header.image_size
    } else {
        image.len() as u64
    };

    let kernel_size = align_up_to_page_size(kernel_size);
    importer
        .import_pages(
            kernel_load_offset as u64 / HV_PAGE_SIZE,
            kernel_size / HV_PAGE_SIZE,
            "linux-kernel",
            BootPageAcceptance::Exclusive,
            &image,
        )
        .map_err(Error::Importer)?;

    let next_addr = kernel_load_offset as u64 + kernel_size;

    // Place the device tree blob, if provided, on the next page after the
    // kernel.
    let (next_addr, dtb) = if let Some(device_tree_blob) = device_tree_blob {
        let dtb_addr = align_up_to_page_size(next_addr);
        tracing::trace!(dtb_addr, "loading device tree blob");

        check_address_alignment(dtb_addr)?;
        let dtb_size_pages = align_up_to_page_size(device_tree_blob.len() as u64) / HV_PAGE_SIZE;

        importer
            .import_pages(
                dtb_addr / HV_PAGE_SIZE,
                dtb_size_pages,
                "linux-device-tree",
                BootPageAcceptance::Exclusive,
                device_tree_blob,
            )
            .map_err(Error::Importer)?;

        (
            dtb_addr + device_tree_blob.len() as u64,
            Some(dtb_addr..dtb_addr + device_tree_blob.len() as u64),
        )
    } else {
        (next_addr, None)
    };

    let initrd_info = import_initrd(initrd, next_addr, importer)?;

    Ok(LoadInfo {
        kernel: KernelInfo {
            gpa: kernel_minimum_start_address,
            size: kernel_size,
            entrypoint: kernel_load_offset as u64,
        },
        initrd: initrd_info,
        dtb,
    })
}

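/// Sets the initial VP registers for an Arm64 direct boot: EL1 with the MMU
/// disabled, exceptions masked, PC at the kernel entry point, and X0 holding
/// the device tree address when one was loaded, as the Linux arm64 boot
/// protocol requires.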
pub fn set_direct_boot_registers_arm64(
    importer: &mut impl ImageLoad<Aarch64Register>,
    load_info: &LoadInfo,
) -> Result<(), Error> {
    let mut import_reg = |register| {
        importer
            .import_vp_register(register)
            .map_err(Error::Importer)
    };

    import_reg(Aarch64Register::Pc(load_info.kernel.entrypoint))?;
    // EL1 using SP_EL1, with debug, SError, IRQ, and FIQ exceptions masked.
    import_reg(Aarch64Register::Cpsr(
        Cpsr64::new()
            .with_sp(true)
            .with_el(1)
            .with_f(true)
            .with_i(true)
            .with_a(true)
            .with_d(true)
            .into(),
    ))?;
    // MMU disabled, instruction and data caches enabled.
    import_reg(Aarch64Register::SctlrEl1(
        SctlrEl1::new()
            .with_m(false)
            .with_c(true)
            .with_i(true)
            .with_eos(true)
            .with_tscxt(true)
            .with_eis(true)
            .with_span(true)
            .with_n_tlsmd(true)
            .with_lsmaoe(true)
            .into(),
    ))?;
    // Translation is not used at boot: disable table walks through both
    // TTBR0_EL1 and TTBR1_EL1, using 4 KiB granules and a 48-bit IPA space.
    import_reg(Aarch64Register::TcrEl1(
        TranslationControlEl1::new()
            .with_t0sz(0x11)
            .with_irgn0(1)
            .with_orgn0(1)
            .with_sh0(3)
            .with_tg0(TranslationGranule0::TG_4KB)
            .with_epd0(1)
            .with_epd1(1)
            .with_tg1(TranslationGranule1::TG_4KB)
            .with_ips(IntermPhysAddrSize::IPA_48_BITS_256_TB)
            .into(),
    ))?;
    import_reg(Aarch64Register::Ttbr0El1(TranslationBaseEl1::new().into()))?;
    import_reg(Aarch64Register::Ttbr1El1(TranslationBaseEl1::new().into()))?;
    import_reg(Aarch64Register::VbarEl1(0))?;

    // The arm64 boot protocol requires x0 to hold the physical address of the
    // device tree blob.
    if let Some(dtb) = &load_info.dtb {
        import_reg(Aarch64Register::X0(dtb.start))?;
    }

    Ok(())
}