loader/
paravisor.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Paravisor specific loader definitions and implementation.
5
6use crate::cpuid::HV_PSP_CPUID_PAGE;
7use crate::importer::Aarch64Register;
8use crate::importer::BootPageAcceptance;
9use crate::importer::IgvmParameterType;
10use crate::importer::ImageLoad;
11use crate::importer::IsolationConfig;
12use crate::importer::IsolationType;
13use crate::importer::SegmentRegister;
14use crate::importer::StartupMemoryType;
15use crate::importer::TableRegister;
16use crate::importer::X86Register;
17use crate::linux::InitrdAddressType;
18use crate::linux::InitrdConfig;
19use crate::linux::InitrdInfo;
20use crate::linux::KernelInfo;
21use crate::linux::load_kernel_and_initrd_arm64;
22use aarch64defs::Cpsr64;
23use aarch64defs::IntermPhysAddrSize;
24use aarch64defs::SctlrEl1;
25use aarch64defs::TranslationBaseEl1;
26use aarch64defs::TranslationControlEl1;
27use aarch64defs::TranslationGranule0;
28use aarch64defs::TranslationGranule1;
29use hvdef::HV_PAGE_SIZE;
30use hvdef::Vtl;
31use igvm::registers::AArch64Register;
32use loader_defs::paravisor::*;
33use loader_defs::shim::ShimParamsRaw;
34use memory_range::MemoryRange;
35use page_table::aarch64::Arm64PageSize;
36use page_table::aarch64::MemoryAttributeEl1;
37use page_table::aarch64::MemoryAttributeIndirectionEl1;
38use page_table::x64::PageTableBuilder;
39use page_table::x64::X64_LARGE_PAGE_SIZE;
40use page_table::x64::align_up_to_large_page_size;
41use page_table::x64::align_up_to_page_size;
42use page_table::x64::calculate_pde_table_count;
43use thiserror::Error;
44use x86defs::GdtEntry;
45use x86defs::SegmentSelector;
46use x86defs::X64_BUSY_TSS_SEGMENT_ATTRIBUTES;
47use x86defs::X64_DEFAULT_CODE_SEGMENT_ATTRIBUTES;
48use x86defs::X64_DEFAULT_DATA_SEGMENT_ATTRIBUTES;
49use x86defs::cpuid::CpuidFunction;
50use zerocopy::FromZeros;
51use zerocopy::IntoBytes;
52
/// Configuration for booting a Linux kernel directly in VTL0 from the
/// paravisor.
#[derive(Debug)]
pub struct Vtl0Linux<'a> {
    /// The VTL0 kernel command line; imported with its trailing NUL byte.
    pub command_line: &'a std::ffi::CString,
    /// Load information (kernel gpa/size/entrypoint and optional initrd)
    /// produced by the VTL0 Linux loader.
    pub load_info: crate::linux::LoadInfo,
}
58
/// Which VTL0 boot environments the loaded paravisor image supports. This is
/// described to the paravisor via the VTL0 measured config page.
#[derive(Debug)]
pub struct Vtl0Config<'a> {
    /// Whether PCAT (BIOS) boot is supported. Not supported on arm64.
    pub supports_pcat: bool,
    /// The load info and the VP context page.
    pub supports_uefi: Option<(crate::uefi::LoadInfo, Vec<u8>)>,
    /// Linux direct boot configuration, if supported.
    pub supports_linux: Option<Vtl0Linux<'a>>,
}
66
// See HclDefs.h
/// The secure VTL used by the HCL (paravisor): VTL2.
pub const HCL_SECURE_VTL: Vtl = Vtl::Vtl2;
69
/// Errors that can occur while loading the OpenHCL (underhill) image.
#[derive(Debug, Error)]
pub enum Error {
    /// An address or size did not meet the required alignment (page or 2MB
    /// large page).
    #[error("memory is unaligned: {0}")]
    MemoryUnaligned(u64),
    /// The kernel command line exceeded `COMMAND_LINE_SIZE` bytes.
    #[error("command line too large: {0}")]
    CommandLineSize(usize),
    /// The kernel (or sidecar) ELF image failed to load.
    #[error("kernel load error")]
    Kernel(#[source] crate::linux::Error),
    /// The boot shim ELF image failed to load.
    #[error("shim load error")]
    Shim(#[source] crate::elf::Error),
    /// The initrd size was invalid.
    #[error("invalid initrd size: {0}")]
    InvalidInitrdSize(u64),
    /// The laid-out image did not fit in the available memory; carries the
    /// number of bytes the layout consumed.
    #[error("memory used: {0} is greater than available")]
    NotEnoughMemory(u64),
    /// An error reported by the image importer.
    #[error("importer error")]
    Importer(#[from] anyhow::Error),
}
87
/// Kernel Command line type.
pub enum CommandLineType<'a> {
    /// The command line is a static string.
    Static(&'a str),
    /// The command line is dynamic and host appendable via the chosen node in
    /// device tree, with initial data specified by the provided string. An
    /// empty base string may be provided to allow the host to specify the
    /// full kernel command line.
    HostAppendable(&'a str),
}
98
/// Load the underhill kernel on x64.
///
/// An optional initrd may be specified.
///
/// An optional `memory_page_base` may be specified. This will disable
/// relocation support for underhill.
///
/// # Arguments
///
/// * `importer` - Destination for imported pages, VP registers and IGVM
///   parameter areas.
/// * `kernel_image` - The uncompressed x64 kernel ELF image.
/// * `shim` - The openhcl boot shim ELF image.
/// * `sidecar` - Optional sidecar (AP) kernel ELF image.
/// * `command_line` - The kernel command line and its append policy.
/// * `initrd` - Optional initrd contents to import.
/// * `memory_page_base` - Optional explicit load base, in pages. When `None`,
///   `PARAVISOR_DEFAULT_MEMORY_BASE_ADDRESS` is used.
/// * `memory_page_count` - Size of the VTL2 memory region, in pages.
/// * `vtl0_config` - VTL0 boot environments to describe in the measured
///   config.
pub fn load_openhcl_x64<F>(
    importer: &mut dyn ImageLoad<X86Register>,
    kernel_image: &mut F,
    shim: &mut F,
    sidecar: Option<&mut F>,
    command_line: CommandLineType<'_>,
    initrd: Option<&[u8]>,
    memory_page_base: Option<u64>,
    memory_page_count: u64,
    vtl0_config: Vtl0Config<'_>,
) -> Result<(), Error>
where
    F: std::io::Read + std::io::Seek,
{
    let IsolationConfig {
        isolation_type,
        paravisor_present,
        shared_gpa_boundary_bits,
    } = importer.isolation_config();

    // If no explicit memory base is specified, load with relocation support.
    // Relocation is only supported when not hardware isolated.
    let with_relocation = memory_page_base.is_none() && isolation_type == IsolationType::None;

    let memory_start_address = memory_page_base
        .map(|page_number| page_number * HV_PAGE_SIZE)
        .unwrap_or(PARAVISOR_DEFAULT_MEMORY_BASE_ADDRESS);

    let memory_size = memory_page_count * HV_PAGE_SIZE;

    // OpenHCL is laid out as the following:
    // --- High Memory, 2MB aligned ---
    // free space
    //
    // page tables
    // IGVM parameters
    // reserved vtl2 ranges
    // initrd
    // openhcl_boot
    // sidecar, if configured
    // - pad to next 2MB -
    // kernel
    // optional 2mb bounce buf for CVM
    // --- Low memory, 2MB aligned ---

    // Paravisor memory ranges must be 2MB (large page) aligned.
    if memory_start_address % X64_LARGE_PAGE_SIZE != 0 {
        return Err(Error::MemoryUnaligned(memory_start_address));
    }

    if memory_size % X64_LARGE_PAGE_SIZE != 0 {
        return Err(Error::MemoryUnaligned(memory_size));
    }

    // The whole memory range must be present and VTL2 protectable for the
    // underhill kernel to work.
    importer.verify_startup_memory_available(
        memory_start_address / HV_PAGE_SIZE,
        memory_page_count,
        if paravisor_present {
            StartupMemoryType::Vtl2ProtectableRam
        } else {
            StartupMemoryType::Ram
        },
    )?;

    // On hardware-isolated platforms the kernel pages are imported as shared;
    // otherwise they are exclusive to VTL2.
    let kernel_acceptance = match isolation_type {
        IsolationType::Snp | IsolationType::Tdx => BootPageAcceptance::Shared,
        _ => BootPageAcceptance::Exclusive,
    };

    // Running cursor for the layout; everything below allocates by advancing
    // `offset` from the low end of the region.
    let mut offset = memory_start_address;

    // If hardware isolated, reserve a 2MB range for bounce buffering shared
    // pages. This is done first because we know the start address is 2MB
    // aligned, with the next consumers wanting 2MB aligned ranges. This is
    // reserved at load time in order to guarantee the pagetables have entries
    // for this identity mapping.
    //
    // Leave this as a gap, as there's no need to accept or describe this range
    // in the IGVM file.
    let bounce_buffer = if matches!(isolation_type, IsolationType::Snp | IsolationType::Tdx) {
        let bounce_buffer_gpa = offset;
        assert_eq!(bounce_buffer_gpa % X64_LARGE_PAGE_SIZE, 0);
        let range = MemoryRange::new(bounce_buffer_gpa..bounce_buffer_gpa + X64_LARGE_PAGE_SIZE);

        offset += range.len();
        Some(range)
    } else {
        None
    };

    tracing::trace!(offset, "loading the kernel");

    // The x86_64 uncompressed kernel we use doesn't show any difference
    // in the code sections upon flipping CONFIG_RELOCATABLE. In total,
    // there are 6 places where a difference is found: dates in the Linux
    // banner, GNU build ID, and metadata entries in the empty initrd image
    // (it always is embedded into the kernel). No sections with relocations
    // appear if CONFIG_RELOCATABLE is set.
    // Assume that at least the kernel entry contains PIC and no loader
    // assistance with the relocations records (if any) is required.
    let load_info = crate::elf::load_static_elf(
        importer,
        kernel_image,
        offset,
        0,
        true,
        kernel_acceptance,
        "underhill-kernel",
    )
    .map_err(|e| Error::Kernel(crate::linux::Error::ElfLoader(e)))?;
    tracing::trace!("Kernel loaded at {load_info:x?}");
    // Note: `offset` is rebound (shadowed) to the next free address after the
    // kernel.
    let crate::elf::LoadInfo {
        minimum_address_used: _min_addr,
        next_available_address: mut offset,
        entrypoint: kernel_entrypoint,
    } = load_info;

    // The kernel loader must leave us page aligned.
    assert_eq!(offset & (HV_PAGE_SIZE - 1), 0);

    // If an AP kernel was provided, load it next.
    let (sidecar_size, sidecar_entrypoint) = if let Some(sidecar) = sidecar {
        // Sidecar load addr must be 2MB aligned
        offset = align_up_to_large_page_size(offset);

        let load_info = crate::elf::load_static_elf(
            importer,
            sidecar,
            0,
            offset,
            false,
            BootPageAcceptance::Exclusive,
            "sidecar-kernel",
        )
        .map_err(|e| Error::Kernel(crate::linux::Error::ElfLoader(e)))?;

        (
            load_info.next_available_address - offset,
            load_info.entrypoint,
        )
    } else {
        (0, 0)
    };

    let sidecar_base = offset;
    offset += sidecar_size;

    // Load the boot shim after the sidecar (or the kernel, if no sidecar).
    let load_info = crate::elf::load_static_elf(
        importer,
        shim,
        0,
        offset,
        false,
        BootPageAcceptance::Exclusive,
        "underhill-boot-shim",
    )
    .map_err(Error::Shim)?;
    tracing::trace!("The boot shim loaded at {load_info:x?}");
    let crate::elf::LoadInfo {
        minimum_address_used: shim_base_addr,
        next_available_address: mut offset,
        entrypoint: shim_entry_address,
    } = load_info;

    // Optionally import initrd if specified.
    let ramdisk = if let Some(initrd) = initrd {
        let initrd_base = offset;
        let initrd_size = align_up_to_page_size(initrd.len() as u64);

        importer.import_pages(
            initrd_base / HV_PAGE_SIZE,
            initrd_size / HV_PAGE_SIZE,
            "underhill-initrd",
            kernel_acceptance,
            initrd,
        )?;

        offset += initrd_size;
        // Record the unaligned length; the shim gets the exact byte size.
        Some((initrd_base, initrd.len() as u64))
    } else {
        None
    };

    // One page for the GDT.
    let gdt_base_address = offset;
    let gdt_size = HV_PAGE_SIZE;
    offset += gdt_size;

    // One page for the shim parameters.
    let boot_params_base = offset;
    let boot_params_size = HV_PAGE_SIZE;

    offset += boot_params_size;

    // One page for the paravisor command line structure.
    let cmdline_base = offset;
    let (cmdline, policy) = match command_line {
        CommandLineType::Static(val) => (val, CommandLinePolicy::STATIC),
        CommandLineType::HostAppendable(val) => (val, CommandLinePolicy::APPEND_CHOSEN),
    };

    if cmdline.len() > COMMAND_LINE_SIZE {
        return Err(Error::CommandLineSize(cmdline.len()));
    }

    let mut static_command_line = [0; COMMAND_LINE_SIZE];
    static_command_line[..cmdline.len()].copy_from_slice(cmdline.as_bytes());
    let paravisor_command_line = ParavisorCommandLine {
        policy,
        static_command_line_len: cmdline.len() as u16,
        static_command_line,
    };

    importer.import_pages(
        cmdline_base / HV_PAGE_SIZE,
        1,
        "underhill-command-line",
        BootPageAcceptance::Exclusive,
        paravisor_command_line.as_bytes(),
    )?;

    offset += HV_PAGE_SIZE;

    // Reserve space for the VTL2 reserved region.
    let reserved_region_size = PARAVISOR_RESERVED_VTL2_PAGE_COUNT_MAX * HV_PAGE_SIZE;
    let reserved_region_start = offset;
    offset += reserved_region_size;

    tracing::debug!(reserved_region_start);

    // Reserve space for the IGVM parameter (config) region.
    let parameter_region_size = PARAVISOR_VTL2_CONFIG_REGION_PAGE_COUNT_MAX * HV_PAGE_SIZE;
    let parameter_region_start = offset;
    offset += parameter_region_size;

    tracing::debug!(parameter_region_start);

    // The end of memory used by the loader, excluding pagetables.
    let end_of_underhill_mem = offset;

    // Page tables live at the end of VTL2 ram used by the bootshim.
    //
    // Size the available page table memory as 5 pages + 2 * 1GB of memory. This
    // allows underhill to be mapped across a 512 GB boundary when using more
    // than 1 GB, as the PDPTE will span 2 PML4E entries. Each GB of memory
    // mapped requires 1 page for 2MB pages. Give 2 extra base pages and 1
    // additional page per GB of mapped memory to allow the page table
    // relocation code to be simpler, and not need to reclaim free pages from
    // tables that have no valid entries.
    //
    // FUTURE: It would be better to change it so the shim only needs to map
    //         itself, kernel, initrd and IGVM parameters. This requires
    //         changing how the e820 map is constructed for the kernel along
    //         with changing the contract on where the IGVM parameters live
    //         within VTL2's memory.
    let local_map = match isolation_type {
        IsolationType::Snp | IsolationType::Tdx => {
            Some((PARAVISOR_LOCAL_MAP_VA, PARAVISOR_LOCAL_MAP_SIZE))
        }
        _ => None,
    };

    // HACK: On TDX, the kernel uses the ACPI AP Mailbox protocol to start APs.
    // However, the kernel assumes that all kernel ram is identity mapped, as
    // the kernel will jump to a startup routine in any arbitrary kernel ram
    // range.
    //
    // For now, describe 3GB of memory identity mapped in the page table used by
    // the mailbox assembly stub, so the kernel can start APs regardless of how
    // large the initial memory size was. An upcoming change will instead have
    // the bootshim modify the pagetable at runtime to guarantee all ranges
    // reported in the E820 map to kernel as ram are mapped.
    //
    // FUTURE: A future kernel change could remove this requirement entirely by
    // making the kernel spec compliant, and only require that the reset vector
    // page is identity mapped.

    let page_table_mapping_size = if isolation_type == IsolationType::Tdx {
        3 * 1024 * 1024 * 1024
    } else {
        memory_size
    };

    let page_table_base_page_count = 5;
    let page_table_dynamic_page_count = {
        // Double the count to allow for simpler reconstruction.
        calculate_pde_table_count(memory_start_address, page_table_mapping_size) * 2
            + local_map.map_or(0, |v| calculate_pde_table_count(v.0, v.1))
    };
    let page_table_isolation_page_count = match isolation_type {
        IsolationType::Tdx => {
            // TDX requires up to an extra 3 pages to map the reset vector as a
            // 4K page.
            3
        }
        _ => 0,
    };
    let page_table_page_count = page_table_base_page_count
        + page_table_dynamic_page_count
        + page_table_isolation_page_count;
    let page_table_region_size = HV_PAGE_SIZE * page_table_page_count;
    let page_table_region_start = offset;
    offset += page_table_region_size;

    tracing::debug!(page_table_region_start, page_table_region_size);

    let mut page_table_builder = PageTableBuilder::new(page_table_region_start)
        .with_mapped_region(memory_start_address, page_table_mapping_size);

    if let Some((local_map_start, size)) = local_map {
        page_table_builder = page_table_builder.with_local_map(local_map_start, size);
    }

    match isolation_type {
        IsolationType::Snp => {
            // SNP encodes the C-bit (confidential) in PTE bit 51.
            page_table_builder = page_table_builder.with_confidential_bit(51);
        }
        IsolationType::Tdx => {
            page_table_builder = page_table_builder.with_reset_vector(true);
        }
        _ => {}
    }

    let page_table = page_table_builder.build();

    assert!(page_table.len() as u64 % HV_PAGE_SIZE == 0);
    let page_table_page_base = page_table_region_start / HV_PAGE_SIZE;
    assert!(page_table.len() as u64 <= page_table_region_size);

    // Rebind immutably: the layout is final past this point.
    let offset = offset;

    if with_relocation {
        // Indicate relocation information. Don't include page table region.
        importer.relocation_region(
            memory_start_address,
            end_of_underhill_mem - memory_start_address,
            X64_LARGE_PAGE_SIZE,
            PARAVISOR_DEFAULT_MEMORY_BASE_ADDRESS,
            1 << 48, // NOTE(review): presumably the maximum relocation address (48-bit) — confirm against ImageLoad::relocation_region.
            true,
            true,
            0, // BSP
        )?;

        // Tell the loader page table relocation information.
        importer.page_table_relocation(
            page_table_region_start,
            page_table_region_size / HV_PAGE_SIZE,
            page_table.len() as u64 / HV_PAGE_SIZE,
            0,
        )?;
    }

    // The memory used by the loader must be smaller than the memory available.
    if offset > memory_start_address + memory_size {
        return Err(Error::NotEnoughMemory(offset - memory_start_address));
    }

    let (initrd_base, initrd_size) = ramdisk.unwrap_or((0, 0));
    // Shim parameters for locations are relative to the base of where the shim is loaded.
    // Offsets can be negative (ranges below the shim base), hence wrapping_sub
    // reinterpreted as i64.
    let calculate_shim_offset = |addr: u64| addr.wrapping_sub(shim_base_addr) as i64;
    let initrd_crc = crc32fast::hash(initrd.unwrap_or(&[]));
    let shim_params = ShimParamsRaw {
        kernel_entry_offset: calculate_shim_offset(kernel_entrypoint),
        cmdline_offset: calculate_shim_offset(cmdline_base),
        initrd_offset: calculate_shim_offset(initrd_base),
        initrd_size,
        initrd_crc,
        supported_isolation_type: match isolation_type {
            // To the shim, None and VBS isolation are the same. The shim
            // queries CPUID when running to determine if page acceptance needs
            // to be done.
            IsolationType::None | IsolationType::Vbs => {
                loader_defs::shim::SupportedIsolationType::VBS
            }
            IsolationType::Snp => loader_defs::shim::SupportedIsolationType::SNP,
            IsolationType::Tdx => loader_defs::shim::SupportedIsolationType::TDX,
        },
        memory_start_offset: calculate_shim_offset(memory_start_address),
        memory_size,
        parameter_region_offset: calculate_shim_offset(parameter_region_start),
        parameter_region_size,
        vtl2_reserved_region_offset: calculate_shim_offset(reserved_region_start),
        vtl2_reserved_region_size: reserved_region_size,
        sidecar_offset: calculate_shim_offset(sidecar_base),
        sidecar_size,
        sidecar_entry_offset: calculate_shim_offset(sidecar_entrypoint),
        used_start: calculate_shim_offset(memory_start_address),
        used_end: calculate_shim_offset(offset),
        bounce_buffer_start: bounce_buffer.map_or(0, |r| calculate_shim_offset(r.start())),
        bounce_buffer_size: bounce_buffer.map_or(0, |r| r.len()),
        page_tables_start: calculate_shim_offset(page_table_region_start),
        page_tables_size: page_table_region_size,
    };

    tracing::debug!(boot_params_base, "shim gpa");

    importer
        .import_pages(
            boot_params_base / HV_PAGE_SIZE,
            boot_params_size / HV_PAGE_SIZE,
            "underhill-shim-params",
            BootPageAcceptance::Exclusive,
            shim_params.as_bytes(),
        )
        .map_err(Error::Importer)?;

    importer.import_pages(
        page_table_page_base,
        page_table_page_count,
        "underhill-page-tables",
        BootPageAcceptance::Exclusive,
        &page_table,
    )?;

    // Set selectors and control registers
    // Setup two selectors and segment registers.
    // ds, es, fs, gs, ss are linearSelector
    // cs is linearCode64Selector

    // GDT is laid out as (counting by the small entries):
    //  0: null descriptor,
    //  1: null descriptor,
    //  2: linear code64 descriptor,
    //  3: linear descriptor for data
    //  4: here you can add more descriptors.

    let default_data_attributes: u16 = X64_DEFAULT_DATA_SEGMENT_ATTRIBUTES.into();
    let default_code64_attributes: u16 = X64_DEFAULT_CODE_SEGMENT_ATTRIBUTES.into();
    let gdt = [
        // A large null descriptor.
        GdtEntry::new_zeroed(),
        GdtEntry::new_zeroed(),
        // Code descriptor for the long mode.
        GdtEntry {
            limit_low: 0xffff,
            attr_low: default_code64_attributes as u8,
            attr_high: (default_code64_attributes >> 8) as u8,
            ..GdtEntry::new_zeroed()
        },
        // Data descriptor.
        GdtEntry {
            limit_low: 0xffff,
            attr_low: default_data_attributes as u8,
            attr_high: (default_data_attributes >> 8) as u8,
            ..GdtEntry::new_zeroed()
        },
    ];

    const LINEAR_CODE64_DESCRIPTOR_INDEX: usize = 2;
    const LINEAR_DATA_DESCRIPTOR_INDEX: usize = 3;
    const RPL: u8 = 0x00; // requested privilege level: the highest

    let linear_code64_descriptor_selector =
        SegmentSelector::from_gdt_index(LINEAR_CODE64_DESCRIPTOR_INDEX as u16, RPL);
    let linear_data_descriptor_selector =
        SegmentSelector::from_gdt_index(LINEAR_DATA_DESCRIPTOR_INDEX as u16, RPL);

    importer.import_pages(
        gdt_base_address / HV_PAGE_SIZE,
        gdt_size / HV_PAGE_SIZE,
        "underhill-gdt",
        BootPageAcceptance::Exclusive,
        gdt.as_bytes(),
    )?;

    let mut import_reg = |register| {
        importer
            .import_vp_register(register)
            .map_err(Error::Importer)
    };

    // Import GDTR and selectors.
    import_reg(X86Register::Gdtr(TableRegister {
        base: gdt_base_address,
        limit: (size_of_val(&gdt) - 1) as u16,
    }))?;

    // All data segments share the flat linear data descriptor.
    let ds = SegmentRegister {
        selector: linear_data_descriptor_selector.into_bits(),
        base: 0,
        limit: 0xffffffff,
        attributes: default_data_attributes,
    };
    import_reg(X86Register::Ds(ds))?;
    import_reg(X86Register::Es(ds))?;
    import_reg(X86Register::Fs(ds))?;
    import_reg(X86Register::Gs(ds))?;
    import_reg(X86Register::Ss(ds))?;

    let cs = SegmentRegister {
        selector: linear_code64_descriptor_selector.into_bits(),
        base: 0,
        limit: 0xffffffff,
        attributes: default_code64_attributes,
    };
    import_reg(X86Register::Cs(cs))?;

    // TODO: Workaround an OS repo bug where enabling a higher VTL zeros TR
    //       instead of setting it to the reset default state. Manually set it
    //       to the reset default state until the OS repo is fixed.
    //
    //       In the future, we should just not set this at all.
    import_reg(X86Register::Tr(SegmentRegister {
        selector: 0x0000,
        base: 0x00000000,
        limit: 0x0000FFFF,
        attributes: X64_BUSY_TSS_SEGMENT_ATTRIBUTES.into(),
    }))?;

    // Set system registers to state expected by the boot shim, 64 bit mode with
    // paging enabled.

    // Set CR0
    import_reg(X86Register::Cr0(
        x86defs::X64_CR0_PG | x86defs::X64_CR0_PE | x86defs::X64_CR0_NE,
    ))?;

    // Set CR3 to point to page table
    import_reg(X86Register::Cr3(page_table_region_start))?;

    // Set CR4
    import_reg(X86Register::Cr4(
        x86defs::X64_CR4_PAE | x86defs::X64_CR4_MCE | x86defs::X64_CR4_OSXSAVE,
    ))?;

    // Set EFER to LMA, LME, and NXE for 64 bit mode.
    import_reg(X86Register::Efer(
        x86defs::X64_EFER_LMA | x86defs::X64_EFER_LME | x86defs::X64_EFER_NXE,
    ))?;

    // Set PAT
    import_reg(X86Register::Pat(x86defs::X86X_MSR_DEFAULT_PAT))?;

    // Setup remaining registers
    // Set %rsi to relative location of boot_params_base
    let relative_boot_params_base = boot_params_base - shim_base_addr;
    import_reg(X86Register::Rsi(relative_boot_params_base))?;

    // Set %rip to the shim entry point.
    import_reg(X86Register::Rip(shim_entry_address))?;

    // Load parameter regions.
    let config_region_page_base = parameter_region_start / HV_PAGE_SIZE;

    // Slit
    let slit_page_base = config_region_page_base + PARAVISOR_CONFIG_SLIT_PAGE_INDEX;
    let slit_parameter_area = importer.create_parameter_area(
        slit_page_base,
        PARAVISOR_CONFIG_SLIT_SIZE_PAGES as u32,
        "underhill-slit",
    )?;
    importer.import_parameter(slit_parameter_area, 0, IgvmParameterType::Slit)?;

    // Pptt
    let pptt_page_base = config_region_page_base + PARAVISOR_CONFIG_PPTT_PAGE_INDEX;
    let pptt_parameter_area = importer.create_parameter_area(
        pptt_page_base,
        PARAVISOR_CONFIG_PPTT_SIZE_PAGES as u32,
        "underhill-pptt",
    )?;
    importer.import_parameter(pptt_parameter_area, 0, IgvmParameterType::Pptt)?;

    // device tree
    let dt_page_base = config_region_page_base + PARAVISOR_CONFIG_DEVICE_TREE_PAGE_INDEX;
    let dt_parameter_area = importer.create_parameter_area(
        dt_page_base,
        PARAVISOR_CONFIG_DEVICE_TREE_SIZE_PAGES as u32,
        "underhill-device-tree",
    )?;
    importer.import_parameter(dt_parameter_area, 0, IgvmParameterType::DeviceTree)?;

    // SNP-specific pages carved out of the VTL2 reserved region: secrets page,
    // CPUID pages, and the VMSA page used for the VP context.
    if isolation_type == IsolationType::Snp {
        let reserved_region_page_base = reserved_region_start / HV_PAGE_SIZE;
        let secrets_page_base: u64 =
            reserved_region_page_base + PARAVISOR_RESERVED_VTL2_SNP_SECRETS_PAGE_INDEX;
        importer.import_pages(
            secrets_page_base,
            PARAVISOR_RESERVED_VTL2_SNP_SECRETS_SIZE_PAGES,
            "underhill-snp-secrets-page",
            BootPageAcceptance::SecretsPage,
            &[],
        )?;

        let cpuid_page = create_snp_cpuid_page();
        let cpuid_page_base =
            reserved_region_page_base + PARAVISOR_RESERVED_VTL2_SNP_CPUID_PAGE_INDEX;
        importer.import_pages(
            cpuid_page_base,
            1,
            "underhill-snp-cpuid-page",
            BootPageAcceptance::CpuidPage,
            cpuid_page.as_bytes(),
        )?;

        importer.import_pages(
            cpuid_page_base + 1,
            1,
            "underhill-snp-cpuid-extended-state-page",
            BootPageAcceptance::CpuidExtendedStatePage,
            &[],
        )?;

        let vmsa_page_base =
            reserved_region_page_base + PARAVISOR_RESERVED_VTL2_SNP_VMSA_PAGE_INDEX;
        importer.set_vp_context_page(vmsa_page_base)?;
    }

    // Load measured config.
    // The measured config is at page 0. Free pages start at page 1.
    let mut free_page = 1;
    let mut measured_config = ParavisorMeasuredVtl0Config {
        magic: ParavisorMeasuredVtl0Config::MAGIC,
        ..FromZeros::new_zeroed()
    };

    let Vtl0Config {
        supports_pcat,
        supports_uefi,
        supports_linux,
    } = vtl0_config;

    if supports_pcat {
        measured_config.supported_vtl0.set_pcat_supported(true);
    }

    if let Some((uefi, vp_context)) = &supports_uefi {
        measured_config.supported_vtl0.set_uefi_supported(true);
        let vp_context_page = free_page;
        free_page += 1;
        measured_config.uefi_info = UefiInfo {
            firmware: PageRegionDescriptor {
                base_page_number: uefi.firmware_base / HV_PAGE_SIZE,
                page_count: uefi.total_size / HV_PAGE_SIZE,
            },
            vtl0_vp_context: PageRegionDescriptor {
                base_page_number: vp_context_page,
                page_count: 1,
            },
        };

        // Deposit the UEFI vp context.
        importer.import_pages(
            vp_context_page,
            1,
            "openhcl-uefi-vp-context",
            BootPageAcceptance::Exclusive,
            vp_context,
        )?;
    }

    if let Some(linux) = supports_linux {
        measured_config
            .supported_vtl0
            .set_linux_direct_supported(true);

        let kernel_region = PageRegionDescriptor::new(
            linux.load_info.kernel.gpa / HV_PAGE_SIZE,
            align_up_to_page_size(linux.load_info.kernel.size) / HV_PAGE_SIZE,
        );

        let (initrd_region, initrd_size) = match linux.load_info.initrd {
            Some(info) => {
                if info.gpa % HV_PAGE_SIZE != 0 {
                    return Err(Error::MemoryUnaligned(info.gpa));
                }
                (
                    // initrd info is aligned up to the next page.
                    PageRegionDescriptor::new(
                        info.gpa / HV_PAGE_SIZE,
                        align_up_to_page_size(info.size) / HV_PAGE_SIZE,
                    ),
                    info.size,
                )
            }
            None => (PageRegionDescriptor::EMPTY, 0),
        };

        // This is the last dynamically assigned VTL0 page in this path, so
        // free_page is intentionally not advanced afterwards.
        let command_line_page = free_page;
        // free_page += 1;

        // Import the command line as a C string.
        importer
            .import_pages(
                command_line_page,
                1,
                "underhill-vtl0-linux-command-line",
                BootPageAcceptance::Exclusive,
                linux.command_line.as_bytes_with_nul(),
            )
            .map_err(Error::Importer)?;
        let command_line = PageRegionDescriptor::new(command_line_page, 1);

        measured_config.linux_info = LinuxInfo {
            kernel_region,
            kernel_entrypoint: linux.load_info.kernel.entrypoint,
            initrd_region,
            initrd_size,
            command_line,
        };
    }

    importer
        .import_pages(
            PARAVISOR_VTL0_MEASURED_CONFIG_BASE_PAGE_X64,
            1,
            "underhill-measured-config",
            BootPageAcceptance::Exclusive,
            measured_config.as_bytes(),
        )
        .map_err(Error::Importer)?;

    let vtl2_measured_config = ParavisorMeasuredVtl2Config {
        magic: ParavisorMeasuredVtl2Config::MAGIC,
        vtom_offset_bit: shared_gpa_boundary_bits.unwrap_or(0),
        padding: [0; 7],
    };

    importer
        .import_pages(
            config_region_page_base + PARAVISOR_MEASURED_VTL2_CONFIG_PAGE_INDEX,
            PARAVISOR_MEASURED_VTL2_CONFIG_SIZE_PAGES,
            "underhill-vtl2-measured-config",
            BootPageAcceptance::Exclusive,
            vtl2_measured_config.as_bytes(),
        )
        .map_err(Error::Importer)?;

    let imported_region_base =
        config_region_page_base + PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX;

    importer.set_imported_regions_config_page(imported_region_base);
    Ok(())
}
835
836/// Create a hypervisor SNP CPUID page with the default values.
837fn create_snp_cpuid_page() -> HV_PSP_CPUID_PAGE {
838    let mut cpuid_page = HV_PSP_CPUID_PAGE::default();
839
840    // TODO SNP: The list used here is based earlier Microsoft projects.
841    // 1. ExtendedStateEnumeration should be part of BootPageAcceptance::CpuidExtendedStatePage,
842    // but it is unclear whether Linux supports a second page. The need for the second page is that
843    // the entries in it are actually based on supported features on a specific host.
844    // 2. ExtendedStateEnumeration should specify Xfem = 3
845    for (i, required_leaf) in crate::cpuid::SNP_REQUIRED_CPUID_LEAF_LIST_PARAVISOR
846        .iter()
847        .enumerate()
848    {
849        let entry = &mut cpuid_page.cpuid_leaf_info[i];
850        entry.eax_in = required_leaf.eax;
851        entry.ecx_in = required_leaf.ecx;
852        if required_leaf.eax == CpuidFunction::ExtendedStateEnumeration.0 {
853            entry.xfem_in = 1;
854        }
855        cpuid_page.count += 1;
856    }
857
858    cpuid_page
859}
860
/// Load the underhill kernel on arm64.
///
/// Lays out, in order: kernel (+optional initrd directly after it), boot shim,
/// command line page, shim parameter page, host-filled parameter region, and
/// finally the identity-map page tables. Initial VP register state is imported
/// so that execution starts at the shim entry point with the MMU enabled.
///
/// An optional initrd may be specified.
///
/// An optional `memory_page_base` may be specified. This will disable
/// relocation support for underhill.
pub fn load_openhcl_arm64<F>(
    importer: &mut dyn ImageLoad<Aarch64Register>,
    kernel_image: &mut F,
    shim: &mut F,
    command_line: CommandLineType<'_>,
    initrd: Option<&[u8]>,
    memory_page_base: Option<u64>,
    memory_page_count: u64,
    vtl0_config: Vtl0Config<'_>,
) -> Result<(), Error>
where
    F: std::io::Read + std::io::Seek,
{
    let Vtl0Config {
        supports_pcat,
        supports_uefi,
        supports_linux,
    } = vtl0_config;

    // PCAT is not supported on ARM64; at least one VTL0 boot method must be
    // configured.
    assert!(!supports_pcat);
    assert!(supports_uefi.is_some() || supports_linux.is_some());

    let paravisor_present = importer.isolation_config().paravisor_present;

    // If no explicit memory base is specified, load with relocation support.
    let with_relocation = memory_page_base.is_none();

    let memory_start_address = memory_page_base
        .map(|page_number| page_number * HV_PAGE_SIZE)
        .unwrap_or(PARAVISOR_DEFAULT_MEMORY_BASE_ADDRESS);

    let memory_size = memory_page_count * HV_PAGE_SIZE;

    // Paravisor memory ranges must be 2MB (large page) aligned.
    if memory_start_address % u64::from(Arm64PageSize::Large) != 0 {
        return Err(Error::MemoryUnaligned(memory_start_address));
    }

    if memory_size % u64::from(Arm64PageSize::Large) != 0 {
        return Err(Error::MemoryUnaligned(memory_size));
    }

    // The whole memory range must be present and VTL2 protectable for the
    // underhill kernel to work.
    importer.verify_startup_memory_available(
        memory_start_address / HV_PAGE_SIZE,
        memory_page_count,
        if paravisor_present {
            StartupMemoryType::Vtl2ProtectableRam
        } else {
            StartupMemoryType::Ram
        },
    )?;

    tracing::trace!(memory_start_address, "loading the kernel");

    // The aarch64 Linux kernel image is most commonly found as a flat binary with a
    // header rather than an ELF.
    // DeviceTree is generated dynamically by the boot shim.
    let initrd_address_type = InitrdAddressType::AfterKernel;
    let initrd_config = InitrdConfig {
        initrd_address: initrd_address_type,
        initrd: initrd.unwrap_or_default(),
    };
    let device_tree_blob = None;
    let crate::linux::LoadInfo {
        kernel:
            KernelInfo {
                gpa: kernel_base,
                size: kernel_size,
                entrypoint: kernel_entry_point,
            },
        initrd: initrd_info,
        dtb,
    } = load_kernel_and_initrd_arm64(
        importer,
        kernel_image,
        memory_start_address,
        Some(initrd_config),
        device_tree_blob,
    )
    .map_err(Error::Kernel)?;

    // No DT blob was passed above, so none may come back.
    assert!(
        dtb.is_none(),
        "DeviceTree is generated dynamically by the boot shim."
    );

    tracing::trace!(kernel_base, "kernel loaded");

    // Running allocation cursor: everything below is placed at `next_addr`
    // and then advances it.
    let mut next_addr;

    let InitrdInfo {
        gpa: initrd_gpa,
        size: initrd_size,
    } = if let Some(initrd_info) = initrd_info {
        assert!(initrd_address_type == InitrdAddressType::AfterKernel);
        next_addr = initrd_info.gpa + initrd_info.size;
        initrd_info
    } else {
        // No initrd: continue laying out right after the kernel image, and
        // report a zeroed descriptor to the shim.
        next_addr = kernel_base + kernel_size;
        InitrdInfo { gpa: 0, size: 0 }
    };

    next_addr = align_up_to_page_size(next_addr);

    tracing::trace!(next_addr, "loading the boot shim");

    // NOTE: `next_addr` is intentionally re-bound (shadowed) here to the first
    // free address after the shim.
    let crate::elf::LoadInfo {
        minimum_address_used: shim_base_addr,
        next_available_address: mut next_addr,
        entrypoint: shim_entry_point,
    } = crate::elf::load_static_elf(
        importer,
        shim,
        0,
        next_addr,
        false,
        BootPageAcceptance::Exclusive,
        "underhill-boot-shim",
    )
    .map_err(Error::Shim)?;

    tracing::trace!(shim_base_addr, "boot shim loaded");

    tracing::trace!(next_addr, "loading the command line");

    let cmdline_base = next_addr;
    let (cmdline, policy) = match command_line {
        CommandLineType::Static(val) => (val, CommandLinePolicy::STATIC),
        CommandLineType::HostAppendable(val) => (val, CommandLinePolicy::APPEND_CHOSEN),
    };

    if cmdline.len() > COMMAND_LINE_SIZE {
        return Err(Error::CommandLineSize(cmdline.len()));
    }

    // Fixed-size, zero-padded buffer; actual length travels alongside it.
    let mut static_command_line = [0; COMMAND_LINE_SIZE];
    static_command_line[..cmdline.len()].copy_from_slice(cmdline.as_bytes());
    let paravisor_command_line = ParavisorCommandLine {
        policy,
        static_command_line_len: cmdline.len() as u16,
        static_command_line,
    };

    importer.import_pages(
        cmdline_base / HV_PAGE_SIZE,
        1,
        "underhill-command-line",
        BootPageAcceptance::Exclusive,
        paravisor_command_line.as_bytes(),
    )?;

    // Command line occupies exactly one page.
    next_addr += HV_PAGE_SIZE;

    tracing::trace!(next_addr, "loading the boot shim parameters");

    let shim_params_base = next_addr;
    let shim_params_size = HV_PAGE_SIZE;

    next_addr += shim_params_size;

    // Reserve the host-visible parameter region (SLIT/PPTT/DT/etc. areas are
    // carved out of it further below).
    let parameter_region_size = PARAVISOR_VTL2_CONFIG_REGION_PAGE_COUNT_MAX * HV_PAGE_SIZE;
    let parameter_region_start = next_addr;
    next_addr += parameter_region_size;

    tracing::debug!(parameter_region_start);

    // The end of memory used by the loader, excluding pagetables.
    let end_of_underhill_mem = next_addr;

    // Page tables live at the end of the VTL2 imported region, which allows it
    // to be relocated separately.
    // The region is sized with 2x headroom over the base count; the asserts
    // after building the tables require the built tables to stay strictly
    // smaller than the region.
    let page_table_base_page_count = 5;
    let page_table_dynamic_page_count = 2 * page_table_base_page_count;
    let page_table_page_count = page_table_base_page_count + page_table_dynamic_page_count;
    let page_table_region_size = HV_PAGE_SIZE * page_table_page_count;
    let page_table_region_start = next_addr;
    next_addr += page_table_region_size;

    tracing::debug!(page_table_region_start, page_table_region_size);

    // Freeze the cursor; layout is final from here on.
    let next_addr = next_addr;

    // The memory used by the loader must be smaller than the memory available.
    if next_addr > memory_start_address + memory_size {
        return Err(Error::NotEnoughMemory(next_addr - memory_start_address));
    }

    // Shim parameters for locations are relative to the base of where the shim is loaded.
    // wrapping_sub + i64 cast yields a signed offset, so addresses below the
    // shim base are representable as negative offsets.
    let calculate_shim_offset = |addr: u64| -> i64 { addr.wrapping_sub(shim_base_addr) as i64 };
    let initrd_crc = crc32fast::hash(initrd.unwrap_or(&[]));
    let shim_params = ShimParamsRaw {
        kernel_entry_offset: calculate_shim_offset(kernel_entry_point),
        cmdline_offset: calculate_shim_offset(cmdline_base),
        initrd_offset: calculate_shim_offset(initrd_gpa),
        initrd_size,
        initrd_crc,
        supported_isolation_type: match importer.isolation_config().isolation_type {
            IsolationType::None | IsolationType::Vbs => {
                loader_defs::shim::SupportedIsolationType::VBS
            }
            _ => panic!("only None and VBS are supported for ARM64"),
        },
        memory_start_offset: calculate_shim_offset(memory_start_address),
        memory_size,
        parameter_region_offset: calculate_shim_offset(parameter_region_start),
        parameter_region_size,
        // No reserved region, sidecar, or bounce buffers on ARM64; zeroed
        // fields below reflect that.
        vtl2_reserved_region_offset: 0,
        vtl2_reserved_region_size: 0,
        sidecar_offset: 0,
        sidecar_size: 0,
        sidecar_entry_offset: 0,
        used_start: calculate_shim_offset(memory_start_address),
        used_end: calculate_shim_offset(next_addr),
        bounce_buffer_start: 0,
        bounce_buffer_size: 0,
        page_tables_start: 0,
        page_tables_size: 0,
    };

    importer
        .import_pages(
            shim_params_base / HV_PAGE_SIZE,
            shim_params_size / HV_PAGE_SIZE,
            "underhill-shim-params",
            BootPageAcceptance::Exclusive,
            shim_params.as_bytes(),
        )
        .map_err(Error::Importer)?;

    // Measured VTL0 configuration: starts zeroed, with only the magic set;
    // filled in below based on the supported boot methods.
    let mut measured_config = ParavisorMeasuredVtl0Config {
        magic: ParavisorMeasuredVtl0Config::MAGIC,
        ..FromZeros::new_zeroed()
    };

    if let Some((uefi, vp_context)) = &supports_uefi {
        measured_config.supported_vtl0.set_uefi_supported(true);
        // The UEFI VP context page sits directly after the measured-config page.
        let vp_context_page = PARAVISOR_VTL0_MEASURED_CONFIG_BASE_PAGE_AARCH64 + 1;
        measured_config.uefi_info = UefiInfo {
            firmware: PageRegionDescriptor {
                base_page_number: uefi.firmware_base / HV_PAGE_SIZE,
                page_count: uefi.total_size / HV_PAGE_SIZE,
            },
            vtl0_vp_context: PageRegionDescriptor {
                base_page_number: vp_context_page,
                page_count: 1,
            },
        };

        // Deposit the UEFI vp context.
        importer.import_pages(
            vp_context_page,
            1,
            "openhcl-uefi-vp-context",
            BootPageAcceptance::Exclusive,
            vp_context,
        )?;
    }

    importer
        .import_pages(
            PARAVISOR_VTL0_MEASURED_CONFIG_BASE_PAGE_AARCH64,
            1,
            "underhill-measured-config",
            BootPageAcceptance::Exclusive,
            measured_config.as_bytes(),
        )
        .map_err(Error::Importer)?;

    tracing::trace!(page_table_region_start, "loading the page tables");

    // MAIR layout: index 0 = device, 1 = non-cacheable, 2 = write-through,
    // 3 = write-back normal memory; remaining slots filled with device as a
    // safe default. The same value is programmed into MAIR_EL1 below.
    let memory_attribute_indirection = MemoryAttributeIndirectionEl1([
        MemoryAttributeEl1::Device_nGnRnE,
        MemoryAttributeEl1::Normal_NonCacheable,
        MemoryAttributeEl1::Normal_WriteThrough,
        MemoryAttributeEl1::Normal_WriteBack,
        MemoryAttributeEl1::Device_nGnRnE,
        MemoryAttributeEl1::Device_nGnRnE,
        MemoryAttributeEl1::Device_nGnRnE,
        MemoryAttributeEl1::Device_nGnRnE,
    ]);
    let page_tables = page_table::aarch64::build_identity_page_tables_aarch64(
        page_table_region_start,
        memory_start_address,
        memory_size,
        memory_attribute_indirection,
        page_table_region_size as usize,
    );
    // Built tables must be whole pages and fit strictly inside the reserved
    // region (the two asserts below together require len < region size).
    assert!(page_tables.len() as u64 % HV_PAGE_SIZE == 0);
    let page_table_page_base = page_table_region_start / HV_PAGE_SIZE;
    assert!(page_tables.len() as u64 <= page_table_region_size);
    assert!(page_table_region_size as usize > page_tables.len());

    if with_relocation {
        // Indicate relocation information. Don't include page table region.
        importer.relocation_region(
            memory_start_address,
            end_of_underhill_mem - memory_start_address,
            Arm64PageSize::Large.into(),
            PARAVISOR_DEFAULT_MEMORY_BASE_ADDRESS,
            // 2^48 upper bound; matches the 48-bit IPA size programmed into
            // TCR_EL1 below.
            1 << 48,
            true,
            false,
            0, // BSP
        )?;

        // Tell the loader page table relocation information.
        importer.page_table_relocation(
            page_table_region_start,
            page_table_region_size / HV_PAGE_SIZE,
            page_tables.len() as u64 / HV_PAGE_SIZE,
            0,
        )?;
    }

    // Import the whole reserved region (page_table_page_count pages), not
    // just the pages actually occupied by the built tables.
    importer.import_pages(
        page_table_page_base,
        page_table_page_count,
        "underhill-page-tables",
        BootPageAcceptance::Exclusive,
        &page_tables,
    )?;

    tracing::trace!("Importing register state");

    let mut import_reg = |register| {
        importer
            .import_vp_register(register)
            .map_err(Error::Importer)
    };

    // Set %X0 to relative location of boot_params_base
    let relative_boot_params_base = shim_params_base - shim_base_addr;
    import_reg(AArch64Register::X0(relative_boot_params_base).into())?;

    // Set %pc to the shim entry point.
    import_reg(AArch64Register::Pc(shim_entry_point).into())?;

    // System registers

    // EL1 with SP_EL1 selected (EL1h).
    import_reg(AArch64Register::Cpsr(Cpsr64::new().with_sp(true).with_el(1).into()).into())?;

    // This is what Hyper-V uses. qemu/KVM, and qemu/max use slightly
    // different flags.
    // KVM sets these in addition to what the Hyper-V uses:
    //
    // .with_sa(true)
    // .with_itd(true)
    // .with_sed(true)
    //
    // Windows sets:
    //
    // .with_sa(true)
    // .with_sa0(true)
    // .with_n_aa(true)
    // .with_sed(true)
    // .with_dze(true)
    // .with_en_ib(true)
    // .with_dssbs(true)
    //
    // Maybe could enforce the `s`tack `a`lignment, here, too. Depends on
    // the compiler generating code aligned accesses for the stack.
    //
    // Hyper-V sets:
    import_reg(
        AArch64Register::SctlrEl1(
            SctlrEl1::new()
                // MMU enable for EL1&0 stage 1 address translation.
                // It can be turned off in VTL2 for debugging.
                // The family of the `at` instructions and the `PAR_EL1` register are
                // useful for debugging MMU issues.
                .with_m(true)
                // Stage 1 Cacheability control, for data accesses.
                .with_c(true)
                // Stage 1 Cacheability control, for code.
                .with_i(true)
                // Reserved flags, must be set
                .with_eos(true)
                .with_tscxt(true)
                .with_eis(true)
                .with_span(true)
                .with_n_tlsmd(true)
                .with_lsmaoe(true)
                .into(),
        )
        .into(),
    )?;

    // Hyper-V UEFI and qemu/KVM use the same value for TCR_EL1.
    // They set `t0sz` to `28` as they map memory pretty low.
    // In the paravisor case, need more flexibility.
    // For the details, refer to the "Learning the architecture" series
    // on the ARM website.
    import_reg(
        AArch64Register::TcrEl1(
            TranslationControlEl1::new()
                // t0sz = 0x11 (17): TTBR0 covers a 2^(64-17) = 2^47 byte VA range.
                .with_t0sz(0x11)
                .with_irgn0(1)
                .with_orgn0(1)
                .with_sh0(3)
                .with_tg0(TranslationGranule0::TG_4KB)
                // Disable TTBR1_EL1 walks (i.e. the upper half).
                .with_epd1(1)
                // Due to erratum #822227, need to set a valid TG1 regardless of EPD1.
                .with_tg1(TranslationGranule1::TG_4KB)
                .with_ips(IntermPhysAddrSize::IPA_48_BITS_256_TB)
                .into(),
        )
        .into(),
    )?;

    // The Memory Attribute Indirection
    import_reg(AArch64Register::MairEl1(memory_attribute_indirection.into()).into())?;
    // Point translation base at the identity tables imported above.
    import_reg(
        AArch64Register::Ttbr0El1(
            TranslationBaseEl1::new()
                .with_baddr(page_table_region_start)
                .into(),
        )
        .into(),
    )?;

    // VBAR is in the undefined state, setting it to 0 albeit
    // without the vector exception table. The shim can configure that on its own
    // if need be.
    import_reg(AArch64Register::VbarEl1(0).into())?;

    // Load parameter regions.
    let config_region_page_base = parameter_region_start / HV_PAGE_SIZE;

    // Slit
    let slit_page_base = config_region_page_base + PARAVISOR_CONFIG_SLIT_PAGE_INDEX;
    let slit_parameter_area = importer.create_parameter_area(
        slit_page_base,
        PARAVISOR_CONFIG_SLIT_SIZE_PAGES as u32,
        "underhill-slit",
    )?;
    importer.import_parameter(slit_parameter_area, 0, IgvmParameterType::Slit)?;

    // Pptt
    let pptt_page_base = config_region_page_base + PARAVISOR_CONFIG_PPTT_PAGE_INDEX;
    let pptt_parameter_area = importer.create_parameter_area(
        pptt_page_base,
        PARAVISOR_CONFIG_PPTT_SIZE_PAGES as u32,
        "underhill-pptt",
    )?;
    importer.import_parameter(pptt_parameter_area, 0, IgvmParameterType::Pptt)?;

    // device tree
    let dt_page_base = config_region_page_base + PARAVISOR_CONFIG_DEVICE_TREE_PAGE_INDEX;
    let dt_parameter_area = importer.create_parameter_area(
        dt_page_base,
        PARAVISOR_CONFIG_DEVICE_TREE_SIZE_PAGES as u32,
        "underhill-device-tree",
    )?;
    importer.import_parameter(dt_parameter_area, 0, IgvmParameterType::DeviceTree)?;

    // No vTOM on ARM64, so the offset bit is always 0 here.
    let vtl2_measured_config = ParavisorMeasuredVtl2Config {
        magic: ParavisorMeasuredVtl2Config::MAGIC,
        vtom_offset_bit: 0,
        padding: [0; 7],
    };

    importer
        .import_pages(
            config_region_page_base + PARAVISOR_MEASURED_VTL2_CONFIG_PAGE_INDEX,
            PARAVISOR_MEASURED_VTL2_CONFIG_SIZE_PAGES,
            "underhill-vtl2-measured-config",
            BootPageAcceptance::Exclusive,
            vtl2_measured_config.as_bytes(),
        )
        .map_err(Error::Importer)?;

    let imported_region_base =
        config_region_page_base + PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX;

    importer.set_imported_regions_config_page(imported_region_base);

    Ok(())
}