// openhcl_boot/host_params/shim_params.rs

use crate::arch::get_isolation_type;
use core::slice;
use loader_defs::paravisor::ImportedRegionDescriptor;
use loader_defs::paravisor::ParavisorCommandLine;
use loader_defs::shim::ShimParamsRaw;
use memory_range::MemoryRange;
/// The isolation type the shim is running under.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum IsolationType {
    None,
    Vbs,
    #[cfg(target_arch = "x86_64")]
    Snp,
    #[cfg(target_arch = "x86_64")]
    Tdx,
}

impl IsolationType {
    /// Returns `true` for hardware-backed isolation (SNP, TDX), and `false`
    /// for software isolation (VBS) or no isolation.
    pub fn is_hardware_isolated(&self) -> bool {
        match self {
            #[cfg(target_arch = "x86_64")]
            IsolationType::Snp | IsolationType::Tdx => true,
            IsolationType::None | IsolationType::Vbs => false,
        }
    }
}
/// Iterator over the imported-region descriptors in the measured config
/// page, yielding each region's memory range together with a flag (see
/// `Iterator::Item` below) taken from the descriptor's `pages()` data.
pub struct ImportedRegionIter<'a> {
    // Descriptors not yet yielded; the iterator consumes this slice from
    // the front and truncates it to empty on the first terminator entry.
    imported_regions: &'a [ImportedRegionDescriptor],
}
impl Iterator for ImportedRegionIter<'_> {
    /// The region's address range plus the descriptor's `accepted` flag.
    type Item = (MemoryRange, bool);

    fn next(&mut self) -> Option<Self::Item> {
        // Empty slice means iteration already finished.
        let (first, rest) = self.imported_regions.split_first()?;
        match first.pages() {
            Some((base_page, count, accepted)) => {
                // Valid descriptor: consume it and convert page numbers to
                // a byte address range.
                self.imported_regions = rest;
                let start = base_page * hvdef::HV_PAGE_SIZE;
                let end = start + count * hvdef::HV_PAGE_SIZE;
                let range = MemoryRange::try_new(start..end).expect(
                    "page number conversion into addresses results in a valid address range",
                );
                Some((range, accepted))
            }
            None => {
                // Terminator entry: stop iteration permanently.
                self.imported_regions = &[];
                None
            }
        }
    }
}
/// Fully resolved boot shim parameters.
///
/// Constructed from [`ShimParamsRaw`] by [`ShimParams::new`], which turns
/// each relative offset in the raw structure into an absolute address by
/// adding the shim's load address.
#[derive(Debug)]
pub struct ShimParams {
    /// Absolute address of the kernel entry point.
    pub kernel_entry_address: u64,
    /// Absolute address of the [`ParavisorCommandLine`] structure.
    pub cmdline_base: u64,
    /// Absolute address of the initrd image.
    pub initrd_base: u64,
    /// Size of the initrd image in bytes.
    pub initrd_size: u64,
    /// CRC of the initrd image, as provided by the loader.
    pub initrd_crc: u32,
    /// Absolute start address of the memory region described by the raw
    /// params' `memory_start_offset`.
    pub memory_start_address: u64,
    /// Size of that memory region in bytes.
    pub memory_size: u64,
    /// Absolute start address of the parameter region (holds the device
    /// tree and the imported-regions page, among others).
    pub parameter_region_start: u64,
    /// Size of the parameter region in bytes.
    pub parameter_region_size: u64,
    /// Absolute start address of the VTL2 reserved region (holds the SNP
    /// secrets and CPUID pages on x86_64).
    pub vtl2_reserved_region_start: u64,
    /// Size of the VTL2 reserved region in bytes.
    pub vtl2_reserved_region_size: u64,
    /// The isolation type reported by the loader.
    pub isolation_type: IsolationType,
    /// Absolute address of the sidecar entry point.
    pub sidecar_entry_address: u64,
    /// Absolute base address of the sidecar image.
    pub sidecar_base: u64,
    /// Size of the sidecar image in bytes.
    pub sidecar_size: u64,
    /// Memory range in use, derived from the raw params' `used_start` and
    /// `used_end` offsets.
    pub used: MemoryRange,
    /// Optional bounce buffer range; `None` when the loader reported a
    /// zero bounce buffer size.
    pub bounce_buffer: Option<MemoryRange>,
}
impl ShimParams {
    /// Build a [`ShimParams`] by resolving the relative offsets in `raw`
    /// against `shim_base_address`, the address the shim was loaded at.
    ///
    /// Offsets are signed and applied with `wrapping_add_signed`, so a
    /// region may legitimately sit below the shim's base address.
    pub fn new(shim_base_address: u64, raw: &ShimParamsRaw) -> Self {
        // Exhaustive destructuring: adding a field to `ShimParamsRaw`
        // becomes a compile error here until it is handled.
        let &ShimParamsRaw {
            kernel_entry_offset,
            cmdline_offset,
            initrd_offset,
            initrd_size,
            initrd_crc,
            supported_isolation_type,
            memory_start_offset,
            memory_size,
            parameter_region_offset,
            parameter_region_size,
            vtl2_reserved_region_offset,
            vtl2_reserved_region_size,
            sidecar_offset,
            sidecar_size,
            sidecar_entry_offset,
            used_start,
            used_end,
            bounce_buffer_start,
            bounce_buffer_size,
        } = raw;

        let isolation_type = get_isolation_type(supported_isolation_type);

        // A zero size means the loader provided no bounce buffer.
        let bounce_buffer = if bounce_buffer_size == 0 {
            None
        } else {
            let base = shim_base_address.wrapping_add_signed(bounce_buffer_start);
            Some(MemoryRange::new(base..base + bounce_buffer_size))
        };

        Self {
            kernel_entry_address: shim_base_address.wrapping_add_signed(kernel_entry_offset),
            cmdline_base: shim_base_address.wrapping_add_signed(cmdline_offset),
            initrd_base: shim_base_address.wrapping_add_signed(initrd_offset),
            initrd_size,
            initrd_crc,
            memory_start_address: shim_base_address.wrapping_add_signed(memory_start_offset),
            memory_size,
            parameter_region_start: shim_base_address.wrapping_add_signed(parameter_region_offset),
            parameter_region_size,
            vtl2_reserved_region_start: shim_base_address
                .wrapping_add_signed(vtl2_reserved_region_offset),
            vtl2_reserved_region_size,
            isolation_type,
            sidecar_entry_address: shim_base_address.wrapping_add_signed(sidecar_entry_offset),
            sidecar_base: shim_base_address.wrapping_add_signed(sidecar_offset),
            sidecar_size,
            used: MemoryRange::new(
                shim_base_address.wrapping_add_signed(used_start)
                    ..shim_base_address.wrapping_add_signed(used_end),
            ),
            bounce_buffer,
        }
    }

    /// Address of the SNP secrets page: a fixed page index within the VTL2
    /// reserved region.
    #[cfg(target_arch = "x86_64")]
    pub fn secrets_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_SECRETS_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

    /// Address of the SNP CPUID page: a fixed page index within the VTL2
    /// reserved region.
    #[cfg(target_arch = "x86_64")]
    pub fn cpuid_start(&self) -> u64 {
        self.vtl2_reserved_region_start
            + loader_defs::paravisor::PARAVISOR_RESERVED_VTL2_SNP_CPUID_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE
    }

    /// Address of the device tree within the parameter region.
    pub fn dt_start(&self) -> u64 {
        self.parameter_region_start
            + loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_PAGE_INDEX * hvdef::HV_PAGE_SIZE
    }

    /// Size in bytes reserved for the device tree (a fixed page count).
    pub fn dt_size(&self) -> u64 {
        loader_defs::paravisor::PARAVISOR_CONFIG_DEVICE_TREE_SIZE_PAGES * hvdef::HV_PAGE_SIZE
    }

    /// The initrd image as a byte slice.
    pub fn initrd(&self) -> &'static [u8] {
        // SAFETY: assumes the loader placed `initrd_size` readable bytes at
        // `initrd_base` and that the mapping lives for the program's
        // lifetime — established by the loader contract, not checkable here.
        unsafe { slice::from_raw_parts(self.initrd_base as *const u8, self.initrd_size as usize) }
    }

    /// The [`ParavisorCommandLine`] structure provided by the loader.
    pub fn command_line(&self) -> &'static ParavisorCommandLine {
        // SAFETY: assumes `cmdline_base` points to a valid, suitably aligned
        // `ParavisorCommandLine` placed by the loader; only the non-null
        // part of that assumption is checked (via `expect`).
        unsafe {
            (self.cmdline_base as *const ParavisorCommandLine)
                .as_ref()
                .expect("should always be non null")
        }
    }

    /// The device tree bytes within the parameter region.
    pub fn device_tree(&self) -> &'static [u8] {
        // SAFETY: assumes the parameter region is mapped and at least
        // `dt_start() + dt_size()` bytes are readable — guaranteed by the
        // loader's layout of the parameter region.
        unsafe { slice::from_raw_parts(self.dt_start() as *const u8, self.dt_size() as usize) }
    }

    /// Iterator over the imported (pre-accepted) memory regions described
    /// in the measured config page.
    pub fn imported_regions(&self) -> ImportedRegionIter<'_> {
        use loader_defs::paravisor::ImportedRegionsPageHeader;

        // The imported-regions page sits at a fixed page index within the
        // parameter region.
        let imported_region_page_address = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        // Guard against the page lying outside the parameter region.
        assert!(
            imported_region_page_address + hvdef::HV_PAGE_SIZE
                <= self.parameter_region_start + self.parameter_region_size
        );

        // Descriptors start immediately after the page header and fill the
        // rest of the page.
        let imported_region_start =
            imported_region_page_address + size_of::<ImportedRegionsPageHeader>() as u64;

        // SAFETY: the page was asserted above to lie within the parameter
        // region; the slice covers exactly the descriptor area of that one
        // page (page size minus header). Assumes the region is mapped and
        // contains `ImportedRegionDescriptor` entries as laid out by the
        // loader.
        unsafe {
            ImportedRegionIter {
                imported_regions: slice::from_raw_parts(
                    imported_region_start as *const ImportedRegionDescriptor,
                    (hvdef::HV_PAGE_SIZE as usize - size_of::<ImportedRegionsPageHeader>())
                        / size_of::<ImportedRegionDescriptor>(),
                ),
            }
        }
    }

    /// The SHA-384 hash stored in the imported-regions page header.
    #[cfg(target_arch = "x86_64")]
    pub fn imported_regions_hash(&self) -> &'static [u8] {
        let header_start = self.parameter_region_start
            + (loader_defs::paravisor::PARAVISOR_MEASURED_VTL2_CONFIG_ACCEPTED_MEMORY_PAGE_INDEX
                * hvdef::HV_PAGE_SIZE);

        // SAFETY: assumes the imported-regions page header is mapped and
        // valid at `header_start` (same page `imported_regions()` reads;
        // note this method does not repeat that method's bounds assert).
        unsafe {
            let header =
                &*(header_start as *const loader_defs::paravisor::ImportedRegionsPageHeader);
            &header.sha384_hash
        }
    }
}