// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Routines to prepare VTL2 memory for launching the kernel.

use super::address_space::LocalMap;
use super::address_space::init_local_map;
use crate::ShimParams;
use crate::host_params::PartitionInfo;
use crate::host_params::shim_params::IsolationType;
use crate::hypercall::hvcall;
use memory_range::MemoryRange;
use sha2::Digest;
use sha2::Sha384;
use x86defs::X64_LARGE_PAGE_SIZE;
use x86defs::tdx::TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT;

/// On isolated systems, transitions all VTL2 RAM to be private and accepted, with the appropriate
/// VTL permissions applied.
pub fn setup_vtl2_memory(shim_params: &ShimParams, partition_info: &PartitionInfo) {
    // Only if the partition is VBS-isolated, accept memory and apply vtl 2 protections here.
    // Non-isolated partitions can undergo servicing, and additional information
    // would be needed to determine whether vtl 2 protections should be applied
    // or skipped, since the operation is expensive.
    // TODO: if applying vtl 2 protections for non-isolated VMs moves to the
    // boot shim, apply them here.
    if let IsolationType::None = shim_params.isolation_type {
        return;
    }

    if let IsolationType::Vbs = shim_params.isolation_type {
        // Enable VTL protection so that vtl 2 protections can be applied. All other config
        // should be set by user mode.
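        // A default protection mask of 0xF (readable | writable | kernel
        // execute | user execute) leaves lower VTLs with full access by
        // default; apply_vtl2_protections below then revokes lower-VTL access
        // to VTL2's own pages.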
        let vsm_config = hvdef::HvRegisterVsmPartitionConfig::new()
            .with_default_vtl_protection_mask(0xF)
            .with_enable_vtl_protection(true);

        hvcall()
            .set_register(
                hvdef::HvX64RegisterName::VsmPartitionConfig.into(),
                hvdef::HvRegisterValue::from(u64::from(vsm_config)),
            )
            .expect("setting vsm config shouldn't fail");

        // VBS isolated VMs need to apply VTL2 protections to pages that were already accepted to
        // prevent VTL0 access. Only those pages that belong to the VTL2 RAM region should have
        // these protections applied - certain pages belonging to VTL0 are also among the accepted
        // regions and should not be processed here.
        let accepted_ranges =
            shim_params
                .imported_regions()
                .filter_map(|(imported_range, already_accepted)| {
                    already_accepted.then_some(imported_range)
                });
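        // overlapping_ranges yields only the portions of VTL2 RAM that
        // intersect the already-accepted imported regions.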
        for range in memory_range::overlapping_ranges(
            partition_info.vtl2_ram.iter().map(|entry| entry.range),
            accepted_ranges,
        ) {
            hvcall()
                .apply_vtl2_protections(range)
                .expect("applying vtl 2 protections cannot fail");
        }
    }

    // Initialize the local_map
    // TODO: Consider moving this to ShimParams to pass around.
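    // The local map is a small virtual address window that can be remapped at
    // runtime; it is used below to reach GPAs (such as shared aliases) that
    // the identity map does not cover.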
    let mut local_map = match shim_params.isolation_type {
        IsolationType::Snp | IsolationType::Tdx => Some(init_local_map(
            loader_defs::paravisor::PARAVISOR_LOCAL_MAP_VA,
        )),
        IsolationType::None | IsolationType::Vbs => None,
    };

    // Make sure imported regions are in increasing order.
    let mut last_range_end = None;
    for (imported_range, _) in shim_params.imported_regions() {
        assert!(last_range_end.is_none() || imported_range.start() > last_range_end.unwrap());
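        // end() is exclusive, so end() - HV_PAGE_SIZE is the address of the
        // last page in the range; requiring the next start to be strictly
        // greater therefore permits adjacent, but not overlapping, regions.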
        last_range_end = Some(imported_range.end() - hvdef::HV_PAGE_SIZE);
    }

    // Iterate over all VTL2 RAM that is not part of an imported region and
    // accept it with appropriate VTL protections.
    for range in memory_range::subtract_ranges(
        partition_info.vtl2_ram.iter().map(|e| e.range),
        shim_params.imported_regions().map(|(r, _)| r),
    ) {
        accept_vtl2_memory(shim_params, &mut local_map, range);
    }

    let ram_buffer = if let Some(bounce_buffer) = shim_params.bounce_buffer {
        assert!(bounce_buffer.start() % X64_LARGE_PAGE_SIZE == 0);
        assert!(bounce_buffer.len() >= X64_LARGE_PAGE_SIZE);
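
        // The bounce buffer may lie outside of VTL2 RAM, in which case it was
        // not accepted above; accept any such pages before using the buffer.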
        for range in memory_range::subtract_ranges(
            core::iter::once(bounce_buffer),
            partition_info.vtl2_ram.iter().map(|e| e.range),
        ) {
            accept_vtl2_memory(shim_params, &mut local_map, range);
        }

        // SAFETY: The bounce buffer is trusted as it is obtained from measured
        // shim parameters. The bootloader is identity mapped, and the PA is
        // guaranteed to be mapped as the pagetable is prebuilt and measured.
        unsafe {
            core::slice::from_raw_parts_mut(
                bounce_buffer.start() as *mut u8,
                bounce_buffer.len() as usize,
            )
        }
    } else {
        &mut []
    };

    // Iterate over all imported regions that are not already accepted. They must be accepted here.
    // TODO: No VTL0 memory is currently marked as pending.
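    // On SNP and TDX, accept_pending_vtl2_memory stages each region's
    // contents through ram_buffer so that transitioning the pages to private
    // and accepting them does not lose the data.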
    for (imported_range, already_accepted) in shim_params.imported_regions() {
        if !already_accepted {
            accept_pending_vtl2_memory(shim_params, &mut local_map, ram_buffer, imported_range);
        }
    }
}

/// Accepts VTL2 memory in the specified gpa range.
fn accept_vtl2_memory(
    shim_params: &ShimParams,
    local_map: &mut Option<LocalMap<'_>>,
    range: MemoryRange,
) {
    match shim_params.isolation_type {
        IsolationType::Vbs => {
            hvcall()
                .accept_vtl2_pages(range, hvdef::hypercall::AcceptMemoryType::RAM)
                .expect("accepting vtl 2 memory must not fail");
        }
        IsolationType::Snp => {
            super::snp::set_page_acceptance(local_map.as_mut().unwrap(), range, true)
                .expect("accepting vtl 2 memory must not fail");
        }
        IsolationType::Tdx => {
            super::tdx::accept_pages(range).expect("accepting vtl2 memory must not fail")
        }
        _ => unreachable!(),
    }
}

/// Accepts VTL2 memory in the specified range that is currently marked as pending, i.e. not
/// yet assigned as exclusive and private.
fn accept_pending_vtl2_memory(
    shim_params: &ShimParams,
    local_map: &mut Option<LocalMap<'_>>,
    ram_buffer: &mut [u8],
    range: MemoryRange,
) {
    let isolation_type = shim_params.isolation_type;

    match isolation_type {
        IsolationType::Vbs => {
            hvcall()
                .accept_vtl2_pages(range, hvdef::hypercall::AcceptMemoryType::RAM)
                .expect("accepting vtl 2 memory must not fail");
        }
        IsolationType::Snp | IsolationType::Tdx => {
            let local_map = local_map.as_mut().unwrap();
            // Accepting pending memory for SNP is somewhat more complicated. The pending regions
            // are unencrypted pages. Accepting them would result in their contents being scrambled.
            // Instead their contents must be copied out to a private region, then copied back once
            // the pages have been accepted. Additionally, the access to the unencrypted pages must
            // happen with the C-bit cleared.
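            // Each iteration of the loop below therefore (1) maps a chunk as
            // shared and copies it out, (2) flips its visibility to private,
            // (3) accepts it, and (4) copies the contents back through the
            // identity map.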
            let mut remaining = range;
            while !remaining.is_empty() {
                // Copy up to the next 2MB boundary.
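                // For example, with remaining.start() = 0x1f_5000:
                // 0x1f_5000 + 0x20_0000 = 0x3f_5000, masked down to 0x20_0000,
                // so the chunk ends at the next large page boundary. Chunks
                // are thus at most 2MB, matching the minimum bounce buffer
                // size asserted in setup_vtl2_memory.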
                let range = MemoryRange::new(
                    remaining.start()
                        ..remaining.end().min(
                            (remaining.start() + X64_LARGE_PAGE_SIZE) & !(X64_LARGE_PAGE_SIZE - 1),
                        ),
                );
                remaining = MemoryRange::new(range.end()..remaining.end());
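
                // Shadow ram_buffer with just the prefix needed for this chunk.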
                let ram_buffer = &mut ram_buffer[..range.len() as usize];

                // Map the pages as shared and copy the necessary number to the buffer.
                {
                    let map_range = if isolation_type == IsolationType::Tdx {
                        // Set the shared GPA boundary (vTOM) bit so the GPA
                        // refers to the shared alias of these pages.
                        MemoryRange::new(
                            range.start() | TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT
                                ..range.end() | TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT,
                        )
                    } else {
                        range
                    };

                    let mapping = local_map.map_pages(map_range, false);
                    ram_buffer.copy_from_slice(mapping.data);
                }

                // Change visibility on the pages for this iteration.
                match isolation_type {
                    IsolationType::Snp => {
                        super::snp::Ghcb::change_page_visibility(range, false);
                    }
                    IsolationType::Tdx => {
                        super::tdx::change_page_visibility(range, false);
                    }
                    _ => unreachable!(),
                }

                // Accept the pages.
                match isolation_type {
                    IsolationType::Snp => {
                        super::snp::set_page_acceptance(local_map, range, true)
                            .expect("accepting vtl 2 memory must not fail");
                    }
                    IsolationType::Tdx => {
                        super::tdx::accept_pages(range)
                            .expect("accepting vtl 2 memory must not fail");
                    }
                    _ => unreachable!(),
                }

                // Copy the buffer back. Use the identity map now that the memory has been accepted.
                {
                    // SAFETY: Known memory region that was just accepted.
                    let mapping = unsafe {
                        core::slice::from_raw_parts_mut(
                            range.start() as *mut u8,
                            range.len() as usize,
                        )
                    };

                    mapping.copy_from_slice(ram_buffer);
                }
            }
        }
        _ => unreachable!(),
    }
}

/// Verify the SHA384 hash of pages that were imported as unaccepted/shared. Compare against the
/// desired hash that is passed in as a measured parameter. Failures result in a panic.
pub fn verify_imported_regions_hash(shim_params: &ShimParams) {
    // Non-isolated VMs can undergo servicing, and thus the hash might no longer be valid,
    // as the memory regions can change during runtime.
    if let IsolationType::None = shim_params.isolation_type {
        return;
    }

    // If all imported pages are already accepted, there is no need to verify the hash.
    if shim_params
        .imported_regions()
        .all(|(_, already_accepted)| already_accepted)
    {
        return;
    }
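
    // Hash the not-yet-accepted regions in import order and compare against
    // the expected digest from the measured shim parameters.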
    let mut hasher = Sha384::new();
    shim_params
        .imported_regions()
        .filter(|(_, already_accepted)| !already_accepted)
        .for_each(|(range, _)| {
            // SAFETY: The location and identity of the range is trusted as it is obtained from
            // measured shim parameters.
            let mapping = unsafe {
                core::slice::from_raw_parts(range.start() as *const u8, range.len() as usize)
            };
            hasher.update(mapping);
        });

    if hasher.finalize().as_slice() != shim_params.imported_regions_hash() {
        panic!("Imported regions hash mismatch");
    }
}