openhcl_boot/arch/x86_64/memory.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Routines to prepare VTL2 memory for launching the kernel.

use super::address_space::LocalMap;
use super::address_space::init_local_map;
use crate::ShimParams;
use crate::arch::TdxHypercallPage;
use crate::arch::x86_64::address_space::tdx_share_large_page;
use crate::host_params::PartitionInfo;
use crate::host_params::shim_params::IsolationType;
use crate::hypercall::hvcall;
use memory_range::MemoryRange;
use sha2::Digest;
use sha2::Sha384;
use x86defs::X64_LARGE_PAGE_SIZE;
use x86defs::tdx::TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT;

/// On isolated systems, transitions all VTL2 RAM to be private and accepted, with the appropriate
/// VTL permissions applied.
pub fn setup_vtl2_memory(shim_params: &ShimParams, partition_info: &PartitionInfo) {
    // Only if the partition is VBS-isolated, accept memory and apply vtl 2 protections here.
    // Non-isolated partitions can undergo servicing, and additional information
    // would be needed to determine whether vtl 2 protections should be applied
    // or skipped, since the operation is expensive.
    // TODO: if applying vtl 2 protections for non-isolated VMs moves to the
    // boot shim, apply them here.
    if let IsolationType::None = shim_params.isolation_type {
        return;
    }

    if let IsolationType::Vbs = shim_params.isolation_type {
        // Enable VTL protection so that vtl 2 protections can be applied. All other config
        // should be set by user mode.
        let vsm_config = hvdef::HvRegisterVsmPartitionConfig::new()
            .with_default_vtl_protection_mask(0xF)
            .with_enable_vtl_protection(true);

        hvcall()
            .set_register(
                hvdef::HvX64RegisterName::VsmPartitionConfig.into(),
                hvdef::HvRegisterValue::from(u64::from(vsm_config)),
            )
            .expect("setting vsm config shouldn't fail");

        // VBS-isolated VMs need to apply VTL2 protections to pages that were already accepted to
        // prevent VTL0 access. Only those pages that belong to the VTL2 RAM region should have
        // these protections applied; certain pages belonging to VTL0 are also among the accepted
        // regions and should not be processed here.
        let accepted_ranges =
            shim_params
                .imported_regions()
                .filter_map(|(imported_range, already_accepted)| {
                    already_accepted.then_some(imported_range)
                });
        for range in memory_range::overlapping_ranges(
            partition_info.vtl2_ram.iter().map(|entry| entry.range),
            accepted_ranges,
        ) {
            hvcall()
                .apply_vtl2_protections(range)
                .expect("applying vtl 2 protections cannot fail");
        }
    }

    // Initialize the local_map
    // TODO: Consider moving this to ShimParams to pass around.
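    // Only hardware-isolated VMs (SNP and TDX) need the local map; it is used below to
    // access pending pages through their shared mappings and, for SNP, to update page
    // acceptance.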
    let mut local_map = match shim_params.isolation_type {
        IsolationType::Snp | IsolationType::Tdx => Some(init_local_map(
            loader_defs::paravisor::PARAVISOR_LOCAL_MAP_VA,
        )),
        IsolationType::None | IsolationType::Vbs => None,
    };

    // Make sure imported regions are in increasing order.
    let mut last_range_end = None;
    for (imported_range, _) in shim_params.imported_regions() {
        assert!(last_range_end.is_none() || imported_range.start() > last_range_end.unwrap());
        last_range_end = Some(imported_range.end() - hvdef::HV_PAGE_SIZE);
    }

    // Iterate over all VTL2 RAM that is not part of an imported region and
    // accept it with appropriate VTL protections.
    for range in memory_range::subtract_ranges(
        partition_info.vtl2_ram.iter().map(|e| e.range),
        shim_params.imported_regions().map(|(r, _)| r),
    ) {
        accept_vtl2_memory(shim_params, &mut local_map, range);
    }

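    // If a bounce buffer was provided, accept any part of it not already covered by VTL2
    // ram and expose it as a byte slice; it is used below to preserve the contents of
    // pending pages while they are accepted.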
    let ram_buffer = if let Some(bounce_buffer) = shim_params.bounce_buffer {
        assert!(bounce_buffer.start() % X64_LARGE_PAGE_SIZE == 0);
        assert!(bounce_buffer.len() >= X64_LARGE_PAGE_SIZE);

        for range in memory_range::subtract_ranges(
            core::iter::once(bounce_buffer),
            partition_info.vtl2_ram.iter().map(|e| e.range),
        ) {
            accept_vtl2_memory(shim_params, &mut local_map, range);
        }

        // SAFETY: The bounce buffer is trusted as it is obtained from measured
        // shim parameters. The bootloader is identity mapped, and the PA is
        // guaranteed to be mapped as the pagetable is prebuilt and measured.
        unsafe {
            core::slice::from_raw_parts_mut(
                bounce_buffer.start() as *mut u8,
                bounce_buffer.len() as usize,
            )
        }
    } else {
        &mut []
    };

    // Iterate over all imported regions that are not already accepted. They must be accepted here.
    // TODO: No VTL0 memory is currently marked as pending.
    for (imported_range, already_accepted) in shim_params.imported_regions() {
        if !already_accepted {
            accept_pending_vtl2_memory(shim_params, &mut local_map, ram_buffer, imported_range);
        }
    }

    // For TDVMCALL-based hypercalls, take the first 2 MB region from ram_buffer for
    // hypercall IO pages. ram_buffer must not be used again beyond this point.
    // TODO: find an approach that does not require re-using the ram_buffer.
    if shim_params.isolation_type == IsolationType::Tdx {
        let free_buffer = ram_buffer.as_mut_ptr() as u64;
        assert!(free_buffer % X64_LARGE_PAGE_SIZE == 0);
        // SAFETY: The bottom 2 MB region of the ram_buffer is unused by the shim.
        // The region is aligned to 2 MB and mapped as a large page.
        let tdx_io_page = unsafe {
            tdx_share_large_page(free_buffer);
            TdxHypercallPage::new(free_buffer)
        };
        hvcall().initialize_tdx(tdx_io_page);
    }
}

/// Accepts VTL2 memory in the specified GPA range.
fn accept_vtl2_memory(
    shim_params: &ShimParams,
    local_map: &mut Option<LocalMap<'_>>,
    range: MemoryRange,
) {
    match shim_params.isolation_type {
        IsolationType::Vbs => {
            hvcall()
                .accept_vtl2_pages(range, hvdef::hypercall::AcceptMemoryType::RAM)
                .expect("accepting vtl 2 memory must not fail");
        }
        IsolationType::Snp => {
            super::snp::set_page_acceptance(local_map.as_mut().unwrap(), range, true)
                .expect("accepting vtl 2 memory must not fail");
        }
        IsolationType::Tdx => {
            super::tdx::accept_pages(range).expect("accepting vtl2 memory must not fail")
        }
        _ => unreachable!(),
    }
}

/// Accepts VTL2 memory in the specified range that is currently marked as pending, i.e. not
/// yet assigned as exclusive and private.
fn accept_pending_vtl2_memory(
    shim_params: &ShimParams,
    local_map: &mut Option<LocalMap<'_>>,
    ram_buffer: &mut [u8],
    range: MemoryRange,
) {
    let isolation_type = shim_params.isolation_type;

    match isolation_type {
        IsolationType::Vbs => {
            hvcall()
                .accept_vtl2_pages(range, hvdef::hypercall::AcceptMemoryType::RAM)
                .expect("accepting vtl 2 memory must not fail");
        }
        IsolationType::Snp | IsolationType::Tdx => {
            let local_map = local_map.as_mut().unwrap();
            // Accepting pending memory is somewhat more complicated for hardware-isolated VMs.
            // The pending regions are unencrypted (shared) pages; accepting them would result in
            // their contents being scrambled. Instead, their contents must be copied out to a
            // private region, then copied back once the pages have been accepted. Additionally,
            // access to the unencrypted pages must happen through their shared mapping: with the
            // C-bit cleared on SNP, or above the shared GPA boundary (vTOM) on TDX.
            let mut remaining = range;
            while !remaining.is_empty() {
                // Copy up to the next 2 MB boundary.
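                // (For example, a chunk starting at 0x1f_f000 would be capped at the 2 MB
                // boundary 0x20_0000, assuming `remaining` extends that far; the addresses
                // here are illustrative.)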
                let range = MemoryRange::new(
                    remaining.start()
                        ..remaining.end().min(
                            (remaining.start() + X64_LARGE_PAGE_SIZE) & !(X64_LARGE_PAGE_SIZE - 1),
                        ),
                );
                remaining = MemoryRange::new(range.end()..remaining.end());

                let ram_buffer = &mut ram_buffer[..range.len() as usize];

                // Map the pages as shared and copy the necessary number to the buffer.
                {
                    let map_range = if isolation_type == IsolationType::Tdx {
                        // Set the shared GPA boundary (vTOM) bit on the addresses.
                        MemoryRange::new(
                            range.start() | TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT
                                ..range.end() | TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT,
                        )
                    } else {
                        range
                    };

                    let mapping = local_map.map_pages(map_range, false);
                    ram_buffer.copy_from_slice(mapping.data);
                }

                // Change visibility on the pages for this iteration.
                match isolation_type {
                    IsolationType::Snp => {
                        super::snp::Ghcb::change_page_visibility(range, false);
                    }
                    IsolationType::Tdx => {
                        super::tdx::change_page_visibility(range, false);
                    }
                    _ => unreachable!(),
                }

                // Accept the pages.
                match isolation_type {
                    IsolationType::Snp => {
                        super::snp::set_page_acceptance(local_map, range, true)
                            .expect("accepting vtl 2 memory must not fail");
                    }
                    IsolationType::Tdx => {
                        super::tdx::accept_pages(range)
                            .expect("accepting vtl 2 memory must not fail");
                    }
                    _ => unreachable!(),
                }

                // Copy the buffer back. Use the identity map now that the memory has been accepted.
                {
                    // SAFETY: Known memory region that was just accepted.
                    let mapping = unsafe {
                        core::slice::from_raw_parts_mut(
                            range.start() as *mut u8,
                            range.len() as usize,
                        )
                    };

                    mapping.copy_from_slice(ram_buffer);
                }
            }
        }
        _ => unreachable!(),
    }
}

/// Verify the SHA384 hash of pages that were imported as unaccepted/shared. Compare against the
/// desired hash that is passed in as a measured parameter. Failures result in a panic.
pub fn verify_imported_regions_hash(shim_params: &ShimParams) {
    // Non-isolated VMs can undergo servicing, and thus the hash might no longer be valid,
    // as the memory regions can change during runtime.
    if let IsolationType::None = shim_params.isolation_type {
        return;
    }

    // If all imported pages are already accepted, there is no need to verify the hash.
    if shim_params
        .imported_regions()
        .all(|(_, already_accepted)| already_accepted)
    {
        return;
    }

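    // Hash only the regions that were imported as shared (not yet accepted), in the order
    // they appear in the imported regions list.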
    let mut hasher = Sha384::new();
    shim_params
        .imported_regions()
        .filter(|(_, already_accepted)| !already_accepted)
        .for_each(|(range, _)| {
            // SAFETY: The location and identity of the range is trusted as it is obtained from
            // measured shim parameters.
            let mapping = unsafe {
                core::slice::from_raw_parts(range.start() as *const u8, range.len() as usize)
            };
            hasher.update(mapping);
        });

    if hasher.finalize().as_slice() != shim_params.imported_regions_hash() {
        panic!("Imported regions hash mismatch");
    }
}