openhcl_boot/arch/x86_64/memory.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Routines to prepare VTL2 memory for launching the kernel.
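//!
//! At a high level, `setup_vtl2_memory` below:
//! 1. For VBS isolation, enables VTL protections and applies them to the
//!    already-accepted imported ranges that lie in VTL2 RAM.
//! 2. Accepts all VTL2 RAM that is not part of an imported region.
//! 3. Accepts imported regions that are still pending, bouncing their
//!    contents through a private buffer so they survive acceptance.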

use super::address_space::LocalMap;
use super::address_space::init_local_map;
use crate::ShimParams;
use crate::host_params::PartitionInfo;
use crate::host_params::shim_params::IsolationType;
use crate::hypercall::hvcall;
use memory_range::MemoryRange;
use sha2::Digest;
use sha2::Sha384;
use x86defs::X64_LARGE_PAGE_SIZE;
use x86defs::tdx::TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT;

/// On isolated systems, transitions all VTL2 RAM to be private and accepted, with the appropriate
/// VTL permissions applied.
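///
/// A minimal sketch of the expected call order (hypothetical; in the real
/// boot path both structures are derived from the measured parameters):
///
/// ```ignore
/// setup_vtl2_memory(&shim_params, &partition_info);
/// // ... later, before handing off to the kernel ...
/// verify_imported_regions_hash(&shim_params);
/// ```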
pub fn setup_vtl2_memory(shim_params: &ShimParams, partition_info: &PartitionInfo) {
    // Accept memory and apply VTL2 protections here only for isolated partitions. Non-isolated
    // partitions can undergo servicing, and additional information would be needed to determine
    // whether VTL2 protections should be applied or skipped, since the operation is expensive.
    // TODO: if applying VTL2 protections for non-isolated VMs moves to the
    // boot shim, apply them here.
    if let IsolationType::None = shim_params.isolation_type {
        return;
    }

    if let IsolationType::Vbs = shim_params.isolation_type {
        // Enable VTL protection so that VTL2 protections can be applied. All other config
        // should be set by user mode.
        let vsm_config = hvdef::HvRegisterVsmPartitionConfig::new()
            .with_default_vtl_protection_mask(0xF)
            .with_enable_vtl_protection(true);

        hvcall()
            .set_register(
                hvdef::HvX64RegisterName::VsmPartitionConfig.into(),
                hvdef::HvRegisterValue::from(u64::from(vsm_config)),
            )
            .expect("setting vsm config shouldn't fail");
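
        // Note (assumption based on the TLFS HV_MAP_GPA_* permission bits): a
        // default VTL protection mask of 0xF grants lower VTLs full
        // read/write/execute access by default; the VTL2 ranges that need
        // protection are then locked down individually below.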

        // VBS-isolated VMs need to apply VTL2 protections to pages that were already accepted to
        // prevent VTL0 access. Only those pages that belong to the VTL2 RAM region should have
        // these protections applied - certain pages belonging to VTL0 are also among the accepted
        // regions and should not be processed here.
        let accepted_ranges =
            shim_params
                .imported_regions()
                .filter_map(|(imported_range, already_accepted)| {
                    already_accepted.then_some(imported_range)
                });
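        // `overlapping_ranges` yields the intersection of the two sets of
        // ranges, so protections are applied only to accepted ranges that lie
        // within VTL2 RAM.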
        for range in memory_range::overlapping_ranges(
            partition_info.vtl2_ram.iter().map(|entry| entry.range),
            accepted_ranges,
        ) {
            hvcall()
                .apply_vtl2_protections(range)
                .expect("applying vtl 2 protections cannot fail");
        }
    }

    // Initialize the local_map.
    // TODO: Consider moving this to ShimParams to pass around.
    let mut local_map = match shim_params.isolation_type {
        IsolationType::Snp | IsolationType::Tdx => Some(init_local_map(
            loader_defs::paravisor::PARAVISOR_LOCAL_MAP_VA,
        )),
        IsolationType::None | IsolationType::Vbs => None,
    };

    // Make sure imported regions are in increasing order.
    let mut last_range_end = None;
    for (imported_range, _) in shim_params.imported_regions() {
        assert!(last_range_end.is_none() || imported_range.start() > last_range_end.unwrap());
        last_range_end = Some(imported_range.end() - hvdef::HV_PAGE_SIZE);
    }
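    // Note: `last_range_end` is the start of the previous range's final page,
    // so for page-aligned ranges the `>` check permits exactly adjacent
    // regions (next start == previous end) while rejecting overlap and
    // out-of-order entries.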

    // Iterate over all VTL2 RAM that is not part of an imported region and
    // accept it with appropriate VTL protections.
    for range in memory_range::subtract_ranges(
        partition_info.vtl2_ram.iter().map(|e| e.range),
        shim_params.imported_regions().map(|(r, _)| r),
    ) {
        accept_vtl2_memory(shim_params, &mut local_map, range);
    }

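    // Set up a private bounce buffer (when provided via measured shim
    // parameters) for accept_pending_vtl2_memory to preserve the contents of
    // pending shared pages across acceptance.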
    let ram_buffer = if let Some(bounce_buffer) = shim_params.bounce_buffer {
        assert!(bounce_buffer.start() % X64_LARGE_PAGE_SIZE == 0);
        assert!(bounce_buffer.len() >= X64_LARGE_PAGE_SIZE);

        for range in memory_range::subtract_ranges(
            core::iter::once(bounce_buffer),
            partition_info.vtl2_ram.iter().map(|e| e.range),
        ) {
            accept_vtl2_memory(shim_params, &mut local_map, range);
        }

        // SAFETY: The bounce buffer is trusted as it is obtained from measured
        // shim parameters. The bootloader is identity mapped, and the PA is
        // guaranteed to be mapped as the pagetable is prebuilt and measured.
        unsafe {
            core::slice::from_raw_parts_mut(
                bounce_buffer.start() as *mut u8,
                bounce_buffer.len() as usize,
            )
        }
    } else {
        &mut []
    };

    // Iterate over all imported regions that are not already accepted. They must be accepted here.
    // TODO: No VTL0 memory is currently marked as pending.
    for (imported_range, already_accepted) in shim_params.imported_regions() {
        if !already_accepted {
            accept_pending_vtl2_memory(shim_params, &mut local_map, ram_buffer, imported_range);
        }
    }
}

/// Accepts VTL2 memory in the specified GPA range.
fn accept_vtl2_memory(
    shim_params: &ShimParams,
    local_map: &mut Option<LocalMap<'_>>,
    range: MemoryRange,
) {
    match shim_params.isolation_type {
        IsolationType::Vbs => {
            hvcall()
                .accept_vtl2_pages(range, hvdef::hypercall::AcceptMemoryType::RAM)
                .expect("accepting vtl 2 memory must not fail");
        }
        IsolationType::Snp => {
            super::snp::set_page_acceptance(local_map.as_mut().unwrap(), range, true)
                .expect("accepting vtl 2 memory must not fail");
        }
        IsolationType::Tdx => {
            super::tdx::accept_pages(range).expect("accepting vtl 2 memory must not fail")
        }
        _ => unreachable!(),
    }
}

/// Accepts VTL2 memory in the specified range that is currently marked as pending, i.e. not
/// yet assigned as exclusive and private.
fn accept_pending_vtl2_memory(
    shim_params: &ShimParams,
    local_map: &mut Option<LocalMap<'_>>,
    ram_buffer: &mut [u8],
    range: MemoryRange,
) {
    let isolation_type = shim_params.isolation_type;

    match isolation_type {
        IsolationType::Vbs => {
            hvcall()
                .accept_vtl2_pages(range, hvdef::hypercall::AcceptMemoryType::RAM)
                .expect("accepting vtl 2 memory must not fail");
        }
        IsolationType::Snp | IsolationType::Tdx => {
            let local_map = local_map.as_mut().unwrap();
            // Accepting pending memory is more complicated for SNP and TDX. The pending regions
            // are unencrypted pages; accepting them would scramble their contents. Instead, their
            // contents must be copied out to a private region, then copied back once the pages
            // have been accepted. Additionally, the unencrypted pages must be accessed with the
            // C-bit cleared (SNP) or the shared GPA boundary bit set (TDX).
            let mut remaining = range;
            while !remaining.is_empty() {
                // Copy up to the next 2MB boundary.
                let range = MemoryRange::new(
                    remaining.start()
                        ..remaining.end().min(
                            (remaining.start() + X64_LARGE_PAGE_SIZE) & !(X64_LARGE_PAGE_SIZE - 1),
                        ),
                );
                remaining = MemoryRange::new(range.end()..remaining.end());
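
                // E.g. if `remaining.start()` == 0x1f_f000, then
                // (0x1f_f000 + 0x20_0000) & !0x1f_ffff == 0x20_0000, so this
                // chunk ends at the first 2MB boundary above the start.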

                let ram_buffer = &mut ram_buffer[..range.len() as usize];

                // Map the pages as shared and copy their contents to the buffer.
                {
                    let map_range = if isolation_type == IsolationType::Tdx {
                        // Set the shared GPA boundary (vTOM) bit on the
                        // addresses so the range is accessed as shared.
                        MemoryRange::new(
                            range.start() | TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT
                                ..range.end() | TDX_SHARED_GPA_BOUNDARY_ADDRESS_BIT,
                        )
                    } else {
                        range
                    };

                    let mapping = local_map.map_pages(map_range, false);
                    ram_buffer.copy_from_slice(mapping.data);
                }

                // Change visibility on the pages for this iteration.
                match isolation_type {
                    IsolationType::Snp => {
                        super::snp::Ghcb::change_page_visibility(range, false);
                    }
                    IsolationType::Tdx => {
                        super::tdx::change_page_visibility(range, false);
                    }
                    _ => unreachable!(),
                }

                // Accept the pages.
                match isolation_type {
                    IsolationType::Snp => {
                        super::snp::set_page_acceptance(local_map, range, true)
                            .expect("accepting vtl 2 memory must not fail");
                    }
                    IsolationType::Tdx => {
                        super::tdx::accept_pages(range)
                            .expect("accepting vtl 2 memory must not fail");
                    }
                    _ => unreachable!(),
                }

                // Copy the buffer back. Use the identity map now that the memory has been accepted.
                {
                    // SAFETY: Known memory region that was just accepted.
                    let mapping = unsafe {
                        core::slice::from_raw_parts_mut(
                            range.start() as *mut u8,
                            range.len() as usize,
                        )
                    };

                    mapping.copy_from_slice(ram_buffer);
                }
            }
        }
        _ => unreachable!(),
    }
}

/// Verify the SHA384 hash of pages that were imported as unaccepted/shared. Compare against the
/// desired hash that is passed in as a measured parameter. Failures result in a panic.
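///
/// Note: only regions imported as unaccepted contribute to the hash, and they
/// are hashed in the ascending address order asserted in `setup_vtl2_memory`.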
pub fn verify_imported_regions_hash(shim_params: &ShimParams) {
    // Non-isolated VMs can undergo servicing, and thus the hash might no longer be valid,
    // as the memory regions can change during runtime.
    if let IsolationType::None = shim_params.isolation_type {
        return;
    }

    // If all imported pages are already accepted, there is no need to verify the hash.
    if shim_params
        .imported_regions()
        .all(|(_, already_accepted)| already_accepted)
    {
        return;
    }

    let mut hasher = Sha384::new();
    shim_params
        .imported_regions()
        .filter(|(_, already_accepted)| !already_accepted)
        .for_each(|(range, _)| {
            // SAFETY: The location and identity of the range is trusted as it is obtained from
            // measured shim parameters.
            let mapping = unsafe {
                core::slice::from_raw_parts(range.start() as *const u8, range.len() as usize)
            };
            hasher.update(mapping);
        });

    if hasher.finalize().as_slice() != shim_params.imported_regions_hash() {
        panic!("Imported regions hash mismatch");
    }
}