// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![expect(missing_docs)]

use crate::HardwareIsolatedMemoryProtector;
use crate::MemoryAcceptor;
use crate::mapping::GuestMemoryMapping;
use crate::mapping::GuestMemoryView;
use crate::mapping::GuestMemoryViewReadType;
use crate::mapping::GuestPartitionMemoryView;
use anyhow::Context;
use cvm_tracing::CVM_ALLOWED;
use futures::future::try_join_all;
use guestmem::GuestMemory;
use hcl::GuestVtl;
use hcl::ioctl::MshvHvcall;
use hcl::ioctl::MshvVtlLow;
use hvdef::HypercallCode;
use hvdef::Vtl;
use hvdef::hypercall::HvInputVtl;
use inspect::Inspect;
use memory_range::AlignedSubranges;
use memory_range::MemoryRange;
use pal_async::task::Spawn;
use std::sync::Arc;
use tracing::Instrument;
use underhill_threadpool::AffinitizedThreadpool;
use virt::IsolationType;
use virt_mshv_vtl::ProtectIsolatedMemory;
use vm_topology::memory::MemoryLayout;
use vm_topology::memory::MemoryRangeWithNode;
use vm_topology::processor::ProcessorTopology;

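/// Guest memory mappings and `GuestMemory` access objects for the lower
/// VTLs, built by [`init`].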
#[derive(Inspect)]
pub struct MemoryMappings {
    vtl0: Arc<GuestMemoryMapping>,
    vtl1: Option<Arc<GuestMemoryMapping>>,
    #[inspect(skip)]
    vtl0_gm: GuestMemory,
    #[inspect(skip)]
    vtl0_kx_gm: GuestMemory,
    #[inspect(skip)]
    vtl0_ux_gm: GuestMemory,
    #[inspect(skip)]
    vtl1_gm: Option<GuestMemory>,
    #[inspect(flatten)]
    cvm_memory: Option<CvmMemory>,
}

/// Mappings, pools, and useful types for memory management that are only
/// available in confidential VMs.
#[derive(Inspect)]
pub struct CvmMemory {
    shared_mapping: Arc<GuestMemoryMapping>,
    #[inspect(skip)]
    pub shared_gm: GuestMemory,
    /// Includes only private VTL0 memory, not pages that have been made shared.
    #[inspect(skip)]
    pub private_vtl0_memory: GuestMemory,
    #[inspect(skip)]
    pub protector: Arc<dyn ProtectIsolatedMemory>,
}

impl MemoryMappings {
    /// Includes all VTL0-accessible memory (private and shared).
    pub fn vtl0(&self) -> &GuestMemory {
        &self.vtl0_gm
    }

    pub fn vtl0_kernel_execute(&self) -> &GuestMemory {
        &self.vtl0_kx_gm
    }

    pub fn vtl0_user_execute(&self) -> &GuestMemory {
        &self.vtl0_ux_gm
    }

    pub fn vtl1(&self) -> Option<&GuestMemory> {
        self.vtl1_gm.as_ref()
    }

    pub fn cvm_memory(&self) -> Option<&CvmMemory> {
        self.cvm_memory.as_ref()
    }
}

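/// Parameters for [`init`].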
pub struct Init<'a> {
    pub processor_topology: &'a ProcessorTopology,
    pub isolation: IsolationType,
    pub vtl0_alias_map_bit: Option<u64>,
    pub vtom: Option<u64>,
    pub mem_layout: &'a MemoryLayout,
    pub complete_memory_layout: &'a MemoryLayout,
    pub boot_init: Option<BootInit<'a>>,
    pub shared_pool: &'a [MemoryRangeWithNode],
    pub maximum_vtl: Vtl,
}

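/// Parameters that are only needed when memory must be prepared at boot,
/// i.e. accepted and protected. Typically absent when restoring after a
/// servicing operation.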
pub struct BootInit<'a> {
    pub tp: &'a AffinitizedThreadpool,
    pub vtl2_memory: &'a [MemoryRangeWithNode],
    pub accepted_regions: &'a [MemoryRange],
}

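/// Initializes lower-VTL memory: accepts and protects memory as needed, then
/// builds the [`MemoryMappings`] used to access it.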
pub async fn init(params: &Init<'_>) -> anyhow::Result<MemoryMappings> {
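    // Ranges newly accepted below are recorded here; on SNP they are zeroed
    // after VTL protections have been applied.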
    let mut validated_ranges = Vec::new();

    let acceptor = if params.isolation.is_isolated() {
        Some(Arc::new(MemoryAcceptor::new(params.isolation)?))
    } else {
        None
    };

    let hardware_isolated = params.isolation.is_hardware_isolated();

    if let Some(boot_init) = &params.boot_init {
        if !params.isolation.is_isolated() {
            // TODO: VTL 2 protections are applied in the boot shim for isolated
            // VMs. Since non-isolated VMs can undergo servicing and this is an
            // expensive operation, continue to apply protections here for now. In
            // the future, the boot shim should be made aware of when it's booting
            // during a servicing operation and unify the application of vtl2
            // protections.

            // `apply_vtl2_protections` temporarily wraps the hypercall
            // interface in an Arc so that it can be used across multiple
            // processors.

            tracing::debug!("Applying VTL2 protections");
            apply_vtl2_protections(boot_init.tp, boot_init.vtl2_memory)
                .instrument(tracing::info_span!("apply_vtl2_protections", CVM_ALLOWED))
                .await?;
        } else {
            // Prepare VTL0 memory for mapping.
            let acceptor = acceptor.as_ref().unwrap();
            let ram = params.mem_layout.ram().iter().map(|r| r.range);
            let accepted_ranges = boot_init.accepted_regions.iter().copied();
            // On hardware-isolated platforms, accepted memory was accepted with
            // VTL2-only permissions. Provide VTL0 access here.
            tracing::debug!("Applying VTL0 protections");
            if hardware_isolated {
                for range in memory_range::overlapping_ranges(ram.clone(), accepted_ranges.clone())
                {
                    acceptor.apply_initial_lower_vtl_protections(range)?;
                }
            }

            // Accept the memory that was not accepted by the boot loader.
            // FUTURE: do this lazily.
            let vp_count = std::cmp::max(1, params.processor_topology.vp_count() - 1);
            let accept_subrange = move |subrange| {
                acceptor.accept_lower_vtl_pages(subrange).unwrap();
                if hardware_isolated {
                    // Hardware-isolated VMs need this explicit call; for
                    // VBS-isolated VMs, the VTL protections are set as part
                    // of the accept call.
                    acceptor
                        .apply_initial_lower_vtl_protections(subrange)
                        .unwrap();
                }
            };
            tracing::debug!("Accepting VTL0 memory");
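            // Accept in parallel: split each range into roughly one chunk
            // per VP and accept each chunk on its own thread.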
            std::thread::scope(|scope| {
                for source_range in memory_range::subtract_ranges(ram, accepted_ranges) {
                    validated_ranges.push(source_range);

                    // Chunks must be 2MB aligned.
                    let two_mb = 2 * 1024 * 1024;
                    let mut range = source_range.aligned_subrange(two_mb);
                    if !range.is_empty() {
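                        // Size chunks as a whole number of 2MB pages so that
                        // chunk boundaries stay 2MB-aligned.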
                        let chunk_size = (range.page_count_2m().div_ceil(vp_count as u64)) * two_mb;
                        let chunk_count = range.len().div_ceil(chunk_size);

                        for _ in 0..chunk_count {
                            let subrange;
                            (subrange, range) = if range.len() >= chunk_size {
                                range.split_at_offset(chunk_size)
                            } else {
                                (range, MemoryRange::EMPTY)
                            };
                            scope.spawn(move || accept_subrange(subrange));
                        }
                        assert!(range.is_empty());
                    }

                    // Now accept whatever wasn't aligned on the edges.
                    scope.spawn(move || {
                        for unaligned_subrange in memory_range::subtract_ranges(
                            [source_range],
                            [source_range.aligned_subrange(two_mb)],
                        ) {
                            accept_subrange(unaligned_subrange);
                        }
                    });
                }
            });
        }
    }

    // Tell the hypervisor we want to use the shared pool for shared memory.
    //
    // TODO: don't we possibly need to unaccept these pages for SNP? Or are
    // we assuming they were not in the boot loader's pre-accepted pages?
    if let Some(acceptor) = &acceptor {
        tracing::debug!("Making shared pool pages shared");
        for range in params.shared_pool {
            acceptor
                .modify_gpa_visibility(
                    hvdef::hypercall::HostVisibilityType::SHARED,
                    &Vec::from_iter(range.range.start_4k_gpn()..range.range.end_4k_gpn()),
                )
                .context("unable to make shared pool pages shared vis")?;
        }
    }

    // Map lower VTL memory.
    let gpa_fd = MshvVtlLow::new().context("failed to open /dev/mshv_vtl_low")?;

    let gm = if hardware_isolated {
        assert!(params.vtl0_alias_map_bit.is_none());
        let vtom = params.vtom.unwrap();

        // Create the encrypted mapping with just the lower VTL memory.
        //
        // Do not register this mapping with the kernel. It will not be safe for
        // use with syscalls that expect virtual addresses to be in
        // kernel-registered RAM.

        tracing::debug!("Building valid encrypted memory view");
        let encrypted_memory_view = {
            let _span = tracing::info_span!("create encrypted memory view", CVM_ALLOWED).entered();
            GuestPartitionMemoryView::new(
                params.mem_layout,
                crate::mapping::GuestValidMemoryType::Encrypted,
                true,
            )?
        };

        tracing::debug!("Building encrypted memory map");
        let encrypted_mapping = Arc::new({
            let _span = tracing::info_span!("map_vtl1_memory", CVM_ALLOWED).entered();
            GuestMemoryMapping::builder(0)
                .dma_base_address(None)
                .build_with_bitmap(&gpa_fd, &encrypted_memory_view)
                .context("failed to map lower vtl encrypted memory")?
        });

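        // VTL 1 state is only set up if the partition can actually enable
        // VTL 1.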
        let use_vtl1 = params.maximum_vtl >= Vtl::Vtl1;

        // Start by giving VTL 0 full access to all lower-vtl memory.
        // TODO GUEST VSM: with lazy acceptance, it should instead be initialized to no
        // access.
        tracing::debug!("Building VTL0 memory map");
        let vtl0_mapping = Arc::new({
            let _span = tracing::info_span!("map_vtl0_memory", CVM_ALLOWED).entered();
            GuestMemoryMapping::builder(0)
                .dma_base_address(None)
                .use_permissions_bitmaps(if use_vtl1 { Some(true) } else { None })
                .build_with_bitmap(&gpa_fd, &encrypted_memory_view)
                .context("failed to map vtl0 memory")?
        });

        // Create the shared mapping with the complete memory map, to include
        // the shared pool. This memory is not private to VTL2, and devices
        // are expected to DMA to it.
        let shared_offset = match params.isolation {
            IsolationType::Tdx => {
                // Register memory just once, as shared memory. This
                // registration will be used both to map pages as shared and as
                // encrypted. If the kernel remaps a page into a kernel address,
                // it will be marked as shared, which can cause a fault or,
                // worse, an information leak.
                //
                // This is done this way because in TDX, there is only one
                // mapping for each page. The distinguishing bit is a reserved
                // bit, from the kernel's perspective. (You can also just see it
                // as the high bit of the GPA, but the Linux kernel does not
                // treat it that way.)
                //
                // TODO CVM: figure out how to prevent passing encrypted pages
                // to syscalls. Idea: prohibit locking of `GuestMemory` pages
                // for encrypted memory, so that there's no way to get a virtual
                // address. Downside: vmbus ring buffers are currently accessed
                // by locking memory, and this would need to be changed to use
                // some kind of override, or to go through `GuestMemory`
                // accessors, or something.
                0
            }
            IsolationType::Snp => {
                // SNP has two mappings for each shared page: one below and one
                // above VTOM. So, unlike for TDX, for SNP we could choose to
                // register memory twice, allowing the kernel to operate on
                // either shared or encrypted memory. But, for consistency with
                // TDX, just register the shared mapping.
                //
                // Register the VTOM mapping instead of the low mapping. In
                // theory it shouldn't matter; we should be able to ignore VTOM.
                // However, the ioctls to issue pvalidate and rmpadjust
                // instructions operate on VAs, and they must either be VAs
                // mapping unregistered pages or pages that were registered as
                // encrypted. Since we want to avoid registering the pages as
                // encrypted, the lower alias must remain unregistered, and so
                // the shared registration must use the high mapping.
                vtom
            }
            _ => unreachable!(),
        };

        // For TDX, the spec says that the IOMMU _may_ reject DMAs with the
        // shared bit clear, so set it in the IOVAs returned for the shared
        // mapping.
        //
        // For SNP, the hardware doesn't care; VTOM is not known by the IOMMU
        // and the hypervisor includes the VTOM alias in the IOMMU's page
        // tables. Use the VTOM alias for consistency with TDX.
        let dma_base_address = vtom;

        // Create the shared mapping with the complete memory map, to include
        // the shared pool. This memory is not private to VTL2, and devices
        // are expected to access it via DMA.
        //
        // Don't allow kernel access here either--the kernel seems to get
        // confused about shared memory, and our current use of kernel-mode
        // guest memory access is limited to low-perf paths where we can use
        // bounce buffering.
        tracing::debug!("Building shared memory map");

        let shared_memory_view = {
            let _span = tracing::info_span!("create shared memory view", CVM_ALLOWED).entered();
            GuestPartitionMemoryView::new(
                params.complete_memory_layout,
                crate::mapping::GuestValidMemoryType::Shared,
                false,
            )?
        };

        let valid_shared_memory = shared_memory_view.partition_valid_memory();

        // Update the shared mapping bitmap for pages used by the shared
        // visibility pool to be marked as shared, since by default pages are
        // marked as no-access in the bitmap.
        tracing::debug!("Updating shared mapping bitmaps");
        for range in params.shared_pool {
            valid_shared_memory.as_ref().update_valid(range.range, true);
        }

        let shared_mapping = Arc::new({
            let _span = tracing::info_span!("map_shared_memory", CVM_ALLOWED).entered();
            GuestMemoryMapping::builder(shared_offset)
                .shared(true)
                .ignore_registration_failure(params.boot_init.is_none())
                .dma_base_address(Some(dma_base_address))
                .build_with_bitmap(&gpa_fd, &shared_memory_view)
                .context("failed to map shared memory")?
        });

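        // All guest memory views built below route their access checks
        // through this protector, which tracks page visibility and VTL
        // protections for the CVM.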
        let protector = Arc::new(HardwareIsolatedMemoryProtector::new(
            encrypted_memory_view.partition_valid_memory().clone(),
            valid_shared_memory.clone(),
            encrypted_mapping.clone(),
            vtl0_mapping.clone(),
            params.mem_layout.clone(),
            acceptor.as_ref().unwrap().clone(),
        )) as Arc<dyn ProtectIsolatedMemory>;

        tracing::debug!("Creating VTL0 guest memory");
        let vtl0_gm = GuestMemory::new_multi_region(
            "vtl0",
            vtom,
            vec![
                Some(GuestMemoryView::new(
                    Some(protector.clone()),
                    vtl0_mapping.clone(),
                    GuestMemoryViewReadType::Read,
                    GuestVtl::Vtl0,
                )),
                Some(GuestMemoryView::new(
                    Some(protector.clone()),
                    shared_mapping.clone(),
                    GuestMemoryViewReadType::Read,
                    GuestVtl::Vtl0,
                )),
            ],
        )
        .context("failed to make vtl0 guest memory")?;

        let (vtl1_mapping, vtl1_gm) = if use_vtl1 {
            tracing::debug!("Creating VTL1 guest memory");
            // For VTL 1, vtl protections are dictated by what VTL 2 thinks is
            // valid lower-vtl memory, and therefore additional vtl protection
            // bitmaps aren't needed for the mapping.
            (
                Some(encrypted_mapping.clone()),
                Some(
                    GuestMemory::new_multi_region(
                        "vtl1",
                        vtom,
                        vec![
                            Some(GuestMemoryView::new(
                                Some(protector.clone()),
                                encrypted_mapping.clone(),
                                GuestMemoryViewReadType::Read,
                                GuestVtl::Vtl1,
                            )),
                            Some(GuestMemoryView::new(
                                Some(protector.clone()),
                                shared_mapping.clone(),
                                GuestMemoryViewReadType::Read,
                                GuestVtl::Vtl1,
                            )),
                        ],
                    )
                    .context("failed to make vtl1 guest memory")?,
                ),
            )
        } else {
            (None, None)
        };

        if params.isolation == IsolationType::Snp {
            // For SNP, zero any newly accepted private lower-vtl memory in case
            // the hypervisor decided to remap VTL 2 memory into lower-VTL GPA
            // space. This is safe to do after the vtl permissions have been
            // applied because the lower VTLs are not running yet.
            //
            // TODO: perform lazily
            let _span =
                tracing::info_span!("zeroing lower vtl memory for SNP", CVM_ALLOWED).entered();

            tracing::debug!("zeroing lower vtl memory for SNP");
            for range in validated_ranges {
                vtl0_gm
                    .fill_at(range.start(), 0, range.len() as usize)
                    .expect("private memory should be valid at this stage");
            }
        }

        // Untrusted devices can only access shared memory, but they can do so
        // from either alias (below and above vtom). This is consistent with
        // what the IOMMU is programmed with.
        tracing::debug!("Creating untrusted shared DMA memory");
        let shared_gm = GuestMemory::new_multi_region(
            "shared",
            vtom,
            vec![
                Some(GuestMemoryView::new(
                    Some(protector.clone()),
                    shared_mapping.clone(),
                    GuestMemoryViewReadType::Read,
                    GuestVtl::Vtl0,
                )),
                Some(GuestMemoryView::new(
                    Some(protector.clone()),
                    shared_mapping.clone(),
                    GuestMemoryViewReadType::Read,
                    GuestVtl::Vtl0,
                )),
            ],
        )
        .context("failed to make shared guest memory")?;

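        // A single-region view over the private (encrypted) VTL0 mapping
        // only; pages that have been made shared are not accessible through
        // it.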
        let private_vtl0_memory = GuestMemory::new(
            "trusted",
            GuestMemoryView::new(
                Some(protector.clone()),
                vtl0_mapping.clone(),
                GuestMemoryViewReadType::Read,
                GuestVtl::Vtl0,
            ),
        );

        tracing::debug!("Creating VTL0 guest memory for kernel execute access");
        let vtl0_kx_gm = GuestMemory::new_multi_region(
            "vtl0_kx",
            vtom,
            vec![
                Some(GuestMemoryView::new(
                    Some(protector.clone()),
                    vtl0_mapping.clone(),
                    GuestMemoryViewReadType::KernelExecute,
                    GuestVtl::Vtl0,
                )),
                Some(GuestMemoryView::new(
                    Some(protector.clone()),
                    shared_mapping.clone(),
                    GuestMemoryViewReadType::KernelExecute,
                    GuestVtl::Vtl0,
                )),
            ],
        )
        .context("failed to make vtl0 guest memory with kernel execute access")?;

        tracing::debug!("Creating VTL0 guest memory for user execute access");
        let vtl0_ux_gm = GuestMemory::new_multi_region(
            "vtl0_ux",
            vtom,
            vec![
                Some(GuestMemoryView::new(
                    Some(protector.clone()),
                    vtl0_mapping.clone(),
                    GuestMemoryViewReadType::UserExecute,
                    GuestVtl::Vtl0,
                )),
                Some(GuestMemoryView::new(
                    Some(protector.clone()),
                    shared_mapping.clone(),
                    GuestMemoryViewReadType::UserExecute,
                    GuestVtl::Vtl0,
                )),
            ],
        )
        .context("failed to make vtl0 guest memory with user execute access")?;

        MemoryMappings {
            vtl0: vtl0_mapping,
            vtl1: vtl1_mapping,
            vtl0_gm,
            vtl0_kx_gm,
            vtl0_ux_gm,
            vtl1_gm,
            cvm_memory: Some(CvmMemory {
                shared_gm,
                private_vtl0_memory,
                shared_mapping,
                protector,
            }),
        }
    } else {
        tracing::debug!("Creating VTL0 guest memory");
        let vtl0_mapping = {
            let _span = tracing::info_span!("map_vtl0_memory", CVM_ALLOWED).entered();
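            // Use the VTL0 alias map as the mapping base when it is
            // available; see the Guest VSM handling below.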
            let base_address = params.vtl0_alias_map_bit.unwrap_or(0);

            Arc::new(
                GuestMemoryMapping::builder(base_address)
                    .for_kernel_access(true)
                    .dma_base_address(Some(base_address))
                    .ignore_registration_failure(params.boot_init.is_none())
                    .build_without_bitmap(&gpa_fd, params.mem_layout)
                    .context("failed to map vtl0 memory")?,
            )
        };
        let vtl0_gm = GuestMemory::new(
            "vtl0",
            GuestMemoryView::new(
                None,
                vtl0_mapping.clone(),
                GuestMemoryViewReadType::Read,
                GuestVtl::Vtl0,
            ),
        );

        let vtl1_mapping = if params.maximum_vtl >= Vtl::Vtl1 {
            if params.vtl0_alias_map_bit.is_none() {
                if cfg!(guest_arch = "x86_64") {
                    // Guest VSM cannot be exposed to the guest unless the
                    // alias map is available. Otherwise, Underhill cannot
                    // correctly check for VTL0 access protections. Ideally, UH
                    // would hide Guest VSM from the guest if the alias map is
                    // not available, but the guest secure kernel checks the
                    // access_vsm permission to determine if Guest VSM is
                    // supported, and there is no mechanism for UH to hide that
                    // from the guest. Thus, it is not safe to proceed.
                    anyhow::bail!("cannot safely support VTL 1 without using the alias map");
                } else {
                    // On ARM, the alias map is not exposed: see
                    // underhill_core::init::vtl0_alias_map_bit.
                    tracing::warn!(
                        CVM_ALLOWED,
                        "cannot safely support VTL 1 without using the alias map; Guest VSM not supported"
                    );
                    None
                }
            } else {
                tracing::debug!("Creating VTL 1 memory map");

                let _span = tracing::info_span!("map_vtl1_memory", CVM_ALLOWED).entered();
                Some(Arc::new(
                    GuestMemoryMapping::builder(0)
                        .for_kernel_access(true)
                        .dma_base_address(Some(0))
                        .ignore_registration_failure(params.boot_init.is_none())
                        .build_without_bitmap(&gpa_fd, params.mem_layout)
                        .context("failed to map vtl1 memory")?,
                ))
            }
        } else {
            None
        };

        let vtl1_gm = if let Some(vtl1_mapping) = &vtl1_mapping {
            tracing::info!(CVM_ALLOWED, "VTL 1 memory map created");
            Some(GuestMemory::new(
                "vtl1",
                GuestMemoryView::new(
                    None,
                    vtl1_mapping.clone(),
                    GuestMemoryViewReadType::Read,
                    GuestVtl::Vtl1,
                ),
            ))
        } else {
            tracing::info!(CVM_ALLOWED, "Skipping VTL 1 memory map creation");
            None
        };

        // TODO: make kernel/user execute guest memory objects that use a
        // fallback path to query the hypervisor for the permissions.
        MemoryMappings {
            vtl0: vtl0_mapping,
            vtl1: vtl1_mapping,
            vtl0_gm: vtl0_gm.clone(),
            vtl0_kx_gm: vtl0_gm.clone(),
            vtl0_ux_gm: vtl0_gm.clone(),
            vtl1_gm,
            cvm_memory: None,
        }
    };
    Ok(gm)
}

/// Apply VTL2 protections to all VTL2 ram ranges. This marks all VTL2 pages as
/// no access by lower VTLs.
async fn apply_vtl2_protections(
    threadpool: &AffinitizedThreadpool,
    vtl2_memory: &[MemoryRangeWithNode],
) -> anyhow::Result<()> {
    let mshv_hvcall = Arc::new(MshvHvcall::new().context("failed to open mshv_hvcall device")?);
    mshv_hvcall.set_allowed_hypercalls(&[HypercallCode::HvCallModifyVtlProtectionMask]);

    // Apply VTL2 protections in 2GB units. This is large enough to get large
    // pages in the kernel, but small enough to allow parallelism across most of
    // the VPs.
    const MAX_RANGE_LEN: u64 = 2 << 30;

    let ranges: Vec<_> = vtl2_memory
        .iter()
        .flat_map(|range| AlignedSubranges::new(range.range).with_max_range_len(MAX_RANGE_LEN))
        .collect();

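    // Fan the ranges out across the threadpool's drivers, round-robin, and
    // apply protections concurrently.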
    try_join_all(
        ranges
            .into_iter()
            .zip(threadpool.active_drivers().cycle())
            .map(|(range, driver)| {
                let mshv_hvcall = mshv_hvcall.clone();
                driver.spawn(
                    "apply-vtl2-protections",
                    async move {
                        tracing::debug!(
                            cpu = underhill_threadpool::Thread::current()
                                .unwrap()
                                .with_driver(|driver| driver.target_cpu()),
                            %range,
                            "applying protections"
                        );
                        mshv_hvcall
                            .modify_vtl_protection_mask(
                                range,
                                hvdef::HV_MAP_GPA_PERMISSIONS_NONE,
                                HvInputVtl::CURRENT_VTL,
                            )
                            .with_context(|| {
                                format!("failed to apply vtl2 protections for {range}")
                            })
                    }
                    .in_current_span(),
                )
            }),
    )
    .await?;

    Ok(())
}