underhill_mem/mapping.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

// UNSAFETY: Implementing GuestMemoryAccess.
#![expect(unsafe_code)]

use crate::MshvVtlWithPolicy;
use crate::RegistrationError;
use crate::registrar::MemoryRegistrar;
use guestmem::GuestMemoryAccess;
use guestmem::GuestMemoryBackingError;
use guestmem::PAGE_SIZE;
use hcl::GuestVtl;
use hcl::ioctl::Mshv;
use hcl::ioctl::MshvVtlLow;
use hvdef::HvMapGpaFlags;
use inspect::Inspect;
use memory_range::MemoryRange;
use parking_lot::Mutex;
use sparse_mmap::SparseMapping;
use std::ptr::NonNull;
use std::sync::Arc;
use thiserror::Error;
use virt_mshv_vtl::ProtectIsolatedMemory;
use vm_topology::memory::MemoryLayout;

pub struct GuestPartitionMemoryView<'a> {
    memory_layout: &'a MemoryLayout,
    valid_memory: Arc<GuestValidMemory>,
}

impl<'a> GuestPartitionMemoryView<'a> {
    /// A bitmap is created to track the accessibility state of each page in the
    /// lower VTL memory. The bitmap is initialized to `valid_bitmap_state`.
    ///
    /// This is used to support tracking the shared/encrypted state of each
    /// page.
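    ///
    /// A minimal usage sketch (hypothetical variable names; `layout` is assumed
    /// to be an existing [`MemoryLayout`]):
    ///
    /// ```ignore
    /// let shared_view = GuestPartitionMemoryView::new(
    ///     &layout,
    ///     GuestValidMemoryType::Shared,
    ///     false, // no pages are host-visible until the guest shares them
    /// )?;
    /// let valid_shared = shared_view.partition_valid_memory();
    /// ```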
    pub fn new(
        memory_layout: &'a MemoryLayout,
        memory_type: GuestValidMemoryType,
        valid_bitmap_state: bool,
    ) -> Result<Self, MappingError> {
        let valid_memory =
            GuestValidMemory::new(memory_layout, memory_type, valid_bitmap_state).map(Arc::new)?;
        Ok(Self {
            memory_layout,
            valid_memory,
        })
    }

    /// Returns the built partition-wide valid memory.
    pub fn partition_valid_memory(&self) -> Arc<GuestValidMemory> {
        self.valid_memory.clone()
    }

    /// Build a [`GuestMemoryMapping`], feeding in any related partition-wide
    /// state.
    fn build_guest_memory_mapping(
        &self,
        mshv_vtl_low: &MshvVtlLow,
        memory_mapping_builder: &mut GuestMemoryMappingBuilder,
    ) -> Result<GuestMemoryMapping, MappingError> {
        memory_mapping_builder
            .use_partition_valid_memory(Some(self.valid_memory.clone()))
            .build(mshv_vtl_low, self.memory_layout)
    }
}

#[derive(Debug, Inspect)]
pub enum GuestMemoryViewReadType {
    Read,
    KernelExecute,
    UserExecute,
}

#[derive(Inspect)]
pub struct GuestMemoryView {
    #[inspect(skip)]
    protector: Option<Arc<dyn ProtectIsolatedMemory>>,
    pub memory_mapping: Arc<GuestMemoryMapping>,
    pub view_type: GuestMemoryViewReadType,
    vtl: GuestVtl,
}

impl GuestMemoryView {
    pub fn new(
        protector: Option<Arc<dyn ProtectIsolatedMemory>>,
        memory_mapping: Arc<GuestMemoryMapping>,
        view_type: GuestMemoryViewReadType,
        vtl: GuestVtl,
    ) -> Self {
        Self {
            protector,
            memory_mapping,
            view_type,
            vtl,
        }
    }
}

#[derive(Error, Debug)]
#[error("the specified page is not mapped")]
struct NotMapped;

#[derive(Error, Debug)]
enum BitmapFailure {
    #[error("the specified page was accessed using the wrong visibility mapping")]
    IncorrectHostVisibilityAccess,
    #[error("the specified page access violates VTL 1 protections")]
    Vtl1ProtectionsViolation,
}

/// SAFETY: Implementing the `GuestMemoryAccess` contract, including the
/// size and lifetime of the mappings and bitmaps.
unsafe impl GuestMemoryAccess for GuestMemoryView {
    fn mapping(&self) -> Option<NonNull<u8>> {
        NonNull::new(self.memory_mapping.mapping.as_ptr().cast())
    }

    fn max_address(&self) -> u64 {
        self.memory_mapping.mapping.len() as u64
    }

    fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
        if let Some(registrar) = &self.memory_mapping.registrar {
            registrar
                .register(address, len)
                .map_err(|start| GuestMemoryBackingError::other(start, RegistrationError))
        } else {
            // TODO: fail this call once we have a way to avoid calling this for
            // user-mode-only accesses to locked memory (e.g., for vmbus ring
            // buffers). We can't fail this for now because TDX cannot register
            // encrypted memory.
            Ok(())
        }
    }

    fn base_iova(&self) -> Option<u64> {
        // When the alias map is configured for this mapping, VTL2-mapped
        // devices need to do DMA with the alias map bit set to avoid DMAing
        // into VTL1 memory.
        self.memory_mapping.iova_offset
    }

    fn access_bitmap(&self) -> Option<guestmem::BitmapInfo> {
        // When the permissions bitmaps are available, they take precedence and
        // therefore should be no more permissive than the access bitmap.
        //
        // TODO GUEST VSM: consider being able to dynamically update these
        // bitmaps. There are two scenarios where this would be useful:
        // 1. To reduce memory consumption in cases where the bitmaps aren't
        //    needed, i.e. the guest chooses not to enable guest vsm and VTL 1
        //    gets revoked.
        // 2. Because the related guest memory objects are initialized before
        //    VTL 1 is, the code as it currently stands will always enforce vtl 1
        //    protections even if VTL 1 hasn't explicitly enabled it. e.g. if VTL 1
        //    never enables vtl protections via the vsm partition config, but it
        //    still makes hypercalls to modify the vtl protection mask (this is a
        //    valid scenario to help set up default protections), these protections
        //    will still be enforced. In practice, a well-designed VTL 1 probably
        //    would enable vtl protections before allowing VTL 0 to run again, but
        //    technically the implementation here is not to spec.
        if let Some(bitmaps) = self.memory_mapping.permission_bitmaps.as_ref() {
            match self.view_type {
                GuestMemoryViewReadType::Read => Some(guestmem::BitmapInfo {
                    read_bitmap: NonNull::new(bitmaps.read_bitmap.as_ptr().cast()).unwrap(),
                    write_bitmap: NonNull::new(bitmaps.write_bitmap.as_ptr().cast()).unwrap(),
                    bit_offset: 0,
                }),
                GuestMemoryViewReadType::KernelExecute => Some(guestmem::BitmapInfo {
                    read_bitmap: NonNull::new(bitmaps.kernel_execute_bitmap.as_ptr().cast())
                        .unwrap(),
                    write_bitmap: NonNull::new(bitmaps.write_bitmap.as_ptr().cast()).unwrap(),
                    bit_offset: 0,
                }),
                GuestMemoryViewReadType::UserExecute => Some(guestmem::BitmapInfo {
                    read_bitmap: NonNull::new(bitmaps.user_execute_bitmap.as_ptr().cast()).unwrap(),
                    write_bitmap: NonNull::new(bitmaps.write_bitmap.as_ptr().cast()).unwrap(),
                    bit_offset: 0,
                }),
            }
        } else {
            self.memory_mapping
                .valid_memory
                .as_ref()
                .map(|bitmap| bitmap.access_bitmap())
        }
    }

    fn page_fault(
        &self,
        address: u64,
        len: usize,
        write: bool,
        bitmap_failure: bool,
    ) -> guestmem::PageFaultAction {
        let gpn = address / PAGE_SIZE as u64;
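        // A fault that is not a bitmap failure means the page has no backing
        // mapping at all.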
        if !bitmap_failure {
            guestmem::PageFaultAction::Fail(guestmem::PageFaultError::other(NotMapped {}))
        } else {
            let valid_memory = self
                .memory_mapping
                .valid_memory
                .as_ref()
                .expect("all backings with bitmaps should have a GuestValidMemory");
            if !valid_memory.check_valid(gpn) {
                match valid_memory.memory_type() {
                    GuestValidMemoryType::Shared => {
                        tracing::warn!(
                            ?address,
                            ?len,
                            ?write,
                            "tried to access private page using shared mapping"
                        );
                        guestmem::PageFaultAction::Fail(guestmem::PageFaultError::new(
                            guestmem::GuestMemoryErrorKind::NotShared,
                            BitmapFailure::IncorrectHostVisibilityAccess,
                        ))
                    }
                    GuestValidMemoryType::Encrypted => {
                        tracing::warn!(
                            ?address,
                            ?len,
                            ?write,
                            "tried to access shared page using private mapping"
                        );
                        guestmem::PageFaultAction::Fail(guestmem::PageFaultError::new(
                            guestmem::GuestMemoryErrorKind::NotPrivate,
                            BitmapFailure::IncorrectHostVisibilityAccess,
                        ))
                    }
                }
            } else {
                // Currently, only VTL 1 permissions are tracked, so any
                // invalid accesses here violate VTL 1 protections.
                if let Some(permission_bitmaps) = &self.memory_mapping.permission_bitmaps {
                    let check_bitmap = if write {
                        &permission_bitmaps.write_bitmap
                    } else {
                        match self.view_type {
                            GuestMemoryViewReadType::Read => &permission_bitmaps.read_bitmap,
                            GuestMemoryViewReadType::KernelExecute => {
                                &permission_bitmaps.kernel_execute_bitmap
                            }
                            GuestMemoryViewReadType::UserExecute => {
                                &permission_bitmaps.user_execute_bitmap
                            }
                        }
                    };

                    if !check_bitmap.page_state(gpn) {
                        tracing::warn!(?address, ?len, ?write, ?self.view_type, "VTL 1 permissions violation");

                        return guestmem::PageFaultAction::Fail(guestmem::PageFaultError::new(
                            guestmem::GuestMemoryErrorKind::VtlProtected,
                            BitmapFailure::Vtl1ProtectionsViolation,
                        ));
                    }
                }

                // Possible race condition where the bitmaps are in transition
                // and while the original check failed, the bitmaps now show
                // valid access to the page. Retry in that situation.
                guestmem::PageFaultAction::Retry
            }
        }
    }

    fn lock_gpns(&self, gpns: &[u64]) -> Result<bool, GuestMemoryBackingError> {
        if let Some(protector) = self.protector.as_ref() {
            protector.lock_gpns(self.vtl, gpns)?;
            Ok(true)
        } else {
            Ok(false)
        }
    }

    fn unlock_gpns(&self, gpns: &[u64]) {
        if let Some(protector) = self.protector.as_ref() {
            protector.unlock_gpns(self.vtl, gpns)
        }
    }
}

#[derive(Debug, Copy, Clone)]
pub enum GuestValidMemoryType {
    Shared,
    Encrypted,
}

/// Partition-wide (cross-vtl) tracking of valid memory that can be used in
/// individual GuestMemoryMappings.
#[derive(Debug)]
pub struct GuestValidMemory {
    valid_bitmap: GuestMemoryBitmap,
    valid_bitmap_lock: Mutex<()>,
    memory_type: GuestValidMemoryType,
}

impl GuestValidMemory {
    fn new(
        memory_layout: &MemoryLayout,
        memory_type: GuestValidMemoryType,
        valid_bitmap_state: bool,
    ) -> Result<Self, MappingError> {
        let valid_bitmap = {
            let mut bitmap = {
                // Calculate the total size of the address space by looking at the ending region.
                let last_entry = memory_layout
                    .ram()
                    .last()
                    .expect("memory map must have at least 1 entry");
                let address_space_size = last_entry.range.end();
                GuestMemoryBitmap::new(address_space_size as usize)?
            };

            for entry in memory_layout.ram() {
                if entry.range.is_empty() {
                    continue;
                }

                bitmap.init(entry.range, valid_bitmap_state)?;
            }

            bitmap
        };

        Ok(GuestValidMemory {
            valid_bitmap,
            valid_bitmap_lock: Default::default(),
            memory_type,
        })
    }

    /// Update the bitmap to reflect the validity of the given range.
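    ///
    /// A sketch of how shared/encrypted tracking might be kept in sync when the
    /// guest makes a range host-visible (hypothetical variable names for the
    /// two partition-wide trackers):
    ///
    /// ```ignore
    /// // The range is now reachable through the shared mapping and no longer
    /// // through the encrypted mapping.
    /// valid_shared.update_valid(range, true);
    /// valid_encrypted.update_valid(range, false);
    /// ```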
    pub fn update_valid(&self, range: MemoryRange, state: bool) {
        let _lock = self.valid_bitmap_lock.lock();
        self.valid_bitmap.update(range, state);
    }

    /// Check if the given page is valid.
    pub(crate) fn check_valid(&self, gpn: u64) -> bool {
        self.valid_bitmap.page_state(gpn)
    }

    /// Returns the type of memory tracked by the bitmap.
    pub(crate) fn memory_type(&self) -> GuestValidMemoryType {
        self.memory_type
    }

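    /// Returns the [`guestmem::BitmapInfo`] backed by the validity bitmap. The
    /// same bitmap is used for both reads and writes, since this tracks
    /// accessibility rather than permissions.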
    fn access_bitmap(&self) -> guestmem::BitmapInfo {
        let ptr = NonNull::new(self.valid_bitmap.as_ptr()).unwrap();
        guestmem::BitmapInfo {
            read_bitmap: ptr,
            write_bitmap: ptr,
            bit_offset: 0,
        }
    }
}

/// An implementation of the [`GuestMemoryAccess`] trait for Underhill VMs.
#[derive(Debug, Inspect)]
pub struct GuestMemoryMapping {
    #[inspect(skip)]
    mapping: SparseMapping,
    iova_offset: Option<u64>,
    #[inspect(with = "Option::is_some")]
    valid_memory: Option<Arc<GuestValidMemory>>,
    #[inspect(with = "Option::is_some")]
    permission_bitmaps: Option<PermissionBitmaps>,
    registrar: Option<MemoryRegistrar<MshvVtlWithPolicy>>,
}

/// Bitmaps, implemented using sparse mappings, that track the VTL 1 access
/// permissions (read, write, kernel execute, user execute) of each page.
#[derive(Debug)]
struct PermissionBitmaps {
    permission_update_lock: Mutex<()>,
    read_bitmap: GuestMemoryBitmap,
    write_bitmap: GuestMemoryBitmap,
    kernel_execute_bitmap: GuestMemoryBitmap,
    user_execute_bitmap: GuestMemoryBitmap,
}

#[derive(Error, Debug)]
pub enum VtlPermissionsError {
    #[error("no vtl 1 permissions enforcement, bitmap is not present")]
    NoPermissionsTracked,
}

/// Bitmap implementation using a sparse mapping that can be used to track page
/// states.
#[derive(Debug)]
struct GuestMemoryBitmap {
    bitmap: SparseMapping,
}

impl GuestMemoryBitmap {
    fn new(address_space_size: usize) -> Result<Self, MappingError> {
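        // One bit tracks each page, so the bitmap needs one byte of VA space
        // per 8 pages (rounded up); `init` later allocates backing pages for
        // the portions that cover RAM.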
        let bitmap = SparseMapping::new((address_space_size / PAGE_SIZE).div_ceil(8))
            .map_err(MappingError::BitmapReserve)?;
        bitmap
            .map_zero(0, bitmap.len())
            .map_err(MappingError::BitmapMap)?;
        Ok(Self { bitmap })
    }

    fn init(&mut self, range: MemoryRange, state: bool) -> Result<(), MappingError> {
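        // Each bitmap byte covers 8 guest pages, so the range must begin and
        // end on an 8-page (PAGE_SIZE * 8 byte) boundary to map onto whole
        // bitmap bytes.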
        if range.start() % (PAGE_SIZE as u64 * 8) != 0 || range.end() % (PAGE_SIZE as u64 * 8) != 0
        {
            return Err(MappingError::BadAlignment(range));
        }

        let bitmap_start = range.start() as usize / PAGE_SIZE / 8;
        let bitmap_end = (range.end() - 1) as usize / PAGE_SIZE / 8;
        let bitmap_page_start = bitmap_start / PAGE_SIZE;
        let bitmap_page_end = bitmap_end / PAGE_SIZE;
        let page_count = bitmap_page_end + 1 - bitmap_page_start;

        // TODO SNP: map some pre-reserved lower VTL memory into the
        // bitmap. Or just figure out how to hot add that memory to the
        // kernel. Or have the boot loader reserve it at boot time.
        self.bitmap
            .alloc(bitmap_page_start * PAGE_SIZE, page_count * PAGE_SIZE)
            .map_err(MappingError::BitmapAlloc)?;

        // Set the initial bitmap state.
        if state {
            let start_gpn = range.start() / PAGE_SIZE as u64;
            let gpn_count = range.len() / PAGE_SIZE as u64;
            assert_eq!(start_gpn % 8, 0);
            assert_eq!(gpn_count % 8, 0);
            self.bitmap
                .fill_at(start_gpn as usize / 8, 0xff, gpn_count as usize / 8)
                .unwrap();
        }

        Ok(())
    }

    /// Panics if the range is outside of guest RAM.
    fn update(&self, range: MemoryRange, state: bool) {
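        // `gpn / 8` selects the bitmap byte and `gpn % 8` the bit within it;
        // for example, gpn 0x12345 lives in byte 0x2468, bit 5.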
        for gpn in range.start() / PAGE_SIZE as u64..range.end() / PAGE_SIZE as u64 {
            // TODO: use `fill_at` for the aligned part of the range.
            let mut b = 0;
            self.bitmap
                .read_at(gpn as usize / 8, std::slice::from_mut(&mut b))
                .unwrap();
            if state {
                b |= 1 << (gpn % 8);
            } else {
                b &= !(1 << (gpn % 8));
            }
            self.bitmap
                .write_at(gpn as usize / 8, std::slice::from_ref(&b))
                .unwrap();
        }
    }

    /// Read the bitmap for `gpn`.
    /// Panics if the range is outside of guest RAM.
    fn page_state(&self, gpn: u64) -> bool {
        let mut b = 0;
        self.bitmap
            .read_at(gpn as usize / 8, std::slice::from_mut(&mut b))
            .unwrap();
        b & (1 << (gpn % 8)) != 0
    }

    fn as_ptr(&self) -> *mut u8 {
        self.bitmap.as_ptr().cast()
    }
}

/// Error constructing a [`GuestMemoryMapping`].
#[derive(Debug, Error)]
pub enum MappingError {
    #[error("failed to allocate VA space for guest memory")]
    Reserve(#[source] std::io::Error),
    #[error("failed to map guest memory pages")]
    Map(#[source] std::io::Error),
    #[error("failed to allocate VA space for bitmap")]
    BitmapReserve(#[source] std::io::Error),
    #[error("failed to map zero pages for bitmap")]
    BitmapMap(#[source] std::io::Error),
    #[error("failed to allocate pages for bitmap")]
    BitmapAlloc(#[source] std::io::Error),
    #[error("memory map entry {0} has insufficient alignment to support a bitmap")]
    BadAlignment(MemoryRange),
    #[error("failed to open device")]
    OpenDevice(#[source] hcl::ioctl::Error),
}

/// A builder for [`GuestMemoryMapping`].
pub struct GuestMemoryMappingBuilder {
    physical_address_base: u64,
    valid_memory: Option<Arc<GuestValidMemory>>,
    permissions_bitmap_state: Option<bool>,
    shared: bool,
    for_kernel_access: bool,
    dma_base_address: Option<u64>,
    ignore_registration_failure: bool,
}

impl GuestMemoryMappingBuilder {
    fn use_partition_valid_memory(
        &mut self,
        valid_memory: Option<Arc<GuestValidMemory>>,
    ) -> &mut Self {
        self.valid_memory = valid_memory;
        self
    }

    /// Set whether to allocate tracking bitmaps for memory access permissions,
    /// and specify the initial state of the bitmaps.
    ///
    /// This is used to support tracking the read/write/kernel execute/user
    /// execute permissions of each page.
    pub fn use_permissions_bitmaps(&mut self, initial_state: Option<bool>) -> &mut Self {
        self.permissions_bitmap_state = initial_state;
        self
    }

    /// Set whether this is a mapping to access shared memory.
    pub fn shared(&mut self, is_shared: bool) -> &mut Self {
        self.shared = is_shared;
        self
    }

    /// Set whether this mapping's memory can be locked to pass to the kernel.
    ///
    /// If so, then the memory will be registered with the kernel as part of
    /// `expose_va`, which is called when memory is locked.
    pub fn for_kernel_access(&mut self, for_kernel_access: bool) -> &mut Self {
        self.for_kernel_access = for_kernel_access;
        self
    }

    /// Sets the base address to use for DMAs to this memory.
    ///
    /// This may be `None` if DMA is not supported.
    ///
    /// The address to use depends on the backing technology. For SNP VMs, it
    /// should be either zero or the VTOM address, since shared memory is mapped
    /// twice. For TDX VMs, shared memory is only mapped once, but the IOMMU
    /// expects the SHARED bit to be set in DMA transactions, so it should be
    /// set here. And for non-isolated/software-isolated VMs, it should be zero
    /// or the VTL0 alias address, depending on which VTL this memory mapping is
    /// for.
    pub fn dma_base_address(&mut self, dma_base_address: Option<u64>) -> &mut Self {
        self.dma_base_address = dma_base_address;
        self
    }

    /// Ignore registration failures when registering memory with the kernel.
    ///
    /// This should be used when user mode is restarted for servicing but the
    /// kernel is not. Since this is not currently a production scenario, this
    /// is a simple way to avoid needing to track the state of the kernel
    /// registration across user-mode restarts.
    ///
    /// It is not a good idea to enable this otherwise, since the kernel very
    /// noisily complains if memory is registered twice, so we don't want that
    /// leaking into production scenarios.
    ///
    /// FUTURE: fix the kernel to silently succeed duplicate registrations.
    pub fn ignore_registration_failure(&mut self, ignore: bool) -> &mut Self {
        self.ignore_registration_failure = ignore;
        self
    }

    /// Build a [`GuestMemoryMapping`] that uses the partition-wide bitmap to
    /// track the accessibility state of each page in the lower VTL memory.
    pub fn build_with_bitmap(
        &mut self,
        mshv_vtl_low: &MshvVtlLow,
        partition_builder: &GuestPartitionMemoryView<'_>,
    ) -> Result<GuestMemoryMapping, MappingError> {
        partition_builder.build_guest_memory_mapping(mshv_vtl_low, self)
    }

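    /// Build a [`GuestMemoryMapping`] without any partition-wide valid-memory
    /// tracking.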
    pub fn build_without_bitmap(
        &self,
        mshv_vtl_low: &MshvVtlLow,
        memory_layout: &MemoryLayout,
    ) -> Result<GuestMemoryMapping, MappingError> {
        self.build(mshv_vtl_low, memory_layout)
    }

    /// Map the lower VTL address space.
    ///
    /// If `shared` is set, then map the kernel mapping as shared memory.
    ///
    /// `physical_address_base` (plus the shared-memory flag, when applicable)
    /// is added to construct the file offset for each memory range. This can be
    /// the high bit to specify decrypted/shared memory, or it can be the VTL0
    /// alias map start for non-isolated VMs.
    ///
    /// When handing out IOVAs for device DMA, `dma_base_address` is added. This
    /// can be VTOM for SNP-isolated VMs, or it can be the VTL0 alias map start
    /// for non-isolated VMs.
    fn build(
        &self,
        mshv_vtl_low: &MshvVtlLow,
        memory_layout: &MemoryLayout,
    ) -> Result<GuestMemoryMapping, MappingError> {
        // Calculate the file offset within the `mshv_vtl_low` file.
        let file_starting_offset = self.physical_address_base
            | if self.shared {
                MshvVtlLow::SHARED_MEMORY_FLAG
            } else {
                0
            };

        // Calculate the total size of the address space by looking at the ending region.
        let last_entry = memory_layout
            .ram()
            .last()
            .expect("memory map must have at least 1 entry");
        let address_space_size = last_entry.range.end();
        let mapping =
            SparseMapping::new(address_space_size as usize).map_err(MappingError::Reserve)?;

        tracing::trace!(?mapping, "map_lower_vtl_memory mapping");

        let mut permission_bitmaps = if self.permissions_bitmap_state.is_some() {
            Some(PermissionBitmaps {
                permission_update_lock: Default::default(),
                read_bitmap: GuestMemoryBitmap::new(address_space_size as usize)?,
                write_bitmap: GuestMemoryBitmap::new(address_space_size as usize)?,
                kernel_execute_bitmap: GuestMemoryBitmap::new(address_space_size as usize)?,
                user_execute_bitmap: GuestMemoryBitmap::new(address_space_size as usize)?,
            })
        } else {
            None
        };

        // Loop through each of the memory map entries and create a mapping for it.
        for entry in memory_layout.ram() {
            if entry.range.is_empty() {
                continue;
            }
            let base_addr = entry.range.start();
            let file_offset = file_starting_offset.checked_add(base_addr).unwrap();

            tracing::trace!(base_addr, file_offset, "mapping lower ram");

            mapping
                .map_file(
                    base_addr as usize,
                    entry.range.len() as usize,
                    mshv_vtl_low.get(),
                    file_offset,
                    true,
                )
                .map_err(MappingError::Map)?;

            if let Some((bitmaps, state)) = permission_bitmaps
                .as_mut()
                .zip(self.permissions_bitmap_state)
            {
                bitmaps.read_bitmap.init(entry.range, state)?;
                bitmaps.write_bitmap.init(entry.range, state)?;
                bitmaps.kernel_execute_bitmap.init(entry.range, state)?;
                bitmaps.user_execute_bitmap.init(entry.range, state)?;
            }

            tracing::trace!(?entry, "mapped memory map entry");
        }

        let registrar = if self.for_kernel_access {
            let mshv = Mshv::new().map_err(MappingError::OpenDevice)?;
            let mshv_vtl = mshv.create_vtl().map_err(MappingError::OpenDevice)?;
            Some(MemoryRegistrar::new(
                memory_layout,
                self.physical_address_base,
                MshvVtlWithPolicy {
                    mshv_vtl,
                    ignore_registration_failure: self.ignore_registration_failure,
                    shared: self.shared,
                },
            ))
        } else {
            None
        };

        Ok(GuestMemoryMapping {
            mapping,
            iova_offset: self.dma_base_address,
            valid_memory: self.valid_memory.clone(),
            permission_bitmaps,
            registrar,
        })
    }
}

impl GuestMemoryMapping {
    /// Create a new builder for a guest memory mapping.
    ///
    /// Map all ranges with a physical address offset of
    /// `physical_address_base`. This can be zero, or the VTOM address for SNP,
    /// or the VTL0 alias address for non-isolated/software-isolated VMs.
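    ///
    /// A sketch of typical use (hypothetical variable names; `mshv_vtl_low` and
    /// `layout` are assumed to already exist):
    ///
    /// ```ignore
    /// let mapping = GuestMemoryMapping::builder(0)
    ///     .for_kernel_access(true)
    ///     .dma_base_address(Some(0))
    ///     .build_without_bitmap(&mshv_vtl_low, &layout)?;
    /// ```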
    pub fn builder(physical_address_base: u64) -> GuestMemoryMappingBuilder {
        GuestMemoryMappingBuilder {
            physical_address_base,
            valid_memory: None,
            permissions_bitmap_state: None,
            shared: false,
            for_kernel_access: false,
            dma_base_address: None,
            ignore_registration_failure: false,
        }
    }

    /// Update the permission bitmaps to reflect the given flags.
    ///
    /// Panics if the range is outside of guest RAM.
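    ///
    /// For example (a sketch; `mapping` and `range` are assumed to exist), VTL 1
    /// making a range read-only for the lower VTL might look like:
    ///
    /// ```ignore
    /// mapping.update_permission_bitmaps(range, HvMapGpaFlags::new().with_readable(true));
    /// ```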
    pub fn update_permission_bitmaps(&self, range: MemoryRange, flags: HvMapGpaFlags) {
        if let Some(bitmaps) = self.permission_bitmaps.as_ref() {
            let _lock = bitmaps.permission_update_lock.lock();
            bitmaps.read_bitmap.update(range, flags.readable());
            bitmaps.write_bitmap.update(range, flags.writable());
            bitmaps
                .kernel_execute_bitmap
                .update(range, flags.kernel_executable());
            bitmaps
                .user_execute_bitmap
                .update(range, flags.user_executable());
        }
    }

    /// Query the permissions for the given `gpn`.
    ///
    /// Panics if `gpn` is outside of guest RAM.
    pub fn query_access_permission(&self, gpn: u64) -> Result<HvMapGpaFlags, VtlPermissionsError> {
        if let Some(bitmaps) = self.permission_bitmaps.as_ref() {
            Ok(HvMapGpaFlags::new()
                .with_readable(bitmaps.read_bitmap.page_state(gpn))
                .with_writable(bitmaps.write_bitmap.page_state(gpn))
                .with_kernel_executable(bitmaps.kernel_execute_bitmap.page_state(gpn))
                .with_user_executable(bitmaps.user_execute_bitmap.page_state(gpn)))
        } else {
            Err(VtlPermissionsError::NoPermissionsTracked)
        }
    }

    /// Zero the given range of memory.
    pub(crate) fn zero_range(
        &self,
        range: MemoryRange,
    ) -> Result<(), sparse_mmap::SparseMappingError> {
        self.mapping
            .fill_at(range.start() as usize, 0, range.len() as usize)
    }
}