membacking/partition_mapper.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Implements the partition mapper, which is responsible for mapping regions
//! into VM partitions.

// UNSAFETY: Calling unsafe partition memory mapping functions.
#![expect(unsafe_code)]

use crate::mapping_manager::VaMapper;
use crate::region_manager::MapParams;
use memory_range::MemoryRange;
use std::sync::Arc;
use std::sync::Weak;
use thiserror::Error;
use virt::PartitionMemoryMap;

/// The partition mapper.
#[derive(Debug)]
pub struct PartitionMapper {
    partition: Weak<dyn PartitionMemoryMap>,
    mapper: Arc<VaMapper>,
    offset: u64,
    pin_mappings: bool,
}

/// Failure to map a region.
#[derive(Debug, Error)]
pub enum PartitionMapperError {
    #[error("failed to map range to partition")]
    Map(#[source] virt::Error),
    #[error("failed to pin range to partition")]
    Pin(#[source] virt::Error),
}

impl PartitionMapper {
    /// Returns a new partition mapper.
    ///
    /// If `pin_mappings` is set, [`PartitionMemoryMap::pin_range`] is called
    /// on each region that is mapped.
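    ///
    /// # Example
    ///
    /// A minimal usage sketch; `partition` (an `Arc<dyn PartitionMemoryMap>`)
    /// and `va_mapper` are assumed to be constructed elsewhere in the VMM:
    ///
    /// ```ignore
    /// // Map regions at a zero offset, without pinning mappings.
    /// let partition_mapper = PartitionMapper::new(&partition, va_mapper, 0, false);
    /// ```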
    pub fn new(
        partition: &Arc<dyn PartitionMemoryMap>,
        mapper: Arc<VaMapper>,
        offset: u64,
        pin_mappings: bool,
    ) -> Self {
        Self {
            partition: Arc::downgrade(partition),
            mapper,
            offset,
            pin_mappings,
        }
    }

    /// Maps a region.
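    ///
    /// # Example
    ///
    /// A sketch, using only the `MapParams` fields referenced below; the real
    /// construction of `MapParams` may differ:
    ///
    /// ```ignore
    /// partition_mapper
    ///     .map_region(
    ///         MemoryRange::new(0..0x10000),
    ///         MapParams {
    ///             writable: true,
    ///             executable: false,
    ///             prefetch: false,
    ///         },
    ///     )
    ///     .await?;
    /// ```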
    pub async fn map_region(
        &self,
        range: MemoryRange,
        params: MapParams,
    ) -> Result<(), PartitionMapperError> {
        // Ensure this range does not exceed the mapper's reserved VA range.
        assert!(range.end() <= self.mapper.len() as u64);

        // If the partition is gone, then there is nothing to do.
        let Some(partition) = self.partition.upgrade() else {
            return Ok(());
        };

        // Wait for the range to be mapped so that any second-level faults can
        // be satisfied by the kernel/hypervisor without VMM interaction.
        let _ = self.mapper.ensure_mapped(range).await;

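        // Compute the address in partition space (applying the configured
        // offset) and the host virtual address of the backing memory.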
        let addr = range.start().checked_add(self.offset).unwrap();
        let size = range.len() as usize;
        let data = self.mapper.as_ptr().wrapping_add(range.start() as usize);

        match self.mapper.process() {
            None => {
                // SAFETY: Mapper will ensure the VA range is reserved (but not
                // necessarily mapped) for its lifetime.
                unsafe { partition.map_range(data, size, addr, params.writable, params.executable) }
            }
            Some(process) => {
                match process {
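                    // The mapper only maps into a separate process on Windows,
                    // so this arm is unreachable elsewhere.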
                    #[cfg(not(windows))]
                    _ => unreachable!(),
                    #[cfg(windows)]
                    process => {
                        // SAFETY: Mapper will ensure the VA range is reserved (but not
                        // necessarily mapped) for its lifetime.
                        unsafe {
                            partition.map_remote_range(
                                process.as_handle(),
                                data,
                                size,
                                addr,
                                params.writable,
                                params.executable,
                            )
                        }
                    }
                }
            }
        }
        .map_err(PartitionMapperError::Map)?;

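        // Prefetch is best effort: a failure is logged but does not fail the
        // mapping operation.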
        if params.prefetch {
            if let Err(err) = partition.prefetch_range(addr, size as u64) {
                tracing::warn!(
                    error = err.as_ref() as &dyn std::error::Error,
                    addr,
                    size,
                    "prefetch failed"
                );
            }
        }

        if self.pin_mappings {
            if let Err(err) = partition.pin_range(addr, size as u64) {
                // Unmap the range to ensure we stay in a consistent state.
                partition
                    .unmap_range(addr, size as u64)
                    .expect("unmap cannot fail");
                return Err(PartitionMapperError::Pin(err));
            }
        }

        Ok(())
    }

    /// Unmaps regions in `range`.
    ///
    /// `range` may overlap zero, one, or many regions that were mapped with
    /// `map_region`, but it must fully contain any region it overlaps.
    ///
    /// This does not return an error, but on some hypervisors it may panic if
    /// asked to unmap only part of a previously mapped region.
    pub fn unmap_region(&mut self, range: MemoryRange) {
        if let Some(partition) = self.partition.upgrade() {
            partition
                .unmap_range(range.start().checked_add(self.offset).unwrap(), range.len())
                .expect("unmap cannot fail");
        }
    }

    /// Notifies the partition that a new mapping has been mapped into a
    /// previously mapped region.
    pub async fn notify_new_mapping(&mut self, range: MemoryRange) {
        // Ensure the VA range has been mapped for this mapping so that the
        // kernel can update the hypervisor's SLAT on page fault.
        let _ = self.mapper.ensure_mapped(range).await;
    }
}

impl Drop for PartitionMapper {
    fn drop(&mut self) {
        // Ensure everything is unmapped from the partition since the underlying
        // VA is going away.
        self.unmap_region(MemoryRange::new(0..self.mapper.len() as u64));
    }
}