membacking/partition_mapper.rs
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Implements the partition mapper, which is responsible for mapping regions
//! into VM partitions.
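//!
//! A minimal usage sketch (hypothetical; construction of the partition and
//! the [`VaMapper`] is elided, and `range`/`params` stand in for real
//! arguments):
//!
//! ```ignore
//! let mut mapper = PartitionMapper::new(&partition, va_mapper, 0, false);
//! mapper.map_region(range, params).await?;
//! // ...later, when the region goes away:
//! mapper.unmap_region(range);
//! ```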
// UNSAFETY: Calling unsafe partition memory mapping functions.
#![expect(unsafe_code)]

use crate::mapping_manager::VaMapper;
use crate::region_manager::MapParams;
use memory_range::MemoryRange;
use std::sync::Arc;
use std::sync::Weak;
use thiserror::Error;
use virt::PartitionMemoryMap;
/// The partition mapper.
#[derive(Debug)]
pub struct PartitionMapper {
    partition: Weak<dyn PartitionMemoryMap>,
    mapper: Arc<VaMapper>,
    offset: u64,
    pin_mappings: bool,
}

/// Failure to map a region.
#[derive(Debug, Error)]
pub enum PartitionMapperError {
    #[error("failed to map range to partition")]
    Map(#[source] virt::Error),
    #[error("failed to pin range to partition")]
    Pin(#[source] virt::Error),
}

impl PartitionMapper {
    /// Returns a new partition mapper.
    ///
    /// If `pin_mappings` is set, [`PartitionMemoryMap::pin_range`] is called
    /// on each region as it is mapped.
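    ///
    /// For example (hypothetical; `partition` and `va_mapper` come from the
    /// surrounding memory manager):
    ///
    /// ```ignore
    /// // Map each region at its range start plus `offset`, pinning it as it
    /// // is mapped.
    /// let mapper = PartitionMapper::new(&partition, va_mapper, offset, true);
    /// ```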
    pub fn new(
        partition: &Arc<dyn PartitionMemoryMap>,
        mapper: Arc<VaMapper>,
        offset: u64,
        pin_mappings: bool,
    ) -> Self {
        Self {
            partition: Arc::downgrade(partition),
            mapper,
            offset,
            pin_mappings,
        }
    }

    /// Maps a region into the partition.
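    ///
    /// An illustrative call site (hypothetical; `MapParams` is shown with
    /// only the fields this function uses and may have more in practice):
    ///
    /// ```ignore
    /// mapper
    ///     .map_region(
    ///         MemoryRange::new(0..0x10000),
    ///         MapParams { writable: true, executable: false, prefetch: false },
    ///     )
    ///     .await?;
    /// ```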
    pub async fn map_region(
        &self,
        range: MemoryRange,
        params: MapParams,
    ) -> Result<(), PartitionMapperError> {
        // Ensure this range does not exceed the mapper's reserved VA range.
        assert!(range.end() <= self.mapper.len() as u64);

        // If the partition is gone, then there is nothing to do.
        let Some(partition) = self.partition.upgrade() else {
            return Ok(());
        };

        // Wait for the range to be mapped so that any second-level faults can
        // be satisfied by the kernel/hypervisor without VMM interaction.
        // Failures are deliberately ignored here: the VA range remains
        // reserved either way, and problems will surface when the range is
        // mapped or accessed.
        let _ = self.mapper.ensure_mapped(range).await;

        let addr = range.start().checked_add(self.offset).unwrap();
        let size = range.len() as usize;
        let data = self.mapper.as_ptr().wrapping_add(range.start() as usize);

        match self.mapper.process() {
            None => {
                // SAFETY: the mapper will ensure the VA range is reserved (but
                // not necessarily mapped) for its lifetime.
                unsafe { partition.map_range(data, size, addr, params.writable, params.executable) }
            }
            Some(process) => {
                match process {
                    #[cfg(not(windows))]
                    _ => unreachable!(),
                    #[cfg(windows)]
                    process => {
                        // SAFETY: the mapper will ensure the VA range is
                        // reserved (but not necessarily mapped) for its
                        // lifetime.
                        unsafe {
                            partition.map_remote_range(
                                process.as_handle(),
                                data,
                                size,
                                addr,
                                params.writable,
                                params.executable,
                            )
                        }
                    }
                }
            }
        }
        .map_err(PartitionMapperError::Map)?;

        if params.prefetch {
            if let Err(err) = partition.prefetch_range(addr, size as u64) {
                tracing::warn!(
                    error = err.as_ref() as &dyn std::error::Error,
                    addr,
                    size,
                    "prefetch failed"
                );
            }
        }

        if self.pin_mappings {
            if let Err(err) = partition.pin_range(addr, size as u64) {
                // Unmap the range to restore a consistent state before
                // returning the error.
                partition
                    .unmap_range(addr, size as u64)
                    .expect("unmap cannot fail");
                return Err(PartitionMapperError::Pin(err));
            }
        }

        Ok(())
    }

    /// Unmaps regions in `range`.
    ///
    /// `range` may overlap zero, one, or many regions that were mapped with
    /// `map_region`, but it must fully contain any regions it overlaps.
    ///
    /// This cannot fail, but on some hypervisors, it may panic on partial
    /// region unmap.
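    ///
    /// For example, if regions `0..0x1000` and `0x1000..0x2000` were mapped
    /// (illustrative addresses):
    ///
    /// ```ignore
    /// // OK: fully contains both overlapped regions.
    /// mapper.unmap_region(MemoryRange::new(0..0x2000));
    /// // Not allowed: overlaps the first region without fully containing it,
    /// // and may panic on some hypervisors.
    /// mapper.unmap_region(MemoryRange::new(0..0x800));
    /// ```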
    pub fn unmap_region(&mut self, range: MemoryRange) {
        if let Some(partition) = self.partition.upgrade() {
            partition
                .unmap_range(range.start().checked_add(self.offset).unwrap(), range.len())
                .expect("unmap cannot fail");
        }
    }

    /// Notifies the partition that a new backing mapping has been established
    /// within a previously mapped region.
    pub async fn notify_new_mapping(&mut self, range: MemoryRange) {
        // Ensure the VA range has been mapped for this mapping so that the
        // kernel can update the hypervisor's SLAT on page fault.
        let _ = self.mapper.ensure_mapped(range).await;
    }
}
impl Drop for PartitionMapper {
    fn drop(&mut self) {
        // Ensure everything is unmapped from the partition since the
        // underlying VA is going away.
        self.unmap_region(MemoryRange::new(0..self.mapper.len() as u64));
    }
}