mod device_memory;

pub use device_memory::DeviceMemoryMapper;

use crate::RemoteProcess;
use crate::mapping_manager::Mappable;
use crate::mapping_manager::MappingManager;
use crate::mapping_manager::MappingManagerClient;
use crate::mapping_manager::VaMapper;
use crate::mapping_manager::VaMapperError;
use crate::partition_mapper::PartitionMapper;
use crate::region_manager::MapParams;
use crate::region_manager::RegionHandle;
use crate::region_manager::RegionManager;
use guestmem::GuestMemory;
use hvdef::Vtl;
use inspect::Inspect;
use memory_range::MemoryRange;
use mesh::MeshPayload;
use pal_async::DefaultPool;
use std::sync::Arc;
use std::thread::JoinHandle;
use thiserror::Error;
use vm_topology::memory::MemoryLayout;

/// Manages the memory backing a guest: the shared-memory RAM allocation plus
/// the mapping and region managers that track how that memory is mapped.
///
/// Built via [`GuestMemoryBuilder::build`].
#[derive(Debug, Inspect)]
pub struct GuestMemoryManager {
    // The shared-memory object backing all of guest RAM.
    #[inspect(skip)]
    guest_ram: Mappable,

    // One entry per RAM range registered during `build`; shared with
    // `RamVisibilityControl` so visibility can be changed later.
    #[inspect(skip)]
    ram_regions: Arc<Vec<RamRegion>>,

    #[inspect(flatten)]
    mapping_manager: MappingManager,

    #[inspect(flatten)]
    region_manager: RegionManager,

    // Process-local VA mapper, reused by `attach_partition` when no remote
    // process is supplied.
    #[inspect(skip)]
    va_mapper: Arc<VaMapper>,

    // Thread driving the async pool spawned in `build`; held so the pool
    // lives as long as this manager.
    #[inspect(skip)]
    _thread: JoinHandle<()>,

    // If set, VTL2 partitions additionally get memory mapped at this
    // guest-physical offset (the VTL0 alias map).
    vtl0_alias_map_offset: Option<u64>,
    // Forwarded to `PartitionMapper::new` when partitions are attached.
    pin_mappings: bool,
}
56
/// A registered RAM range plus the region handle that controls its mapping.
#[derive(Debug)]
struct RamRegion {
    range: MemoryRange,
    handle: RegionHandle,
}
62
/// Errors returned by [`GuestMemoryManager::attach_partition`].
#[derive(Error, Debug)]
pub enum PartitionAttachError {
    #[error("failed to reserve VA range for partition mapping")]
    VaMapper(#[source] VaMapperError),
    #[error("failed to attach partition to memory manager")]
    PartitionMapper(#[source] crate::partition_mapper::PartitionMapperError),
}
73
/// Errors returned by [`GuestMemoryBuilder::build`].
#[derive(Error, Debug)]
pub enum MemoryBuildError {
    #[error("ram size {0} is too large")]
    RamTooLarge(u64),
    #[error("failed to allocate memory")]
    AllocationFailed(#[source] std::io::Error),
    #[error("failed to create VA mapper")]
    VaMapper(#[source] VaMapperError),
    #[error("not enough guest address space available for the vtl0 alias map")]
    AliasMapWontFit,
    #[error("x86 support requires RAM to start at 0 and contain at least 1MB")]
    InvalidRamForX86,
}
93
/// Builder for [`GuestMemoryManager`]. Configure with the chained setter
/// methods, then call [`GuestMemoryBuilder::build`].
pub struct GuestMemoryBuilder {
    // If set, reuse this shared-memory backing instead of allocating new RAM.
    existing_mapping: Option<SharedMemoryBacking>,
    vtl0_alias_map: Option<u64>,
    prefetch_ram: bool,
    pin_mappings: bool,
    x86_legacy_support: bool,
}
102
103impl GuestMemoryBuilder {
104    pub fn new() -> Self {
106        Self {
107            existing_mapping: None,
108            vtl0_alias_map: None,
109            pin_mappings: false,
110            prefetch_ram: false,
111            x86_legacy_support: false,
112        }
113    }
114
115    pub fn existing_backing(mut self, mapping: Option<SharedMemoryBacking>) -> Self {
117        self.existing_mapping = mapping;
118        self
119    }
120
121    pub fn vtl0_alias_map(mut self, offset: Option<u64>) -> Self {
125        self.vtl0_alias_map = offset;
126        self
127    }
128
129    pub fn pin_mappings(mut self, enable: bool) -> Self {
133        self.pin_mappings = enable;
134        self
135    }
136
137    pub fn prefetch_ram(mut self, enable: bool) -> Self {
140        self.prefetch_ram = enable;
141        self
142    }
143
144    pub fn x86_legacy_support(mut self, enable: bool) -> Self {
157        self.x86_legacy_support = enable;
158        self
159    }
160
161    pub async fn build(
164        self,
165        mem_layout: &MemoryLayout,
166    ) -> Result<GuestMemoryManager, MemoryBuildError> {
167        let ram_size = mem_layout.ram_size() + mem_layout.vtl2_range().map_or(0, |r| r.len());
168
169        let memory = if let Some(memory) = self.existing_mapping {
170            memory.guest_ram
171        } else {
172            sparse_mmap::alloc_shared_memory(
173                ram_size
174                    .try_into()
175                    .map_err(|_| MemoryBuildError::RamTooLarge(ram_size))?,
176            )
177            .map_err(MemoryBuildError::AllocationFailed)?
178            .into()
179        };
180
181        let (thread, spawner) = DefaultPool::spawn_on_thread("memory_manager");
185
186        let max_addr =
187            (mem_layout.end_of_ram_or_mmio()).max(mem_layout.vtl2_range().map_or(0, |r| r.end()));
188
189        let vtl0_alias_map_offset = if let Some(offset) = self.vtl0_alias_map {
190            if max_addr > offset {
191                return Err(MemoryBuildError::AliasMapWontFit);
192            }
193            Some(offset)
194        } else {
195            None
196        };
197
198        let mapping_manager = MappingManager::new(&spawner, max_addr);
199        let va_mapper = mapping_manager
200            .client()
201            .new_mapper()
202            .await
203            .map_err(MemoryBuildError::VaMapper)?;
204
205        let region_manager = RegionManager::new(&spawner, mapping_manager.client().clone());
206
207        let mut ram_ranges = mem_layout
208            .ram()
209            .iter()
210            .map(|x| x.range)
211            .chain(mem_layout.vtl2_range())
212            .collect::<Vec<_>>();
213
214        if self.x86_legacy_support {
215            if ram_ranges[0].start() != 0 || ram_ranges[0].end() < 0x100000 {
216                return Err(MemoryBuildError::InvalidRamForX86);
217            }
218
219            let range_starts = [
221                0,
222                0xa0000,
223                0xc0000,
224                0xc4000,
225                0xc8000,
226                0xcc000,
227                0xd0000,
228                0xd4000,
229                0xd8000,
230                0xdc000,
231                0xe0000,
232                0xe4000,
233                0xe8000,
234                0xec000,
235                0xf0000,
236                0x100000,
237                ram_ranges[0].end(),
238            ];
239
240            ram_ranges.splice(
241                0..1,
242                range_starts
243                    .iter()
244                    .zip(range_starts.iter().skip(1))
245                    .map(|(&start, &end)| MemoryRange::new(start..end)),
246            );
247        }
248
249        let mut ram_regions = Vec::new();
250        let mut start = 0;
251        for range in &ram_ranges {
252            let region = region_manager
253                .client()
254                .new_region("ram".into(), *range, RAM_PRIORITY)
255                .await
256                .expect("regions cannot overlap yet");
257
258            region
259                .add_mapping(
260                    MemoryRange::new(0..range.len()),
261                    memory.clone(),
262                    start,
263                    true,
264                )
265                .await;
266
267            region
268                .map(MapParams {
269                    writable: true,
270                    executable: true,
271                    prefetch: self.prefetch_ram,
272                })
273                .await;
274
275            ram_regions.push(RamRegion {
276                range: *range,
277                handle: region,
278            });
279            start += range.len();
280        }
281
282        let gm = GuestMemoryManager {
283            guest_ram: memory,
284            _thread: thread,
285            ram_regions: Arc::new(ram_regions),
286            mapping_manager,
287            region_manager,
288            va_mapper,
289            vtl0_alias_map_offset,
290            pin_mappings: self.pin_mappings,
291        };
292        Ok(gm)
293    }
294}
295
/// The shared-memory resources backing guest RAM, transferable to another
/// process via mesh and consumable by [`GuestMemoryBuilder::existing_backing`].
#[derive(Debug, MeshPayload)]
pub struct SharedMemoryBacking {
    guest_ram: Mappable,
}
301
/// A mesh-transferable client for constructing [`GuestMemory`] views of guest
/// RAM. Obtained from [`GuestMemoryManager::client`].
#[derive(Debug, MeshPayload)]
pub struct GuestMemoryClient {
    mapping_manager: MappingManagerClient,
}
307
308impl GuestMemoryClient {
309    pub async fn guest_memory(&self) -> Result<GuestMemory, VaMapperError> {
316        Ok(GuestMemory::new(
317            "ram",
318            self.mapping_manager.new_mapper().await?,
319        ))
320    }
321}
322
// Region priority used for RAM regions; the maximum value, while device
// regions use the minimum — presumably higher priority wins on overlap
// (NOTE(review): confirm against `RegionManager::new_region` semantics).
const RAM_PRIORITY: u8 = 255;

// Region priority for device memory regions (used by the `device_memory`
// submodule).
const DEVICE_PRIORITY: u8 = 0;
328
impl GuestMemoryManager {
    /// Returns a client that can be used — including from other processes,
    /// via mesh — to create [`GuestMemory`] views of guest RAM.
    pub fn client(&self) -> GuestMemoryClient {
        GuestMemoryClient {
            mapping_manager: self.mapping_manager.client().clone(),
        }
    }

    /// Returns an object used to map device memory into the guest address
    /// space.
    pub fn device_memory_mapper(&self) -> DeviceMemoryMapper {
        DeviceMemoryMapper::new(self.region_manager.client().clone())
    }

    /// Returns a control for changing the visibility of the RAM regions
    /// registered at build time.
    pub fn ram_visibility_control(&self) -> RamVisibilityControl {
        RamVisibilityControl {
            regions: self.ram_regions.clone(),
        }
    }

    /// Returns the shared-memory resources backing guest RAM, suitable for
    /// passing to [`GuestMemoryBuilder::existing_backing`] to reconstruct
    /// the backing elsewhere.
    pub fn shared_memory_backing(&self) -> SharedMemoryBacking {
        let guest_ram = self.guest_ram.clone();
        SharedMemoryBacking { guest_ram }
    }

    /// Attaches `partition` at `vtl` so that guest memory regions get mapped
    /// into it.
    ///
    /// If `process` is provided, a VA mapper for that remote process is
    /// created and used; otherwise the local mapper is reused.
    ///
    /// For VTL2, if a VTL0 alias map offset was configured at build time, the
    /// memory is additionally mapped at that offset.
    pub async fn attach_partition(
        &mut self,
        vtl: Vtl,
        partition: &Arc<dyn virt::PartitionMemoryMap>,
        process: Option<RemoteProcess>,
    ) -> Result<(), PartitionAttachError> {
        let va_mapper = if let Some(process) = process {
            self.mapping_manager
                .client()
                .new_remote_mapper(process)
                .await
                .map_err(PartitionAttachError::VaMapper)?
        } else {
            self.va_mapper.clone()
        };

        if vtl == Vtl::Vtl2 {
            if let Some(offset) = self.vtl0_alias_map_offset {
                // Second mapping of the same memory, shifted by the alias
                // offset.
                let partition =
                    PartitionMapper::new(partition, va_mapper.clone(), offset, self.pin_mappings);
                self.region_manager
                    .client()
                    .add_partition(partition)
                    .await
                    .map_err(PartitionAttachError::PartitionMapper)?;
            }
        }

        // Primary mapping at offset 0 for all VTLs.
        let partition = PartitionMapper::new(partition, va_mapper, 0, self.pin_mappings);
        self.region_manager
            .client()
            .add_partition(partition)
            .await
            .map_err(PartitionAttachError::PartitionMapper)?;
        Ok(())
    }
}
409
/// Controls the guest visibility of the RAM regions registered at build time.
/// Obtained from [`GuestMemoryManager::ram_visibility_control`].
pub struct RamVisibilityControl {
    regions: Arc<Vec<RamRegion>>,
}
415
/// The visibility of a RAM region to the guest.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum RamVisibility {
    /// The region is not mapped at all.
    Unmapped,
    /// The region is mapped read-only (writable: false, executable: true).
    ReadOnly,
    /// The region is mapped read-write.
    ReadWrite,
}
428
/// Error returned by [`RamVisibilityControl::set_ram_visibility`] when the
/// requested range does not exactly match a registered RAM region.
#[derive(Debug, Error)]
#[error("{0} is not a controllable RAM range")]
pub struct InvalidRamRegion(MemoryRange);
433
434impl RamVisibilityControl {
435    pub async fn set_ram_visibility(
442        &self,
443        range: MemoryRange,
444        visibility: RamVisibility,
445    ) -> Result<(), InvalidRamRegion> {
446        let region = self
447            .regions
448            .iter()
449            .find(|region| region.range == range)
450            .ok_or(InvalidRamRegion(range))?;
451
452        match visibility {
453            RamVisibility::ReadWrite | RamVisibility::ReadOnly => {
454                region
455                    .handle
456                    .map(MapParams {
457                        writable: matches!(visibility, RamVisibility::ReadWrite),
458                        executable: true,
459                        prefetch: false,
460                    })
461                    .await
462            }
463            RamVisibility::Unmapped => region.handle.unmap().await,
464        }
465        Ok(())
466    }
467}