guestmem/
lib.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Interfaces to read and write guest memory.

// UNSAFETY: This crate's whole purpose is manual memory mapping and management.
#![expect(unsafe_code)]
#![expect(missing_docs)]

pub mod ranges;

use self::ranges::PagedRange;
use inspect::Inspect;
use pal_event::Event;
use sparse_mmap::AsMappableRef;
use std::any::Any;
use std::fmt::Debug;
use std::io;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
use std::ptr::NonNull;
use std::sync::Arc;
use std::sync::atomic::AtomicU8;
use thiserror::Error;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

// Effective page size for page-related operations in this crate.
pub const PAGE_SIZE: usize = 4096;
const PAGE_SIZE64: u64 = 4096;

/// A memory access error returned by one of the [`GuestMemory`] methods.
#[derive(Debug, Error)]
#[error(transparent)]
pub struct GuestMemoryError(Box<GuestMemoryErrorInner>);

impl GuestMemoryError {
    fn new(
        debug_name: &Arc<str>,
        range: Option<Range<u64>>,
        op: GuestMemoryOperation,
        err: GuestMemoryBackingError,
    ) -> Self {
        GuestMemoryError(Box::new(GuestMemoryErrorInner {
            op,
            debug_name: debug_name.clone(),
            range,
            gpa: (err.gpa != INVALID_ERROR_GPA).then_some(err.gpa),
            kind: err.kind,
            err: err.err,
        }))
    }

    /// Returns the kind of the error.
    pub fn kind(&self) -> GuestMemoryErrorKind {
        self.0.kind
    }
}

#[derive(Debug, Copy, Clone)]
enum GuestMemoryOperation {
    Read,
    Write,
    Fill,
    CompareExchange,
    Lock,
    Subrange,
    Probe,
}

impl std::fmt::Display for GuestMemoryOperation {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.pad(match self {
            GuestMemoryOperation::Read => "read",
            GuestMemoryOperation::Write => "write",
            GuestMemoryOperation::Fill => "fill",
            GuestMemoryOperation::CompareExchange => "compare exchange",
            GuestMemoryOperation::Lock => "lock",
            GuestMemoryOperation::Subrange => "subrange",
            GuestMemoryOperation::Probe => "probe",
        })
    }
}

#[derive(Debug, Error)]
struct GuestMemoryErrorInner {
    op: GuestMemoryOperation,
    debug_name: Arc<str>,
    range: Option<Range<u64>>,
    gpa: Option<u64>,
    kind: GuestMemoryErrorKind,
    #[source]
    err: Box<dyn std::error::Error + Send + Sync>,
}

impl std::fmt::Display for GuestMemoryErrorInner {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "guest memory '{debug_name}': {op} error: failed to access ",
            debug_name = self.debug_name,
            op = self.op
        )?;
        if let Some(range) = &self.range {
            write!(f, "{:#x}-{:#x}", range.start, range.end)?;
        } else {
            f.write_str("memory")?;
        }
        // Include the precise GPA if provided and different from the start of
        // the range.
        if let Some(gpa) = self.gpa {
            if self.range.as_ref().is_none_or(|range| range.start != gpa) {
                write!(f, " at {:#x}", gpa)?;
            }
        }
        Ok(())
    }
}

/// A memory access error returned by a [`GuestMemoryAccess`] trait method.
#[derive(Debug)]
pub struct GuestMemoryBackingError {
    gpa: u64,
    kind: GuestMemoryErrorKind,
    err: Box<dyn std::error::Error + Send + Sync>,
}

/// The kind of memory access error.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub enum GuestMemoryErrorKind {
    /// An error that does not fit any other category.
    Other,
    /// The address is outside the valid range of the memory.
    OutOfRange,
    /// The memory has been protected by a higher virtual trust level.
    VtlProtected,
    /// The memory is shared but was accessed via a private address.
    NotPrivate,
    /// The memory is private but was accessed via a shared address.
    NotShared,
}
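
// A minimal sketch (not part of this crate's API) showing how a caller can
// classify failures by kind. `GuestMemory::allocate`, `read_at`, and
// `GuestMemoryError::kind` are all defined later in this file.
#[cfg(test)]
mod error_kind_example {
    use super::*;

    #[test]
    fn out_of_range_access_is_classified() {
        let gm = GuestMemory::allocate(PAGE_SIZE);
        let mut buf = [0u8; 8];
        // Reading far past the end of the single-page allocation is expected
        // to be reported as `OutOfRange`.
        let err = gm.read_at(0x10_0000, &mut buf).unwrap_err();
        assert_eq!(err.kind(), GuestMemoryErrorKind::OutOfRange);
    }
}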

/// An error returned by a page fault handler in [`GuestMemoryAccess::page_fault`].
pub struct PageFaultError {
    kind: GuestMemoryErrorKind,
    err: Box<dyn std::error::Error + Send + Sync>,
}

impl PageFaultError {
    /// Returns a new page fault error.
    pub fn new(
        kind: GuestMemoryErrorKind,
        err: impl Into<Box<dyn std::error::Error + Send + Sync>>,
    ) -> Self {
        Self {
            kind,
            err: err.into(),
        }
    }

    /// Returns a page fault error without an explicit kind.
    pub fn other(err: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> Self {
        Self::new(GuestMemoryErrorKind::Other, err)
    }
}

/// Used to avoid needing an `Option` for [`GuestMemoryBackingError::gpa`], to
/// save size in hot paths.
const INVALID_ERROR_GPA: u64 = !0;

impl GuestMemoryBackingError {
    /// Returns a new error for a memory access failure at address `gpa`.
    pub fn new(
        kind: GuestMemoryErrorKind,
        gpa: u64,
        err: impl Into<Box<dyn std::error::Error + Send + Sync>>,
    ) -> Self {
        // `gpa` might incorrectly be INVALID_ERROR_GPA; this is harmless (just
        // affecting the error message), so don't assert on it in case this is
        // an untrusted value in some path.
        Self {
            kind,
            gpa,
            err: err.into(),
        }
    }

    /// Returns a new error without an explicit kind.
    pub fn other(gpa: u64, err: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> Self {
        Self::new(GuestMemoryErrorKind::Other, gpa, err)
    }

    fn gpn(err: InvalidGpn) -> Self {
        Self {
            kind: GuestMemoryErrorKind::OutOfRange,
            gpa: INVALID_ERROR_GPA,
            err: err.into(),
        }
    }
}

#[derive(Debug, Error)]
#[error("no memory at address")]
struct OutOfRange;

#[derive(Debug, Error)]
#[error("memory not lockable")]
struct NotLockable;

#[derive(Debug, Error)]
#[error("no fallback for this operation")]
struct NoFallback;

#[derive(Debug, Error)]
#[error("the specified page is not mapped")]
struct NotMapped;

#[derive(Debug, Error)]
#[error("page inaccessible in bitmap")]
struct BitmapFailure;

/// A trait for a guest memory backing that is fully available via a virtual
/// address mapping, as opposed to the fallback functions such as
/// [`GuestMemoryAccess::read_fallback`].
///
/// By implementing this trait, a type guarantees that its
/// [`GuestMemoryAccess::mapping`] will return `Some(_)` and that all of its
/// memory can be accessed through that mapping, without needing to call the
/// fallback functions.
pub trait LinearGuestMemory: GuestMemoryAccess {}

// SAFETY: the allocation will stay valid for the lifetime of the object.
unsafe impl GuestMemoryAccess for sparse_mmap::alloc::SharedMem {
    fn mapping(&self) -> Option<NonNull<u8>> {
        NonNull::new(self.as_ptr().cast_mut().cast())
    }

    fn max_address(&self) -> u64 {
        self.len() as u64
    }
}

impl LinearGuestMemory for sparse_mmap::alloc::SharedMem {}

/// A page-aligned heap allocation for use with [`GuestMemory`].
pub struct AlignedHeapMemory {
    pages: Box<[AlignedPage]>,
}

impl Debug for AlignedHeapMemory {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AlignedHeapMemory")
            .field("len", &self.len())
            .finish()
    }
}

#[repr(C, align(4096))]
struct AlignedPage([AtomicU8; PAGE_SIZE]);

impl AlignedHeapMemory {
    /// Allocates a new memory of `size` bytes, rounded up to a page size.
    pub fn new(size: usize) -> Self {
        #[expect(clippy::declare_interior_mutable_const)] // <https://github.com/rust-lang/rust-clippy/issues/7665>
        const ZERO: AtomicU8 = AtomicU8::new(0);
        #[expect(clippy::declare_interior_mutable_const)]
        const ZERO_PAGE: AlignedPage = AlignedPage([ZERO; PAGE_SIZE]);
        let mut pages = Vec::new();
        pages.resize_with(size.div_ceil(PAGE_SIZE), || ZERO_PAGE);
        Self {
            pages: pages.into(),
        }
    }

    /// Returns the length of the memory in bytes.
    pub fn len(&self) -> usize {
        self.pages.len() * PAGE_SIZE
    }

    /// Returns an immutable slice of bytes.
    ///
    /// This must take `&mut self` since the buffer is mutable via interior
    /// mutability with just `&self`.
    pub fn as_bytes(&mut self) -> &[u8] {
        self.as_mut()
    }

    /// Returns a mutable slice of bytes.
    pub fn as_mut_bytes(&mut self) -> &mut [u8] {
        self.as_mut()
    }
}

impl Deref for AlignedHeapMemory {
    type Target = [AtomicU8];

    fn deref(&self) -> &Self::Target {
        // SAFETY: the buffer has the correct size and validity.
        unsafe { std::slice::from_raw_parts(self.pages.as_ptr().cast(), self.len()) }
    }
}

impl DerefMut for AlignedHeapMemory {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: the buffer is unaliased and valid.
        unsafe { std::slice::from_raw_parts_mut(self.pages.as_mut_ptr().cast(), self.len()) }
    }
}

impl AsRef<[AtomicU8]> for AlignedHeapMemory {
    fn as_ref(&self) -> &[AtomicU8] {
        self
    }
}

impl AsMut<[AtomicU8]> for AlignedHeapMemory {
    fn as_mut(&mut self) -> &mut [AtomicU8] {
        self
    }
}

impl AsMut<[u8]> for AlignedHeapMemory {
    fn as_mut(&mut self) -> &mut [u8] {
        // FUTURE: use AtomicU8::get_mut_slice once stabilized.
        // SAFETY: the buffer is unaliased, so it is fine to cast away the atomicness of the
        // slice.
        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr().cast(), self.len()) }
    }
}

// SAFETY: the allocation remains alive and valid for the lifetime of the
// object.
unsafe impl GuestMemoryAccess for AlignedHeapMemory {
    fn mapping(&self) -> Option<NonNull<u8>> {
        NonNull::new(self.pages.as_ptr().cast_mut().cast())
    }

    fn max_address(&self) -> u64 {
        (self.pages.len() * PAGE_SIZE) as u64
    }
}

impl LinearGuestMemory for AlignedHeapMemory {}
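
// A minimal sketch (not part of this crate's API) of wrapping an
// `AlignedHeapMemory` in a `GuestMemory` under a custom debug name; contrast
// with `GuestMemory::allocate`, which uses the debug name "heap".
#[cfg(test)]
mod aligned_heap_example {
    use super::*;

    #[test]
    fn heap_backed_roundtrip() {
        // Sizes are rounded up to whole 4 KiB pages.
        let mem = AlignedHeapMemory::new(0x1800);
        assert_eq!(mem.len(), 0x2000);

        let gm = GuestMemory::new("example", mem);
        gm.write_at(0x10, &[1, 2, 3, 4]).unwrap();
        let mut buf = [0u8; 4];
        gm.read_at(0x10, &mut buf).unwrap();
        assert_eq!(buf, [1, 2, 3, 4]);
    }
}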

/// A trait for a guest memory backing.
///
/// Guest memory may be backed by a virtual memory mapping, in which case this
/// trait can provide the VA and length of that mapping. Alternatively, it may
/// be backed by some other means, in which case this trait can provide fallback
/// methods for reading and writing memory.
///
/// Memory access should first be attempted via the virtual address mapping. If
/// this fails or is not present, the caller should fall back to `read_fallback`
/// or `write_fallback`. This allows an implementation to have a fast path using
/// the mapping, and a slow path using the fallback functions.
///
/// # Safety
///
/// The implementor must follow the contract for each method.
pub unsafe trait GuestMemoryAccess: 'static + Send + Sync {
    /// Returns a stable VA mapping for guest memory.
    ///
    /// The size of the mapping is the same as `max_address`.
    ///
    /// The VA is guaranteed to remain reserved, but individual ranges may be
    /// uncommitted.
    fn mapping(&self) -> Option<NonNull<u8>>;

    /// The maximum address that can be passed to the `*_fallback` methods, as
    /// well as the maximum offset into the VA range described by `mapping`.
    fn max_address(&self) -> u64;

    /// The bitmaps to check for validity, one bit per page. If a bit is set,
    /// then the page is valid to access via the mapping; if it is clear, then
    /// the page will not be accessed.
    ///
    /// The bitmaps must be at least `ceil(bitmap_start + max_address() /
    /// PAGE_SIZE)` bits long, and they must be valid for atomic read access for
    /// the lifetime of this object from any thread.
    ///
    /// The bitmaps are only checked if there is a mapping. If the bitmap check
    /// fails, then the associated `*_fallback` routine is called to handle the
    /// error.
    ///
    /// Bitmap checks are performed under the [`rcu()`] RCU domain, with relaxed
    /// accesses. After a thread updates the bitmap to be more restrictive, it
    /// must call [`minircu::global().synchronize()`] to ensure that all threads
    /// see the update before taking any action that depends on the bitmap
    /// update being visible.
    #[cfg(feature = "bitmap")]
    fn access_bitmap(&self) -> Option<BitmapInfo> {
        None
    }

    /// Returns an accessor for a subrange, or `None` to use the default
    /// implementation.
    fn subrange(
        &self,
        offset: u64,
        len: u64,
        allow_preemptive_locking: bool,
    ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
        let _ = (offset, len, allow_preemptive_locking);
        Ok(None)
    }

    /// Called when access to memory via the mapped range fails, either due to a
    /// bitmap failure or due to a failure when accessing the virtual address.
    ///
    /// `address` is the address where the access failed. `len` is the remainder
    /// of the access; it is not necessarily the case that all `len` bytes are
    /// inaccessible in the bitmap or mapping.
    ///
    /// Returns whether the faulting operation should be retried, failed, or that
    /// one of the fallback operations (e.g. `read_fallback`) should be called.
    fn page_fault(
        &self,
        address: u64,
        len: usize,
        write: bool,
        bitmap_failure: bool,
    ) -> PageFaultAction {
        let _ = (address, len, write);
        let err = if bitmap_failure {
            PageFaultError::other(BitmapFailure)
        } else {
            PageFaultError::other(NotMapped)
        };
        PageFaultAction::Fail(err)
    }

    /// Fallback called if a read fails via direct access to `mapped_range`.
    ///
    /// This is only called if `mapping()` returns `None` or if `page_fault()`
    /// returns `PageFaultAction::Fallback`.
    ///
    /// Implementors must ensure that `dest[..len]` is fully initialized on
    /// successful return.
    ///
    /// # Safety
    /// The caller must ensure that `dest[..len]` is valid for write. Note,
    /// however, that `dest` might be aliased by other threads, the guest, or
    /// the kernel.
    unsafe fn read_fallback(
        &self,
        addr: u64,
        dest: *mut u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        let _ = (dest, len);
        Err(GuestMemoryBackingError::other(addr, NoFallback))
    }

    /// Fallback called if a write fails via direct access to `mapped_range`.
    ///
    /// This is only called if `mapping()` returns `None` or if `page_fault()`
    /// returns `PageFaultAction::Fallback`.
    ///
    /// # Safety
    /// The caller must ensure that `src[..len]` is valid for read. Note,
    /// however, that `src` might be aliased by other threads, the guest, or
    /// the kernel.
    unsafe fn write_fallback(
        &self,
        addr: u64,
        src: *const u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        let _ = (src, len);
        Err(GuestMemoryBackingError::other(addr, NoFallback))
    }

    /// Fallback called if a fill fails via direct access to `mapped_range`.
    ///
    /// This is only called if `mapping()` returns `None` or if `page_fault()`
    /// returns `PageFaultAction::Fallback`.
    fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
        let _ = (val, len);
        Err(GuestMemoryBackingError::other(addr, NoFallback))
    }

    /// Fallback called if a compare exchange fails via direct access to `mapped_range`.
    ///
    /// On compare failure, returns `Ok(false)` and updates `current`.
    ///
    /// This is only called if `mapping()` returns `None` or if `page_fault()`
    /// returns `PageFaultAction::Fallback`.
    fn compare_exchange_fallback(
        &self,
        addr: u64,
        current: &mut [u8],
        new: &[u8],
    ) -> Result<bool, GuestMemoryBackingError> {
        let _ = (current, new);
        Err(GuestMemoryBackingError::other(addr, NoFallback))
    }

    /// Prepares a guest page for having its virtual address exposed as part of
    /// a lock call.
    ///
    /// This is useful to ensure that the address is mapped in a way that it can
    /// be passed to the kernel for DMA.
    fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
        let _ = (address, len);
        Ok(())
    }

    /// Returns the base IO virtual address for the mapping.
    ///
    /// This is the base address that should be used for DMA from a user-mode
    /// device driver whose device is not otherwise configured to go through an
    /// IOMMU.
    fn base_iova(&self) -> Option<u64> {
        None
    }
}
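
// A minimal sketch (for illustration only, not part of this crate's API) of a
// fallback-only backing: `mapping()` returns `None`, so every access goes
// through `read_fallback`/`write_fallback`.
#[cfg(test)]
mod fallback_backing_example {
    use super::*;
    use std::sync::Mutex;

    struct VecBacked {
        data: Mutex<Vec<u8>>,
    }

    // SAFETY: `mapping()` returns `None`, so there are no mapping or bitmap
    // guarantees to uphold; the fallback methods only copy within the bounds
    // validated by the caller (`addr + len <= max_address()`).
    unsafe impl GuestMemoryAccess for VecBacked {
        fn mapping(&self) -> Option<NonNull<u8>> {
            None
        }

        fn max_address(&self) -> u64 {
            self.data.lock().unwrap().len() as u64
        }

        unsafe fn read_fallback(
            &self,
            addr: u64,
            dest: *mut u8,
            len: usize,
        ) -> Result<(), GuestMemoryBackingError> {
            let data = self.data.lock().unwrap();
            // SAFETY: the caller guarantees `dest..dest+len` is valid for
            // writes, and the range was validated against `max_address()`.
            unsafe {
                std::ptr::copy_nonoverlapping(data[addr as usize..].as_ptr(), dest, len);
            }
            Ok(())
        }

        unsafe fn write_fallback(
            &self,
            addr: u64,
            src: *const u8,
            len: usize,
        ) -> Result<(), GuestMemoryBackingError> {
            let mut data = self.data.lock().unwrap();
            // SAFETY: the caller guarantees `src..src+len` is valid for reads,
            // and the range was validated against `max_address()`.
            unsafe {
                std::ptr::copy_nonoverlapping(src, data[addr as usize..].as_mut_ptr(), len);
            }
            Ok(())
        }
    }

    #[test]
    fn fallback_roundtrip() {
        let gm = GuestMemory::new(
            "vec-backed",
            VecBacked {
                data: Mutex::new(vec![0; PAGE_SIZE]),
            },
        );
        gm.write_at(8, &[0xaa; 16]).unwrap();
        let mut buf = [0u8; 16];
        gm.read_at(8, &mut buf).unwrap();
        assert_eq!(buf, [0xaa; 16]);
    }
}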

trait DynGuestMemoryAccess: 'static + Send + Sync + Any {
    fn subrange(
        &self,
        offset: u64,
        len: u64,
        allow_preemptive_locking: bool,
    ) -> Result<Option<GuestMemory>, GuestMemoryBackingError>;

    fn page_fault(
        &self,
        address: u64,
        len: usize,
        write: bool,
        bitmap_failure: bool,
    ) -> PageFaultAction;

    /// # Safety
    /// See [`GuestMemoryAccess::read_fallback`].
    unsafe fn read_fallback(
        &self,
        addr: u64,
        dest: *mut u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError>;

    /// # Safety
    /// See [`GuestMemoryAccess::write_fallback`].
    unsafe fn write_fallback(
        &self,
        addr: u64,
        src: *const u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError>;

    fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError>;

    fn compare_exchange_fallback(
        &self,
        addr: u64,
        current: &mut [u8],
        new: &[u8],
    ) -> Result<bool, GuestMemoryBackingError>;

    fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError>;
}

impl<T: GuestMemoryAccess> DynGuestMemoryAccess for T {
    fn subrange(
        &self,
        offset: u64,
        len: u64,
        allow_preemptive_locking: bool,
    ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
        self.subrange(offset, len, allow_preemptive_locking)
    }

    fn page_fault(
        &self,
        address: u64,
        len: usize,
        write: bool,
        bitmap_failure: bool,
    ) -> PageFaultAction {
        self.page_fault(address, len, write, bitmap_failure)
    }

    unsafe fn read_fallback(
        &self,
        addr: u64,
        dest: *mut u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        // SAFETY: guaranteed by caller.
        unsafe { self.read_fallback(addr, dest, len) }
    }

    unsafe fn write_fallback(
        &self,
        addr: u64,
        src: *const u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        // SAFETY: guaranteed by caller.
        unsafe { self.write_fallback(addr, src, len) }
    }

    fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
        self.fill_fallback(addr, val, len)
    }

    fn compare_exchange_fallback(
        &self,
        addr: u64,
        current: &mut [u8],
        new: &[u8],
    ) -> Result<bool, GuestMemoryBackingError> {
        self.compare_exchange_fallback(addr, current, new)
    }

    fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
        self.expose_va(address, len)
    }
}

/// The action to take after [`GuestMemoryAccess::page_fault`] returns to
/// continue the operation.
pub enum PageFaultAction {
    /// Fail the operation.
    Fail(PageFaultError),
    /// Retry the operation.
    Retry,
    /// Use the fallback method to access the memory.
    Fallback,
}

/// Returned by [`GuestMemoryAccess::access_bitmap`].
#[cfg(feature = "bitmap")]
pub struct BitmapInfo {
    /// A pointer to the bitmap for read access.
    pub read_bitmap: NonNull<u8>,
    /// A pointer to the bitmap for write access.
    pub write_bitmap: NonNull<u8>,
    /// The bit offset of the beginning of the bitmap.
    ///
    /// Typically this is zero, but it is needed to support subranges that are
    /// not 8-page multiples.
    pub bit_offset: u8,
}

// SAFETY: passing through guarantees from `T`.
unsafe impl<T: GuestMemoryAccess> GuestMemoryAccess for Arc<T> {
    fn mapping(&self) -> Option<NonNull<u8>> {
        self.as_ref().mapping()
    }

    fn max_address(&self) -> u64 {
        self.as_ref().max_address()
    }

    #[cfg(feature = "bitmap")]
    fn access_bitmap(&self) -> Option<BitmapInfo> {
        self.as_ref().access_bitmap()
    }

    fn subrange(
        &self,
        offset: u64,
        len: u64,
        allow_preemptive_locking: bool,
    ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
        self.as_ref()
            .subrange(offset, len, allow_preemptive_locking)
    }

    fn page_fault(
        &self,
        addr: u64,
        len: usize,
        write: bool,
        bitmap_failure: bool,
    ) -> PageFaultAction {
        self.as_ref().page_fault(addr, len, write, bitmap_failure)
    }

    unsafe fn read_fallback(
        &self,
        addr: u64,
        dest: *mut u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        // SAFETY: passing through guarantees from caller.
        unsafe { self.as_ref().read_fallback(addr, dest, len) }
    }

    unsafe fn write_fallback(
        &self,
        addr: u64,
        src: *const u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        // SAFETY: passing through guarantees from caller.
        unsafe { self.as_ref().write_fallback(addr, src, len) }
    }

    fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
        self.as_ref().fill_fallback(addr, val, len)
    }

    fn compare_exchange_fallback(
        &self,
        addr: u64,
        current: &mut [u8],
        new: &[u8],
    ) -> Result<bool, GuestMemoryBackingError> {
        self.as_ref().compare_exchange_fallback(addr, current, new)
    }

    fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
        self.as_ref().expose_va(address, len)
    }

    fn base_iova(&self) -> Option<u64> {
        self.as_ref().base_iova()
    }
}

// SAFETY: the allocation will stay valid for the lifetime of the object.
unsafe impl GuestMemoryAccess for sparse_mmap::SparseMapping {
    fn mapping(&self) -> Option<NonNull<u8>> {
        NonNull::new(self.as_ptr().cast())
    }

    fn max_address(&self) -> u64 {
        self.len() as u64
    }
}

/// Default guest memory range type, enforcing access boundaries.
struct GuestMemoryAccessRange {
    base: Arc<GuestMemoryInner>,
    offset: u64,
    len: u64,
    region: usize,
}

impl GuestMemoryAccessRange {
    fn adjust_range(&self, address: u64, len: u64) -> Result<u64, GuestMemoryBackingError> {
        if address <= self.len && len <= self.len - address {
            Ok(self.offset + address)
        } else {
            Err(GuestMemoryBackingError::new(
                GuestMemoryErrorKind::OutOfRange,
                address,
                OutOfRange,
            ))
        }
    }
}

// SAFETY: `mapping()` is guaranteed to be valid for the lifetime of the object.
unsafe impl GuestMemoryAccess for GuestMemoryAccessRange {
    fn mapping(&self) -> Option<NonNull<u8>> {
        let region = &self.base.regions[self.region];
        region.mapping.and_then(|mapping| {
            let offset = self.offset & self.base.region_def.region_mask;
            // This is guaranteed by construction.
            assert!(region.len >= offset + self.len);
            // SAFETY: this mapping is guaranteed to be within range by
            // construction (and validated again via the assertion above).
            NonNull::new(unsafe { mapping.0.as_ptr().add(offset as usize) })
        })
    }

    fn max_address(&self) -> u64 {
        self.len
    }

    #[cfg(feature = "bitmap")]
    fn access_bitmap(&self) -> Option<BitmapInfo> {
        let region = &self.base.regions[self.region];
        region.bitmaps.map(|bitmaps| {
            let offset = self.offset & self.base.region_def.region_mask;
            let bit_offset = region.bitmap_start as u64 + offset / PAGE_SIZE64;
            let [read_bitmap, write_bitmap] = bitmaps.map(|SendPtrU8(ptr)| {
                // SAFETY: the bitmap is guaranteed to be big enough for the region
                // by construction.
                NonNull::new(unsafe { ptr.as_ptr().add((bit_offset / 8) as usize) }).unwrap()
            });
            let bitmap_start = (bit_offset % 8) as u8;
            BitmapInfo {
                read_bitmap,
                write_bitmap,
                bit_offset: bitmap_start,
            }
        })
    }

    fn subrange(
        &self,
        offset: u64,
        len: u64,
        _allow_preemptive_locking: bool,
    ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
        let address = self.adjust_range(offset, len)?;
        Ok(Some(GuestMemory::new(
            self.base.debug_name.clone(),
            GuestMemoryAccessRange {
                base: self.base.clone(),
                offset: address,
                len,
                region: self.region,
            },
        )))
    }

    fn page_fault(
        &self,
        address: u64,
        len: usize,
        write: bool,
        bitmap_failure: bool,
    ) -> PageFaultAction {
        let address = self
            .adjust_range(address, len as u64)
            .expect("the caller should have validated the range was in the mapping");

        self.base
            .imp
            .page_fault(address, len, write, bitmap_failure)
    }

    unsafe fn write_fallback(
        &self,
        address: u64,
        src: *const u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        let address = self.adjust_range(address, len as u64)?;
        // SAFETY: guaranteed by caller.
        unsafe { self.base.imp.write_fallback(address, src, len) }
    }

    fn fill_fallback(
        &self,
        address: u64,
        val: u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        let address = self.adjust_range(address, len as u64)?;
        self.base.imp.fill_fallback(address, val, len)
    }

    fn compare_exchange_fallback(
        &self,
        addr: u64,
        current: &mut [u8],
        new: &[u8],
    ) -> Result<bool, GuestMemoryBackingError> {
        let address = self.adjust_range(addr, new.len() as u64)?;
        self.base
            .imp
            .compare_exchange_fallback(address, current, new)
    }

    unsafe fn read_fallback(
        &self,
        address: u64,
        dest: *mut u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        let address = self.adjust_range(address, len as u64)?;
        // SAFETY: guaranteed by caller.
        unsafe { self.base.imp.read_fallback(address, dest, len) }
    }

    fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
        let address = self.adjust_range(address, len)?;
        self.base.imp.expose_va(address, len)
    }

    fn base_iova(&self) -> Option<u64> {
        let region = &self.base.regions[self.region];
        Some(region.base_iova? + (self.offset & self.base.region_def.region_mask))
    }
}

/// Create a default guest memory subrange that verifies range limits and calls
/// back into the base implementation.
fn create_memory_subrange(
    base: Arc<GuestMemoryInner>,
    offset: u64,
    len: u64,
    _allow_preemptive_locking: bool,
) -> Result<GuestMemory, GuestMemoryBackingError> {
    let (_, _, region) = base.region(offset, len)?;
    Ok(GuestMemory::new(
        base.debug_name.clone(),
        GuestMemoryAccessRange {
            base,
            offset,
            len,
            region,
        },
    ))
}

struct MultiRegionGuestMemoryAccess<T> {
    imps: Vec<Option<T>>,
    region_def: RegionDefinition,
}

impl<T> MultiRegionGuestMemoryAccess<T> {
    fn region(&self, gpa: u64, len: u64) -> Result<(&T, u64), GuestMemoryBackingError> {
        let (i, offset) = self.region_def.region(gpa, len)?;
        let imp = self.imps[i].as_ref().ok_or(GuestMemoryBackingError::new(
            GuestMemoryErrorKind::OutOfRange,
            gpa,
            OutOfRange,
        ))?;
        Ok((imp, offset))
    }
}

// Dispatches each operation to the backing for the region containing the address.
impl<T: GuestMemoryAccess> DynGuestMemoryAccess for MultiRegionGuestMemoryAccess<T> {
    fn subrange(
        &self,
        offset: u64,
        len: u64,
        allow_preemptive_locking: bool,
    ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
        let (region, offset_in_region) = self.region(offset, len)?;
        region.subrange(offset_in_region, len, allow_preemptive_locking)
    }

    unsafe fn read_fallback(
        &self,
        addr: u64,
        dest: *mut u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        let (region, offset_in_region) = self.region(addr, len as u64)?;
        // SAFETY: guaranteed by caller.
        unsafe { region.read_fallback(offset_in_region, dest, len) }
    }

    unsafe fn write_fallback(
        &self,
        addr: u64,
        src: *const u8,
        len: usize,
    ) -> Result<(), GuestMemoryBackingError> {
        let (region, offset_in_region) = self.region(addr, len as u64)?;
        // SAFETY: guaranteed by caller.
        unsafe { region.write_fallback(offset_in_region, src, len) }
    }

    fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
        let (region, offset_in_region) = self.region(addr, len as u64)?;
        region.fill_fallback(offset_in_region, val, len)
    }

    fn compare_exchange_fallback(
        &self,
        addr: u64,
        current: &mut [u8],
        new: &[u8],
    ) -> Result<bool, GuestMemoryBackingError> {
        let (region, offset_in_region) = self.region(addr, new.len() as u64)?;
        region.compare_exchange_fallback(offset_in_region, current, new)
    }

    fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
        let (region, offset_in_region) = self.region(address, len)?;
        region.expose_va(offset_in_region, len)
    }

    fn page_fault(
        &self,
        address: u64,
        len: usize,
        write: bool,
        bitmap_failure: bool,
    ) -> PageFaultAction {
        match self.region(address, len as u64) {
            Ok((region, offset_in_region)) => {
                region.page_fault(offset_in_region, len, write, bitmap_failure)
            }
            Err(err) => PageFaultAction::Fail(PageFaultError {
                kind: err.kind,
                err: err.err,
            }),
        }
    }
}

/// A wrapper around a `GuestMemoryAccess` that provides methods for safely
/// reading and writing guest memory.
// NOTE: this type uses `inspect(skip)`, as it ends up being a dependency of
// _many_ objects, and littering the inspect graph with references to the same
// node would be silly.
#[derive(Debug, Clone, Inspect)]
#[inspect(skip)]
pub struct GuestMemory {
    inner: Arc<GuestMemoryInner>,
}

struct GuestMemoryInner<T: ?Sized = dyn DynGuestMemoryAccess> {
    region_def: RegionDefinition,
    regions: Vec<MemoryRegion>,
    debug_name: Arc<str>,
    allocated: bool,
    imp: T,
}

impl<T: ?Sized> Debug for GuestMemoryInner<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GuestMemoryInner")
            .field("region_def", &self.region_def)
            .field("regions", &self.regions)
            .finish()
    }
}

#[derive(Debug, Copy, Clone, Default)]
struct MemoryRegion {
    mapping: Option<SendPtrU8>,
    #[cfg(feature = "bitmap")]
    bitmaps: Option<[SendPtrU8; 2]>,
    #[cfg(feature = "bitmap")]
    bitmap_start: u8,
    len: u64,
    base_iova: Option<u64>,
}

/// The access type. The values correspond to bitmap indexes.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum AccessType {
    Read = 0,
    Write = 1,
}

/// `NonNull<u8>` that implements `Send+Sync`.
///
/// Rust makes pointers `!Send+!Sync` by default to force you to think about the
/// ownership model and thread safety of types using pointers--there is nothing
/// safety-related about `Send`/`Sync` on pointers by themselves since all such
/// accesses to pointers require `unsafe` blocks anyway.
///
/// However, in practice, this leads to spurious manual `Send+Sync` impls on
/// types containing pointers, especially those containing generics. Define a
/// wrapping pointer type that implements `Send+Sync` so that the normal auto
/// trait rules apply to types containing these pointers.
#[derive(Debug, Copy, Clone)]
struct SendPtrU8(NonNull<u8>);

// SAFETY: see type description.
unsafe impl Send for SendPtrU8 {}
// SAFETY: see type description.
unsafe impl Sync for SendPtrU8 {}

impl MemoryRegion {
    fn new(imp: &impl GuestMemoryAccess) -> Self {
        #[cfg(feature = "bitmap")]
        let (bitmaps, bitmap_start) = {
            let bitmap_info = imp.access_bitmap();
            let bitmaps = bitmap_info
                .as_ref()
                .map(|bm| [SendPtrU8(bm.read_bitmap), SendPtrU8(bm.write_bitmap)]);
            let bitmap_start = bitmap_info.map_or(0, |bi| bi.bit_offset);
            (bitmaps, bitmap_start)
        };
        Self {
            mapping: imp.mapping().map(SendPtrU8),
            #[cfg(feature = "bitmap")]
            bitmaps,
            #[cfg(feature = "bitmap")]
            bitmap_start,
            len: imp.max_address(),
            base_iova: imp.base_iova(),
        }
    }

    /// # Safety
    ///
    /// The caller must ensure that `offset + len` fits in this region, and that
    /// the object bitmap is currently valid for atomic read access from this
    /// thread.
    unsafe fn check_access(
        &self,
        access_type: AccessType,
        offset: u64,
        len: u64,
    ) -> Result<(), u64> {
        debug_assert!(self.len >= offset + len);
        #[cfg(not(feature = "bitmap"))]
        let _ = access_type;

        #[cfg(feature = "bitmap")]
        if let Some(bitmaps) = &self.bitmaps {
            let SendPtrU8(bitmap) = bitmaps[access_type as usize];
            let start = offset / PAGE_SIZE64;
            let end = (offset + len - 1) / PAGE_SIZE64;
            // FUTURE: consider optimizing this separately for multi-page and
            // single-page accesses.
            for gpn in start..=end {
                let bit_offset = self.bitmap_start as u64 + gpn;
                // SAFETY: the caller ensures that the bitmap is big enough and
                // valid for atomic read access from this thread.
                let bit = unsafe {
                    (*bitmap
                        .as_ptr()
                        .cast_const()
                        .cast::<AtomicU8>()
                        .add(bit_offset as usize / 8))
                    .load(std::sync::atomic::Ordering::Relaxed)
                        & (1 << (bit_offset % 8))
                };
                if bit == 0 {
                    return Err((gpn * PAGE_SIZE64).saturating_sub(offset));
                }
            }
        }
        Ok(())
    }
}

/// The default implementation is [`GuestMemory::empty`].
impl Default for GuestMemory {
    fn default() -> Self {
        Self::empty()
    }
}

struct Empty;

// SAFETY: the mapping is empty, so all requirements are trivially satisfied.
unsafe impl GuestMemoryAccess for Empty {
    fn mapping(&self) -> Option<NonNull<u8>> {
        None
    }

    fn max_address(&self) -> u64 {
        0
    }
}

#[derive(Debug, Error)]
pub enum MultiRegionError {
    #[error("region size {0:#x} is not a power of 2")]
    NotPowerOfTwo(u64),
    #[error("region size {0:#x} is smaller than a page")]
    RegionSizeTooSmall(u64),
    #[error(
        "too many regions ({region_count}) for region size {region_size:#x}; max is {max_region_count}"
    )]
    TooManyRegions {
        region_count: usize,
        max_region_count: usize,
        region_size: u64,
    },
    #[error("backing size {backing_size:#x} is too large for region size {region_size:#x}")]
    BackingTooLarge { backing_size: u64, region_size: u64 },
}

/// The RCU domain memory accesses occur under. Updates to any memory access
/// bitmaps must be synchronized under this domain.
///
/// See [`GuestMemoryAccess::access_bitmap`] for more details.
///
/// This is currently the global domain, but this is reexported here to make
/// calling code clearer.
#[cfg(feature = "bitmap")]
pub fn rcu() -> minircu::RcuDomain {
    // Use the global domain unless we find a reason to do something else.
    minircu::global()
}

impl GuestMemory {
    /// Returns a new instance using `imp` as the backing.
    ///
    /// `debug_name` is used to specify which guest memory is being accessed in
    /// error messages.
    pub fn new(debug_name: impl Into<Arc<str>>, imp: impl GuestMemoryAccess) -> Self {
        // Install signal handlers on unix if a mapping is present.
        //
        // Skip this on miri even when there is a mapping, since the mapping may
        // never be accessed by the code under test.
        if imp.mapping().is_some() && !cfg!(miri) {
            sparse_mmap::initialize_try_copy();
        }
        Self::new_inner(debug_name.into(), imp, false)
    }

    fn new_inner(debug_name: Arc<str>, imp: impl GuestMemoryAccess, allocated: bool) -> Self {
        let regions = vec![MemoryRegion::new(&imp)];
        Self {
            inner: Arc::new(GuestMemoryInner {
                imp,
                debug_name,
                region_def: RegionDefinition {
                    invalid_mask: 1 << 63,
                    region_mask: !0 >> 1,
                    region_bits: 63, // right shift of 64 isn't valid, so restrict the space
                },
                regions,
                allocated,
            }),
        }
    }

    /// Creates a new multi-region guest memory, made up of multiple mappings.
    /// This allows you to create a very large sparse layout (up to the limits
    /// of the VM's physical address space) without having to allocate an
    /// enormous amount of virtual address space.
    ///
    /// Each region will be `region_size` bytes and will start immediately after
    /// the last one. This must be a power of two, be at least a page in size,
    /// and cannot fill the full 64-bit address space.
    ///
    /// `imps` must be a list of [`GuestMemoryAccess`] implementations, one for
    /// each region. Use `None` if the corresponding region is empty.
    ///
    /// A region's backing cannot be larger than the region. This is necessary
    /// to avoid callers expecting to be able to access a memory range that
    /// spans two regions.
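    ///
    /// # Example
    ///
    /// A sketch (using the crate's page-aligned heap backing) of a two-region
    /// layout where the second region is left empty:
    ///
    /// ```no_run
    /// use guestmem::{AlignedHeapMemory, GuestMemory};
    ///
    /// const REGION_SIZE: u64 = 0x1_0000_0000; // each region spans 4 GiB of GPA space
    /// let regions = vec![Some(AlignedHeapMemory::new(0x10000)), None];
    /// let gm = GuestMemory::new_multi_region("example", REGION_SIZE, regions).unwrap();
    /// // The first region starts at GPA 0; the (empty) second region starts
    /// // at REGION_SIZE.
    /// gm.write_at(0x1000, &[0u8; 8]).unwrap();
    /// ```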
    pub fn new_multi_region(
        debug_name: impl Into<Arc<str>>,
        region_size: u64,
        mut imps: Vec<Option<impl GuestMemoryAccess>>,
    ) -> Result<Self, MultiRegionError> {
        // Install signal handlers on unix.
        sparse_mmap::initialize_try_copy();

        if !region_size.is_power_of_two() {
            return Err(MultiRegionError::NotPowerOfTwo(region_size));
        }
        if region_size < PAGE_SIZE64 {
            return Err(MultiRegionError::RegionSizeTooSmall(region_size));
        }
        let region_bits = region_size.trailing_zeros();

        let max_region_count = 1 << (63 - region_bits);

        let region_count = imps.len().next_power_of_two();
        if region_count > max_region_count {
            return Err(MultiRegionError::TooManyRegions {
                region_count,
                max_region_count,
                region_size,
            });
        }

        let valid_bits = region_bits + region_count.trailing_zeros();
        assert!(valid_bits < 64);
        let invalid_mask = !0 << valid_bits;

        let mut regions = vec![MemoryRegion::default(); region_count];
        for (imp, region) in imps.iter().zip(&mut regions) {
            let Some(imp) = imp else { continue };
            let backing_size = imp.max_address();
            if backing_size > region_size {
                return Err(MultiRegionError::BackingTooLarge {
                    backing_size,
                    region_size,
                });
            }
            *region = MemoryRegion::new(imp);
        }

        let region_def = RegionDefinition {
            invalid_mask,
            region_mask: region_size - 1,
            region_bits,
        };

        imps.resize_with(region_count, || None);
        let imp = MultiRegionGuestMemoryAccess { imps, region_def };

        let inner = GuestMemoryInner {
            debug_name: debug_name.into(),
            region_def,
            regions,
            imp,
            allocated: false,
        };

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Allocates a guest memory object on the heap with the given size in
    /// bytes.
    ///
    /// `size` will be rounded up to the page size. The backing buffer will be
    /// page aligned.
    ///
    /// The debug name in errors will be "heap". If you want to provide a
    /// different debug name, manually use `GuestMemory::new` with
    /// [`AlignedHeapMemory`].
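    ///
    /// # Example
    ///
    /// A sketch of a round trip through an allocated buffer:
    ///
    /// ```no_run
    /// let mut gm = guestmem::GuestMemory::allocate(0x2000);
    /// gm.write_at(0, &[7u8; 16]).unwrap();
    /// // Memory from `allocate` is backed by `AlignedHeapMemory`, so the
    /// // buffer can also be accessed directly while it is unaliased.
    /// let buf = gm.inner_buf_mut().unwrap();
    /// assert_eq!(&buf[..16], &[7u8; 16]);
    /// ```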
    pub fn allocate(size: usize) -> Self {
        Self::new_inner("heap".into(), AlignedHeapMemory::new(size), true)
    }

    /// If this memory is unaliased and was created via
    /// [`GuestMemory::allocate`], returns the backing buffer.
    ///
    /// Returns `Err(self)` if there are other references to this memory (via
    /// `clone()`).
    pub fn into_inner_buf(self) -> Result<AlignedHeapMemory, Self> {
        if !self.inner.allocated {
            return Err(self);
        }
        // FUTURE: consider using `Any` and `Arc::downcast` once trait upcasting is stable.
        // SAFETY: the inner implementation is guaranteed to be an `AlignedHeapMemory`.
        let inner = unsafe {
            Arc::<GuestMemoryInner<AlignedHeapMemory>>::from_raw(Arc::into_raw(self.inner).cast())
        };
        let inner = Arc::try_unwrap(inner).map_err(|inner| Self { inner })?;
        Ok(inner.imp)
    }

    /// If this memory was created via [`GuestMemory::allocate`], returns a slice to
    /// the allocated buffer.
    pub fn inner_buf(&self) -> Option<&[AtomicU8]> {
        if !self.inner.allocated {
            return None;
        }
        // FUTURE: consider using `<dyn Any>::downcast` once trait upcasting is stable.
        // SAFETY: the inner implementation is guaranteed to be an `AlignedHeapMemory`.
        let inner = unsafe { &*core::ptr::from_ref(&self.inner.imp).cast::<AlignedHeapMemory>() };
        Some(inner)
    }

    /// If this memory was created via [`GuestMemory::allocate`] and there are
    /// no other references to it, returns a mutable slice to the backing
    /// buffer.
    pub fn inner_buf_mut(&mut self) -> Option<&mut [u8]> {
        if !self.inner.allocated {
            return None;
        }
        let inner = Arc::get_mut(&mut self.inner)?;
        // FUTURE: consider using `<dyn Any>::downcast` once trait upcasting is stable.
        // SAFETY: the inner implementation is guaranteed to be an `AlignedHeapMemory`.
        let imp = unsafe { &mut *core::ptr::from_mut(&mut inner.imp).cast::<AlignedHeapMemory>() };
        Some(imp.as_mut())
    }

    /// Returns an empty guest memory, which fails every operation.
    pub fn empty() -> Self {
        GuestMemory::new("empty", Empty)
    }

    fn wrap_err(
        &self,
        gpa_len: Option<(u64, u64)>,
        op: GuestMemoryOperation,
        err: GuestMemoryBackingError,
    ) -> GuestMemoryError {
        let range = gpa_len.map(|(gpa, len)| (gpa..gpa.wrapping_add(len)));
        GuestMemoryError::new(&self.inner.debug_name, range, op, err)
    }

    fn with_op<T>(
        &self,
        gpa_len: Option<(u64, u64)>,
        op: GuestMemoryOperation,
        f: impl FnOnce() -> Result<T, GuestMemoryBackingError>,
    ) -> Result<T, GuestMemoryError> {
        f().map_err(|err| self.wrap_err(gpa_len, op, err))
    }

    /// Creates a smaller view into guest memory, constraining accesses within
    /// the new boundaries. For smaller ranges, some memory implementations
    /// (e.g. HDV) may choose to lock the pages into memory for faster access.
    /// Locking random guest memory may cause issues, so only opt in to this
    /// behavior when the range can be considered "owned" by the caller.
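    ///
    /// # Example
    ///
    /// A sketch that confines accesses to a one-page window:
    ///
    /// ```no_run
    /// # let gm = guestmem::GuestMemory::allocate(0x4000);
    /// let window = gm.subrange(0x1000, 0x1000, false).unwrap();
    /// // Offsets within the subrange are relative to its start, and accesses
    /// // outside `0..0x1000` fail.
    /// window.write_at(0, &[1, 2, 3, 4]).unwrap();
    /// assert!(window.write_at(0xfff, &[0u8; 2]).is_err());
    /// ```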
1380    pub fn subrange(
1381        &self,
1382        offset: u64,
1383        len: u64,
1384        allow_preemptive_locking: bool,
1385    ) -> Result<GuestMemory, GuestMemoryError> {
1386        self.with_op(Some((offset, len)), GuestMemoryOperation::Subrange, || {
1387            if let Some(guest_memory) =
1388                self.inner
1389                    .imp
1390                    .subrange(offset, len, allow_preemptive_locking)?
1391            {
1392                Ok(guest_memory)
1393            } else {
1394                create_memory_subrange(self.inner.clone(), offset, len, allow_preemptive_locking)
1395            }
1396        })
1397    }
1398
1399    /// Returns a subrange where pages from the subrange can be locked.
1400    pub fn lockable_subrange(
1401        &self,
1402        offset: u64,
1403        len: u64,
1404    ) -> Result<GuestMemory, GuestMemoryError> {
1405        // TODO: Enforce subrange is actually lockable.
1406        self.subrange(offset, len, true)
1407    }
1408
1409    /// Returns the mapping for all of guest memory.
1410    ///
1411    /// Returns `None` if there is more than one region or if the memory is not
1412    /// mapped.
1413    pub fn full_mapping(&self) -> Option<(*mut u8, usize)> {
1414        if let [region] = self.inner.regions.as_slice() {
1415            #[cfg(feature = "bitmap")]
1416            if region.bitmaps.is_some() {
1417                return None;
1418            }
1419            region
1420                .mapping
1421                .map(|SendPtrU8(ptr)| (ptr.as_ptr(), region.len as usize))
1422        } else {
1423            None
1424        }
1425    }
1426
1427    /// Gets the IO address for DMAing to `gpa` from a user-mode driver not
1428    /// going through an IOMMU.
1429    pub fn iova(&self, gpa: u64) -> Option<u64> {
1430        let (region, offset, _) = self.inner.region(gpa, 1).ok()?;
1431        Some(region.base_iova? + offset)
1432    }
1433
1434    /// Gets a pointer to the VA range for `gpa..gpa+len`.
1435    ///
1436    /// Returns `Ok(None)` if there is no mapping. Returns `Err(_)` if the
1437    /// memory is out of range.
1438    fn mapping_range(
1439        &self,
1440        access_type: AccessType,
1441        gpa: u64,
1442        len: usize,
1443    ) -> Result<Option<*mut u8>, GuestMemoryBackingError> {
1444        let (region, offset, _) = self.inner.region(gpa, len as u64)?;
1445        if let Some(SendPtrU8(ptr)) = region.mapping {
1446            loop {
1447                // SAFETY: offset + len is checked by `region()` to be inside the VA range.
1448                let fault_offset = unsafe {
1449                    match region.check_access(access_type, offset, len as u64) {
1450                        Ok(()) => return Ok(Some(ptr.as_ptr().add(offset as usize))),
1451                        Err(n) => n,
1452                    }
1453                };
1454
1455                // Resolve the fault and try again.
1456                match self.inner.imp.page_fault(
1457                    gpa + fault_offset,
1458                    len - fault_offset as usize,
1459                    access_type == AccessType::Write,
1460                    true,
1461                ) {
1462                    PageFaultAction::Fail(err) => {
1463                        return Err(GuestMemoryBackingError::new(
1464                            err.kind,
1465                            gpa + fault_offset,
1466                            err.err,
1467                        ));
1468                    }
1469                    PageFaultAction::Retry => {}
1470                    PageFaultAction::Fallback => break,
1471                }
1472            }
1473        }
1474        Ok(None)
1475    }
1476
1477    /// Runs `f` with a pointer to the mapped memory. If `f` fails, tries to
1478    /// resolve the fault (failing on error), then loops.
1479    ///
1480    /// If there is no mapping for the memory, or if the fault handler requests
1481    /// it, call `fallback` instead. `fallback` will not be called unless `gpa`
1482    /// and `len` are in range.
1483    fn run_on_mapping<T, P>(
1484        &self,
1485        access_type: AccessType,
1486        gpa: u64,
1487        len: usize,
1488        mut param: P,
1489        mut f: impl FnMut(&mut P, *mut u8) -> Result<T, sparse_mmap::MemoryError>,
1490        fallback: impl FnOnce(&mut P) -> Result<T, GuestMemoryBackingError>,
1491    ) -> Result<T, GuestMemoryBackingError> {
1492        let op = || {
1493            let Some(mapping) = self.mapping_range(access_type, gpa, len)? else {
1494                return fallback(&mut param);
1495            };
1496
1497            // Try until the fault fails to resolve.
1498            loop {
1499                match f(&mut param, mapping) {
1500                    Ok(t) => return Ok(t),
1501                    Err(fault) => {
1502                        match self.inner.imp.page_fault(
1503                            gpa + fault.offset() as u64,
1504                            len - fault.offset(),
1505                            access_type == AccessType::Write,
1506                            false,
1507                        ) {
1508                            PageFaultAction::Fail(err) => {
1509                                return Err(GuestMemoryBackingError::new(
1510                                    err.kind,
1511                                    gpa + fault.offset() as u64,
1512                                    err.err,
1513                                ));
1514                            }
1515                            PageFaultAction::Retry => {}
1516                            PageFaultAction::Fallback => return fallback(&mut param),
1517                        }
1518                    }
1519                }
1520            }
1521        };
1522        // If the `bitmap` feature is enabled, run the function in an RCU
1523        // critical section. This will allow callers to flush concurrent
1524        // accesses after bitmap updates.
1525        #[cfg(feature = "bitmap")]
1526        return rcu().run(op);
1527        #[cfg(not(feature = "bitmap"))]
1528        op()
1529    }
1530
1531    /// # Safety
1532    ///
1533    /// The caller must ensure that `src`..`src + len` is a valid buffer for reads.
1534    unsafe fn write_ptr(
1535        &self,
1536        gpa: u64,
1537        src: *const u8,
1538        len: usize,
1539    ) -> Result<(), GuestMemoryBackingError> {
1540        if len == 0 {
1541            return Ok(());
1542        }
1543        self.run_on_mapping(
1544            AccessType::Write,
1545            gpa,
1546            len,
1547            (),
1548            |(), dest| {
1549                // SAFETY: dest..dest+len is guaranteed to point to a reserved VA
1550                // range, and src..src+len is guaranteed by the caller to be a valid
1551                // buffer for reads.
1552                unsafe { sparse_mmap::try_copy(src, dest, len) }
1553            },
1554            |()| {
1555                // SAFETY: src..src+len is guaranteed by the caller to point to a valid
1556                // buffer for reads.
1557                unsafe { self.inner.imp.write_fallback(gpa, src, len) }
1558            },
1559        )
1560    }
1561
1562    /// Writes `src` into guest memory at address `gpa`.
1563    pub fn write_at(&self, gpa: u64, src: &[u8]) -> Result<(), GuestMemoryError> {
1564        self.with_op(
1565            Some((gpa, src.len() as u64)),
1566            GuestMemoryOperation::Write,
1567            || self.write_at_inner(gpa, src),
1568        )
1569    }
1570
1571    fn write_at_inner(&self, gpa: u64, src: &[u8]) -> Result<(), GuestMemoryBackingError> {
1572        // SAFETY: `src` is a valid buffer for reads.
1573        unsafe { self.write_ptr(gpa, src.as_ptr(), src.len()) }
1574    }
1575
1576    /// Writes `src` into guest memory at address `gpa`.
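    ///
    /// Unlike [`GuestMemory::write_at`], the source is a slice of `AtomicU8`,
    /// so the buffer may be concurrently modified while the write proceeds.
    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the crate is importable as `guestmem` and
    /// an allocation-backed [`GuestMemory`], as in this crate's tests):
    ///
    /// ```no_run
    /// use guestmem::GuestMemory;
    /// use std::sync::atomic::AtomicU8;
    ///
    /// let gm = GuestMemory::allocate(0x1000);
    /// let buf = [AtomicU8::new(0xab), AtomicU8::new(0xcd)];
    /// gm.write_from_atomic(0x10, &buf).unwrap();
    /// assert_eq!(gm.read_plain::<[u8; 2]>(0x10).unwrap(), [0xab, 0xcd]);
    /// ```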
1577    pub fn write_from_atomic(&self, gpa: u64, src: &[AtomicU8]) -> Result<(), GuestMemoryError> {
1578        self.with_op(
1579            Some((gpa, src.len() as u64)),
1580            GuestMemoryOperation::Write,
1581            || {
1582                // SAFETY: `src` is a valid buffer for reads.
1583                unsafe { self.write_ptr(gpa, src.as_ptr().cast(), src.len()) }
1584            },
1585        )
1586    }
1587
1588    /// Writes `len` bytes of `val` into guest memory at address `gpa`.
1589    pub fn fill_at(&self, gpa: u64, val: u8, len: usize) -> Result<(), GuestMemoryError> {
1590        self.with_op(Some((gpa, len as u64)), GuestMemoryOperation::Fill, || {
1591            self.fill_at_inner(gpa, val, len)
1592        })
1593    }
1594
1595    fn fill_at_inner(&self, gpa: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
1596        if len == 0 {
1597            return Ok(());
1598        }
1599        self.run_on_mapping(
1600            AccessType::Write,
1601            gpa,
1602            len,
1603            (),
1604            |(), dest| {
1605                // SAFETY: dest..dest+len is guaranteed to point to a reserved VA range.
1606                unsafe { sparse_mmap::try_write_bytes(dest, val, len) }
1607            },
1608            |()| self.inner.imp.fill_fallback(gpa, val, len),
1609        )
1610    }
1611
1612    /// Reads from guest memory into `dest..dest+len`.
1613    ///
1614    /// # Safety
1615    /// The caller must ensure dest..dest+len is a valid buffer for writes.
1616    unsafe fn read_ptr(
1617        &self,
1618        gpa: u64,
1619        dest: *mut u8,
1620        len: usize,
1621    ) -> Result<(), GuestMemoryBackingError> {
1622        if len == 0 {
1623            return Ok(());
1624        }
1625        self.run_on_mapping(
1626            AccessType::Read,
1627            gpa,
1628            len,
1629            (),
1630            |(), src| {
1631                // SAFETY: src..src+len is guaranteed to point to a reserved VA
1632                // range, and dest..dest+len is guaranteed by the caller to be a
1633                // valid buffer for writes.
1634                unsafe { sparse_mmap::try_copy(src, dest, len) }
1635            },
1636            |()| {
1637                // SAFETY: dest..dest+len is guaranteed by the caller to point to a
1638                // valid buffer for writes.
1639                unsafe { self.inner.imp.read_fallback(gpa, dest, len) }
1640            },
1641        )
1642    }
1643
1644    fn read_at_inner(&self, gpa: u64, dest: &mut [u8]) -> Result<(), GuestMemoryBackingError> {
1645        // SAFETY: `dest` is a valid buffer for writes.
1646        unsafe { self.read_ptr(gpa, dest.as_mut_ptr(), dest.len()) }
1647    }
1648
1649    /// Reads from guest memory address `gpa` into `dest`.
1650    pub fn read_at(&self, gpa: u64, dest: &mut [u8]) -> Result<(), GuestMemoryError> {
1651        self.with_op(
1652            Some((gpa, dest.len() as u64)),
1653            GuestMemoryOperation::Read,
1654            || self.read_at_inner(gpa, dest),
1655        )
1656    }
1657
1658    /// Reads from guest memory address `gpa` into `dest`.
1659    pub fn read_to_atomic(&self, gpa: u64, dest: &[AtomicU8]) -> Result<(), GuestMemoryError> {
1660        self.with_op(
1661            Some((gpa, dest.len() as u64)),
1662            GuestMemoryOperation::Read,
1663            // SAFETY: `dest` is a valid buffer for writes.
1664            || unsafe { self.read_ptr(gpa, dest.as_ptr() as *mut u8, dest.len()) },
1665        )
1666    }
1667
1668    /// Writes an object to guest memory at address `gpa`.
1669    ///
1670    /// If the object is 1, 2, 4, or 8 bytes and the address is naturally
1671    /// aligned, then the write will be performed atomically. Here, this means
1672    /// that concurrent readers (via `read_plain`) cannot observe a torn write
1673    /// but will observe either the old or new value.
1674    ///
1675    /// The memory ordering of the write is unspecified.
1676    ///
1677    /// FUTURE: once we are on Rust 1.79, add a method specifically for atomic
1678    /// accesses that const asserts that the size is appropriate.
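    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the crate is importable as `guestmem` and
    /// an allocation-backed [`GuestMemory`]); a 4-byte write at a
    /// 4-byte-aligned address takes the atomic path described above:
    ///
    /// ```no_run
    /// use guestmem::GuestMemory;
    ///
    /// let gm = GuestMemory::allocate(0x1000);
    /// gm.write_plain::<u32>(0x10, &0xdead_beef).unwrap();
    /// assert_eq!(gm.read_plain::<u32>(0x10).unwrap(), 0xdead_beef);
    /// ```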
1679    pub fn write_plain<T: IntoBytes + Immutable + KnownLayout>(
1680        &self,
1681        gpa: u64,
1682        b: &T,
1683    ) -> Result<(), GuestMemoryError> {
1684        // Note that `len` is a compile-time constant, so the match below will compile out.
1685        let len = size_of::<T>();
1686        self.with_op(Some((gpa, len as u64)), GuestMemoryOperation::Write, || {
1687            self.run_on_mapping(
1688                AccessType::Write,
1689                gpa,
1690                len,
1691                (),
1692                |(), dest| {
1693                    match len {
1694                        1 | 2 | 4 | 8 => {
1695                            // SAFETY: dest..dest+len is guaranteed to point to
1696                            // a reserved VA range.
1697                            unsafe { sparse_mmap::try_write_volatile(dest.cast(), b) }
1698                        }
1699                        _ => {
1700                            // SAFETY: dest..dest+len is guaranteed to point to
1701                            // a reserved VA range.
1702                            unsafe { sparse_mmap::try_copy(b.as_bytes().as_ptr(), dest, len) }
1703                        }
1704                    }
1705                },
1706                |()| {
1707                    // SAFETY: b is a valid buffer for reads.
1708                    unsafe {
1709                        self.inner
1710                            .imp
1711                            .write_fallback(gpa, b.as_bytes().as_ptr(), len)
1712                    }
1713                },
1714            )
1715        })
1716    }
1717
1718    /// Attempts a sequentially-consistent compare exchange of the value at `gpa`.
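    ///
    /// On success the returned inner value is `Ok`; on failure it is `Err`,
    /// carrying the value observed in guest memory.
    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the crate is importable as `guestmem` and
    /// an allocation-backed [`GuestMemory`]):
    ///
    /// ```no_run
    /// use guestmem::GuestMemory;
    ///
    /// let gm = GuestMemory::allocate(0x1000);
    /// gm.write_plain::<u32>(0, &5).unwrap();
    /// // The exchange succeeds only if the guest value is still 5.
    /// let r = gm.compare_exchange::<u32>(0, 5, 7).unwrap();
    /// assert!(r.is_ok());
    /// assert_eq!(gm.read_plain::<u32>(0).unwrap(), 7);
    /// ```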
1719    pub fn compare_exchange<T: IntoBytes + FromBytes + Immutable + KnownLayout + Copy>(
1720        &self,
1721        gpa: u64,
1722        current: T,
1723        new: T,
1724    ) -> Result<Result<T, T>, GuestMemoryError> {
1725        let len = size_of_val(&new);
1726        self.with_op(
1727            Some((gpa, len as u64)),
1728            GuestMemoryOperation::CompareExchange,
1729            || {
1730                // Assume that if write is allowed, then read is allowed.
1731                self.run_on_mapping(
1732                    AccessType::Write,
1733                    gpa,
1734                    len,
1735                    (),
1736                    |(), dest| {
1737                        // SAFETY: dest..dest+len is guaranteed to point to a
1738                        // reserved VA range.
1739                        unsafe { sparse_mmap::try_compare_exchange(dest.cast(), current, new) }
1740                    },
1741                    |()| {
1742                        let mut current = current;
1743                        let success = self.inner.imp.compare_exchange_fallback(
1744                            gpa,
1745                            current.as_mut_bytes(),
1746                            new.as_bytes(),
1747                        )?;
1748
1749                        Ok(if success { Ok(new) } else { Err(current) })
1750                    },
1751                )
1752            },
1753        )
1754    }
1755
1756    /// Attempts a sequentially-consistent compare exchange of the value at `gpa`.
1757    pub fn compare_exchange_bytes<T: IntoBytes + FromBytes + Immutable + KnownLayout + ?Sized>(
1758        &self,
1759        gpa: u64,
1760        current: &mut T,
1761        new: &T,
1762    ) -> Result<bool, GuestMemoryError> {
1763        let len = size_of_val(new);
1764        assert_eq!(size_of_val(current), len);
1765        self.with_op(
1766            Some((gpa, len as u64)),
1767            GuestMemoryOperation::CompareExchange,
1768            || {
1769                // Assume that if write is allowed, then read is allowed.
1770                self.run_on_mapping(
1771                    AccessType::Write,
1772                    gpa,
1773                    len,
1774                    current,
1775                    |current, dest| {
1776                        // SAFETY: dest..dest+len is guaranteed to point to a
1777                        // reserved VA range.
1778                        unsafe { sparse_mmap::try_compare_exchange_ref(dest, *current, new) }
1779                    },
1780                    |current| {
1781                        let success = self.inner.imp.compare_exchange_fallback(
1782                            gpa,
1783                            current.as_mut_bytes(),
1784                            new.as_bytes(),
1785                        )?;
1786
1787                        Ok(success)
1788                    },
1789                )
1790            },
1791        )
1792    }
1793
1794    /// Reads an object from guest memory at address `gpa`.
1795    ///
1796    /// If the object is 1, 2, 4, or 8 bytes and the address is naturally
1797    /// aligned, then the read will be performed atomically. Here, this means
1798    /// that when there is a concurrent writer, callers will observe either the
1799    /// old or new value, but not a torn read.
1800    ///
1801    /// The memory ordering of the read is unspecified.
1802    ///
1803    /// FUTURE: once we are on Rust 1.79, add a method specifically for atomic
1804    /// accesses that const asserts that the size is appropriate.
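    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the crate is importable as `guestmem`);
    /// reads larger than 8 bytes take the plain copy path rather than the
    /// atomic path:
    ///
    /// ```no_run
    /// use guestmem::GuestMemory;
    ///
    /// let gm = GuestMemory::allocate(0x1000);
    /// gm.write_at(0x20, &[7u8; 16]).unwrap();
    /// assert_eq!(gm.read_plain::<[u8; 16]>(0x20).unwrap(), [7u8; 16]);
    /// ```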
1805    pub fn read_plain<T: FromBytes + Immutable + KnownLayout>(
1806        &self,
1807        gpa: u64,
1808    ) -> Result<T, GuestMemoryError> {
1809        // Note that `len` is a compile-time constant, so the match below will compile out.
1810        let len = size_of::<T>();
1811        self.with_op(Some((gpa, len as u64)), GuestMemoryOperation::Read, || {
1812            self.run_on_mapping(
1813                AccessType::Read,
1814                gpa,
1815                len,
1816                (),
1817                |(), src| {
1818                    match len {
1819                        1 | 2 | 4 | 8 => {
1820                            // SAFETY: src..src+len is guaranteed to point to a reserved VA
1821                            // range.
1822                            unsafe { sparse_mmap::try_read_volatile(src.cast::<T>()) }
1823                        }
1824                        _ => {
1825                            let mut obj = std::mem::MaybeUninit::<T>::zeroed();
1826                            // SAFETY: src..src+len is guaranteed to point to a reserved VA
1827                            // range.
1828                            unsafe { sparse_mmap::try_copy(src, obj.as_mut_ptr().cast(), len)? };
1829                            // SAFETY: `obj` was fully initialized by `try_copy`.
1830                            Ok(unsafe { obj.assume_init() })
1831                        }
1832                    }
1833                },
1834                |()| {
1835                    let mut obj = std::mem::MaybeUninit::<T>::zeroed();
1836                    // SAFETY: `obj` is a valid destination buffer of `len`
1837                    // bytes for the fallback read.
1838                    unsafe {
1839                        self.inner
1840                            .imp
1841                            .read_fallback(gpa, obj.as_mut_ptr().cast(), len)?;
1842                    }
1843                    // SAFETY: `obj` was fully initialized by `read_fallback`.
1844                    Ok(unsafe { obj.assume_init() })
1845                },
1846            )
1847        })
1848    }
1849
1850    fn probe_page_for_lock(
1851        &self,
1852        with_kernel_access: bool,
1853        gpa: u64,
1854    ) -> Result<*const AtomicU8, GuestMemoryBackingError> {
1855        let (region, offset, _) = self.inner.region(gpa, 1)?;
1856        let Some(SendPtrU8(ptr)) = region.mapping else {
1857            return Err(GuestMemoryBackingError::other(gpa, NotLockable));
1858        };
1859        // Ensure the virtual address can be exposed.
1860        if with_kernel_access {
1861            self.inner.imp.expose_va(gpa, 1)?;
1862        }
1863        let mut b = [0];
1864        // FUTURE: check the correct bitmap for the access type, which needs to
1865        // be passed in.
1866        self.read_at_inner(gpa, &mut b)?;
1867        // SAFETY: the read_at_inner call above includes a check that
1868        // ensures that `gpa` is in the VA range.
1869        let page = unsafe { ptr.as_ptr().add(offset as usize) };
1870        Ok(page.cast())
1871    }
1872
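    /// Locks the pages with the given guest page numbers, returning a
    /// [`LockedPages`] that keeps the backing mapping alive so the pages can
    /// be accessed directly via [`LockedPages::pages`].
    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the crate is importable as `guestmem` and
    /// an allocation-backed, fully mapped [`GuestMemory`]):
    ///
    /// ```no_run
    /// use guestmem::GuestMemory;
    ///
    /// let gm = GuestMemory::allocate(0x3000);
    /// // Lock guest pages 0 and 2 without requesting kernel access.
    /// let locked = gm.lock_gpns(false, &[0, 2]).unwrap();
    /// assert_eq!(locked.pages().len(), 2);
    /// ```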
1873    pub fn lock_gpns(
1874        &self,
1875        with_kernel_access: bool,
1876        gpns: &[u64],
1877    ) -> Result<LockedPages, GuestMemoryError> {
1878        self.with_op(None, GuestMemoryOperation::Lock, || {
1879            let mut pages = Vec::with_capacity(gpns.len());
1880            for &gpn in gpns {
1881                let gpa = gpn_to_gpa(gpn).map_err(GuestMemoryBackingError::gpn)?;
1882                let page = self.probe_page_for_lock(with_kernel_access, gpa)?;
1883                pages.push(PagePtr(page));
1884            }
1885            Ok(LockedPages {
1886                pages: pages.into_boxed_slice(),
1887                _mem: self.inner.clone(),
1888            })
1889        })
1890    }
1891
1892    pub fn probe_gpns(&self, gpns: &[u64]) -> Result<(), GuestMemoryError> {
1893        self.with_op(None, GuestMemoryOperation::Probe, || {
1894            for &gpn in gpns {
1895                let mut b = [0];
1896                self.read_at_inner(
1897                    gpn_to_gpa(gpn).map_err(GuestMemoryBackingError::gpn)?,
1898                    &mut b,
1899                )?;
1900            }
1901            Ok(())
1902        })
1903    }
1904
1905    /// Checks whether the given GPA is readable.
1906    pub fn probe_gpa_readable(&self, gpa: u64) -> Result<(), GuestMemoryErrorKind> {
1907        let mut b = [0];
1908        self.read_at_inner(gpa, &mut b).map_err(|err| err.kind)
1909    }
1910
1911    /// Gets a slice of guest memory assuming the memory was already locked via
1912    /// [`GuestMemory::lock_gpns`].
1913    ///
1914    /// This is dangerous--if the pages have not been locked, then it could
1915    /// cause an access violation or guest memory corruption.
1916    ///
1917    /// Note that this is not `unsafe` since this cannot cause memory corruption
1918    /// in this process. Even if there is an access violation, the underlying VA
1919    /// space is known to be reserved.
1920    ///
1921    /// Panics if the requested buffer is out of range.
1922    fn dangerous_access_pre_locked_memory(&self, gpa: u64, len: usize) -> &[AtomicU8] {
1923        let addr = self
1924            .mapping_range(AccessType::Write, gpa, len)
1925            .unwrap()
1926            .unwrap();
1927        // SAFETY: addr..addr+len is checked above to be a valid VA range. It's
1928        // possible some of the pages aren't mapped and will cause AVs at
1929        // runtime when accessed, but, as discussed above, at a language level
1930        // this cannot cause any safety issues.
1931        unsafe { std::slice::from_raw_parts(addr.cast(), len) }
1932    }
1933
1934    fn op_range<F: FnMut(u64, Range<usize>) -> Result<(), GuestMemoryBackingError>>(
1935        &self,
1936        op: GuestMemoryOperation,
1937        range: &PagedRange<'_>,
1938        mut f: F,
1939    ) -> Result<(), GuestMemoryError> {
1940        self.with_op(None, op, || {
1941            let gpns = range.gpns();
1942            let offset = range.offset();
1943
1944            // Perform the operation in three phases: the first page (if it is not a
1945            // full page), the full pages, and the last page (if it is not a full
1946            // page).
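            //
            // As a concrete illustration (hypothetical numbers): with
            // offset % PAGE_SIZE == 0x100 and len == 0x2100, the phases are a
            // 0xf00-byte head, one full 0x1000-byte page, and a 0x200-byte
            // tail.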
1947            let mut byte_index = 0;
1948            let mut len = range.len();
1949            let mut page = 0;
1950            if offset % PAGE_SIZE != 0 {
1951                let head_len = std::cmp::min(len, PAGE_SIZE - (offset % PAGE_SIZE));
1952                let addr = gpn_to_gpa(gpns[page]).map_err(GuestMemoryBackingError::gpn)?
1953                    + offset as u64 % PAGE_SIZE64;
1954                f(addr, byte_index..byte_index + head_len)?;
1955                byte_index += head_len;
1956                len -= head_len;
1957                page += 1;
1958            }
1959            while len >= PAGE_SIZE {
1960                f(
1961                    gpn_to_gpa(gpns[page]).map_err(GuestMemoryBackingError::gpn)?,
1962                    byte_index..byte_index + PAGE_SIZE,
1963                )?;
1964                byte_index += PAGE_SIZE;
1965                len -= PAGE_SIZE;
1966                page += 1;
1967            }
1968            if len > 0 {
1969                f(
1970                    gpn_to_gpa(gpns[page]).map_err(GuestMemoryBackingError::gpn)?,
1971                    byte_index..byte_index + len,
1972                )?;
1973            }
1974
1975            Ok(())
1976        })
1977    }
1978
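    /// Writes `data` into the guest memory described by `range`.
    ///
    /// Panics if `data.len()` does not equal `range.len()`.
    ///
    /// # Example
    ///
    /// A minimal sketch; `some_paged_range()` is a hypothetical helper
    /// standing in for however the caller obtained a `PagedRange`:
    ///
    /// ```ignore
    /// // `some_paged_range()` is hypothetical; construction of the range is
    /// // not shown here.
    /// let range = some_paged_range();
    /// let data = vec![0u8; range.len()];
    /// gm.write_range(&range, &data)?;
    /// ```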
1979    pub fn write_range(&self, range: &PagedRange<'_>, data: &[u8]) -> Result<(), GuestMemoryError> {
1980        assert!(data.len() == range.len());
1981        self.op_range(GuestMemoryOperation::Write, range, move |addr, r| {
1982            self.write_at_inner(addr, &data[r])
1983        })
1984    }
1985
1986    pub fn fill_range(&self, range: &PagedRange<'_>, val: u8) -> Result<(), GuestMemoryError> {
1987        self.op_range(GuestMemoryOperation::Fill, range, move |addr, r| {
1988            self.fill_at_inner(addr, val, r.len())
1989        })
1990    }
1991
1992    pub fn zero_range(&self, range: &PagedRange<'_>) -> Result<(), GuestMemoryError> {
1993        self.op_range(GuestMemoryOperation::Fill, range, move |addr, r| {
1994            self.fill_at_inner(addr, 0, r.len())
1995        })
1996    }
1997
1998    pub fn read_range(
1999        &self,
2000        range: &PagedRange<'_>,
2001        data: &mut [u8],
2002    ) -> Result<(), GuestMemoryError> {
2003        assert!(data.len() == range.len());
2004        self.op_range(GuestMemoryOperation::Read, range, move |addr, r| {
2005            self.read_at_inner(addr, &mut data[r])
2006        })
2007    }
2008
2009    pub fn write_range_from_atomic(
2010        &self,
2011        range: &PagedRange<'_>,
2012        data: &[AtomicU8],
2013    ) -> Result<(), GuestMemoryError> {
2014        assert!(data.len() == range.len());
2015        self.op_range(GuestMemoryOperation::Write, range, move |addr, r| {
2016            let src = &data[r];
2017            // SAFETY: `src` is a valid buffer for reads.
2018            unsafe { self.write_ptr(addr, src.as_ptr().cast(), src.len()) }
2019        })
2020    }
2021
2022    pub fn read_range_to_atomic(
2023        &self,
2024        range: &PagedRange<'_>,
2025        data: &[AtomicU8],
2026    ) -> Result<(), GuestMemoryError> {
2027        assert!(data.len() == range.len());
2028        self.op_range(GuestMemoryOperation::Read, range, move |addr, r| {
2029            let dest = &data[r];
2030            // SAFETY: `dest` is a valid buffer for writes.
2031            unsafe { self.read_ptr(addr, dest.as_ptr().cast_mut().cast(), dest.len()) }
2032        })
2033    }
2034
2035    /// Locks the guest pages spanned by the specified `PagedRange` for the `'static` lifetime.
2036    ///
2037    /// # Arguments
2038    /// * `paged_range` - The guest memory range to lock.
2039    /// * `locked_range` - Receives a list of VA ranges to which each contiguous physical sub-range in `paged_range`
2040    ///   has been mapped. Must be initially empty.
2041    pub fn lock_range<T: LockedRange>(
2042        &self,
2043        paged_range: PagedRange<'_>,
2044        mut locked_range: T,
2045    ) -> Result<LockedRangeImpl<T>, GuestMemoryError> {
2046        self.with_op(None, GuestMemoryOperation::Lock, || {
2047            let gpns = paged_range.gpns();
2048            for &gpn in gpns {
2049                let gpa = gpn_to_gpa(gpn).map_err(GuestMemoryBackingError::gpn)?;
2050                self.probe_page_for_lock(true, gpa)?;
2051            }
2052            for range in paged_range.ranges() {
2053                let range = range.map_err(GuestMemoryBackingError::gpn)?;
2054                locked_range.push_sub_range(
2055                    self.dangerous_access_pre_locked_memory(range.start, range.len() as usize),
2056                );
2057            }
2058            Ok(LockedRangeImpl {
2059                _mem: self.inner.clone(),
2060                inner: locked_range,
2061            })
2062        })
2063    }
2064}
2065
2066#[derive(Debug, Error)]
2067#[error("invalid guest page number {0:#x}")]
2068pub struct InvalidGpn(u64);
2069
2070fn gpn_to_gpa(gpn: u64) -> Result<u64, InvalidGpn> {
2071    gpn.checked_mul(PAGE_SIZE64).ok_or(InvalidGpn(gpn))
2072}
2073
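/// Describes how a guest physical address is split into a region index and an
/// offset within that region (each region spans `1 << region_bits` bytes).
///
/// As a concrete illustration (hypothetical numbers): with `region_bits` of
/// 30 (1 GiB regions), a `gpa` of `0x8000_1000` resolves to region index 2 at
/// offset `0x1000`.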
2074#[derive(Debug, Copy, Clone, Default)]
2075struct RegionDefinition {
2076    invalid_mask: u64,
2077    region_mask: u64,
2078    region_bits: u32,
2079}
2080
2081impl RegionDefinition {
2082    fn region(&self, gpa: u64, len: u64) -> Result<(usize, u64), GuestMemoryBackingError> {
2083        if (gpa | len) & self.invalid_mask != 0 {
2084            return Err(GuestMemoryBackingError::new(
2085                GuestMemoryErrorKind::OutOfRange,
2086                gpa,
2087                OutOfRange,
2088            ));
2089        }
2090        let offset = gpa & self.region_mask;
2091        if offset.wrapping_add(len) & !self.region_mask != 0 {
2092            return Err(GuestMemoryBackingError::new(
2093                GuestMemoryErrorKind::OutOfRange,
2094                gpa,
2095                OutOfRange,
2096            ));
2097        }
2098        let index = (gpa >> self.region_bits) as usize;
2099        Ok((index, offset))
2100    }
2101}
2102
2103impl GuestMemoryInner {
2104    fn region(
2105        &self,
2106        gpa: u64,
2107        len: u64,
2108    ) -> Result<(&MemoryRegion, u64, usize), GuestMemoryBackingError> {
2109        let (index, offset) = self.region_def.region(gpa, len)?;
2110        let region = &self.regions[index];
2111        if offset + len > region.len {
2112            return Err(GuestMemoryBackingError::new(
2113                GuestMemoryErrorKind::OutOfRange,
2114                gpa,
2115                OutOfRange,
2116            ));
2117        }
2118        Ok((&self.regions[index], offset, index))
2119    }
2120}
2121
2122#[derive(Clone)]
2123pub struct LockedPages {
2124    pages: Box<[PagePtr]>,
2125    // maintain a reference to the backing memory
2126    _mem: Arc<GuestMemoryInner>,
2127}
2128
2129impl Debug for LockedPages {
2130    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
2131        f.debug_struct("LockedPages")
2132            .field("page_count", &self.pages.len())
2133            .finish()
2134    }
2135}
2136
2137#[derive(Copy, Clone, Debug)]
2138// Field is read via slice transmute and pointer casts, not actually dead.
2139struct PagePtr(#[expect(dead_code)] *const AtomicU8);
2140
2141// SAFETY: PagePtr is just a pointer with no methods and has no inherent safety
2142// constraints.
2143unsafe impl Send for PagePtr {}
2144// SAFETY: see above comment
2145unsafe impl Sync for PagePtr {}
2146
2147pub type Page = [AtomicU8; PAGE_SIZE];
2148
2149impl LockedPages {
2150    #[inline]
2151    pub fn pages(&self) -> &[&Page] {
2152        // SAFETY: PagePtr is just a pointer to a Page. The pages are kept alive by
2153        // the reference in _mem, and the lifetimes here ensure the LockedPages outlives
2154        // the slice.
2155        unsafe { std::slice::from_raw_parts(self.pages.as_ptr().cast::<&Page>(), self.pages.len()) }
2156    }
2157}
2158
2159impl<'a> AsRef<[&'a Page]> for &'a LockedPages {
2160    fn as_ref(&self) -> &[&'a Page] {
2161        self.pages()
2162    }
2163}
2164
2165/// Represents a range of locked guest pages as an ordered list of the VA sub-ranges
2166/// to which the guest pages are mapped.
2167/// The range may only partially span the first and last page and must fully span all
2168/// intermediate pages.
2169pub trait LockedRange {
2170    /// Adds a sub-range to this range.
2171    fn push_sub_range(&mut self, sub_range: &[AtomicU8]);
2172
2173    /// Removes and returns the last sub-range.
2174    fn pop_sub_range(&mut self) -> Option<(*const AtomicU8, usize)>;
2175}
2176
2177pub struct LockedRangeImpl<T: LockedRange> {
2178    _mem: Arc<GuestMemoryInner>,
2179    inner: T,
2180}
2181
2182impl<T: LockedRange> LockedRangeImpl<T> {
2183    pub fn get(&self) -> &T {
2184        &self.inner
2185    }
2186}
2187
2188impl<T: LockedRange> Drop for LockedRangeImpl<T> {
2189    fn drop(&mut self) {
2190        // FUTURE: Remove and unlock all sub ranges. This is currently
2191        // not necessary yet as only fully mapped VMs are supported.
2192        // while let Some(sub_range) = self.inner.pop_sub_range() {
2193        //     call self._mem to unlock the sub-range, individually or in batches
2194        // }
2195    }
2196}
2197
2198#[derive(Debug, Error)]
2199pub enum AccessError {
2200    #[error("memory access error")]
2201    Memory(#[from] GuestMemoryError),
2202    #[error("out of range: {0:#x} < {1:#x}")]
2203    OutOfRange(usize, usize),
2204    #[error("write attempted to read-only memory")]
2205    ReadOnly,
2206}
2207
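/// A reader over a sequence of bytes, advancing a cursor as data is consumed.
///
/// Implementations in this module include `&[u8]` and [`Limit`];
/// [`MemoryRead::limit`] caps the number of bytes that may be read.
///
/// # Example
///
/// A minimal sketch (assuming the crate is importable as `guestmem`), using
/// the `&[u8]` implementation:
///
/// ```no_run
/// use guestmem::MemoryRead;
///
/// let data = [1u8, 2, 3, 4, 0xaa, 0xbb];
/// let mut cursor: &[u8] = &data;
/// let head: [u8; 4] = cursor.read_plain().unwrap();
/// assert_eq!(head, [1, 2, 3, 4]);
/// assert_eq!(cursor.read_all().unwrap(), vec![0xaa, 0xbb]);
/// ```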
2208pub trait MemoryRead {
2209    fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError>;
2210    fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError>;
2211    fn len(&self) -> usize;
2212
2213    fn read_plain<T: IntoBytes + FromBytes + Immutable + KnownLayout>(
2214        &mut self,
2215    ) -> Result<T, AccessError> {
2216        let mut value: T = FromZeros::new_zeroed();
2217        self.read(value.as_mut_bytes())?;
2218        Ok(value)
2219    }
2220
2221    fn read_n<T: IntoBytes + FromBytes + Immutable + KnownLayout + Copy>(
2222        &mut self,
2223        len: usize,
2224    ) -> Result<Vec<T>, AccessError> {
2225        let mut value = vec![FromZeros::new_zeroed(); len];
2226        self.read(value.as_mut_bytes())?;
2227        Ok(value)
2228    }
2229
2230    fn read_all(&mut self) -> Result<Vec<u8>, AccessError> {
2231        let mut value = vec![0; self.len()];
2232        self.read(&mut value)?;
2233        Ok(value)
2234    }
2235
2236    fn limit(self, len: usize) -> Limit<Self>
2237    where
2238        Self: Sized,
2239    {
2240        let len = len.min(self.len());
2241        Limit { inner: self, len }
2242    }
2243}
2244
2245pub trait MemoryWrite {
2246    fn write(&mut self, data: &[u8]) -> Result<(), AccessError>;
2247    fn zero(&mut self, len: usize) -> Result<(), AccessError> {
2248        self.fill(0, len)
2249    }
2250    fn fill(&mut self, val: u8, len: usize) -> Result<(), AccessError>;
2251    fn len(&self) -> usize;
2252
2253    fn limit(self, len: usize) -> Limit<Self>
2254    where
2255        Self: Sized,
2256    {
2257        let len = len.min(self.len());
2258        Limit { inner: self, len }
2259    }
2260}
2261
2262impl MemoryRead for &'_ [u8] {
2263    fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError> {
2264        if self.len() < data.len() {
2265            return Err(AccessError::OutOfRange(self.len(), data.len()));
2266        }
2267        let (source, rest) = self.split_at(data.len());
2268        data.copy_from_slice(source);
2269        *self = rest;
2270        Ok(self)
2271    }
2272
2273    fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError> {
2274        if self.len() < len {
2275            return Err(AccessError::OutOfRange(self.len(), len));
2276        }
2277        *self = &self[len..];
2278        Ok(self)
2279    }
2280
2281    fn len(&self) -> usize {
2282        <[u8]>::len(self)
2283    }
2284}
2285
2286impl MemoryWrite for &mut [u8] {
2287    fn write(&mut self, data: &[u8]) -> Result<(), AccessError> {
2288        if self.len() < data.len() {
2289            return Err(AccessError::OutOfRange(self.len(), data.len()));
2290        }
2291        let (dest, rest) = std::mem::take(self).split_at_mut(data.len());
2292        dest.copy_from_slice(data);
2293        *self = rest;
2294        Ok(())
2295    }
2296
2297    fn fill(&mut self, val: u8, len: usize) -> Result<(), AccessError> {
2298        if self.len() < len {
2299            return Err(AccessError::OutOfRange(self.len(), len));
2300        }
2301        let (dest, rest) = std::mem::take(self).split_at_mut(len);
2302        dest.fill(val);
2303        *self = rest;
2304        Ok(())
2305    }
2306
2307    fn len(&self) -> usize {
2308        <[u8]>::len(self)
2309    }
2310}
2311
2312#[derive(Debug, Clone)]
2313pub struct Limit<T> {
2314    inner: T,
2315    len: usize,
2316}
2317
2318impl<T: MemoryRead> MemoryRead for Limit<T> {
2319    fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError> {
2320        let len = data.len();
2321        if len > self.len {
2322            return Err(AccessError::OutOfRange(self.len, len));
2323        }
2324        self.inner.read(data)?;
2325        self.len -= len;
2326        Ok(self)
2327    }
2328
2329    fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError> {
2330        if len > self.len {
2331            return Err(AccessError::OutOfRange(self.len, len));
2332        }
2333        self.inner.skip(len)?;
2334        self.len -= len;
2335        Ok(self)
2336    }
2337
2338    fn len(&self) -> usize {
2339        self.len
2340    }
2341}
2342
2343impl<T: MemoryWrite> MemoryWrite for Limit<T> {
2344    fn write(&mut self, data: &[u8]) -> Result<(), AccessError> {
2345        let len = data.len();
2346        if len > self.len {
2347            return Err(AccessError::OutOfRange(self.len, len));
2348        }
2349        self.inner.write(data)?;
2350        self.len -= len;
2351        Ok(())
2352    }
2353
2354    fn fill(&mut self, val: u8, len: usize) -> Result<(), AccessError> {
2355        if len > self.len {
2356            return Err(AccessError::OutOfRange(self.len, len));
2357        }
2358        self.inner.fill(val, len)?;
2359        self.len -= len;
2360        Ok(())
2361    }
2362
2363    fn len(&self) -> usize {
2364        self.len
2365    }
2366}
2367
2368/// Trait implemented to allow mapping and unmapping a region of memory at
2369/// a particular guest address.
2370pub trait MappableGuestMemory: Send + Sync {
2371    /// Maps the memory into the guest.
2372    ///
2373    /// `writable` specifies whether the guest can write to the memory region.
2374    /// If a guest tries to write to a non-writable region, the virtual
2375    /// processor will exit for MMIO handling.
2376    fn map_to_guest(&mut self, gpa: u64, writable: bool) -> io::Result<()>;
2377
2378    fn unmap_from_guest(&mut self);
2379}
2380
2381/// Trait implemented for a region of memory that can have memory mapped into
2382/// it.
2383pub trait MappedMemoryRegion: Send + Sync {
2384    /// Maps an object at `offset` in the region.
2385    ///
2386    /// Behaves like mmap--overwrites and splits existing mappings.
2387    fn map(
2388        &self,
2389        offset: usize,
2390        section: &dyn AsMappableRef,
2391        file_offset: u64,
2392        len: usize,
2393        writable: bool,
2394    ) -> io::Result<()>;
2395
2396    /// Unmaps any mappings in the specified range within the region.
2397    fn unmap(&self, offset: usize, len: usize) -> io::Result<()>;
2398}
2399
2400/// Trait implemented to allow the creation of memory regions.
2401pub trait MemoryMapper: Send + Sync {
2402    /// Creates a new memory region that can later be mapped into the guest.
2403    ///
2404    /// Returns both an interface for mapping/unmapping the region and for
2405    /// adding internal mappings.
2406    fn new_region(
2407        &self,
2408        len: usize,
2409        debug_name: String,
2410    ) -> io::Result<(Box<dyn MappableGuestMemory>, Arc<dyn MappedMemoryRegion>)>;
2411}
2412
2413/// Provides a mechanism to register for notifications (doorbells) on writes to specific addresses in guest memory.
2414pub trait DoorbellRegistration: Send + Sync {
2415    /// Register a doorbell event.
2416    fn register_doorbell(
2417        &self,
2418        guest_address: u64,
2419        value: Option<u64>,
2420        length: Option<u32>,
2421        event: &Event,
2422    ) -> io::Result<Box<dyn Send + Sync>>;
2423}
2424
2425/// Trait to map a ROM at one or more locations in guest memory.
2426pub trait MapRom: Send + Sync {
2427    /// Maps the specified portion of the ROM into guest memory at `gpa`.
2428    ///
2429    /// The returned object will implicitly unmap the ROM when dropped.
2430    fn map_rom(&self, gpa: u64, offset: u64, len: u64) -> io::Result<Box<dyn UnmapRom>>;
2431
2432    /// Returns the length of the ROM in bytes.
2433    fn len(&self) -> u64;
2434}
2435
2436/// Trait to unmap a ROM from guest memory.
2437pub trait UnmapRom: Send + Sync {
2438    /// Unmaps the ROM from guest memory.
2439    fn unmap_rom(self);
2440}
2441
2442#[cfg(test)]
2443#[expect(clippy::undocumented_unsafe_blocks)]
2444mod tests {
2445    use crate::GuestMemory;
2446    use crate::PAGE_SIZE64;
2447    use crate::PageFaultAction;
2448    use crate::PageFaultError;
2449    use sparse_mmap::SparseMapping;
2450    use std::ptr::NonNull;
2451    use std::sync::Arc;
2452    use thiserror::Error;
2453
2454    /// An implementation of the `GuestMemoryAccess` trait that expects all of
2455    /// guest memory to be mapped at a given base, with mmap or the Windows
2456    /// equivalent. Accesses to pages that are not backed by RAM will return an
2457    /// error.
2458    pub struct GuestMemoryMapping {
2459        mapping: SparseMapping,
2460        #[cfg(feature = "bitmap")]
2461        bitmap: Option<Vec<u8>>,
2462    }
2463
2464    unsafe impl crate::GuestMemoryAccess for GuestMemoryMapping {
2465        fn mapping(&self) -> Option<NonNull<u8>> {
2466            NonNull::new(self.mapping.as_ptr().cast())
2467        }
2468
2469        fn max_address(&self) -> u64 {
2470            self.mapping.len() as u64
2471        }
2472
2473        #[cfg(feature = "bitmap")]
2474        fn access_bitmap(&self) -> Option<crate::BitmapInfo> {
2475            self.bitmap.as_ref().map(|bm| crate::BitmapInfo {
2476                read_bitmap: NonNull::new(bm.as_ptr().cast_mut()).unwrap(),
2477                write_bitmap: NonNull::new(bm.as_ptr().cast_mut()).unwrap(),
2478                bit_offset: 0,
2479            })
2480        }
2481    }
2482
2483    const PAGE_SIZE: usize = 4096;
2484    const SIZE_1MB: usize = 1048576;
2485
2486    /// Create a test guest layout:
2487    /// 0           -> 1MB          RAM
2488    /// 1MB         -> 2MB          empty
2489    /// 2MB         -> 3MB          RAM
2490    /// 3MB         -> 3MB + 4K     empty
2491    /// 3MB + 4K    -> 4MB          RAM
2492    fn create_test_mapping() -> GuestMemoryMapping {
2493        let mapping = SparseMapping::new(SIZE_1MB * 4).unwrap();
2494        mapping.alloc(0, SIZE_1MB).unwrap();
2495        mapping.alloc(2 * SIZE_1MB, SIZE_1MB).unwrap();
2496        mapping
2497            .alloc(3 * SIZE_1MB + PAGE_SIZE, SIZE_1MB - PAGE_SIZE)
2498            .unwrap();
2499
2500        GuestMemoryMapping {
2501            mapping,
2502            #[cfg(feature = "bitmap")]
2503            bitmap: None,
2504        }
2505    }
2506
2507    #[test]
2508    fn test_basic_read_write() {
2509        let mapping = create_test_mapping();
2510        let gm = GuestMemory::new("test", mapping);
2511
2512        // Test reading at 0.
2513        let addr = 0;
2514        let result = gm.read_plain::<u8>(addr);
2515        assert_eq!(result.unwrap(), 0);
2516
2517        // Test read/write to first page
2518        let write_buffer = [1, 2, 3, 4, 5];
2519        let mut read_buffer = [0; 5];
2520        gm.write_at(0, &write_buffer).unwrap();
2521        gm.read_at(0, &mut read_buffer).unwrap();
2522        assert_eq!(write_buffer, read_buffer);
2523        assert_eq!(gm.read_plain::<u8>(0).unwrap(), 1);
2524        assert_eq!(gm.read_plain::<u8>(1).unwrap(), 2);
2525        assert_eq!(gm.read_plain::<u8>(2).unwrap(), 3);
2526        assert_eq!(gm.read_plain::<u8>(3).unwrap(), 4);
2527        assert_eq!(gm.read_plain::<u8>(4).unwrap(), 5);
2528
2529        // Test read/write to page at 2MB
2530        let addr = 2 * SIZE_1MB as u64;
2531        let write_buffer: Vec<u8> = (0..PAGE_SIZE).map(|x| x as u8).collect();
2532        let mut read_buffer: Vec<u8> = (0..PAGE_SIZE).map(|_| 0).collect();
2533        gm.write_at(addr, write_buffer.as_slice()).unwrap();
2534        gm.read_at(addr, read_buffer.as_mut_slice()).unwrap();
2535        assert_eq!(write_buffer, read_buffer);
2536
2537        // Test read/write of a full 1MB region (at the 2MB mapping)
2538        let write_buffer: Vec<u8> = (0..SIZE_1MB).map(|x| x as u8).collect();
2539        let mut read_buffer: Vec<u8> = (0..SIZE_1MB).map(|_| 0).collect();
2540        gm.write_at(addr, write_buffer.as_slice()).unwrap();
2541        gm.read_at(addr, read_buffer.as_mut_slice()).unwrap();
2542        assert_eq!(write_buffer, read_buffer);
2543
2544        // Test bad read at 1MB
2545        let addr = SIZE_1MB as u64;
2546        let result = gm.read_plain::<u8>(addr);
2547        assert!(result.is_err());
2548    }
2549
2550    #[test]
2551    fn test_multi() {
2552        let len = SIZE_1MB * 4;
2553        let mapping = SparseMapping::new(len).unwrap();
2554        mapping.alloc(0, len).unwrap();
2555        let mapping = Arc::new(GuestMemoryMapping {
2556            mapping,
2557            #[cfg(feature = "bitmap")]
2558            bitmap: None,
2559        });
2560        let region_len = 1 << 30;
2561        let gm = GuestMemory::new_multi_region(
2562            "test",
2563            region_len,
2564            vec![Some(mapping.clone()), None, Some(mapping.clone())],
2565        )
2566        .unwrap();
2567
2568        let mut b = [0];
2569        let len = len as u64;
2570        gm.read_at(0, &mut b).unwrap();
2571        gm.read_at(len, &mut b).unwrap_err();
2572        gm.read_at(region_len, &mut b).unwrap_err();
2573        gm.read_at(2 * region_len, &mut b).unwrap();
2574        gm.read_at(2 * region_len + len, &mut b).unwrap_err();
2575        gm.read_at(3 * region_len, &mut b).unwrap_err();
2576    }
2577
2578    #[cfg(feature = "bitmap")]
2579    #[test]
2580    fn test_bitmap() {
2581        let len = PAGE_SIZE * 4;
2582        let mapping = SparseMapping::new(len).unwrap();
2583        mapping.alloc(0, len).unwrap();
2584        let bitmap = vec![0b0101];
2585        let mapping = Arc::new(GuestMemoryMapping {
2586            mapping,
2587            bitmap: Some(bitmap),
2588        });
2589        let gm = GuestMemory::new("test", mapping);
2590
2591        gm.read_plain::<[u8; 1]>(0).unwrap();
2592        gm.read_plain::<[u8; 1]>(PAGE_SIZE64 - 1).unwrap();
2593        gm.read_plain::<[u8; 2]>(PAGE_SIZE64 - 1).unwrap_err();
2594        gm.read_plain::<[u8; 1]>(PAGE_SIZE64).unwrap_err();
2595        gm.read_plain::<[u8; 1]>(PAGE_SIZE64 * 2).unwrap();
2596        gm.read_plain::<[u8; PAGE_SIZE * 2]>(0).unwrap_err();
2597    }
2598
2599    struct FaultingMapping {
2600        mapping: SparseMapping,
2601    }
2602
2603    #[derive(Debug, Error)]
2604    #[error("fault")]
2605    struct Fault;
2606
2607    unsafe impl crate::GuestMemoryAccess for FaultingMapping {
2608        fn mapping(&self) -> Option<NonNull<u8>> {
2609            NonNull::new(self.mapping.as_ptr().cast())
2610        }
2611
2612        fn max_address(&self) -> u64 {
2613            self.mapping.len() as u64
2614        }
2615
2616        fn page_fault(
2617            &self,
2618            address: u64,
2619            _len: usize,
2620            write: bool,
2621            bitmap_failure: bool,
2622        ) -> PageFaultAction {
2623            assert!(!bitmap_failure);
2624            let qlen = self.mapping.len() as u64 / 4;
2625            if address < qlen || address >= 3 * qlen {
2626                return PageFaultAction::Fail(PageFaultError::other(Fault));
2627            }
2628            let page_address = (address as usize) & !(PAGE_SIZE - 1);
2629            if address >= 2 * qlen {
2630                if write {
2631                    return PageFaultAction::Fail(PageFaultError::other(Fault));
2632                }
2633                self.mapping.map_zero(page_address, PAGE_SIZE).unwrap();
2634            } else {
2635                self.mapping.alloc(page_address, PAGE_SIZE).unwrap();
2636            }
2637            PageFaultAction::Retry
2638        }
2639    }
2640
2641    impl FaultingMapping {
2642        fn new(len: usize) -> Self {
2643            let mapping = SparseMapping::new(len).unwrap();
2644            FaultingMapping { mapping }
2645        }
2646    }
2647
2648    #[test]
2649    fn test_fault() {
2650        let len = PAGE_SIZE * 4;
2651        let mapping = FaultingMapping::new(len);
2652        let gm = GuestMemory::new("test", mapping);
2653
2654        gm.write_plain::<u8>(0, &0).unwrap_err();
2655        gm.read_plain::<u8>(PAGE_SIZE64 - 1).unwrap_err();
2656        gm.read_plain::<u8>(PAGE_SIZE64).unwrap();
2657        gm.write_plain::<u8>(PAGE_SIZE64, &0).unwrap();
2658        gm.write_plain::<u16>(PAGE_SIZE64 * 3 - 1, &0).unwrap_err();
2659        gm.read_plain::<u16>(PAGE_SIZE64 * 3 - 1).unwrap_err();
2660        gm.read_plain::<u8>(PAGE_SIZE64 * 3 - 1).unwrap();
2661        gm.write_plain::<u8>(PAGE_SIZE64 * 3 - 1, &0).unwrap_err();
2662    }
2663
2664    #[test]
2665    fn test_allocated() {
2666        let mut gm = GuestMemory::allocate(0x10000);
2667        let pattern = [0x42; 0x10000];
2668        gm.write_at(0, &pattern).unwrap();
2669        assert_eq!(gm.inner_buf_mut().unwrap(), &pattern);
2670        gm.inner_buf().unwrap();
2671        let gm2 = gm.clone();
2672        assert!(gm.inner_buf_mut().is_none());
2673        gm.inner_buf().unwrap();
2674        let mut gm = gm.into_inner_buf().unwrap_err();
2675        drop(gm2);
2676        assert_eq!(gm.inner_buf_mut().unwrap(), &pattern);
2677        gm.into_inner_buf().unwrap();
2678    }
2679}