1#![expect(unsafe_code)]
8#![expect(missing_docs)]
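//! Abstractions for reading and writing guest memory: the [`GuestMemory`]
//! handle used by VMM and device code, and the unsafe [`GuestMemoryAccess`]
//! trait implemented by memory backings.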
9
10pub mod ranges;
11
12use self::ranges::PagedRange;
13use inspect::Inspect;
14use pal_event::Event;
15use sparse_mmap::AsMappableRef;
16use std::any::Any;
17use std::fmt::Debug;
18use std::io;
19use std::ops::Deref;
20use std::ops::DerefMut;
21use std::ops::Range;
22use std::ptr::NonNull;
23use std::sync::Arc;
24use std::sync::atomic::AtomicU8;
25use thiserror::Error;
26use zerocopy::FromBytes;
27use zerocopy::FromZeros;
28use zerocopy::Immutable;
29use zerocopy::IntoBytes;
30use zerocopy::KnownLayout;
31
32pub const PAGE_SIZE: usize = 4096;
34const PAGE_SIZE64: u64 = 4096;
35
36#[derive(Debug, Error)]
38#[error(transparent)]
39pub struct GuestMemoryError(Box<GuestMemoryErrorInner>);
40
41impl GuestMemoryError {
42 fn new(
43 debug_name: &Arc<str>,
44 range: Option<Range<u64>>,
45 op: GuestMemoryOperation,
46 err: GuestMemoryBackingError,
47 ) -> Self {
48 GuestMemoryError(Box::new(GuestMemoryErrorInner {
49 op,
50 debug_name: debug_name.clone(),
51 range,
52 gpa: (err.gpa != INVALID_ERROR_GPA).then_some(err.gpa),
53 kind: err.kind,
54 err: err.err,
55 }))
56 }
57
58 pub fn kind(&self) -> GuestMemoryErrorKind {
60 self.0.kind
61 }
62}
63
64#[derive(Debug, Copy, Clone)]
65enum GuestMemoryOperation {
66 Read,
67 Write,
68 Fill,
69 CompareExchange,
70 Lock,
71 Subrange,
72 Probe,
73}
74
75impl std::fmt::Display for GuestMemoryOperation {
76 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
77 f.pad(match self {
78 GuestMemoryOperation::Read => "read",
79 GuestMemoryOperation::Write => "write",
80 GuestMemoryOperation::Fill => "fill",
81 GuestMemoryOperation::CompareExchange => "compare exchange",
82 GuestMemoryOperation::Lock => "lock",
83 GuestMemoryOperation::Subrange => "subrange",
84 GuestMemoryOperation::Probe => "probe",
85 })
86 }
87}
88
89#[derive(Debug, Error)]
90struct GuestMemoryErrorInner {
91 op: GuestMemoryOperation,
92 debug_name: Arc<str>,
93 range: Option<Range<u64>>,
94 gpa: Option<u64>,
95 kind: GuestMemoryErrorKind,
96 #[source]
97 err: Box<dyn std::error::Error + Send + Sync>,
98}
99
100impl std::fmt::Display for GuestMemoryErrorInner {
101 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
102 write!(
103 f,
104 "guest memory '{debug_name}': {op} error: failed to access ",
105 debug_name = self.debug_name,
106 op = self.op
107 )?;
108 if let Some(range) = &self.range {
109 write!(f, "{:#x}-{:#x}", range.start, range.end)?;
110 } else {
111 f.write_str("memory")?;
112 }
113 if let Some(gpa) = self.gpa {
116 if self.range.as_ref().is_none_or(|range| range.start != gpa) {
117 write!(f, " at {:#x}", gpa)?;
118 }
119 }
120 Ok(())
121 }
122}
123
124#[derive(Debug)]
126pub struct GuestMemoryBackingError {
127 gpa: u64,
128 kind: GuestMemoryErrorKind,
129 err: Box<dyn std::error::Error + Send + Sync>,
130}
131
132#[derive(Debug, Copy, Clone, PartialEq, Eq)]
134#[non_exhaustive]
135pub enum GuestMemoryErrorKind {
136 Other,
138 OutOfRange,
140 VtlProtected,
142 NotPrivate,
144 NotShared,
146}
147
148pub struct PageFaultError {
150 kind: GuestMemoryErrorKind,
151 err: Box<dyn std::error::Error + Send + Sync>,
152}
153
154impl PageFaultError {
155 pub fn new(
157 kind: GuestMemoryErrorKind,
158 err: impl Into<Box<dyn std::error::Error + Send + Sync>>,
159 ) -> Self {
160 Self {
161 kind,
162 err: err.into(),
163 }
164 }
165
166 pub fn other(err: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> Self {
168 Self::new(GuestMemoryErrorKind::Other, err)
169 }
170}
171
172const INVALID_ERROR_GPA: u64 = !0;
175
176impl GuestMemoryBackingError {
177 pub fn new(
179 kind: GuestMemoryErrorKind,
180 gpa: u64,
181 err: impl Into<Box<dyn std::error::Error + Send + Sync>>,
182 ) -> Self {
183 Self {
187 kind,
188 gpa,
189 err: err.into(),
190 }
191 }
192
193 pub fn other(gpa: u64, err: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> Self {
195 Self::new(GuestMemoryErrorKind::Other, gpa, err)
196 }
197
198 fn gpn(err: InvalidGpn) -> Self {
199 Self {
200 kind: GuestMemoryErrorKind::OutOfRange,
201 gpa: INVALID_ERROR_GPA,
202 err: err.into(),
203 }
204 }
205}
206
207#[derive(Debug, Error)]
208#[error("no memory at address")]
209struct OutOfRange;
210
211#[derive(Debug, Error)]
212#[error("memory not lockable")]
213struct NotLockable;
214
215#[derive(Debug, Error)]
216#[error("no fallback for this operation")]
217struct NoFallback;
218
219#[derive(Debug, Error)]
220#[error("the specified page is not mapped")]
221struct NotMapped;
222
223#[derive(Debug, Error)]
224#[error("page inaccessible in bitmap")]
225struct BitmapFailure;
226
227pub trait LinearGuestMemory: GuestMemoryAccess {}
236
237unsafe impl GuestMemoryAccess for sparse_mmap::alloc::SharedMem {
239 fn mapping(&self) -> Option<NonNull<u8>> {
240 NonNull::new(self.as_ptr().cast_mut().cast())
241 }
242
243 fn max_address(&self) -> u64 {
244 self.len() as u64
245 }
246}
247
248impl LinearGuestMemory for sparse_mmap::alloc::SharedMem {}
249
250pub struct AlignedHeapMemory {
252 pages: Box<[AlignedPage]>,
253}
254
255impl Debug for AlignedHeapMemory {
256 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
257 f.debug_struct("AlignedHeapMemory")
258 .field("len", &self.len())
259 .finish()
260 }
261}
262
263#[repr(C, align(4096))]
264struct AlignedPage([AtomicU8; PAGE_SIZE]);
265
266impl AlignedHeapMemory {
267 pub fn new(size: usize) -> Self {
#[expect(clippy::declare_interior_mutable_const)]
const ZERO: AtomicU8 = AtomicU8::new(0);
271 #[expect(clippy::declare_interior_mutable_const)]
272 const ZERO_PAGE: AlignedPage = AlignedPage([ZERO; PAGE_SIZE]);
273 let mut pages = Vec::new();
274 pages.resize_with(size.div_ceil(PAGE_SIZE), || ZERO_PAGE);
275 Self {
276 pages: pages.into(),
277 }
278 }
279
280 pub fn len(&self) -> usize {
282 self.pages.len() * PAGE_SIZE
283 }
284
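/// Returns the memory as a plain byte slice.
///
/// This takes `&mut self` so that the exclusive borrow guarantees there are
/// no concurrent writers, making the non-atomic byte view sound.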
285 pub fn as_bytes(&mut self) -> &[u8] {
290 self.as_mut()
291 }
292
293 pub fn as_mut_bytes(&mut self) -> &mut [u8] {
295 self.as_mut()
296 }
297}
298
299impl Deref for AlignedHeapMemory {
300 type Target = [AtomicU8];
301
302 fn deref(&self) -> &Self::Target {
303 unsafe { std::slice::from_raw_parts(self.pages.as_ptr().cast(), self.len()) }
305 }
306}
307
308impl DerefMut for AlignedHeapMemory {
309 fn deref_mut(&mut self) -> &mut Self::Target {
310 unsafe { std::slice::from_raw_parts_mut(self.pages.as_mut_ptr().cast(), self.len()) }
312 }
313}
314
315impl AsRef<[AtomicU8]> for AlignedHeapMemory {
316 fn as_ref(&self) -> &[AtomicU8] {
317 self
318 }
319}
320
321impl AsMut<[AtomicU8]> for AlignedHeapMemory {
322 fn as_mut(&mut self) -> &mut [AtomicU8] {
323 self
324 }
325}
326
327impl AsMut<[u8]> for AlignedHeapMemory {
328 fn as_mut(&mut self) -> &mut [u8] {
329 unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr().cast(), self.len()) }
333 }
334}
335
336unsafe impl GuestMemoryAccess for AlignedHeapMemory {
339 fn mapping(&self) -> Option<NonNull<u8>> {
340 NonNull::new(self.pages.as_ptr().cast_mut().cast())
341 }
342
343 fn max_address(&self) -> u64 {
344 (self.pages.len() * PAGE_SIZE) as u64
345 }
346}
347
348impl LinearGuestMemory for AlignedHeapMemory {}
349
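/// Implemented by backings that provide guest memory to [`GuestMemory`].
///
/// # Safety
///
/// [`GuestMemory`] accesses the pointer returned by `mapping()` at any offset
/// below `max_address()`, from multiple threads, for as long as the object is
/// alive. The implementor must keep that virtual address range reserved and
/// owned for that whole time, reporting pages that cannot actually be
/// accessed through `page_fault` or the access bitmap.
///
/// # Example
///
/// A sketch of a delegating backing that overrides `base_iova`. `IovaBacked`
/// is a hypothetical wrapper shown for illustration only (not compiled as a
/// doctest); a real wrapper would forward the remaining methods as well.
///
/// ```ignore
/// use std::ptr::NonNull;
///
/// struct IovaBacked<T> {
///     inner: T,
///     iova_base: u64,
/// }
///
/// // SAFETY: the mapping and length are delegated unchanged to `inner`,
/// // which must itself uphold the `GuestMemoryAccess` contract.
/// unsafe impl<T: GuestMemoryAccess> GuestMemoryAccess for IovaBacked<T> {
///     fn mapping(&self) -> Option<NonNull<u8>> {
///         self.inner.mapping()
///     }
///
///     fn max_address(&self) -> u64 {
///         self.inner.max_address()
///     }
///
///     fn base_iova(&self) -> Option<u64> {
///         Some(self.iova_base)
///     }
/// }
/// ```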
350pub unsafe trait GuestMemoryAccess: 'static + Send + Sync {
366 fn mapping(&self) -> Option<NonNull<u8>>;
373
374 fn max_address(&self) -> u64;
377
378 #[cfg(feature = "bitmap")]
396 fn access_bitmap(&self) -> Option<BitmapInfo> {
397 None
398 }
399
400 fn subrange(
403 &self,
404 offset: u64,
405 len: u64,
406 allow_preemptive_locking: bool,
407 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
408 let _ = (offset, len, allow_preemptive_locking);
409 Ok(None)
410 }
411
412 fn page_fault(
422 &self,
423 address: u64,
424 len: usize,
425 write: bool,
426 bitmap_failure: bool,
427 ) -> PageFaultAction {
428 let _ = (address, len, write);
429 let err = if bitmap_failure {
430 PageFaultError::other(BitmapFailure)
431 } else {
432 PageFaultError::other(NotMapped)
433 };
434 PageFaultAction::Fail(err)
435 }
436
437 unsafe fn read_fallback(
450 &self,
451 addr: u64,
452 dest: *mut u8,
453 len: usize,
454 ) -> Result<(), GuestMemoryBackingError> {
455 let _ = (dest, len);
456 Err(GuestMemoryBackingError::other(addr, NoFallback))
457 }
458
459 unsafe fn write_fallback(
469 &self,
470 addr: u64,
471 src: *const u8,
472 len: usize,
473 ) -> Result<(), GuestMemoryBackingError> {
474 let _ = (src, len);
475 Err(GuestMemoryBackingError::other(addr, NoFallback))
476 }
477
478 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
483 let _ = (val, len);
484 Err(GuestMemoryBackingError::other(addr, NoFallback))
485 }
486
487 fn compare_exchange_fallback(
494 &self,
495 addr: u64,
496 current: &mut [u8],
497 new: &[u8],
498 ) -> Result<bool, GuestMemoryBackingError> {
499 let _ = (current, new);
500 Err(GuestMemoryBackingError::other(addr, NoFallback))
501 }
502
503 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
509 let _ = (address, len);
510 Ok(())
511 }
512
513 fn base_iova(&self) -> Option<u64> {
519 None
520 }
521
522 fn lock_gpns(&self, gpns: &[u64]) -> Result<bool, GuestMemoryBackingError> {
527 let _ = gpns;
528 Ok(false)
529 }
530
531 fn unlock_gpns(&self, gpns: &[u64]) {
537 let _ = gpns;
538 }
539}
540
541trait DynGuestMemoryAccess: 'static + Send + Sync + Any {
542 fn subrange(
543 &self,
544 offset: u64,
545 len: u64,
546 allow_preemptive_locking: bool,
547 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError>;
548
549 fn page_fault(
550 &self,
551 address: u64,
552 len: usize,
553 write: bool,
554 bitmap_failure: bool,
555 ) -> PageFaultAction;
556
557 unsafe fn read_fallback(
560 &self,
561 addr: u64,
562 dest: *mut u8,
563 len: usize,
564 ) -> Result<(), GuestMemoryBackingError>;
565
566 unsafe fn write_fallback(
569 &self,
570 addr: u64,
571 src: *const u8,
572 len: usize,
573 ) -> Result<(), GuestMemoryBackingError>;
574
575 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError>;
576
577 fn compare_exchange_fallback(
578 &self,
579 addr: u64,
580 current: &mut [u8],
581 new: &[u8],
582 ) -> Result<bool, GuestMemoryBackingError>;
583
584 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError>;
585
586 fn lock_gpns(&self, gpns: &[u64]) -> Result<bool, GuestMemoryBackingError>;
587
588 fn unlock_gpns(&self, gpns: &[u64]);
589}
590
591impl<T: GuestMemoryAccess> DynGuestMemoryAccess for T {
592 fn subrange(
593 &self,
594 offset: u64,
595 len: u64,
596 allow_preemptive_locking: bool,
597 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
598 self.subrange(offset, len, allow_preemptive_locking)
599 }
600
601 fn page_fault(
602 &self,
603 address: u64,
604 len: usize,
605 write: bool,
606 bitmap_failure: bool,
607 ) -> PageFaultAction {
608 self.page_fault(address, len, write, bitmap_failure)
609 }
610
611 unsafe fn read_fallback(
612 &self,
613 addr: u64,
614 dest: *mut u8,
615 len: usize,
616 ) -> Result<(), GuestMemoryBackingError> {
617 unsafe { self.read_fallback(addr, dest, len) }
619 }
620
621 unsafe fn write_fallback(
622 &self,
623 addr: u64,
624 src: *const u8,
625 len: usize,
626 ) -> Result<(), GuestMemoryBackingError> {
627 unsafe { self.write_fallback(addr, src, len) }
629 }
630
631 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
632 self.fill_fallback(addr, val, len)
633 }
634
635 fn compare_exchange_fallback(
636 &self,
637 addr: u64,
638 current: &mut [u8],
639 new: &[u8],
640 ) -> Result<bool, GuestMemoryBackingError> {
641 self.compare_exchange_fallback(addr, current, new)
642 }
643
644 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
645 self.expose_va(address, len)
646 }
647
648 fn lock_gpns(&self, gpns: &[u64]) -> Result<bool, GuestMemoryBackingError> {
649 self.lock_gpns(gpns)
650 }
651
652 fn unlock_gpns(&self, gpns: &[u64]) {
653 self.unlock_gpns(gpns)
654 }
655}
656
657pub enum PageFaultAction {
660 Fail(PageFaultError),
662 Retry,
664 Fallback,
666}
667
668#[cfg(feature = "bitmap")]
670pub struct BitmapInfo {
671 pub read_bitmap: NonNull<u8>,
673 pub write_bitmap: NonNull<u8>,
675 pub bit_offset: u8,
680}
681
682unsafe impl<T: GuestMemoryAccess> GuestMemoryAccess for Arc<T> {
684 fn mapping(&self) -> Option<NonNull<u8>> {
685 self.as_ref().mapping()
686 }
687
688 fn max_address(&self) -> u64 {
689 self.as_ref().max_address()
690 }
691
692 #[cfg(feature = "bitmap")]
693 fn access_bitmap(&self) -> Option<BitmapInfo> {
694 self.as_ref().access_bitmap()
695 }
696
697 fn subrange(
698 &self,
699 offset: u64,
700 len: u64,
701 allow_preemptive_locking: bool,
702 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
703 self.as_ref()
704 .subrange(offset, len, allow_preemptive_locking)
705 }
706
707 fn page_fault(
708 &self,
709 addr: u64,
710 len: usize,
711 write: bool,
712 bitmap_failure: bool,
713 ) -> PageFaultAction {
714 self.as_ref().page_fault(addr, len, write, bitmap_failure)
715 }
716
717 unsafe fn read_fallback(
718 &self,
719 addr: u64,
720 dest: *mut u8,
721 len: usize,
722 ) -> Result<(), GuestMemoryBackingError> {
723 unsafe { self.as_ref().read_fallback(addr, dest, len) }
725 }
726
727 unsafe fn write_fallback(
728 &self,
729 addr: u64,
730 src: *const u8,
731 len: usize,
732 ) -> Result<(), GuestMemoryBackingError> {
733 unsafe { self.as_ref().write_fallback(addr, src, len) }
735 }
736
737 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
738 self.as_ref().fill_fallback(addr, val, len)
739 }
740
741 fn compare_exchange_fallback(
742 &self,
743 addr: u64,
744 current: &mut [u8],
745 new: &[u8],
746 ) -> Result<bool, GuestMemoryBackingError> {
747 self.as_ref().compare_exchange_fallback(addr, current, new)
748 }
749
750 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
751 self.as_ref().expose_va(address, len)
752 }
753
754 fn base_iova(&self) -> Option<u64> {
755 self.as_ref().base_iova()
756 }
757}
758
759unsafe impl GuestMemoryAccess for sparse_mmap::SparseMapping {
761 fn mapping(&self) -> Option<NonNull<u8>> {
762 NonNull::new(self.as_ptr().cast())
763 }
764
765 fn max_address(&self) -> u64 {
766 self.len() as u64
767 }
768}
769
770struct GuestMemoryAccessRange {
772 base: Arc<GuestMemoryInner>,
773 offset: u64,
774 len: u64,
775 region: usize,
776}
777
778impl GuestMemoryAccessRange {
779 fn adjust_range(&self, address: u64, len: u64) -> Result<u64, GuestMemoryBackingError> {
780 if address <= self.len && len <= self.len - address {
781 Ok(self.offset + address)
782 } else {
783 Err(GuestMemoryBackingError::new(
784 GuestMemoryErrorKind::OutOfRange,
785 address,
786 OutOfRange,
787 ))
788 }
789 }
790}
791
792unsafe impl GuestMemoryAccess for GuestMemoryAccessRange {
794 fn mapping(&self) -> Option<NonNull<u8>> {
795 let region = &self.base.regions[self.region];
796 region.mapping.and_then(|mapping| {
797 let offset = self.offset & self.base.region_def.region_mask;
798 assert!(region.len >= offset + self.len);
800 NonNull::new(unsafe { mapping.0.as_ptr().add(offset as usize) })
803 })
804 }
805
806 fn max_address(&self) -> u64 {
807 self.len
808 }
809
810 #[cfg(feature = "bitmap")]
811 fn access_bitmap(&self) -> Option<BitmapInfo> {
812 let region = &self.base.regions[self.region];
813 region.bitmaps.map(|bitmaps| {
814 let offset = self.offset & self.base.region_def.region_mask;
815 let bit_offset = region.bitmap_start as u64 + offset / PAGE_SIZE64;
816 let [read_bitmap, write_bitmap] = bitmaps.map(|SendPtrU8(ptr)| {
817 NonNull::new(unsafe { ptr.as_ptr().add((bit_offset / 8) as usize) }).unwrap()
820 });
821 let bitmap_start = (bit_offset % 8) as u8;
822 BitmapInfo {
823 read_bitmap,
824 write_bitmap,
825 bit_offset: bitmap_start,
826 }
827 })
828 }
829
830 fn subrange(
831 &self,
832 offset: u64,
833 len: u64,
834 _allow_preemptive_locking: bool,
835 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
836 let address = self.adjust_range(offset, len)?;
837 Ok(Some(GuestMemory::new(
838 self.base.debug_name.clone(),
839 GuestMemoryAccessRange {
840 base: self.base.clone(),
841 offset: address,
842 len,
843 region: self.region,
844 },
845 )))
846 }
847
848 fn page_fault(
849 &self,
850 address: u64,
851 len: usize,
852 write: bool,
853 bitmap_failure: bool,
854 ) -> PageFaultAction {
855 let address = self
856 .adjust_range(address, len as u64)
857 .expect("the caller should have validated the range was in the mapping");
858
859 self.base
860 .imp
861 .page_fault(address, len, write, bitmap_failure)
862 }
863
864 unsafe fn write_fallback(
865 &self,
866 address: u64,
867 src: *const u8,
868 len: usize,
869 ) -> Result<(), GuestMemoryBackingError> {
870 let address = self.adjust_range(address, len as u64)?;
871 unsafe { self.base.imp.write_fallback(address, src, len) }
873 }
874
875 fn fill_fallback(
876 &self,
877 address: u64,
878 val: u8,
879 len: usize,
880 ) -> Result<(), GuestMemoryBackingError> {
881 let address = self.adjust_range(address, len as u64)?;
882 self.base.imp.fill_fallback(address, val, len)
883 }
884
885 fn compare_exchange_fallback(
886 &self,
887 addr: u64,
888 current: &mut [u8],
889 new: &[u8],
890 ) -> Result<bool, GuestMemoryBackingError> {
891 let address = self.adjust_range(addr, new.len() as u64)?;
892 self.base
893 .imp
894 .compare_exchange_fallback(address, current, new)
895 }
896
897 unsafe fn read_fallback(
898 &self,
899 address: u64,
900 dest: *mut u8,
901 len: usize,
902 ) -> Result<(), GuestMemoryBackingError> {
903 let address = self.adjust_range(address, len as u64)?;
904 unsafe { self.base.imp.read_fallback(address, dest, len) }
906 }
907
908 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
909 let address = self.adjust_range(address, len)?;
910 self.base.imp.expose_va(address, len)
911 }
912
913 fn base_iova(&self) -> Option<u64> {
914 let region = &self.base.regions[self.region];
915 Some(region.base_iova? + (self.offset & self.base.region_def.region_mask))
916 }
917}
918
919fn create_memory_subrange(
922 base: Arc<GuestMemoryInner>,
923 offset: u64,
924 len: u64,
925 _allow_preemptive_locking: bool,
926) -> Result<GuestMemory, GuestMemoryBackingError> {
927 let (_, _, region) = base.region(offset, len)?;
928 Ok(GuestMemory::new(
929 base.debug_name.clone(),
930 GuestMemoryAccessRange {
931 base,
932 offset,
933 len,
934 region,
935 },
936 ))
937}
938
939struct MultiRegionGuestMemoryAccess<T> {
940 imps: Vec<Option<T>>,
941 region_def: RegionDefinition,
942}
943
944impl<T> MultiRegionGuestMemoryAccess<T> {
945 fn region(&self, gpa: u64, len: u64) -> Result<(&T, u64), GuestMemoryBackingError> {
946 let (i, offset) = self.region_def.region(gpa, len)?;
947 let imp = self.imps[i].as_ref().ok_or(GuestMemoryBackingError::new(
948 GuestMemoryErrorKind::OutOfRange,
949 gpa,
950 OutOfRange,
951 ))?;
952 Ok((imp, offset))
953 }
954}
955
956impl<T: GuestMemoryAccess> DynGuestMemoryAccess for MultiRegionGuestMemoryAccess<T> {
958 fn subrange(
959 &self,
960 offset: u64,
961 len: u64,
962 allow_preemptive_locking: bool,
963 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
964 let (region, offset_in_region) = self.region(offset, len)?;
965 region.subrange(offset_in_region, len, allow_preemptive_locking)
966 }
967
968 unsafe fn read_fallback(
969 &self,
970 addr: u64,
971 dest: *mut u8,
972 len: usize,
973 ) -> Result<(), GuestMemoryBackingError> {
974 let (region, offset_in_region) = self.region(addr, len as u64)?;
975 unsafe { region.read_fallback(offset_in_region, dest, len) }
977 }
978
979 unsafe fn write_fallback(
980 &self,
981 addr: u64,
982 src: *const u8,
983 len: usize,
984 ) -> Result<(), GuestMemoryBackingError> {
985 let (region, offset_in_region) = self.region(addr, len as u64)?;
986 unsafe { region.write_fallback(offset_in_region, src, len) }
988 }
989
990 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
991 let (region, offset_in_region) = self.region(addr, len as u64)?;
992 region.fill_fallback(offset_in_region, val, len)
993 }
994
995 fn compare_exchange_fallback(
996 &self,
997 addr: u64,
998 current: &mut [u8],
999 new: &[u8],
1000 ) -> Result<bool, GuestMemoryBackingError> {
1001 let (region, offset_in_region) = self.region(addr, new.len() as u64)?;
1002 region.compare_exchange_fallback(offset_in_region, current, new)
1003 }
1004
1005 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
1006 let (region, offset_in_region) = self.region(address, len)?;
1007 region.expose_va(offset_in_region, len)
1008 }
1009
1010 fn page_fault(
1011 &self,
1012 address: u64,
1013 len: usize,
1014 write: bool,
1015 bitmap_failure: bool,
1016 ) -> PageFaultAction {
1017 match self.region(address, len as u64) {
1018 Ok((region, offset_in_region)) => {
1019 region.page_fault(offset_in_region, len, write, bitmap_failure)
1020 }
1021 Err(err) => PageFaultAction::Fail(PageFaultError {
1022 kind: err.kind,
1023 err: err.err,
1024 }),
1025 }
1026 }
1027
1028 fn lock_gpns(&self, gpns: &[u64]) -> Result<bool, GuestMemoryBackingError> {
1029 let mut ret = false;
1030 for gpn in gpns {
1031 let (region, offset_in_region) = self.region(gpn * PAGE_SIZE64, PAGE_SIZE64)?;
1032 ret |= region.lock_gpns(&[offset_in_region / PAGE_SIZE64])?;
1033 }
1034 Ok(ret)
1035 }
1036
1037 fn unlock_gpns(&self, gpns: &[u64]) {
1038 for gpn in gpns {
1039 let (region, offset_in_region) = self.region(gpn * PAGE_SIZE64, PAGE_SIZE64).unwrap();
1040 region.unlock_gpns(&[offset_in_region / PAGE_SIZE64]);
1041 }
1042 }
1043}
1044
1045#[derive(Debug, Clone, Inspect)]
1051#[inspect(skip)]
1052pub struct GuestMemory {
1053 inner: Arc<GuestMemoryInner>,
1054}
1055
1056struct GuestMemoryInner<T: ?Sized = dyn DynGuestMemoryAccess> {
1057 region_def: RegionDefinition,
1058 regions: Vec<MemoryRegion>,
1059 debug_name: Arc<str>,
1060 allocated: bool,
1061 imp: T,
1062}
1063
1064impl<T: ?Sized> Debug for GuestMemoryInner<T> {
1065 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1066 f.debug_struct("GuestMemoryInner")
1067 .field("region_def", &self.region_def)
1068 .field("regions", &self.regions)
1069 .finish()
1070 }
1071}
1072
1073#[derive(Debug, Copy, Clone, Default)]
1074struct MemoryRegion {
1075 mapping: Option<SendPtrU8>,
1076 #[cfg(feature = "bitmap")]
1077 bitmaps: Option<[SendPtrU8; 2]>,
1078 #[cfg(feature = "bitmap")]
1079 bitmap_start: u8,
1080 len: u64,
1081 base_iova: Option<u64>,
1082}
1083
1084#[derive(Debug, Copy, Clone, PartialEq, Eq)]
1086enum AccessType {
1087 Read = 0,
1088 Write = 1,
1089}
1090
1091#[derive(Debug, Copy, Clone)]
1103struct SendPtrU8(NonNull<u8>);
1104
1105unsafe impl Send for SendPtrU8 {}
1107unsafe impl Sync for SendPtrU8 {}
1109
1110impl MemoryRegion {
1111 fn new(imp: &impl GuestMemoryAccess) -> Self {
1112 #[cfg(feature = "bitmap")]
1113 let (bitmaps, bitmap_start) = {
1114 let bitmap_info = imp.access_bitmap();
1115 let bitmaps = bitmap_info
1116 .as_ref()
1117 .map(|bm| [SendPtrU8(bm.read_bitmap), SendPtrU8(bm.write_bitmap)]);
1118 let bitmap_start = bitmap_info.map_or(0, |bi| bi.bit_offset);
1119 (bitmaps, bitmap_start)
1120 };
1121 Self {
1122 mapping: imp.mapping().map(SendPtrU8),
1123 #[cfg(feature = "bitmap")]
1124 bitmaps,
1125 #[cfg(feature = "bitmap")]
1126 bitmap_start,
1127 len: imp.max_address(),
1128 base_iova: imp.base_iova(),
1129 }
1130 }
1131
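/// Checks whether `len` bytes at `offset` are accessible for `access_type`,
/// consulting the access bitmap when the `bitmap` feature is enabled.
///
/// On failure, returns `Err(n)`, where `n` is the byte offset into the
/// requested range at which the first inaccessible page begins (0 if the
/// failure is in the first page).
///
/// `unsafe` because it dereferences the raw bitmap pointers; the caller must
/// also ensure the range fits within the region (debug-asserted below).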
1132 unsafe fn check_access(
1138 &self,
1139 access_type: AccessType,
1140 offset: u64,
1141 len: u64,
1142 ) -> Result<(), u64> {
1143 debug_assert!(self.len >= offset + len);
1144 #[cfg(not(feature = "bitmap"))]
1145 let _ = access_type;
1146
1147 #[cfg(feature = "bitmap")]
1148 if let Some(bitmaps) = &self.bitmaps {
1149 let SendPtrU8(bitmap) = bitmaps[access_type as usize];
1150 let start = offset / PAGE_SIZE64;
1151 let end = (offset + len - 1) / PAGE_SIZE64;
1152 for gpn in start..=end {
1155 let bit_offset = self.bitmap_start as u64 + gpn;
1156 let bit = unsafe {
1159 (*bitmap
1160 .as_ptr()
1161 .cast_const()
1162 .cast::<AtomicU8>()
1163 .add(bit_offset as usize / 8))
1164 .load(std::sync::atomic::Ordering::Relaxed)
1165 & (1 << (bit_offset % 8))
1166 };
1167 if bit == 0 {
1168 return Err((gpn * PAGE_SIZE64).saturating_sub(offset));
1169 }
1170 }
1171 }
1172 Ok(())
1173 }
1174}
1175
1176impl Default for GuestMemory {
1178 fn default() -> Self {
1179 Self::empty()
1180 }
1181}
1182
1183struct Empty;
1184
1185unsafe impl GuestMemoryAccess for Empty {
1187 fn mapping(&self) -> Option<NonNull<u8>> {
1188 None
1189 }
1190
1191 fn max_address(&self) -> u64 {
1192 0
1193 }
1194}
1195
1196#[derive(Debug, Error)]
1197pub enum MultiRegionError {
1198 #[error("region size {0:#x} is not a power of 2")]
1199 NotPowerOfTwo(u64),
1200 #[error("region size {0:#x} is smaller than a page")]
1201 RegionSizeTooSmall(u64),
1202 #[error(
1203 "too many regions ({region_count}) for region size {region_size:#x}; max is {max_region_count}"
1204 )]
1205 TooManyRegions {
1206 region_count: usize,
1207 max_region_count: usize,
1208 region_size: u64,
1209 },
1210 #[error("backing size {backing_size:#x} is too large for region size {region_size:#x}")]
1211 BackingTooLarge { backing_size: u64, region_size: u64 },
1212}
1213
1214#[cfg(feature = "bitmap")]
1222pub fn rcu() -> minircu::RcuDomain {
1223 minircu::global()
1225}
1226
1227impl GuestMemory {
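/// Creates a new `GuestMemory` over the given backing, with a debug name
/// that is included in error messages.
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest):
///
/// ```ignore
/// let mem = GuestMemory::new("vm1", AlignedHeapMemory::new(0x20000));
/// mem.write_at(0x1000, &[1, 2, 3, 4])?;
/// ```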
1228 pub fn new(debug_name: impl Into<Arc<str>>, imp: impl GuestMemoryAccess) -> Self {
1233 if imp.mapping().is_some() && !cfg!(miri) {
1238 sparse_mmap::initialize_try_copy();
1239 }
1240 Self::new_inner(debug_name.into(), imp, false)
1241 }
1242
1243 fn new_inner(debug_name: Arc<str>, imp: impl GuestMemoryAccess, allocated: bool) -> Self {
1244 let regions = vec![MemoryRegion::new(&imp)];
1245 Self {
1246 inner: Arc::new(GuestMemoryInner {
1247 imp,
1248 debug_name,
1249 region_def: RegionDefinition {
1250 invalid_mask: 1 << 63,
1251 region_mask: !0 >> 1,
region_bits: 63,
},
1254 regions,
1255 allocated,
1256 }),
1257 }
1258 }
1259
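/// Creates a `GuestMemory` from multiple equally sized regions.
///
/// `region_size` must be a power of two, at least a page, and at least as
/// large as every backing. Entry `i` of `imps` covers guest addresses
/// starting at `i * region_size`; `None` entries are holes, and accesses to
/// them fail with an out-of-range error.
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest):
///
/// ```ignore
/// let low = Arc::new(AlignedHeapMemory::new(0x10000));
/// let high = Arc::new(AlignedHeapMemory::new(0x10000));
/// // Region 1 is left as a hole; accesses there fail as out of range.
/// let mem = GuestMemory::new_multi_region(
///     "split",
///     0x100000, // 1 MiB per region
///     vec![Some(low), None, Some(high)],
/// )?;
/// mem.write_at(2 * 0x100000, &[0xff; 16])?; // lands in `high`
/// ```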
1260 pub fn new_multi_region(
1276 debug_name: impl Into<Arc<str>>,
1277 region_size: u64,
1278 mut imps: Vec<Option<impl GuestMemoryAccess>>,
1279 ) -> Result<Self, MultiRegionError> {
1280 sparse_mmap::initialize_try_copy();
1282
1283 if !region_size.is_power_of_two() {
1284 return Err(MultiRegionError::NotPowerOfTwo(region_size));
1285 }
1286 if region_size < PAGE_SIZE64 {
1287 return Err(MultiRegionError::RegionSizeTooSmall(region_size));
1288 }
1289 let region_bits = region_size.trailing_zeros();
1290
1291 let max_region_count = 1 << (63 - region_bits);
1292
1293 let region_count = imps.len().next_power_of_two();
1294 if region_count > max_region_count {
1295 return Err(MultiRegionError::TooManyRegions {
1296 region_count,
1297 max_region_count,
1298 region_size,
1299 });
1300 }
1301
1302 let valid_bits = region_bits + region_count.trailing_zeros();
1303 assert!(valid_bits < 64);
1304 let invalid_mask = !0 << valid_bits;
1305
1306 let mut regions = vec![MemoryRegion::default(); region_count];
1307 for (imp, region) in imps.iter().zip(&mut regions) {
1308 let Some(imp) = imp else { continue };
1309 let backing_size = imp.max_address();
1310 if backing_size > region_size {
1311 return Err(MultiRegionError::BackingTooLarge {
1312 backing_size,
1313 region_size,
1314 });
1315 }
1316 *region = MemoryRegion::new(imp);
1317 }
1318
1319 let region_def = RegionDefinition {
1320 invalid_mask,
1321 region_mask: region_size - 1,
1322 region_bits,
1323 };
1324
1325 imps.resize_with(region_count, || None);
1326 let imp = MultiRegionGuestMemoryAccess { imps, region_def };
1327
1328 let inner = GuestMemoryInner {
1329 debug_name: debug_name.into(),
1330 region_def,
1331 regions,
1332 imp,
1333 allocated: false,
1334 };
1335
1336 Ok(Self {
1337 inner: Arc::new(inner),
1338 })
1339 }
1340
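/// Allocates a page-aligned, zero-initialized `GuestMemory` on the heap.
///
/// The size is rounded up to a multiple of the page size. The underlying
/// buffer can be recovered with [`Self::inner_buf`], [`Self::inner_buf_mut`],
/// or [`Self::into_inner_buf`].
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest):
///
/// ```ignore
/// let mut mem = GuestMemory::allocate(0x4000);
/// mem.write_at(0, &[0xab; 8])?;
/// assert_eq!(&mem.inner_buf_mut().unwrap()[..8], &[0xab; 8]);
/// ```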
1341 pub fn allocate(size: usize) -> Self {
1351 Self::new_inner("heap".into(), AlignedHeapMemory::new(size), true)
1352 }
1353
1354 pub fn into_inner_buf(self) -> Result<AlignedHeapMemory, Self> {
1360 if !self.inner.allocated {
1361 return Err(self);
1362 }
1363 let inner = unsafe {
1366 Arc::<GuestMemoryInner<AlignedHeapMemory>>::from_raw(Arc::into_raw(self.inner).cast())
1367 };
1368 let inner = Arc::try_unwrap(inner).map_err(|inner| Self { inner })?;
1369 Ok(inner.imp)
1370 }
1371
1372 pub fn inner_buf(&self) -> Option<&[AtomicU8]> {
1375 if !self.inner.allocated {
1376 return None;
1377 }
1378 let inner = unsafe { &*core::ptr::from_ref(&self.inner.imp).cast::<AlignedHeapMemory>() };
1381 Some(inner)
1382 }
1383
1384 pub fn inner_buf_mut(&mut self) -> Option<&mut [u8]> {
1388 if !self.inner.allocated {
1389 return None;
1390 }
1391 let inner = Arc::get_mut(&mut self.inner)?;
1392 let imp = unsafe { &mut *core::ptr::from_mut(&mut inner.imp).cast::<AlignedHeapMemory>() };
1395 Some(imp.as_mut())
1396 }
1397
1398 pub fn empty() -> Self {
1400 GuestMemory::new("empty", Empty)
1401 }
1402
1403 fn wrap_err(
1404 &self,
1405 gpa_len: Option<(u64, u64)>,
1406 op: GuestMemoryOperation,
1407 err: GuestMemoryBackingError,
1408 ) -> GuestMemoryError {
1409 let range = gpa_len.map(|(gpa, len)| (gpa..gpa.wrapping_add(len)));
1410 GuestMemoryError::new(&self.inner.debug_name, range, op, err)
1411 }
1412
1413 fn with_op<T>(
1414 &self,
1415 gpa_len: Option<(u64, u64)>,
1416 op: GuestMemoryOperation,
1417 f: impl FnOnce() -> Result<T, GuestMemoryBackingError>,
1418 ) -> Result<T, GuestMemoryError> {
1419 f().map_err(|err| self.wrap_err(gpa_len, op, err))
1420 }
1421
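/// Returns a `GuestMemory` that only allows access to
/// `offset..offset + len`, with addresses inside the subrange starting at 0.
///
/// When `allow_preemptive_locking` is set, the backing may choose to lock
/// the whole range up front (see [`Self::lockable_subrange`]).
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest); `mem` is an existing
/// `GuestMemory` covering at least `0x4000` bytes:
///
/// ```ignore
/// let sub = mem.subrange(0x3000, 0x1000, false)?;
/// // Address 0 in `sub` corresponds to 0x3000 in `mem`.
/// sub.write_at(0, &[1, 2, 3, 4])?;
/// assert!(sub.write_at(0x1000, &[0]).is_err()); // out of range
/// ```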
1422 pub fn subrange(
1427 &self,
1428 offset: u64,
1429 len: u64,
1430 allow_preemptive_locking: bool,
1431 ) -> Result<GuestMemory, GuestMemoryError> {
1432 self.with_op(Some((offset, len)), GuestMemoryOperation::Subrange, || {
1433 if let Some(guest_memory) =
1434 self.inner
1435 .imp
1436 .subrange(offset, len, allow_preemptive_locking)?
1437 {
1438 Ok(guest_memory)
1439 } else {
1440 create_memory_subrange(self.inner.clone(), offset, len, allow_preemptive_locking)
1441 }
1442 })
1443 }
1444
1445 pub fn lockable_subrange(
1447 &self,
1448 offset: u64,
1449 len: u64,
1450 ) -> Result<GuestMemory, GuestMemoryError> {
1451 self.subrange(offset, len, true)
1453 }
1454
1455 pub fn full_mapping(&self) -> Option<(*mut u8, usize)> {
1460 if let [region] = self.inner.regions.as_slice() {
1461 #[cfg(feature = "bitmap")]
1462 if region.bitmaps.is_some() {
1463 return None;
1464 }
1465 region
1466 .mapping
1467 .map(|SendPtrU8(ptr)| (ptr.as_ptr(), region.len as usize))
1468 } else {
1469 None
1470 }
1471 }
1472
1473 pub fn iova(&self, gpa: u64) -> Option<u64> {
1476 let (region, offset, _) = self.inner.region(gpa, 1).ok()?;
1477 Some(region.base_iova? + offset)
1478 }
1479
1480 fn mapping_range(
1485 &self,
1486 access_type: AccessType,
1487 gpa: u64,
1488 len: usize,
1489 ) -> Result<Option<*mut u8>, GuestMemoryBackingError> {
1490 let (region, offset, _) = self.inner.region(gpa, len as u64)?;
1491 if let Some(SendPtrU8(ptr)) = region.mapping {
1492 loop {
1493 let fault_offset = unsafe {
1495 match region.check_access(access_type, offset, len as u64) {
1496 Ok(()) => return Ok(Some(ptr.as_ptr().add(offset as usize))),
1497 Err(n) => n,
1498 }
1499 };
1500
1501 match self.inner.imp.page_fault(
1503 gpa + fault_offset,
1504 len - fault_offset as usize,
1505 access_type == AccessType::Write,
1506 true,
1507 ) {
1508 PageFaultAction::Fail(err) => {
1509 return Err(GuestMemoryBackingError::new(
1510 err.kind,
1511 gpa + fault_offset,
1512 err.err,
1513 ));
1514 }
1515 PageFaultAction::Retry => {}
1516 PageFaultAction::Fallback => break,
1517 }
1518 }
1519 }
1520 Ok(None)
1521 }
1522
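/// Runs `f` against the mapped virtual address for `gpa..gpa + len`.
///
/// If no mapping exists, `fallback` is used instead. If `f` reports a memory
/// fault, the backing's `page_fault` hook decides whether to fail, retry, or
/// fall back. With the `bitmap` feature enabled, the whole operation runs
/// inside the RCU domain so that bitmap updates can be synchronized against
/// in-flight accesses.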
1523 fn run_on_mapping<T, P>(
1530 &self,
1531 access_type: AccessType,
1532 gpa: u64,
1533 len: usize,
1534 mut param: P,
1535 mut f: impl FnMut(&mut P, *mut u8) -> Result<T, sparse_mmap::MemoryError>,
1536 fallback: impl FnOnce(&mut P) -> Result<T, GuestMemoryBackingError>,
1537 ) -> Result<T, GuestMemoryBackingError> {
1538 let op = || {
1539 let Some(mapping) = self.mapping_range(access_type, gpa, len)? else {
1540 return fallback(&mut param);
1541 };
1542
1543 loop {
1545 match f(&mut param, mapping) {
1546 Ok(t) => return Ok(t),
1547 Err(fault) => {
1548 match self.inner.imp.page_fault(
1549 gpa + fault.offset() as u64,
1550 len - fault.offset(),
1551 access_type == AccessType::Write,
1552 false,
1553 ) {
1554 PageFaultAction::Fail(err) => {
1555 return Err(GuestMemoryBackingError::new(
1556 err.kind,
1557 gpa + fault.offset() as u64,
1558 err.err,
1559 ));
1560 }
1561 PageFaultAction::Retry => {}
1562 PageFaultAction::Fallback => return fallback(&mut param),
1563 }
1564 }
1565 }
1566 }
1567 };
1568 #[cfg(feature = "bitmap")]
1572 return rcu().run(op);
1573 #[cfg(not(feature = "bitmap"))]
1574 op()
1575 }
1576
1577 unsafe fn write_ptr(
1581 &self,
1582 gpa: u64,
1583 src: *const u8,
1584 len: usize,
1585 ) -> Result<(), GuestMemoryBackingError> {
1586 if len == 0 {
1587 return Ok(());
1588 }
1589 self.run_on_mapping(
1590 AccessType::Write,
1591 gpa,
1592 len,
1593 (),
1594 |(), dest| {
1595 unsafe { sparse_mmap::try_copy(src, dest, len) }
1599 },
1600 |()| {
1601 unsafe { self.inner.imp.write_fallback(gpa, src, len) }
1604 },
1605 )
1606 }
1607
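/// Writes `src` to guest memory starting at address `gpa`.
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest):
///
/// ```ignore
/// mem.write_at(0x1000, b"hello")?;
/// let mut buf = [0u8; 5];
/// mem.read_at(0x1000, &mut buf)?;
/// assert_eq!(&buf, b"hello");
/// ```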
1608 pub fn write_at(&self, gpa: u64, src: &[u8]) -> Result<(), GuestMemoryError> {
1610 self.with_op(
1611 Some((gpa, src.len() as u64)),
1612 GuestMemoryOperation::Write,
1613 || self.write_at_inner(gpa, src),
1614 )
1615 }
1616
1617 fn write_at_inner(&self, gpa: u64, src: &[u8]) -> Result<(), GuestMemoryBackingError> {
1618 unsafe { self.write_ptr(gpa, src.as_ptr(), src.len()) }
1620 }
1621
1622 pub fn write_from_atomic(&self, gpa: u64, src: &[AtomicU8]) -> Result<(), GuestMemoryError> {
1624 self.with_op(
1625 Some((gpa, src.len() as u64)),
1626 GuestMemoryOperation::Write,
1627 || {
1628 unsafe { self.write_ptr(gpa, src.as_ptr().cast(), src.len()) }
1630 },
1631 )
1632 }
1633
1634 pub fn fill_at(&self, gpa: u64, val: u8, len: usize) -> Result<(), GuestMemoryError> {
1636 self.with_op(Some((gpa, len as u64)), GuestMemoryOperation::Fill, || {
1637 self.fill_at_inner(gpa, val, len)
1638 })
1639 }
1640
1641 fn fill_at_inner(&self, gpa: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
1642 if len == 0 {
1643 return Ok(());
1644 }
1645 self.run_on_mapping(
1646 AccessType::Write,
1647 gpa,
1648 len,
1649 (),
1650 |(), dest| {
1651 unsafe { sparse_mmap::try_write_bytes(dest, val, len) }
1653 },
1654 |()| self.inner.imp.fill_fallback(gpa, val, len),
1655 )
1656 }
1657
1658 unsafe fn read_ptr(
1663 &self,
1664 gpa: u64,
1665 dest: *mut u8,
1666 len: usize,
1667 ) -> Result<(), GuestMemoryBackingError> {
1668 if len == 0 {
1669 return Ok(());
1670 }
1671 self.run_on_mapping(
1672 AccessType::Read,
1673 gpa,
1674 len,
1675 (),
1676 |(), src| {
1677 unsafe { sparse_mmap::try_copy(src, dest, len) }
1681 },
1682 |()| {
1683 unsafe { self.inner.imp.read_fallback(gpa, dest, len) }
1686 },
1687 )
1688 }
1689
1690 fn read_at_inner(&self, gpa: u64, dest: &mut [u8]) -> Result<(), GuestMemoryBackingError> {
1691 unsafe { self.read_ptr(gpa, dest.as_mut_ptr(), dest.len()) }
1693 }
1694
1695 pub fn read_at(&self, gpa: u64, dest: &mut [u8]) -> Result<(), GuestMemoryError> {
1697 self.with_op(
1698 Some((gpa, dest.len() as u64)),
1699 GuestMemoryOperation::Read,
1700 || self.read_at_inner(gpa, dest),
1701 )
1702 }
1703
1704 pub fn read_to_atomic(&self, gpa: u64, dest: &[AtomicU8]) -> Result<(), GuestMemoryError> {
1706 self.with_op(
1707 Some((gpa, dest.len() as u64)),
1708 GuestMemoryOperation::Read,
1709 || unsafe { self.read_ptr(gpa, dest.as_ptr() as *mut u8, dest.len()) },
1711 )
1712 }
1713
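/// Writes the object `b` to guest address `gpa`.
///
/// For 1-, 2-, 4-, and 8-byte types the write is performed as a single
/// volatile access when the memory is mapped; larger types are copied as
/// bytes and may be observed torn by the guest.
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest); the trait bounds come
/// from `zerocopy`:
///
/// ```ignore
/// mem.write_plain(0x2000, &0xdead_beef_u32)?;
/// let v: u32 = mem.read_plain(0x2000)?;
/// assert_eq!(v, 0xdead_beef);
/// ```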
1714 pub fn write_plain<T: IntoBytes + Immutable + KnownLayout>(
1726 &self,
1727 gpa: u64,
1728 b: &T,
1729 ) -> Result<(), GuestMemoryError> {
1730 let len = size_of::<T>();
1732 self.with_op(Some((gpa, len as u64)), GuestMemoryOperation::Write, || {
1733 self.run_on_mapping(
1734 AccessType::Write,
1735 gpa,
1736 len,
1737 (),
1738 |(), dest| {
1739 match len {
1740 1 | 2 | 4 | 8 => {
1741 unsafe { sparse_mmap::try_write_volatile(dest.cast(), b) }
1744 }
1745 _ => {
1746 unsafe { sparse_mmap::try_copy(b.as_bytes().as_ptr(), dest, len) }
1749 }
1750 }
1751 },
1752 |()| {
1753 unsafe {
1755 self.inner
1756 .imp
1757 .write_fallback(gpa, b.as_bytes().as_ptr(), len)
1758 }
1759 },
1760 )
1761 })
1762 }
1763
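/// Atomically replaces the value at `gpa` with `new` if it currently equals
/// `current`.
///
/// Returns `Ok(Ok(new))` on success and `Ok(Err(actual))` with the observed
/// value on failure; the outer error covers access failures.
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest):
///
/// ```ignore
/// match mem.compare_exchange(0x1000, 0u32, 1u32)? {
///     Ok(_) => println!("claimed the slot"),
///     Err(actual) => println!("lost the race, saw {actual:#x}"),
/// }
/// ```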
1764 pub fn compare_exchange<T: IntoBytes + FromBytes + Immutable + KnownLayout + Copy>(
1766 &self,
1767 gpa: u64,
1768 current: T,
1769 new: T,
1770 ) -> Result<Result<T, T>, GuestMemoryError> {
1771 let len = size_of_val(&new);
1772 self.with_op(
1773 Some((gpa, len as u64)),
1774 GuestMemoryOperation::CompareExchange,
1775 || {
1776 self.run_on_mapping(
1778 AccessType::Write,
1779 gpa,
1780 len,
1781 (),
1782 |(), dest| {
1783 unsafe { sparse_mmap::try_compare_exchange(dest.cast(), current, new) }
1786 },
1787 |()| {
1788 let mut current = current;
1789 let success = self.inner.imp.compare_exchange_fallback(
1790 gpa,
1791 current.as_mut_bytes(),
1792 new.as_bytes(),
1793 )?;
1794
1795 Ok(if success { Ok(new) } else { Err(current) })
1796 },
1797 )
1798 },
1799 )
1800 }
1801
1802 pub fn compare_exchange_bytes<T: IntoBytes + FromBytes + Immutable + KnownLayout + ?Sized>(
1804 &self,
1805 gpa: u64,
1806 current: &mut T,
1807 new: &T,
1808 ) -> Result<bool, GuestMemoryError> {
1809 let len = size_of_val(new);
1810 assert_eq!(size_of_val(current), len);
1811 self.with_op(
1812 Some((gpa, len as u64)),
1813 GuestMemoryOperation::CompareExchange,
1814 || {
1815 self.run_on_mapping(
1817 AccessType::Write,
1818 gpa,
1819 len,
1820 current,
1821 |current, dest| {
1822 unsafe { sparse_mmap::try_compare_exchange_ref(dest, *current, new) }
1825 },
1826 |current| {
1827 let success = self.inner.imp.compare_exchange_fallback(
1828 gpa,
1829 current.as_mut_bytes(),
1830 new.as_bytes(),
1831 )?;
1832
1833 Ok(success)
1834 },
1835 )
1836 },
1837 )
1838 }
1839
1840 pub fn read_plain<T: FromBytes + Immutable + KnownLayout>(
1852 &self,
1853 gpa: u64,
1854 ) -> Result<T, GuestMemoryError> {
1855 let len = size_of::<T>();
1857 self.with_op(Some((gpa, len as u64)), GuestMemoryOperation::Read, || {
1858 self.run_on_mapping(
1859 AccessType::Read,
1860 gpa,
1861 len,
1862 (),
1863 |(), src| {
1864 match len {
1865 1 | 2 | 4 | 8 => {
1866 unsafe { sparse_mmap::try_read_volatile(src.cast::<T>()) }
1869 }
1870 _ => {
1871 let mut obj = std::mem::MaybeUninit::<T>::zeroed();
1872 unsafe { sparse_mmap::try_copy(src, obj.as_mut_ptr().cast(), len)? };
1875 Ok(unsafe { obj.assume_init() })
1877 }
1878 }
1879 },
1880 |()| {
1881 let mut obj = std::mem::MaybeUninit::<T>::zeroed();
1882 unsafe {
1885 self.inner
1886 .imp
1887 .read_fallback(gpa, obj.as_mut_ptr().cast(), len)?;
1888 }
1889 Ok(unsafe { obj.assume_init() })
1891 },
1892 )
1893 })
1894 }
1895
1896 fn probe_page_for_lock(
1897 &self,
1898 with_kernel_access: bool,
1899 gpa: u64,
1900 ) -> Result<*const AtomicU8, GuestMemoryBackingError> {
1901 let (region, offset, _) = self.inner.region(gpa, 1)?;
1902 let Some(SendPtrU8(ptr)) = region.mapping else {
1903 return Err(GuestMemoryBackingError::other(gpa, NotLockable));
1904 };
1905 if with_kernel_access {
1907 self.inner.imp.expose_va(gpa, 1)?;
1908 }
1909 let mut b = [0];
1910 self.read_at_inner(gpa, &mut b)?;
1913 let page = unsafe { ptr.as_ptr().add(offset as usize) };
1916 Ok(page.cast())
1917 }
1918
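/// Locks the given guest page numbers, returning a [`LockedPages`] that
/// keeps direct pointers to the pages until it is dropped. If
/// `with_kernel_access` is set, the backing's `expose_va` hook is invoked
/// for each page first.
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest):
///
/// ```ignore
/// let locked = mem.lock_gpns(false, &[0, 1])?;
/// let pages = locked.pages(); // &[&Page], one entry per GPN
/// pages[0][0].store(0xff, std::sync::atomic::Ordering::Relaxed);
/// ```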
1919 pub fn lock_gpns(
1920 &self,
1921 with_kernel_access: bool,
1922 gpns: &[u64],
1923 ) -> Result<LockedPages, GuestMemoryError> {
1924 self.with_op(None, GuestMemoryOperation::Lock, || {
1925 let mut pages = Vec::with_capacity(gpns.len());
1926 for &gpn in gpns {
1927 let gpa = gpn_to_gpa(gpn).map_err(GuestMemoryBackingError::gpn)?;
1928 let page = self.probe_page_for_lock(with_kernel_access, gpa)?;
1929 pages.push(PagePtr(page));
1930 }
1931 let store_gpns = self.inner.imp.lock_gpns(gpns)?;
1932 Ok(LockedPages {
1933 pages: pages.into_boxed_slice(),
1934 gpns: store_gpns.then(|| gpns.to_vec().into_boxed_slice()),
1935 mem: self.inner.clone(),
1936 })
1937 })
1938 }
1939
1940 pub fn probe_gpns(&self, gpns: &[u64]) -> Result<(), GuestMemoryError> {
1941 self.with_op(None, GuestMemoryOperation::Probe, || {
1942 for &gpn in gpns {
1943 let mut b = [0];
1944 self.read_at_inner(
1945 gpn_to_gpa(gpn).map_err(GuestMemoryBackingError::gpn)?,
1946 &mut b,
1947 )?;
1948 }
1949 Ok(())
1950 })
1951 }
1952
1953 pub fn probe_gpa_readable(&self, gpa: u64) -> Result<(), GuestMemoryErrorKind> {
1955 let mut b = [0];
1956 self.read_at_inner(gpa, &mut b).map_err(|err| err.kind)
1957 }
1958
1959 pub fn probe_gpa_writable(&self, gpa: u64) -> Result<(), GuestMemoryErrorKind> {
1961 let _ = self
1962 .compare_exchange(gpa, 0u8, 0)
1963 .map_err(|err| err.kind())?;
1964 Ok(())
1965 }
1966
1967 fn dangerous_access_pre_locked_memory(&self, gpa: u64, len: usize) -> &[AtomicU8] {
1979 let addr = self
1980 .mapping_range(AccessType::Write, gpa, len)
1981 .unwrap()
1982 .unwrap();
1983 unsafe { std::slice::from_raw_parts(addr.cast(), len) }
1988 }
1989
1990 fn op_range<F: FnMut(u64, Range<usize>) -> Result<(), GuestMemoryBackingError>>(
1991 &self,
1992 op: GuestMemoryOperation,
1993 range: &PagedRange<'_>,
1994 mut f: F,
1995 ) -> Result<(), GuestMemoryError> {
1996 self.with_op(None, op, || {
1997 let gpns = range.gpns();
1998 let offset = range.offset();
1999
2000 let mut byte_index = 0;
2004 let mut len = range.len();
2005 let mut page = 0;
2006 if offset % PAGE_SIZE != 0 {
2007 let head_len = std::cmp::min(len, PAGE_SIZE - (offset % PAGE_SIZE));
2008 let addr = gpn_to_gpa(gpns[page]).map_err(GuestMemoryBackingError::gpn)?
2009 + offset as u64 % PAGE_SIZE64;
2010 f(addr, byte_index..byte_index + head_len)?;
2011 byte_index += head_len;
2012 len -= head_len;
2013 page += 1;
2014 }
2015 while len >= PAGE_SIZE {
2016 f(
2017 gpn_to_gpa(gpns[page]).map_err(GuestMemoryBackingError::gpn)?,
2018 byte_index..byte_index + PAGE_SIZE,
2019 )?;
2020 byte_index += PAGE_SIZE;
2021 len -= PAGE_SIZE;
2022 page += 1;
2023 }
2024 if len > 0 {
2025 f(
2026 gpn_to_gpa(gpns[page]).map_err(GuestMemoryBackingError::gpn)?,
2027 byte_index..byte_index + len,
2028 )?;
2029 }
2030
2031 Ok(())
2032 })
2033 }
2034
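/// Writes `data` to the scattered guest pages described by `range`.
///
/// `data.len()` must equal `range.len()`; the head and tail of the range may
/// be partial pages, and every page in between is written whole.
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest), given a `PagedRange`
/// built via the `ranges` module:
///
/// ```ignore
/// fn copy_through(
///     mem: &GuestMemory,
///     range: &PagedRange<'_>,
/// ) -> Result<Vec<u8>, GuestMemoryError> {
///     let payload = vec![0x5a; range.len()];
///     mem.write_range(range, &payload)?;
///     let mut back = vec![0; range.len()];
///     mem.read_range(range, &mut back)?;
///     Ok(back)
/// }
/// ```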
2035 pub fn write_range(&self, range: &PagedRange<'_>, data: &[u8]) -> Result<(), GuestMemoryError> {
2036 assert!(data.len() == range.len());
2037 self.op_range(GuestMemoryOperation::Write, range, move |addr, r| {
2038 self.write_at_inner(addr, &data[r])
2039 })
2040 }
2041
2042 pub fn fill_range(&self, range: &PagedRange<'_>, val: u8) -> Result<(), GuestMemoryError> {
2043 self.op_range(GuestMemoryOperation::Fill, range, move |addr, r| {
2044 self.fill_at_inner(addr, val, r.len())
2045 })
2046 }
2047
2048 pub fn zero_range(&self, range: &PagedRange<'_>) -> Result<(), GuestMemoryError> {
2049 self.op_range(GuestMemoryOperation::Fill, range, move |addr, r| {
2050 self.fill_at_inner(addr, 0, r.len())
2051 })
2052 }
2053
2054 pub fn read_range(
2055 &self,
2056 range: &PagedRange<'_>,
2057 data: &mut [u8],
2058 ) -> Result<(), GuestMemoryError> {
2059 assert!(data.len() == range.len());
2060 self.op_range(GuestMemoryOperation::Read, range, move |addr, r| {
2061 self.read_at_inner(addr, &mut data[r])
2062 })
2063 }
2064
2065 pub fn write_range_from_atomic(
2066 &self,
2067 range: &PagedRange<'_>,
2068 data: &[AtomicU8],
2069 ) -> Result<(), GuestMemoryError> {
2070 assert!(data.len() == range.len());
2071 self.op_range(GuestMemoryOperation::Write, range, move |addr, r| {
2072 let src = &data[r];
2073 unsafe { self.write_ptr(addr, src.as_ptr().cast(), src.len()) }
2075 })
2076 }
2077
2078 pub fn read_range_to_atomic(
2079 &self,
2080 range: &PagedRange<'_>,
2081 data: &[AtomicU8],
2082 ) -> Result<(), GuestMemoryError> {
2083 assert!(data.len() == range.len());
2084 self.op_range(GuestMemoryOperation::Read, range, move |addr, r| {
2085 let dest = &data[r];
2086 unsafe { self.read_ptr(addr, dest.as_ptr().cast_mut().cast(), dest.len()) }
2088 })
2089 }
2090
2091 pub fn lock_range<T: LockedRange>(
2098 &self,
2099 paged_range: PagedRange<'_>,
2100 mut locked_range: T,
2101 ) -> Result<LockedRangeImpl<T>, GuestMemoryError> {
2102 self.with_op(None, GuestMemoryOperation::Lock, || {
2103 let gpns = paged_range.gpns();
2104 for &gpn in gpns {
2105 let gpa = gpn_to_gpa(gpn).map_err(GuestMemoryBackingError::gpn)?;
2106 self.probe_page_for_lock(true, gpa)?;
2107 }
2108 for range in paged_range.ranges() {
2109 let range = range.map_err(GuestMemoryBackingError::gpn)?;
2110 locked_range.push_sub_range(
2111 self.dangerous_access_pre_locked_memory(range.start, range.len() as usize),
2112 );
2113 }
2114 let store_gpns = self.inner.imp.lock_gpns(paged_range.gpns())?;
2115 Ok(LockedRangeImpl {
2116 mem: self.inner.clone(),
2117 gpns: store_gpns.then(|| paged_range.gpns().to_vec().into_boxed_slice()),
2118 inner: locked_range,
2119 })
2120 })
2121 }
2122}
2123
2124#[derive(Debug, Error)]
2125#[error("invalid guest page number {0:#x}")]
2126pub struct InvalidGpn(u64);
2127
2128fn gpn_to_gpa(gpn: u64) -> Result<u64, InvalidGpn> {
2129 gpn.checked_mul(PAGE_SIZE64).ok_or(InvalidGpn(gpn))
2130}
2131
2132#[derive(Debug, Copy, Clone, Default)]
2133struct RegionDefinition {
2134 invalid_mask: u64,
2135 region_mask: u64,
2136 region_bits: u32,
2137}
2138
2139impl RegionDefinition {
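/// Resolves `gpa` to `(region index, offset within region)`.
///
/// Fails with an out-of-range error if the address (or length) has bits set
/// outside the valid address width (`invalid_mask`), or if `gpa + len` would
/// cross the end of the region.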
2140 fn region(&self, gpa: u64, len: u64) -> Result<(usize, u64), GuestMemoryBackingError> {
2141 if (gpa | len) & self.invalid_mask != 0 {
2142 return Err(GuestMemoryBackingError::new(
2143 GuestMemoryErrorKind::OutOfRange,
2144 gpa,
2145 OutOfRange,
2146 ));
2147 }
2148 let offset = gpa & self.region_mask;
2149 if offset.wrapping_add(len) & !self.region_mask != 0 {
2150 return Err(GuestMemoryBackingError::new(
2151 GuestMemoryErrorKind::OutOfRange,
2152 gpa,
2153 OutOfRange,
2154 ));
2155 }
2156 let index = (gpa >> self.region_bits) as usize;
2157 Ok((index, offset))
2158 }
2159}
2160
2161impl GuestMemoryInner {
2162 fn region(
2163 &self,
2164 gpa: u64,
2165 len: u64,
2166 ) -> Result<(&MemoryRegion, u64, usize), GuestMemoryBackingError> {
2167 let (index, offset) = self.region_def.region(gpa, len)?;
2168 let region = &self.regions[index];
2169 if offset + len > region.len {
2170 return Err(GuestMemoryBackingError::new(
2171 GuestMemoryErrorKind::OutOfRange,
2172 gpa,
2173 OutOfRange,
2174 ));
2175 }
2176 Ok((&self.regions[index], offset, index))
2177 }
2178}
2179
2180#[derive(Clone)]
2181pub struct LockedPages {
2182 pages: Box<[PagePtr]>,
2183 gpns: Option<Box<[u64]>>,
2184 mem: Arc<GuestMemoryInner>,
2186}
2187
2188impl Drop for LockedPages {
2189 fn drop(&mut self) {
2190 if let Some(gpns) = &self.gpns {
2191 self.mem.imp.unlock_gpns(gpns);
2192 }
2193 }
2194}
2195
2196impl Debug for LockedPages {
2197 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
2198 f.debug_struct("LockedPages")
2199 .field("page_count", &self.pages.len())
2200 .finish()
2201 }
2202}
2203
2204#[derive(Copy, Clone, Debug)]
2205struct PagePtr(#[expect(dead_code)] *const AtomicU8);
2207
2208unsafe impl Send for PagePtr {}
2211unsafe impl Sync for PagePtr {}
2213
2214pub type Page = [AtomicU8; PAGE_SIZE];
2215
2216impl LockedPages {
2217 #[inline]
2218 pub fn pages(&self) -> &[&Page] {
2219 unsafe { std::slice::from_raw_parts(self.pages.as_ptr().cast::<&Page>(), self.pages.len()) }
2223 }
2224}
2225
2226impl<'a> AsRef<[&'a Page]> for &'a LockedPages {
2227 fn as_ref(&self) -> &[&'a Page] {
2228 self.pages()
2229 }
2230}
2231
2232pub trait LockedRange {
2237 fn push_sub_range(&mut self, sub_range: &[AtomicU8]);
2239}
2240
2241pub struct LockedRangeImpl<T: LockedRange> {
2242 mem: Arc<GuestMemoryInner>,
2243 gpns: Option<Box<[u64]>>,
2244 inner: T,
2245}
2246
2247impl<T: LockedRange> LockedRangeImpl<T> {
2248 pub fn get(&self) -> &T {
2249 &self.inner
2250 }
2251}
2252
2253impl<T: LockedRange> Drop for LockedRangeImpl<T> {
2254 fn drop(&mut self) {
2255 if let Some(gpns) = &self.gpns {
2256 self.mem.imp.unlock_gpns(gpns);
2257 }
2258 }
2259}
2260
2261#[derive(Debug, Error)]
2262pub enum AccessError {
2263 #[error("memory access error")]
2264 Memory(#[from] GuestMemoryError),
2265 #[error("out of range: {0:#x} < {1:#x}")]
2266 OutOfRange(usize, usize),
2267 #[error("write attempted to read-only memory")]
2268 ReadOnly,
2269}
2270
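/// Sequential reader over a byte stream, implemented below for `&[u8]` and
/// composable with [`Limit`] to cap the number of readable bytes.
///
/// # Example
///
/// Illustrative sketch (not compiled as a doctest):
///
/// ```ignore
/// let data = [1u8, 2, 3, 4, 5, 6, 7, 8];
/// let mut reader = data.as_slice().limit(6);
/// let word: u32 = reader.read_plain()?; // consumes 4 bytes
/// let rest = reader.read_all()?;        // the remaining 2 bytes
/// assert_eq!(word.to_ne_bytes(), [1, 2, 3, 4]);
/// assert_eq!(rest, [5, 6]);
/// ```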
2271pub trait MemoryRead {
2272 fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError>;
2273 fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError>;
2274 fn len(&self) -> usize;
2275
2276 fn read_plain<T: IntoBytes + FromBytes + Immutable + KnownLayout>(
2277 &mut self,
2278 ) -> Result<T, AccessError> {
2279 let mut value: T = FromZeros::new_zeroed();
2280 self.read(value.as_mut_bytes())?;
2281 Ok(value)
2282 }
2283
2284 fn read_n<T: IntoBytes + FromBytes + Immutable + KnownLayout + Copy>(
2285 &mut self,
2286 len: usize,
2287 ) -> Result<Vec<T>, AccessError> {
2288 let mut value = vec![FromZeros::new_zeroed(); len];
2289 self.read(value.as_mut_bytes())?;
2290 Ok(value)
2291 }
2292
2293 fn read_all(&mut self) -> Result<Vec<u8>, AccessError> {
2294 let mut value = vec![0; self.len()];
2295 self.read(&mut value)?;
2296 Ok(value)
2297 }
2298
2299 fn limit(self, len: usize) -> Limit<Self>
2300 where
2301 Self: Sized,
2302 {
2303 let len = len.min(self.len());
2304 Limit { inner: self, len }
2305 }
2306}
2307
2308pub trait MemoryWrite {
2310 fn write(&mut self, data: &[u8]) -> Result<(), AccessError>;
2311 fn zero(&mut self, len: usize) -> Result<(), AccessError> {
2312 self.fill(0, len)
2313 }
2314 fn fill(&mut self, val: u8, len: usize) -> Result<(), AccessError>;
2315
2316 fn len(&self) -> usize;
2318
2319 fn limit(self, len: usize) -> Limit<Self>
2320 where
2321 Self: Sized,
2322 {
2323 let len = len.min(self.len());
2324 Limit { inner: self, len }
2325 }
2326}
2327
2328impl MemoryRead for &'_ [u8] {
2329 fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError> {
2330 if self.len() < data.len() {
2331 return Err(AccessError::OutOfRange(self.len(), data.len()));
2332 }
2333 let (source, rest) = self.split_at(data.len());
2334 data.copy_from_slice(source);
2335 *self = rest;
2336 Ok(self)
2337 }
2338
2339 fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError> {
2340 if self.len() < len {
2341 return Err(AccessError::OutOfRange(self.len(), len));
2342 }
2343 *self = &self[len..];
2344 Ok(self)
2345 }
2346
2347 fn len(&self) -> usize {
2348 <[u8]>::len(self)
2349 }
2350}
2351
2352impl MemoryWrite for &mut [u8] {
2353 fn write(&mut self, data: &[u8]) -> Result<(), AccessError> {
2354 if self.len() < data.len() {
2355 return Err(AccessError::OutOfRange(self.len(), data.len()));
2356 }
2357 let (dest, rest) = std::mem::take(self).split_at_mut(data.len());
2358 dest.copy_from_slice(data);
2359 *self = rest;
2360 Ok(())
2361 }
2362
2363 fn fill(&mut self, val: u8, len: usize) -> Result<(), AccessError> {
2364 if self.len() < len {
2365 return Err(AccessError::OutOfRange(self.len(), len));
2366 }
2367 let (dest, rest) = std::mem::take(self).split_at_mut(len);
2368 dest.fill(val);
2369 *self = rest;
2370 Ok(())
2371 }
2372
2373 fn len(&self) -> usize {
2374 <[u8]>::len(self)
2375 }
2376}
2377
2378#[derive(Debug, Clone)]
2379pub struct Limit<T> {
2380 inner: T,
2381 len: usize,
2382}
2383
2384impl<T: MemoryRead> MemoryRead for Limit<T> {
2385 fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError> {
2386 let len = data.len();
2387 if len > self.len {
2388 return Err(AccessError::OutOfRange(self.len, len));
2389 }
2390 self.inner.read(data)?;
2391 self.len -= len;
2392 Ok(self)
2393 }
2394
2395 fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError> {
2396 if len > self.len {
2397 return Err(AccessError::OutOfRange(self.len, len));
2398 }
2399 self.inner.skip(len)?;
2400 self.len -= len;
2401 Ok(self)
2402 }
2403
2404 fn len(&self) -> usize {
2405 self.len
2406 }
2407}
2408
2409impl<T: MemoryWrite> MemoryWrite for Limit<T> {
2410 fn write(&mut self, data: &[u8]) -> Result<(), AccessError> {
2411 let len = data.len();
2412 if len > self.len {
2413 return Err(AccessError::OutOfRange(self.len, len));
2414 }
2415 self.inner.write(data)?;
2416 self.len -= len;
2417 Ok(())
2418 }
2419
2420 fn fill(&mut self, val: u8, len: usize) -> Result<(), AccessError> {
2421 if len > self.len {
2422 return Err(AccessError::OutOfRange(self.len, len));
2423 }
2424 self.inner.fill(val, len)?;
2425 self.len -= len;
2426 Ok(())
2427 }
2428
2429 fn len(&self) -> usize {
2430 self.len
2431 }
2432}
2433
2434pub trait MappableGuestMemory: Send + Sync {
2437 fn map_to_guest(&mut self, gpa: u64, writable: bool) -> io::Result<()>;
2443
2444 fn unmap_from_guest(&mut self);
2445}
2446
2447pub trait MappedMemoryRegion: Send + Sync {
2450 fn map(
2454 &self,
2455 offset: usize,
2456 section: &dyn AsMappableRef,
2457 file_offset: u64,
2458 len: usize,
2459 writable: bool,
2460 ) -> io::Result<()>;
2461
2462 fn unmap(&self, offset: usize, len: usize) -> io::Result<()>;
2464}
2465
2466pub trait MemoryMapper: Send + Sync {
2468 fn new_region(
2473 &self,
2474 len: usize,
2475 debug_name: String,
2476 ) -> io::Result<(Box<dyn MappableGuestMemory>, Arc<dyn MappedMemoryRegion>)>;
2477}
2478
2479pub trait DoorbellRegistration: Send + Sync {
2481 fn register_doorbell(
2483 &self,
2484 guest_address: u64,
2485 value: Option<u64>,
2486 length: Option<u32>,
2487 event: &Event,
2488 ) -> io::Result<Box<dyn Send + Sync>>;
2489}
2490
2491pub trait MapRom: Send + Sync {
2493 fn map_rom(&self, gpa: u64, offset: u64, len: u64) -> io::Result<Box<dyn UnmapRom>>;
2497
2498 fn len(&self) -> u64;
2500}
2501
2502pub trait UnmapRom: Send + Sync {
2504 fn unmap_rom(self);
2506}
2507
2508#[cfg(test)]
2509#[expect(clippy::undocumented_unsafe_blocks)]
2510mod tests {
2511 use crate::GuestMemory;
2512 use crate::PAGE_SIZE64;
2513 use crate::PageFaultAction;
2514 use crate::PageFaultError;
2515 use sparse_mmap::SparseMapping;
2516 use std::ptr::NonNull;
2517 use std::sync::Arc;
2518 use thiserror::Error;
2519
2520 pub struct GuestMemoryMapping {
2525 mapping: SparseMapping,
2526 #[cfg(feature = "bitmap")]
2527 bitmap: Option<Vec<u8>>,
2528 }
2529
2530 unsafe impl crate::GuestMemoryAccess for GuestMemoryMapping {
2531 fn mapping(&self) -> Option<NonNull<u8>> {
2532 NonNull::new(self.mapping.as_ptr().cast())
2533 }
2534
2535 fn max_address(&self) -> u64 {
2536 self.mapping.len() as u64
2537 }
2538
2539 #[cfg(feature = "bitmap")]
2540 fn access_bitmap(&self) -> Option<crate::BitmapInfo> {
2541 self.bitmap.as_ref().map(|bm| crate::BitmapInfo {
2542 read_bitmap: NonNull::new(bm.as_ptr().cast_mut()).unwrap(),
2543 write_bitmap: NonNull::new(bm.as_ptr().cast_mut()).unwrap(),
2544 bit_offset: 0,
2545 })
2546 }
2547 }
2548
2549 const PAGE_SIZE: usize = 4096;
2550 const SIZE_1MB: usize = 1048576;
2551
2552 fn create_test_mapping() -> GuestMemoryMapping {
2559 let mapping = SparseMapping::new(SIZE_1MB * 4).unwrap();
2560 mapping.alloc(0, SIZE_1MB).unwrap();
2561 mapping.alloc(2 * SIZE_1MB, SIZE_1MB).unwrap();
2562 mapping
2563 .alloc(3 * SIZE_1MB + PAGE_SIZE, SIZE_1MB - PAGE_SIZE)
2564 .unwrap();
2565
2566 GuestMemoryMapping {
2567 mapping,
2568 #[cfg(feature = "bitmap")]
2569 bitmap: None,
2570 }
2571 }
2572
2573 #[test]
2574 fn test_basic_read_write() {
2575 let mapping = create_test_mapping();
2576 let gm = GuestMemory::new("test", mapping);
2577
2578 let addr = 0;
2580 let result = gm.read_plain::<u8>(addr);
2581 assert_eq!(result.unwrap(), 0);
2582
2583 let write_buffer = [1, 2, 3, 4, 5];
2585 let mut read_buffer = [0; 5];
2586 gm.write_at(0, &write_buffer).unwrap();
2587 gm.read_at(0, &mut read_buffer).unwrap();
2588 assert_eq!(write_buffer, read_buffer);
2589 assert_eq!(gm.read_plain::<u8>(0).unwrap(), 1);
2590 assert_eq!(gm.read_plain::<u8>(1).unwrap(), 2);
2591 assert_eq!(gm.read_plain::<u8>(2).unwrap(), 3);
2592 assert_eq!(gm.read_plain::<u8>(3).unwrap(), 4);
2593 assert_eq!(gm.read_plain::<u8>(4).unwrap(), 5);
2594
2595 let addr = 2 * SIZE_1MB as u64;
2597 let write_buffer: Vec<u8> = (0..PAGE_SIZE).map(|x| x as u8).collect();
2598 let mut read_buffer: Vec<u8> = (0..PAGE_SIZE).map(|_| 0).collect();
2599 gm.write_at(addr, write_buffer.as_slice()).unwrap();
2600 gm.read_at(addr, read_buffer.as_mut_slice()).unwrap();
2601 assert_eq!(write_buffer, read_buffer);
2602
2603 let write_buffer: Vec<u8> = (0..SIZE_1MB).map(|x| x as u8).collect();
2605 let mut read_buffer: Vec<u8> = (0..SIZE_1MB).map(|_| 0).collect();
2606 gm.write_at(addr, write_buffer.as_slice()).unwrap();
2607 gm.read_at(addr, read_buffer.as_mut_slice()).unwrap();
2608 assert_eq!(write_buffer, read_buffer);
2609
2610 let addr = SIZE_1MB as u64;
2612 let result = gm.read_plain::<u8>(addr);
2613 assert!(result.is_err());
2614 }
2615
2616 #[test]
2617 fn test_multi() {
2618 let len = SIZE_1MB * 4;
2619 let mapping = SparseMapping::new(len).unwrap();
2620 mapping.alloc(0, len).unwrap();
2621 let mapping = Arc::new(GuestMemoryMapping {
2622 mapping,
2623 #[cfg(feature = "bitmap")]
2624 bitmap: None,
2625 });
2626 let region_len = 1 << 30;
2627 let gm = GuestMemory::new_multi_region(
2628 "test",
2629 region_len,
2630 vec![Some(mapping.clone()), None, Some(mapping.clone())],
2631 )
2632 .unwrap();
2633
2634 let mut b = [0];
2635 let len = len as u64;
2636 gm.read_at(0, &mut b).unwrap();
2637 gm.read_at(len, &mut b).unwrap_err();
2638 gm.read_at(region_len, &mut b).unwrap_err();
2639 gm.read_at(2 * region_len, &mut b).unwrap();
2640 gm.read_at(2 * region_len + len, &mut b).unwrap_err();
2641 gm.read_at(3 * region_len, &mut b).unwrap_err();
2642 }
2643
2644 #[cfg(feature = "bitmap")]
2645 #[test]
2646 fn test_bitmap() {
2647 let len = PAGE_SIZE * 4;
2648 let mapping = SparseMapping::new(len).unwrap();
2649 mapping.alloc(0, len).unwrap();
2650 let bitmap = vec![0b0101];
2651 let mapping = Arc::new(GuestMemoryMapping {
2652 mapping,
2653 bitmap: Some(bitmap),
2654 });
2655 let gm = GuestMemory::new("test", mapping);
2656
2657 gm.read_plain::<[u8; 1]>(0).unwrap();
2658 gm.read_plain::<[u8; 1]>(PAGE_SIZE64 - 1).unwrap();
2659 gm.read_plain::<[u8; 2]>(PAGE_SIZE64 - 1).unwrap_err();
2660 gm.read_plain::<[u8; 1]>(PAGE_SIZE64).unwrap_err();
2661 gm.read_plain::<[u8; 1]>(PAGE_SIZE64 * 2).unwrap();
2662 gm.read_plain::<[u8; PAGE_SIZE * 2]>(0).unwrap_err();
2663 }
2664
2665 struct FaultingMapping {
2666 mapping: SparseMapping,
2667 }
2668
2669 #[derive(Debug, Error)]
2670 #[error("fault")]
2671 struct Fault;
2672
2673 unsafe impl crate::GuestMemoryAccess for FaultingMapping {
2674 fn mapping(&self) -> Option<NonNull<u8>> {
2675 NonNull::new(self.mapping.as_ptr().cast())
2676 }
2677
2678 fn max_address(&self) -> u64 {
2679 self.mapping.len() as u64
2680 }
2681
2682 fn page_fault(
2683 &self,
2684 address: u64,
2685 _len: usize,
2686 write: bool,
2687 bitmap_failure: bool,
2688 ) -> PageFaultAction {
2689 assert!(!bitmap_failure);
2690 let qlen = self.mapping.len() as u64 / 4;
2691 if address < qlen || address >= 3 * qlen {
2692 return PageFaultAction::Fail(PageFaultError::other(Fault));
2693 }
2694 let page_address = (address as usize) & !(PAGE_SIZE - 1);
2695 if address >= 2 * qlen {
2696 if write {
2697 return PageFaultAction::Fail(PageFaultError::other(Fault));
2698 }
2699 self.mapping.map_zero(page_address, PAGE_SIZE).unwrap();
2700 } else {
2701 self.mapping.alloc(page_address, PAGE_SIZE).unwrap();
2702 }
2703 PageFaultAction::Retry
2704 }
2705 }
2706
2707 impl FaultingMapping {
2708 fn new(len: usize) -> Self {
2709 let mapping = SparseMapping::new(len).unwrap();
2710 FaultingMapping { mapping }
2711 }
2712 }
2713
2714 #[test]
2715 fn test_fault() {
2716 let len = PAGE_SIZE * 4;
2717 let mapping = FaultingMapping::new(len);
2718 let gm = GuestMemory::new("test", mapping);
2719
2720 gm.write_plain::<u8>(0, &0).unwrap_err();
2721 gm.read_plain::<u8>(PAGE_SIZE64 - 1).unwrap_err();
2722 gm.read_plain::<u8>(PAGE_SIZE64).unwrap();
2723 gm.write_plain::<u8>(PAGE_SIZE64, &0).unwrap();
2724 gm.write_plain::<u16>(PAGE_SIZE64 * 3 - 1, &0).unwrap_err();
2725 gm.read_plain::<u16>(PAGE_SIZE64 * 3 - 1).unwrap_err();
2726 gm.read_plain::<u8>(PAGE_SIZE64 * 3 - 1).unwrap();
2727 gm.write_plain::<u8>(PAGE_SIZE64 * 3 - 1, &0).unwrap_err();
2728 }
2729
2730 #[test]
2731 fn test_allocated() {
2732 let mut gm = GuestMemory::allocate(0x10000);
2733 let pattern = [0x42; 0x10000];
2734 gm.write_at(0, &pattern).unwrap();
2735 assert_eq!(gm.inner_buf_mut().unwrap(), &pattern);
2736 gm.inner_buf().unwrap();
2737 let gm2 = gm.clone();
2738 assert!(gm.inner_buf_mut().is_none());
2739 gm.inner_buf().unwrap();
2740 let mut gm = gm.into_inner_buf().unwrap_err();
2741 drop(gm2);
2742 assert_eq!(gm.inner_buf_mut().unwrap(), &pattern);
2743 gm.into_inner_buf().unwrap();
2744 }
2745}