1#![expect(unsafe_code)]
8#![expect(missing_docs)]
9
10pub mod ranges;
11
12use self::ranges::PagedRange;
13use inspect::Inspect;
14use pal_event::Event;
15use sparse_mmap::AsMappableRef;
16use std::any::Any;
17use std::fmt::Debug;
18use std::io;
19use std::ops::Deref;
20use std::ops::DerefMut;
21use std::ops::Range;
22use std::ptr::NonNull;
23use std::sync::Arc;
24use std::sync::atomic::AtomicU8;
25use thiserror::Error;
26use zerocopy::FromBytes;
27use zerocopy::FromZeros;
28use zerocopy::Immutable;
29use zerocopy::IntoBytes;
30use zerocopy::KnownLayout;
31
32pub const PAGE_SIZE: usize = 4096;
34const PAGE_SIZE64: u64 = 4096;
35
36#[derive(Debug, Error)]
38#[error(transparent)]
39pub struct GuestMemoryError(Box<GuestMemoryErrorInner>);
40
41impl GuestMemoryError {
42 fn new(
43 debug_name: &Arc<str>,
44 range: Option<Range<u64>>,
45 op: GuestMemoryOperation,
46 err: GuestMemoryBackingError,
47 ) -> Self {
48 GuestMemoryError(Box::new(GuestMemoryErrorInner {
49 op,
50 debug_name: debug_name.clone(),
51 range,
52 gpa: (err.gpa != INVALID_ERROR_GPA).then_some(err.gpa),
53 kind: err.kind,
54 err: err.err,
55 }))
56 }
57
58 pub fn kind(&self) -> GuestMemoryErrorKind {
60 self.0.kind
61 }
62}
63
64#[derive(Debug, Copy, Clone)]
65enum GuestMemoryOperation {
66 Read,
67 Write,
68 Fill,
69 CompareExchange,
70 Lock,
71 Subrange,
72 Probe,
73}
74
75impl std::fmt::Display for GuestMemoryOperation {
76 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
77 f.pad(match self {
78 GuestMemoryOperation::Read => "read",
79 GuestMemoryOperation::Write => "write",
80 GuestMemoryOperation::Fill => "fill",
81 GuestMemoryOperation::CompareExchange => "compare exchange",
82 GuestMemoryOperation::Lock => "lock",
83 GuestMemoryOperation::Subrange => "subrange",
84 GuestMemoryOperation::Probe => "probe",
85 })
86 }
87}
88
89#[derive(Debug, Error)]
90struct GuestMemoryErrorInner {
91 op: GuestMemoryOperation,
92 debug_name: Arc<str>,
93 range: Option<Range<u64>>,
94 gpa: Option<u64>,
95 kind: GuestMemoryErrorKind,
96 #[source]
97 err: Box<dyn std::error::Error + Send + Sync>,
98}
99
100impl std::fmt::Display for GuestMemoryErrorInner {
101 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
102 write!(
103 f,
104 "guest memory '{debug_name}': {op} error: failed to access ",
105 debug_name = self.debug_name,
106 op = self.op
107 )?;
108 if let Some(range) = &self.range {
109 write!(f, "{:#x}-{:#x}", range.start, range.end)?;
110 } else {
111 f.write_str("memory")?;
112 }
113 if let Some(gpa) = self.gpa {
116 if self.range.as_ref().is_none_or(|range| range.start != gpa) {
117 write!(f, " at {:#x}", gpa)?;
118 }
119 }
120 Ok(())
121 }
122}
123
124#[derive(Debug)]
126pub struct GuestMemoryBackingError {
127 gpa: u64,
128 kind: GuestMemoryErrorKind,
129 err: Box<dyn std::error::Error + Send + Sync>,
130}
131
132#[derive(Debug, Copy, Clone, PartialEq, Eq)]
134#[non_exhaustive]
135pub enum GuestMemoryErrorKind {
136 Other,
138 OutOfRange,
140 VtlProtected,
142 NotPrivate,
144 NotShared,
146}
147
148pub struct PageFaultError {
150 kind: GuestMemoryErrorKind,
151 err: Box<dyn std::error::Error + Send + Sync>,
152}
153
154impl PageFaultError {
155 pub fn new(
157 kind: GuestMemoryErrorKind,
158 err: impl Into<Box<dyn std::error::Error + Send + Sync>>,
159 ) -> Self {
160 Self {
161 kind,
162 err: err.into(),
163 }
164 }
165
166 pub fn other(err: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> Self {
168 Self::new(GuestMemoryErrorKind::Other, err)
169 }
170}
171
172const INVALID_ERROR_GPA: u64 = !0;
175
176impl GuestMemoryBackingError {
177 pub fn new(
179 kind: GuestMemoryErrorKind,
180 gpa: u64,
181 err: impl Into<Box<dyn std::error::Error + Send + Sync>>,
182 ) -> Self {
183 Self {
187 kind,
188 gpa,
189 err: err.into(),
190 }
191 }
192
193 pub fn other(gpa: u64, err: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> Self {
195 Self::new(GuestMemoryErrorKind::Other, gpa, err)
196 }
197
198 fn gpn(err: InvalidGpn) -> Self {
199 Self {
200 kind: GuestMemoryErrorKind::OutOfRange,
201 gpa: INVALID_ERROR_GPA,
202 err: err.into(),
203 }
204 }
205}
206
207#[derive(Debug, Error)]
208#[error("no memory at address")]
209struct OutOfRange;
210
211#[derive(Debug, Error)]
212#[error("memory not lockable")]
213struct NotLockable;
214
215#[derive(Debug, Error)]
216#[error("no fallback for this operation")]
217struct NoFallback;
218
219#[derive(Debug, Error)]
220#[error("the specified page is not mapped")]
221struct NotMapped;
222
223#[derive(Debug, Error)]
224#[error("page inaccessible in bitmap")]
225struct BitmapFailure;
226
227pub trait LinearGuestMemory: GuestMemoryAccess {}
236
237unsafe impl GuestMemoryAccess for sparse_mmap::alloc::SharedMem {
239 fn mapping(&self) -> Option<NonNull<u8>> {
240 NonNull::new(self.as_ptr().cast_mut().cast())
241 }
242
243 fn max_address(&self) -> u64 {
244 self.len() as u64
245 }
246}
247
248impl LinearGuestMemory for sparse_mmap::alloc::SharedMem {}
249
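/// A page-aligned, heap-allocated buffer that can back a [`GuestMemory`].
///
/// Illustrative sketch (not compiled as a doctest); the buffer is rounded up
/// to a whole number of 4KiB pages and starts out zeroed:
///
/// ```ignore
/// let mut mem = AlignedHeapMemory::new(8192);
/// assert_eq!(mem.len(), 2 * PAGE_SIZE);
/// mem.as_mut_bytes()[0] = 0xff;
/// ```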
250pub struct AlignedHeapMemory {
252 pages: Box<[AlignedPage]>,
253}
254
255impl Debug for AlignedHeapMemory {
256 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
257 f.debug_struct("AlignedHeapMemory")
258 .field("len", &self.len())
259 .finish()
260 }
261}
262
263#[repr(C, align(4096))]
264struct AlignedPage([AtomicU8; PAGE_SIZE]);
265
266impl AlignedHeapMemory {
267 pub fn new(size: usize) -> Self {
        #[expect(clippy::declare_interior_mutable_const)]
        const ZERO: AtomicU8 = AtomicU8::new(0);
271 #[expect(clippy::declare_interior_mutable_const)]
272 const ZERO_PAGE: AlignedPage = AlignedPage([ZERO; PAGE_SIZE]);
273 let mut pages = Vec::new();
274 pages.resize_with(size.div_ceil(PAGE_SIZE), || ZERO_PAGE);
275 Self {
276 pages: pages.into(),
277 }
278 }
279
280 pub fn len(&self) -> usize {
282 self.pages.len() * PAGE_SIZE
283 }
284
    /// Returns the memory as a byte slice.
    ///
    /// This takes `&mut self` so that the caller holds exclusive access,
    /// guaranteeing the contents cannot change out from under the returned
    /// slice.
    pub fn as_bytes(&mut self) -> &[u8] {
        self.as_mut()
    }

    /// Returns the memory as a mutable byte slice.
    pub fn as_mut_bytes(&mut self) -> &mut [u8] {
        self.as_mut()
    }
297}
298
299impl Deref for AlignedHeapMemory {
300 type Target = [AtomicU8];
301
302 fn deref(&self) -> &Self::Target {
303 unsafe { std::slice::from_raw_parts(self.pages.as_ptr().cast(), self.len()) }
305 }
306}
307
308impl DerefMut for AlignedHeapMemory {
309 fn deref_mut(&mut self) -> &mut Self::Target {
310 unsafe { std::slice::from_raw_parts_mut(self.pages.as_mut_ptr().cast(), self.len()) }
312 }
313}
314
315impl AsRef<[AtomicU8]> for AlignedHeapMemory {
316 fn as_ref(&self) -> &[AtomicU8] {
317 self
318 }
319}
320
321impl AsMut<[AtomicU8]> for AlignedHeapMemory {
322 fn as_mut(&mut self) -> &mut [AtomicU8] {
323 self
324 }
325}
326
327impl AsMut<[u8]> for AlignedHeapMemory {
328 fn as_mut(&mut self) -> &mut [u8] {
329 unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr().cast(), self.len()) }
333 }
334}
335
336unsafe impl GuestMemoryAccess for AlignedHeapMemory {
339 fn mapping(&self) -> Option<NonNull<u8>> {
340 NonNull::new(self.pages.as_ptr().cast_mut().cast())
341 }
342
343 fn max_address(&self) -> u64 {
344 (self.pages.len() * PAGE_SIZE) as u64
345 }
346}
347
348impl LinearGuestMemory for AlignedHeapMemory {}
349
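/// Trait implemented by backings of a [`GuestMemory`].
///
/// # Safety
///
/// `GuestMemory` reads and writes guest memory through the pointer returned by
/// `mapping()` for offsets up to `max_address()`, so an implementation must
/// keep that mapping valid for its own lifetime; access failures are routed to
/// `page_fault` and, if it returns `Fallback`, to the `*_fallback` methods.
///
/// Illustrative sketch (not a doctest) of a backing that delegates to an
/// [`AlignedHeapMemory`] and reports a hypothetical, fixed IOVA base:
///
/// ```ignore
/// struct DmaHeap(AlignedHeapMemory);
///
/// // SAFETY: the mapping and length are delegated to `AlignedHeapMemory`,
/// // which stays allocated for as long as `DmaHeap` is alive.
/// unsafe impl GuestMemoryAccess for DmaHeap {
///     fn mapping(&self) -> Option<NonNull<u8>> {
///         self.0.mapping()
///     }
///     fn max_address(&self) -> u64 {
///         self.0.max_address()
///     }
///     fn base_iova(&self) -> Option<u64> {
///         Some(0x1000_0000) // hypothetical device-visible base address
///     }
/// }
/// ```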
350pub unsafe trait GuestMemoryAccess: 'static + Send + Sync {
366 fn mapping(&self) -> Option<NonNull<u8>>;
373
374 fn max_address(&self) -> u64;
377
378 #[cfg(feature = "bitmap")]
396 fn access_bitmap(&self) -> Option<BitmapInfo> {
397 None
398 }
399
400 fn subrange(
403 &self,
404 offset: u64,
405 len: u64,
406 allow_preemptive_locking: bool,
407 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
408 let _ = (offset, len, allow_preemptive_locking);
409 Ok(None)
410 }
411
412 fn page_fault(
422 &self,
423 address: u64,
424 len: usize,
425 write: bool,
426 bitmap_failure: bool,
427 ) -> PageFaultAction {
428 let _ = (address, len, write);
429 let err = if bitmap_failure {
430 PageFaultError::other(BitmapFailure)
431 } else {
432 PageFaultError::other(NotMapped)
433 };
434 PageFaultAction::Fail(err)
435 }
436
437 unsafe fn read_fallback(
450 &self,
451 addr: u64,
452 dest: *mut u8,
453 len: usize,
454 ) -> Result<(), GuestMemoryBackingError> {
455 let _ = (dest, len);
456 Err(GuestMemoryBackingError::other(addr, NoFallback))
457 }
458
459 unsafe fn write_fallback(
469 &self,
470 addr: u64,
471 src: *const u8,
472 len: usize,
473 ) -> Result<(), GuestMemoryBackingError> {
474 let _ = (src, len);
475 Err(GuestMemoryBackingError::other(addr, NoFallback))
476 }
477
478 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
483 let _ = (val, len);
484 Err(GuestMemoryBackingError::other(addr, NoFallback))
485 }
486
487 fn compare_exchange_fallback(
494 &self,
495 addr: u64,
496 current: &mut [u8],
497 new: &[u8],
498 ) -> Result<bool, GuestMemoryBackingError> {
499 let _ = (current, new);
500 Err(GuestMemoryBackingError::other(addr, NoFallback))
501 }
502
503 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
509 let _ = (address, len);
510 Ok(())
511 }
512
513 fn base_iova(&self) -> Option<u64> {
519 None
520 }
521}
522
523trait DynGuestMemoryAccess: 'static + Send + Sync + Any {
524 fn subrange(
525 &self,
526 offset: u64,
527 len: u64,
528 allow_preemptive_locking: bool,
529 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError>;
530
531 fn page_fault(
532 &self,
533 address: u64,
534 len: usize,
535 write: bool,
536 bitmap_failure: bool,
537 ) -> PageFaultAction;
538
539 unsafe fn read_fallback(
542 &self,
543 addr: u64,
544 dest: *mut u8,
545 len: usize,
546 ) -> Result<(), GuestMemoryBackingError>;
547
548 unsafe fn write_fallback(
551 &self,
552 addr: u64,
553 src: *const u8,
554 len: usize,
555 ) -> Result<(), GuestMemoryBackingError>;
556
557 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError>;
558
559 fn compare_exchange_fallback(
560 &self,
561 addr: u64,
562 current: &mut [u8],
563 new: &[u8],
564 ) -> Result<bool, GuestMemoryBackingError>;
565
566 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError>;
567}
568
569impl<T: GuestMemoryAccess> DynGuestMemoryAccess for T {
570 fn subrange(
571 &self,
572 offset: u64,
573 len: u64,
574 allow_preemptive_locking: bool,
575 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
576 self.subrange(offset, len, allow_preemptive_locking)
577 }
578
579 fn page_fault(
580 &self,
581 address: u64,
582 len: usize,
583 write: bool,
584 bitmap_failure: bool,
585 ) -> PageFaultAction {
586 self.page_fault(address, len, write, bitmap_failure)
587 }
588
589 unsafe fn read_fallback(
590 &self,
591 addr: u64,
592 dest: *mut u8,
593 len: usize,
594 ) -> Result<(), GuestMemoryBackingError> {
595 unsafe { self.read_fallback(addr, dest, len) }
597 }
598
599 unsafe fn write_fallback(
600 &self,
601 addr: u64,
602 src: *const u8,
603 len: usize,
604 ) -> Result<(), GuestMemoryBackingError> {
605 unsafe { self.write_fallback(addr, src, len) }
607 }
608
609 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
610 self.fill_fallback(addr, val, len)
611 }
612
613 fn compare_exchange_fallback(
614 &self,
615 addr: u64,
616 current: &mut [u8],
617 new: &[u8],
618 ) -> Result<bool, GuestMemoryBackingError> {
619 self.compare_exchange_fallback(addr, current, new)
620 }
621
622 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
623 self.expose_va(address, len)
624 }
625}
626
627pub enum PageFaultAction {
630 Fail(PageFaultError),
632 Retry,
634 Fallback,
636}
637
638#[cfg(feature = "bitmap")]
640pub struct BitmapInfo {
641 pub read_bitmap: NonNull<u8>,
643 pub write_bitmap: NonNull<u8>,
645 pub bit_offset: u8,
650}
651
652unsafe impl<T: GuestMemoryAccess> GuestMemoryAccess for Arc<T> {
654 fn mapping(&self) -> Option<NonNull<u8>> {
655 self.as_ref().mapping()
656 }
657
658 fn max_address(&self) -> u64 {
659 self.as_ref().max_address()
660 }
661
662 #[cfg(feature = "bitmap")]
663 fn access_bitmap(&self) -> Option<BitmapInfo> {
664 self.as_ref().access_bitmap()
665 }
666
667 fn subrange(
668 &self,
669 offset: u64,
670 len: u64,
671 allow_preemptive_locking: bool,
672 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
673 self.as_ref()
674 .subrange(offset, len, allow_preemptive_locking)
675 }
676
677 fn page_fault(
678 &self,
679 addr: u64,
680 len: usize,
681 write: bool,
682 bitmap_failure: bool,
683 ) -> PageFaultAction {
684 self.as_ref().page_fault(addr, len, write, bitmap_failure)
685 }
686
687 unsafe fn read_fallback(
688 &self,
689 addr: u64,
690 dest: *mut u8,
691 len: usize,
692 ) -> Result<(), GuestMemoryBackingError> {
693 unsafe { self.as_ref().read_fallback(addr, dest, len) }
695 }
696
697 unsafe fn write_fallback(
698 &self,
699 addr: u64,
700 src: *const u8,
701 len: usize,
702 ) -> Result<(), GuestMemoryBackingError> {
703 unsafe { self.as_ref().write_fallback(addr, src, len) }
705 }
706
707 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
708 self.as_ref().fill_fallback(addr, val, len)
709 }
710
711 fn compare_exchange_fallback(
712 &self,
713 addr: u64,
714 current: &mut [u8],
715 new: &[u8],
716 ) -> Result<bool, GuestMemoryBackingError> {
717 self.as_ref().compare_exchange_fallback(addr, current, new)
718 }
719
720 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
721 self.as_ref().expose_va(address, len)
722 }
723
724 fn base_iova(&self) -> Option<u64> {
725 self.as_ref().base_iova()
726 }
727}
728
729unsafe impl GuestMemoryAccess for sparse_mmap::SparseMapping {
731 fn mapping(&self) -> Option<NonNull<u8>> {
732 NonNull::new(self.as_ptr().cast())
733 }
734
735 fn max_address(&self) -> u64 {
736 self.len() as u64
737 }
738}
739
740struct GuestMemoryAccessRange {
742 base: Arc<GuestMemoryInner>,
743 offset: u64,
744 len: u64,
745 region: usize,
746}
747
748impl GuestMemoryAccessRange {
749 fn adjust_range(&self, address: u64, len: u64) -> Result<u64, GuestMemoryBackingError> {
750 if address <= self.len && len <= self.len - address {
751 Ok(self.offset + address)
752 } else {
753 Err(GuestMemoryBackingError::new(
754 GuestMemoryErrorKind::OutOfRange,
755 address,
756 OutOfRange,
757 ))
758 }
759 }
760}
761
762unsafe impl GuestMemoryAccess for GuestMemoryAccessRange {
764 fn mapping(&self) -> Option<NonNull<u8>> {
765 let region = &self.base.regions[self.region];
766 region.mapping.and_then(|mapping| {
767 let offset = self.offset & self.base.region_def.region_mask;
768 assert!(region.len >= offset + self.len);
770 NonNull::new(unsafe { mapping.0.as_ptr().add(offset as usize) })
773 })
774 }
775
776 fn max_address(&self) -> u64 {
777 self.len
778 }
779
780 #[cfg(feature = "bitmap")]
781 fn access_bitmap(&self) -> Option<BitmapInfo> {
782 let region = &self.base.regions[self.region];
783 region.bitmaps.map(|bitmaps| {
784 let offset = self.offset & self.base.region_def.region_mask;
785 let bit_offset = region.bitmap_start as u64 + offset / PAGE_SIZE64;
786 let [read_bitmap, write_bitmap] = bitmaps.map(|SendPtrU8(ptr)| {
787 NonNull::new(unsafe { ptr.as_ptr().add((bit_offset / 8) as usize) }).unwrap()
790 });
791 let bitmap_start = (bit_offset % 8) as u8;
792 BitmapInfo {
793 read_bitmap,
794 write_bitmap,
795 bit_offset: bitmap_start,
796 }
797 })
798 }
799
800 fn subrange(
801 &self,
802 offset: u64,
803 len: u64,
804 _allow_preemptive_locking: bool,
805 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
806 let address = self.adjust_range(offset, len)?;
807 Ok(Some(GuestMemory::new(
808 self.base.debug_name.clone(),
809 GuestMemoryAccessRange {
810 base: self.base.clone(),
811 offset: address,
812 len,
813 region: self.region,
814 },
815 )))
816 }
817
818 fn page_fault(
819 &self,
820 address: u64,
821 len: usize,
822 write: bool,
823 bitmap_failure: bool,
824 ) -> PageFaultAction {
825 let address = self
826 .adjust_range(address, len as u64)
827 .expect("the caller should have validated the range was in the mapping");
828
829 self.base
830 .imp
831 .page_fault(address, len, write, bitmap_failure)
832 }
833
834 unsafe fn write_fallback(
835 &self,
836 address: u64,
837 src: *const u8,
838 len: usize,
839 ) -> Result<(), GuestMemoryBackingError> {
840 let address = self.adjust_range(address, len as u64)?;
841 unsafe { self.base.imp.write_fallback(address, src, len) }
843 }
844
845 fn fill_fallback(
846 &self,
847 address: u64,
848 val: u8,
849 len: usize,
850 ) -> Result<(), GuestMemoryBackingError> {
851 let address = self.adjust_range(address, len as u64)?;
852 self.base.imp.fill_fallback(address, val, len)
853 }
854
855 fn compare_exchange_fallback(
856 &self,
857 addr: u64,
858 current: &mut [u8],
859 new: &[u8],
860 ) -> Result<bool, GuestMemoryBackingError> {
861 let address = self.adjust_range(addr, new.len() as u64)?;
862 self.base
863 .imp
864 .compare_exchange_fallback(address, current, new)
865 }
866
867 unsafe fn read_fallback(
868 &self,
869 address: u64,
870 dest: *mut u8,
871 len: usize,
872 ) -> Result<(), GuestMemoryBackingError> {
873 let address = self.adjust_range(address, len as u64)?;
874 unsafe { self.base.imp.read_fallback(address, dest, len) }
876 }
877
878 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
879 let address = self.adjust_range(address, len)?;
880 self.base.imp.expose_va(address, len)
881 }
882
883 fn base_iova(&self) -> Option<u64> {
884 let region = &self.base.regions[self.region];
885 Some(region.base_iova? + (self.offset & self.base.region_def.region_mask))
886 }
887}
888
889fn create_memory_subrange(
892 base: Arc<GuestMemoryInner>,
893 offset: u64,
894 len: u64,
895 _allow_preemptive_locking: bool,
896) -> Result<GuestMemory, GuestMemoryBackingError> {
897 let (_, _, region) = base.region(offset, len)?;
898 Ok(GuestMemory::new(
899 base.debug_name.clone(),
900 GuestMemoryAccessRange {
901 base,
902 offset,
903 len,
904 region,
905 },
906 ))
907}
908
909struct MultiRegionGuestMemoryAccess<T> {
910 imps: Vec<Option<T>>,
911 region_def: RegionDefinition,
912}
913
914impl<T> MultiRegionGuestMemoryAccess<T> {
915 fn region(&self, gpa: u64, len: u64) -> Result<(&T, u64), GuestMemoryBackingError> {
916 let (i, offset) = self.region_def.region(gpa, len)?;
917 let imp = self.imps[i].as_ref().ok_or(GuestMemoryBackingError::new(
918 GuestMemoryErrorKind::OutOfRange,
919 gpa,
920 OutOfRange,
921 ))?;
922 Ok((imp, offset))
923 }
924}
925
926impl<T: GuestMemoryAccess> DynGuestMemoryAccess for MultiRegionGuestMemoryAccess<T> {
928 fn subrange(
929 &self,
930 offset: u64,
931 len: u64,
932 allow_preemptive_locking: bool,
933 ) -> Result<Option<GuestMemory>, GuestMemoryBackingError> {
934 let (region, offset_in_region) = self.region(offset, len)?;
935 region.subrange(offset_in_region, len, allow_preemptive_locking)
936 }
937
938 unsafe fn read_fallback(
939 &self,
940 addr: u64,
941 dest: *mut u8,
942 len: usize,
943 ) -> Result<(), GuestMemoryBackingError> {
944 let (region, offset_in_region) = self.region(addr, len as u64)?;
945 unsafe { region.read_fallback(offset_in_region, dest, len) }
947 }
948
949 unsafe fn write_fallback(
950 &self,
951 addr: u64,
952 src: *const u8,
953 len: usize,
954 ) -> Result<(), GuestMemoryBackingError> {
955 let (region, offset_in_region) = self.region(addr, len as u64)?;
956 unsafe { region.write_fallback(offset_in_region, src, len) }
958 }
959
960 fn fill_fallback(&self, addr: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
961 let (region, offset_in_region) = self.region(addr, len as u64)?;
962 region.fill_fallback(offset_in_region, val, len)
963 }
964
965 fn compare_exchange_fallback(
966 &self,
967 addr: u64,
968 current: &mut [u8],
969 new: &[u8],
970 ) -> Result<bool, GuestMemoryBackingError> {
971 let (region, offset_in_region) = self.region(addr, new.len() as u64)?;
972 region.compare_exchange_fallback(offset_in_region, current, new)
973 }
974
975 fn expose_va(&self, address: u64, len: u64) -> Result<(), GuestMemoryBackingError> {
976 let (region, offset_in_region) = self.region(address, len)?;
977 region.expose_va(offset_in_region, len)
978 }
979
980 fn page_fault(
981 &self,
982 address: u64,
983 len: usize,
984 write: bool,
985 bitmap_failure: bool,
986 ) -> PageFaultAction {
987 match self.region(address, len as u64) {
988 Ok((region, offset_in_region)) => {
989 region.page_fault(offset_in_region, len, write, bitmap_failure)
990 }
991 Err(err) => PageFaultAction::Fail(PageFaultError {
992 kind: err.kind,
993 err: err.err,
994 }),
995 }
996 }
997}
998
999#[derive(Debug, Clone, Inspect)]
1005#[inspect(skip)]
1006pub struct GuestMemory {
1007 inner: Arc<GuestMemoryInner>,
1008}
1009
1010struct GuestMemoryInner<T: ?Sized = dyn DynGuestMemoryAccess> {
1011 region_def: RegionDefinition,
1012 regions: Vec<MemoryRegion>,
1013 debug_name: Arc<str>,
1014 allocated: bool,
1015 imp: T,
1016}
1017
1018impl<T: ?Sized> Debug for GuestMemoryInner<T> {
1019 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1020 f.debug_struct("GuestMemoryInner")
1021 .field("region_def", &self.region_def)
1022 .field("regions", &self.regions)
1023 .finish()
1024 }
1025}
1026
1027#[derive(Debug, Copy, Clone, Default)]
1028struct MemoryRegion {
1029 mapping: Option<SendPtrU8>,
1030 #[cfg(feature = "bitmap")]
1031 bitmaps: Option<[SendPtrU8; 2]>,
1032 #[cfg(feature = "bitmap")]
1033 bitmap_start: u8,
1034 len: u64,
1035 base_iova: Option<u64>,
1036}
1037
1038#[derive(Debug, Copy, Clone, PartialEq, Eq)]
1040enum AccessType {
1041 Read = 0,
1042 Write = 1,
1043}
1044
1045#[derive(Debug, Copy, Clone)]
1057struct SendPtrU8(NonNull<u8>);
1058
1059unsafe impl Send for SendPtrU8 {}
1061unsafe impl Sync for SendPtrU8 {}
1063
1064impl MemoryRegion {
1065 fn new(imp: &impl GuestMemoryAccess) -> Self {
1066 #[cfg(feature = "bitmap")]
1067 let (bitmaps, bitmap_start) = {
1068 let bitmap_info = imp.access_bitmap();
1069 let bitmaps = bitmap_info
1070 .as_ref()
1071 .map(|bm| [SendPtrU8(bm.read_bitmap), SendPtrU8(bm.write_bitmap)]);
1072 let bitmap_start = bitmap_info.map_or(0, |bi| bi.bit_offset);
1073 (bitmaps, bitmap_start)
1074 };
1075 Self {
1076 mapping: imp.mapping().map(SendPtrU8),
1077 #[cfg(feature = "bitmap")]
1078 bitmaps,
1079 #[cfg(feature = "bitmap")]
1080 bitmap_start,
1081 len: imp.max_address(),
1082 base_iova: imp.base_iova(),
1083 }
1084 }
1085
1086 unsafe fn check_access(
1092 &self,
1093 access_type: AccessType,
1094 offset: u64,
1095 len: u64,
1096 ) -> Result<(), u64> {
1097 debug_assert!(self.len >= offset + len);
1098 #[cfg(not(feature = "bitmap"))]
1099 let _ = access_type;
1100
1101 #[cfg(feature = "bitmap")]
1102 if let Some(bitmaps) = &self.bitmaps {
1103 let SendPtrU8(bitmap) = bitmaps[access_type as usize];
1104 let start = offset / PAGE_SIZE64;
1105 let end = (offset + len - 1) / PAGE_SIZE64;
1106 for gpn in start..=end {
1109 let bit_offset = self.bitmap_start as u64 + gpn;
1110 let bit = unsafe {
1113 (*bitmap
1114 .as_ptr()
1115 .cast_const()
1116 .cast::<AtomicU8>()
1117 .add(bit_offset as usize / 8))
1118 .load(std::sync::atomic::Ordering::Relaxed)
1119 & (1 << (bit_offset % 8))
1120 };
1121 if bit == 0 {
1122 return Err((gpn * PAGE_SIZE64).saturating_sub(offset));
1123 }
1124 }
1125 }
1126 Ok(())
1127 }
1128}
1129
1130impl Default for GuestMemory {
1132 fn default() -> Self {
1133 Self::empty()
1134 }
1135}
1136
1137struct Empty;
1138
1139unsafe impl GuestMemoryAccess for Empty {
1141 fn mapping(&self) -> Option<NonNull<u8>> {
1142 None
1143 }
1144
1145 fn max_address(&self) -> u64 {
1146 0
1147 }
1148}
1149
1150#[derive(Debug, Error)]
1151pub enum MultiRegionError {
1152 #[error("region size {0:#x} is not a power of 2")]
1153 NotPowerOfTwo(u64),
1154 #[error("region size {0:#x} is smaller than a page")]
1155 RegionSizeTooSmall(u64),
1156 #[error(
1157 "too many regions ({region_count}) for region size {region_size:#x}; max is {max_region_count}"
1158 )]
1159 TooManyRegions {
1160 region_count: usize,
1161 max_region_count: usize,
1162 region_size: u64,
1163 },
1164 #[error("backing size {backing_size:#x} is too large for region size {region_size:#x}")]
1165 BackingTooLarge { backing_size: u64, region_size: u64 },
1166}
1167
1168#[cfg(feature = "bitmap")]
1176pub fn rcu() -> minircu::RcuDomain {
1177 minircu::global()
1179}
1180
1181impl GuestMemory {
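    /// Creates a new `GuestMemory` over the given backing.
    ///
    /// Illustrative sketch (not a doctest); `backing` stands in for any
    /// `GuestMemoryAccess` implementation:
    ///
    /// ```ignore
    /// let gm = GuestMemory::new("vm-ram", backing);
    /// gm.write_at(0, &[1, 2, 3, 4])?;
    /// ```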
1182 pub fn new(debug_name: impl Into<Arc<str>>, imp: impl GuestMemoryAccess) -> Self {
1187 if imp.mapping().is_some() && !cfg!(miri) {
1192 sparse_mmap::initialize_try_copy();
1193 }
1194 Self::new_inner(debug_name.into(), imp, false)
1195 }
1196
1197 fn new_inner(debug_name: Arc<str>, imp: impl GuestMemoryAccess, allocated: bool) -> Self {
1198 let regions = vec![MemoryRegion::new(&imp)];
1199 Self {
1200 inner: Arc::new(GuestMemoryInner {
1201 imp,
1202 debug_name,
1203 region_def: RegionDefinition {
1204 invalid_mask: 1 << 63,
1205 region_mask: !0 >> 1,
                    region_bits: 63,
                },
1208 regions,
1209 allocated,
1210 }),
1211 }
1212 }
1213
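    /// Creates a `GuestMemory` from multiple fixed-size regions, where entry
    /// `i` of `imps` backs guest addresses starting at `i * region_size` and
    /// `None` entries leave holes.
    ///
    /// Illustrative sketch (not a doctest), mirroring the `test_multi` test at
    /// the bottom of this file; `low_ram` and `high_ram` are placeholder
    /// backings:
    ///
    /// ```ignore
    /// let gm = GuestMemory::new_multi_region(
    ///     "vm",
    ///     1 << 30, // 1 GiB per region
    ///     vec![Some(low_ram), None, Some(high_ram)],
    /// )?;
    /// ```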
1214 pub fn new_multi_region(
1230 debug_name: impl Into<Arc<str>>,
1231 region_size: u64,
1232 mut imps: Vec<Option<impl GuestMemoryAccess>>,
1233 ) -> Result<Self, MultiRegionError> {
1234 sparse_mmap::initialize_try_copy();
1236
1237 if !region_size.is_power_of_two() {
1238 return Err(MultiRegionError::NotPowerOfTwo(region_size));
1239 }
1240 if region_size < PAGE_SIZE64 {
1241 return Err(MultiRegionError::RegionSizeTooSmall(region_size));
1242 }
1243 let region_bits = region_size.trailing_zeros();
1244
1245 let max_region_count = 1 << (63 - region_bits);
1246
1247 let region_count = imps.len().next_power_of_two();
1248 if region_count > max_region_count {
1249 return Err(MultiRegionError::TooManyRegions {
1250 region_count,
1251 max_region_count,
1252 region_size,
1253 });
1254 }
1255
1256 let valid_bits = region_bits + region_count.trailing_zeros();
1257 assert!(valid_bits < 64);
1258 let invalid_mask = !0 << valid_bits;
1259
1260 let mut regions = vec![MemoryRegion::default(); region_count];
1261 for (imp, region) in imps.iter().zip(&mut regions) {
1262 let Some(imp) = imp else { continue };
1263 let backing_size = imp.max_address();
1264 if backing_size > region_size {
1265 return Err(MultiRegionError::BackingTooLarge {
1266 backing_size,
1267 region_size,
1268 });
1269 }
1270 *region = MemoryRegion::new(imp);
1271 }
1272
1273 let region_def = RegionDefinition {
1274 invalid_mask,
1275 region_mask: region_size - 1,
1276 region_bits,
1277 };
1278
1279 imps.resize_with(region_count, || None);
1280 let imp = MultiRegionGuestMemoryAccess { imps, region_def };
1281
1282 let inner = GuestMemoryInner {
1283 debug_name: debug_name.into(),
1284 region_def,
1285 regions,
1286 imp,
1287 allocated: false,
1288 };
1289
1290 Ok(Self {
1291 inner: Arc::new(inner),
1292 })
1293 }
1294
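    /// Allocates a zeroed, heap-backed `GuestMemory` of `size` bytes (rounded
    /// up to a whole number of pages), useful for tests and emulation.
    ///
    /// Illustrative sketch (not a doctest):
    ///
    /// ```ignore
    /// let gm = GuestMemory::allocate(0x2000);
    /// gm.write_plain(0x10, &0x1234u32)?;
    /// assert_eq!(gm.read_plain::<u32>(0x10)?, 0x1234);
    /// ```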
1295 pub fn allocate(size: usize) -> Self {
1305 Self::new_inner("heap".into(), AlignedHeapMemory::new(size), true)
1306 }
1307
1308 pub fn into_inner_buf(self) -> Result<AlignedHeapMemory, Self> {
1314 if !self.inner.allocated {
1315 return Err(self);
1316 }
1317 let inner = unsafe {
1320 Arc::<GuestMemoryInner<AlignedHeapMemory>>::from_raw(Arc::into_raw(self.inner).cast())
1321 };
1322 let inner = Arc::try_unwrap(inner).map_err(|inner| Self { inner })?;
1323 Ok(inner.imp)
1324 }
1325
1326 pub fn inner_buf(&self) -> Option<&[AtomicU8]> {
1329 if !self.inner.allocated {
1330 return None;
1331 }
1332 let inner = unsafe { &*core::ptr::from_ref(&self.inner.imp).cast::<AlignedHeapMemory>() };
1335 Some(inner)
1336 }
1337
1338 pub fn inner_buf_mut(&mut self) -> Option<&mut [u8]> {
1342 if !self.inner.allocated {
1343 return None;
1344 }
1345 let inner = Arc::get_mut(&mut self.inner)?;
1346 let imp = unsafe { &mut *core::ptr::from_mut(&mut inner.imp).cast::<AlignedHeapMemory>() };
1349 Some(imp.as_mut())
1350 }
1351
1352 pub fn empty() -> Self {
1354 GuestMemory::new("empty", Empty)
1355 }
1356
1357 fn wrap_err(
1358 &self,
1359 gpa_len: Option<(u64, u64)>,
1360 op: GuestMemoryOperation,
1361 err: GuestMemoryBackingError,
1362 ) -> GuestMemoryError {
1363 let range = gpa_len.map(|(gpa, len)| (gpa..gpa.wrapping_add(len)));
1364 GuestMemoryError::new(&self.inner.debug_name, range, op, err)
1365 }
1366
1367 fn with_op<T>(
1368 &self,
1369 gpa_len: Option<(u64, u64)>,
1370 op: GuestMemoryOperation,
1371 f: impl FnOnce() -> Result<T, GuestMemoryBackingError>,
1372 ) -> Result<T, GuestMemoryError> {
1373 f().map_err(|err| self.wrap_err(gpa_len, op, err))
1374 }
1375
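    /// Returns a `GuestMemory` that exposes only `offset..offset + len` of
    /// this one, with addresses rebased to start at zero.
    ///
    /// Illustrative sketch (not a doctest), assuming `gm` covers at least two
    /// pages:
    ///
    /// ```ignore
    /// let ring = gm.subrange(0x1000, 0x1000, false)?;
    /// ring.write_at(0, &[0u8; 16])?; // writes gm's bytes at 0x1000..0x1010
    /// ```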
1376 pub fn subrange(
1381 &self,
1382 offset: u64,
1383 len: u64,
1384 allow_preemptive_locking: bool,
1385 ) -> Result<GuestMemory, GuestMemoryError> {
1386 self.with_op(Some((offset, len)), GuestMemoryOperation::Subrange, || {
1387 if let Some(guest_memory) =
1388 self.inner
1389 .imp
1390 .subrange(offset, len, allow_preemptive_locking)?
1391 {
1392 Ok(guest_memory)
1393 } else {
1394 create_memory_subrange(self.inner.clone(), offset, len, allow_preemptive_locking)
1395 }
1396 })
1397 }
1398
1399 pub fn lockable_subrange(
1401 &self,
1402 offset: u64,
1403 len: u64,
1404 ) -> Result<GuestMemory, GuestMemoryError> {
1405 self.subrange(offset, len, true)
1407 }
1408
1409 pub fn full_mapping(&self) -> Option<(*mut u8, usize)> {
1414 if let [region] = self.inner.regions.as_slice() {
1415 #[cfg(feature = "bitmap")]
1416 if region.bitmaps.is_some() {
1417 return None;
1418 }
1419 region
1420 .mapping
1421 .map(|SendPtrU8(ptr)| (ptr.as_ptr(), region.len as usize))
1422 } else {
1423 None
1424 }
1425 }
1426
1427 pub fn iova(&self, gpa: u64) -> Option<u64> {
1430 let (region, offset, _) = self.inner.region(gpa, 1).ok()?;
1431 Some(region.base_iova? + offset)
1432 }
1433
1434 fn mapping_range(
1439 &self,
1440 access_type: AccessType,
1441 gpa: u64,
1442 len: usize,
1443 ) -> Result<Option<*mut u8>, GuestMemoryBackingError> {
1444 let (region, offset, _) = self.inner.region(gpa, len as u64)?;
1445 if let Some(SendPtrU8(ptr)) = region.mapping {
1446 loop {
1447 let fault_offset = unsafe {
1449 match region.check_access(access_type, offset, len as u64) {
1450 Ok(()) => return Ok(Some(ptr.as_ptr().add(offset as usize))),
1451 Err(n) => n,
1452 }
1453 };
1454
1455 match self.inner.imp.page_fault(
1457 gpa + fault_offset,
1458 len - fault_offset as usize,
1459 access_type == AccessType::Write,
1460 true,
1461 ) {
1462 PageFaultAction::Fail(err) => {
1463 return Err(GuestMemoryBackingError::new(
1464 err.kind,
1465 gpa + fault_offset,
1466 err.err,
1467 ));
1468 }
1469 PageFaultAction::Retry => {}
1470 PageFaultAction::Fallback => break,
1471 }
1472 }
1473 }
1474 Ok(None)
1475 }
1476
1477 fn run_on_mapping<T, P>(
1484 &self,
1485 access_type: AccessType,
1486 gpa: u64,
1487 len: usize,
1488 mut param: P,
1489 mut f: impl FnMut(&mut P, *mut u8) -> Result<T, sparse_mmap::MemoryError>,
1490 fallback: impl FnOnce(&mut P) -> Result<T, GuestMemoryBackingError>,
1491 ) -> Result<T, GuestMemoryBackingError> {
1492 let op = || {
1493 let Some(mapping) = self.mapping_range(access_type, gpa, len)? else {
1494 return fallback(&mut param);
1495 };
1496
1497 loop {
1499 match f(&mut param, mapping) {
1500 Ok(t) => return Ok(t),
1501 Err(fault) => {
1502 match self.inner.imp.page_fault(
1503 gpa + fault.offset() as u64,
1504 len - fault.offset(),
1505 access_type == AccessType::Write,
1506 false,
1507 ) {
1508 PageFaultAction::Fail(err) => {
1509 return Err(GuestMemoryBackingError::new(
1510 err.kind,
1511 gpa + fault.offset() as u64,
1512 err.err,
1513 ));
1514 }
1515 PageFaultAction::Retry => {}
1516 PageFaultAction::Fallback => return fallback(&mut param),
1517 }
1518 }
1519 }
1520 }
1521 };
1522 #[cfg(feature = "bitmap")]
1526 return rcu().run(op);
1527 #[cfg(not(feature = "bitmap"))]
1528 op()
1529 }
1530
1531 unsafe fn write_ptr(
1535 &self,
1536 gpa: u64,
1537 src: *const u8,
1538 len: usize,
1539 ) -> Result<(), GuestMemoryBackingError> {
1540 if len == 0 {
1541 return Ok(());
1542 }
1543 self.run_on_mapping(
1544 AccessType::Write,
1545 gpa,
1546 len,
1547 (),
1548 |(), dest| {
1549 unsafe { sparse_mmap::try_copy(src, dest, len) }
1553 },
1554 |()| {
1555 unsafe { self.inner.imp.write_fallback(gpa, src, len) }
1558 },
1559 )
1560 }
1561
1562 pub fn write_at(&self, gpa: u64, src: &[u8]) -> Result<(), GuestMemoryError> {
1564 self.with_op(
1565 Some((gpa, src.len() as u64)),
1566 GuestMemoryOperation::Write,
1567 || self.write_at_inner(gpa, src),
1568 )
1569 }
1570
1571 fn write_at_inner(&self, gpa: u64, src: &[u8]) -> Result<(), GuestMemoryBackingError> {
1572 unsafe { self.write_ptr(gpa, src.as_ptr(), src.len()) }
1574 }
1575
1576 pub fn write_from_atomic(&self, gpa: u64, src: &[AtomicU8]) -> Result<(), GuestMemoryError> {
1578 self.with_op(
1579 Some((gpa, src.len() as u64)),
1580 GuestMemoryOperation::Write,
1581 || {
1582 unsafe { self.write_ptr(gpa, src.as_ptr().cast(), src.len()) }
1584 },
1585 )
1586 }
1587
1588 pub fn fill_at(&self, gpa: u64, val: u8, len: usize) -> Result<(), GuestMemoryError> {
1590 self.with_op(Some((gpa, len as u64)), GuestMemoryOperation::Fill, || {
1591 self.fill_at_inner(gpa, val, len)
1592 })
1593 }
1594
1595 fn fill_at_inner(&self, gpa: u64, val: u8, len: usize) -> Result<(), GuestMemoryBackingError> {
1596 if len == 0 {
1597 return Ok(());
1598 }
1599 self.run_on_mapping(
1600 AccessType::Write,
1601 gpa,
1602 len,
1603 (),
1604 |(), dest| {
1605 unsafe { sparse_mmap::try_write_bytes(dest, val, len) }
1607 },
1608 |()| self.inner.imp.fill_fallback(gpa, val, len),
1609 )
1610 }
1611
1612 unsafe fn read_ptr(
1617 &self,
1618 gpa: u64,
1619 dest: *mut u8,
1620 len: usize,
1621 ) -> Result<(), GuestMemoryBackingError> {
1622 if len == 0 {
1623 return Ok(());
1624 }
1625 self.run_on_mapping(
1626 AccessType::Read,
1627 gpa,
1628 len,
1629 (),
1630 |(), src| {
1631 unsafe { sparse_mmap::try_copy(src, dest, len) }
1635 },
1636 |()| {
1637 unsafe { self.inner.imp.read_fallback(gpa, dest, len) }
1640 },
1641 )
1642 }
1643
1644 fn read_at_inner(&self, gpa: u64, dest: &mut [u8]) -> Result<(), GuestMemoryBackingError> {
1645 unsafe { self.read_ptr(gpa, dest.as_mut_ptr(), dest.len()) }
1647 }
1648
1649 pub fn read_at(&self, gpa: u64, dest: &mut [u8]) -> Result<(), GuestMemoryError> {
1651 self.with_op(
1652 Some((gpa, dest.len() as u64)),
1653 GuestMemoryOperation::Read,
1654 || self.read_at_inner(gpa, dest),
1655 )
1656 }
1657
1658 pub fn read_to_atomic(&self, gpa: u64, dest: &[AtomicU8]) -> Result<(), GuestMemoryError> {
1660 self.with_op(
1661 Some((gpa, dest.len() as u64)),
1662 GuestMemoryOperation::Read,
1663 || unsafe { self.read_ptr(gpa, dest.as_ptr() as *mut u8, dest.len()) },
1665 )
1666 }
1667
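    /// Writes an object to guest memory at `gpa`.
    ///
    /// For 1-, 2-, 4-, and 8-byte types the write is performed as a single
    /// volatile store; larger types are copied byte-wise.
    ///
    /// Illustrative sketch (not a doctest); `Descriptor` is a hypothetical
    /// guest-visible structure:
    ///
    /// ```ignore
    /// #[derive(IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct Descriptor { addr: u64, len: u32, flags: u32 }
    ///
    /// gm.write_plain(0x3000, &Descriptor { addr: 0x1000, len: 512, flags: 0 })?;
    /// ```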
1668 pub fn write_plain<T: IntoBytes + Immutable + KnownLayout>(
1680 &self,
1681 gpa: u64,
1682 b: &T,
1683 ) -> Result<(), GuestMemoryError> {
1684 let len = size_of::<T>();
1686 self.with_op(Some((gpa, len as u64)), GuestMemoryOperation::Write, || {
1687 self.run_on_mapping(
1688 AccessType::Write,
1689 gpa,
1690 len,
1691 (),
1692 |(), dest| {
1693 match len {
1694 1 | 2 | 4 | 8 => {
1695 unsafe { sparse_mmap::try_write_volatile(dest.cast(), b) }
1698 }
1699 _ => {
1700 unsafe { sparse_mmap::try_copy(b.as_bytes().as_ptr(), dest, len) }
1703 }
1704 }
1705 },
1706 |()| {
1707 unsafe {
1709 self.inner
1710 .imp
1711 .write_fallback(gpa, b.as_bytes().as_ptr(), len)
1712 }
1713 },
1714 )
1715 })
1716 }
1717
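    /// Atomically compares and exchanges the value at `gpa`: if guest memory
    /// currently holds `current`, it is replaced with `new` and `Ok(new)` is
    /// returned; otherwise the observed value is returned as `Err(observed)`.
    ///
    /// Illustrative sketch (not a doctest) of claiming a guest-shared lock
    /// word at a hypothetical address:
    ///
    /// ```ignore
    /// match gm.compare_exchange(0x8000, 0u32, 1u32)? {
    ///     Ok(_) => { /* lock word was 0 and is now 1 */ }
    ///     Err(observed) => { /* someone else holds it; we saw `observed` */ }
    /// }
    /// ```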
1718 pub fn compare_exchange<T: IntoBytes + FromBytes + Immutable + KnownLayout + Copy>(
1720 &self,
1721 gpa: u64,
1722 current: T,
1723 new: T,
1724 ) -> Result<Result<T, T>, GuestMemoryError> {
1725 let len = size_of_val(&new);
1726 self.with_op(
1727 Some((gpa, len as u64)),
1728 GuestMemoryOperation::CompareExchange,
1729 || {
1730 self.run_on_mapping(
1732 AccessType::Write,
1733 gpa,
1734 len,
1735 (),
1736 |(), dest| {
1737 unsafe { sparse_mmap::try_compare_exchange(dest.cast(), current, new) }
1740 },
1741 |()| {
1742 let mut current = current;
1743 let success = self.inner.imp.compare_exchange_fallback(
1744 gpa,
1745 current.as_mut_bytes(),
1746 new.as_bytes(),
1747 )?;
1748
1749 Ok(if success { Ok(new) } else { Err(current) })
1750 },
1751 )
1752 },
1753 )
1754 }
1755
1756 pub fn compare_exchange_bytes<T: IntoBytes + FromBytes + Immutable + KnownLayout + ?Sized>(
1758 &self,
1759 gpa: u64,
1760 current: &mut T,
1761 new: &T,
1762 ) -> Result<bool, GuestMemoryError> {
1763 let len = size_of_val(new);
1764 assert_eq!(size_of_val(current), len);
1765 self.with_op(
1766 Some((gpa, len as u64)),
1767 GuestMemoryOperation::CompareExchange,
1768 || {
1769 self.run_on_mapping(
1771 AccessType::Write,
1772 gpa,
1773 len,
1774 current,
1775 |current, dest| {
1776 unsafe { sparse_mmap::try_compare_exchange_ref(dest, *current, new) }
1779 },
1780 |current| {
1781 let success = self.inner.imp.compare_exchange_fallback(
1782 gpa,
1783 current.as_mut_bytes(),
1784 new.as_bytes(),
1785 )?;
1786
1787 Ok(success)
1788 },
1789 )
1790 },
1791 )
1792 }
1793
1794 pub fn read_plain<T: FromBytes + Immutable + KnownLayout>(
1806 &self,
1807 gpa: u64,
1808 ) -> Result<T, GuestMemoryError> {
1809 let len = size_of::<T>();
1811 self.with_op(Some((gpa, len as u64)), GuestMemoryOperation::Read, || {
1812 self.run_on_mapping(
1813 AccessType::Read,
1814 gpa,
1815 len,
1816 (),
1817 |(), src| {
1818 match len {
1819 1 | 2 | 4 | 8 => {
1820 unsafe { sparse_mmap::try_read_volatile(src.cast::<T>()) }
1823 }
1824 _ => {
1825 let mut obj = std::mem::MaybeUninit::<T>::zeroed();
1826 unsafe { sparse_mmap::try_copy(src, obj.as_mut_ptr().cast(), len)? };
1829 Ok(unsafe { obj.assume_init() })
1831 }
1832 }
1833 },
1834 |()| {
1835 let mut obj = std::mem::MaybeUninit::<T>::zeroed();
1836 unsafe {
1839 self.inner
1840 .imp
1841 .read_fallback(gpa, obj.as_mut_ptr().cast(), len)?;
1842 }
1843 Ok(unsafe { obj.assume_init() })
1845 },
1846 )
1847 })
1848 }
1849
1850 fn probe_page_for_lock(
1851 &self,
1852 with_kernel_access: bool,
1853 gpa: u64,
1854 ) -> Result<*const AtomicU8, GuestMemoryBackingError> {
1855 let (region, offset, _) = self.inner.region(gpa, 1)?;
1856 let Some(SendPtrU8(ptr)) = region.mapping else {
1857 return Err(GuestMemoryBackingError::other(gpa, NotLockable));
1858 };
1859 if with_kernel_access {
1861 self.inner.imp.expose_va(gpa, 1)?;
1862 }
1863 let mut b = [0];
1864 self.read_at_inner(gpa, &mut b)?;
1867 let page = unsafe { ptr.as_ptr().add(offset as usize) };
1870 Ok(page.cast())
1871 }
1872
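    /// Probes and locks the given guest page numbers, returning a
    /// [`LockedPages`] that keeps the backing alive and exposes the pages as
    /// `[AtomicU8; PAGE_SIZE]` slices.
    ///
    /// Illustrative sketch (not a doctest):
    ///
    /// ```ignore
    /// let locked = gm.lock_gpns(false, &[2, 3])?; // pages at 0x2000 and 0x3000
    /// let first_byte = locked.pages()[0][0].load(std::sync::atomic::Ordering::Relaxed);
    /// ```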
1873 pub fn lock_gpns(
1874 &self,
1875 with_kernel_access: bool,
1876 gpns: &[u64],
1877 ) -> Result<LockedPages, GuestMemoryError> {
1878 self.with_op(None, GuestMemoryOperation::Lock, || {
1879 let mut pages = Vec::with_capacity(gpns.len());
1880 for &gpn in gpns {
1881 let gpa = gpn_to_gpa(gpn).map_err(GuestMemoryBackingError::gpn)?;
1882 let page = self.probe_page_for_lock(with_kernel_access, gpa)?;
1883 pages.push(PagePtr(page));
1884 }
1885 Ok(LockedPages {
1886 pages: pages.into_boxed_slice(),
1887 _mem: self.inner.clone(),
1888 })
1889 })
1890 }
1891
1892 pub fn probe_gpns(&self, gpns: &[u64]) -> Result<(), GuestMemoryError> {
1893 self.with_op(None, GuestMemoryOperation::Probe, || {
1894 for &gpn in gpns {
1895 let mut b = [0];
1896 self.read_at_inner(
1897 gpn_to_gpa(gpn).map_err(GuestMemoryBackingError::gpn)?,
1898 &mut b,
1899 )?;
1900 }
1901 Ok(())
1902 })
1903 }
1904
1905 pub fn probe_gpa_readable(&self, gpa: u64) -> Result<(), GuestMemoryErrorKind> {
1907 let mut b = [0];
1908 self.read_at_inner(gpa, &mut b).map_err(|err| err.kind)
1909 }
1910
1911 fn dangerous_access_pre_locked_memory(&self, gpa: u64, len: usize) -> &[AtomicU8] {
1923 let addr = self
1924 .mapping_range(AccessType::Write, gpa, len)
1925 .unwrap()
1926 .unwrap();
1927 unsafe { std::slice::from_raw_parts(addr.cast(), len) }
1932 }
1933
1934 fn op_range<F: FnMut(u64, Range<usize>) -> Result<(), GuestMemoryBackingError>>(
1935 &self,
1936 op: GuestMemoryOperation,
1937 range: &PagedRange<'_>,
1938 mut f: F,
1939 ) -> Result<(), GuestMemoryError> {
1940 self.with_op(None, op, || {
1941 let gpns = range.gpns();
1942 let offset = range.offset();
1943
1944 let mut byte_index = 0;
1948 let mut len = range.len();
1949 let mut page = 0;
1950 if offset % PAGE_SIZE != 0 {
1951 let head_len = std::cmp::min(len, PAGE_SIZE - (offset % PAGE_SIZE));
1952 let addr = gpn_to_gpa(gpns[page]).map_err(GuestMemoryBackingError::gpn)?
1953 + offset as u64 % PAGE_SIZE64;
1954 f(addr, byte_index..byte_index + head_len)?;
1955 byte_index += head_len;
1956 len -= head_len;
1957 page += 1;
1958 }
1959 while len >= PAGE_SIZE {
1960 f(
1961 gpn_to_gpa(gpns[page]).map_err(GuestMemoryBackingError::gpn)?,
1962 byte_index..byte_index + PAGE_SIZE,
1963 )?;
1964 byte_index += PAGE_SIZE;
1965 len -= PAGE_SIZE;
1966 page += 1;
1967 }
1968 if len > 0 {
1969 f(
1970 gpn_to_gpa(gpns[page]).map_err(GuestMemoryBackingError::gpn)?,
1971 byte_index..byte_index + len,
1972 )?;
1973 }
1974
1975 Ok(())
1976 })
1977 }
1978
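    /// Writes `data` into the scatter/gather region described by `range`; the
    /// data length must equal `range.len()`.
    ///
    /// Illustrative sketch (not a doctest), assuming `PagedRange::new(offset,
    /// len, gpns)` as the constructor provided by the `ranges` module:
    ///
    /// ```ignore
    /// let gpns = [4, 5];
    /// let range = PagedRange::new(0, 16, &gpns).unwrap();
    /// gm.write_range(&range, &[0u8; 16])?;
    /// ```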
1979 pub fn write_range(&self, range: &PagedRange<'_>, data: &[u8]) -> Result<(), GuestMemoryError> {
1980 assert!(data.len() == range.len());
1981 self.op_range(GuestMemoryOperation::Write, range, move |addr, r| {
1982 self.write_at_inner(addr, &data[r])
1983 })
1984 }
1985
1986 pub fn fill_range(&self, range: &PagedRange<'_>, val: u8) -> Result<(), GuestMemoryError> {
1987 self.op_range(GuestMemoryOperation::Fill, range, move |addr, r| {
1988 self.fill_at_inner(addr, val, r.len())
1989 })
1990 }
1991
1992 pub fn zero_range(&self, range: &PagedRange<'_>) -> Result<(), GuestMemoryError> {
1993 self.op_range(GuestMemoryOperation::Fill, range, move |addr, r| {
1994 self.fill_at_inner(addr, 0, r.len())
1995 })
1996 }
1997
1998 pub fn read_range(
1999 &self,
2000 range: &PagedRange<'_>,
2001 data: &mut [u8],
2002 ) -> Result<(), GuestMemoryError> {
2003 assert!(data.len() == range.len());
2004 self.op_range(GuestMemoryOperation::Read, range, move |addr, r| {
2005 self.read_at_inner(addr, &mut data[r])
2006 })
2007 }
2008
2009 pub fn write_range_from_atomic(
2010 &self,
2011 range: &PagedRange<'_>,
2012 data: &[AtomicU8],
2013 ) -> Result<(), GuestMemoryError> {
2014 assert!(data.len() == range.len());
2015 self.op_range(GuestMemoryOperation::Write, range, move |addr, r| {
2016 let src = &data[r];
2017 unsafe { self.write_ptr(addr, src.as_ptr().cast(), src.len()) }
2019 })
2020 }
2021
2022 pub fn read_range_to_atomic(
2023 &self,
2024 range: &PagedRange<'_>,
2025 data: &[AtomicU8],
2026 ) -> Result<(), GuestMemoryError> {
2027 assert!(data.len() == range.len());
2028 self.op_range(GuestMemoryOperation::Read, range, move |addr, r| {
2029 let dest = &data[r];
2030 unsafe { self.read_ptr(addr, dest.as_ptr().cast_mut().cast(), dest.len()) }
2032 })
2033 }
2034
2035 pub fn lock_range<T: LockedRange>(
2042 &self,
2043 paged_range: PagedRange<'_>,
2044 mut locked_range: T,
2045 ) -> Result<LockedRangeImpl<T>, GuestMemoryError> {
2046 self.with_op(None, GuestMemoryOperation::Lock, || {
2047 let gpns = paged_range.gpns();
2048 for &gpn in gpns {
2049 let gpa = gpn_to_gpa(gpn).map_err(GuestMemoryBackingError::gpn)?;
2050 self.probe_page_for_lock(true, gpa)?;
2051 }
2052 for range in paged_range.ranges() {
2053 let range = range.map_err(GuestMemoryBackingError::gpn)?;
2054 locked_range.push_sub_range(
2055 self.dangerous_access_pre_locked_memory(range.start, range.len() as usize),
2056 );
2057 }
2058 Ok(LockedRangeImpl {
2059 _mem: self.inner.clone(),
2060 inner: locked_range,
2061 })
2062 })
2063 }
2064}
2065
2066#[derive(Debug, Error)]
2067#[error("invalid guest page number {0:#x}")]
2068pub struct InvalidGpn(u64);
2069
2070fn gpn_to_gpa(gpn: u64) -> Result<u64, InvalidGpn> {
2071 gpn.checked_mul(PAGE_SIZE64).ok_or(InvalidGpn(gpn))
2072}
2073
2074#[derive(Debug, Copy, Clone, Default)]
2075struct RegionDefinition {
2076 invalid_mask: u64,
2077 region_mask: u64,
2078 region_bits: u32,
2079}
2080
2081impl RegionDefinition {
2082 fn region(&self, gpa: u64, len: u64) -> Result<(usize, u64), GuestMemoryBackingError> {
2083 if (gpa | len) & self.invalid_mask != 0 {
2084 return Err(GuestMemoryBackingError::new(
2085 GuestMemoryErrorKind::OutOfRange,
2086 gpa,
2087 OutOfRange,
2088 ));
2089 }
2090 let offset = gpa & self.region_mask;
2091 if offset.wrapping_add(len) & !self.region_mask != 0 {
2092 return Err(GuestMemoryBackingError::new(
2093 GuestMemoryErrorKind::OutOfRange,
2094 gpa,
2095 OutOfRange,
2096 ));
2097 }
2098 let index = (gpa >> self.region_bits) as usize;
2099 Ok((index, offset))
2100 }
2101}
2102
2103impl GuestMemoryInner {
2104 fn region(
2105 &self,
2106 gpa: u64,
2107 len: u64,
2108 ) -> Result<(&MemoryRegion, u64, usize), GuestMemoryBackingError> {
2109 let (index, offset) = self.region_def.region(gpa, len)?;
2110 let region = &self.regions[index];
2111 if offset + len > region.len {
2112 return Err(GuestMemoryBackingError::new(
2113 GuestMemoryErrorKind::OutOfRange,
2114 gpa,
2115 OutOfRange,
2116 ));
2117 }
2118 Ok((&self.regions[index], offset, index))
2119 }
2120}
2121
2122#[derive(Clone)]
2123pub struct LockedPages {
2124 pages: Box<[PagePtr]>,
2125 _mem: Arc<GuestMemoryInner>,
2127}
2128
2129impl Debug for LockedPages {
2130 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
2131 f.debug_struct("LockedPages")
2132 .field("page_count", &self.pages.len())
2133 .finish()
2134 }
2135}
2136
2137#[derive(Copy, Clone, Debug)]
2138struct PagePtr(#[expect(dead_code)] *const AtomicU8);
2140
2141unsafe impl Send for PagePtr {}
2144unsafe impl Sync for PagePtr {}
2146
2147pub type Page = [AtomicU8; PAGE_SIZE];
2148
2149impl LockedPages {
2150 #[inline]
2151 pub fn pages(&self) -> &[&Page] {
2152 unsafe { std::slice::from_raw_parts(self.pages.as_ptr().cast::<&Page>(), self.pages.len()) }
2156 }
2157}
2158
2159impl<'a> AsRef<[&'a Page]> for &'a LockedPages {
2160 fn as_ref(&self) -> &[&'a Page] {
2161 self.pages()
2162 }
2163}
2164
2165pub trait LockedRange {
2170 fn push_sub_range(&mut self, sub_range: &[AtomicU8]);
2172
2173 fn pop_sub_range(&mut self) -> Option<(*const AtomicU8, usize)>;
2175}
2176
2177pub struct LockedRangeImpl<T: LockedRange> {
2178 _mem: Arc<GuestMemoryInner>,
2179 inner: T,
2180}
2181
2182impl<T: LockedRange> LockedRangeImpl<T> {
2183 pub fn get(&self) -> &T {
2184 &self.inner
2185 }
2186}
2187
2188impl<T: LockedRange> Drop for LockedRangeImpl<T> {
    fn drop(&mut self) {
        // Nothing to release explicitly today: dropping `_mem` drops the
        // reference that keeps the underlying guest memory alive.
    }
2196}
2197
2198#[derive(Debug, Error)]
2199pub enum AccessError {
2200 #[error("memory access error")]
2201 Memory(#[from] GuestMemoryError),
2202 #[error("out of range: {0:#x} < {1:#x}")]
2203 OutOfRange(usize, usize),
2204 #[error("write attempted to read-only memory")]
2205 ReadOnly,
2206}
2207
2208pub trait MemoryRead {
2209 fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError>;
2210 fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError>;
2211 fn len(&self) -> usize;
2212
2213 fn read_plain<T: IntoBytes + FromBytes + Immutable + KnownLayout>(
2214 &mut self,
2215 ) -> Result<T, AccessError> {
2216 let mut value: T = FromZeros::new_zeroed();
2217 self.read(value.as_mut_bytes())?;
2218 Ok(value)
2219 }
2220
2221 fn read_n<T: IntoBytes + FromBytes + Immutable + KnownLayout + Copy>(
2222 &mut self,
2223 len: usize,
2224 ) -> Result<Vec<T>, AccessError> {
2225 let mut value = vec![FromZeros::new_zeroed(); len];
2226 self.read(value.as_mut_bytes())?;
2227 Ok(value)
2228 }
2229
2230 fn read_all(&mut self) -> Result<Vec<u8>, AccessError> {
2231 let mut value = vec![0; self.len()];
2232 self.read(&mut value)?;
2233 Ok(value)
2234 }
2235
2236 fn limit(self, len: usize) -> Limit<Self>
2237 where
2238 Self: Sized,
2239 {
2240 let len = len.min(self.len());
2241 Limit { inner: self, len }
2242 }
2243}
2244
2245pub trait MemoryWrite {
2246 fn write(&mut self, data: &[u8]) -> Result<(), AccessError>;
2247 fn zero(&mut self, len: usize) -> Result<(), AccessError> {
2248 self.fill(0, len)
2249 }
2250 fn fill(&mut self, val: u8, len: usize) -> Result<(), AccessError>;
2251 fn len(&self) -> usize;
2252
2253 fn limit(self, len: usize) -> Limit<Self>
2254 where
2255 Self: Sized,
2256 {
2257 let len = len.min(self.len());
2258 Limit { inner: self, len }
2259 }
2260}
2261
2262impl MemoryRead for &'_ [u8] {
2263 fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError> {
2264 if self.len() < data.len() {
2265 return Err(AccessError::OutOfRange(self.len(), data.len()));
2266 }
2267 let (source, rest) = self.split_at(data.len());
2268 data.copy_from_slice(source);
2269 *self = rest;
2270 Ok(self)
2271 }
2272
2273 fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError> {
2274 if self.len() < len {
2275 return Err(AccessError::OutOfRange(self.len(), len));
2276 }
2277 *self = &self[len..];
2278 Ok(self)
2279 }
2280
2281 fn len(&self) -> usize {
2282 <[u8]>::len(self)
2283 }
2284}
2285
2286impl MemoryWrite for &mut [u8] {
2287 fn write(&mut self, data: &[u8]) -> Result<(), AccessError> {
2288 if self.len() < data.len() {
2289 return Err(AccessError::OutOfRange(self.len(), data.len()));
2290 }
2291 let (dest, rest) = std::mem::take(self).split_at_mut(data.len());
2292 dest.copy_from_slice(data);
2293 *self = rest;
2294 Ok(())
2295 }
2296
2297 fn fill(&mut self, val: u8, len: usize) -> Result<(), AccessError> {
2298 if self.len() < len {
2299 return Err(AccessError::OutOfRange(self.len(), len));
2300 }
2301 let (dest, rest) = std::mem::take(self).split_at_mut(len);
2302 dest.fill(val);
2303 *self = rest;
2304 Ok(())
2305 }
2306
2307 fn len(&self) -> usize {
2308 <[u8]>::len(self)
2309 }
2310}
2311
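/// A [`MemoryRead`]/[`MemoryWrite`] wrapper that caps the number of accessible
/// bytes, produced by the `limit` methods on those traits.
///
/// Illustrative sketch (not a doctest):
///
/// ```ignore
/// let buf = [1u8, 2, 3, 4];
/// let mut reader = buf.as_slice().limit(2);
/// assert_eq!(reader.read_plain::<u8>()?, 1);
/// assert_eq!(reader.len(), 1);
/// reader.read_plain::<u16>().unwrap_err(); // only 1 byte left
/// ```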
2312#[derive(Debug, Clone)]
2313pub struct Limit<T> {
2314 inner: T,
2315 len: usize,
2316}
2317
2318impl<T: MemoryRead> MemoryRead for Limit<T> {
2319 fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError> {
2320 let len = data.len();
2321 if len > self.len {
2322 return Err(AccessError::OutOfRange(self.len, len));
2323 }
2324 self.inner.read(data)?;
2325 self.len -= len;
2326 Ok(self)
2327 }
2328
2329 fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError> {
2330 if len > self.len {
2331 return Err(AccessError::OutOfRange(self.len, len));
2332 }
2333 self.inner.skip(len)?;
2334 self.len -= len;
2335 Ok(self)
2336 }
2337
2338 fn len(&self) -> usize {
2339 self.len
2340 }
2341}
2342
2343impl<T: MemoryWrite> MemoryWrite for Limit<T> {
2344 fn write(&mut self, data: &[u8]) -> Result<(), AccessError> {
2345 let len = data.len();
2346 if len > self.len {
2347 return Err(AccessError::OutOfRange(self.len, len));
2348 }
2349 self.inner.write(data)?;
2350 self.len -= len;
2351 Ok(())
2352 }
2353
2354 fn fill(&mut self, val: u8, len: usize) -> Result<(), AccessError> {
2355 if len > self.len {
2356 return Err(AccessError::OutOfRange(self.len, len));
2357 }
2358 self.inner.fill(val, len)?;
2359 self.len -= len;
2360 Ok(())
2361 }
2362
2363 fn len(&self) -> usize {
2364 self.len
2365 }
2366}
2367
2368pub trait MappableGuestMemory: Send + Sync {
2371 fn map_to_guest(&mut self, gpa: u64, writable: bool) -> io::Result<()>;
2377
2378 fn unmap_from_guest(&mut self);
2379}
2380
2381pub trait MappedMemoryRegion: Send + Sync {
2384 fn map(
2388 &self,
2389 offset: usize,
2390 section: &dyn AsMappableRef,
2391 file_offset: u64,
2392 len: usize,
2393 writable: bool,
2394 ) -> io::Result<()>;
2395
2396 fn unmap(&self, offset: usize, len: usize) -> io::Result<()>;
2398}
2399
2400pub trait MemoryMapper: Send + Sync {
2402 fn new_region(
2407 &self,
2408 len: usize,
2409 debug_name: String,
2410 ) -> io::Result<(Box<dyn MappableGuestMemory>, Arc<dyn MappedMemoryRegion>)>;
2411}
2412
2413pub trait DoorbellRegistration: Send + Sync {
2415 fn register_doorbell(
2417 &self,
2418 guest_address: u64,
2419 value: Option<u64>,
2420 length: Option<u32>,
2421 event: &Event,
2422 ) -> io::Result<Box<dyn Send + Sync>>;
2423}
2424
2425pub trait MapRom: Send + Sync {
2427 fn map_rom(&self, gpa: u64, offset: u64, len: u64) -> io::Result<Box<dyn UnmapRom>>;
2431
2432 fn len(&self) -> u64;
2434}
2435
2436pub trait UnmapRom: Send + Sync {
2438 fn unmap_rom(self);
2440}
2441
2442#[cfg(test)]
2443#[expect(clippy::undocumented_unsafe_blocks)]
2444mod tests {
2445 use crate::GuestMemory;
2446 use crate::PAGE_SIZE64;
2447 use crate::PageFaultAction;
2448 use crate::PageFaultError;
2449 use sparse_mmap::SparseMapping;
2450 use std::ptr::NonNull;
2451 use std::sync::Arc;
2452 use thiserror::Error;
2453
2454 pub struct GuestMemoryMapping {
2459 mapping: SparseMapping,
2460 #[cfg(feature = "bitmap")]
2461 bitmap: Option<Vec<u8>>,
2462 }
2463
2464 unsafe impl crate::GuestMemoryAccess for GuestMemoryMapping {
2465 fn mapping(&self) -> Option<NonNull<u8>> {
2466 NonNull::new(self.mapping.as_ptr().cast())
2467 }
2468
2469 fn max_address(&self) -> u64 {
2470 self.mapping.len() as u64
2471 }
2472
2473 #[cfg(feature = "bitmap")]
2474 fn access_bitmap(&self) -> Option<crate::BitmapInfo> {
2475 self.bitmap.as_ref().map(|bm| crate::BitmapInfo {
2476 read_bitmap: NonNull::new(bm.as_ptr().cast_mut()).unwrap(),
2477 write_bitmap: NonNull::new(bm.as_ptr().cast_mut()).unwrap(),
2478 bit_offset: 0,
2479 })
2480 }
2481 }
2482
2483 const PAGE_SIZE: usize = 4096;
2484 const SIZE_1MB: usize = 1048576;
2485
2486 fn create_test_mapping() -> GuestMemoryMapping {
2493 let mapping = SparseMapping::new(SIZE_1MB * 4).unwrap();
2494 mapping.alloc(0, SIZE_1MB).unwrap();
2495 mapping.alloc(2 * SIZE_1MB, SIZE_1MB).unwrap();
2496 mapping
2497 .alloc(3 * SIZE_1MB + PAGE_SIZE, SIZE_1MB - PAGE_SIZE)
2498 .unwrap();
2499
2500 GuestMemoryMapping {
2501 mapping,
2502 #[cfg(feature = "bitmap")]
2503 bitmap: None,
2504 }
2505 }
2506
2507 #[test]
2508 fn test_basic_read_write() {
2509 let mapping = create_test_mapping();
2510 let gm = GuestMemory::new("test", mapping);
2511
2512 let addr = 0;
2514 let result = gm.read_plain::<u8>(addr);
2515 assert_eq!(result.unwrap(), 0);
2516
2517 let write_buffer = [1, 2, 3, 4, 5];
2519 let mut read_buffer = [0; 5];
2520 gm.write_at(0, &write_buffer).unwrap();
2521 gm.read_at(0, &mut read_buffer).unwrap();
2522 assert_eq!(write_buffer, read_buffer);
2523 assert_eq!(gm.read_plain::<u8>(0).unwrap(), 1);
2524 assert_eq!(gm.read_plain::<u8>(1).unwrap(), 2);
2525 assert_eq!(gm.read_plain::<u8>(2).unwrap(), 3);
2526 assert_eq!(gm.read_plain::<u8>(3).unwrap(), 4);
2527 assert_eq!(gm.read_plain::<u8>(4).unwrap(), 5);
2528
2529 let addr = 2 * SIZE_1MB as u64;
2531 let write_buffer: Vec<u8> = (0..PAGE_SIZE).map(|x| x as u8).collect();
2532 let mut read_buffer: Vec<u8> = (0..PAGE_SIZE).map(|_| 0).collect();
2533 gm.write_at(addr, write_buffer.as_slice()).unwrap();
2534 gm.read_at(addr, read_buffer.as_mut_slice()).unwrap();
2535 assert_eq!(write_buffer, read_buffer);
2536
2537 let write_buffer: Vec<u8> = (0..SIZE_1MB).map(|x| x as u8).collect();
2539 let mut read_buffer: Vec<u8> = (0..SIZE_1MB).map(|_| 0).collect();
2540 gm.write_at(addr, write_buffer.as_slice()).unwrap();
2541 gm.read_at(addr, read_buffer.as_mut_slice()).unwrap();
2542 assert_eq!(write_buffer, read_buffer);
2543
2544 let addr = SIZE_1MB as u64;
2546 let result = gm.read_plain::<u8>(addr);
2547 assert!(result.is_err());
2548 }
2549
2550 #[test]
2551 fn test_multi() {
2552 let len = SIZE_1MB * 4;
2553 let mapping = SparseMapping::new(len).unwrap();
2554 mapping.alloc(0, len).unwrap();
2555 let mapping = Arc::new(GuestMemoryMapping {
2556 mapping,
2557 #[cfg(feature = "bitmap")]
2558 bitmap: None,
2559 });
2560 let region_len = 1 << 30;
2561 let gm = GuestMemory::new_multi_region(
2562 "test",
2563 region_len,
2564 vec![Some(mapping.clone()), None, Some(mapping.clone())],
2565 )
2566 .unwrap();
2567
2568 let mut b = [0];
2569 let len = len as u64;
2570 gm.read_at(0, &mut b).unwrap();
2571 gm.read_at(len, &mut b).unwrap_err();
2572 gm.read_at(region_len, &mut b).unwrap_err();
2573 gm.read_at(2 * region_len, &mut b).unwrap();
2574 gm.read_at(2 * region_len + len, &mut b).unwrap_err();
2575 gm.read_at(3 * region_len, &mut b).unwrap_err();
2576 }
2577
2578 #[cfg(feature = "bitmap")]
2579 #[test]
2580 fn test_bitmap() {
2581 let len = PAGE_SIZE * 4;
2582 let mapping = SparseMapping::new(len).unwrap();
2583 mapping.alloc(0, len).unwrap();
2584 let bitmap = vec![0b0101];
2585 let mapping = Arc::new(GuestMemoryMapping {
2586 mapping,
2587 bitmap: Some(bitmap),
2588 });
2589 let gm = GuestMemory::new("test", mapping);
2590
2591 gm.read_plain::<[u8; 1]>(0).unwrap();
2592 gm.read_plain::<[u8; 1]>(PAGE_SIZE64 - 1).unwrap();
2593 gm.read_plain::<[u8; 2]>(PAGE_SIZE64 - 1).unwrap_err();
2594 gm.read_plain::<[u8; 1]>(PAGE_SIZE64).unwrap_err();
2595 gm.read_plain::<[u8; 1]>(PAGE_SIZE64 * 2).unwrap();
2596 gm.read_plain::<[u8; PAGE_SIZE * 2]>(0).unwrap_err();
2597 }
2598
2599 struct FaultingMapping {
2600 mapping: SparseMapping,
2601 }
2602
2603 #[derive(Debug, Error)]
2604 #[error("fault")]
2605 struct Fault;
2606
2607 unsafe impl crate::GuestMemoryAccess for FaultingMapping {
2608 fn mapping(&self) -> Option<NonNull<u8>> {
2609 NonNull::new(self.mapping.as_ptr().cast())
2610 }
2611
2612 fn max_address(&self) -> u64 {
2613 self.mapping.len() as u64
2614 }
2615
2616 fn page_fault(
2617 &self,
2618 address: u64,
2619 _len: usize,
2620 write: bool,
2621 bitmap_failure: bool,
2622 ) -> PageFaultAction {
2623 assert!(!bitmap_failure);
2624 let qlen = self.mapping.len() as u64 / 4;
2625 if address < qlen || address >= 3 * qlen {
2626 return PageFaultAction::Fail(PageFaultError::other(Fault));
2627 }
2628 let page_address = (address as usize) & !(PAGE_SIZE - 1);
2629 if address >= 2 * qlen {
2630 if write {
2631 return PageFaultAction::Fail(PageFaultError::other(Fault));
2632 }
2633 self.mapping.map_zero(page_address, PAGE_SIZE).unwrap();
2634 } else {
2635 self.mapping.alloc(page_address, PAGE_SIZE).unwrap();
2636 }
2637 PageFaultAction::Retry
2638 }
2639 }
2640
2641 impl FaultingMapping {
2642 fn new(len: usize) -> Self {
2643 let mapping = SparseMapping::new(len).unwrap();
2644 FaultingMapping { mapping }
2645 }
2646 }
2647
2648 #[test]
2649 fn test_fault() {
2650 let len = PAGE_SIZE * 4;
2651 let mapping = FaultingMapping::new(len);
2652 let gm = GuestMemory::new("test", mapping);
2653
2654 gm.write_plain::<u8>(0, &0).unwrap_err();
2655 gm.read_plain::<u8>(PAGE_SIZE64 - 1).unwrap_err();
2656 gm.read_plain::<u8>(PAGE_SIZE64).unwrap();
2657 gm.write_plain::<u8>(PAGE_SIZE64, &0).unwrap();
2658 gm.write_plain::<u16>(PAGE_SIZE64 * 3 - 1, &0).unwrap_err();
2659 gm.read_plain::<u16>(PAGE_SIZE64 * 3 - 1).unwrap_err();
2660 gm.read_plain::<u8>(PAGE_SIZE64 * 3 - 1).unwrap();
2661 gm.write_plain::<u8>(PAGE_SIZE64 * 3 - 1, &0).unwrap_err();
2662 }
2663
2664 #[test]
2665 fn test_allocated() {
2666 let mut gm = GuestMemory::allocate(0x10000);
2667 let pattern = [0x42; 0x10000];
2668 gm.write_at(0, &pattern).unwrap();
2669 assert_eq!(gm.inner_buf_mut().unwrap(), &pattern);
2670 gm.inner_buf().unwrap();
2671 let gm2 = gm.clone();
2672 assert!(gm.inner_buf_mut().is_none());
2673 gm.inner_buf().unwrap();
2674 let mut gm = gm.into_inner_buf().unwrap_err();
2675 drop(gm2);
2676 assert_eq!(gm.inner_buf_mut().unwrap(), &pattern);
2677 gm.into_inner_buf().unwrap();
2678 }
2679}