sparse_mmap/
lib.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Memory-related abstractions.
5
6// UNSAFETY: Manual pointer manipulation, dealing with mmap, and a signal handler.
7#![expect(unsafe_code)]
8#![expect(missing_docs)]
9#![expect(clippy::undocumented_unsafe_blocks, clippy::missing_safety_doc)]
10
11pub mod alloc;
12pub mod unix;
13pub mod windows;
14
15pub use sys::AsMappableRef;
16pub use sys::Mappable;
17pub use sys::MappableRef;
18pub use sys::SparseMapping;
19pub use sys::alloc_shared_memory;
20pub use sys::new_mappable_from_file;
21
22use std::mem::MaybeUninit;
23use std::sync::atomic::AtomicU8;
24use thiserror::Error;
25#[cfg(unix)]
26use unix as sys;
27#[cfg(windows)]
28use windows as sys;
29use zerocopy::FromBytes;
30use zerocopy::Immutable;
31use zerocopy::IntoBytes;
32use zerocopy::KnownLayout;
33
/// Errors returned by [`SparseMapping`] access methods.
#[derive(Debug, Error)]
pub enum SparseMappingError {
    /// The requested `offset`/`len` range does not fit within the mapping.
    #[error("out of bounds")]
    OutOfBounds,
    /// An in-bounds access faulted; wraps the underlying `trycopy` error.
    /// `transparent` forwards both `Display` and `source` to the inner error.
    #[error(transparent)]
    Memory(trycopy::MemoryError),
}
41
42impl SparseMapping {
43    /// Gets the supported page size for sparse mappings.
44    pub fn page_size() -> usize {
45        sys::page_size()
46    }
47
48    fn check(&self, offset: usize, len: usize) -> Result<(), SparseMappingError> {
49        if self.len() < offset || self.len() - offset < len {
50            return Err(SparseMappingError::OutOfBounds);
51        }
52        Ok(())
53    }
54
55    /// Reads a type `T` from `offset` in the sparse mapping using a single read instruction.
56    ///
57    /// Panics if `T` is not 1, 2, 4, or 8 bytes in size.
58    pub fn read_volatile<T: FromBytes + Immutable + KnownLayout>(
59        &self,
60        offset: usize,
61    ) -> Result<T, SparseMappingError> {
62        assert!(self.is_local(), "cannot read from remote mappings");
63
64        self.check(offset, size_of::<T>())?;
65        // SAFETY: the bounds have been checked above.
66        unsafe { trycopy::try_read_volatile(self.as_ptr().byte_add(offset).cast()) }
67            .map_err(SparseMappingError::Memory)
68    }
69
70    /// Writes a type `T` at `offset` in the sparse mapping using a single write instruciton.
71    ///
72    /// Panics if `T` is not 1, 2, 4, or 8 bytes in size.
73    pub fn write_volatile<T: IntoBytes + Immutable + KnownLayout>(
74        &self,
75        offset: usize,
76        value: &T,
77    ) -> Result<(), SparseMappingError> {
78        assert!(self.is_local(), "cannot write to remote mappings");
79
80        self.check(offset, size_of::<T>())?;
81        // SAFETY: the bounds have been checked above.
82        unsafe { trycopy::try_write_volatile(self.as_ptr().byte_add(offset).cast(), value) }
83            .map_err(SparseMappingError::Memory)
84    }
85
86    /// Tries to write into the sparse mapping.
87    pub fn write_at(&self, offset: usize, data: &[u8]) -> Result<(), SparseMappingError> {
88        assert!(self.is_local(), "cannot write to remote mappings");
89
90        self.check(offset, data.len())?;
91        // SAFETY: the bounds have been checked above.
92        unsafe {
93            let dest = self.as_ptr().cast::<u8>().add(offset);
94            trycopy::try_copy(data.as_ptr(), dest, data.len()).map_err(SparseMappingError::Memory)
95        }
96    }
97
98    /// Tries to read from the sparse mapping.
99    pub fn read_at(&self, offset: usize, data: &mut [u8]) -> Result<(), SparseMappingError> {
100        assert!(self.is_local(), "cannot read from remote mappings");
101
102        self.check(offset, data.len())?;
103        // SAFETY: the bounds have been checked above.
104        unsafe {
105            let src = (self.as_ptr() as *const u8).add(offset);
106            trycopy::try_copy(src, data.as_mut_ptr(), data.len())
107                .map_err(SparseMappingError::Memory)
108        }
109    }
110
111    /// Tries to read a type `T` from `offset`.
112    pub fn read_plain<T: FromBytes + Immutable + KnownLayout>(
113        &self,
114        offset: usize,
115    ) -> Result<T, SparseMappingError> {
116        if matches!(size_of::<T>(), 1 | 2 | 4 | 8) {
117            self.read_volatile(offset)
118        } else {
119            let mut obj = MaybeUninit::<T>::uninit();
120            // SAFETY: `obj` is a valid target for writes.
121            unsafe {
122                self.read_at(
123                    offset,
124                    std::slice::from_raw_parts_mut(obj.as_mut_ptr().cast::<u8>(), size_of::<T>()),
125                )?;
126            }
127            // SAFETY: `obj` was fully initialized by `read_at`.
128            Ok(unsafe { obj.assume_init() })
129        }
130    }
131
132    /// Tries to fill a region of the sparse mapping with `val`.
133    pub fn fill_at(&self, offset: usize, val: u8, len: usize) -> Result<(), SparseMappingError> {
134        assert!(self.is_local(), "cannot fill remote mappings");
135
136        self.check(offset, len)?;
137        // SAFETY: the bounds have been checked above.
138        unsafe {
139            let dest = self.as_ptr().cast::<u8>().add(offset);
140            trycopy::try_write_bytes(dest, val, len).map_err(SparseMappingError::Memory)
141        }
142    }
143
144    /// Gets a slice for accessing the mapped data directly.
145    ///
146    /// This is safe from a Rust memory model perspective, since the underlying
147    /// VA is either mapped and is owned in a shared state by this object (in
148    /// which case &[AtomicU8] access from multiple threads is fine), or the VA
149    /// is not mapped but is reserved and so will not be mapped by another Rust
150    /// object.
151    ///
152    /// In the latter case, actually accessing the data may cause a fault, which
153    /// will likely lead to a process crash, so care must nonetheless be taken
154    /// when using this method.
155    pub fn atomic_slice(&self, start: usize, len: usize) -> &[AtomicU8] {
156        assert!(self.len() >= start && self.len() - start >= len);
157        // SAFETY: slice is within the mapped range
158        unsafe { std::slice::from_raw_parts((self.as_ptr() as *const AtomicU8).add(start), len) }
159    }
160}
161
#[cfg(test)]
mod tests {
    use super::*;

    static BUF: [u8; 65536] = [0xcc; 65536];

    /// Commits, writes, and decommits a single page at both the second-lowest
    /// and the highest page of a `range_size`-byte sparse reservation.
    fn test_with(range_size: usize) {
        let page_size = SparseMapping::page_size();
        let mapping = SparseMapping::new(range_size).unwrap();

        for offset in [page_size, range_size - page_size] {
            mapping.alloc(offset, page_size).unwrap();
            // SAFETY: the page at `offset` was committed just above and is
            // exclusively owned by this mapping.
            let page = unsafe {
                std::slice::from_raw_parts_mut(
                    mapping.as_ptr().add(offset).cast::<u8>(),
                    page_size,
                )
            };
            page.copy_from_slice(&BUF[..page_size]);
            mapping.unmap(offset, page_size).unwrap();
        }

        drop(mapping);
    }

    #[test]
    fn test_sparse_mapping() {
        let page_size = SparseMapping::page_size();
        for range_size in [
            0x100000,
            0x200000,
            0x200000 + page_size,
            0x40000000,
            0x40000000 + page_size,
        ] {
            test_with(range_size);
        }
    }

    #[test]
    fn test_overlapping_mappings() {
        let page_size = SparseMapping::page_size();
        // Converts a page count into a byte offset/length.
        let pages = |n: usize| n * page_size;

        let mapping = SparseMapping::new(pages(0x10)).unwrap();

        // Anonymous allocations that overlap in various ways must all succeed.
        mapping.alloc(pages(0x1), pages(0x4)).unwrap();
        mapping.alloc(pages(0x1), pages(0x2)).unwrap();
        mapping.alloc(pages(0x2), pages(0x3)).unwrap();
        mapping.alloc(0, pages(0x10)).unwrap();
        mapping.alloc(pages(0x8), pages(0x8)).unwrap();
        mapping.unmap(pages(0xc), pages(0x2)).unwrap();
        mapping.alloc(pages(0x9), pages(0x4)).unwrap();
        mapping.unmap(pages(0x3), pages(0xb)).unwrap();

        mapping.alloc(pages(0x5), pages(0x4)).unwrap();
        mapping.alloc(pages(0x6), pages(0x2)).unwrap();
        mapping.alloc(pages(0x6), pages(0x1)).unwrap();
        mapping.alloc(pages(0x4), pages(0x3)).unwrap();

        // Overlapping file-backed mappings must also succeed.
        let shmem = alloc_shared_memory(pages(0x4)).unwrap();
        mapping
            .map_file(pages(0x5), pages(0x4), &shmem, 0, true)
            .unwrap();
        mapping
            .map_file(pages(0x6), pages(0x2), &shmem, 0, true)
            .unwrap();
        mapping
            .map_file(pages(0x6), pages(0x1), &shmem, 0, true)
            .unwrap();
        mapping
            .map_file(pages(0x4), pages(0x3), &shmem, 0, true)
            .unwrap();

        drop(mapping);
    }
}