sparse_mmap/lib.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Memory-related abstractions.

// UNSAFETY: Manual pointer manipulation, dealing with mmap, and a signal handler.
#![expect(unsafe_code)]
#![expect(missing_docs)]
#![expect(clippy::undocumented_unsafe_blocks, clippy::missing_safety_doc)]

pub mod alloc;
mod trycopy_windows_arm64;
mod trycopy_windows_x64;
pub mod unix;
pub mod windows;

pub use sys::AsMappableRef;
pub use sys::Mappable;
pub use sys::MappableRef;
pub use sys::SparseMapping;
pub use sys::alloc_shared_memory;
pub use sys::new_mappable_from_file;

use std::mem::MaybeUninit;
use std::sync::atomic::AtomicU8;
use thiserror::Error;
#[cfg(unix)]
use unix as sys;
#[cfg(windows)]
use windows as sys;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

/// Must be called before using `try_copy` on Unix platforms.
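///
/// A minimal usage sketch (hedged; reading a plain local value is for
/// illustration only):
///
/// ```no_run
/// sparse_mmap::initialize_try_copy();
///
/// let value = 42u32;
/// // SAFETY: `value` is a valid, readable location.
/// let read = unsafe { sparse_mmap::try_read_volatile(&value) }.unwrap();
/// assert_eq!(read, 42);
/// ```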
pub fn initialize_try_copy() {
    #[cfg(unix)]
    {
        static INIT: std::sync::Once = std::sync::Once::new();
        INIT.call_once(|| unsafe {
            let err = install_signal_handlers();
            if err != 0 {
                panic!(
                    "could not install signal handlers: {}",
                    std::io::Error::from_raw_os_error(err)
                )
            }
        });
    }
}

unsafe extern "C" {
    #[cfg(unix)]
    fn install_signal_handlers() -> i32;

    fn try_memmove(
        dest: *mut u8,
        src: *const u8,
        length: usize,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_memset(dest: *mut u8, c: i32, length: usize, failure: *mut AccessFailure) -> i32;
    fn try_cmpxchg8(
        dest: *mut u8,
        expected: &mut u8,
        desired: u8,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_cmpxchg16(
        dest: *mut u16,
        expected: &mut u16,
        desired: u16,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_cmpxchg32(
        dest: *mut u32,
        expected: &mut u32,
        desired: u32,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_cmpxchg64(
        dest: *mut u64,
        expected: &mut u64,
        desired: u64,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_read8(dest: *mut u8, src: *const u8, failure: *mut AccessFailure) -> i32;
    fn try_read16(dest: *mut u16, src: *const u16, failure: *mut AccessFailure) -> i32;
    fn try_read32(dest: *mut u32, src: *const u32, failure: *mut AccessFailure) -> i32;
    fn try_read64(dest: *mut u64, src: *const u64, failure: *mut AccessFailure) -> i32;
    fn try_write8(dest: *mut u8, value: u8, failure: *mut AccessFailure) -> i32;
    fn try_write16(dest: *mut u16, value: u16, failure: *mut AccessFailure) -> i32;
    fn try_write32(dest: *mut u32, value: u32, failure: *mut AccessFailure) -> i32;
    fn try_write64(dest: *mut u64, value: u64, failure: *mut AccessFailure) -> i32;
}

#[repr(C)]
struct AccessFailure {
    address: *mut u8,
    #[cfg(unix)]
    si_signo: i32,
    #[cfg(unix)]
    si_code: i32,
}

#[derive(Debug, Error)]
#[error("failed to {} memory", if self.is_write { "write" } else { "read" })]
pub struct MemoryError {
    offset: usize,
    is_write: bool,
    #[source]
    source: OsAccessError,
}

#[derive(Debug, Error)]
enum OsAccessError {
    #[cfg(windows)]
    #[error("access violation")]
    AccessViolation,
    #[cfg(unix)]
    #[error("SIGSEGV (si_code = {0:x})")]
    Sigsegv(u32),
    #[cfg(unix)]
    #[error("SIGBUS (si_code = {0:x})")]
    Sigbus(u32),
}

impl MemoryError {
    fn new(src: Option<*const u8>, dest: *mut u8, len: usize, failure: &AccessFailure) -> Self {
        let (offset, is_write) = if failure.address.is_null() {
            // In the case of a general protection fault (#GP), the provided address is zero.
            (0, src.is_none())
        } else if (dest..dest.wrapping_add(len)).contains(&failure.address) {
            (failure.address as usize - dest as usize, true)
        } else if let Some(src) = src {
            if (src..src.wrapping_add(len)).contains(&failure.address.cast_const()) {
                (failure.address as usize - src as usize, false)
            } else {
                panic!(
                    "invalid failure address: {:p} src: {:p} dest: {:p} len: {:#x}",
                    failure.address, src, dest, len
                );
            }
        } else {
            panic!(
                "invalid failure address: {:p} src: None dest: {:p} len: {:#x}",
                failure.address, dest, len
            );
        };
        #[cfg(windows)]
        let source = OsAccessError::AccessViolation;
        #[cfg(unix)]
        let source = match failure.si_signo {
            libc::SIGSEGV => OsAccessError::Sigsegv(failure.si_code as u32),
            libc::SIGBUS => OsAccessError::Sigbus(failure.si_code as u32),
            _ => {
                panic!(
                    "unexpected signal: {} src: {:?} dest: {:p} len: {:#x}",
                    failure.si_signo, src, dest, len
                );
            }
        };
        Self {
            offset,
            is_write,
            source,
        }
    }

    /// Returns the byte offset into the buffer at which the access violation
    /// occurred.
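    ///
    /// For example, a caller can use the offset to report roughly how many
    /// leading bytes were transferred before the fault (a hedged sketch;
    /// `copy_prefix` is a hypothetical helper):
    ///
    /// ```no_run
    /// /// Hypothetical: returns the number of bytes copied before a fault,
    /// /// treating a clean copy as a full transfer.
    /// fn copy_prefix(src: &[u8], dest: *mut u8) -> usize {
    ///     sparse_mmap::initialize_try_copy();
    ///     // SAFETY: assumes `dest` is valid and reserved for `src.len()` bytes.
    ///     match unsafe { sparse_mmap::try_copy(src.as_ptr(), dest, src.len()) } {
    ///         Ok(()) => src.len(),
    ///         Err(err) => err.offset(),
    ///     }
    /// }
    /// ```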
    pub fn offset(&self) -> usize {
        self.offset
    }
}

/// Copies `count` elements from `src` to `dest`. `src` and `dest` may overlap.
/// Fails on access violation/SIGSEGV. Note that in case of failure, some of the
/// bytes (even partial elements) may already have been copied.
///
/// This also fails if `initialize_try_copy` has not been called.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `src` or
/// `dest` is being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `src` and
/// `dest` are valid, reserved addresses but you do not know if they are mapped
/// with the appropriate protection. For example, this routine is useful if
/// `dest` is a sparse mapping where some pages are mapped with
/// PAGE_NOACCESS/PROT_NONE, and some are mapped with PAGE_READWRITE/PROT_WRITE.
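///
/// A hedged sketch of the intended pattern, mirroring the tests below:
///
/// ```no_run
/// use sparse_mmap::{SparseMapping, initialize_try_copy, try_copy};
///
/// initialize_try_copy();
/// let page = SparseMapping::page_size();
/// let mapping = SparseMapping::new(page * 16).unwrap();
/// mapping.alloc(page, page).unwrap();
/// let data = [0xab_u8; 16];
/// // SAFETY: both ranges lie within valid, reserved memory.
/// unsafe {
///     let base = mapping.as_ptr().cast::<u8>();
///     // Page 0 is reserved but unmapped: the copy fails cleanly.
///     try_copy(data.as_ptr(), base, data.len()).unwrap_err();
///     // Page 1 is committed: the copy succeeds.
///     try_copy(data.as_ptr(), base.add(page), data.len()).unwrap();
/// }
/// ```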
pub unsafe fn try_copy<T>(src: *const T, dest: *mut T, count: usize) -> Result<(), MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller.
    let ret = unsafe {
        try_memmove(
            dest.cast::<u8>(),
            src.cast::<u8>(),
            count * size_of::<T>(),
            failure.as_mut_ptr(),
        )
    };
    match ret {
        0 => Ok(()),
        _ => Err(MemoryError::new(
            Some(src.cast()),
            dest.cast(),
            // Pass the length in bytes, not elements, so that the failure
            // address bounds check is correct for multi-byte `T`.
            count * size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

/// Writes `count * size_of::<T>()` bytes of the value `val` to `dest`. Fails
/// on access violation/SIGSEGV. Note that in case of failure, some of the
/// bytes (even partial elements) may already have been written.
///
/// This also fails if `initialize_try_copy` has not been called.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `dest` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `dest` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `dest` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
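///
/// A hedged sketch (same setup assumptions as [`try_copy`]):
///
/// ```no_run
/// use sparse_mmap::{SparseMapping, initialize_try_copy, try_write_bytes};
///
/// initialize_try_copy();
/// let page = SparseMapping::page_size();
/// let mapping = SparseMapping::new(page * 4).unwrap();
/// mapping.alloc(0, page).unwrap();
/// // SAFETY: the destination range lies within the reserved mapping.
/// unsafe {
///     try_write_bytes(mapping.as_ptr().cast::<u8>(), 0xcc, page).unwrap();
/// }
/// ```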
pub unsafe fn try_write_bytes<T>(dest: *mut T, val: u8, count: usize) -> Result<(), MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller.
    let ret = unsafe {
        try_memset(
            dest.cast::<u8>(),
            val.into(),
            count * size_of::<T>(),
            failure.as_mut_ptr(),
        )
    };
    match ret {
        0 => Ok(()),
        _ => Err(MemoryError::new(
            None,
            dest.cast(),
            // Pass the length in bytes, matching the `try_memset` call above.
            count * size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

/// Atomically swaps the value at `dest` with `new` when `*dest` is `current`,
/// using a sequentially-consistent memory ordering.
///
/// Returns `Ok(Ok(new))` if the swap was successful, `Ok(Err(*dest))` if the
/// swap failed, or `Err(MemoryError)` if the swap could not be attempted due
/// to an access violation.
///
/// Panics if the size is not 1, 2, 4, or 8 bytes.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `dest` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `dest` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `dest` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
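///
/// A hedged sketch against ordinary, always-mapped memory:
///
/// ```no_run
/// use sparse_mmap::{initialize_try_copy, try_compare_exchange};
///
/// initialize_try_copy();
/// let mut word = 5u32;
/// // SAFETY: `word` is valid, writable memory.
/// let r = unsafe { try_compare_exchange(&mut word, 5, 6) }.unwrap();
/// assert_eq!(r, Ok(6));
/// // A stale `current` fails and reports the observed value.
/// let r = unsafe { try_compare_exchange(&mut word, 5, 7) }.unwrap();
/// assert_eq!(r, Err(6));
/// ```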
pub unsafe fn try_compare_exchange<T: IntoBytes + FromBytes + Immutable + KnownLayout>(
    dest: *mut T,
    mut current: T,
    new: T,
) -> Result<Result<T, T>, MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller
    let ret = unsafe {
        match size_of::<T>() {
            1 => try_cmpxchg8(
                dest.cast(),
                std::mem::transmute::<&mut T, &mut u8>(&mut current),
                std::mem::transmute_copy::<T, u8>(&new),
                failure.as_mut_ptr(),
            ),
            2 => try_cmpxchg16(
                dest.cast(),
                std::mem::transmute::<&mut T, &mut u16>(&mut current),
                std::mem::transmute_copy::<T, u16>(&new),
                failure.as_mut_ptr(),
            ),
            4 => try_cmpxchg32(
                dest.cast(),
                std::mem::transmute::<&mut T, &mut u32>(&mut current),
                std::mem::transmute_copy::<T, u32>(&new),
                failure.as_mut_ptr(),
            ),
            8 => try_cmpxchg64(
                dest.cast(),
                std::mem::transmute::<&mut T, &mut u64>(&mut current),
                std::mem::transmute_copy::<T, u64>(&new),
                failure.as_mut_ptr(),
            ),
            _ => panic!("unsupported size"),
        }
    };
    match ret {
        n if n > 0 => Ok(Ok(new)),
        0 => Ok(Err(current)),
        _ => Err(MemoryError::new(
            None,
            dest.cast(),
            size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

/// Atomically swaps the value at `dest` with `new` when `*dest` is `current`,
/// using a sequentially-consistent memory ordering.
///
/// Returns `Ok(true)` if the swap was successful, `Ok(false)` if the swap
/// failed (after updating `current` with the observed value), or
/// `Err(MemoryError)` if the swap could not be attempted due to an access
/// violation.
///
/// Panics if `current` and `new` are not the same size or that size is not
/// 1, 2, 4, or 8 bytes.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `dest` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `dest` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `dest` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
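///
/// A hedged sketch, mirroring the byte-slice form used in the tests below:
///
/// ```no_run
/// use sparse_mmap::{initialize_try_copy, try_compare_exchange_ref};
///
/// initialize_try_copy();
/// let mut storage = u16::from_ne_bytes([1, 0]);
/// let mut current = [1u8, 0];
/// // SAFETY: `storage` is valid, writable, and suitably aligned memory.
/// let swapped = unsafe {
///     try_compare_exchange_ref(
///         std::ptr::from_mut(&mut storage).cast::<u8>(),
///         &mut current,
///         &[2, 0],
///     )
/// }
/// .unwrap();
/// assert!(swapped);
/// ```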
pub unsafe fn try_compare_exchange_ref<
    T: IntoBytes + FromBytes + Immutable + KnownLayout + ?Sized,
>(
    dest: *mut u8,
    current: &mut T,
    new: &T,
) -> Result<bool, MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller
    let ret = unsafe {
        match (size_of_val(current), size_of_val(new)) {
            (1, 1) => try_cmpxchg8(
                dest,
                &mut *current.as_mut_bytes().as_mut_ptr(),
                new.as_bytes()[0],
                failure.as_mut_ptr(),
            ),
            (2, 2) => try_cmpxchg16(
                dest.cast(),
                &mut *current.as_mut_bytes().as_mut_ptr().cast(),
                u16::from_ne_bytes(new.as_bytes().try_into().unwrap()),
                failure.as_mut_ptr(),
            ),
            (4, 4) => try_cmpxchg32(
                dest.cast(),
                &mut *current.as_mut_bytes().as_mut_ptr().cast(),
                u32::from_ne_bytes(new.as_bytes().try_into().unwrap()),
                failure.as_mut_ptr(),
            ),
            (8, 8) => try_cmpxchg64(
                dest.cast(),
                &mut *current.as_mut_bytes().as_mut_ptr().cast(),
                u64::from_ne_bytes(new.as_bytes().try_into().unwrap()),
                failure.as_mut_ptr(),
            ),
            _ => panic!("unsupported or mismatched size"),
        }
    };
    if ret < 0 {
        return Err(MemoryError::new(
            None,
            dest.cast(),
            size_of_val(current),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        ));
    }
    Ok(ret > 0)
}

/// Reads the value at `src`, treating the pointer as a volatile access.
///
/// Returns `Ok(T)` if the read was successful, or `Err(MemoryError)` if the
/// read was unsuccessful.
///
/// Panics if the size is not 1, 2, 4, or 8 bytes.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `src` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `src` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `src` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
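///
/// A hedged sketch reading from a committed page of a sparse mapping (this
/// assumes freshly committed pages read as zero):
///
/// ```no_run
/// use sparse_mmap::{SparseMapping, initialize_try_copy, try_read_volatile};
///
/// initialize_try_copy();
/// let page = SparseMapping::page_size();
/// let mapping = SparseMapping::new(page * 2).unwrap();
/// mapping.alloc(0, page).unwrap();
/// // SAFETY: the address lies within the reserved mapping.
/// let v = unsafe { try_read_volatile(mapping.as_ptr().cast::<u64>()) }.unwrap();
/// assert_eq!(v, 0);
/// ```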
pub unsafe fn try_read_volatile<T: FromBytes + Immutable + KnownLayout>(
    src: *const T,
) -> Result<T, MemoryError> {
    let mut dest = MaybeUninit::<T>::uninit();
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller
    let ret = unsafe {
        match size_of::<T>() {
            1 => try_read8(dest.as_mut_ptr().cast(), src.cast(), failure.as_mut_ptr()),
            2 => try_read16(dest.as_mut_ptr().cast(), src.cast(), failure.as_mut_ptr()),
            4 => try_read32(dest.as_mut_ptr().cast(), src.cast(), failure.as_mut_ptr()),
            8 => try_read64(dest.as_mut_ptr().cast(), src.cast(), failure.as_mut_ptr()),
            _ => panic!("unsupported size"),
        }
    };
    match ret {
        0 => {
            // SAFETY: dest was fully initialized by try_read.
            Ok(unsafe { dest.assume_init() })
        }
        _ => Err(MemoryError::new(
            Some(src.cast()),
            dest.as_mut_ptr().cast(),
            size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

/// Writes `value` at `dest`, treating the pointer as a volatile access.
///
/// Returns `Ok(())` if the write was successful, or `Err(MemoryError)` if the
/// write was unsuccessful.
///
/// Panics if the size is not 1, 2, 4, or 8 bytes.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `dest` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `dest` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `dest` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
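///
/// A hedged sketch (same setup assumptions as [`try_read_volatile`]):
///
/// ```no_run
/// use sparse_mmap::{SparseMapping, initialize_try_copy, try_write_volatile};
///
/// initialize_try_copy();
/// let page = SparseMapping::page_size();
/// let mapping = SparseMapping::new(page * 2).unwrap();
/// mapping.alloc(0, page).unwrap();
/// // SAFETY: the address lies within the reserved mapping.
/// unsafe { try_write_volatile(mapping.as_ptr().cast::<u64>(), &0x1234u64) }.unwrap();
/// ```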
pub unsafe fn try_write_volatile<T: IntoBytes + Immutable + KnownLayout>(
    dest: *mut T,
    value: &T,
) -> Result<(), MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller
    let ret = unsafe {
        match size_of::<T>() {
            1 => try_write8(
                dest.cast(),
                std::mem::transmute_copy(value),
                failure.as_mut_ptr(),
            ),
            2 => try_write16(
                dest.cast(),
                std::mem::transmute_copy(value),
                failure.as_mut_ptr(),
            ),
            4 => try_write32(
                dest.cast(),
                std::mem::transmute_copy(value),
                failure.as_mut_ptr(),
            ),
            8 => try_write64(
                dest.cast(),
                std::mem::transmute_copy(value),
                failure.as_mut_ptr(),
            ),
            _ => panic!("unsupported size"),
        }
    };
    match ret {
        0 => Ok(()),
        _ => Err(MemoryError::new(
            None,
            dest.cast(),
            size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

#[derive(Debug, Error)]
pub enum SparseMappingError {
    #[error("out of bounds")]
    OutOfBounds,
    #[error(transparent)]
    Memory(MemoryError),
}

impl SparseMapping {
    /// Gets the supported page size for sparse mappings.
    pub fn page_size() -> usize {
        sys::page_size()
    }

    /// Tries to write into the sparse mapping.
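    ///
    /// A hedged sketch (writing, then reading back with [`Self::read_at`]):
    ///
    /// ```no_run
    /// use sparse_mmap::SparseMapping;
    ///
    /// sparse_mmap::initialize_try_copy();
    /// let page = SparseMapping::page_size();
    /// let mapping = SparseMapping::new(page * 2).unwrap();
    /// mapping.alloc(0, page).unwrap();
    /// mapping.write_at(16, &[1, 2, 3]).unwrap();
    /// let mut buf = [0u8; 3];
    /// mapping.read_at(16, &mut buf).unwrap();
    /// assert_eq!(buf, [1, 2, 3]);
    /// ```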
    pub fn write_at(&self, offset: usize, data: &[u8]) -> Result<(), SparseMappingError> {
        assert!(self.is_local(), "cannot write to remote mappings");

        if self.len() < offset || self.len() - offset < data.len() {
            return Err(SparseMappingError::OutOfBounds);
        }
        // SAFETY: the bounds have been checked above.
        unsafe {
            let dest = self.as_ptr().cast::<u8>().add(offset);
            try_copy(data.as_ptr(), dest, data.len()).map_err(SparseMappingError::Memory)
        }
    }

    /// Tries to read from the sparse mapping.
    pub fn read_at(&self, offset: usize, data: &mut [u8]) -> Result<(), SparseMappingError> {
        assert!(self.is_local(), "cannot read from remote mappings");

        if self.len() < offset || self.len() - offset < data.len() {
            return Err(SparseMappingError::OutOfBounds);
        }
        // SAFETY: the bounds have been checked above.
        unsafe {
            let src = (self.as_ptr() as *const u8).add(offset);
            try_copy(src, data.as_mut_ptr(), data.len()).map_err(SparseMappingError::Memory)
        }
    }

    /// Tries to read a type `T` from `offset`.
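    ///
    /// A hedged sketch reading a plain integer back out of the mapping:
    ///
    /// ```no_run
    /// use sparse_mmap::SparseMapping;
    ///
    /// sparse_mmap::initialize_try_copy();
    /// let page = SparseMapping::page_size();
    /// let mapping = SparseMapping::new(page).unwrap();
    /// mapping.alloc(0, page).unwrap();
    /// mapping.write_at(0, &0x12345678_u32.to_ne_bytes()).unwrap();
    /// let v: u32 = mapping.read_plain(0).unwrap();
    /// assert_eq!(v, 0x12345678);
    /// ```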
    pub fn read_plain<T: FromBytes + Immutable + KnownLayout>(
        &self,
        offset: usize,
    ) -> Result<T, SparseMappingError> {
        let mut obj = MaybeUninit::<T>::uninit();
        // SAFETY: `obj` is a valid target for writes.
        unsafe {
            self.read_at(
                offset,
                std::slice::from_raw_parts_mut(obj.as_mut_ptr().cast::<u8>(), size_of::<T>()),
            )?;
        }
        // SAFETY: `obj` was fully initialized by `read_at`.
        Ok(unsafe { obj.assume_init() })
    }

    /// Tries to fill a region of the sparse mapping with `val`.
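    ///
    /// A hedged sketch:
    ///
    /// ```no_run
    /// use sparse_mmap::SparseMapping;
    ///
    /// sparse_mmap::initialize_try_copy();
    /// let page = SparseMapping::page_size();
    /// let mapping = SparseMapping::new(page).unwrap();
    /// mapping.alloc(0, page).unwrap();
    /// mapping.fill_at(0, 0xff, page).unwrap();
    /// ```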
    pub fn fill_at(&self, offset: usize, val: u8, len: usize) -> Result<(), SparseMappingError> {
        assert!(self.is_local(), "cannot fill remote mappings");

        if self.len() < offset || self.len() - offset < len {
            return Err(SparseMappingError::OutOfBounds);
        }
        // SAFETY: the bounds have been checked above.
        unsafe {
            let dest = self.as_ptr().cast::<u8>().add(offset);
            try_write_bytes(dest, val, len).map_err(SparseMappingError::Memory)
        }
    }

    /// Gets a slice for accessing the mapped data directly.
    ///
    /// This is safe from a Rust memory model perspective, since the underlying
    /// VA is either mapped and owned in a shared state by this object (in
    /// which case `&[AtomicU8]` access from multiple threads is fine), or the
    /// VA is not mapped but is reserved and so will not be mapped by another
    /// Rust object.
    ///
    /// In the latter case, actually accessing the data may cause a fault, which
    /// will likely lead to a process crash, so care must nonetheless be taken
    /// when using this method.
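    ///
    /// A hedged sketch (the `alloc` call commits the page, so the loads
    /// cannot fault; it assumes freshly committed pages read as zero):
    ///
    /// ```no_run
    /// use sparse_mmap::SparseMapping;
    /// use std::sync::atomic::Ordering;
    ///
    /// let page = SparseMapping::page_size();
    /// let mapping = SparseMapping::new(page).unwrap();
    /// mapping.alloc(0, page).unwrap();
    /// let bytes = mapping.atomic_slice(0, 16);
    /// assert_eq!(bytes[0].load(Ordering::Relaxed), 0);
    /// ```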
    pub fn atomic_slice(&self, start: usize, len: usize) -> &[AtomicU8] {
        assert!(self.len() >= start && self.len() - start >= len);
        // SAFETY: the slice is within the mapped range.
        unsafe { std::slice::from_raw_parts((self.as_ptr() as *const AtomicU8).add(start), len) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[derive(Copy, Clone, Debug)]
    enum Primitive {
        Read,
        Write,
        CompareAndSwap,
    }

    #[repr(u32)]
    #[derive(Copy, Clone, Debug, Eq, PartialEq)]
    enum Size {
        Bit8 = 8,
        Bit16 = 16,
        Bit32 = 32,
        Bit64 = 64,
    }

    fn test_unsafe_primitive(primitive: Primitive, size: Size) {
        // NOTE: this test provides a very basic validation of the unsafe
        // access primitives, mostly to check that the failure address is
        // returned correctly. See other tests for more.
        let mut dest = !0u64;
        let dest_addr = std::ptr::from_mut(&mut dest).cast::<()>();
        let src = 0x5555_5555_5555_5555u64;
        let src_addr = std::ptr::from_ref(&src).cast::<()>();
        let bad_addr_mut = 0x100 as *mut (); // Within 0..0x1000
        let bad_addr = bad_addr_mut.cast_const();
        let nonsense_addr = !0u64 as *mut ();
        let expected = if size != Size::Bit64 {
            dest.wrapping_shl(size as u32) | src.wrapping_shr(64 - (size as u32))
        } else {
            src
        };
        let mut af = AccessFailure {
            address: nonsense_addr.cast(),
            #[cfg(unix)]
            si_signo: 0,
            #[cfg(unix)]
            si_code: 0,
        };
        let af_addr = &mut af as *mut _;

        let res = unsafe {
            match size {
                Size::Bit8 => match primitive {
                    Primitive::Read => try_read8(dest_addr.cast(), src_addr.cast(), af_addr),
                    Primitive::Write => try_write8(dest_addr.cast(), src as u8, af_addr),
                    Primitive::CompareAndSwap => {
                        1 - try_cmpxchg8(dest_addr.cast(), &mut (dest as u8), src as u8, af_addr)
                    }
                },
                Size::Bit16 => match primitive {
                    Primitive::Read => try_read16(dest_addr.cast(), src_addr.cast(), af_addr),
                    Primitive::Write => try_write16(dest_addr.cast(), src as u16, af_addr),
                    Primitive::CompareAndSwap => {
                        1 - try_cmpxchg16(dest_addr.cast(), &mut (dest as u16), src as u16, af_addr)
                    }
                },
                Size::Bit32 => match primitive {
                    Primitive::Read => try_read32(dest_addr.cast(), src_addr.cast(), af_addr),
                    Primitive::Write => try_write32(dest_addr.cast(), src as u32, af_addr),
                    Primitive::CompareAndSwap => {
                        1 - try_cmpxchg32(dest_addr.cast(), &mut (dest as u32), src as u32, af_addr)
                    }
                },
                Size::Bit64 => match primitive {
                    Primitive::Read => try_read64(dest_addr.cast(), src_addr.cast(), af_addr),
                    Primitive::Write => try_write64(dest_addr.cast(), src, af_addr),
                    Primitive::CompareAndSwap => {
                        1 - try_cmpxchg64(dest_addr.cast(), &mut { dest }, src, af_addr)
                    }
                },
            }
        };
        assert_eq!(
            dest, expected,
            "Expected value must match the result for {primitive:?} and {size:?}"
        );
        assert_eq!(
            res, 0,
            "Success should be returned for {primitive:?} and {size:?}"
        );
        assert_eq!(
            af.address,
            nonsense_addr.cast(),
            "Fault address must not be set for {primitive:?} and {size:?}"
        );

        let res = unsafe {
            match size {
                Size::Bit8 => match primitive {
                    Primitive::Read => try_read8(dest_addr.cast(), bad_addr.cast(), af_addr),
                    Primitive::Write => try_write8(bad_addr_mut.cast(), src as u8, af_addr),
                    Primitive::CompareAndSwap => {
                        try_cmpxchg8(bad_addr_mut.cast(), &mut (dest as u8), src as u8, af_addr)
                    }
                },
                Size::Bit16 => match primitive {
                    Primitive::Read => try_read16(dest_addr.cast(), bad_addr.cast(), af_addr),
                    Primitive::Write => try_write16(bad_addr_mut.cast(), src as u16, af_addr),
                    Primitive::CompareAndSwap => {
                        try_cmpxchg16(bad_addr_mut.cast(), &mut (dest as u16), src as u16, af_addr)
                    }
                },
                Size::Bit32 => match primitive {
                    Primitive::Read => try_read32(dest_addr.cast(), bad_addr.cast(), af_addr),
                    Primitive::Write => try_write32(bad_addr_mut.cast(), src as u32, af_addr),
                    Primitive::CompareAndSwap => {
                        try_cmpxchg32(bad_addr_mut.cast(), &mut (dest as u32), src as u32, af_addr)
                    }
                },
                Size::Bit64 => match primitive {
                    Primitive::Read => try_read64(dest_addr.cast(), bad_addr.cast(), af_addr),
                    Primitive::Write => try_write64(bad_addr_mut.cast(), src, af_addr),
                    Primitive::CompareAndSwap => {
                        try_cmpxchg64(bad_addr_mut.cast(), &mut { dest }, src, af_addr)
                    }
                },
            }
        };
        assert_eq!(
            dest, expected,
            "A fault must leave the destination unchanged for {primitive:?} and {size:?}"
        );
        assert_eq!(
            res, -1,
            "Error code must be returned for {primitive:?} and {size:?}"
        );
        assert_eq!(
            af.address,
            bad_addr_mut.cast(),
            "Fault address must be set for {primitive:?} and {size:?}"
        );
    }

    #[test]
    fn test_unsafe_primitives() {
        initialize_try_copy();

        for primitive in [Primitive::Read, Primitive::Write, Primitive::CompareAndSwap] {
            for size in [Size::Bit8, Size::Bit16, Size::Bit32, Size::Bit64] {
                test_unsafe_primitive(primitive, size);
            }
        }
    }

    static BUF: [u8; 65536] = [0xcc; 65536];

    fn test_with(range_size: usize) {
        let page_size = SparseMapping::page_size();

        let mapping = SparseMapping::new(range_size).unwrap();
        mapping.alloc(page_size, page_size).unwrap();
        let slice = unsafe {
            std::slice::from_raw_parts_mut(mapping.as_ptr().add(page_size).cast::<u8>(), page_size)
        };
        slice.copy_from_slice(&BUF[..page_size]);
        mapping.unmap(page_size, page_size).unwrap();

        mapping.alloc(range_size - page_size, page_size).unwrap();
        let slice = unsafe {
            std::slice::from_raw_parts_mut(
                mapping.as_ptr().add(range_size - page_size).cast::<u8>(),
                page_size,
            )
        };
        slice.copy_from_slice(&BUF[..page_size]);
        mapping.unmap(range_size - page_size, page_size).unwrap();
        drop(mapping);
    }

    #[test]
    fn test_sparse_mapping() {
        test_with(0x100000);
        test_with(0x200000);
        test_with(0x200000 + SparseMapping::page_size());
        test_with(0x40000000);
        test_with(0x40000000 + SparseMapping::page_size());
    }

    #[test]
    fn test_try_copy() {
        initialize_try_copy();

        let mapping = SparseMapping::new(2 * 1024 * 1024).unwrap();
        let page_size = SparseMapping::page_size();
        mapping.alloc(page_size, page_size).unwrap();
        let base = mapping.as_ptr().cast::<u8>();
        unsafe {
            try_copy(BUF.as_ptr(), base, 100).unwrap_err();
            try_copy(BUF.as_ptr(), base.add(page_size), 100).unwrap();
            try_copy(BUF.as_ptr(), base.add(page_size), page_size + 1).unwrap_err();
        }
    }

    #[test]
    fn test_cmpxchg() {
        initialize_try_copy();

        let page_size = SparseMapping::page_size();
        let mapping = SparseMapping::new(page_size * 2).unwrap();
        mapping.alloc(0, page_size).unwrap();
        let base = mapping.as_ptr().cast::<u8>();
        unsafe {
            assert_eq!(try_compare_exchange(base.add(8), 0, 1).unwrap().unwrap(), 1);
            assert_eq!(
                try_compare_exchange(base.add(8), 0, 2)
                    .unwrap()
                    .unwrap_err(),
                1
            );
            assert_eq!(
                try_compare_exchange(base.cast::<u64>().add(1), 1, 2)
                    .unwrap()
                    .unwrap(),
                2
            );
            assert!(try_compare_exchange_ref(base.add(8), &mut [2u8, 0], &[3, 0]).unwrap());
            try_compare_exchange(base.add(page_size), 0, 2).unwrap_err();
        }
    }

    #[test]
    fn test_overlapping_mappings() {
        #![expect(clippy::identity_op)]

        let page_size = SparseMapping::page_size();
        let mapping = SparseMapping::new(0x10 * page_size).unwrap();
        mapping.alloc(0x1 * page_size, 0x4 * page_size).unwrap();
        mapping.alloc(0x1 * page_size, 0x2 * page_size).unwrap();
        mapping.alloc(0x2 * page_size, 0x3 * page_size).unwrap();
        mapping.alloc(0, 0x10 * page_size).unwrap();
        mapping.alloc(0x8 * page_size, 0x8 * page_size).unwrap();
        mapping.unmap(0xc * page_size, 0x2 * page_size).unwrap();
        mapping.alloc(0x9 * page_size, 0x4 * page_size).unwrap();
        mapping.unmap(0x3 * page_size, 0xb * page_size).unwrap();

        mapping.alloc(0x5 * page_size, 0x4 * page_size).unwrap();
        mapping.alloc(0x6 * page_size, 0x2 * page_size).unwrap();
        mapping.alloc(0x6 * page_size, 0x1 * page_size).unwrap();
        mapping.alloc(0x4 * page_size, 0x3 * page_size).unwrap();

        let shmem = alloc_shared_memory(0x4 * page_size).unwrap();
        mapping
            .map_file(0x5 * page_size, 0x4 * page_size, &shmem, 0, true)
            .unwrap();
        mapping
            .map_file(0x6 * page_size, 0x2 * page_size, &shmem, 0, true)
            .unwrap();
        mapping
            .map_file(0x6 * page_size, 0x1 * page_size, &shmem, 0, true)
            .unwrap();
        mapping
            .map_file(0x4 * page_size, 0x3 * page_size, &shmem, 0, true)
            .unwrap();

        drop(mapping);
    }
}