sparse_mmap/lib.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Memory-related abstractions.

// UNSAFETY: Manual pointer manipulation, dealing with mmap, and a signal handler.
#![expect(unsafe_code)]
#![expect(missing_docs)]
#![expect(clippy::undocumented_unsafe_blocks, clippy::missing_safety_doc)]

pub mod alloc;
mod trycopy_windows_arm64;
mod trycopy_windows_x64;
pub mod unix;
pub mod windows;

pub use sys::AsMappableRef;
pub use sys::Mappable;
pub use sys::MappableRef;
pub use sys::SparseMapping;
pub use sys::alloc_shared_memory;
pub use sys::new_mappable_from_file;

use std::mem::MaybeUninit;
use std::sync::atomic::AtomicU8;
use thiserror::Error;
#[cfg(unix)]
use unix as sys;
#[cfg(windows)]
use windows as sys;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

/// Must be called before using `try_copy` (or the other `try_*` routines) on
/// Unix platforms.
pub fn initialize_try_copy() {
    #[cfg(unix)]
    {
        static INIT: std::sync::Once = std::sync::Once::new();
        INIT.call_once(|| unsafe {
            let err = install_signal_handlers();
            if err != 0 {
                panic!(
                    "could not install signal handlers: {}",
                    std::io::Error::from_raw_os_error(err)
                )
            }
        });
    }
}

unsafe extern "C" {
    #[cfg(unix)]
    fn install_signal_handlers() -> i32;

    fn try_memmove(
        dest: *mut u8,
        src: *const u8,
        length: usize,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_memset(dest: *mut u8, c: i32, length: usize, failure: *mut AccessFailure) -> i32;
    fn try_cmpxchg8(
        dest: *mut u8,
        expected: &mut u8,
        desired: u8,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_cmpxchg16(
        dest: *mut u16,
        expected: &mut u16,
        desired: u16,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_cmpxchg32(
        dest: *mut u32,
        expected: &mut u32,
        desired: u32,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_cmpxchg64(
        dest: *mut u64,
        expected: &mut u64,
        desired: u64,
        failure: *mut AccessFailure,
    ) -> i32;
    fn try_read8(dest: *mut u8, src: *const u8, failure: *mut AccessFailure) -> i32;
    fn try_read16(dest: *mut u16, src: *const u16, failure: *mut AccessFailure) -> i32;
    fn try_read32(dest: *mut u32, src: *const u32, failure: *mut AccessFailure) -> i32;
    fn try_read64(dest: *mut u64, src: *const u64, failure: *mut AccessFailure) -> i32;
    fn try_write8(dest: *mut u8, value: u8, failure: *mut AccessFailure) -> i32;
    fn try_write16(dest: *mut u16, value: u16, failure: *mut AccessFailure) -> i32;
    fn try_write32(dest: *mut u32, value: u32, failure: *mut AccessFailure) -> i32;
    fn try_write64(dest: *mut u64, value: u64, failure: *mut AccessFailure) -> i32;
}

#[repr(C)]
struct AccessFailure {
    address: *mut u8,
    #[cfg(unix)]
    si_signo: i32,
    #[cfg(unix)]
    si_code: i32,
}

#[derive(Debug, Error)]
#[error("failed to {} memory", if self.is_write { "write" } else { "read" })]
pub struct MemoryError {
    offset: usize,
    is_write: bool,
    #[source]
    source: OsAccessError,
}

#[derive(Debug, Error)]
enum OsAccessError {
    #[cfg(windows)]
    #[error("access violation")]
    AccessViolation,
    #[cfg(unix)]
    #[error("SIGSEGV (si_code = {0:x})")]
    Sigsegv(u32),
    #[cfg(unix)]
    #[error("SIGBUS (si_code = {0:x})")]
    Sigbus(u32),
}

impl MemoryError {
    fn new(src: Option<*const u8>, dest: *mut u8, len: usize, failure: &AccessFailure) -> Self {
        let (offset, is_write) = if failure.address.is_null() {
            // In the case of a general protection fault (#GP) the provided address is zero.
            (0, src.is_none())
        } else if (dest..dest.wrapping_add(len)).contains(&failure.address) {
            (failure.address as usize - dest as usize, true)
        } else if let Some(src) = src {
            if (src..src.wrapping_add(len)).contains(&failure.address.cast_const()) {
                (failure.address as usize - src as usize, false)
            } else {
                panic!(
                    "invalid failure address: {:p} src: {:p} dest: {:p} len: {:#x}",
                    failure.address, src, dest, len
                );
            }
        } else {
            panic!(
                "invalid failure address: {:p} src: None dest: {:p} len: {:#x}",
                failure.address, dest, len
            );
        };
        #[cfg(windows)]
        let source = OsAccessError::AccessViolation;
        #[cfg(unix)]
        let source = match failure.si_signo {
            libc::SIGSEGV => OsAccessError::Sigsegv(failure.si_code as u32),
            libc::SIGBUS => OsAccessError::Sigbus(failure.si_code as u32),
            _ => {
                panic!(
                    "unexpected signal: {} src: {:?} dest: {:p} len: {:#x}",
                    failure.si_signo, src, dest, len
                );
            }
        };
        Self {
            offset,
            is_write,
            source,
        }
    }

    /// Returns the byte offset into the buffer at which the access violation
    /// occurred.
    pub fn offset(&self) -> usize {
        self.offset
    }
}

/// Copies `count` elements from `src` to `dest`. `src` and `dest` may overlap.
/// Fails on access violation/SIGSEGV. Note that in case of failure, some of the
/// bytes (even partial elements) may already have been copied.
///
/// This also fails if `initialize_try_copy` has not been called.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `src` or
/// `dest` is being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `src` and
/// `dest` are valid, reserved addresses but you do not know if they are mapped
/// with the appropriate protection. For example, this routine is useful if
/// `dest` is a sparse mapping where some pages are mapped with
/// PAGE_NOACCESS/PROT_NONE, and some are mapped with PAGE_READWRITE/PROT_WRITE.
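///
/// # Example
///
/// A minimal sketch of the intended usage, mirroring this crate's tests:
/// commit a single page of a [`SparseMapping`] and copy into it.
///
/// ```no_run
/// use sparse_mmap::{SparseMapping, initialize_try_copy, try_copy};
///
/// initialize_try_copy();
///
/// let page_size = SparseMapping::page_size();
/// let mapping = SparseMapping::new(2 * page_size).unwrap();
/// mapping.alloc(page_size, page_size).unwrap();
///
/// let src = vec![0xcc_u8; 16];
/// let base = mapping.as_ptr().cast::<u8>();
/// // SAFETY: both ranges lie within reserved address space.
/// unsafe {
///     // The first page is reserved but not committed: the copy fails cleanly.
///     try_copy(src.as_ptr(), base, src.len()).unwrap_err();
///     // The second page is committed: the copy succeeds.
///     try_copy(src.as_ptr(), base.add(page_size), src.len()).unwrap();
/// }
/// ```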
pub unsafe fn try_copy<T>(src: *const T, dest: *mut T, count: usize) -> Result<(), MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller.
    let ret = unsafe {
        try_memmove(
            dest.cast::<u8>(),
            src.cast::<u8>(),
            count * size_of::<T>(),
            failure.as_mut_ptr(),
        )
    };
    match ret {
        0 => Ok(()),
        _ => Err(MemoryError::new(
            Some(src.cast()),
            dest.cast(),
            count * size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

/// Writes `count * size_of::<T>()` bytes of the value `val` to `dest`. Fails
/// on access violation/SIGSEGV. Note that in case of failure, some of the
/// bytes (even partial elements) may already have been written.
///
/// This also fails if `initialize_try_copy` has not been called.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `dest` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `dest` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `dest` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
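///
/// # Example
///
/// A minimal sketch, assuming a mapping whose first page is committed:
///
/// ```no_run
/// use sparse_mmap::{SparseMapping, initialize_try_copy, try_write_bytes};
///
/// initialize_try_copy();
///
/// let page_size = SparseMapping::page_size();
/// let mapping = SparseMapping::new(2 * page_size).unwrap();
/// mapping.alloc(0, page_size).unwrap();
///
/// let base = mapping.as_ptr().cast::<u8>();
/// // SAFETY: both ranges lie within reserved address space.
/// unsafe {
///     // Committed page: the fill succeeds.
///     try_write_bytes(base, 0xff, page_size).unwrap();
///     // Reserved-only page: the fill fails with a MemoryError.
///     try_write_bytes(base.add(page_size), 0xff, 1).unwrap_err();
/// }
/// ```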
pub unsafe fn try_write_bytes<T>(dest: *mut T, val: u8, count: usize) -> Result<(), MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller.
    let ret = unsafe {
        try_memset(
            dest.cast::<u8>(),
            val.into(),
            count * size_of::<T>(),
            failure.as_mut_ptr(),
        )
    };
    match ret {
        0 => Ok(()),
        _ => Err(MemoryError::new(
            None,
            dest.cast(),
            count * size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

/// Atomically swaps the value at `dest` with `new` when `*dest` is `current`,
/// using a sequentially-consistent memory ordering.
///
/// Returns `Ok(Ok(new))` if the swap was successful, `Ok(Err(*dest))` if the
/// swap failed, or `Err(MemoryError)` if the swap could not be attempted due
/// to an access violation.
///
/// Panics if the size is not 1, 2, 4, or 8 bytes.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `dest` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `dest` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `dest` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
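///
/// # Example
///
/// A minimal sketch of the three possible outcomes, mirroring this crate's
/// tests:
///
/// ```no_run
/// use sparse_mmap::{SparseMapping, initialize_try_copy, try_compare_exchange};
///
/// initialize_try_copy();
///
/// let page_size = SparseMapping::page_size();
/// let mapping = SparseMapping::new(2 * page_size).unwrap();
/// mapping.alloc(0, page_size).unwrap();
///
/// let base = mapping.as_ptr().cast::<u8>();
/// // SAFETY: `base` points into reserved address space.
/// unsafe {
///     // *base == 0, so the swap succeeds and returns the new value.
///     assert_eq!(try_compare_exchange(base, 0, 1).unwrap().unwrap(), 1);
///     // *base is now 1, so the swap fails and returns the observed value.
///     assert_eq!(try_compare_exchange(base, 0, 2).unwrap().unwrap_err(), 1);
///     // An uncommitted page cannot be accessed at all.
///     try_compare_exchange(base.add(page_size), 0, 2).unwrap_err();
/// }
/// ```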
pub unsafe fn try_compare_exchange<T: IntoBytes + FromBytes + Immutable + KnownLayout>(
    dest: *mut T,
    mut current: T,
    new: T,
) -> Result<Result<T, T>, MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller
    let ret = unsafe {
        match size_of::<T>() {
            1 => try_cmpxchg8(
                dest.cast(),
                std::mem::transmute::<&mut T, &mut u8>(&mut current),
                std::mem::transmute_copy::<T, u8>(&new),
                failure.as_mut_ptr(),
            ),
            2 => try_cmpxchg16(
                dest.cast(),
                std::mem::transmute::<&mut T, &mut u16>(&mut current),
                std::mem::transmute_copy::<T, u16>(&new),
                failure.as_mut_ptr(),
            ),
            4 => try_cmpxchg32(
                dest.cast(),
                std::mem::transmute::<&mut T, &mut u32>(&mut current),
                std::mem::transmute_copy::<T, u32>(&new),
                failure.as_mut_ptr(),
            ),
            8 => try_cmpxchg64(
                dest.cast(),
                std::mem::transmute::<&mut T, &mut u64>(&mut current),
                std::mem::transmute_copy::<T, u64>(&new),
                failure.as_mut_ptr(),
            ),
            _ => panic!("unsupported size"),
        }
    };
    match ret {
        n if n > 0 => Ok(Ok(new)),
        0 => Ok(Err(current)),
        _ => Err(MemoryError::new(
            None,
            dest.cast(),
            size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

/// Atomically swaps the value at `dest` with `new` when `*dest` is `current`,
/// using a sequentially-consistent memory ordering.
///
/// Returns `Ok(true)` if the swap was successful, `Ok(false)` if the swap
/// failed (after updating `current`), or `Err(MemoryError)` if the swap could
/// not be attempted due to an access violation.
///
/// Panics if `current` and `new` are not the same size or that size is not
/// 1, 2, 4, or 8 bytes.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `dest` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `dest` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `dest` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
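///
/// # Example
///
/// A minimal sketch with a two-byte value; `demo` is a hypothetical caller
/// that already holds a pointer into a committed page containing `[2, 0]`:
///
/// ```no_run
/// use sparse_mmap::{MemoryError, try_compare_exchange_ref};
///
/// fn demo(dest: *mut u8) -> Result<bool, MemoryError> {
///     let mut current = [2u8, 0];
///     // SAFETY: `dest` is assumed valid and reserved by the caller.
///     let swapped = unsafe { try_compare_exchange_ref(dest, &mut current, &[3u8, 0])? };
///     // On failure, `current` now holds the value observed at `dest`.
///     Ok(swapped)
/// }
/// ```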
pub unsafe fn try_compare_exchange_ref<
    T: IntoBytes + FromBytes + Immutable + KnownLayout + ?Sized,
>(
    dest: *mut u8,
    current: &mut T,
    new: &T,
) -> Result<bool, MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller
    let ret = unsafe {
        match (size_of_val(current), size_of_val(new)) {
            (1, 1) => try_cmpxchg8(
                dest,
                &mut *current.as_mut_bytes().as_mut_ptr(),
                new.as_bytes()[0],
                failure.as_mut_ptr(),
            ),
            (2, 2) => try_cmpxchg16(
                dest.cast(),
                &mut *current.as_mut_bytes().as_mut_ptr().cast(),
                u16::from_ne_bytes(new.as_bytes().try_into().unwrap()),
                failure.as_mut_ptr(),
            ),
            (4, 4) => try_cmpxchg32(
                dest.cast(),
                &mut *current.as_mut_bytes().as_mut_ptr().cast(),
                u32::from_ne_bytes(new.as_bytes().try_into().unwrap()),
                failure.as_mut_ptr(),
            ),
            (8, 8) => try_cmpxchg64(
                dest.cast(),
                &mut *current.as_mut_bytes().as_mut_ptr().cast(),
                u64::from_ne_bytes(new.as_bytes().try_into().unwrap()),
                failure.as_mut_ptr(),
            ),
            _ => panic!("unsupported or mismatched size"),
        }
    };
    if ret < 0 {
        return Err(MemoryError::new(
            None,
            dest.cast(),
            size_of_val(current),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        ));
    }
    Ok(ret > 0)
}

/// Reads the value at `src` treating the pointer as a volatile access.
///
/// Returns `Ok(T)` if the read was successful, or `Err(MemoryError)` if the
/// read was unsuccessful.
///
/// Panics if the size is not 1, 2, 4, or 8 bytes.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `src` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `src` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `src` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
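///
/// # Example
///
/// A minimal sketch; `demo` is a hypothetical caller holding a pointer into a
/// possibly partially committed mapping:
///
/// ```no_run
/// use sparse_mmap::try_read_volatile;
///
/// fn demo(src: *const u32) {
///     // SAFETY: `src` is assumed valid and reserved by the caller.
///     match unsafe { try_read_volatile(src) } {
///         Ok(v) => println!("read {v:#010x}"),
///         Err(e) => println!("faulted at byte offset {}", e.offset()),
///     }
/// }
/// ```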
pub unsafe fn try_read_volatile<T: FromBytes + Immutable + KnownLayout>(
    src: *const T,
) -> Result<T, MemoryError> {
    let mut dest = MaybeUninit::<T>::uninit();
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller
    let ret = unsafe {
        match size_of::<T>() {
            1 => try_read8(dest.as_mut_ptr().cast(), src.cast(), failure.as_mut_ptr()),
            2 => try_read16(dest.as_mut_ptr().cast(), src.cast(), failure.as_mut_ptr()),
            4 => try_read32(dest.as_mut_ptr().cast(), src.cast(), failure.as_mut_ptr()),
            8 => try_read64(dest.as_mut_ptr().cast(), src.cast(), failure.as_mut_ptr()),
            _ => panic!("unsupported size"),
        }
    };
    match ret {
        0 => {
            // SAFETY: dest was fully initialized by try_read.
            Ok(unsafe { dest.assume_init() })
        }
        _ => Err(MemoryError::new(
            Some(src.cast()),
            dest.as_mut_ptr().cast(),
            size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

/// Writes `value` at `dest` treating the pointer as a volatile access.
///
/// Returns `Ok(())` if the write was successful, or `Err(MemoryError)` if the
/// write was unsuccessful.
///
/// Panics if the size is not 1, 2, 4, or 8 bytes.
///
/// # Safety
///
/// This routine is safe to use even if the memory pointed to by `dest` is
/// being concurrently mutated.
///
/// WARNING: This routine should only be used when you know that `dest` is a
/// valid, reserved address but you do not know if it is mapped with the
/// appropriate protection. For example, this routine is useful if `dest` is a
/// sparse mapping where some pages are mapped with PAGE_NOACCESS/PROT_NONE, and
/// some are mapped with PAGE_READWRITE/PROT_WRITE.
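///
/// # Example
///
/// A minimal sketch, mirroring [`try_read_volatile`]; `demo` is hypothetical:
///
/// ```no_run
/// use sparse_mmap::{MemoryError, try_write_volatile};
///
/// fn demo(dest: *mut u64) -> Result<(), MemoryError> {
///     // SAFETY: `dest` is assumed valid and reserved by the caller.
///     unsafe { try_write_volatile(dest, &0x1234_5678_u64) }
/// }
/// ```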
pub unsafe fn try_write_volatile<T: IntoBytes + Immutable + KnownLayout>(
    dest: *mut T,
    value: &T,
) -> Result<(), MemoryError> {
    let mut failure = MaybeUninit::uninit();
    // SAFETY: guaranteed by caller
    let ret = unsafe {
        match size_of::<T>() {
            1 => try_write8(
                dest.cast(),
                std::mem::transmute_copy(value),
                failure.as_mut_ptr(),
            ),
            2 => try_write16(
                dest.cast(),
                std::mem::transmute_copy(value),
                failure.as_mut_ptr(),
            ),
            4 => try_write32(
                dest.cast(),
                std::mem::transmute_copy(value),
                failure.as_mut_ptr(),
            ),
            8 => try_write64(
                dest.cast(),
                std::mem::transmute_copy(value),
                failure.as_mut_ptr(),
            ),
            _ => panic!("unsupported size"),
        }
    };
    match ret {
        0 => Ok(()),
        _ => Err(MemoryError::new(
            None,
            dest.cast(),
            size_of::<T>(),
            // SAFETY: failure is initialized in the failure path.
            unsafe { failure.assume_init_ref() },
        )),
    }
}

#[derive(Debug, Error)]
pub enum SparseMappingError {
    #[error("out of bounds")]
    OutOfBounds,
    #[error(transparent)]
    Memory(MemoryError),
}

impl SparseMapping {
    /// Gets the supported page size for sparse mappings.
    pub fn page_size() -> usize {
        sys::page_size()
    }

    fn check(&self, offset: usize, len: usize) -> Result<(), SparseMappingError> {
        if self.len() < offset || self.len() - offset < len {
            return Err(SparseMappingError::OutOfBounds);
        }
        Ok(())
    }

    /// Reads a type `T` from `offset` in the sparse mapping using a single read instruction.
    ///
    /// Panics if `T` is not 1, 2, 4, or 8 bytes in size.
    pub fn read_volatile<T: FromBytes + Immutable + KnownLayout>(
        &self,
        offset: usize,
    ) -> Result<T, SparseMappingError> {
        assert!(self.is_local(), "cannot read from remote mappings");

        self.check(offset, size_of::<T>())?;
        // SAFETY: the bounds have been checked above.
        unsafe { try_read_volatile(self.as_ptr().byte_add(offset).cast()) }
            .map_err(SparseMappingError::Memory)
    }

    /// Writes a type `T` at `offset` in the sparse mapping using a single write instruction.
    ///
    /// Panics if `T` is not 1, 2, 4, or 8 bytes in size.
    pub fn write_volatile<T: IntoBytes + Immutable + KnownLayout>(
        &self,
        offset: usize,
        value: &T,
    ) -> Result<(), SparseMappingError> {
        assert!(self.is_local(), "cannot write to remote mappings");

        self.check(offset, size_of::<T>())?;
        // SAFETY: the bounds have been checked above.
        unsafe { try_write_volatile(self.as_ptr().byte_add(offset).cast(), value) }
            .map_err(SparseMappingError::Memory)
    }

    /// Tries to write into the sparse mapping.
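    ///
    /// # Example
    ///
    /// A minimal sketch of a write/read round trip through a committed page:
    ///
    /// ```no_run
    /// use sparse_mmap::SparseMapping;
    ///
    /// let page_size = SparseMapping::page_size();
    /// let mapping = SparseMapping::new(page_size).unwrap();
    /// mapping.alloc(0, page_size).unwrap();
    ///
    /// mapping.write_at(16, b"hello").unwrap();
    /// let mut buf = [0u8; 5];
    /// mapping.read_at(16, &mut buf).unwrap();
    /// assert_eq!(&buf, b"hello");
    /// ```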
    pub fn write_at(&self, offset: usize, data: &[u8]) -> Result<(), SparseMappingError> {
        assert!(self.is_local(), "cannot write to remote mappings");

        self.check(offset, data.len())?;
        // SAFETY: the bounds have been checked above.
        unsafe {
            let dest = self.as_ptr().cast::<u8>().add(offset);
            try_copy(data.as_ptr(), dest, data.len()).map_err(SparseMappingError::Memory)
        }
    }

    /// Tries to read from the sparse mapping.
    pub fn read_at(&self, offset: usize, data: &mut [u8]) -> Result<(), SparseMappingError> {
        assert!(self.is_local(), "cannot read from remote mappings");

        self.check(offset, data.len())?;
        // SAFETY: the bounds have been checked above.
        unsafe {
            let src = (self.as_ptr() as *const u8).add(offset);
            try_copy(src, data.as_mut_ptr(), data.len()).map_err(SparseMappingError::Memory)
        }
    }

    /// Tries to read a type `T` from `offset`.
    pub fn read_plain<T: FromBytes + Immutable + KnownLayout>(
        &self,
        offset: usize,
    ) -> Result<T, SparseMappingError> {
        if matches!(size_of::<T>(), 1 | 2 | 4 | 8) {
            self.read_volatile(offset)
        } else {
            let mut obj = MaybeUninit::<T>::uninit();
            // SAFETY: `obj` is a valid target for writes.
            unsafe {
                self.read_at(
                    offset,
                    std::slice::from_raw_parts_mut(obj.as_mut_ptr().cast::<u8>(), size_of::<T>()),
                )?;
            }
            // SAFETY: `obj` was fully initialized by `read_at`.
            Ok(unsafe { obj.assume_init() })
        }
    }

    /// Tries to fill a region of the sparse mapping with `val`.
    pub fn fill_at(&self, offset: usize, val: u8, len: usize) -> Result<(), SparseMappingError> {
        assert!(self.is_local(), "cannot fill remote mappings");

        self.check(offset, len)?;
        // SAFETY: the bounds have been checked above.
        unsafe {
            let dest = self.as_ptr().cast::<u8>().add(offset);
            try_write_bytes(dest, val, len).map_err(SparseMappingError::Memory)
        }
    }

    /// Gets a slice for accessing the mapped data directly.
    ///
    /// This is safe from a Rust memory model perspective, since the underlying
    /// VA is either mapped and is owned in a shared state by this object (in
    /// which case &[AtomicU8] access from multiple threads is fine), or the VA
    /// is not mapped but is reserved and so will not be mapped by another Rust
    /// object.
    ///
    /// In the latter case, actually accessing the data may cause a fault, which
    /// will likely lead to a process crash, so care must nonetheless be taken
    /// when using this method.
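    ///
    /// # Example
    ///
    /// A minimal sketch, assuming the accessed page is committed:
    ///
    /// ```no_run
    /// use std::sync::atomic::Ordering;
    ///
    /// use sparse_mmap::SparseMapping;
    ///
    /// let page_size = SparseMapping::page_size();
    /// let mapping = SparseMapping::new(page_size).unwrap();
    /// mapping.alloc(0, page_size).unwrap();
    ///
    /// let bytes = mapping.atomic_slice(0, 16);
    /// bytes[0].store(0xcc, Ordering::Relaxed);
    /// assert_eq!(bytes[0].load(Ordering::Relaxed), 0xcc);
    /// ```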
    pub fn atomic_slice(&self, start: usize, len: usize) -> &[AtomicU8] {
        assert!(self.len() >= start && self.len() - start >= len);
        // SAFETY: slice is within the mapped range
        unsafe { std::slice::from_raw_parts((self.as_ptr() as *const AtomicU8).add(start), len) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[derive(Copy, Clone, Debug)]
    enum Primitive {
        Read,
        Write,
        CompareAndSwap,
    }

    #[repr(u32)]
    #[derive(Copy, Clone, Debug, Eq, PartialEq)]
    enum Size {
        Bit8 = 8,
        Bit16 = 16,
        Bit32 = 32,
        Bit64 = 64,
    }

    fn test_unsafe_primitive(primitive: Primitive, size: Size) {
        // NOTE: this test provides a very basic validation of the raw read,
        // write, and compare-and-swap primitives, mostly to check that the
        // failure address is returned correctly. See other tests for more.
        let mut dest = !0u64;
        let dest_addr = std::ptr::from_mut(&mut dest).cast::<()>();
        let src = 0x5555_5555_5555_5555u64;
        let src_addr = std::ptr::from_ref(&src).cast::<()>();
        let bad_addr_mut = 0x100 as *mut (); // Within 0..0x1000
        let bad_addr = bad_addr_mut.cast_const();
        let nonsense_addr = !0u64 as *mut ();
        let expected = if size != Size::Bit64 {
            dest.wrapping_shl(size as u32) | src.wrapping_shr(64 - (size as u32))
        } else {
            src
        };
        let mut af = AccessFailure {
            address: nonsense_addr.cast(),
            #[cfg(unix)]
            si_signo: 0,
            #[cfg(unix)]
            si_code: 0,
        };
        let af_addr = &mut af as *mut _;

        let res = unsafe {
            match size {
                Size::Bit8 => match primitive {
                    Primitive::Read => try_read8(dest_addr.cast(), src_addr.cast(), af_addr),
                    Primitive::Write => try_write8(dest_addr.cast(), src as u8, af_addr),
                    Primitive::CompareAndSwap => {
                        1 - try_cmpxchg8(dest_addr.cast(), &mut (dest as u8), src as u8, af_addr)
                    }
                },
                Size::Bit16 => match primitive {
                    Primitive::Read => try_read16(dest_addr.cast(), src_addr.cast(), af_addr),
                    Primitive::Write => try_write16(dest_addr.cast(), src as u16, af_addr),
                    Primitive::CompareAndSwap => {
                        1 - try_cmpxchg16(dest_addr.cast(), &mut (dest as u16), src as u16, af_addr)
                    }
                },
                Size::Bit32 => match primitive {
                    Primitive::Read => try_read32(dest_addr.cast(), src_addr.cast(), af_addr),
                    Primitive::Write => try_write32(dest_addr.cast(), src as u32, af_addr),
                    Primitive::CompareAndSwap => {
                        1 - try_cmpxchg32(dest_addr.cast(), &mut (dest as u32), src as u32, af_addr)
                    }
                },
                Size::Bit64 => match primitive {
                    Primitive::Read => try_read64(dest_addr.cast(), src_addr.cast(), af_addr),
                    Primitive::Write => try_write64(dest_addr.cast(), src, af_addr),
                    Primitive::CompareAndSwap => {
                        1 - try_cmpxchg64(dest_addr.cast(), &mut { dest }, src, af_addr)
                    }
                },
            }
        };
        assert_eq!(
            dest, expected,
            "Expected value must match the result for {primitive:?} and {size:?}"
        );
        assert_eq!(
            res, 0,
            "Success should be returned for {primitive:?} and {size:?}"
        );
        assert_eq!(
            af.address,
            nonsense_addr.cast(),
            "Fault address must not be set for {primitive:?} and {size:?}"
        );

        let res = unsafe {
            match size {
                Size::Bit8 => match primitive {
                    Primitive::Read => try_read8(dest_addr.cast(), bad_addr.cast(), af_addr),
                    Primitive::Write => try_write8(bad_addr_mut.cast(), src as u8, af_addr),
                    Primitive::CompareAndSwap => {
                        try_cmpxchg8(bad_addr_mut.cast(), &mut (dest as u8), src as u8, af_addr)
                    }
                },
                Size::Bit16 => match primitive {
                    Primitive::Read => try_read16(dest_addr.cast(), bad_addr.cast(), af_addr),
                    Primitive::Write => try_write16(bad_addr_mut.cast(), src as u16, af_addr),
                    Primitive::CompareAndSwap => {
                        try_cmpxchg16(bad_addr_mut.cast(), &mut (dest as u16), src as u16, af_addr)
                    }
                },
                Size::Bit32 => match primitive {
                    Primitive::Read => try_read32(dest_addr.cast(), bad_addr.cast(), af_addr),
                    Primitive::Write => try_write32(bad_addr_mut.cast(), src as u32, af_addr),
                    Primitive::CompareAndSwap => {
                        try_cmpxchg32(bad_addr_mut.cast(), &mut (dest as u32), src as u32, af_addr)
                    }
                },
                Size::Bit64 => match primitive {
                    Primitive::Read => try_read64(dest_addr.cast(), bad_addr.cast(), af_addr),
                    Primitive::Write => try_write64(bad_addr_mut.cast(), src, af_addr),
                    Primitive::CompareAndSwap => {
                        try_cmpxchg64(bad_addr_mut.cast(), &mut { dest }, src, af_addr)
                    }
                },
            }
        };
        assert_eq!(
            dest, expected,
            "Fault must preserve source and destination for {primitive:?} and {size:?}"
        );
        assert_eq!(
            res, -1,
            "Error code must be returned for {primitive:?} and {size:?}"
        );
        assert_eq!(
            af.address,
            bad_addr_mut.cast(),
            "Fault address must be set for {primitive:?} and {size:?}"
        );
    }

    #[test]
    fn test_unsafe_primitives() {
        initialize_try_copy();

        for primitive in [Primitive::Read, Primitive::Write, Primitive::CompareAndSwap] {
            for size in [Size::Bit8, Size::Bit16, Size::Bit32, Size::Bit64] {
                test_unsafe_primitive(primitive, size);
            }
        }
    }

    static BUF: [u8; 65536] = [0xcc; 65536];

    fn test_with(range_size: usize) {
        let page_size = SparseMapping::page_size();

        let mapping = SparseMapping::new(range_size).unwrap();
        mapping.alloc(page_size, page_size).unwrap();
        let slice = unsafe {
            std::slice::from_raw_parts_mut(mapping.as_ptr().add(page_size).cast::<u8>(), page_size)
        };
        slice.copy_from_slice(&BUF[..page_size]);
        mapping.unmap(page_size, page_size).unwrap();

        mapping.alloc(range_size - page_size, page_size).unwrap();
        let slice = unsafe {
            std::slice::from_raw_parts_mut(
                mapping.as_ptr().add(range_size - page_size).cast::<u8>(),
                page_size,
            )
        };
        slice.copy_from_slice(&BUF[..page_size]);
        mapping.unmap(range_size - page_size, page_size).unwrap();
        drop(mapping);
    }

    #[test]
    fn test_sparse_mapping() {
        test_with(0x100000);
        test_with(0x200000);
        test_with(0x200000 + SparseMapping::page_size());
        test_with(0x40000000);
        test_with(0x40000000 + SparseMapping::page_size());
    }

    #[test]
    fn test_try_copy() {
        initialize_try_copy();

        let mapping = SparseMapping::new(2 * 1024 * 1024).unwrap();
        let page_size = SparseMapping::page_size();
        mapping.alloc(page_size, page_size).unwrap();
        let base = mapping.as_ptr().cast::<u8>();
        unsafe {
            try_copy(BUF.as_ptr(), base, 100).unwrap_err();
            try_copy(BUF.as_ptr(), base.add(page_size), 100).unwrap();
            try_copy(BUF.as_ptr(), base.add(page_size), page_size + 1).unwrap_err();
        }
    }

    #[test]
    fn test_cmpxchg() {
        initialize_try_copy();

        let page_size = SparseMapping::page_size();
        let mapping = SparseMapping::new(page_size * 2).unwrap();
        mapping.alloc(0, page_size).unwrap();
        let base = mapping.as_ptr().cast::<u8>();
        unsafe {
            assert_eq!(try_compare_exchange(base.add(8), 0, 1).unwrap().unwrap(), 1);
            assert_eq!(
                try_compare_exchange(base.add(8), 0, 2)
                    .unwrap()
                    .unwrap_err(),
                1
            );
            assert_eq!(
                try_compare_exchange(base.cast::<u64>().add(1), 1, 2)
                    .unwrap()
                    .unwrap(),
                2
            );
            assert!(try_compare_exchange_ref(base.add(8), &mut [2u8, 0], &[3, 0]).unwrap());
            try_compare_exchange(base.add(page_size), 0, 2).unwrap_err();
        }
    }

    #[test]
    fn test_overlapping_mappings() {
        #![expect(clippy::identity_op)]

        let page_size = SparseMapping::page_size();
        let mapping = SparseMapping::new(0x10 * page_size).unwrap();
        mapping.alloc(0x1 * page_size, 0x4 * page_size).unwrap();
        mapping.alloc(0x1 * page_size, 0x2 * page_size).unwrap();
        mapping.alloc(0x2 * page_size, 0x3 * page_size).unwrap();
        mapping.alloc(0, 0x10 * page_size).unwrap();
        mapping.alloc(0x8 * page_size, 0x8 * page_size).unwrap();
        mapping.unmap(0xc * page_size, 0x2 * page_size).unwrap();
        mapping.alloc(0x9 * page_size, 0x4 * page_size).unwrap();
        mapping.unmap(0x3 * page_size, 0xb * page_size).unwrap();

        mapping.alloc(0x5 * page_size, 0x4 * page_size).unwrap();
        mapping.alloc(0x6 * page_size, 0x2 * page_size).unwrap();
        mapping.alloc(0x6 * page_size, 0x1 * page_size).unwrap();
        mapping.alloc(0x4 * page_size, 0x3 * page_size).unwrap();

        let shmem = alloc_shared_memory(0x4 * page_size).unwrap();
        mapping
            .map_file(0x5 * page_size, 0x4 * page_size, &shmem, 0, true)
            .unwrap();
        mapping
            .map_file(0x6 * page_size, 0x2 * page_size, &shmem, 0, true)
            .unwrap();
        mapping
            .map_file(0x6 * page_size, 0x1 * page_size, &shmem, 0, true)
            .unwrap();
        mapping
            .map_file(0x4 * page_size, 0x3 * page_size, &shmem, 0, true)
            .unwrap();

        drop(mapping);
    }
}