//! Types and iterators for working with 4KB-page-aligned memory ranges:
//! constructing [`MemoryRange`]s, walking pairs of sorted range lists, and
//! flattening adjacent ranges.
//!
//! The doctest examples below assume this crate is built under the name
//! `memory_range`; all addresses in them are illustrative.

#![warn(missing_docs)]
#![forbid(unsafe_code)]
#![no_std]
#[cfg(feature = "std")]
extern crate std;
use core::iter::Iterator;
use core::iter::Peekable;
use core::ops::Range;
/// The size of a 4KB page, in bytes.
const PAGE_SIZE: u64 = 4096;
/// The size of a 2MB large page, in bytes.
const TWO_MB: u64 = 0x20_0000;
/// The size of a 1GB large page, in bytes.
const ONE_GB: u64 = 0x4000_0000;
/// A 4KB-page-aligned byte range of memory.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(
feature = "mesh",
derive(mesh_protobuf::Protobuf),
mesh(package = "topology")
)]
#[cfg_attr(feature = "inspect", derive(inspect::Inspect), inspect(display))]
pub struct MemoryRange {
#[cfg_attr(feature = "mesh", mesh(1))]
start: u64,
#[cfg_attr(feature = "mesh", mesh(2))]
end: u64,
}
impl core::fmt::Display for MemoryRange {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{:#x}-{:#x}", self.start(), self.end())
}
}
impl TryFrom<Range<u64>> for MemoryRange {
type Error = InvalidMemoryRange;
fn try_from(range: Range<u64>) -> Result<Self, Self::Error> {
Self::try_new(range)
}
}
impl TryFrom<Range<usize>> for MemoryRange {
type Error = InvalidMemoryRange;
fn try_from(range: Range<usize>) -> Result<Self, Self::Error> {
Self::try_new(range.start as u64..range.end as u64)
}
}
/// Error returned when a [`MemoryRange`] could not be constructed because the
/// bounds were unaligned or backwards.
#[derive(Debug)]
#[cfg_attr(
feature = "std",
derive(thiserror::Error),
error("unaligned or invalid memory range: {start:#x}-{end:#x}")
)]
#[cfg_attr(not(feature = "std"), allow(dead_code))]
pub struct InvalidMemoryRange {
start: u64,
end: u64,
}
impl MemoryRange {
    /// The maximum page-aligned address.
    pub const MAX_ADDRESS: u64 = u64::MAX & !(PAGE_SIZE - 1);
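
    /// Returns a new memory range for the given address range.
    ///
    /// Panics if the start or end address is not 4KB-aligned or if the range
    /// is backwards; use [`MemoryRange::try_new`] for a fallible variant.
    ///
    /// # Example
    ///
    /// A minimal sketch (the crate name `memory_range` is assumed here, as in
    /// all of the doctests below):
    ///
    /// ```
    /// use memory_range::MemoryRange; // assumed crate name
    ///
    /// let r = MemoryRange::new(0x1000..0x4000);
    /// assert_eq!(r.len(), 0x3000);
    /// assert_eq!(r.page_count_4k(), 3);
    /// ```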
#[track_caller]
pub const fn new(range: Range<u64>) -> Self {
assert!(range.start & (PAGE_SIZE - 1) == 0);
assert!(range.end & (PAGE_SIZE - 1) == 0);
assert!(range.start <= range.end);
Self {
start: range.start,
end: range.end,
}
}
    /// Returns a new memory range for the given address range, or an
    /// [`InvalidMemoryRange`] error if the addresses are not 4KB-aligned or
    /// the range is backwards.
    pub const fn try_new(range: Range<u64>) -> Result<Self, InvalidMemoryRange> {
if range.start & (PAGE_SIZE - 1) != 0
|| range.end & (PAGE_SIZE - 1) != 0
|| range.start > range.end
{
return Err(InvalidMemoryRange {
start: range.start,
end: range.end,
});
}
Ok(Self {
start: range.start,
end: range.end,
})
}
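
    /// Returns the smallest 4KB-page-aligned range that contains the given
    /// byte range, rounding the start down and the end up to page boundaries.
    ///
    /// Panics if the range is backwards or too close to the top of the
    /// address space to round up.
    ///
    /// # Example
    ///
    /// ```
    /// use memory_range::MemoryRange;
    ///
    /// let r = MemoryRange::bounding(0x1234..0x5678);
    /// assert_eq!(r, MemoryRange::new(0x1000..0x6000));
    /// ```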
pub fn bounding(range: Range<u64>) -> Self {
assert!(range.start <= range.end);
assert!(range.end < u64::MAX - PAGE_SIZE);
let start = range.start & !(PAGE_SIZE - 1);
let end = (range.end + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1);
Self::new(start..end)
}
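
    /// Returns a new range for the given range of 4KB page numbers.
    ///
    /// Panics if the page numbers are too large to be represented as byte
    /// addresses.
    ///
    /// # Example
    ///
    /// ```
    /// use memory_range::MemoryRange;
    ///
    /// let r = MemoryRange::from_4k_gpn_range(2..4);
    /// assert_eq!(r, MemoryRange::new(0x2000..0x4000));
    /// ```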
pub fn from_4k_gpn_range(range: Range<u64>) -> Self {
const MAX: u64 = u64::MAX / PAGE_SIZE;
assert!(range.start <= MAX);
assert!(range.end <= MAX);
Self::new(range.start * PAGE_SIZE..range.end * PAGE_SIZE)
}
    /// The empty range, starting and ending at address zero.
    pub const EMPTY: Self = Self::new(0..0);

    /// The start address of the range.
    pub fn start(&self) -> u64 {
        self.start
    }

    /// The start of the range as a 4KB page number.
    pub fn start_4k_gpn(&self) -> u64 {
        self.start / PAGE_SIZE
    }

    /// The end of the range as a 4KB page number.
    pub fn end_4k_gpn(&self) -> u64 {
        self.end / PAGE_SIZE
    }

    /// The number of 4KB pages in the range.
    pub fn page_count_4k(&self) -> u64 {
        (self.end - self.start) / PAGE_SIZE
    }

    /// The number of 2MB pages needed to cover the range's length, rounded up.
    pub fn page_count_2m(&self) -> u64 {
        (self.end - self.start).div_ceil(TWO_MB)
    }

    /// The exclusive end address of the range.
    pub fn end(&self) -> u64 {
        self.end
    }

    /// The length of the range in bytes.
    pub fn len(&self) -> u64 {
        self.end() - self.start()
    }

    /// Whether the range is empty.
    pub fn is_empty(&self) -> bool {
        self.start == self.end
    }
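
    /// Returns the largest power-of-two alignment (in bytes) that both the
    /// start and the end of the range satisfy, treating the range as if it
    /// were placed at `base`.
    ///
    /// # Example
    ///
    /// ```
    /// use memory_range::MemoryRange;
    ///
    /// // Both 0x2000 and 0x4000 are multiples of 0x2000 (8KB).
    /// assert_eq!(MemoryRange::new(0x2000..0x4000).alignment(0), 0x2000);
    /// ```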
pub fn alignment(&self, base: u64) -> u64 {
let order = ((base + self.start()) | (base + self.end())).trailing_zeros();
1 << order
}
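
    /// Returns the largest subrange whose start and end are both aligned to
    /// `alignment` bytes, or the empty range if there is no such subrange.
    ///
    /// Panics if `alignment` is not a power of two.
    ///
    /// # Example
    ///
    /// ```
    /// use memory_range::MemoryRange;
    ///
    /// let r = MemoryRange::new(0x1000..0x5000);
    /// assert_eq!(r.aligned_subrange(0x2000), MemoryRange::new(0x2000..0x4000));
    /// ```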
pub fn aligned_subrange(&self, alignment: u64) -> Self {
assert!(alignment.is_power_of_two());
let start = (self.start + alignment - 1) & !(alignment - 1);
let end = self.end & !(alignment - 1);
if start <= end {
Self::new(start..end)
} else {
Self::EMPTY
}
}
    /// Returns whether this range overlaps `other`.
    pub fn overlaps(&self, other: &Self) -> bool {
        self.end > other.start && self.start < other.end
    }

    /// Returns whether this range fully contains `other`.
    pub fn contains(&self, other: &Self) -> bool {
        self.start <= other.start && self.end >= other.end
    }

    /// Returns whether this range contains the address `addr`.
    pub fn contains_addr(&self, addr: u64) -> bool {
        (self.start..self.end).contains(&addr)
    }

    /// Returns the byte offset of `addr` within the range, or `None` if the
    /// address is outside the range.
    pub fn offset_of(&self, addr: u64) -> Option<u64> {
        if self.contains_addr(addr) {
            Some(addr - self.start)
        } else {
            None
        }
    }
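
    /// Returns the intersection of this range and `other`, or the empty range
    /// if they do not overlap.
    ///
    /// # Example
    ///
    /// ```
    /// use memory_range::MemoryRange;
    ///
    /// let a = MemoryRange::new(0x1000..0x4000);
    /// let b = MemoryRange::new(0x3000..0x6000);
    /// assert_eq!(a.intersection(&b), MemoryRange::new(0x3000..0x4000));
    /// ```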
pub fn intersection(&self, other: &Self) -> Self {
let start = self.start.max(other.start);
let end = self.end.min(other.end);
if start <= end {
Self::new(start..end)
} else {
Self::EMPTY
}
}
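
    /// Splits the range into two at the given byte offset from the start.
    ///
    /// Panics if the offset is not page-aligned or is beyond the end of the
    /// range.
    ///
    /// # Example
    ///
    /// ```
    /// use memory_range::MemoryRange;
    ///
    /// let (lo, hi) = MemoryRange::new(0x1000..0x4000).split_at_offset(0x1000);
    /// assert_eq!(lo, MemoryRange::new(0x1000..0x2000));
    /// assert_eq!(hi, MemoryRange::new(0x2000..0x4000));
    /// ```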
#[track_caller]
pub fn split_at_offset(&self, offset: u64) -> (Self, Self) {
assert!(offset <= self.len());
assert!(offset % PAGE_SIZE == 0);
(
Self {
start: self.start,
end: self.start + offset,
},
Self {
start: self.start + offset,
end: self.end,
},
)
}
}
impl From<MemoryRange> for Range<u64> {
fn from(range: MemoryRange) -> Self {
Range {
start: range.start(),
end: range.end(),
}
}
}
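
/// An iterator that splits a [`MemoryRange`] into subranges chosen to
/// maximize the alignment (4KB, 2MB, or 1GB) of each piece, optionally
/// relative to a base offset and subject to a maximum length per piece.
///
/// # Example
///
/// The 2MB-aligned interior is split out from the unaligned head and tail:
///
/// ```
/// use memory_range::{AlignedSubranges, MemoryRange};
///
/// let pieces: Vec<_> =
///     AlignedSubranges::new(MemoryRange::new(0x1f_f000..0x40_1000)).collect();
/// assert_eq!(
///     pieces,
///     [
///         MemoryRange::new(0x1f_f000..0x20_0000),
///         MemoryRange::new(0x20_0000..0x40_0000),
///         MemoryRange::new(0x40_0000..0x40_1000),
///     ]
/// );
/// ```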
#[derive(Debug, Clone)]
pub struct AlignedSubranges {
range: MemoryRange,
offset: u64,
max_len: u64,
}
impl AlignedSubranges {
    /// Returns a new iterator over the maximally aligned subranges of
    /// `range`.
    pub fn new(range: MemoryRange) -> Self {
        Self {
            range,
            offset: 0,
            max_len: u64::MAX,
        }
    }

    /// Computes alignment as if the range were offset in memory by `offset`
    /// bytes.
    pub fn with_offset(self, offset: u64) -> Self {
        Self { offset, ..self }
    }

    /// Limits each yielded subrange to at most `max_len` bytes.
    pub fn with_max_range_len(self, max_len: u64) -> Self {
        Self { max_len, ..self }
    }
}
impl Iterator for AlignedSubranges {
type Item = MemoryRange;
fn next(&mut self) -> Option<Self::Item> {
if self.range.is_empty() {
return None;
}
let start = self.range.start() + self.offset;
let mut end = self.range.end() + self.offset;
if end - start > self.max_len {
end = start + self.max_len;
}
        // Shrink `end` so the yielded subrange either stops at the next
        // `page_size` boundary (when `start` is unaligned) or spans a whole
        // number of `page_size` pages (when `start` is aligned). If no such
        // boundary lies within the range, leave `end` unchanged.
        let mut align = |page_size| {
let page_mask: u64 = page_size - 1;
if (start + page_mask) & !page_mask >= end & !page_mask {
return;
}
if start & page_mask != 0 {
end = end.min((start + page_mask) & !page_mask);
} else {
end &= !page_mask;
}
};
align(TWO_MB);
align(ONE_GB);
let start = start - self.offset;
let end = end - self.offset;
self.range = MemoryRange::new(end..self.range.end());
Some(MemoryRange::new(start..end))
}
}
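
/// Returns an iterator over the subranges covered by both `left` and `right`,
/// which must each be sorted and internally non-overlapping.
///
/// Panics (when iterated) if either input is not sorted.
///
/// # Example
///
/// ```
/// use memory_range::{overlapping_ranges, MemoryRange};
///
/// let left = [MemoryRange::new(0x1000..0x4000)];
/// let right = [MemoryRange::new(0x2000..0x6000)];
/// assert!(overlapping_ranges(left, right).eq([MemoryRange::new(0x2000..0x4000)]));
/// ```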
pub fn overlapping_ranges(
left: impl IntoIterator<Item = MemoryRange>,
right: impl IntoIterator<Item = MemoryRange>,
) -> impl Iterator<Item = MemoryRange> {
walk_ranges(
left.into_iter().map(|r| (r, ())),
right.into_iter().map(|r| (r, ())),
)
.filter_map(|(r, c)| match c {
RangeWalkResult::Both((), ()) => Some(r),
_ => None,
})
}
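
/// Returns an iterator over the portions of `left` not covered by any range
/// in `right`. Both inputs must be sorted and internally non-overlapping.
///
/// Panics (when iterated) if either input is not sorted.
///
/// # Example
///
/// ```
/// use memory_range::{subtract_ranges, MemoryRange};
///
/// let left = [MemoryRange::new(0x1000..0x5000)];
/// let right = [MemoryRange::new(0x2000..0x3000)];
/// assert!(subtract_ranges(left, right).eq([
///     MemoryRange::new(0x1000..0x2000),
///     MemoryRange::new(0x3000..0x5000),
/// ]));
/// ```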
pub fn subtract_ranges(
left: impl IntoIterator<Item = MemoryRange>,
right: impl IntoIterator<Item = MemoryRange>,
) -> impl Iterator<Item = MemoryRange> {
walk_ranges(
left.into_iter().map(|r| (r, ())),
right.into_iter().map(|r| (r, ())),
)
.filter_map(|(r, c)| match c {
RangeWalkResult::Left(()) => Some(r),
RangeWalkResult::Neither | RangeWalkResult::Right(()) | RangeWalkResult::Both((), ()) => {
None
}
})
}
pub fn walk_ranges<T: Clone, U: Clone>(
left: impl IntoIterator<Item = (MemoryRange, T)>,
right: impl IntoIterator<Item = (MemoryRange, U)>,
) -> impl Iterator<Item = (MemoryRange, RangeWalkResult<T, U>)> {
RangeWalkIter {
pos: 0,
left: PeekableSorted::new(left),
right: PeekableSorted::new(right),
}
}
/// The result of [`walk_ranges`], describing which input lists cover a given
/// subrange.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RangeWalkResult<T, U> {
    /// The subrange is covered by neither input.
    Neither,
    /// The subrange is covered only by the left input, with the given tag.
    Left(T),
    /// The subrange is covered only by the right input, with the given tag.
    Right(U),
    /// The subrange is covered by both inputs, with the given tags.
    Both(T, U),
}
/// Iterator state for [`walk_ranges`].
struct RangeWalkIter<I: Iterator, J: Iterator> {
pos: u64,
left: PeekableSorted<I>,
right: PeekableSorted<J>,
}
/// A peeking wrapper that skips empty or already-passed ranges and asserts
/// that the underlying iterator yields ranges in sorted order.
struct PeekableSorted<I: Iterator> {
iter: I,
    #[allow(clippy::option_option)]
    item: Option<Option<I::Item>>,
}
impl<I: Iterator<Item = (MemoryRange, T)>, T> PeekableSorted<I> {
fn new(iter: impl IntoIterator<IntoIter = I>) -> Self {
Self {
iter: iter.into_iter(),
item: None,
}
}
    /// Peeks at the next range that ends after `pos`, skipping empty or
    /// already-passed ranges and asserting that the input is sorted.
    fn peek_in_range_ensure_sorted(&mut self, pos: u64, msg: &str) -> Option<&(MemoryRange, T)> {
loop {
let r = self
.item
.get_or_insert_with(|| {
let r = self.iter.next()?;
assert!(r.0.start() >= pos, "{msg} not sorted");
Some(r)
})
.as_ref()?;
if !r.0.is_empty() && r.0.end() > pos {
return Some(self.item.as_ref().unwrap().as_ref().unwrap());
}
self.item = None;
}
}
}
impl<
I: Iterator<Item = (MemoryRange, T)>,
J: Iterator<Item = (MemoryRange, U)>,
T: Clone,
U: Clone,
> Iterator for RangeWalkIter<I, J>
{
type Item = (MemoryRange, RangeWalkResult<T, U>);
fn next(&mut self) -> Option<Self::Item> {
if self.pos == MemoryRange::MAX_ADDRESS {
return None;
}
let left = self.left.peek_in_range_ensure_sorted(self.pos, "left");
let right = self.right.peek_in_range_ensure_sorted(self.pos, "right");
        // Determine how far the current coverage state extends and what that
        // state is, based on the next range from each input.
        let (end, c) = match (left, right) {
(Some(&(left, ref t)), Some(&(right, ref u))) => {
if self.pos < left.start() {
if self.pos < right.start() {
(left.start().min(right.start()), RangeWalkResult::Neither)
} else {
(
left.start().min(right.end()),
RangeWalkResult::Right(u.clone()),
)
}
} else if self.pos < right.start() {
(
right.start().min(left.end()),
RangeWalkResult::Left(t.clone()),
)
} else {
(
left.end().min(right.end()),
RangeWalkResult::Both(t.clone(), u.clone()),
)
}
}
(Some(&(left, ref t)), None) => {
if self.pos < left.start() {
                    (left.start(), RangeWalkResult::Neither)
} else {
(left.end(), RangeWalkResult::Left(t.clone()))
}
}
(None, Some(&(right, ref u))) => {
if self.pos < right.start() {
                    (right.start(), RangeWalkResult::Neither)
} else {
(right.end(), RangeWalkResult::Right(u.clone()))
}
}
(None, None) => (MemoryRange::MAX_ADDRESS, RangeWalkResult::Neither),
};
let r = MemoryRange::new(self.pos..end);
self.pos = end;
Some((r, c))
}
}
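
/// Merges adjacent ranges in a sorted list, yielding the minimal equivalent
/// sequence of ranges.
///
/// Panics (when iterated) if the input is not sorted or its ranges overlap.
///
/// # Example
///
/// ```
/// use memory_range::{flatten_ranges, MemoryRange};
///
/// let ranges = [
///     MemoryRange::new(0x1000..0x2000),
///     MemoryRange::new(0x2000..0x4000),
///     MemoryRange::new(0x5000..0x6000),
/// ];
/// assert!(flatten_ranges(ranges).eq([
///     MemoryRange::new(0x1000..0x4000),
///     MemoryRange::new(0x5000..0x6000),
/// ]));
/// ```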
pub fn flatten_ranges(
ranges: impl IntoIterator<Item = MemoryRange>,
) -> impl Iterator<Item = MemoryRange> {
FlattenIter {
iter: ranges.into_iter().map(|r| (r, ())).peekable(),
}
.map(|(r, _)| r)
}
pub fn flatten_equivalent_ranges<T: PartialEq>(
ranges: impl IntoIterator<Item = (MemoryRange, T)>,
) -> impl Iterator<Item = (MemoryRange, T)> {
FlattenIter {
iter: ranges.into_iter().peekable(),
}
}
/// Iterator state for [`flatten_ranges`] and [`flatten_equivalent_ranges`].
struct FlattenIter<I: Iterator> {
iter: Peekable<I>,
}
impl<I: Iterator<Item = (MemoryRange, T)>, T: PartialEq> Iterator for FlattenIter<I> {
type Item = (MemoryRange, T);
fn next(&mut self) -> Option<Self::Item> {
        let (first, typ) = self.iter.next()?;
        // `start` tracks the most recently merged range's start (used only
        // for the sortedness check); `end` tracks the merged range's end.
        let mut start = first.start();
        let mut end = first.end();
while let Some((r, _t)) = self.iter.next_if(|(r, t)| {
assert!(r.start() >= start, "ranges are not sorted");
assert!(r.start() >= end, "ranges overlap");
r.start() == end && &typ == t
}) {
start = r.start();
end = end.max(r.end());
}
Some((MemoryRange::new(first.start()..end), typ))
}
}
#[cfg(test)]
mod tests {
extern crate alloc;
use super::MemoryRange;
use super::TWO_MB;
    use super::flatten_equivalent_ranges;
    use super::flatten_ranges;
    use super::overlapping_ranges;
    use super::subtract_ranges;
    use super::AlignedSubranges;
use alloc::vec;
use alloc::vec::Vec;
const KB: u64 = 1024;
const MB: u64 = 1024 * KB;
const GB: u64 = 1024 * MB;
#[test]
fn test_align() {
#[derive(Clone, Debug, PartialEq, Copy)]
struct AlignedRangeResult {
range: MemoryRange,
page_size: u64,
}
let compare_with_base = |r1: Vec<MemoryRange>, base: u64, er: Vec<AlignedRangeResult>| {
let result: Vec<_> = r1
.iter()
.flat_map(|range| AlignedSubranges::new(*range).with_offset(base))
.collect();
assert_eq!(result.len(), er.len());
for (pos, range) in result.iter().enumerate() {
assert_eq!(*range, er[pos].range);
assert_eq!(range.alignment(base), er[pos].page_size);
}
};
let compare = |r1: Vec<MemoryRange>, er: Vec<AlignedRangeResult>| {
compare_with_base(r1, 0, er);
};
fn b_mr(start: u64, end: u64) -> MemoryRange {
MemoryRange::new(start..end)
}
fn b_arr(range: MemoryRange, page_size: u64) -> AlignedRangeResult {
AlignedRangeResult { range, page_size }
}
let ram: Vec<_> = vec![b_mr(0x0, 4 * KB)];
let expected_res: Vec<_> = vec![b_arr(ram[0], 1 << 12)];
compare(ram, expected_res);
let ram: Vec<_> = vec![b_mr(0x0, 2 * MB)];
let expected_res: Vec<_> = vec![b_arr(ram[0], 1 << 21)];
compare(ram, expected_res);
let ram: Vec<_> = vec![b_mr(0x0, GB)];
let expected_res: Vec<_> = vec![b_arr(ram[0], 1 << 30)];
compare(ram, expected_res);
let ram: Vec<_> = vec![b_mr(6 * MB, 12 * MB + 4 * KB)];
let expected_res: Vec<_> = vec![
b_arr(b_mr(6 * MB, 12 * MB), TWO_MB),
b_arr(b_mr(12 * MB, 12 * MB + 4 * KB), 1 << 12),
];
compare(ram, expected_res);
let ram: Vec<_> = vec![b_mr(5 * MB + 400 * KB, 12 * MB + 400 * KB)];
let expected_res: Vec<_> = vec![
b_arr(b_mr(5 * MB + 400 * KB, 6 * MB), 1 << 14),
b_arr(b_mr(6 * MB, 12 * MB), TWO_MB),
b_arr(b_mr(12 * MB, 12 * MB + 400 * KB), 1 << 14),
];
compare(ram, expected_res);
let ram: Vec<_> = vec![b_mr(GB + 501 * MB, 3 * GB + 503 * MB)];
let expected_res: Vec<_> = vec![
b_arr(b_mr(GB + 501 * MB, GB + 502 * MB), 1 << 20),
b_arr(b_mr(GB + 502 * MB, 2 * GB), 1 << 21),
b_arr(b_mr(2 * GB, 3 * GB), 1 << 30),
b_arr(b_mr(3 * GB, 3 * GB + 502 * MB), 1 << 21),
b_arr(b_mr(3 * GB + 502 * MB, 3 * GB + 503 * MB), 1 << 20),
];
compare(ram, expected_res);
let ram: Vec<_> = vec![b_mr(4 * MB + 8 * KB, 6 * MB + 8 * KB)];
let base = 2 * MB - 8 * KB;
let expected_res: Vec<_> = vec![b_arr(ram[0], TWO_MB)];
compare_with_base(ram, base, expected_res);
let ram: Vec<_> = vec![b_mr(4 * MB + 8 * KB, 6 * MB + 8 * KB)];
let expected_res: Vec<_> = vec![b_arr(ram[0], 1 << 13)];
compare_with_base(ram, 0, expected_res);
}
#[test]
fn test_overlapping_ranges() {
let left = [
MemoryRange::new(0..4 * MB),
MemoryRange::new(8 * MB..12 * MB),
MemoryRange::new(12 * MB..12 * MB),
MemoryRange::new(16 * MB..20 * MB),
MemoryRange::new(24 * MB..32 * MB),
MemoryRange::new(40 * MB..48 * MB),
];
let right = [
MemoryRange::new(2 * MB..6 * MB),
MemoryRange::new(10 * MB..11 * MB),
MemoryRange::new(11 * MB..11 * MB),
MemoryRange::new(11 * MB..13 * MB),
MemoryRange::new(15 * MB..22 * MB),
MemoryRange::new(26 * MB..30 * MB),
];
let result: Vec<_> = overlapping_ranges(left, right).collect();
assert_eq!(
result.as_slice(),
&[
MemoryRange::new(2 * MB..4 * MB),
MemoryRange::new(10 * MB..11 * MB),
MemoryRange::new(11 * MB..12 * MB),
MemoryRange::new(16 * MB..20 * MB),
MemoryRange::new(26 * MB..30 * MB),
]
);
}
#[test]
fn test_subtract_ranges() {
let left = [
MemoryRange::new(0..4 * MB),
MemoryRange::new(8 * MB..12 * MB),
MemoryRange::new(12 * MB..12 * MB),
MemoryRange::new(16 * MB..20 * MB),
MemoryRange::new(24 * MB..32 * MB),
MemoryRange::new(40 * MB..48 * MB),
];
let right = [
MemoryRange::new(2 * MB..6 * MB),
MemoryRange::new(10 * MB..11 * MB),
MemoryRange::new(11 * MB..11 * MB),
MemoryRange::new(11 * MB..13 * MB),
MemoryRange::new(15 * MB..22 * MB),
MemoryRange::new(26 * MB..30 * MB),
];
let result: Vec<_> = subtract_ranges(left, right).collect();
assert_eq!(
result.as_slice(),
&[
MemoryRange::new(0..2 * MB),
MemoryRange::new(8 * MB..10 * MB),
MemoryRange::new(24 * MB..26 * MB),
MemoryRange::new(30 * MB..32 * MB),
MemoryRange::new(40 * MB..48 * MB),
]
);
}
#[test]
#[should_panic(expected = "left not sorted")]
fn test_panic_unsorted_overlapping_left() {
overlapping_ranges(
[MemoryRange::new(MB..2 * MB), MemoryRange::new(0..MB)],
[MemoryRange::new(3 * MB..4 * MB)],
)
.for_each(|_| ());
}
#[test]
#[should_panic(expected = "right not sorted")]
fn test_panic_unsorted_overlapping_right() {
overlapping_ranges(
[
MemoryRange::new(MB..2 * MB),
MemoryRange::new(3 * MB..4 * MB),
],
[MemoryRange::new(0..MB), MemoryRange::new(0..MB)],
)
.for_each(|_| ());
}
#[test]
#[should_panic(expected = "left not sorted")]
fn test_panic_unsorted_subtract_left() {
subtract_ranges(
[MemoryRange::new(MB..2 * MB), MemoryRange::new(0..MB)],
[MemoryRange::new(MB..2 * MB)],
)
.for_each(|_| ());
}
#[test]
#[should_panic(expected = "right not sorted")]
fn test_panic_unsorted_subtract_right() {
subtract_ranges(
[
MemoryRange::new(MB..2 * MB),
MemoryRange::new(3 * MB..4 * MB),
],
[MemoryRange::new(MB..2 * MB), MemoryRange::new(MB..2 * MB)],
)
.for_each(|_| ());
}
#[test]
fn test_aligned_subrange() {
let test_cases = &[
(0..0, MB, 0..0),
(0..MB, MB, 0..MB),
(4 * KB..MB + 4 * KB, MB, MB..MB),
(MB..5 * MB, 2 * MB, 2 * MB..4 * MB),
];
for (range, alignment, expected_aligned_range) in test_cases.iter().cloned() {
assert_eq!(
MemoryRange::new(range).aligned_subrange(alignment),
MemoryRange::new(expected_aligned_range)
);
}
}
#[test]
fn test_flatten_ranges() {
let ranges =
[0..4, 5..7, 7..11, 13..20, 20..25, 35..36].map(MemoryRange::from_4k_gpn_range);
let result = [0..4, 5..11, 13..25, 35..36].map(MemoryRange::from_4k_gpn_range);
assert!(flatten_ranges(ranges).eq(result));
}
#[test]
fn test_flatten_equiv_ranges() {
#[derive(Clone, Copy, PartialEq, Eq)]
enum Color {
Red,
Blue,
}
let ranges = [0..4, 5..7, 7..11, 11..12, 13..20, 20..25, 35..36]
.map(MemoryRange::from_4k_gpn_range)
.into_iter()
.zip([
Color::Red,
Color::Red,
Color::Red,
Color::Blue,
Color::Red,
Color::Red,
Color::Blue,
]);
let result = [0..4, 5..11, 11..12, 13..25, 35..36]
.map(MemoryRange::from_4k_gpn_range)
.into_iter()
.zip([Color::Red, Color::Red, Color::Blue, Color::Red, Color::Blue]);
assert!(flatten_equivalent_ranges(ranges).eq(result));
}
#[test]
#[should_panic(expected = "ranges are not sorted")]
fn test_flatten_ranges_not_sorted() {
flatten_ranges([0..4, 5..7, 3..8].map(MemoryRange::from_4k_gpn_range)).for_each(|_| ());
}
#[test]
#[should_panic(expected = "ranges overlap")]
fn test_flatten_ranges_overlap() {
flatten_ranges([0..6, 5..7, 9..12].map(MemoryRange::from_4k_gpn_range)).for_each(|_| ());
}
}