use guestmem::ranges::PagedRange;
use smallvec::SmallVec;
use smallvec::smallvec;
use thiserror::Error;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

const PAGE_SIZE: usize = 4096;

/// A list of guest page numbers, stored inline for up to 64 entries.
pub type GpnList = SmallVec<[u64; 64]>;

/// Returns a zero-initialized `GpnList` of length `len`.
pub fn zeroed_gpn_list(len: usize) -> GpnList {
    smallvec![FromZeros::new_zeroed(); len]
}

/// The header word preceding each range in the packed buffer: the byte length
/// of the range and the byte offset into its first page.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GpaRange {
    pub len: u32,
    pub offset: u32,
}

/// A set of paged ranges packed into a single `u64` buffer. Each range is a
/// `GpaRange` header word followed by its guest page numbers.
#[derive(Debug, Default, Clone)]
pub struct MultiPagedRangeBuf<T: AsRef<[u64]>> {
    buf: T,
    count: usize,
}

impl<T: AsRef<[u64]>> MultiPagedRangeBuf<T> {
    /// Validates that `buf` contains `count` well-formed ranges.
    pub fn validate(count: usize, buf: &[u64]) -> Result<(), Error> {
        let mut rem: &[u64] = buf;
        for _ in 0..count {
            let (_, rest) = parse(rem)?;
            rem = rest;
        }
        Ok(())
    }

    /// Creates a new buffer from `buf`, validating that it contains `count`
    /// well-formed ranges.
    pub fn new(count: usize, buf: T) -> Result<Self, Error> {
        Self::validate(count, buf.as_ref())?;
        Ok(MultiPagedRangeBuf { buf, count })
    }

    /// Returns a new buffer describing `len` bytes of this buffer starting at
    /// byte `offset`, or `Err(Error::RangeTooSmall)` if this buffer does not
    /// cover `offset + len` bytes.
    pub fn subrange(
        &self,
        offset: usize,
        len: usize,
    ) -> Result<MultiPagedRangeBuf<GpnList>, Error> {
        if len == 0 {
            return Ok(MultiPagedRangeBuf::<GpnList>::empty());
        }

        let mut sub_buf = GpnList::new();
        let mut remaining_offset = offset;
        let mut remaining_length = len;
        let mut range_count = 0;
        for range in self.iter() {
            // Consume the leading offset, skipping ranges that lie entirely
            // before it.
            let cur_offset = if remaining_offset == 0 {
                0
            } else if remaining_offset > range.len() {
                remaining_offset -= range.len();
                continue;
            } else {
                let remaining = remaining_offset;
                remaining_offset = 0;
                remaining
            };

            let sub_range = match range.try_subrange(cur_offset, remaining_length) {
                Some(sub_range) => sub_range,
                None => range,
            };

            sub_buf.push(u64::from_le_bytes(
                GpaRange {
                    len: sub_range.len() as u32,
                    offset: sub_range.offset() as u32,
                }
                .as_bytes()
                .try_into()
                .unwrap(),
            ));
            sub_buf.extend_from_slice(sub_range.gpns());
            range_count += 1;
            remaining_length -= sub_range.len();
            if remaining_length == 0 {
                break;
            }
        }

        if remaining_length > 0 {
            Err(Error::RangeTooSmall)
        } else {
            MultiPagedRangeBuf::<GpnList>::new(range_count, sub_buf)
        }
    }

    /// Returns an empty buffer.
    pub fn empty() -> Self
    where
        T: Default,
    {
        Self {
            buf: Default::default(),
            count: 0,
        }
    }

    /// Iterates over the ranges in the buffer.
    pub fn iter(&self) -> MultiPagedRangeIter<'_> {
        MultiPagedRangeIter {
            buf: self.buf.as_ref(),
            count: self.count,
        }
    }

    /// Returns the number of ranges in the buffer.
    pub fn range_count(&self) -> usize {
        self.count
    }

    /// Returns the first range, if any.
    pub fn first(&self) -> Option<PagedRange<'_>> {
        self.iter().next()
    }

    /// Returns the single range described by this buffer, but only if the
    /// buffer contains exactly one range that starts at a page boundary and
    /// covers a whole number of pages.
    pub fn contiguous_aligned(&self) -> Option<PagedRange<'_>> {
        if self.count != 1 {
            return None;
        }
        let first = self.first()?;
        if first.offset() != 0 || first.len() % PAGE_SIZE != 0 {
            return None;
        }
        Some(first)
    }

    /// Returns the underlying `u64` buffer.
    pub fn range_buffer(&self) -> &[u64] {
        self.buf.as_ref()
    }

    /// Consumes the buffer, returning the underlying storage.
    pub fn into_buffer(self) -> T {
        self.buf
    }
}

impl MultiPagedRangeBuf<&'static [u64]> {
    pub const fn empty_const() -> Self {
        Self { buf: &[], count: 0 }
    }
}

impl<'a, T: AsRef<[u64]> + Default> IntoIterator for &'a MultiPagedRangeBuf<T> {
    type Item = PagedRange<'a>;
    type IntoIter = MultiPagedRangeIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a> FromIterator<PagedRange<'a>> for MultiPagedRangeBuf<GpnList> {
    fn from_iter<I: IntoIterator<Item = PagedRange<'a>>>(iter: I) -> MultiPagedRangeBuf<GpnList> {
        let mut range_count = 0;
        let buf: GpnList = iter
            .into_iter()
            .map(|range| {
                // Pack each range as a `GpaRange` header word followed by its GPNs.
                let mut buf: GpnList = smallvec![u64::from_le_bytes(
                    GpaRange {
                        len: range.len() as u32,
                        offset: range.offset() as u32,
                    }
                    .as_bytes()
                    .try_into()
                    .unwrap()
                )];
                buf.extend_from_slice(range.gpns());
                range_count += 1;
                buf
            })
            .collect::<Vec<GpnList>>()
            .into_iter()
            .flatten()
            .collect();
        MultiPagedRangeBuf::<GpnList>::new(range_count, buf).unwrap()
    }
}

#[derive(Clone, Debug)]
pub struct MultiPagedRangeIter<'a> {
    buf: &'a [u64],
    count: usize,
}

impl<'a> Iterator for MultiPagedRangeIter<'a> {
    type Item = PagedRange<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.count == 0 {
            return None;
        }
        // The header word packs the byte length in the low 32 bits and the
        // starting byte offset in the high 32 bits.
        let hdr = GpaRange::read_from_prefix(self.buf[0].as_bytes()).unwrap().0;
        let page_count = ((hdr.offset + hdr.len) as usize).div_ceil(PAGE_SIZE);
        let (this, rest) = self.buf.split_at(page_count + 1);
        let range = PagedRange::new(hdr.offset as usize, hdr.len as usize, &this[1..]).unwrap();
        self.count -= 1;
        self.buf = rest;
        Some(range)
    }
}

#[derive(Debug, Error)]
pub enum Error {
    #[error("empty range")]
    EmptyRange,
    #[error("empty byte count")]
    EmptyByteCount,
    #[error("range too small")]
    RangeTooSmall,
    #[error("integer overflow")]
    Overflow,
}

/// Parses one range from the front of `buf`, returning the parsed range and
/// the remaining buffer.
fn parse(buf: &[u64]) -> Result<(PagedRange<'_>, &[u64]), Error> {
    let (hdr, gpas) = buf.split_first().ok_or(Error::EmptyRange)?;
    let byte_count = *hdr as u32;
    if byte_count == 0 {
        return Err(Error::EmptyByteCount);
    }
    // The byte offset into the first page occupies the low 12 bits of the
    // header's high word.
    let byte_offset = (*hdr >> 32) as u32 & 0xfff;
    let pages = (byte_count
        .checked_add(4095)
        .ok_or(Error::Overflow)?
        .checked_add(byte_offset)
        .ok_or(Error::Overflow)?) as usize
        / PAGE_SIZE;
    if gpas.len() < pages {
        return Err(Error::RangeTooSmall);
    }
    let (gpas, rest) = gpas.split_at(pages);
    assert!(!gpas.is_empty());
    Ok((
        PagedRange::new(byte_offset as usize, byte_count as usize, gpas)
            .expect("already validated"),
        rest,
    ))
}
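
// A few illustrative tests sketching how the packed format is built and
// consumed. They are minimal sketches, not exhaustive coverage: they assume a
// little-endian host (the header word packs `len` in the low 32 bits and
// `offset` in the high 32 bits) and rely only on the `PagedRange` accessors
// already used in this module (`new`, `len`, `offset`, `gpns`, `try_subrange`).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_single_contiguous_range() {
        // One range: 8192 bytes (two pages) at offset 0, backed by GPNs 5 and 6.
        let ranges = MultiPagedRangeBuf::new(1, vec![8192u64, 5, 6]).unwrap();
        assert_eq!(ranges.range_count(), 1);
        let first = ranges.first().unwrap();
        assert_eq!(first.offset(), 0);
        assert_eq!(first.len(), 8192);
        assert_eq!(first.gpns(), &[5, 6]);
        assert!(ranges.contiguous_aligned().is_some());
    }

    #[test]
    fn rejects_short_and_empty_ranges() {
        // Two pages of data but only one GPN supplied.
        assert!(matches!(
            MultiPagedRangeBuf::new(1, vec![8192u64, 5]),
            Err(Error::RangeTooSmall)
        ));
        // A zero-length range is invalid.
        assert!(matches!(
            MultiPagedRangeBuf::new(1, vec![0u64, 5]),
            Err(Error::EmptyByteCount)
        ));
    }

    #[test]
    fn subrange_of_single_range() {
        // Taking the first page of a two-page range yields a one-page range
        // starting at the same GPN.
        let ranges = MultiPagedRangeBuf::new(1, vec![8192u64, 5, 6]).unwrap();
        let sub = ranges.subrange(0, 4096).unwrap();
        assert_eq!(sub.range_count(), 1);
        let first = sub.first().unwrap();
        assert_eq!(first.offset(), 0);
        assert_eq!(first.len(), 4096);
        assert_eq!(first.gpns()[0], 5);
    }

    #[test]
    fn collects_from_paged_ranges() {
        // Round-trip two ranges through `FromIterator` and the iterator.
        let gpns_a = [1u64, 2];
        let gpns_b = [7u64];
        let a = PagedRange::new(0, 8192, &gpns_a).unwrap();
        let b = PagedRange::new(512, 1024, &gpns_b).unwrap();
        let multi: MultiPagedRangeBuf<GpnList> = [a, b].into_iter().collect();
        assert_eq!(multi.range_count(), 2);
        let ranges: Vec<_> = multi.iter().collect();
        assert_eq!(ranges[0].gpns(), &[1, 2]);
        assert_eq!(ranges[1].offset(), 512);
        assert_eq!(ranges[1].len(), 1024);
    }
}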