vmbus_ring/gparange.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use guestmem::ranges::PagedRange;
use smallvec::SmallVec;
use smallvec::smallvec;
use thiserror::Error;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

const PAGE_SIZE: usize = 4096;

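/// A list of `u64` entries describing GPA ranges, stored inline for up to 64
/// entries before spilling to the heap.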
pub type GpnList = SmallVec<[u64; 64]>;

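/// Returns a `GpnList` containing `len` zeroed entries.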
pub fn zeroed_gpn_list(len: usize) -> GpnList {
    smallvec![FromZeros::new_zeroed(); len]
}

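/// The header of a single GPA range as stored in a range buffer: the byte
/// length of the range and the byte offset into its first page. In the packed
/// buffer, the header occupies one `u64` and is followed by one GPN per page.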
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GpaRange {
    pub len: u32,
    pub offset: u32,
}

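/// A buffer describing zero or more GPA ranges, each stored as a `GpaRange`
/// header followed by the GPNs it covers.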
#[derive(Debug, Default, Clone)]
pub struct MultiPagedRangeBuf<T: AsRef<[u64]>> {
    buf: T,
    count: usize,
}

impl<T: AsRef<[u64]>> MultiPagedRangeBuf<T> {
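    /// Validates that `buf` starts with `count` well-formed GPA ranges.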
    pub fn validate(count: usize, buf: &[u64]) -> Result<(), Error> {
        let mut rem: &[u64] = buf;
        for _ in 0..count {
            let (_, rest) = parse(rem)?;
            rem = rest;
        }
        Ok(())
    }

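    /// Creates a new multi range from a buffer containing `count` GPA ranges,
    /// validating the buffer first.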
    pub fn new(count: usize, buf: T) -> Result<Self, Error> {
        Self::validate(count, buf.as_ref())?;
        Ok(MultiPagedRangeBuf { buf, count })
    }

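    /// Returns a new multi range covering `len` bytes of this multi range,
    /// starting `offset` bytes in, splitting ranges as needed. Fails with
    /// `Error::RangeTooSmall` if fewer than `offset + len` bytes are available.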
    pub fn subrange(
        &self,
        offset: usize,
        len: usize,
    ) -> Result<MultiPagedRangeBuf<GpnList>, Error> {
        if len == 0 {
            return Ok(MultiPagedRangeBuf::<GpnList>::empty());
        }

        let mut sub_buf = GpnList::new();
        let mut remaining_offset = offset;
        let mut remaining_length = len;
        let mut range_count = 0;
        for range in self.iter() {
            if let Some(n) = remaining_offset.checked_sub(range.len()) {
                remaining_offset = n;
                continue;
            }
            let cur_offset = std::mem::take(&mut remaining_offset);
            // Determine how many bytes we can take from this range after applying cur_offset.
            let available_here = range.len() - cur_offset;
            let take_len = available_here.min(remaining_length);
            let sub_range = range.subrange(cur_offset, take_len);

            sub_buf.push(u64::from_le_bytes(
                GpaRange {
                    len: sub_range.len() as u32,
                    offset: sub_range.offset() as u32,
                }
                .as_bytes()
                .try_into()
                .unwrap(),
            ));
            sub_buf.extend_from_slice(sub_range.gpns());
            range_count += 1;
            remaining_length -= sub_range.len();
            if remaining_length == 0 {
                break;
            }
        }

        if remaining_length > 0 {
            Err(Error::RangeTooSmall)
        } else {
            MultiPagedRangeBuf::<GpnList>::new(range_count, sub_buf)
        }
    }

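    /// Returns an empty multi range.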
    pub fn empty() -> Self
    where
        T: Default,
    {
        Self {
            buf: Default::default(),
            count: 0,
        }
    }

    pub fn iter(&self) -> MultiPagedRangeIter<'_> {
        MultiPagedRangeIter {
            buf: self.buf.as_ref(),
            count: self.count,
        }
    }

    pub fn range_count(&self) -> usize {
        self.count
    }

    pub fn first(&self) -> Option<PagedRange<'_>> {
        self.iter().next()
    }

    /// Validates that this multi range consists of exactly one range that is
    /// page aligned. Returns that range.
    pub fn contiguous_aligned(&self) -> Option<PagedRange<'_>> {
        if self.count != 1 {
            return None;
        }
        let first = self.first()?;
        if first.offset() != 0 || first.len() % PAGE_SIZE != 0 {
            return None;
        }
        Some(first)
    }

    pub fn range_buffer(&self) -> &[u64] {
        self.buf.as_ref()
    }

    pub fn into_buffer(self) -> T {
        self.buf
    }
}

impl MultiPagedRangeBuf<&'static [u64]> {
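    /// Returns an empty multi range backed by a static empty buffer.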
    pub const fn empty_const() -> Self {
        Self { buf: &[], count: 0 }
    }
}

impl<'a, T: AsRef<[u64]> + Default> IntoIterator for &'a MultiPagedRangeBuf<T> {
    type Item = PagedRange<'a>;
    type IntoIter = MultiPagedRangeIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a> FromIterator<PagedRange<'a>> for MultiPagedRangeBuf<GpnList> {
    fn from_iter<I: IntoIterator<Item = PagedRange<'a>>>(iter: I) -> MultiPagedRangeBuf<GpnList> {
        let mut range_count = 0;
        let buf: GpnList = iter
            .into_iter()
            .map(|range| {
                // Pack each range as a GpaRange header followed by its GPNs.
                let mut buf: GpnList = smallvec![u64::from_le_bytes(
                    GpaRange {
                        len: range.len() as u32,
                        offset: range.offset() as u32,
                    }
                    .as_bytes()
                    .try_into()
                    .unwrap()
                )];
                buf.extend_from_slice(range.gpns());
                range_count += 1;
                buf
            })
            .collect::<Vec<GpnList>>()
            .into_iter()
            .flatten()
            .collect();
        MultiPagedRangeBuf::<GpnList>::new(range_count, buf).unwrap()
    }
}

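/// An iterator over the [`PagedRange`]s of a [`MultiPagedRangeBuf`].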
#[derive(Clone, Debug)]
pub struct MultiPagedRangeIter<'a> {
    buf: &'a [u64],
    count: usize,
}

impl<'a> Iterator for MultiPagedRangeIter<'a> {
    type Item = PagedRange<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.count == 0 {
            return None;
        }
        let hdr = GpaRange::read_from_prefix(self.buf[0].as_bytes())
            .unwrap()
            .0; // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
        let page_count = ((hdr.offset + hdr.len) as usize).div_ceil(PAGE_SIZE); // N.B. already validated
        let (this, rest) = self.buf.split_at(page_count + 1);
        let range = PagedRange::new(hdr.offset as usize, hdr.len as usize, &this[1..]).unwrap();
        self.count -= 1;
        self.buf = rest;
        Some(range)
    }
}

#[derive(Debug, Error)]
pub enum Error {
    #[error("empty range")]
    EmptyRange,
    #[error("empty byte count")]
    EmptyByteCount,
    #[error("range too small")]
    RangeTooSmall,
    #[error("byte offset too large")]
    OffsetTooLarge,
    #[error("integer overflow")]
    Overflow,
}

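/// Parses a single GPA range from the front of `buf`: a header `u64` holding
/// the byte count in its low 32 bits and the byte offset in its high 32 bits,
/// followed by one GPN per page. Returns the range and the remaining buffer.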
fn parse(buf: &[u64]) -> Result<(PagedRange<'_>, &[u64]), Error> {
    let (hdr, gpas) = buf.split_first().ok_or(Error::EmptyRange)?;
    let byte_count = *hdr as u32;
    if byte_count == 0 {
        return Err(Error::EmptyByteCount);
    }
    let byte_offset = (*hdr >> 32) as u32;
    if byte_offset > 0xfff {
        return Err(Error::OffsetTooLarge);
    }
    let pages = (byte_count
        .checked_add(4095)
        .ok_or(Error::Overflow)?
        .checked_add(byte_offset)
        .ok_or(Error::Overflow)?) as usize
        / PAGE_SIZE;
    if gpas.len() < pages {
        return Err(Error::RangeTooSmall);
    }
    let (gpas, rest) = gpas.split_at(pages);
    assert!(!gpas.is_empty());
    Ok((
        PagedRange::new(byte_offset as usize, byte_count as usize, gpas)
            .expect("already validated"),
        rest,
    ))
}

#[cfg(test)]
mod tests {
    use super::*;
    use guestmem::ranges::PagedRange;

    #[test]
    fn large_offset() {
        // Encode a header whose offset (0x1000) exceeds the largest valid 12-bit page offset (0xfff).
        let hdr = GpaRange {
            len: 1,
            offset: 0x1000,
        };
        let mut buf: GpnList = GpnList::new();
        buf.push(u64::from_le_bytes(hdr.as_bytes().try_into().unwrap()));
        buf.push(0xdead_beef);

        // Validation in new() should not accept the buffer.
        let err = MultiPagedRangeBuf::new(1, buf).unwrap_err();
        assert!(matches!(err, Error::OffsetTooLarge));
    }

    // subrange should error when the requested span exceeds the bytes available after the offset.
    #[test]
    fn subrange_errors_when_span_beyond_total() {
        // Build a single-range buffer with 200 bytes starting at offset 100 within its first page.
        let gpns = [0x1000_u64];
        let range = PagedRange::new(100, 200, &gpns).expect("valid paged range");
        let ranges: MultiPagedRangeBuf<GpnList> = std::iter::once(range).collect();

        // Request a subrange starting 50 bytes into the buffer, of length 200 bytes.
        // Only 150 bytes remain (200 - 50), so this should be an error.
        let err = ranges.subrange(50, 200).unwrap_err();
        assert!(matches!(err, Error::RangeTooSmall));
    }

    // A subrange spanning multiple ranges should split into partial
    // pieces with correct offsets, lengths, and page lists.
    #[test]
    fn subrange_spans_multiple_ranges() {
        let gpns1 = [1_u64, 2_u64];
        let gpns2 = [3_u64, 4_u64];
        // Two ranges: [100..400) over gpns1 and [0..500) over gpns2.
        let r1 = PagedRange::new(100, 300, &gpns1).expect("r1");
        let r2 = PagedRange::new(0, 500, &gpns2).expect("r2");
        let ranges: MultiPagedRangeBuf<GpnList> = vec![r1, r2].into_iter().collect();

        // Take a subrange starting 250 bytes into the concatenated ranges, of length 200.
        // This yields 50 bytes from r1 (at offset 350) and 150 bytes from r2 (at offset 0).
        let sub = ranges.subrange(250, 200).expect("subrange ok");
        assert_eq!(sub.range_count(), 2);

        let mut it = sub.iter();
        let a = it.next().expect("first slice");
        assert_eq!(a.offset(), 350);
        assert_eq!(a.len(), 50);
        assert_eq!(a.gpns(), &gpns1[..1]);

        let b = it.next().expect("second slice");
        assert_eq!(b.offset(), 0);
        assert_eq!(b.len(), 150);
        assert_eq!(b.gpns(), &gpns2[..1]);

        assert!(it.next().is_none());
    }
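
    // Round-trip sketch: a single page-aligned range collected into a
    // MultiPagedRangeBuf should come back intact from contiguous_aligned().
    #[test]
    fn contiguous_aligned_roundtrip() {
        let gpns = [5_u64, 6_u64];
        let range = PagedRange::new(0, 2 * PAGE_SIZE, &gpns).expect("valid paged range");
        let ranges: MultiPagedRangeBuf<GpnList> = std::iter::once(range).collect();
        assert_eq!(ranges.range_count(), 1);

        let aligned = ranges.contiguous_aligned().expect("single aligned range");
        assert_eq!(aligned.offset(), 0);
        assert_eq!(aligned.len(), 2 * PAGE_SIZE);
        assert_eq!(aligned.gpns(), &gpns[..]);
    }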
}