vmbus_ring/lib.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! This module implements the low-level interface to the VmBus ring buffer. The
//! ring buffer resides in guest memory and is mapped into the host, allowing
//! efficient transfer of variable-sized packets.
//!
//! Ring buffer packets have headers called descriptors, which can specify a
//! transaction ID and metadata referring to memory outside the ring buffer.
//! Each packet is a multiple of 8 bytes.
//!
//! In practice, ring buffers always come in pairs so that packets can be both
//! sent and received. However, this module's interfaces operate on them singly.
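//!
//! Below is a minimal sketch of a send/receive round trip over a single ring
//! backed by the test-only [`FlatRingMem`] (it mirrors the unit tests at the
//! end of this file; `guestmem::MemoryRead`/`MemoryWrite` are assumed to be in
//! scope for `read_all` and `write`):
//!
//! ```ignore
//! let mem = FlatRingMem::new(16384);
//! let incoming = IncomingRing::new(&mem).unwrap();
//! let outgoing = OutgoingRing::new(&mem).unwrap();
//!
//! // Write an 8-byte in-band packet and commit it.
//! let mut out = outgoing.outgoing().unwrap();
//! let range = outgoing
//!     .write(
//!         &mut out,
//!         &OutgoingPacket {
//!             transaction_id: 0,
//!             size: 8,
//!             typ: OutgoingPacketType::InBandNoCompletion,
//!         },
//!     )
//!     .unwrap();
//! range.writer(&outgoing).write(&[1, 2, 3, 4, 5, 6, 7, 8]).unwrap();
//! let _signal_receiver = outgoing.commit_write(&mut out);
//!
//! // Read the packet back and commit the read to free the space.
//! let mut inp = incoming.incoming().unwrap();
//! let packet = incoming.read(&mut inp).unwrap();
//! let payload = packet.payload.reader(&incoming).read_all().unwrap();
//! let _signal_sender = incoming.commit_read(&mut inp);
//! assert_eq!(payload, [1, 2, 3, 4, 5, 6, 7, 8]);
//! ```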

#![expect(missing_docs)]
#![forbid(unsafe_code)]

pub mod gparange;

pub use pipe_protocol::*;
pub use protocol::PAGE_SIZE;
pub use protocol::TransferPageRange;

use crate::gparange::GpaRange;
use guestmem::AccessError;
use guestmem::MemoryRead;
use guestmem::MemoryWrite;
use guestmem::ranges::PagedRange;
use inspect::Inspect;
use protocol::*;
use safeatomic::AtomicSliceOps;
use std::fmt::Debug;
use std::sync::Arc;
use std::sync::atomic::AtomicU8;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use thiserror::Error;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

mod pipe_protocol {
    use zerocopy::FromBytes;
    use zerocopy::Immutable;
    use zerocopy::IntoBytes;
    use zerocopy::KnownLayout;

    /// Pipe channel packets are prefixed with this header to allow for
    /// non-8-multiple lengths.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PipeHeader {
        pub packet_type: u32,
        pub len: u32,
    }

    /// Regular data packet.
    pub const PIPE_PACKET_TYPE_DATA: u32 = 1;
    /// Data packet that has been partially consumed, in which case the `len`
    /// field's high word is the number of bytes already read. The opposite
    /// endpoint will never write this type.
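    /// For such packets, the count of bytes already consumed is `len >> 16`.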
    pub const PIPE_PACKET_TYPE_PARTIAL: u32 = 2;
    /// Setup a GPA direct buffer for RDMA.
    pub const PIPE_PACKET_TYPE_SETUP_GPA_DIRECT: u32 = 3;
    /// Tear down a GPA direct buffer.
    pub const PIPE_PACKET_TYPE_TEARDOWN_GPA_DIRECT: u32 = 4;

    /// The maximum size of a pipe packet's payload.
    pub const MAXIMUM_PIPE_PACKET_SIZE: usize = 16384;
}

mod protocol {
    use crate::CONTROL_WORD_COUNT;
    use inspect::Inspect;
    use safeatomic::AtomicSliceOps;
    use std::fmt::Debug;
    use std::sync::atomic::AtomicU32;
    use std::sync::atomic::Ordering;
    use zerocopy::FromBytes;
    use zerocopy::Immutable;
    use zerocopy::IntoBytes;
    use zerocopy::KnownLayout;

    /// VmBus ring buffers are sized in multiples of 4KB pages, with a 4KB control page.
    pub const PAGE_SIZE: usize = 4096;

    /// The descriptor header on every packet.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PacketDescriptor {
        pub packet_type: u16,
        pub data_offset8: u16,
        pub length8: u16,
        pub flags: u16,
        pub transaction_id: u64,
    }

    /// A control page accessor.
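    ///
    /// The control words used below are: word 0 is the `in` (write) offset,
    /// word 1 the `out` (read) offset, word 2 the interrupt mask, word 3 the
    /// pending send size, and word 16 the feature bits.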
    pub struct Control<'a>(pub &'a [AtomicU32; CONTROL_WORD_COUNT]);

    impl<'a> Control<'a> {
        pub fn from_page(page: &'a guestmem::Page) -> Option<Self> {
            let slice = page.as_atomic_slice()?[..CONTROL_WORD_COUNT]
                .try_into()
                .unwrap();
            Some(Self(slice))
        }

        pub fn inp(&self) -> &AtomicU32 {
            &self.0[0]
        }
        pub fn outp(&self) -> &AtomicU32 {
            &self.0[1]
        }
        pub fn interrupt_mask(&self) -> &AtomicU32 {
            &self.0[2]
        }
        pub fn pending_send_size(&self) -> &AtomicU32 {
            &self.0[3]
        }
        pub fn feature_bits(&self) -> &AtomicU32 {
            &self.0[16]
        }
    }

    impl Inspect for Control<'_> {
        fn inspect(&self, req: inspect::Request<'_>) {
            req.respond()
                .hex("in", self.inp().load(Ordering::Relaxed))
                .hex("out", self.outp().load(Ordering::Relaxed))
                .hex(
                    "interrupt_mask",
                    self.interrupt_mask().load(Ordering::Relaxed),
                )
                .hex(
                    "pending_send_size",
                    self.pending_send_size().load(Ordering::Relaxed),
                )
                .hex("feature_bits", self.feature_bits().load(Ordering::Relaxed));
        }
    }

    impl Debug for Control<'_> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            f.debug_struct("Control")
                .field("inp", self.inp())
                .field("outp", self.outp())
                .field("interrupt_mask", self.interrupt_mask())
                .field("pending_send_size", self.pending_send_size())
                .field("feature_bits", self.feature_bits())
                .finish()
        }
    }

    /// If set, the endpoint supports sending signals when the number of free
    /// bytes in the ring reaches or exceeds `pending_send_size`.
    pub const FEATURE_SUPPORTS_PENDING_SEND_SIZE: u32 = 1;

    /// A transfer range specifying a length and offset within a transfer page
    /// set. Only used by NetVSP.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TransferPageRange {
        pub byte_count: u32,
        pub byte_offset: u32,
    }

    /// The extended portion of the packet descriptor that describes a transfer
    /// page packet.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TransferPageHeader {
        pub transfer_page_set_id: u16,
        pub reserved: u16, // may have garbage non-zero values
        pub range_count: u32,
    }

    /// The extended portion of the packet descriptor describing a GPA direct packet.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GpaDirectHeader {
        pub reserved: u32, // may have garbage non-zero values
        pub range_count: u32,
    }

    pub const PACKET_FLAG_COMPLETION_REQUESTED: u16 = 1;

    /// The packet footer.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct Footer {
        pub reserved: u32,
        /// The ring offset of the packet.
        pub offset: u32,
    }
}

#[derive(Copy, Clone, Debug, Error)]
pub enum Error {
    #[error("invalid ring buffer pointer")]
    InvalidRingPointer,
    #[error("invalid message length")]
    InvalidMessageLength,
    #[error("invalid data available")]
    InvalidDataAvailable,
    #[error("ring buffer too large")]
    RingTooLarge,
    #[error("invalid ring memory")]
    InvalidRingMemory,
    #[error("invalid descriptor offset or length")]
    InvalidDescriptorLengths,
    #[error("unknown packet descriptor flags")]
    InvalidDescriptorFlags,
    #[error("unknown packet descriptor type")]
    InvalidDescriptorType,
    #[error("invalid range count for gpa direct packet")]
    InvalidDescriptorGpaDirectRangeCount,
    #[error("the interrupt mask bit was supposed to be clear but it is set")]
    InterruptsExternallyMasked,
}

#[derive(Copy, Clone, Debug, Error)]
pub enum ReadError {
    #[error("ring buffer empty")]
    Empty,
    #[error(transparent)]
    Corrupt(#[from] Error),
}

#[derive(Copy, Clone, Debug, Error)]
pub enum WriteError {
    #[error("ring buffer full")]
    Full(usize),
    #[error(transparent)]
    Corrupt(#[from] Error),
}

/// A range within a ring buffer.
#[derive(Copy, Clone, Debug)]
pub struct RingRange {
    off: u32,
    size: u32,
}

impl RingRange {
    /// The empty range.
    pub fn empty() -> Self {
        RingRange { off: 0, size: 0 }
    }

    /// Retrieves a `MemoryWrite` that allows for writing to the range.
    pub fn writer<'a, T: Ring>(&self, ring: &'a T) -> RingRangeWriter<'a, T::Memory> {
        RingRangeWriter {
            start: self.off,
            end: self.off + self.size,
            mem: ring.mem(),
        }
    }

    /// Retrieves a `MemoryRead` that allows for reading from the range.
    pub fn reader<'a, T: Ring>(&self, ring: &'a T) -> RingRangeReader<'a, T::Memory> {
        RingRangeReader {
            start: self.off,
            end: self.off + self.size,
            mem: ring.mem(),
        }
    }

    /// Returns the length of the range.
    pub fn len(&self) -> usize {
        self.size as usize
    }

    /// Checks if this range is empty.
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }
}

/// A type implementing `MemoryRead` accessing a `RingRange`.
pub struct RingRangeReader<'a, T> {
    start: u32,
    end: u32,
    mem: &'a T,
}

impl<T: RingMem> MemoryRead for RingRangeReader<'_, T> {
    fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError> {
        if self.len() < data.len() {
            return Err(AccessError::OutOfRange(self.len(), data.len()));
        }
        self.mem.read_at(self.start as usize, data);
        self.start += data.len() as u32;
        Ok(self)
    }

    fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError> {
        if self.len() < len {
            return Err(AccessError::OutOfRange(self.len(), len));
        }
        self.start += len as u32;
        Ok(self)
    }

    fn len(&self) -> usize {
        (self.end - self.start) as usize
    }
}

/// A type implementing `MemoryWrite` accessing a `RingRange`.
pub struct RingRangeWriter<'a, T> {
    start: u32,
    end: u32,
    mem: &'a T,
}

impl<T: RingMem> MemoryWrite for RingRangeWriter<'_, T> {
    fn write(&mut self, data: &[u8]) -> Result<(), AccessError> {
        if self.len() < data.len() {
            return Err(AccessError::OutOfRange(self.len(), data.len()));
        }
        self.mem.write_at(self.start as usize, data);
        self.start += data.len() as u32;
        Ok(())
    }

    fn fill(&mut self, _val: u8, _len: usize) -> Result<(), AccessError> {
        unimplemented!()
    }

    fn len(&self) -> usize {
        (self.end - self.start) as usize
    }
}

/// The alternate types of incoming packets. For packets with external data,
/// includes a `RingRange` whose data is the variable portion of the packet
/// descriptor.
#[derive(Debug, Copy, Clone)]
pub enum IncomingPacketType {
    InBand,
    Completion,
    GpaDirect(u32, RingRange),
    TransferPages(u16, u32, RingRange),
}

/// An incoming packet.
#[derive(Debug)]
pub struct IncomingPacket {
    pub transaction_id: Option<u64>,
    pub typ: IncomingPacketType,
    pub payload: RingRange,
}

const PACKET_TYPE_IN_BAND: u16 = 6;
const PACKET_TYPE_TRANSFER_PAGES: u16 = 0x7;
const PACKET_TYPE_GPA_DIRECT: u16 = 0x9;
const PACKET_TYPE_COMPLETION: u16 = 0xb;

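// Packet wire layout, as parsed below: a 16-byte `PacketDescriptor`, then (for
// transfer page and GPA direct packets) an 8-byte extended header followed by
// range data starting at offset 24, then the payload at `data_offset8 * 8`
// bytes, and finally an 8-byte `Footer`. `data_offset8 < 2` is rejected because
// the payload cannot start inside the 16-byte descriptor itself.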
fn parse_packet<M: RingMem>(
    ring: &M,
    ring_off: u32,
    avail: u32,
) -> Result<(u32, IncomingPacket), ReadError> {
    let mut desc = PacketDescriptor::new_zeroed();
    ring.read_aligned(ring_off as usize, desc.as_mut_bytes());
    let len = desc.length8 as u32 * 8;
    if desc.length8 < desc.data_offset8 || desc.data_offset8 < 2 || avail < len {
        return Err(ReadError::Corrupt(Error::InvalidDescriptorLengths));
    }

    if (desc.flags & !PACKET_FLAG_COMPLETION_REQUESTED) != 0 {
        return Err(ReadError::Corrupt(Error::InvalidDescriptorFlags));
    }
    let transaction_id = if desc.flags & PACKET_FLAG_COMPLETION_REQUESTED != 0
        || desc.packet_type == PACKET_TYPE_COMPLETION
    {
        Some(desc.transaction_id)
    } else {
        None
    };
    let typ = match desc.packet_type {
        PACKET_TYPE_IN_BAND => IncomingPacketType::InBand,
        PACKET_TYPE_COMPLETION => IncomingPacketType::Completion,
        PACKET_TYPE_TRANSFER_PAGES => {
            let mut tph = TransferPageHeader::new_zeroed();
            ring.read_aligned(ring_off as usize + 16, tph.as_mut_bytes());
            IncomingPacketType::TransferPages(
                tph.transfer_page_set_id,
                tph.range_count,
                RingRange {
                    off: ring_off + 24,
                    size: desc.data_offset8 as u32 * 8 - 24,
                },
            )
        }
        PACKET_TYPE_GPA_DIRECT => {
            let mut gph = GpaDirectHeader::new_zeroed();
            ring.read_aligned(ring_off as usize + 16, gph.as_mut_bytes());
            if gph.range_count == 0 {
                return Err(ReadError::Corrupt(
                    Error::InvalidDescriptorGpaDirectRangeCount,
                ));
            }
            IncomingPacketType::GpaDirect(
                gph.range_count,
                RingRange {
                    off: ring_off + 24,
                    size: desc.data_offset8 as u32 * 8 - 24,
                },
            )
        }
        _ => return Err(ReadError::Corrupt(Error::InvalidDescriptorType)),
    };
    let payload = RingRange {
        off: ring_off + desc.data_offset8 as u32 * 8,
        size: (desc.length8 - desc.data_offset8) as u32 * 8,
    };
    Ok((
        len,
        IncomingPacket {
            transaction_id,
            typ,
            payload,
        },
    ))
}

/// The size of the control region in 32-bit words.
pub const CONTROL_WORD_COUNT: usize = 32;

/// A trait for memory backing a ring buffer.
pub trait RingMem: Send {
    /// Returns the control page.
    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT];

    /// Reads from the data portion of the ring, wrapping (once) at the end of
    /// the ring. Precondition: `addr + data.len() <= self.len() * 2`.
    fn read_at(&self, addr: usize, data: &mut [u8]);

    /// Reads from the data portion of the ring, as in [`RingMem::read_at`]. `addr` and
    /// `data.len()` must be multiples of 8.
    ///
    /// `read_at` may be faster for large or variable-sized reads.
    fn read_aligned(&self, addr: usize, data: &mut [u8]) {
        debug_assert!(addr % 8 == 0);
        debug_assert!(data.len() % 8 == 0);
        self.read_at(addr, data)
    }

    /// Writes to the data portion of the ring, wrapping (once) at the end of
    /// the ring. Precondition: `addr + data.len() <= self.len() * 2`.
    fn write_at(&self, addr: usize, data: &[u8]);

    /// Writes to the data portion of the ring, as in [`RingMem::write_at`]. `addr` and
    /// `data.len()` must be multiples of 8.
    ///
    /// `write_at` may be faster for large or variable-sized writes.
    fn write_aligned(&self, addr: usize, data: &[u8]) {
        debug_assert!(addr % 8 == 0);
        debug_assert!(data.len() % 8 == 0);
        self.write_at(addr, data)
    }

    /// Returns the length of the ring in bytes.
    fn len(&self) -> usize;
}

/// Implementation of `RingMem` for references. Useful for tests.
impl<T: RingMem + Sync> RingMem for &'_ T {
    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT] {
        (*self).control()
    }
    fn read_at(&self, addr: usize, data: &mut [u8]) {
        (*self).read_at(addr, data)
    }
    fn write_at(&self, addr: usize, data: &[u8]) {
        (*self).write_at(addr, data)
    }
    fn len(&self) -> usize {
        (*self).len()
    }

    fn read_aligned(&self, addr: usize, data: &mut [u8]) {
        (*self).read_aligned(addr, data)
    }

    fn write_aligned(&self, addr: usize, data: &[u8]) {
        (*self).write_aligned(addr, data)
    }
}

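/// An implementation of [`RingMem`] over a single contiguous buffer, where the
/// first page is the control page and the remainder is the ring data.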
#[derive(Debug)]
pub struct SingleMappedRingMem<T>(pub T);

impl<T: AsRef<[AtomicU8]>> SingleMappedRingMem<T> {
    fn control_range(&self) -> &[AtomicU8; PAGE_SIZE] {
        self.0.as_ref()[..PAGE_SIZE].try_into().unwrap()
    }

    fn data(&self) -> &[AtomicU8] {
        &self.0.as_ref()[PAGE_SIZE..]
    }
}

impl<T: AsRef<[AtomicU8]> + Send> RingMem for SingleMappedRingMem<T> {
    fn read_at(&self, mut addr: usize, data: &mut [u8]) {
        if addr >= self.len() {
            addr -= self.len();
        }
        let this_data = self.data();
        if addr + data.len() <= self.len() {
            this_data[addr..addr + data.len()].atomic_read(data);
        } else {
            let data_len = data.len();
            let (first, last) = data.split_at_mut(self.len() - addr);
            this_data[addr..].atomic_read(first);
            this_data[..data_len - (self.len() - addr)].atomic_read(last);
        }
    }

    fn write_at(&self, mut addr: usize, data: &[u8]) {
        if addr > self.len() {
            addr -= self.len();
        }
        let this_data = self.data();
        if addr + data.len() <= self.len() {
            this_data[addr..addr + data.len()].atomic_write(data);
        } else {
            let (first, last) = data.split_at(self.len() - addr);
            this_data[addr..].atomic_write(first);
            this_data[..data.len() - (self.len() - addr)].atomic_write(last);
        }
    }

    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT] {
        self.control_range().as_atomic_slice().unwrap()[..CONTROL_WORD_COUNT]
            .try_into()
            .unwrap()
    }

    fn len(&self) -> usize {
        self.data().len()
    }
}

/// An implementation of `RingMem` over a flat allocation. Useful for tests.
#[derive(Clone)]
pub struct FlatRingMem {
    inner: Arc<FlatRingInner>,
}

struct FlatRingInner {
    control: [AtomicU32; CONTROL_WORD_COUNT],
    data: Vec<AtomicU8>,
}

impl FlatRingMem {
    /// Allocates a new ring memory of `len` bytes.
    pub fn new(len: usize) -> Self {
        let mut data = Vec::new();
        data.resize_with(len, Default::default);
        Self {
            inner: Arc::new(FlatRingInner {
                control: [0; CONTROL_WORD_COUNT].map(Into::into),
                data,
            }),
        }
    }
}

impl Debug for FlatRingMem {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("FlatRingMem").finish()
    }
}

impl RingMem for FlatRingMem {
    fn read_at(&self, mut addr: usize, data: &mut [u8]) {
        if addr > self.len() {
            addr -= self.len();
        }
        if addr + data.len() <= self.len() {
            self.inner.data[addr..addr + data.len()].atomic_read(data);
        } else {
            let data_len = data.len();
            let (first, last) = data.split_at_mut(self.len() - addr);
            self.inner.data[addr..].atomic_read(first);
            self.inner.data[..data_len - (self.len() - addr)].atomic_read(last);
        }
    }

    fn write_at(&self, mut addr: usize, data: &[u8]) {
        if addr > self.len() {
            addr -= self.len();
        }
        if addr + data.len() <= self.len() {
            self.inner.data[addr..addr + data.len()].atomic_write(data);
        } else {
            let (first, last) = data.split_at(self.len() - addr);
            self.inner.data[addr..].atomic_write(first);
            self.inner.data[..data.len() - (self.len() - addr)].atomic_write(last);
        }
    }

    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT] {
        &self.inner.control
    }

    fn len(&self) -> usize {
        self.inner.data.len()
    }
}

/// A trait for ring buffer memory divided into discontiguous pages.
pub trait PagedMemory: Send {
    /// Returns the control page.
    fn control(&self) -> &[AtomicU8; PAGE_SIZE];
    /// Returns the number of data pages.
    fn data_page_count(&self) -> usize;
    /// Returns a data page.
    ///
    /// For performance reasons, `page` may be in `0..data_page_count*2`,
    /// representing the ring logically mapped twice consecutively. The
    /// implementation should return the same page for `n` and `n +
    /// data_page_count`.
    fn data(&self, page: usize) -> &[AtomicU8; PAGE_SIZE];
}

/// An implementation of [`RingMem`] on top of discontiguous pages.
#[derive(Debug, Clone)]
pub struct PagedRingMem<T>(T);

impl<T: PagedMemory> PagedRingMem<T> {
    /// Returns a new ring memory wrapping a type implementing [`PagedMemory`].
    pub fn new(inner: T) -> Self {
        Self(inner)
    }
}

impl<T: PagedMemory> RingMem for PagedRingMem<T> {
    fn len(&self) -> usize {
        self.0.data_page_count() * PAGE_SIZE
    }

    fn read_at(&self, mut addr: usize, mut data: &mut [u8]) {
        while !data.is_empty() {
            let page = addr / PAGE_SIZE;
            let offset = addr % PAGE_SIZE;
            let offset_end = PAGE_SIZE.min(offset + data.len());
            let len = offset_end - offset;
            let (this, next) = data.split_at_mut(len);
            self.0.data(page)[offset..offset_end].atomic_read(this);
            addr += len;
            data = next;
        }
    }

    fn write_at(&self, mut addr: usize, mut data: &[u8]) {
        while !data.is_empty() {
            let page = addr / PAGE_SIZE;
            let offset = addr % PAGE_SIZE;
            let offset_end = PAGE_SIZE.min(offset + data.len());
            let len = offset_end - offset;
            let (this, next) = data.split_at(len);
            self.0.data(page)[offset..offset_end].atomic_write(this);
            addr += len;
            data = next;
        }
    }

    #[inline]
    fn read_aligned(&self, addr: usize, data: &mut [u8]) {
        debug_assert!(addr % 8 == 0);
        debug_assert!(data.len() % 8 == 0);
        for (i, b) in data.chunks_exact_mut(8).enumerate() {
            let addr = (addr & !7) + i * 8;
            let page = addr / PAGE_SIZE;
            let offset = addr % PAGE_SIZE;
            b.copy_from_slice(
                &self.0.data(page)[offset..offset + 8]
                    .as_atomic::<AtomicU64>()
                    .unwrap()
                    .load(Ordering::Relaxed)
                    .to_ne_bytes(),
            );
        }
    }

    #[inline]
    fn write_aligned(&self, addr: usize, data: &[u8]) {
        debug_assert!(addr % 8 == 0);
        debug_assert!(data.len() % 8 == 0);
        for (i, b) in data.chunks_exact(8).enumerate() {
            let addr = (addr & !7) + i * 8;
            let page = addr / PAGE_SIZE;
            let offset = addr % PAGE_SIZE;
            self.0.data(page)[offset..offset + 8]
                .as_atomic::<AtomicU64>()
                .unwrap()
                .store(u64::from_ne_bytes(b.try_into().unwrap()), Ordering::Relaxed);
        }
    }

    #[inline]
    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT] {
        self.0.control().as_atomic_slice().unwrap()[..CONTROL_WORD_COUNT]
            .try_into()
            .unwrap()
    }
}

/// Information about an outgoing packet.
#[derive(Debug)]
pub struct OutgoingPacket<'a> {
    pub transaction_id: u64,
    pub size: usize,
    pub typ: OutgoingPacketType<'a>,
}

/// The outgoing packet type variants.
#[derive(Debug, Copy, Clone)]
pub enum OutgoingPacketType<'a> {
    /// A non-transactional data packet.
    InBandNoCompletion,
    /// A transactional data packet.
    InBandWithCompletion,
    /// A completion packet.
    Completion,
    /// A GPA direct packet, which can reference memory outside the ring by address.
    ///
    /// Not supported on the host side of the ring.
    GpaDirect(&'a [PagedRange<'a>]),
    /// A transfer page packet, which can reference memory outside the ring by a
    /// buffer ID and a set of offsets into some pre-established buffer
    /// (typically a GPADL).
    ///
    /// Used by networking. Should not be used in new devices--just embed the
    /// buffer offsets in the device-specific packet payload.
    TransferPages(u16, &'a [TransferPageRange]),
}

/// Namespace type with methods to compute packet sizes, for use with
/// `set_pending_send_size`.
pub struct PacketSize(());

impl PacketSize {
    /// Computes the size of an in-band packet.
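    ///
    /// This is the 16-byte descriptor plus the payload rounded up to a multiple
    /// of 8 bytes plus the 8-byte footer; for example, a 100-byte payload
    /// yields 16 + 104 + 8 = 128 bytes.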
    pub const fn in_band(payload_len: usize) -> usize {
        size_of::<PacketDescriptor>() + ((payload_len + 7) & !7) + size_of::<Footer>()
    }

    /// Computes the size of a completion packet.
    pub const fn completion(payload_len: usize) -> usize {
        Self::in_band(payload_len)
    }

    // Computes the size of a gpa direct packet.
    // pub fn gpa_direct()

    /// Computes the size of a transfer page packet.
    pub const fn transfer_pages(count: usize, payload_len: usize) -> usize {
        Self::in_band(payload_len)
            + size_of::<TransferPageHeader>()
            + count * size_of::<TransferPageRange>()
    }
}

/// A trait shared by the incoming and outgoing ring buffers. Used primarily
/// with `RingRange::reader` and `RingRange::writer`.
pub trait Ring {
    /// The underlying memory type.
    type Memory: RingMem;

    /// The backing memory of the ring buffer.
    fn mem(&self) -> &Self::Memory;
}

/// The interface to the receiving endpoint of a ring buffer.
#[derive(Debug)]
pub struct IncomingRing<M: RingMem> {
    inner: InnerRing<M>,
}

impl<M: RingMem> Inspect for IncomingRing<M> {
    fn inspect(&self, req: inspect::Request<'_>) {
        self.inner.inspect(req);
    }
}

/// The current incoming ring state.
#[derive(Debug, Clone, Inspect)]
pub struct IncomingOffset {
    #[inspect(hex)]
    cached_in: u32,
    #[inspect(hex)]
    committed_out: u32,
    #[inspect(hex)]
    next_out: u32,
}

impl IncomingOffset {
    /// Reverts the removal of packets that have not yet been committed.
    pub fn revert(&mut self) {
        self.next_out = self.committed_out;
    }
}

impl<M: RingMem> Ring for IncomingRing<M> {
    type Memory = M;
    fn mem(&self) -> &Self::Memory {
        &self.inner.mem
    }
}

impl<M: RingMem> IncomingRing<M> {
    /// Returns a new incoming ring. Fails if the ring memory is not sized or
    /// aligned correctly or if the ring control data is corrupt.
    pub fn new(mem: M) -> Result<Self, Error> {
        let inner = InnerRing::new(mem)?;
        // Start with interrupts masked.
        let control = inner.control();
        control.interrupt_mask().store(1, Ordering::Relaxed);
        Ok(Self { inner })
    }

    /// Indicates whether pending send size notification is supported on
    /// the vmbus ring.
    pub fn supports_pending_send_size(&self) -> bool {
        let feature_bits = self.inner.control().feature_bits().load(Ordering::Relaxed);
        (feature_bits & FEATURE_SUPPORTS_PENDING_SEND_SIZE) != 0
    }

    /// Enables or disables the interrupt mask, declaring to the opposite
    /// endpoint whether interrupts should be suppressed (mask set) or sent
    /// (mask clear) for a ring empty-to-non-empty transition.
    pub fn set_interrupt_mask(&self, state: bool) {
        self.inner
            .control()
            .interrupt_mask()
            .store(state as u32, Ordering::SeqCst);
    }

    /// Verifies that interrupts are currently unmasked.
    ///
    /// This can be used to check that ring state is consistent.
    pub fn verify_interrupts_unmasked(&self) -> Result<(), Error> {
        if self
            .inner
            .control()
            .interrupt_mask()
            .load(Ordering::Relaxed)
            == 0
        {
            Ok(())
        } else {
            Err(Error::InterruptsExternallyMasked)
        }
    }

    /// Returns the current incoming offset, for passing to `read` and
    /// `commit_read`.
    pub fn incoming(&self) -> Result<IncomingOffset, Error> {
        let control = self.inner.control();
        let next_out = self
            .inner
            .validate(control.outp().load(Ordering::Relaxed))?;
        let cached_in = self.inner.validate(control.inp().load(Ordering::Relaxed))?;
        Ok(IncomingOffset {
            next_out,
            cached_in,
            committed_out: next_out,
        })
    }

    /// Returns true if there are any packets to read.
    pub fn can_read(&self, incoming: &mut IncomingOffset) -> Result<bool, Error> {
        let can_read = if incoming.next_out != incoming.cached_in {
            true
        } else {
            let inp = self
                .inner
                .validate(self.inner.control().inp().load(Ordering::Acquire))?;
            // Cache the new offset to ensure a stable result.
            incoming.cached_in = inp;
            incoming.next_out != inp
        };
        Ok(can_read)
    }

    /// Commits a series of packet reads, returning whether the opposite
    /// endpoint should be signaled.
    pub fn commit_read(&self, ptrs: &mut IncomingOffset) -> bool {
        if ptrs.committed_out == ptrs.next_out {
            return false;
        }
        let control = self.inner.control();
        control.outp().store(ptrs.next_out, Ordering::SeqCst);
        let pending_send_size = control.pending_send_size().load(Ordering::SeqCst);
        // Some implementations set the pending send size to the size of the
        // ring minus 1. The intent is that a signal arrive when the ring is
        // completely empty, but this is invalid since the maximum writable ring
        // size is the size of the ring minus 8. Mask off the low bits to work
        // around this.
        let pending_send_size = pending_send_size & !7;
        let signal = if pending_send_size != 0 {
            if let Ok(inp) = self.inner.validate(control.inp().load(Ordering::SeqCst)) {
                let old_free = self.inner.free(inp, ptrs.committed_out);
                let new_free = self.inner.free(inp, ptrs.next_out);
                old_free < pending_send_size && new_free >= pending_send_size
            } else {
                false
            }
        } else {
            false
        };
        ptrs.committed_out = ptrs.next_out;
        signal
    }

    /// Parses the next packet descriptor, returning the parsed information and
    /// a range that can be used to read the packet. The caller should commit
    /// the read with `commit_read` to free up space in the ring.
    pub fn read(&self, ptrs: &mut IncomingOffset) -> Result<IncomingPacket, ReadError> {
        let outp = ptrs.next_out;
        let mut inp = ptrs.cached_in;
        if inp == outp {
            inp = self
                .inner
                .validate(self.inner.control().inp().load(Ordering::Acquire))?;
            if inp == outp {
                return Err(ReadError::Empty);
            }
            ptrs.cached_in = inp;
        }
        let avail = self.inner.available(inp, outp);
        if avail < 16 {
            return Err(ReadError::Corrupt(Error::InvalidDataAvailable));
        }
        let (len, packet) = parse_packet(&self.inner.mem, outp, avail)?;
        ptrs.next_out = self
            .inner
            .add_pointer(outp, len + size_of::<Footer>() as u32);

        Ok(packet)
    }
}

/// The sending side of a ring buffer.
#[derive(Debug)]
pub struct OutgoingRing<M: RingMem> {
    inner: InnerRing<M>,
}

impl<M: RingMem> Inspect for OutgoingRing<M> {
    fn inspect(&self, req: inspect::Request<'_>) {
        self.inner.inspect(req);
    }
}

/// An outgoing ring offset, used to determine the position to write packets to.
#[derive(Debug, Clone, Inspect)]
pub struct OutgoingOffset {
    #[inspect(hex)]
    cached_out: u32,
    #[inspect(hex)]
    committed_in: u32,
    #[inspect(hex)]
    next_in: u32,
}

impl OutgoingOffset {
    /// Reverts the insertion of packets that have not yet been committed.
    pub fn revert(&mut self) {
        self.next_in = self.committed_in;
    }
}

impl<M: RingMem> Ring for OutgoingRing<M> {
    type Memory = M;
    fn mem(&self) -> &Self::Memory {
        &self.inner.mem
    }
}

impl<M: RingMem> OutgoingRing<M> {
    /// Returns a new outgoing ring over `mem`.
    pub fn new(mem: M) -> Result<Self, Error> {
        let inner = InnerRing::new(mem)?;
        // Report to the opposite endpoint that we will send interrupts for a
        // ring full to ring non-full transition. Feature bits are set by the
        // sending side.
        let control = inner.control();
        control
            .feature_bits()
            .store(FEATURE_SUPPORTS_PENDING_SEND_SIZE, Ordering::Relaxed);
        // Start with no interrupt requested.
        control.pending_send_size().store(0, Ordering::Relaxed);
        Ok(Self { inner })
    }

    /// Returns the current outgoing offset, for passing to `write` and
    /// ultimately `commit_write`.
    pub fn outgoing(&self) -> Result<OutgoingOffset, Error> {
        let control = self.inner.control();
        let next_in = self.inner.validate(control.inp().load(Ordering::Relaxed))?;
        let cached_out = self
            .inner
            .validate(control.outp().load(Ordering::Relaxed))?;
        Ok(OutgoingOffset {
            cached_out,
            committed_in: next_in,
            next_in,
        })
    }

    /// Sets the pending send size: the number of bytes that should be free in
    /// the ring before the opposite endpoint sends a ring-non-full signal.
    ///
    /// Fails if the packet size is larger than the ring's maximum packet size.
    pub fn set_pending_send_size(&self, len: usize) -> Result<(), Error> {
        if len > self.maximum_packet_size() {
            return Err(Error::InvalidMessageLength);
        }
        self.inner
            .control()
            .pending_send_size()
            .store((len as u32 + 7) & !7, Ordering::SeqCst);

        Ok(())
    }

    /// Returns the maximum packet size that can fit in the ring.
    pub fn maximum_packet_size(&self) -> usize {
        self.inner.len() as usize - 8
    }

    /// Returns whether a packet can fit in the ring starting at the specified
    /// offset.
    pub fn can_write(&self, ptrs: &mut OutgoingOffset, len: usize) -> Result<bool, Error> {
        let can_write = if self.inner.free(ptrs.next_in, ptrs.cached_out) as usize >= len {
            true
        } else {
            let outp = self
                .inner
                .validate(self.inner.control().outp().load(Ordering::Relaxed))?;

            // Cache the new offset to ensure a stable result.
            ptrs.cached_out = outp;
            self.inner.free(ptrs.next_in, outp) as usize >= len
        };
        Ok(can_write)
    }

    /// Commits a series of writes that ended at the specified offset, returning
    /// whether the opposite endpoint should be signaled.
    pub fn commit_write(&self, ptrs: &mut OutgoingOffset) -> bool {
        if ptrs.committed_in == ptrs.next_in {
            return false;
        }
        let inp = ptrs.next_in;

        // Update the ring offset and check if the opposite endpoint needs to be
        // signaled. This is the case only if interrupts are unmasked and the
        // ring was previously empty before this write.
        let control = self.inner.control();
        control.inp().store(inp, Ordering::SeqCst);
        let needs_interrupt = control.interrupt_mask().load(Ordering::SeqCst) == 0
            && control.outp().load(Ordering::SeqCst) == ptrs.committed_in;

        ptrs.committed_in = inp;
        needs_interrupt
    }

    /// Writes the header of the next packet and returns the ring range for the
    /// payload. The caller should write the payload, then commit the write (or
    /// multiple writes) with `commit_write`.
    ///
    /// Returns `Err(WriteError::Full(len))` if the ring is full, where `len` is
    /// the number of bytes needed to write the requested packet.
    pub fn write(
        &self,
        ptrs: &mut OutgoingOffset,
        packet: &OutgoingPacket<'_>,
    ) -> Result<RingRange, WriteError> {
        const DESCRIPTOR_SIZE: usize = size_of::<PacketDescriptor>();
        let (packet_type, header_size, flags) = match packet.typ {
            OutgoingPacketType::InBandNoCompletion => (PACKET_TYPE_IN_BAND, DESCRIPTOR_SIZE, 0),
            OutgoingPacketType::InBandWithCompletion => (
                PACKET_TYPE_IN_BAND,
                DESCRIPTOR_SIZE,
                PACKET_FLAG_COMPLETION_REQUESTED,
            ),
            OutgoingPacketType::Completion => (PACKET_TYPE_COMPLETION, DESCRIPTOR_SIZE, 0),
            OutgoingPacketType::GpaDirect(ranges) => (
                PACKET_TYPE_GPA_DIRECT,
                DESCRIPTOR_SIZE
                    + size_of::<GpaDirectHeader>()
                    + ranges.iter().fold(0, |a, range| {
                        a + size_of::<GpaRange>() + size_of_val(range.gpns())
                    }),
                PACKET_FLAG_COMPLETION_REQUESTED,
            ),
            OutgoingPacketType::TransferPages(_, ranges) => (
                PACKET_TYPE_TRANSFER_PAGES,
                DESCRIPTOR_SIZE + size_of::<TransferPageHeader>() + size_of_val(ranges),
                PACKET_FLAG_COMPLETION_REQUESTED,
            ),
        };
        let msg_len = (packet.size + header_size).div_ceil(8) * 8;
        let total_msg_len = (msg_len + size_of::<Footer>()) as u32;
        if total_msg_len >= self.inner.len() - 8 {
            return Err(WriteError::Corrupt(Error::InvalidMessageLength));
        }
        let inp = ptrs.next_in;
        let mut outp = ptrs.cached_out;
        if self.inner.free(inp, outp) < total_msg_len {
            outp = self
                .inner
                .validate(self.inner.control().outp().load(Ordering::Relaxed))?;
            if self.inner.free(inp, outp) < total_msg_len {
                return Err(WriteError::Full(total_msg_len as usize));
            }
            ptrs.cached_out = outp;
        }
        let desc = PacketDescriptor {
            packet_type,
            data_offset8: header_size as u16 / 8,
            length8: (msg_len / 8) as u16,
            flags,
            transaction_id: packet.transaction_id,
        };

        let footer = Footer {
            reserved: 0,
            offset: inp,
        };

        let off = inp as usize;
        self.inner.mem.write_aligned(off, desc.as_bytes());
        match packet.typ {
            OutgoingPacketType::GpaDirect(ranges) => {
                let mut writer = RingRange {
                    off: (off + DESCRIPTOR_SIZE) as u32,
                    size: header_size as u32,
                }
                .writer(self);
                let gpa_header = GpaDirectHeader {
                    reserved: 0,
                    range_count: ranges.len() as u32,
                };
                writer
                    .write(gpa_header.as_bytes())
                    .map_err(|_| WriteError::Corrupt(Error::InvalidMessageLength))?;

                for range in ranges {
                    let gpa_rng = GpaRange {
                        len: range.len() as u32,
                        offset: range.offset() as u32,
                    };
                    writer
                        .write(gpa_rng.as_bytes())
                        .map_err(|_| WriteError::Corrupt(Error::InvalidMessageLength))?;
                    writer
                        .write(range.gpns().as_bytes())
                        .map_err(|_| WriteError::Corrupt(Error::InvalidMessageLength))?;
                }
            }
            OutgoingPacketType::TransferPages(tp_id, ranges) => {
                let tp_header = TransferPageHeader {
                    transfer_page_set_id: tp_id,
                    reserved: 0,
                    range_count: ranges.len() as u32,
                };
                self.inner
                    .mem
                    .write_aligned(off + DESCRIPTOR_SIZE, tp_header.as_bytes());
                for (i, range) in ranges.iter().enumerate() {
                    self.inner.mem.write_aligned(
                        off + DESCRIPTOR_SIZE + size_of_val(&tp_header) + i * 8,
                        range.as_bytes(),
                    );
                }
            }
            _ => (),
        }

        self.inner
            .mem
            .write_aligned(off + msg_len, footer.as_bytes());
        ptrs.next_in = self.inner.add_pointer(inp, total_msg_len);
        Ok(RingRange {
            off: inp + header_size as u32,
            size: packet.size as u32,
        })
    }
}

struct InnerRing<M: RingMem> {
    mem: M,
    size: u32,
}

impl<M: RingMem> Inspect for InnerRing<M> {
    fn inspect(&self, req: inspect::Request<'_>) {
        req.respond()
            .hex("ring_size", self.size)
            .field("control", self.control());
    }
}

/// Inspects ring buffer state without creating an IncomingRing or OutgoingRing
/// structure.
///
/// # Panics
///
/// Panics if control_page is not aligned.
pub fn inspect_ring(control_page: &guestmem::Page, response: &mut inspect::Response<'_>) {
    let control = Control::from_page(control_page).expect("control page is not aligned");
    response.field("control", control);
}

/// Returns whether a ring buffer is in a state where the receiving end might
/// need a signal.
pub fn reader_needs_signal(control_page: &guestmem::Page) -> bool {
    Control::from_page(control_page).is_some_and(|control| {
        control.interrupt_mask().load(Ordering::Relaxed) == 0
            && (control.inp().load(Ordering::Relaxed) != control.outp().load(Ordering::Relaxed))
    })
}

/// Returns whether a ring buffer is in a state where the sending end might need
/// a signal.
pub fn writer_needs_signal(control_page: &guestmem::Page, ring_size: u32) -> bool {
    Control::from_page(control_page).is_some_and(|control| {
        let pending_size = control.pending_send_size().load(Ordering::Relaxed);
        pending_size != 0
            && ring_free(
                ring_size,
                control.inp().load(Ordering::Relaxed),
                control.outp().load(Ordering::Relaxed),
            ) >= pending_size
    })
}

impl<M: RingMem> Debug for InnerRing<M> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("InnerRing")
            .field("control", &self.control())
            .field("size", &self.size)
            .finish()
    }
}

impl<M: RingMem> InnerRing<M> {
    pub fn new(mem: M) -> Result<Self, Error> {
        let ring_size = u32::try_from(mem.len()).map_err(|_| Error::InvalidRingMemory)?;
        if ring_size % 4096 != 0 {
            return Err(Error::InvalidRingMemory);
        }
        let ring = InnerRing {
            mem,
            size: ring_size,
        };
        Ok(ring)
    }

    fn control(&self) -> Control<'_> {
        Control(self.mem.control())
    }

    fn len(&self) -> u32 {
        self.size
    }

    fn validate(&self, p: u32) -> Result<u32, Error> {
        if p >= self.size || p % 8 != 0 {
            Err(Error::InvalidRingPointer)
        } else {
            Ok(p)
        }
    }

    fn add_pointer(&self, p: u32, off: u32) -> u32 {
        let np = p + off;
        if np >= self.size {
            assert!(np < self.size * 2);
            np - self.size
        } else {
            np
        }
    }

    fn available(&self, inp: u32, outp: u32) -> u32 {
        if inp > outp {
            // |____outp....inp_____|
            inp - outp
        } else {
            // |....inp____outp.....|
            self.size + inp - outp
        }
    }

    fn free(&self, inp: u32, outp: u32) -> u32 {
        ring_free(self.size, inp, outp)
    }
}

fn ring_free(size: u32, inp: u32, outp: u32) -> u32 {
    // It's not possible to fully fill the ring since that state would be
    // indistinguishable from the empty ring. So subtract 8 bytes from the
    // result.
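    // For example, with a 0x1000-byte ring and inp == outp, 0xff8 bytes are free.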
    if outp > inp {
        // |....inp____outp.....|
        outp - inp - 8
    } else {
        // |____outp....inp_____|
        size - (inp - outp) - 8
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn write_simple<T: RingMem>(out_ring: &mut OutgoingRing<T>, buf: &[u8]) -> Option<bool> {
        let mut outgoing = out_ring.outgoing().unwrap();
        match out_ring.write(
            &mut outgoing,
            &OutgoingPacket {
                typ: OutgoingPacketType::InBandNoCompletion,
                size: buf.len(),
                transaction_id: 0,
            },
        ) {
            Ok(range) => {
                range.writer(out_ring).write(buf).unwrap();
                Some(out_ring.commit_write(&mut outgoing))
            }
            Err(WriteError::Full(_)) => None,
            Err(err) => panic!("{}", err),
        }
    }

    fn read_simple<T: RingMem>(in_ring: &mut IncomingRing<T>) -> (Vec<u8>, bool) {
        let mut incoming = in_ring.incoming().unwrap();
        let msg = in_ring
            .read(&mut incoming)
            .unwrap()
            .payload
            .reader(in_ring)
            .read_all()
            .unwrap();
        let signal = in_ring.commit_read(&mut incoming);
        (msg, signal)
    }

    #[test]
    fn test_ring() {
        let rmem = FlatRingMem::new(16384);
        let mut in_ring = IncomingRing::new(&rmem).unwrap();
        in_ring.set_interrupt_mask(false);
        let mut out_ring = OutgoingRing::new(&rmem).unwrap();

        let p = &[1, 2, 3, 4, 5, 6, 7, 8];
        assert!(write_simple(&mut out_ring, p).unwrap());

        let (msg, signal) = read_simple(&mut in_ring);
        assert!(!signal);

        assert_eq!(p, &msg[..]);
    }

    #[test]
    fn test_interrupt_mask() {
        let rmem = FlatRingMem::new(16384);
        let mut in_ring = IncomingRing::new(&rmem).unwrap();
        let mut out_ring = OutgoingRing::new(&rmem).unwrap();

        // Interrupts are masked, so no signal is expected.
        assert!(!write_simple(&mut out_ring, &[1, 2, 3]).unwrap());
        assert!(!read_simple(&mut in_ring).1);

        // Unmask interrupts, then try again, expecting a signal this time.
        in_ring.set_interrupt_mask(false);
        assert!(write_simple(&mut out_ring, &[1, 2, 3]).unwrap());
        assert!(!read_simple(&mut in_ring).1);
    }

    #[test]
    fn test_pending_send_size() {
        let rmem = FlatRingMem::new(16384);
        let mut in_ring = IncomingRing::new(&rmem).unwrap();
        let mut out_ring = OutgoingRing::new(&rmem).unwrap();

        // Fill the ring up with some packets.
        write_simple(&mut out_ring, &[1; 4000]).unwrap();
        write_simple(&mut out_ring, &[2; 4000]).unwrap();
        write_simple(&mut out_ring, &[3; 4000]).unwrap();
        write_simple(&mut out_ring, &[4; 4000]).unwrap();
        assert!(write_simple(&mut out_ring, &[5; 4000]).is_none());

        // No pending send size yet.
        assert!(!read_simple(&mut in_ring).1);

        // Fill the ring back up.
        write_simple(&mut out_ring, &[5; 4000]).unwrap();
        assert!(write_simple(&mut out_ring, &[6; 4000]).is_none());

        // Set a pending send size for two packets worth of space (packet size +
        // 16 bytes for the descriptor and 8 bytes for the footer).
        out_ring.set_pending_send_size(4024 * 2).unwrap();

        // There should be a signal after two packets, then no more signals.
        assert!(!read_simple(&mut in_ring).1);
        assert!(read_simple(&mut in_ring).1);
        assert!(!read_simple(&mut in_ring).1);
    }
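
    // Sanity checks for the free-space accounting in `ring_free` and for the
    // wrap-once semantics of `RingMem::read_at`/`write_at` over `FlatRingMem`.
    #[test]
    fn test_ring_free_accounting() {
        // An empty ring reports its size minus 8 bytes free, so that a full
        // ring remains distinguishable from an empty one.
        assert_eq!(ring_free(4096, 0, 0), 4088);
        assert_eq!(ring_free(4096, 8, 0), 4080);
        // A completely full ring reports zero free bytes.
        assert_eq!(ring_free(4096, 4088, 0), 0);

        let rmem = FlatRingMem::new(16384);
        let out_ring = OutgoingRing::new(&rmem).unwrap();
        assert_eq!(out_ring.maximum_packet_size(), 16384 - 8);
    }

    #[test]
    fn test_ring_mem_wrapping() {
        // Writes and reads may start anywhere in `0..len * 2` and wrap once at
        // the end of the data area.
        let rmem = FlatRingMem::new(4096);
        let data: Vec<u8> = (0..16u8).collect();
        rmem.write_at(4088, &data);
        let mut readback = vec![0; 16];
        rmem.read_at(4088, &mut readback);
        assert_eq!(readback, data);
        // The last 8 bytes of the write landed at the start of the ring.
        let mut head = vec![0; 8];
        rmem.read_at(0, &mut head);
        assert_eq!(head, &data[8..]);
    }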
}