vmbus_ring/lib.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! This module implements the low-level interface to the VmBus ring buffer. The
//! ring buffer resides in guest memory and is mapped into the host, allowing
//! efficient transfer of variable-sized packets.
//!
//! Ring buffer packets have headers called descriptors, which can specify a
//! transaction ID and metadata referring to memory outside the ring buffer.
//! Each packet is a multiple of 8 bytes.
//!
//! In practice, ring buffers always come in pairs so that packets can be both
//! sent and received. However, this module's interfaces operate on them singly.
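//!
//! # Example
//!
//! A minimal send/receive sketch over [`FlatRingMem`], mirroring the unit
//! tests at the end of this module. The `vmbus_ring` crate path is an
//! assumption; the `MemoryRead`/`MemoryWrite` traits come from `guestmem`.
//!
//! ```ignore
//! use guestmem::{MemoryRead, MemoryWrite};
//! use vmbus_ring::{
//!     FlatRingMem, IncomingRing, OutgoingPacket, OutgoingPacketType, OutgoingRing,
//! };
//!
//! // Both endpoints share the same backing memory.
//! let mem = FlatRingMem::new(16384);
//! let outgoing_ring = OutgoingRing::new(&mem).unwrap();
//! let incoming_ring = IncomingRing::new(&mem).unwrap();
//!
//! // Write an in-band packet and commit it.
//! let mut out = outgoing_ring.outgoing().unwrap();
//! let range = outgoing_ring
//!     .write(
//!         &mut out,
//!         &OutgoingPacket {
//!             transaction_id: 0,
//!             size: 8,
//!             typ: OutgoingPacketType::InBandNoCompletion,
//!         },
//!     )
//!     .unwrap();
//! range.writer(&outgoing_ring).write(&[1u8; 8]).unwrap();
//! let _signal = outgoing_ring.commit_write(&mut out);
//!
//! // Read the packet back and commit the read.
//! let mut inc = incoming_ring.incoming().unwrap();
//! let packet = incoming_ring.read(&mut inc).unwrap();
//! let payload = packet.payload.reader(&incoming_ring).read_all().unwrap();
//! assert_eq!(payload, [1u8; 8]);
//! let _signal = incoming_ring.commit_read(&mut inc);
//! ```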

#![expect(missing_docs)]
#![forbid(unsafe_code)]

pub mod gparange;

pub use pipe_protocol::*;
pub use protocol::PAGE_SIZE;
pub use protocol::TransferPageRange;

use crate::gparange::GpaRange;
use guestmem::AccessError;
use guestmem::MemoryRead;
use guestmem::MemoryWrite;
use guestmem::ranges::PagedRange;
use inspect::Inspect;
use protocol::*;
use safeatomic::AtomicSliceOps;
use std::fmt::Debug;
use std::sync::Arc;
use std::sync::atomic::AtomicU8;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use thiserror::Error;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

mod pipe_protocol {
    use zerocopy::FromBytes;
    use zerocopy::Immutable;
    use zerocopy::IntoBytes;
    use zerocopy::KnownLayout;

    /// Pipe channel packets are prefixed with this header to allow for
    /// payload lengths that are not multiples of 8 bytes.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PipeHeader {
        pub packet_type: u32,
        pub len: u32,
    }

    /// Regular data packet.
    pub const PIPE_PACKET_TYPE_DATA: u32 = 1;
    /// Data packet that has been partially consumed, in which case the `len`
    /// field's high word is the number of bytes already read. The opposite
    /// endpoint will never write this type.
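    ///
    /// Illustration (an assumption: the low word presumably remains the total
    /// payload length, as with `PIPE_PACKET_TYPE_DATA`): a partial packet with
    /// `len == 0x0003_0010` would describe a 16-byte payload of which 3 bytes
    /// have already been read.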
    pub const PIPE_PACKET_TYPE_PARTIAL: u32 = 2;
    /// Setup a GPA direct buffer for RDMA.
    pub const PIPE_PACKET_TYPE_SETUP_GPA_DIRECT: u32 = 3;
    /// Tear down a GPA direct buffer.
    pub const PIPE_PACKET_TYPE_TEARDOWN_GPA_DIRECT: u32 = 4;

    /// The maximum size of a pipe packet's payload.
    pub const MAXIMUM_PIPE_PACKET_SIZE: usize = 16384;
}

mod protocol {
    use crate::CONTROL_WORD_COUNT;
    use inspect::Inspect;
    use std::fmt::Debug;
    use std::sync::atomic::AtomicU32;
    use std::sync::atomic::Ordering;
    use zerocopy::FromBytes;
    use zerocopy::Immutable;
    use zerocopy::IntoBytes;
    use zerocopy::KnownLayout;

    /// VmBus ring buffers are sized in multiples of 4KB pages, with a 4KB control page.
    pub const PAGE_SIZE: usize = 4096;

    /// The descriptor header on every packet.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PacketDescriptor {
        pub packet_type: u16,
        pub data_offset8: u16,
        pub length8: u16,
        pub flags: u16,
        pub transaction_id: u64,
    }

    /// A control page accessor.
    pub struct Control<'a>(pub &'a [AtomicU32; CONTROL_WORD_COUNT]);

    impl Control<'_> {
        pub fn inp(&self) -> &AtomicU32 {
            &self.0[0]
        }
        pub fn outp(&self) -> &AtomicU32 {
            &self.0[1]
        }
        pub fn interrupt_mask(&self) -> &AtomicU32 {
            &self.0[2]
        }
        pub fn pending_send_size(&self) -> &AtomicU32 {
            &self.0[3]
        }
        pub fn feature_bits(&self) -> &AtomicU32 {
            &self.0[16]
        }
    }
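
    // Word layout of the control page as used by the accessors above:
    // word 0 = in (write) offset, word 1 = out (read) offset, word 2 =
    // interrupt mask, word 3 = pending send size, word 16 = feature bits.
    // The other words are not used by this module.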

    impl Inspect for Control<'_> {
        fn inspect(&self, req: inspect::Request<'_>) {
            req.respond()
                .hex("in", self.inp().load(Ordering::Relaxed))
                .hex("out", self.outp().load(Ordering::Relaxed))
                .hex(
                    "interrupt_mask",
                    self.interrupt_mask().load(Ordering::Relaxed),
                )
                .hex(
                    "pending_send_size",
                    self.pending_send_size().load(Ordering::Relaxed),
                )
                .hex("feature_bits", self.feature_bits().load(Ordering::Relaxed));
        }
    }

    impl Debug for Control<'_> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            f.debug_struct("Control")
                .field("inp", self.inp())
                .field("outp", self.outp())
                .field("interrupt_mask", self.interrupt_mask())
                .field("pending_send_size", self.pending_send_size())
                .field("feature_bits", self.feature_bits())
                .finish()
        }
    }

    /// If set, the endpoint supports sending signals when the number of free
    /// bytes in the ring reaches or exceeds `pending_send_size`.
    pub const FEATURE_SUPPORTS_PENDING_SEND_SIZE: u32 = 1;

    /// A transfer range specifying a length and offset within a transfer page
    /// set. Only used by NetVSP.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TransferPageRange {
        pub byte_count: u32,
        pub byte_offset: u32,
    }

    /// The extended portion of the packet descriptor that describes a transfer
    /// page packet.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TransferPageHeader {
        pub transfer_page_set_id: u16,
        pub reserved: u16, // may have garbage non-zero values
        pub range_count: u32,
    }

    /// The extended portion of the packet descriptor describing a GPA direct packet.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GpaDirectHeader {
        pub reserved: u32, // may have garbage non-zero values
        pub range_count: u32,
    }

    pub const PACKET_FLAG_COMPLETION_REQUESTED: u16 = 1;

    /// The packet footer.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct Footer {
        pub reserved: u32,
        /// The ring offset of the packet.
        pub offset: u32,
    }
}

#[derive(Copy, Clone, Debug, Error)]
pub enum Error {
    #[error("invalid ring buffer pointer")]
    InvalidRingPointer,
    #[error("invalid message length")]
    InvalidMessageLength,
    #[error("invalid data available")]
    InvalidDataAvailable,
    #[error("ring buffer too large")]
    RingTooLarge,
    #[error("invalid ring memory")]
    InvalidRingMemory,
    #[error("invalid descriptor offset or length")]
    InvalidDescriptorLengths,
    #[error("unknown packet descriptor flags")]
    InvalidDescriptorFlags,
    #[error("unknown packet descriptor type")]
    InvalidDescriptorType,
    #[error("invalid range count for gpa direct packet")]
    InvalidDescriptorGpaDirectRangeCount,
    #[error("the interrupt mask bit was supposed to be clear but it is set")]
    InterruptsExternallyMasked,
}

#[derive(Copy, Clone, Debug, Error)]
pub enum ReadError {
    #[error("ring buffer empty")]
    Empty,
    #[error(transparent)]
    Corrupt(#[from] Error),
}

#[derive(Copy, Clone, Debug, Error)]
pub enum WriteError {
    #[error("ring buffer full")]
    Full(usize),
    #[error(transparent)]
    Corrupt(#[from] Error),
}

/// A range within a ring buffer.
#[derive(Copy, Clone, Debug)]
pub struct RingRange {
    off: u32,
    size: u32,
}

impl RingRange {
    /// The empty range.
    pub fn empty() -> Self {
        RingRange { off: 0, size: 0 }
    }

    /// Retrieves a `MemoryWrite` that allows for writing to the range.
    pub fn writer<'a, T: Ring>(&self, ring: &'a T) -> RingRangeWriter<'a, T::Memory> {
        RingRangeWriter {
            start: self.off,
            end: self.off + self.size,
            mem: ring.mem(),
        }
    }

    /// Retrieves a `MemoryRead` that allows for reading from the range.
    pub fn reader<'a, T: Ring>(&self, ring: &'a T) -> RingRangeReader<'a, T::Memory> {
        RingRangeReader {
            start: self.off,
            end: self.off + self.size,
            mem: ring.mem(),
        }
    }

    /// Returns the length of the range.
    pub fn len(&self) -> usize {
        self.size as usize
    }

    /// Checks if this range is empty.
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }
}

/// A type implementing `MemoryRead` accessing a `RingRange`.
pub struct RingRangeReader<'a, T> {
    start: u32,
    end: u32,
    mem: &'a T,
}

impl<T: RingMem> MemoryRead for RingRangeReader<'_, T> {
    fn read(&mut self, data: &mut [u8]) -> Result<&mut Self, AccessError> {
        if self.len() < data.len() {
            return Err(AccessError::OutOfRange(self.len(), data.len()));
        }
        self.mem.read_at(self.start as usize, data);
        self.start += data.len() as u32;
        Ok(self)
    }

    fn skip(&mut self, len: usize) -> Result<&mut Self, AccessError> {
        if self.len() < len {
            return Err(AccessError::OutOfRange(self.len(), len));
        }
        self.start += len as u32;
        Ok(self)
    }

    fn len(&self) -> usize {
        (self.end - self.start) as usize
    }
}

/// A type implementing `MemoryWrite` accessing a `RingRange`.
pub struct RingRangeWriter<'a, T> {
    start: u32,
    end: u32,
    mem: &'a T,
}

impl<T: RingMem> MemoryWrite for RingRangeWriter<'_, T> {
    fn write(&mut self, data: &[u8]) -> Result<(), AccessError> {
        if self.len() < data.len() {
            return Err(AccessError::OutOfRange(self.len(), data.len()));
        }
        self.mem.write_at(self.start as usize, data);
        self.start += data.len() as u32;
        Ok(())
    }

    fn fill(&mut self, _val: u8, _len: usize) -> Result<(), AccessError> {
        unimplemented!()
    }

    fn len(&self) -> usize {
        (self.end - self.start) as usize
    }
}

/// The alternate types of incoming packets. For packets with external data,
/// includes a `RingRange` whose data is the variable portion of the packet
/// descriptor.
#[derive(Debug, Copy, Clone)]
pub enum IncomingPacketType {
    InBand,
    Completion,
    GpaDirect(u32, RingRange),
    TransferPages(u16, u32, RingRange),
}

/// An incoming packet.
#[derive(Debug)]
pub struct IncomingPacket {
    pub transaction_id: Option<u64>,
    pub typ: IncomingPacketType,
    pub payload: RingRange,
}

const PACKET_TYPE_IN_BAND: u16 = 6;
const PACKET_TYPE_TRANSFER_PAGES: u16 = 0x7;
const PACKET_TYPE_GPA_DIRECT: u16 = 0x9;
const PACKET_TYPE_COMPLETION: u16 = 0xb;

fn parse_packet<M: RingMem>(
    ring: &M,
    ring_off: u32,
    avail: u32,
) -> Result<(u32, IncomingPacket), ReadError> {
    let mut desc = PacketDescriptor::new_zeroed();
    ring.read_aligned(ring_off as usize, desc.as_mut_bytes());
    let len = desc.length8 as u32 * 8;
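    // `length8` and `data_offset8` are in units of 8 bytes. The fixed
    // descriptor is 16 bytes (two units), so a data offset below 2 would
    // overlap the descriptor itself.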
    if desc.length8 < desc.data_offset8 || desc.data_offset8 < 2 || avail < len {
        return Err(ReadError::Corrupt(Error::InvalidDescriptorLengths));
    }

    if (desc.flags & !PACKET_FLAG_COMPLETION_REQUESTED) != 0 {
        return Err(ReadError::Corrupt(Error::InvalidDescriptorFlags));
    }
    let transaction_id = if desc.flags & PACKET_FLAG_COMPLETION_REQUESTED != 0
        || desc.packet_type == PACKET_TYPE_COMPLETION
    {
        Some(desc.transaction_id)
    } else {
        None
    };
    let typ = match desc.packet_type {
        PACKET_TYPE_IN_BAND => IncomingPacketType::InBand,
        PACKET_TYPE_COMPLETION => IncomingPacketType::Completion,
        PACKET_TYPE_TRANSFER_PAGES => {
            let mut tph = TransferPageHeader::new_zeroed();
            ring.read_aligned(ring_off as usize + 16, tph.as_mut_bytes());
            IncomingPacketType::TransferPages(
                tph.transfer_page_set_id,
                tph.range_count,
                RingRange {
                    off: ring_off + 24,
                    size: desc.data_offset8 as u32 * 8 - 24,
                },
            )
        }
        PACKET_TYPE_GPA_DIRECT => {
            let mut gph = GpaDirectHeader::new_zeroed();
            ring.read_aligned(ring_off as usize + 16, gph.as_mut_bytes());
            if gph.range_count == 0 {
                return Err(ReadError::Corrupt(
                    Error::InvalidDescriptorGpaDirectRangeCount,
                ));
            }
            IncomingPacketType::GpaDirect(
                gph.range_count,
                RingRange {
                    off: ring_off + 24,
                    size: desc.data_offset8 as u32 * 8 - 24,
                },
            )
        }
        _ => return Err(ReadError::Corrupt(Error::InvalidDescriptorType)),
    };
    let payload = RingRange {
        off: ring_off + desc.data_offset8 as u32 * 8,
        size: (desc.length8 - desc.data_offset8) as u32 * 8,
    };
    Ok((
        len,
        IncomingPacket {
            transaction_id,
            typ,
            payload,
        },
    ))
}

/// The size of the control region in 32-bit words.
pub const CONTROL_WORD_COUNT: usize = 32;

/// A trait for memory backing a ring buffer.
pub trait RingMem: Send {
    /// Returns the control page.
    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT];

    /// Reads from the data portion of the ring, wrapping (once) at the end of
    /// the ring. Precondition: `addr + data.len() <= self.len() * 2`.
    fn read_at(&self, addr: usize, data: &mut [u8]);

    /// Reads from the data portion of the ring, as in [`RingMem::read_at`]. `addr` and
    /// `data.len()` must be multiples of 8.
    ///
    /// `read_at` may be faster for large or variable-sized reads.
    fn read_aligned(&self, addr: usize, data: &mut [u8]) {
        debug_assert!(addr % 8 == 0);
        debug_assert!(data.len() % 8 == 0);
        self.read_at(addr, data)
    }

    /// Writes to the data portion of the ring, wrapping (once) at the end of
    /// the ring. Precondition: `addr + data.len() <= self.len() * 2`.
    fn write_at(&self, addr: usize, data: &[u8]);

    /// Writes to the data portion of the ring, as in [`RingMem::write_at`]. `addr` and
    /// `data.len()` must be multiples of 8.
    ///
    /// `write_at` may be faster for large or variable-sized writes.
    fn write_aligned(&self, addr: usize, data: &[u8]) {
        debug_assert!(addr % 8 == 0);
        debug_assert!(data.len() % 8 == 0);
        self.write_at(addr, data)
    }

    /// Returns the length of the ring in bytes.
    fn len(&self) -> usize;
}

/// Implementation of `RingMem` for references. Useful for tests.
impl<T: RingMem + Sync> RingMem for &'_ T {
    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT] {
        (*self).control()
    }
    fn read_at(&self, addr: usize, data: &mut [u8]) {
        (*self).read_at(addr, data)
    }
    fn write_at(&self, addr: usize, data: &[u8]) {
        (*self).write_at(addr, data)
    }
    fn len(&self) -> usize {
        (*self).len()
    }

    fn read_aligned(&self, addr: usize, data: &mut [u8]) {
        (*self).read_aligned(addr, data)
    }

    fn write_aligned(&self, addr: usize, data: &[u8]) {
        (*self).write_aligned(addr, data)
    }
}

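/// An implementation of [`RingMem`] over a single contiguous block of memory
/// (for example, a single virtual mapping of the ring), where the first 4KB
/// page is the control page and the remainder is the ring data.
///
/// A minimal construction sketch over heap memory (the `vmbus_ring` crate
/// path is an assumption):
///
/// ```ignore
/// use std::sync::atomic::AtomicU8;
/// use vmbus_ring::{IncomingRing, PAGE_SIZE, SingleMappedRingMem};
///
/// // One control page followed by four data pages.
/// let mut buf = Vec::new();
/// buf.resize_with(PAGE_SIZE * 5, AtomicU8::default);
/// let mem = SingleMappedRingMem(buf);
/// let ring = IncomingRing::new(mem).unwrap();
/// ```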
#[derive(Debug)]
pub struct SingleMappedRingMem<T>(pub T);

impl<T: AsRef<[AtomicU8]>> SingleMappedRingMem<T> {
    fn control_range(&self) -> &[AtomicU8; PAGE_SIZE] {
        self.0.as_ref()[..PAGE_SIZE].try_into().unwrap()
    }

    fn data(&self) -> &[AtomicU8] {
        &self.0.as_ref()[PAGE_SIZE..]
    }
}

impl<T: AsRef<[AtomicU8]> + Send> RingMem for SingleMappedRingMem<T> {
    fn read_at(&self, mut addr: usize, data: &mut [u8]) {
        if addr >= self.len() {
            addr -= self.len();
        }
        let this_data = self.data();
        if addr + data.len() <= self.len() {
            this_data[addr..addr + data.len()].atomic_read(data);
        } else {
            let data_len = data.len();
            let (first, last) = data.split_at_mut(self.len() - addr);
            this_data[addr..].atomic_read(first);
            this_data[..data_len - (self.len() - addr)].atomic_read(last);
        }
    }

    fn write_at(&self, mut addr: usize, data: &[u8]) {
        if addr > self.len() {
            addr -= self.len();
        }
        let this_data = self.data();
        if addr + data.len() <= self.len() {
            this_data[addr..addr + data.len()].atomic_write(data);
        } else {
            let (first, last) = data.split_at(self.len() - addr);
            this_data[addr..].atomic_write(first);
            this_data[..data.len() - (self.len() - addr)].atomic_write(last);
        }
    }

    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT] {
        self.control_range().as_atomic_slice().unwrap()[..CONTROL_WORD_COUNT]
            .try_into()
            .unwrap()
    }

    fn len(&self) -> usize {
        self.data().len()
    }
}

/// An implementation of `RingMem` over a flat allocation. Useful for tests.
#[derive(Clone)]
pub struct FlatRingMem {
    inner: Arc<FlatRingInner>,
}

struct FlatRingInner {
    control: [AtomicU32; CONTROL_WORD_COUNT],
    data: Vec<AtomicU8>,
}

impl FlatRingMem {
    /// Allocates new ring memory with a data area of `len` bytes.
    pub fn new(len: usize) -> Self {
        let mut data = Vec::new();
        data.resize_with(len, Default::default);
        Self {
            inner: Arc::new(FlatRingInner {
                control: [0; CONTROL_WORD_COUNT].map(Into::into),
                data,
            }),
        }
    }
}

impl Debug for FlatRingMem {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("FlatRingMem").finish()
    }
}

impl RingMem for FlatRingMem {
    fn read_at(&self, mut addr: usize, data: &mut [u8]) {
        if addr > self.len() {
            addr -= self.len();
        }
        if addr + data.len() <= self.len() {
            self.inner.data[addr..addr + data.len()].atomic_read(data);
        } else {
            let data_len = data.len();
            let (first, last) = data.split_at_mut(self.len() - addr);
            self.inner.data[addr..].atomic_read(first);
            self.inner.data[..data_len - (self.len() - addr)].atomic_read(last);
        }
    }

    fn write_at(&self, mut addr: usize, data: &[u8]) {
        if addr > self.len() {
            addr -= self.len();
        }
        if addr + data.len() <= self.len() {
            self.inner.data[addr..addr + data.len()].atomic_write(data);
        } else {
            let (first, last) = data.split_at(self.len() - addr);
            self.inner.data[addr..].atomic_write(first);
            self.inner.data[..data.len() - (self.len() - addr)].atomic_write(last);
        }
    }

    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT] {
        &self.inner.control
    }

    fn len(&self) -> usize {
        self.inner.data.len()
    }
}

/// A trait for ring buffer memory divided into discontiguous pages.
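///
/// A minimal implementation sketch over heap pages (names are illustrative;
/// the `vmbus_ring` crate path is an assumption). Note the modulo so that
/// page `n` and page `n + data_page_count` resolve to the same storage:
///
/// ```ignore
/// use std::sync::atomic::AtomicU8;
/// use vmbus_ring::{PAGE_SIZE, PagedMemory};
///
/// struct HeapPages {
///     control: Box<[AtomicU8; PAGE_SIZE]>,
///     data: Vec<Box<[AtomicU8; PAGE_SIZE]>>,
/// }
///
/// impl PagedMemory for HeapPages {
///     fn control(&self) -> &[AtomicU8; PAGE_SIZE] {
///         &self.control
///     }
///     fn data_page_count(&self) -> usize {
///         self.data.len()
///     }
///     fn data(&self, page: usize) -> &[AtomicU8; PAGE_SIZE] {
///         // The ring is logically mapped twice, so wrap the page index.
///         &self.data[page % self.data.len()]
///     }
/// }
/// ```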
pub trait PagedMemory: Send {
    /// Returns the control page.
    fn control(&self) -> &[AtomicU8; PAGE_SIZE];
    /// Returns the number of data pages.
    fn data_page_count(&self) -> usize;
    /// Returns a data page.
    ///
    /// For performance reasons, `page` may be in `0..data_page_count*2`,
    /// representing the ring logically mapped twice consecutively. The
    /// implementation should return the same page for `n` and `n +
    /// data_page_count`.
    fn data(&self, page: usize) -> &[AtomicU8; PAGE_SIZE];
}

/// An implementation of [`RingMem`] on top of discontiguous pages.
#[derive(Debug, Clone)]
pub struct PagedRingMem<T>(T);

impl<T: PagedMemory> PagedRingMem<T> {
    /// Returns a new ring memory wrapping a type implementing [`PagedMemory`].
    pub fn new(inner: T) -> Self {
        Self(inner)
    }
}

impl<T: PagedMemory> RingMem for PagedRingMem<T> {
    fn len(&self) -> usize {
        self.0.data_page_count() * PAGE_SIZE
    }

    fn read_at(&self, mut addr: usize, mut data: &mut [u8]) {
        while !data.is_empty() {
            let page = addr / PAGE_SIZE;
            let offset = addr % PAGE_SIZE;
            let offset_end = PAGE_SIZE.min(offset + data.len());
            let len = offset_end - offset;
            let (this, next) = data.split_at_mut(len);
            self.0.data(page)[offset..offset_end].atomic_read(this);
            addr += len;
            data = next;
        }
    }

    fn write_at(&self, mut addr: usize, mut data: &[u8]) {
        while !data.is_empty() {
            let page = addr / PAGE_SIZE;
            let offset = addr % PAGE_SIZE;
            let offset_end = PAGE_SIZE.min(offset + data.len());
            let len = offset_end - offset;
            let (this, next) = data.split_at(len);
            self.0.data(page)[offset..offset_end].atomic_write(this);
            addr += len;
            data = next;
        }
    }

    #[inline]
    fn read_aligned(&self, addr: usize, data: &mut [u8]) {
        debug_assert!(addr % 8 == 0);
        debug_assert!(data.len() % 8 == 0);
        for (i, b) in data.chunks_exact_mut(8).enumerate() {
            let addr = (addr & !7) + i * 8;
            let page = addr / PAGE_SIZE;
            let offset = addr % PAGE_SIZE;
            b.copy_from_slice(
                &self.0.data(page)[offset..offset + 8]
                    .as_atomic::<AtomicU64>()
                    .unwrap()
                    .load(Ordering::Relaxed)
                    .to_ne_bytes(),
            );
        }
    }

    #[inline]
    fn write_aligned(&self, addr: usize, data: &[u8]) {
        debug_assert!(addr % 8 == 0);
        debug_assert!(data.len() % 8 == 0);
        for (i, b) in data.chunks_exact(8).enumerate() {
            let addr = (addr & !7) + i * 8;
            let page = addr / PAGE_SIZE;
            let offset = addr % PAGE_SIZE;
            self.0.data(page)[offset..offset + 8]
                .as_atomic::<AtomicU64>()
                .unwrap()
                .store(u64::from_ne_bytes(b.try_into().unwrap()), Ordering::Relaxed);
        }
    }

    #[inline]
    fn control(&self) -> &[AtomicU32; CONTROL_WORD_COUNT] {
        self.0.control().as_atomic_slice().unwrap()[..CONTROL_WORD_COUNT]
            .try_into()
            .unwrap()
    }
}

/// Information about an outgoing packet.
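///
/// For example (a sketch; field values are illustrative), a transfer page
/// packet that references two ranges of pre-established buffer ID 1:
///
/// ```ignore
/// use vmbus_ring::{OutgoingPacket, OutgoingPacketType, TransferPageRange};
///
/// let ranges = [
///     TransferPageRange { byte_offset: 0, byte_count: 0x100 },
///     TransferPageRange { byte_offset: 0x1000, byte_count: 0x80 },
/// ];
/// let packet = OutgoingPacket {
///     transaction_id: 1,
///     size: 24, // length of the payload written after `write` returns
///     typ: OutgoingPacketType::TransferPages(1, &ranges),
/// };
/// ```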
#[derive(Debug)]
pub struct OutgoingPacket<'a> {
    pub transaction_id: u64,
    pub size: usize,
    pub typ: OutgoingPacketType<'a>,
}

/// The outgoing packet type variants.
#[derive(Debug, Copy, Clone)]
pub enum OutgoingPacketType<'a> {
    /// A non-transactional data packet.
    InBandNoCompletion,
    /// A transactional data packet.
    InBandWithCompletion,
    /// A completion packet.
    Completion,
    /// A GPA direct packet, which can reference memory outside the ring by address.
    ///
    /// Not supported on the host side of the ring.
    GpaDirect(&'a [PagedRange<'a>]),
    /// A transfer page packet, which can reference memory outside the ring by a
    /// buffer ID and a set of offsets into some pre-established buffer
    /// (typically a GPADL).
    ///
    /// Used by networking. Should not be used in new devices--just embed the
    /// buffer offsets in the device-specific packet payload.
    TransferPages(u16, &'a [TransferPageRange]),
}

/// Namespace type with methods to compute packet sizes, for use with
/// `set_pending_send_size`.
pub struct PacketSize(());

impl PacketSize {
    /// Computes the size of an in-band packet.
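    ///
    /// For example, an in-band packet with a 13-byte payload occupies
    /// 16 (descriptor) + 16 (payload rounded up to 8) + 8 (footer) = 40 bytes.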
    pub const fn in_band(payload_len: usize) -> usize {
        size_of::<PacketDescriptor>() + ((payload_len + 7) & !7) + size_of::<Footer>()
    }

    /// Computes the size of a completion packet.
    pub const fn completion(payload_len: usize) -> usize {
        Self::in_band(payload_len)
    }

    // Computes the size of a gpa direct packet.
    // pub fn gpa_direct()

    /// Computes the size of a transfer page packet.
    pub const fn transfer_pages(count: usize, payload_len: usize) -> usize {
        Self::in_band(payload_len)
            + size_of::<TransferPageHeader>()
            + count * size_of::<TransferPageRange>()
    }
}

/// A trait shared by the incoming and outgoing ring buffers. Used primarily
/// with `RingRange::reader` and `RingRange::writer`.
pub trait Ring {
    /// The underlying memory type.
    type Memory: RingMem;

    /// The backing memory of the ring buffer.
    fn mem(&self) -> &Self::Memory;
}

/// The interface to the receiving endpoint of a ring buffer.
#[derive(Debug)]
pub struct IncomingRing<M: RingMem> {
    inner: InnerRing<M>,
}

impl<M: RingMem> Inspect for IncomingRing<M> {
    fn inspect(&self, req: inspect::Request<'_>) {
        self.inner.inspect(req);
    }
}

/// The current incoming ring state.
#[derive(Debug, Clone, Inspect)]
pub struct IncomingOffset {
    #[inspect(hex)]
    cached_in: u32,
    #[inspect(hex)]
    committed_out: u32,
    #[inspect(hex)]
    next_out: u32,
}

impl IncomingOffset {
    /// Reverts the removal of packets that have not yet been committed.
    pub fn revert(&mut self) {
        self.next_out = self.committed_out;
    }
}

impl<M: RingMem> Ring for IncomingRing<M> {
    type Memory = M;
    fn mem(&self) -> &Self::Memory {
        &self.inner.mem
    }
}

impl<M: RingMem> IncomingRing<M> {
    /// Returns a new incoming ring. Fails if the ring memory is not sized or
    /// aligned correctly or if the ring control data is corrupt.
    pub fn new(mem: M) -> Result<Self, Error> {
        let inner = InnerRing::new(mem)?;
        // Start with interrupts masked.
        let control = inner.control();
        control.interrupt_mask().store(1, Ordering::Relaxed);
        Ok(Self { inner })
    }

    /// Indicates whether pending send size notification is supported on
    /// the vmbus ring.
    pub fn supports_pending_send_size(&self) -> bool {
        let feature_bits = self.inner.control().feature_bits().load(Ordering::Relaxed);
        (feature_bits & FEATURE_SUPPORTS_PENDING_SEND_SIZE) != 0
    }

    /// Enables or disables the interrupt mask, declaring to the opposite
    /// endpoint that interrupts should not or should be sent for a ring
    /// empty-to-non-empty transition.
    pub fn set_interrupt_mask(&self, state: bool) {
        self.inner
            .control()
            .interrupt_mask()
            .store(state as u32, Ordering::SeqCst);
    }

    /// Verifies that interrupts are currently unmasked.
    ///
    /// This can be used to check that ring state is consistent.
    pub fn verify_interrupts_unmasked(&self) -> Result<(), Error> {
        if self
            .inner
            .control()
            .interrupt_mask()
            .load(Ordering::Relaxed)
            == 0
        {
            Ok(())
        } else {
            Err(Error::InterruptsExternallyMasked)
        }
    }

    /// Returns the current incoming offset, for passing to `read` and
    /// `commit_read`.
    pub fn incoming(&self) -> Result<IncomingOffset, Error> {
        let control = self.inner.control();
        let next_out = self
            .inner
            .validate(control.outp().load(Ordering::Relaxed))?;
        let cached_in = self.inner.validate(control.inp().load(Ordering::Relaxed))?;
        Ok(IncomingOffset {
            next_out,
            cached_in,
            committed_out: next_out,
        })
    }

    /// Returns true if there are any packets to read.
    pub fn can_read(&self, incoming: &mut IncomingOffset) -> Result<bool, Error> {
        let can_read = if incoming.next_out != incoming.cached_in {
            true
        } else {
            let inp = self
                .inner
                .validate(self.inner.control().inp().load(Ordering::Acquire))?;
            // Cache the new offset to ensure a stable result.
            incoming.cached_in = inp;
            incoming.next_out != inp
        };
        Ok(can_read)
    }

    /// Commits a series of packet reads, returning whether the opposite
    /// endpoint should be signaled.
    pub fn commit_read(&self, ptrs: &mut IncomingOffset) -> bool {
        if ptrs.committed_out == ptrs.next_out {
            return false;
        }
        let control = self.inner.control();
        control.outp().store(ptrs.next_out, Ordering::SeqCst);
        let pending_send_size = control.pending_send_size().load(Ordering::SeqCst);
        // Some implementations set the pending send size to the size of the
        // ring minus 1. The intent is that a signal arrive when the ring is
        // completely empty, but this is invalid since the maximum writable ring
        // size is the size of the ring minus 8. Mask off the low bits to work
        // around this.
        let pending_send_size = pending_send_size & !7;
        let signal = if pending_send_size != 0 {
            if let Ok(inp) = self.inner.validate(control.inp().load(Ordering::SeqCst)) {
                let old_free = self.inner.free(inp, ptrs.committed_out);
                let new_free = self.inner.free(inp, ptrs.next_out);
                old_free < pending_send_size && new_free >= pending_send_size
            } else {
                false
            }
        } else {
            false
        };
        ptrs.committed_out = ptrs.next_out;
        signal
    }

    /// Parses the next packet descriptor, returning the parsed information and
    /// a range that can be used to read the packet. The caller should commit
    /// the read with `commit_read` to free up space in the ring.
    pub fn read(&self, ptrs: &mut IncomingOffset) -> Result<IncomingPacket, ReadError> {
        let outp = ptrs.next_out;
        let mut inp = ptrs.cached_in;
        if inp == outp {
            inp = self
                .inner
                .validate(self.inner.control().inp().load(Ordering::Acquire))?;
            if inp == outp {
                return Err(ReadError::Empty);
            }
            ptrs.cached_in = inp;
        }
        let avail = self.inner.available(inp, outp);
        if avail < 16 {
            return Err(ReadError::Corrupt(Error::InvalidDataAvailable));
        }
        let (len, packet) = parse_packet(&self.inner.mem, outp, avail)?;
        ptrs.next_out = self
            .inner
            .add_pointer(outp, len + size_of::<Footer>() as u32);

        Ok(packet)
    }
}

/// The sending side of a ring buffer.
#[derive(Debug)]
pub struct OutgoingRing<M: RingMem> {
    inner: InnerRing<M>,
}

impl<M: RingMem> Inspect for OutgoingRing<M> {
    fn inspect(&self, req: inspect::Request<'_>) {
        self.inner.inspect(req);
    }
}

/// An outgoing ring offset, used to determine the position to write packets to.
#[derive(Debug, Clone, Inspect)]
pub struct OutgoingOffset {
    #[inspect(hex)]
    cached_out: u32,
    #[inspect(hex)]
    committed_in: u32,
    #[inspect(hex)]
    next_in: u32,
}

impl OutgoingOffset {
    /// Reverts the insertion of packets that have not yet been committed.
    pub fn revert(&mut self) {
        self.next_in = self.committed_in;
    }
}

impl<M: RingMem> Ring for OutgoingRing<M> {
    type Memory = M;
    fn mem(&self) -> &Self::Memory {
        &self.inner.mem
    }
}

impl<M: RingMem> OutgoingRing<M> {
    /// Returns a new outgoing ring over `mem`.
    pub fn new(mem: M) -> Result<Self, Error> {
        let inner = InnerRing::new(mem)?;
        // Report to the opposite endpoint that we will send interrupts for a
        // ring full to ring non-full transition. Feature bits are set by the
        // sending side.
        let control = inner.control();
        control
            .feature_bits()
            .store(FEATURE_SUPPORTS_PENDING_SEND_SIZE, Ordering::Relaxed);
        // Start with no interrupt requested.
        control.pending_send_size().store(0, Ordering::Relaxed);
        Ok(Self { inner })
    }

    /// Returns the current outgoing offset, for passing to `write` and
    /// ultimately `commit_write`.
    pub fn outgoing(&self) -> Result<OutgoingOffset, Error> {
        let control = self.inner.control();
        let next_in = self.inner.validate(control.inp().load(Ordering::Relaxed))?;
        let cached_out = self
            .inner
            .validate(control.outp().load(Ordering::Relaxed))?;
        Ok(OutgoingOffset {
            cached_out,
            committed_in: next_in,
            next_in,
        })
    }

    /// Sets the pending send size: the number of bytes that should be free in
    /// the ring before the opposite endpoint sends a ring-non-full signal.
    ///
    /// Fails if the packet size is larger than the ring's maximum packet size.
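    ///
    /// A typical pattern (a sketch) after a `WriteError::Full` result is to
    /// request a signal once a specific packet would fit:
    ///
    /// ```ignore
    /// // Ask to be signaled once an in-band packet with a 128-byte payload fits.
    /// ring.set_pending_send_size(PacketSize::in_band(128))?;
    /// ```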
    pub fn set_pending_send_size(&self, len: usize) -> Result<(), Error> {
        if len > self.maximum_packet_size() {
            return Err(Error::InvalidMessageLength);
        }
        self.inner
            .control()
            .pending_send_size()
            .store((len as u32 + 7) & !7, Ordering::SeqCst);

        Ok(())
    }

    /// Returns the maximum packet size that can fit in the ring.
    pub fn maximum_packet_size(&self) -> usize {
        self.inner.len() as usize - 8
    }

    /// Returns whether a packet can fit in the ring starting at the specified
    /// offset.
    pub fn can_write(&self, ptrs: &mut OutgoingOffset, len: usize) -> Result<bool, Error> {
        let can_write = if self.inner.free(ptrs.next_in, ptrs.cached_out) as usize >= len {
            true
        } else {
            let outp = self
                .inner
                .validate(self.inner.control().outp().load(Ordering::Relaxed))?;

            // Cache the new offset to ensure a stable result.
            ptrs.cached_out = outp;
            self.inner.free(ptrs.next_in, outp) as usize >= len
        };
        Ok(can_write)
    }

    /// Commits a series of writes that ended at the specified offset, returning
    /// whether the opposite endpoint should be signaled.
    pub fn commit_write(&self, ptrs: &mut OutgoingOffset) -> bool {
        if ptrs.committed_in == ptrs.next_in {
            return false;
        }
        let inp = ptrs.next_in;

        // Update the ring offset and check if the opposite endpoint needs to be
        // signaled. This is the case only if interrupts are unmasked and the
        // ring was previously empty before this write.
        let control = self.inner.control();
        control.inp().store(inp, Ordering::SeqCst);
        let needs_interrupt = control.interrupt_mask().load(Ordering::SeqCst) == 0
            && control.outp().load(Ordering::SeqCst) == ptrs.committed_in;

        ptrs.committed_in = inp;
        needs_interrupt
    }

    /// Writes the header of the next packet and returns the ring range for the
    /// payload. The caller should write the payload, then commit the write (or
    /// multiple writes) with `commit_write`.
    ///
    /// Returns `Err(WriteError::Full(len))` if the ring is full, where `len` is
    /// the number of bytes needed to write the requested packet.
    pub fn write(
        &self,
        ptrs: &mut OutgoingOffset,
        packet: &OutgoingPacket<'_>,
    ) -> Result<RingRange, WriteError> {
        const DESCRIPTOR_SIZE: usize = size_of::<PacketDescriptor>();
        let (packet_type, header_size, flags) = match packet.typ {
            OutgoingPacketType::InBandNoCompletion => (PACKET_TYPE_IN_BAND, DESCRIPTOR_SIZE, 0),
            OutgoingPacketType::InBandWithCompletion => (
                PACKET_TYPE_IN_BAND,
                DESCRIPTOR_SIZE,
                PACKET_FLAG_COMPLETION_REQUESTED,
            ),
            OutgoingPacketType::Completion => (PACKET_TYPE_COMPLETION, DESCRIPTOR_SIZE, 0),
            OutgoingPacketType::GpaDirect(ranges) => (
                PACKET_TYPE_GPA_DIRECT,
                DESCRIPTOR_SIZE
                    + size_of::<GpaDirectHeader>()
                    + ranges.iter().fold(0, |a, range| {
                        a + size_of::<GpaRange>() + size_of_val(range.gpns())
                    }),
                PACKET_FLAG_COMPLETION_REQUESTED,
            ),
            OutgoingPacketType::TransferPages(_, ranges) => (
                PACKET_TYPE_TRANSFER_PAGES,
                DESCRIPTOR_SIZE + size_of::<TransferPageHeader>() + size_of_val(ranges),
                PACKET_FLAG_COMPLETION_REQUESTED,
            ),
        };
        let msg_len = (packet.size + header_size).div_ceil(8) * 8;
        let total_msg_len = (msg_len + size_of::<Footer>()) as u32;
        if total_msg_len >= self.inner.len() - 8 {
            return Err(WriteError::Corrupt(Error::InvalidMessageLength));
        }
        let inp = ptrs.next_in;
        let mut outp = ptrs.cached_out;
        if self.inner.free(inp, outp) < total_msg_len {
            outp = self
                .inner
                .validate(self.inner.control().outp().load(Ordering::Relaxed))?;
            if self.inner.free(inp, outp) < total_msg_len {
                return Err(WriteError::Full(total_msg_len as usize));
            }
            ptrs.cached_out = outp;
        }
        let desc = PacketDescriptor {
            packet_type,
            data_offset8: header_size as u16 / 8,
            length8: (msg_len / 8) as u16,
            flags,
            transaction_id: packet.transaction_id,
        };

        let footer = Footer {
            reserved: 0,
            offset: inp,
        };

        let off = inp as usize;
        self.inner.mem.write_aligned(off, desc.as_bytes());
        match packet.typ {
            OutgoingPacketType::GpaDirect(ranges) => {
                let mut writer = RingRange {
                    off: (off + DESCRIPTOR_SIZE) as u32,
                    size: header_size as u32,
                }
                .writer(self);
                let gpa_header = GpaDirectHeader {
                    reserved: 0,
                    range_count: ranges.len() as u32,
                };
                writer
                    .write(gpa_header.as_bytes())
                    .map_err(|_| WriteError::Corrupt(Error::InvalidMessageLength))?;

                for range in ranges {
                    let gpa_rng = GpaRange {
                        len: range.len() as u32,
                        offset: range.offset() as u32,
                    };
                    writer
                        .write(gpa_rng.as_bytes())
                        .map_err(|_| WriteError::Corrupt(Error::InvalidMessageLength))?;
                    writer
                        .write(range.gpns().as_bytes())
                        .map_err(|_| WriteError::Corrupt(Error::InvalidMessageLength))?;
                }
            }
            OutgoingPacketType::TransferPages(tp_id, ranges) => {
                let tp_header = TransferPageHeader {
                    transfer_page_set_id: tp_id,
                    reserved: 0,
                    range_count: ranges.len() as u32,
                };
                self.inner
                    .mem
                    .write_aligned(off + DESCRIPTOR_SIZE, tp_header.as_bytes());
                for (i, range) in ranges.iter().enumerate() {
                    self.inner.mem.write_aligned(
                        off + DESCRIPTOR_SIZE + size_of_val(&tp_header) + i * 8,
                        range.as_bytes(),
                    );
                }
            }
            _ => (),
        }

        self.inner
            .mem
            .write_aligned(off + msg_len, footer.as_bytes());
        ptrs.next_in = self.inner.add_pointer(inp, total_msg_len);
        Ok(RingRange {
            off: inp + header_size as u32,
            size: packet.size as u32,
        })
    }
}

struct InnerRing<M: RingMem> {
    mem: M,
    size: u32,
}

impl<M: RingMem> Inspect for InnerRing<M> {
    fn inspect(&self, req: inspect::Request<'_>) {
        req.respond()
            .hex("ring_size", self.size)
            .field("control", self.control());
    }
}

/// Inspects ring buffer state without creating an IncomingRing or OutgoingRing
/// structure.
///
/// # Panics
///
/// Panics if control_page is not aligned.
pub fn inspect_ring(control_page: &guestmem::Page, response: &mut inspect::Response<'_>) {
    let control = control_page.as_atomic_slice().unwrap()[..CONTROL_WORD_COUNT]
        .try_into()
        .unwrap();

    response.field("control", Control(control));
}

/// Returns whether a ring buffer is in a state where the receiving end might
/// need a signal.
pub fn reader_needs_signal<M: RingMem>(mem: M) -> bool {
    InnerRing::new(mem).is_ok_and(|ring| {
        let control = ring.control();
        control.interrupt_mask().load(Ordering::Relaxed) == 0
            && (control.inp().load(Ordering::Relaxed) != control.outp().load(Ordering::Relaxed))
    })
}

/// Returns whether a ring buffer is in a state where the sending end might need
/// a signal.
pub fn writer_needs_signal<M: RingMem>(mem: M) -> bool {
    InnerRing::new(mem).is_ok_and(|ring| {
        let control = ring.control();
        let pending_size = control.pending_send_size().load(Ordering::Relaxed);
        pending_size != 0
            && ring.free(
                control.inp().load(Ordering::Relaxed),
                control.outp().load(Ordering::Relaxed),
            ) >= pending_size
    })
}

impl<M: RingMem> Debug for InnerRing<M> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("InnerRing")
            .field("control", &self.control())
            .field("size", &self.size)
            .finish()
    }
}

impl<M: RingMem> InnerRing<M> {
    pub fn new(mem: M) -> Result<Self, Error> {
        let ring_size = u32::try_from(mem.len()).map_err(|_| Error::InvalidRingMemory)?;
        if ring_size % 4096 != 0 {
            return Err(Error::InvalidRingMemory);
        }
        let ring = InnerRing {
            mem,
            size: ring_size,
        };
        Ok(ring)
    }

    fn control(&self) -> Control<'_> {
        Control(self.mem.control())
    }

    fn len(&self) -> u32 {
        self.size
    }

    fn validate(&self, p: u32) -> Result<u32, Error> {
        if p >= self.size || p % 8 != 0 {
            Err(Error::InvalidRingPointer)
        } else {
            Ok(p)
        }
    }

    fn add_pointer(&self, p: u32, off: u32) -> u32 {
        let np = p + off;
        if np >= self.size {
            assert!(np < self.size * 2);
            np - self.size
        } else {
            np
        }
    }

    fn available(&self, inp: u32, outp: u32) -> u32 {
        if inp > outp {
            // |____outp....inp_____|
            inp - outp
        } else {
            // |....inp____outp.....|
            self.size + inp - outp
        }
    }
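
    // Example: with size = 0x1000, inp = 0x100, and outp = 0xf00, available()
    // returns 0x1000 + 0x100 - 0xf00 = 0x200 readable bytes, while free()
    // returns 0xf00 - 0x100 - 8 = 0xdf8 writable bytes.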

    fn free(&self, inp: u32, outp: u32) -> u32 {
        // It's not possible to fully fill the ring since that state would be
        // indistinguishable from the empty ring. So subtract 8 bytes from the
        // result.
        if outp > inp {
            // |....inp____outp.....|
            outp - inp - 8
        } else {
            // |____outp....inp_____|
            self.size - (inp - outp) - 8
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn write_simple<T: RingMem>(out_ring: &mut OutgoingRing<T>, buf: &[u8]) -> Option<bool> {
        let mut outgoing = out_ring.outgoing().unwrap();
        match out_ring.write(
            &mut outgoing,
            &OutgoingPacket {
                typ: OutgoingPacketType::InBandNoCompletion,
                size: buf.len(),
                transaction_id: 0,
            },
        ) {
            Ok(range) => {
                range.writer(out_ring).write(buf).unwrap();
                Some(out_ring.commit_write(&mut outgoing))
            }
            Err(WriteError::Full(_)) => None,
            Err(err) => panic!("{}", err),
        }
    }

    fn read_simple<T: RingMem>(in_ring: &mut IncomingRing<T>) -> (Vec<u8>, bool) {
        let mut incoming = in_ring.incoming().unwrap();
        let msg = in_ring
            .read(&mut incoming)
            .unwrap()
            .payload
            .reader(in_ring)
            .read_all()
            .unwrap();
        let signal = in_ring.commit_read(&mut incoming);
        (msg, signal)
    }

    #[test]
    fn test_ring() {
        let rmem = FlatRingMem::new(16384);
        let mut in_ring = IncomingRing::new(&rmem).unwrap();
        in_ring.set_interrupt_mask(false);
        let mut out_ring = OutgoingRing::new(&rmem).unwrap();

        let p = &[1, 2, 3, 4, 5, 6, 7, 8];
        assert!(write_simple(&mut out_ring, p).unwrap());

        let (msg, signal) = read_simple(&mut in_ring);
        assert!(!signal);

        assert_eq!(p, &msg[..]);
    }

    #[test]
    fn test_interrupt_mask() {
        let rmem = FlatRingMem::new(16384);
        let mut in_ring = IncomingRing::new(&rmem).unwrap();
        let mut out_ring = OutgoingRing::new(&rmem).unwrap();

        // Interrupts are masked, so no signal is expected.
        assert!(!write_simple(&mut out_ring, &[1, 2, 3]).unwrap());
        assert!(!read_simple(&mut in_ring).1);

        // Unmask interrupts, then try again, expecting a signal this time.
        in_ring.set_interrupt_mask(false);
        assert!(write_simple(&mut out_ring, &[1, 2, 3]).unwrap());
        assert!(!read_simple(&mut in_ring).1);
    }

    #[test]
    fn test_pending_send_size() {
        let rmem = FlatRingMem::new(16384);
        let mut in_ring = IncomingRing::new(&rmem).unwrap();
        let mut out_ring = OutgoingRing::new(&rmem).unwrap();

        // Fill the ring up with some packets.
        write_simple(&mut out_ring, &[1; 4000]).unwrap();
        write_simple(&mut out_ring, &[2; 4000]).unwrap();
        write_simple(&mut out_ring, &[3; 4000]).unwrap();
        write_simple(&mut out_ring, &[4; 4000]).unwrap();
        assert!(write_simple(&mut out_ring, &[5; 4000]).is_none());

        // No pending send size yet.
        assert!(!read_simple(&mut in_ring).1);

        // Fill the ring back up.
        write_simple(&mut out_ring, &[5; 4000]).unwrap();
        assert!(write_simple(&mut out_ring, &[6; 4000]).is_none());

        // Set a pending send size for two packets worth of space (packet size +
        // 16 bytes for the descriptor and 8 bytes for the footer).
        out_ring.set_pending_send_size(4024 * 2).unwrap();

        // There should be a signal after two packets, then no more signals.
        assert!(!read_simple(&mut in_ring).1);
        assert!(read_simple(&mut in_ring).1);
        assert!(!read_simple(&mut in_ring).1);
    }
}