vmbus_channel/
gpadl_ring.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! GPADL-backed ring buffers.
5
6use crate::ChannelClosed;
7use crate::RawAsyncChannel;
8use crate::SignalVmbusChannel;
9use crate::bus::OpenData;
10use crate::bus::OpenRequest;
11use crate::channel::DeviceResources;
12use crate::gpadl::GpadlMapView;
13use crate::gpadl::GpadlView;
14use crate::gpadl::UnknownGpadlId;
15use guestmem::GuestMemory;
16use guestmem::GuestMemoryError;
17use guestmem::LockedPages;
18use pal_async::driver::Driver;
19use ring::IncomingRing;
20use ring::OutgoingRing;
21use std::fmt::Debug;
22use std::sync::atomic::AtomicU8;
23use std::sync::atomic::AtomicU32;
24use vmbus_ring as ring;
25use vmcore::interrupt::Interrupt;
26use vmcore::notify::PolledNotify;
27
/// A GPADL view that has exactly one page-aligned range.
#[derive(Clone)]
pub struct AlignedGpadlView {
    // The underlying GPADL view; guaranteed by `new` to have exactly one
    // range with zero byte offset and a page-multiple length.
    gpadl: GpadlView,
    // Index of this view's first page within the GPADL's GPN list.
    offset: u32,
    // Number of pages in this view.
    len: u32,
}
35
36impl AlignedGpadlView {
37    /// Validates that `gpadl` is aligned and wraps it.
38    pub fn new(gpadl: GpadlView) -> Result<Self, GpadlView> {
39        if gpadl.range_count() != 1 {
40            return Err(gpadl);
41        }
42        let range = gpadl.first().unwrap();
43        if range.len() % ring::PAGE_SIZE != 0 || range.offset() != 0 {
44            return Err(gpadl);
45        }
46        let count = range.gpns().len() as u32;
47        Ok(AlignedGpadlView {
48            gpadl,
49            offset: 0,
50            len: count,
51        })
52    }
53
54    /// Splits the range into two aligned ranges at the page number `offset`.
55    pub fn split(
56        self,
57        offset: u32,
58    ) -> Result<(AlignedGpadlView, AlignedGpadlView), AlignedGpadlView> {
59        if offset == 0 || self.len <= offset {
60            return Err(self);
61        }
62        let left = AlignedGpadlView {
63            gpadl: self.gpadl.clone(),
64            offset: 0,
65            len: offset,
66        };
67        let right = AlignedGpadlView {
68            gpadl: self.gpadl,
69            offset,
70            len: self.len - offset,
71        };
72        Ok((left, right))
73    }
74
75    /// Returns the GPN array for this range.
76    pub fn gpns(&self) -> &[u64] {
77        &self.gpadl.first().unwrap().gpns()
78            [self.offset as usize..self.offset as usize + self.len as usize]
79    }
80}
81
/// Locked guest pages backing a ring buffer: the first page of the GPADL
/// followed by the data pages locked twice in a row (see `new`).
#[derive(Clone)]
struct GpadlPagedMemory {
    // Held only to keep the GPADL view alive while its pages are locked.
    _gpadl: AlignedGpadlView,
    // The locked pages, indexed by `ring::PagedMemory` accessors.
    pages: LockedPages,
}
87
88impl GpadlPagedMemory {
89    fn new(gpadl: AlignedGpadlView, mem: &GuestMemory) -> Result<Self, GuestMemoryError> {
90        // Store the data gpns twice in a row to make lookup easier.
91        let gpns: Vec<u64> = gpadl
92            .gpns()
93            .iter()
94            .chain(gpadl.gpns().iter().skip(1))
95            .copied()
96            .collect();
97        let pages = mem.lock_gpns(false, &gpns)?;
98        Ok(Self {
99            _gpadl: gpadl,
100            pages,
101        })
102    }
103}
104
105impl ring::PagedMemory for GpadlPagedMemory {
106    fn control(&self) -> &[AtomicU8; ring::PAGE_SIZE] {
107        self.pages.pages()[0]
108    }
109
110    #[inline]
111    fn data(&self, page: usize) -> &[AtomicU8; ring::PAGE_SIZE] {
112        self.pages.pages()[page + 1]
113    }
114
115    fn data_page_count(&self) -> usize {
116        (self.pages.pages().len() - 1) / 2
117    }
118}
119
120impl Debug for GpadlPagedMemory {
121    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
122        f.debug_struct("GpadlPagedMemory").finish()
123    }
124}
125
/// An implementation of [`vmbus_ring::RingMem`] backed by an aligned GPADL
/// view.
#[derive(Debug, Clone)]
pub struct GpadlRingMem {
    // Paged ring memory over the locked GPADL pages; all `RingMem`
    // operations forward to this.
    ring: ring::PagedRingMem<GpadlPagedMemory>,
}
132
133impl GpadlRingMem {
134    /// Creates a new ring memory backed by `gpadl` and `mem`.
135    pub fn new(gpadl: AlignedGpadlView, mem: &GuestMemory) -> Result<Self, GuestMemoryError> {
136        Ok(Self {
137            ring: ring::PagedRingMem::new(GpadlPagedMemory::new(gpadl, mem)?),
138        })
139    }
140}
141
142impl ring::RingMem for GpadlRingMem {
143    #[inline]
144    fn len(&self) -> usize {
145        self.ring.len()
146    }
147
148    #[inline]
149    fn read_at(&self, addr: usize, data: &mut [u8]) {
150        self.ring.read_at(addr, data)
151    }
152
153    #[inline]
154    fn write_at(&self, addr: usize, data: &[u8]) {
155        self.ring.write_at(addr, data)
156    }
157
158    #[inline]
159    fn read_aligned(&self, addr: usize, data: &mut [u8]) {
160        self.ring.read_aligned(addr, data)
161    }
162
163    #[inline]
164    fn write_aligned(&self, addr: usize, data: &[u8]) {
165        self.ring.write_aligned(addr, data)
166    }
167
168    #[inline]
169    fn control(&self) -> &[AtomicU32; vmbus_ring::CONTROL_WORD_COUNT] {
170        self.ring.control()
171    }
172}
173
/// A ring buffer error.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// The ring buffer GPADL is not a single page-aligned range, or it could
    /// not be split at the requested ring offset.
    #[error("invalid ring buffer gpadl")]
    InvalidRingGpadl,
    /// The GPADL ID from the open request is not present in the GPADL map.
    #[error(transparent)]
    UnknownGpadlId(#[from] UnknownGpadlId),
    /// Guest memory access failed while locking the ring's pages.
    #[error(transparent)]
    Memory(#[from] GuestMemoryError),
    /// The vmbus ring layer rejected the ring buffer.
    #[error(transparent)]
    Ring(#[from] ring::Error),
    /// Creating the pollable channel event with the supplied IO driver
    /// failed.
    #[error("io driver error")]
    Driver(#[source] std::io::Error),
}
193
194/// Creates a set of incoming and outgoing rings for a channel.
195pub fn make_rings(
196    mem: &GuestMemory,
197    gpadl_map: &GpadlMapView,
198    open_data: &OpenData,
199) -> Result<(IncomingRing<GpadlRingMem>, OutgoingRing<GpadlRingMem>), Error> {
200    let gpadl = AlignedGpadlView::new(gpadl_map.map(open_data.ring_gpadl_id)?)
201        .map_err(|_| Error::InvalidRingGpadl)?;
202    let (in_gpadl, out_gpadl) = gpadl
203        .split(open_data.ring_offset)
204        .map_err(|_| Error::InvalidRingGpadl)?;
205    Ok((
206        IncomingRing::new(GpadlRingMem::new(in_gpadl, mem)?)?,
207        OutgoingRing::new(GpadlRingMem::new(out_gpadl, mem)?)?,
208    ))
209}
210
211/// Creates a raw channel from input parameters passed to [`crate::channel::VmbusDevice::open`].
212pub fn gpadl_channel(
213    driver: &(impl Driver + ?Sized),
214    resources: &DeviceResources,
215    open_request: &OpenRequest,
216    channel_idx: u16,
217) -> Result<RawAsyncChannel<GpadlRingMem>, Error> {
218    let (in_ring, out_ring) = make_rings(
219        resources.offer_resources.ring_memory(open_request),
220        &resources.gpadl_map,
221        &open_request.open_data,
222    )?;
223
224    let event = Box::new(GpadlChannelSignal {
225        event: resources.channels[channel_idx as usize]
226            .event
227            .clone()
228            .pollable(driver)
229            .map_err(Error::Driver)?,
230
231        interrupt: open_request.interrupt.clone(),
232    });
233
234    Ok(RawAsyncChannel {
235        in_ring,
236        out_ring,
237        signal: event,
238    })
239}
240
/// Signal plumbing for a GPADL ring channel: incoming signals are observed
/// via `event`, outgoing signals are delivered via `interrupt`.
struct GpadlChannelSignal {
    // Pollable notification source for incoming signals.
    event: PolledNotify,
    // Interrupt delivered to signal the remote endpoint.
    interrupt: Interrupt,
}
245
246impl SignalVmbusChannel for GpadlChannelSignal {
247    fn signal_remote(&self) {
248        self.interrupt.deliver();
249    }
250
251    fn poll_for_signal(
252        &self,
253        cx: &mut std::task::Context<'_>,
254    ) -> std::task::Poll<Result<(), ChannelClosed>> {
255        // Use the event directly for incoming signals, without ever returning
256        // [`ChannelClosed`]. It is expected that the caller will handle the
257        // channel closed case explicitly, in
258        // [`crate::channel::VmbusDevice::close`], rather than relying on getting a
259        // failure when reading or writing a packet in a ring buffer.
260        self.event.poll_wait(cx).map(Ok)
261    }
262}