#![expect(unsafe_code)]
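//! Types for describing guest memory buffers used for I/O: scatter-gather
//! vectors, lifetime-tracked I/O buffers, bounce buffers, and request buffer
//! descriptions.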

use guestmem::AccessError;
use guestmem::GuestMemory;
use guestmem::LockedRange;
use guestmem::LockedRangeImpl;
use guestmem::MemoryRead;
use guestmem::MemoryWrite;
use guestmem::ranges::PagedRange;
use safeatomic::AsAtomicBytes;
use smallvec::SmallVec;
use std::marker::PhantomData;
use std::ops::Deref;
use std::sync::atomic::AtomicU8;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

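/// A pointer/length pair describing a buffer of bytes that can be accessed
/// atomically, in a `#[repr(C)]` layout suitable for passing to I/O APIs.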
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct AtomicIoVec {
    /// The pointer to the first byte of the buffer.
    pub address: *const AtomicU8,
    /// The length of the buffer in bytes.
    pub len: usize,
}

impl Default for AtomicIoVec {
    fn default() -> Self {
        Self {
            address: std::ptr::null(),
            len: 0,
        }
    }
}

impl From<&'_ [AtomicU8]> for AtomicIoVec {
    fn from(p: &'_ [AtomicU8]) -> Self {
        Self {
            address: p.as_ptr(),
            len: p.len(),
        }
    }
}

impl AtomicIoVec {
    /// # Safety
    ///
    /// The caller must ensure that `address` points to `len` valid `AtomicU8`
    /// values for the duration of the returned borrow.
    pub unsafe fn as_slice_unchecked(&self) -> &[AtomicU8] {
        // SAFETY: guaranteed by the caller.
        unsafe { std::slice::from_raw_parts(self.address, self.len) }
    }
}

// SAFETY: the referenced bytes are `AtomicU8`s, which are safe to access
// concurrently from multiple threads.
unsafe impl Send for AtomicIoVec {}
// SAFETY: see above.
unsafe impl Sync for AtomicIoVec {}

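/// An I/O buffer: an [`AtomicIoVec`] plus a lifetime guaranteeing that the
/// referenced buffer remains valid.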
#[derive(Debug, Copy, Clone, Default)]
#[repr(transparent)]
pub struct IoBuffer<'a> {
    io_vec: AtomicIoVec,
    phantom: PhantomData<&'a AtomicU8>,
}

impl<'a> IoBuffer<'a> {
    /// Creates a new buffer from a slice of atomic bytes.
    pub fn new(buffer: &'a [AtomicU8]) -> Self {
        Self {
            io_vec: AtomicIoVec {
                address: buffer.as_ptr(),
                len: buffer.len(),
            },
            phantom: PhantomData,
        }
    }

    /// # Safety
    ///
    /// The caller must ensure the buffer described by `io_vec` remains valid
    /// for the lifetime of the returned reference.
    pub unsafe fn from_io_vec(io_vec: &AtomicIoVec) -> &Self {
        // SAFETY: `IoBuffer` is `#[repr(transparent)]` over `AtomicIoVec`, so
        // the layouts match; validity is guaranteed by the caller.
        unsafe { std::mem::transmute(io_vec) }
    }

    /// # Safety
    ///
    /// The caller must ensure the buffers described by `io_vecs` remain valid
    /// for the lifetime of the returned reference.
    pub unsafe fn from_io_vecs(io_vecs: &[AtomicIoVec]) -> &[Self] {
        // SAFETY: as for `from_io_vec`, the layouts match.
        unsafe { std::mem::transmute(io_vecs) }
    }

    /// The pointer to the start of the buffer.
    pub fn as_ptr(&self) -> *const AtomicU8 {
        self.io_vec.address
    }

    /// The length of the buffer in bytes.
    pub fn len(&self) -> usize {
        self.io_vec.len
    }
}

impl Deref for IoBuffer<'_> {
    type Target = [AtomicU8];

    fn deref(&self) -> &Self::Target {
        // SAFETY: the buffer is guaranteed to be valid by construction.
        unsafe { self.io_vec.as_slice_unchecked() }
    }
}

const PAGE_SIZE: usize = 4096;

#[repr(C, align(4096))]
#[derive(Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
struct Page([u8; PAGE_SIZE]);

const ZERO_PAGE: Page = Page([0; PAGE_SIZE]);

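/// A page-aligned, zero-initialized buffer for double-buffering (bouncing)
/// I/O data.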
pub struct BounceBuffer {
    pages: Vec<Page>,
    io_vec: AtomicIoVec,
}

impl BounceBuffer {
    /// Allocates a new bounce buffer of `size` bytes, rounded up internally
    /// to whole pages.
    pub fn new(size: usize) -> Self {
        let mut pages = vec![ZERO_PAGE; size.div_ceil(PAGE_SIZE)];
        let io_vec = pages.as_mut_bytes()[..size].as_atomic_bytes().into();
        BounceBuffer { pages, io_vec }
    }

    fn len(&self) -> usize {
        self.io_vec.len
    }

    /// The buffer contents, as a mutable byte slice.
    pub fn as_mut_bytes(&mut self) -> &mut [u8] {
        // SAFETY: `pages` holds at least `len()` bytes, and `&mut self`
        // guarantees exclusive access.
        unsafe { std::slice::from_raw_parts_mut(self.pages.as_mut_ptr().cast::<u8>(), self.len()) }
    }

    /// The buffer, as a single-element slice of I/O buffers.
    pub fn io_vecs(&self) -> &[IoBuffer<'_>] {
        std::slice::from_ref({
            // SAFETY: `io_vec` points into `pages`, which outlives the
            // returned borrow.
            unsafe { IoBuffer::from_io_vec(&self.io_vec) }
        })
    }
}
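/// A set of guest memory ranges locked for I/O, exposed as [`IoBuffer`]s.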
pub struct LockedIoBuffers(LockedRangeImpl<LockedIoVecs>);

impl LockedIoBuffers {
    /// The locked memory, as a slice of I/O buffers.
    pub fn io_vecs(&self) -> &[IoBuffer<'_>] {
        // SAFETY: the underlying guest memory remains locked, and therefore
        // valid, for the lifetime of `self`.
        unsafe { IoBuffer::from_io_vecs(&self.0.get().0) }
    }
}
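/// The list of I/O vectors accumulated while locking a guest memory range.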
struct LockedIoVecs(SmallVec<[AtomicIoVec; 64]>);

impl LockedIoVecs {
    fn new() -> Self {
        Self(Default::default())
    }
}

impl LockedRange for LockedIoVecs {
    fn push_sub_range(&mut self, sub_range: &[AtomicU8]) {
        self.0.push(sub_range.into());
    }

    fn pop_sub_range(&mut self) -> Option<(*const AtomicU8, usize)> {
        self.0.pop().map(|buffer| (buffer.address, buffer.len))
    }
}
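/// The guest memory buffers for an I/O request: a paged range of guest memory
/// plus a flag indicating whether the buffers may be written.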
#[derive(Clone, Debug)]
pub struct RequestBuffers<'a> {
    range: PagedRange<'a>,
    guest_memory: &'a GuestMemory,
    is_write: bool,
}

impl<'a> RequestBuffers<'a> {
    /// Creates a new set of request buffers from `range` within `guest_memory`.
    pub fn new(guest_memory: &'a GuestMemory, range: PagedRange<'a>, is_write: bool) -> Self {
        Self {
            range,
            guest_memory,
            is_write,
        }
    }

    /// Returns whether the buffers are empty.
    pub fn is_empty(&self) -> bool {
        self.range.is_empty()
    }

    /// The total length of the buffers in bytes.
    pub fn len(&self) -> usize {
        self.range.len()
    }

    /// The guest memory backing the buffers.
    pub fn guest_memory(&self) -> &GuestMemory {
        self.guest_memory
    }

    /// The paged range describing the buffers.
    pub fn range(&self) -> PagedRange<'_> {
        self.range
    }

    /// Returns whether the buffers' offset and length are aligned to
    /// `alignment` bytes. `alignment` must be a power of two.
    pub fn is_aligned(&self, alignment: usize) -> bool {
        assert!(alignment.is_power_of_two());
        ((self.range.offset() | self.range.len() | PAGE_SIZE) & (alignment - 1)) == 0
    }

    /// Returns a writer for the buffers. If the buffers are not writable,
    /// the writer covers an empty range, so writes will fail.
    pub fn writer(&self) -> impl MemoryWrite + '_ {
        let range = if self.is_write {
            self.range
        } else {
            PagedRange::empty()
        };
        range.writer(self.guest_memory)
    }

    /// Returns a reader for the buffers.
    pub fn reader(&self) -> impl MemoryRead + '_ {
        self.range.reader(self.guest_memory)
    }

    /// Locks the guest memory ranges for I/O, failing with
    /// [`AccessError::ReadOnly`] if `for_write` is set but the buffers are
    /// not writable.
    pub fn lock(&self, for_write: bool) -> Result<LockedIoBuffers, AccessError> {
        if for_write && !self.is_write {
            return Err(AccessError::ReadOnly);
        }
        Ok(LockedIoBuffers(
            self.guest_memory
                .lock_range(self.range, LockedIoVecs::new())?,
        ))
    }

    /// Returns a sub-range of the buffers: `len` bytes starting at `offset`.
    pub fn subrange(&self, offset: usize, len: usize) -> Self {
        Self {
            range: self.range.subrange(offset, len),
            guest_memory: self.guest_memory,
            is_write: self.is_write,
        }
    }
}
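/// An owned version of [`RequestBuffers`]: the guest page numbers are stored
/// directly, so no borrow of guest memory is held.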
#[derive(Debug, Clone)]
pub struct OwnedRequestBuffers {
    gpns: Vec<u64>,
    offset: usize,
    len: usize,
    is_write: bool,
}

impl OwnedRequestBuffers {
    /// A new, writable set of buffers covering the whole of the pages in
    /// `gpns`.
    pub fn new(gpns: &[u64]) -> Self {
        Self::new_unaligned(gpns, 0, gpns.len() * PAGE_SIZE)
    }

    /// A new, writable set of buffers covering `len` bytes of the pages in
    /// `gpns`, starting at byte offset `offset`.
    pub fn new_unaligned(gpns: &[u64], offset: usize, len: usize) -> Self {
        Self {
            gpns: gpns.to_vec(),
            offset,
            len,
            is_write: true,
        }
    }

    /// A new set of buffers describing a linear range of guest memory: `len`
    /// bytes starting at byte offset `offset`.
    pub fn linear(offset: u64, len: usize, is_write: bool) -> Self {
        let start_page = offset / PAGE_SIZE as u64;
        let end_page = (offset + len as u64).div_ceil(PAGE_SIZE as u64);
        let gpns: Vec<u64> = (start_page..end_page).collect();
        Self {
            gpns,
            offset: (offset % PAGE_SIZE as u64) as usize,
            len,
            is_write,
        }
    }

    /// Binds the buffers to `guest_memory`, returning a [`RequestBuffers`]
    /// that can be used for an I/O.
    pub fn buffer<'a>(&'a self, guest_memory: &'a GuestMemory) -> RequestBuffers<'a> {
        RequestBuffers::new(
            guest_memory,
            PagedRange::new(self.offset, self.len, &self.gpns).unwrap(),
            self.is_write,
        )
    }

    /// The length of the buffers in bytes.
    pub fn len(&self) -> usize {
        self.len
    }
}
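/// A bounce buffer whose pages are tracked by a [`BounceBufferTracker`].
/// Dropping it returns its pages to the tracker and wakes any waiters.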
pub struct TrackedBounceBuffer<'a> {
    /// The underlying bounce buffer.
    pub buffer: BounceBuffer,
    /// The per-thread count of free bounce buffer pages.
    free_pages: &'a AtomicUsize,
    /// The event used to notify waiters when pages are freed.
    event: &'a event_listener::Event,
}

impl Drop for TrackedBounceBuffer<'_> {
    fn drop(&mut self) {
        // Return this buffer's pages to the per-thread pool and wake any
        // tasks waiting for free pages.
        let pages = self.buffer.len().div_ceil(4096);
        self.free_pages.fetch_add(pages, Ordering::SeqCst);
        self.event.notify(usize::MAX);
    }
}
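/// Tracks the number of bounce buffer pages in use, per thread, so that
/// bounce buffer usage can be bounded.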
#[derive(Debug)]
pub struct BounceBufferTracker {
    /// Per-thread count of free bounce buffer pages.
    free_pages: Vec<AtomicUsize>,
    /// Per-thread event used to wait for pages to be freed.
    event: Vec<event_listener::Event>,
}

impl BounceBufferTracker {
    /// Creates a new tracker allowing `max_bounce_buffer_pages` outstanding
    /// pages per thread.
    pub fn new(max_bounce_buffer_pages: usize, threads: usize) -> Self {
        let mut free_pages = Vec::with_capacity(threads);
        let mut event = Vec::with_capacity(threads);

        (0..threads).for_each(|_| {
            event.push(event_listener::Event::new());
            free_pages.push(AtomicUsize::new(max_bounce_buffer_pages));
        });

        Self { free_pages, event }
    }

    /// Allocates a bounce buffer of `size` bytes against `thread`'s quota,
    /// waiting until enough pages are free.
    pub async fn acquire_bounce_buffers<'a, 'b>(
        &'b self,
        size: usize,
        thread: usize,
    ) -> Box<TrackedBounceBuffer<'a>>
    where
        'b: 'a,
    {
        let pages = size.div_ceil(4096);
        let event = self.event.get(thread).unwrap();
        let free_pages = self.free_pages.get(thread).unwrap();

        loop {
            // Register for wakeups before trying to reserve pages so a
            // concurrent release is not missed.
            let listener = event.listen();
            if free_pages
                .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| x.checked_sub(pages))
                .is_ok()
            {
                break;
            }
            listener.await;
        }

        Box::new(TrackedBounceBuffer {
            buffer: BounceBuffer::new(size),
            free_pages,
            event,
        })
    }
}
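
// A minimal sketch of how the types above fit together: a bounce buffer round
// trip and the page arithmetic in `OwnedRequestBuffers::linear`. The test
// names and values are illustrative only; they exercise nothing beyond the
// behavior defined in this file.
#[cfg(test)]
mod tests {
    use super::*;

    /// Data written through `as_mut_bytes` is visible through the `IoBuffer`
    /// view returned by `io_vecs`.
    #[test]
    fn bounce_buffer_round_trip() {
        let mut bounce = BounceBuffer::new(2 * PAGE_SIZE);
        bounce.as_mut_bytes()[..4].copy_from_slice(&[1, 2, 3, 4]);
        let vecs = bounce.io_vecs();
        assert_eq!(vecs.len(), 1);
        assert_eq!(vecs[0].len(), 2 * PAGE_SIZE);
        assert_eq!(vecs[0][0].load(Ordering::Relaxed), 1);
        assert_eq!(vecs[0][3].load(Ordering::Relaxed), 4);
    }

    /// `linear` maps a byte range onto the guest pages that contain it.
    #[test]
    fn linear_spans_containing_pages() {
        // 8000 bytes starting at byte 4100 touch guest pages 1 and 2.
        let buffers = OwnedRequestBuffers::linear(4100, 8000, true);
        assert_eq!(buffers.gpns, vec![1, 2]);
        assert_eq!(buffers.offset, 4);
        assert_eq!(buffers.len(), 8000);
    }
}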