user_driver/page_allocator.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Allocator for pages within a pool.
//!
//! This is used for temporary allocations of per-queue DMA buffers, mainly for
//! PRP lists.
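//!
//! A rough usage sketch (the `MemoryBlock` is assumed to come from the owning
//! driver's DMA allocation path; names outside this module are illustrative):
//!
//! ```ignore
//! // Back the allocator with a page-aligned, page-multiple memory block.
//! let alloc = PageAllocator::new(mem_block);
//!
//! // Wait for two pages, fill them, and hand their physical addresses to the
//! // device (e.g. as PRP entries).
//! let pages = alloc.alloc_pages(2).await.expect("request larger than the pool");
//! pages.write(&[0u8; 16]);
//! let prp0 = pages.physical_address(0);
//! let prp1 = pages.physical_address(1);
//!
//! // Dropping `pages` returns them to the pool and wakes any waiters.
//! drop(pages);
//! ```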

use crate::memory::MemoryBlock;
use crate::memory::PAGE_SIZE;
use crate::memory::PAGE_SIZE64;
use guestmem::GuestMemory;
use guestmem::GuestMemoryError;
use guestmem::ranges::PagedRange;
use inspect::Inspect;
use parking_lot::Mutex;
use std::sync::atomic::AtomicU8;

#[derive(Inspect)]
pub struct PageAllocator {
    #[inspect(flatten)]
    core: Mutex<PageAllocatorCore>,
    #[inspect(skip)]
    mem: MemoryBlock,
    #[inspect(skip)]
    event: event_listener::Event,
    max: usize,
}

impl std::fmt::Debug for PageAllocator {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("PageAllocator").finish()
    }
}

impl PageAllocator {
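    /// Creates an allocator over `mem`, which must be page aligned and a whole
    /// number of pages long (both are asserted). Each page of `mem` becomes
    /// one allocatable unit.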
    pub fn new(mem: MemoryBlock) -> Self {
        assert_eq!(mem.offset_in_page(), 0);
        assert_eq!(mem.len() % PAGE_SIZE, 0);
        let count = mem.len() / PAGE_SIZE;
        Self {
            core: Mutex::new(PageAllocatorCore::new(count)),
            mem,
            event: Default::default(),
            max: count,
        }
    }

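    /// Allocates `n` pages, waiting until enough pages are free.
    ///
    /// Returns `None` if the request can never be satisfied (the pool always
    /// keeps at least one page in reserve).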
    pub async fn alloc_pages(&self, n: usize) -> Option<ScopedPages<'_>> {
        // A single page must be left over for the PRP list, so one request may
        // not use all pages.
        if self.max < n + 1 {
            return None;
        }
        let mut core = loop {
            let listener = {
                let core = self.core.lock();
                if core.remaining() >= n {
                    break core;
                }
                // Fairness is pretty bad with this approach--small allocations
                // could easily prevent a large allocation from ever succeeding.
                // But we don't really have this use case right now, so this is OK.
                self.event.listen()
            };
            listener.await;
        };

        let pfns = self.mem.pfns();
        let pages = (0..n)
            .map(|_| {
                let n = core.alloc().unwrap();
                ScopedPage {
                    page_index: n,
                    physical_address: pfns[n] * PAGE_SIZE64,
                }
            })
            .collect();
        Some(ScopedPages { alloc: self, pages })
    }

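    /// Allocates enough pages to cover `n` bytes, waiting until they are free.
    ///
    /// Returns `None` if the request can never be satisfied.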
    pub async fn alloc_bytes(&self, n: usize) -> Option<ScopedPages<'_>> {
        self.alloc_pages(n.div_ceil(PAGE_SIZE)).await
    }
}

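/// Tracks which pages of the pool are free, as a stack of page indices.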
#[derive(Inspect)]
struct PageAllocatorCore {
    #[inspect(with = "|x| x.len()")]
    free: Vec<usize>,
}

impl PageAllocatorCore {
    fn new(count: usize) -> Self {
        let free = (0..count).rev().collect();
        Self { free }
    }

    fn remaining(&self) -> usize {
        self.free.len()
    }

    fn alloc(&mut self) -> Option<usize> {
        self.free.pop()
    }

    fn free(&mut self, n: usize) {
        self.free.push(n);
    }
}

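/// A set of pages allocated from a [`PageAllocator`].
///
/// The pages are returned to the allocator when this is dropped.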
pub struct ScopedPages<'a> {
    alloc: &'a PageAllocator,
    pages: Vec<ScopedPage>,
}

struct ScopedPage {
    page_index: usize,
    physical_address: u64,
}

impl ScopedPages<'_> {
    pub fn page_count(&self) -> usize {
        self.pages.len()
    }

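    /// Returns the physical address of the page at `index`, suitable for
    /// handing to the device (e.g. as a PRP entry).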
    pub fn physical_address(&self, index: usize) -> u64 {
        self.pages[index].physical_address
    }

    pub fn page_as_slice(&self, index: usize) -> &[AtomicU8] {
        &self.alloc.mem.as_slice()[self.pages[index].page_index * PAGE_SIZE..][..PAGE_SIZE]
    }

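    /// Copies the contents of the allocated pages into `data`, which must not
    /// be longer than the allocation.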
    pub fn read(&self, data: &mut [u8]) {
        assert!(data.len() <= self.pages.len() * PAGE_SIZE);
        for (chunk, page) in data.chunks_mut(PAGE_SIZE).zip(&self.pages) {
            self.alloc.mem.read_at(page.page_index * PAGE_SIZE, chunk);
        }
    }

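    /// Copies the contents of the allocated pages into guest memory at the
    /// ranges described by `mem`.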
    pub fn copy_to_guest_memory(
        &self,
        guest_memory: &GuestMemory,
        mem: PagedRange<'_>,
    ) -> Result<(), GuestMemoryError> {
        let mut remaining = mem.len();
        for (i, page) in self.pages.iter().enumerate() {
            let len = PAGE_SIZE.min(remaining);
            remaining -= len;
            guest_memory.write_range_from_atomic(
                &mem.subrange(i * PAGE_SIZE, len),
                &self.alloc.mem.as_slice()[page.page_index * PAGE_SIZE..][..len],
            )?;
        }
        Ok(())
    }

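    /// Copies `data` into the allocated pages; `data` must not be longer than
    /// the allocation.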
    pub fn write(&self, data: &[u8]) {
        assert!(data.len() <= self.pages.len() * PAGE_SIZE);
        for (chunk, page) in data.chunks(PAGE_SIZE).zip(&self.pages) {
            self.alloc.mem.write_at(page.page_index * PAGE_SIZE, chunk);
        }
    }

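    /// Fills the allocated pages from guest memory at the ranges described by
    /// `mem`.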
    pub fn copy_from_guest_memory(
        &self,
        guest_memory: &GuestMemory,
        mem: PagedRange<'_>,
    ) -> Result<(), GuestMemoryError> {
        let mut remaining = mem.len();
        for (i, page) in self.pages.iter().enumerate() {
            let len = PAGE_SIZE.min(remaining);
            remaining -= len;
            guest_memory.read_range_to_atomic(
                &mem.subrange(i * PAGE_SIZE, len),
                &self.alloc.mem.as_slice()[page.page_index * PAGE_SIZE..][..len],
            )?;
        }
        Ok(())
    }
}

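// Return the pages to the pool and wake any tasks waiting in `alloc_pages`.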
impl Drop for ScopedPages<'_> {
    fn drop(&mut self) {
        let n = self.pages.len();
        {
            let mut core = self.alloc.core.lock();
            for page in self.pages.drain(..) {
                core.free(page.page_index);
            }
        }
        self.alloc.event.notify_additional(n);
    }
}