user_driver_emulated_mock/
lib.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! This crate provides a collection of wrapper structs around devices and memory. Through these wrappers, it can
//! emulate devices such as NVMe and MANA, and it gives additional control over resources like [`GuestMemory`] to
//! make testing devices easier.
//! Everything in this crate is meant for TESTING PURPOSES ONLY. It should only ever be added as a dev-dependency,
//! with few exceptions (such as using this crate for fuzzing).

mod guest_memory_access_wrapper;

use crate::guest_memory_access_wrapper::GuestMemoryAccessWrapper;

use anyhow::Context;
use chipset_device::mmio::MmioIntercept;
use chipset_device::pci::PciConfigSpace;
use guestmem::GuestMemory;
use inspect::Inspect;
use inspect::InspectMut;
use memory_range::MemoryRange;
use page_pool_alloc::PagePool;
use page_pool_alloc::PagePoolAllocator;
use page_pool_alloc::TestMapper;
use parking_lot::Mutex;
use pci_core::chipset_device_ext::PciChipsetDeviceExt;
use pci_core::msi::MsiControl;
use pci_core::msi::MsiInterruptSet;
use pci_core::msi::MsiInterruptTarget;
use std::sync::Arc;
use user_driver::DeviceBacking;
use user_driver::DeviceRegisterIo;
use user_driver::DmaClient;
use user_driver::interrupt::DeviceInterrupt;
use user_driver::interrupt::DeviceInterruptSource;
use user_driver::memory::PAGE_SIZE64;

/// A wrapper around any user_driver device `T`. It emulates the device by exposing the memory shared with it,
/// giving the user a degree of control over device behavior. Can be used with devices such as the `NvmeController`.
pub struct EmulatedDevice<T, U> {
    device: Arc<Mutex<T>>,
    controller: MsiController,
    dma_client: Arc<U>,
    bar0_len: usize,
}

impl<T: InspectMut, U> Inspect for EmulatedDevice<T, U> {
    fn inspect(&self, req: inspect::Request<'_>) {
        self.device.lock().inspect_mut(req);
    }
}

struct MsiController {
    events: Arc<[DeviceInterruptSource]>,
}

impl MsiController {
    fn new(n: usize) -> Self {
        Self {
            events: (0..n).map(|_| DeviceInterruptSource::new()).collect(),
        }
    }
}

impl MsiInterruptTarget for MsiController {
    fn new_interrupt(&self) -> Box<dyn MsiControl> {
        let events = self.events.clone();
        Box::new(move |address, _data| {
            // The MSI-X table programmed in `EmulatedDevice::new` uses each entry's
            // index as its message address, so the address doubles as the index here.
            let index = address as usize;
            if let Some(event) = events.get(index) {
                tracing::debug!(index, "signaling interrupt");
                event.signal_uncached();
            } else {
                tracing::info!("interrupt ignored");
            }
        })
    }
}

impl<T: PciConfigSpace + MmioIntercept, U: DmaClient> EmulatedDevice<T, U> {
    /// Creates a new emulated device, wrapping `device` of type `T`, using the provided MSI interrupt set.
    /// `dma_client` should point to memory shared with the device.
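    ///
    /// A minimal usage sketch; `FakePciDevice` is a hypothetical stand-in for a
    /// type implementing `PciConfigSpace + MmioIntercept` that registers its
    /// interrupts with the `MsiInterruptSet` during construction:
    ///
    /// ```ignore
    /// let mut msi_set = MsiInterruptSet::new();
    /// let device = FakePciDevice::new(&mut msi_set);
    /// let emulated = EmulatedDevice::new(device, msi_set, dma_client);
    /// ```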
    pub fn new(mut device: T, msi_set: MsiInterruptSet, dma_client: Arc<U>) -> Self {
        // Connect an interrupt controller.
        let controller = MsiController::new(msi_set.len());
        msi_set.connect(&controller);

        // Derive the BAR0 size from its probed mask: clear the low flag bits,
        // invert to get (size - 1), then add 1.
        let bars = device.probe_bar_masks();
        let bar0_len = !(bars[0] & !0xf) as usize + 1;

        // Enable BAR0 at address 0 and 64-bit BAR4 at 0x1_0000_0000.
        device.pci_cfg_write(0x20, 0).unwrap();
        device.pci_cfg_write(0x24, 0x1).unwrap();
        device
            .pci_cfg_write(
                0x4,
                pci_core::spec::cfg_space::Command::new()
                    .with_mmio_enabled(true)
                    .into_bits() as u32,
            )
            .unwrap();

        // Enable MSI-X: program each 16-byte table entry in BAR4 with its own
        // index as the message address (consumed by `MsiController`), and clear
        // the vector control word to unmask the entry.
        for i in 0u64..64 {
            device
                .mmio_write((0x1 << 32) + i * 16, &i.to_ne_bytes())
                .unwrap();
            device
                .mmio_write((0x1 << 32) + i * 16 + 12, &0u32.to_ne_bytes())
                .unwrap();
        }
        // Set the MSI-X enable bit in the capability at config offset 0x40.
        device.pci_cfg_write(0x40, 0x80000000).unwrap();

        Self {
            device: Arc::new(Mutex::new(device)),
            controller,
            dma_client,
            bar0_len,
        }
    }
}

/// A memory mapping for an [`EmulatedDevice`].
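///
/// A minimal register-access sketch, assuming an `EmulatedDevice` named
/// `emulated` built as above (the BAR number and offsets are illustrative):
///
/// ```ignore
/// let bar0 = emulated.map_bar(0)?;
/// let status = bar0.read_u32(0);
/// bar0.write_u32(0, status | 1);
/// ```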
#[derive(Inspect)]
pub struct Mapping<T> {
    #[inspect(skip)]
    device: Arc<Mutex<T>>,
    addr: u64,
    len: usize,
}

impl<T: 'static + Send + InspectMut + MmioIntercept, U: 'static + Send + DmaClient> DeviceBacking
    for EmulatedDevice<T, U>
{
    type Registers = Mapping<T>;

    fn id(&self) -> &str {
        "emulated"
    }

    fn map_bar(&mut self, n: u8) -> anyhow::Result<Self::Registers> {
        if n != 0 {
            anyhow::bail!("invalid bar {n}");
        }
        Ok(Mapping {
            device: self.device.clone(),
            // Only BAR0 is supported, so this is always address 0, matching
            // where `new` programmed it.
            addr: (n as u64) << 32,
            len: self.bar0_len,
        })
    }

    fn dma_client(&self) -> Arc<dyn DmaClient> {
        self.dma_client.clone()
    }

    fn max_interrupt_count(&self) -> u32 {
        self.controller.events.len() as u32
    }

    fn map_interrupt(&mut self, msix: u32, _cpu: u32) -> anyhow::Result<DeviceInterrupt> {
        Ok(self
            .controller
            .events
            .get(msix as usize)
            .with_context(|| format!("invalid msix index {msix}"))?
            .new_target())
    }
}

impl<T: MmioIntercept + Send> DeviceRegisterIo for Mapping<T> {
    fn len(&self) -> usize {
        self.len
    }

    fn read_u32(&self, offset: usize) -> u32 {
        let mut n = [0; 4];
        self.device
            .lock()
            .mmio_read(self.addr + offset as u64, &mut n)
            .unwrap();
        u32::from_ne_bytes(n)
    }

    fn read_u64(&self, offset: usize) -> u64 {
        let mut n = [0; 8];
        self.device
            .lock()
            .mmio_read(self.addr + offset as u64, &mut n)
            .unwrap();
        u64::from_ne_bytes(n)
    }

    fn write_u32(&self, offset: usize, data: u32) {
        self.device
            .lock()
            .mmio_write(self.addr + offset as u64, &data.to_ne_bytes())
            .unwrap();
    }

    fn write_u64(&self, offset: usize, data: u64) {
        self.device
            .lock()
            .mmio_write(self.addr + offset as u64, &data.to_ne_bytes())
            .unwrap();
    }
}

/// A wrapper around the [`TestMapper`] that generates both a [`GuestMemory`] and a [`PagePoolAllocator`] backed
/// by the same underlying memory. Meant to provide shared memory for testing devices.
pub struct DeviceTestMemory {
    guest_mem: GuestMemory,
    payload_mem: GuestMemory,
    _pool: PagePool,
    allocator: Arc<PagePoolAllocator>,
}

impl DeviceTestMemory {
    /// Creates test memory backed by the [`TestMapper`]. It exposes three accessors for the underlying memory:
    /// - `guest_memory` ([`GuestMemory`]): has access to the entire range.
    /// - `payload_mem` ([`GuestMemory`]): has access to the second half of the range.
    /// - `dma_client` ([`PagePoolAllocator`]): has access to the first half of the range.
    ///
    /// If the `allow_dma` switch is enabled, both `guest_memory` and `payload_mem` will report a base_iova of 0.
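    ///
    /// A minimal sketch of the resulting three-way split (8 pages, DMA
    /// disallowed; the pool name is arbitrary):
    ///
    /// ```ignore
    /// let mem = DeviceTestMemory::new(8, false, "test-pool");
    /// let guest_mem = mem.guest_memory(); // all 8 pages
    /// let payload = mem.payload_mem();    // last 4 pages
    /// let dma = mem.dma_client();         // first 4 pages
    /// ```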
    pub fn new(num_pages: u64, allow_dma: bool, pool_name: &str) -> Self {
        let test_mapper = TestMapper::new(num_pages).unwrap();
        let sparse_mmap = test_mapper.sparse_mapping();
        let guest_mem = GuestMemoryAccessWrapper::create_test_guest_memory(sparse_mmap, allow_dma);
        // The page pool (and thus the DMA client) covers the first half of the range.
        let pool = PagePool::new(
            &[MemoryRange::from_4k_gpn_range(0..num_pages / 2)],
            test_mapper,
        )
        .unwrap();

        // Save the page pool so that it is not dropped.
        let allocator = pool.allocator(pool_name.into()).unwrap();
        let range_half = num_pages / 2 * PAGE_SIZE64;
        Self {
            guest_mem: guest_mem.clone(),
            payload_mem: guest_mem.subrange(range_half, range_half, false).unwrap(),
            _pool: pool,
            allocator: Arc::new(allocator),
        }
    }

    /// Returns a [`GuestMemory`] accessor to the entire underlying memory. Reports base_iova as 0 if the `allow_dma` switch is enabled.
    pub fn guest_memory(&self) -> GuestMemory {
        self.guest_mem.clone()
    }

    /// Returns a [`GuestMemory`] accessor to the second half of the underlying memory. Reports base_iova as 0 if the `allow_dma` switch is enabled.
    pub fn payload_mem(&self) -> GuestMemory {
        self.payload_mem.clone()
    }

    /// Returns a [`PagePoolAllocator`] with access to the first half of the underlying memory.
    pub fn dma_client(&self) -> Arc<PagePoolAllocator> {
        self.allocator.clone()
    }
}