1use crate::BAR0_LEN;
7use crate::DOORBELL_STRIDE_BITS;
8use crate::IOCQES;
9use crate::IOSQES;
10use crate::MAX_QES;
11use crate::NVME_VERSION;
12use crate::NvmeControllerClient;
13use crate::PAGE_MASK;
14use crate::VENDOR_ID;
15use crate::spec;
16use crate::workers::IoQueueEntrySizes;
17use crate::workers::NvmeWorkers;
18use chipset_device::ChipsetDevice;
19use chipset_device::io::IoError;
20use chipset_device::io::IoError::InvalidRegister;
21use chipset_device::io::IoResult;
22use chipset_device::mmio::MmioIntercept;
23use chipset_device::mmio::RegisterMmioIntercept;
24use chipset_device::pci::PciConfigSpace;
25use device_emulators::ReadWriteRequestType;
26use device_emulators::read_as_u32_chunks;
27use device_emulators::write_as_u32_chunks;
28use guestmem::GuestMemory;
29use guid::Guid;
30use inspect::Inspect;
31use inspect::InspectMut;
32use parking_lot::Mutex;
33use pci_core::capabilities::msix::MsixEmulator;
34use pci_core::capabilities::pci_express::PciExpressCapability;
35use pci_core::cfg_space_emu::BarMemoryKind;
36use pci_core::cfg_space_emu::ConfigSpaceType0Emulator;
37use pci_core::cfg_space_emu::DeviceBars;
38use pci_core::msi::MsiTarget;
39use pci_core::spec::hwid::ClassCode;
40use pci_core::spec::hwid::HardwareIds;
41use pci_core::spec::hwid::ProgrammingInterface;
42use pci_core::spec::hwid::Subclass;
43use std::sync::Arc;
44use vmcore::device_state::ChangeDeviceState;
45use vmcore::save_restore::SaveError;
46use vmcore::save_restore::SaveRestore;
47use vmcore::save_restore::SavedStateNotSupported;
48use vmcore::vm_task::VmTaskDriverSource;
49
/// An emulated NVMe controller: PCI configuration space, MSI-X, the BAR0
/// register file, and the worker tasks that service the admin and I/O queues.
#[derive(InspectMut)]
pub struct NvmeController {
    /// PCI type-0 configuration space emulator (hardware IDs, BARs,
    /// capabilities).
    cfg_space: ConfigSpaceType0Emulator,
    #[inspect(skip)]
    msix: MsixEmulator,

    /// Software-visible BAR0 register state not owned by the workers.
    registers: RegState,
    /// Queue entry sizes, shared with the workers; updated when the guest
    /// writes CC.IOSQES/CC.IOCQES.
    #[inspect(skip)]
    qe_sizes: Arc<Mutex<IoQueueEntrySizes>>,
    #[inspect(flatten, mut)]
    workers: NvmeWorkers,
}
63
/// Guest-visible BAR0 register state maintained directly by the controller
/// (as opposed to state derived from the workers).
#[derive(Inspect)]
struct RegState {
    /// Interrupt mask; bits are set via INTMS and cleared via INTMC.
    #[inspect(hex)]
    interrupt_mask: u32,
    /// Controller Configuration (CC) register.
    cc: spec::Cc,
    /// Controller Status (CSTS) register.
    csts: spec::Csts,
    /// Admin Queue Attributes (AQA) register.
    aqa: spec::Aqa,
    /// Admin Submission Queue base address (ASQ).
    #[inspect(hex)]
    asq: u64,
    /// Admin Completion Queue base address (ACQ).
    #[inspect(hex)]
    acq: u64,
}
76
77impl RegState {
78 fn new() -> Self {
79 Self {
80 interrupt_mask: 0,
81 cc: spec::Cc::new(),
82 csts: spec::Csts::new(),
83 aqa: spec::Aqa::new(),
84 asq: 0,
85 acq: 0,
86 }
87 }
88}
89
/// Controller Capabilities (CAP) register value: the doorbell stride, the
/// zero-based maximum queue entry count, contiguous-queues-required, NVM
/// command set support, and the maximum ready timeout (all-ones).
const CAP: spec::Cap = spec::Cap::new()
    .with_dstrd(DOORBELL_STRIDE_BITS - 2)
    .with_mqes_z(MAX_QES - 1)
    .with_cqr(true)
    .with_css_nvm(true)
    .with_to(!0);
96
/// Construction-time parameters for [`NvmeController::new`].
#[derive(Debug, Copy, Clone)]
pub struct NvmeControllerCaps {
    /// Number of MSI-X interrupt vectors to expose.
    pub msix_count: u16,
    /// Maximum number of I/O queues supported by the controller.
    pub max_io_queues: u16,
    /// Identifier for the NVM subsystem, passed through to the workers.
    pub subsystem_id: Guid,
}
108
109impl NvmeController {
    /// Creates a new emulated NVMe controller.
    ///
    /// `register_mmio` is used to claim intercepted MMIO regions for BAR0
    /// (controller registers and doorbells) and for the MSI-X table in BAR4.
    /// Completed I/O is performed against `guest_memory`.
    pub fn new(
        driver_source: &VmTaskDriverSource,
        guest_memory: GuestMemory,
        msi_target: &MsiTarget,
        register_mmio: &mut dyn RegisterMmioIntercept,
        caps: NvmeControllerCaps,
    ) -> Self {
        // The MSI-X table is exposed through BAR 4.
        let (msix, msix_cap) = MsixEmulator::new(4, caps.msix_count, msi_target);
        let bars = DeviceBars::new()
            .bar0(
                BAR0_LEN,
                BarMemoryKind::Intercept(register_mmio.new_io_region("bar0", BAR0_LEN)),
            )
            .bar4(
                msix.bar_len(),
                BarMemoryKind::Intercept(register_mmio.new_io_region("msix", msix.bar_len())),
            );

        let cfg_space = ConfigSpaceType0Emulator::new(
            HardwareIds {
                vendor_id: VENDOR_ID,
                device_id: 0x00a9,
                revision_id: 0,
                prog_if: ProgrammingInterface::MASS_STORAGE_CONTROLLER_NON_VOLATILE_MEMORY_NVME,
                sub_class: Subclass::MASS_STORAGE_CONTROLLER_NON_VOLATILE_MEMORY,
                base_class: ClassCode::MASS_STORAGE_CONTROLLER,
                type0_sub_vendor_id: 0,
                type0_sub_system_id: 0,
            },
            vec![
                Box::new(msix_cap),
                Box::new(PciExpressCapability::new(
                    pci_core::spec::caps::pci_express::DevicePortType::Endpoint,
                    None,
                )),
            ],
            bars,
        );

        // One interrupt object per MSI-X vector, handed to the workers.
        let interrupts = (0..caps.msix_count)
            .map(|i| msix.interrupt(i).unwrap())
            .collect();

        let qe_sizes = Arc::new(Default::default());
        // NOTE(review): `max_io_queues` is passed twice — presumably once for
        // submission queues and once for completion queues; confirm against
        // `NvmeWorkers::new`'s parameter order.
        let admin = NvmeWorkers::new(
            driver_source,
            guest_memory,
            interrupts,
            caps.max_io_queues,
            caps.max_io_queues,
            Arc::clone(&qe_sizes),
            caps.subsystem_id,
        );

        Self {
            cfg_space,
            msix,
            registers: RegState::new(),
            workers: admin,
            qe_sizes,
        }
    }
173
    /// Returns a client handle for interacting with the controller's workers
    /// (e.g. from outside the MMIO path).
    pub fn client(&self) -> NvmeControllerClient {
        self.workers.client()
    }
178
179 pub fn read_bar0(&mut self, addr: u64, data: &mut [u8]) -> IoResult {
181 if data.len() < 4 {
182 return IoResult::Err(IoError::InvalidAccessSize);
183 }
184 if addr & (data.len() as u64 - 1) != 0 {
185 return IoResult::Err(IoError::UnalignedAccess);
186 }
187
188 let d: Option<u64> = match spec::Register(addr & !7) {
190 spec::Register::CAP => Some(CAP.into()),
191 spec::Register::ASQ => Some(self.registers.asq),
192 spec::Register::ACQ => Some(self.registers.acq),
193 spec::Register::BPMBL => Some(0),
194 _ => None,
195 };
196 if let Some(d) = d {
197 if data.len() == 8 {
198 data.copy_from_slice(&d.to_ne_bytes());
199 } else if addr & 7 == 0 {
200 data.copy_from_slice(&(d as u32).to_ne_bytes());
201 } else {
202 data.copy_from_slice(&((d >> 32) as u32).to_ne_bytes());
203 }
204 return IoResult::Ok;
205 }
206
207 if data.len() != 4 {
208 return IoResult::Err(IoError::InvalidAccessSize);
209 }
210
211 let d: u32 = match spec::Register(addr) {
213 spec::Register::VS => NVME_VERSION,
214 spec::Register::INTMS => self.registers.interrupt_mask,
215 spec::Register::INTMC => self.registers.interrupt_mask,
216 spec::Register::CC => self.registers.cc.into(),
217 spec::Register::RESERVED => 0,
218 spec::Register::CSTS => self.get_csts(),
219 spec::Register::NSSR => 0,
220 spec::Register::AQA => self.registers.aqa.into(),
221 spec::Register::CMBLOC => 0,
222 spec::Register::CMBSZ => 0,
223 spec::Register::BPINFO => 0,
224 spec::Register::BPRSEL => 0,
225 _ => return IoResult::Err(InvalidRegister),
226 };
227 data.copy_from_slice(&d.to_ne_bytes());
228 IoResult::Ok
229 }
230
    /// Writes a controller register or queue doorbell in BAR 0.
    pub fn write_bar0(&mut self, addr: u64, data: &[u8]) -> IoResult {
        // Offsets 0x1000 and up are the queue doorbell registers.
        if addr >= 0x1000 {
            let base = addr - 0x1000;
            let db_id = base >> DOORBELL_STRIDE_BITS;
            // Writes must land exactly on a doorbell stride boundary.
            if (db_id << DOORBELL_STRIDE_BITS) != base {
                return IoResult::Err(InvalidRegister);
            }
            // Doorbell writes must be exactly 32 bits.
            let Ok(data) = data.try_into() else {
                return IoResult::Err(IoError::InvalidAccessSize);
            };
            let value = u32::from_ne_bytes(data);
            let db_id = match u16::try_from(db_id) {
                Ok(id) => id,
                Err(_) => return IoResult::Err(InvalidRegister),
            };
            self.workers.doorbell(db_id, value);
            return IoResult::Ok;
        }

        if data.len() < 4 {
            return IoResult::Err(IoError::InvalidAccessSize);
        }
        if addr & (data.len() as u64 - 1) != 0 {
            return IoResult::Err(IoError::UnalignedAccess);
        }

        // Merges `data` into the 64-bit register value `x`: an 8-byte write
        // replaces it entirely; a 4-byte write replaces the low or high dword
        // depending on the offset within the register.
        let update_reg = |x: u64| {
            if data.len() == 8 {
                u64::from_ne_bytes(data.try_into().unwrap())
            } else {
                let data = u32::from_ne_bytes(data.try_into().unwrap()) as u64;
                if addr & 7 == 0 {
                    (x & !(u32::MAX as u64)) | data
                } else {
                    (x & u32::MAX as u64) | (data << 32)
                }
            }
        };

        // 64-bit registers, matched by their 8-byte-aligned base. ASQ/ACQ may
        // only change while the controller is disabled (CC.EN clear); the
        // addresses are forced to page alignment.
        let handled = match spec::Register(addr & !7) {
            spec::Register::ASQ => {
                if !self.registers.cc.en() {
                    self.registers.asq = update_reg(self.registers.asq) & PAGE_MASK;
                } else {
                    tracelimit::warn_ratelimited!("attempt to set asq while enabled");
                }
                true
            }
            spec::Register::ACQ => {
                if !self.registers.cc.en() {
                    self.registers.acq = update_reg(self.registers.acq) & PAGE_MASK;
                } else {
                    tracelimit::warn_ratelimited!("attempt to set acq while enabled");
                }
                true
            }
            _ => false,
        };
        if handled {
            return IoResult::Ok;
        }

        // The remaining registers are 32 bits wide and only accept 4-byte
        // writes.
        let Ok(data) = data.try_into() else {
            return IoResult::Err(IoError::InvalidAccessSize);
        };
        let data = u32::from_ne_bytes(data);

        match spec::Register(addr) {
            // INTMS sets mask bits; INTMC clears them.
            spec::Register::INTMS => self.registers.interrupt_mask |= data,
            spec::Register::INTMC => self.registers.interrupt_mask &= !data,
            spec::Register::CC => self.set_cc(data.into()),
            spec::Register::AQA => self.registers.aqa = data.into(),
            _ => return IoResult::Err(InvalidRegister),
        }
        IoResult::Ok
    }
311
    /// Handles a guest write to the Controller Configuration (CC) register,
    /// validating unsupported settings and driving enable/disable/shutdown
    /// transitions.
    fn set_cc(&mut self, cc: spec::Cc) {
        tracing::debug!(?cc, "set cc");

        // Only 4K memory pages are supported (MPS == 0).
        if cc.mps() != 0 {
            tracelimit::warn_ratelimited!(
                "This implementation only supports memory page sizes of 4K."
            );
            self.fatal_error();
            return;
        }

        // Only the NVM command set is supported (CSS == 0).
        if cc.css() != 0 {
            tracelimit::warn_ratelimited!("This implementation only supports the NVM command set.");
            self.fatal_error();
            return;
        }

        // AMS values 2..=6 are reserved.
        // NOTE(review): unlike the MPS/CSS paths above, this does not
        // `return` after the fatal error, so the write still takes effect
        // below — confirm this is intentional.
        if let 2..=6 = cc.ams() {
            tracelimit::warn_ratelimited!("Undefined arbitration mechanism.");
            self.fatal_error();
        }

        // Keep only the fields this implementation honors: EN, SHN, and the
        // queue entry size fields.
        let mask: u32 = u32::from(
            spec::Cc::new()
                .with_en(true)
                .with_shn(0b11)
                .with_iosqes(0b1111)
                .with_iocqes(0b1111),
        );
        let mut cc: spec::Cc = (u32::from(cc) & mask).into();

        // A shutdown notification immediately reports shutdown processing
        // complete (SHST = 10b).
        if cc.shn() != 0 {
            self.registers.csts.set_shst(0b10);
        }

        if cc.en() != self.registers.cc.en() {
            if cc.en() {
                // Zero queue entry sizes default to the supported sizes; any
                // other non-default size is rejected.
                if cc.iocqes() == 0 {
                    cc.set_iocqes(IOCQES);
                } else if cc.iocqes() != IOCQES {
                    tracelimit::warn_ratelimited!(
                        "This implementation only supports CQEs of the default size."
                    );
                    self.fatal_error();
                    return;
                }

                if cc.iosqes() == 0 {
                    cc.set_iosqes(IOSQES);
                } else if cc.iosqes() != IOSQES {
                    tracelimit::warn_ratelimited!(
                        "This implementation only supports SQEs of the default size."
                    );
                    self.fatal_error();
                    return;
                }

                // Still ready means a previous reset has not completed yet.
                if self.registers.csts.rdy() {
                    tracelimit::warn_ratelimited!("enabling during reset");
                    return;
                }
                // Re-enabling without a shutdown notification clears the
                // shutdown status.
                if cc.shn() == 0 {
                    self.registers.csts.set_shst(0);
                }

                // AQA sizes are zero-based; clamp to at least 2 entries.
                self.workers.enable(
                    self.registers.asq,
                    self.registers.aqa.asqs_z().max(1) + 1,
                    self.registers.acq,
                    self.registers.aqa.acqs_z().max(1) + 1,
                );
            } else if self.registers.csts.rdy() {
                // EN cleared while ready: begin a controller reset; CSTS.RDY
                // drops once the workers finish (see `get_csts`).
                self.workers.controller_reset();
            } else {
                tracelimit::warn_ratelimited!("disabling while not ready");
                return;
            }
        }

        self.registers.cc = cc;
        // Publish the (possibly defaulted) queue entry sizes to the workers.
        *self.qe_sizes.lock() = IoQueueEntrySizes {
            sqe_bits: cc.iosqes(),
            cqe_bits: cc.iocqes(),
        };
    }
401
402 fn get_csts(&mut self) -> u32 {
403 if !self.registers.cc.en() && self.registers.csts.rdy() {
404 if self.workers.poll_controller_reset() {
406 self.registers.csts = 0.into();
408 self.registers.cc = 0.into();
409 self.registers.interrupt_mask = 0;
410 }
411 } else if self.registers.cc.en() && !self.registers.csts.rdy() {
412 if self.workers.poll_enabled() {
413 self.registers.csts.set_rdy(true);
414 }
415 }
416
417 let csts = self.registers.csts;
418 tracing::debug!(?csts, "get csts");
419 csts.into()
420 }
421
    /// Reports a controller fatal error to the guest by setting CSTS.CFS.
    pub fn fatal_error(&mut self) {
        self.registers.csts.set_cfs(true);
    }
427}
428
impl ChangeDeviceState for NvmeController {
    fn start(&mut self) {}

    async fn stop(&mut self) {}

    /// Resets the controller back to its power-on state.
    async fn reset(&mut self) {
        // Exhaustively destructure `self` so that adding a field forces this
        // reset path to be revisited.
        let Self {
            cfg_space,
            msix: _,
            registers,
            qe_sizes,
            workers,
        } = self;
        workers.reset().await;
        cfg_space.reset();
        *registers = RegState::new();
        *qe_sizes.lock() = Default::default();
    }
}
448
impl ChipsetDevice for NvmeController {
    // The controller is reachable via MMIO (BARs) and PCI config space.
    fn supports_mmio(&mut self) -> Option<&mut dyn MmioIntercept> {
        Some(self)
    }

    fn supports_pci(&mut self) -> Option<&mut dyn PciConfigSpace> {
        Some(self)
    }
}
458
459impl MmioIntercept for NvmeController {
460 fn mmio_read(&mut self, addr: u64, data: &mut [u8]) -> IoResult {
461 match self.cfg_space.find_bar(addr) {
462 Some((0, offset)) => self.read_bar0(offset, data),
463 Some((4, offset)) => {
464 read_as_u32_chunks(offset, data, |offset| self.msix.read_u32(offset));
465 IoResult::Ok
466 }
467 _ => IoResult::Err(InvalidRegister),
468 }
469 }
470
471 fn mmio_write(&mut self, addr: u64, data: &[u8]) -> IoResult {
472 match self.cfg_space.find_bar(addr) {
473 Some((0, offset)) => self.write_bar0(offset, data),
474 Some((4, offset)) => {
475 write_as_u32_chunks(offset, data, |offset, ty| match ty {
476 ReadWriteRequestType::Read => Some(self.msix.read_u32(offset)),
477 ReadWriteRequestType::Write(val) => {
478 self.msix.write_u32(offset, val);
479 None
480 }
481 });
482 IoResult::Ok
483 }
484 _ => IoResult::Err(InvalidRegister),
485 }
486 }
487}
488
impl PciConfigSpace for NvmeController {
    // Config space accesses are delegated to the type-0 emulator.
    fn pci_cfg_read(&mut self, offset: u16, value: &mut u32) -> IoResult {
        self.cfg_space.read_u32(offset, value)
    }

    fn pci_cfg_write(&mut self, offset: u16, value: u32) -> IoResult {
        self.cfg_space.write_u32(offset, value)
    }
}
498
impl SaveRestore for NvmeController {
    // Saved states are not supported for this device yet.
    type SavedState = SavedStateNotSupported;

    fn save(&mut self) -> Result<Self::SavedState, SaveError> {
        Err(SaveError::NotSupported)
    }

    fn restore(
        &mut self,
        state: Self::SavedState,
    ) -> Result<(), vmcore::save_restore::RestoreError> {
        // `SavedStateNotSupported` is uninhabited, so this is unreachable.
        match state {}
    }
}