use crate::PciInterruptPin;
use crate::bar_mapping::BarMappings;
use crate::capabilities::PciCapability;
use crate::spec::cfg_space;
use crate::spec::hwid::HardwareIds;
use chipset_device::io::IoError;
use chipset_device::io::IoResult;
use chipset_device::mmio::ControlMmioIntercept;
use guestmem::MappableGuestMemory;
use inspect::Inspect;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use vmcore::line_interrupt::LineInterrupt;

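// Command register bits that this emulator lets the guest control; writes to
// any other bits are masked off.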
const SUPPORTED_COMMAND_BITS: u16 = cfg_space::Command::new()
    .with_pio_enabled(true)
    .with_mmio_enabled(true)
    .with_bus_master(true)
    .with_special_cycles(true)
    .with_enable_memory_write_invalidate(true)
    .with_vga_palette_snoop(true)
    .with_parity_error_response(true)
    .with_enable_serr(true)
    .with_enable_fast_b2b(true)
    .with_intx_disable(true)
    .into_bits();

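/// A legacy INTx interrupt line, gated by the command register's INTx disable
/// bit and reflected in the status register's interrupt status bit.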
#[derive(Debug, Inspect)]
pub struct IntxInterrupt {
    pin: PciInterruptPin,
    line: LineInterrupt,
    interrupt_disabled: AtomicBool,
    interrupt_status: AtomicBool,
}

impl IntxInterrupt {
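    /// Sets the logical level of the interrupt.
    ///
    /// The underlying line is only driven high if the guest has not set the
    /// INTx disable bit in the command register; the requested level is always
    /// recorded so the status register stays accurate.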
    pub fn set_level(&self, high: bool) {
        tracing::debug!(
            disabled = ?self.interrupt_disabled,
            status = ?self.interrupt_status,
            ?high,
            %self.line,
            "set_level"
        );

        self.interrupt_status.store(high, Ordering::SeqCst);

        if self.interrupt_disabled.load(Ordering::SeqCst) {
            self.line.set_level(false);
        } else {
            self.line.set_level(high);
        }
    }

    fn set_disabled(&self, disabled: bool) {
        tracing::debug!(
            disabled = ?self.interrupt_disabled,
            status = ?self.interrupt_status,
            ?disabled,
            %self.line,
            "set_disabled"
        );

        self.interrupt_disabled.store(disabled, Ordering::SeqCst);
        if disabled {
            self.line.set_level(false)
        } else if self.interrupt_status.load(Ordering::SeqCst) {
            self.line.set_level(true)
        }
    }
}

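/// The guest-visible, mutable state of the config space. This is the portion
/// that is saved and restored.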
#[derive(Debug, Inspect)]
struct ConfigSpaceType0EmulatorState {
    command: cfg_space::Command,
    #[inspect(with = "inspect_helpers::bars")]
    base_addresses: [u32; 6],
    interrupt_line: u8,
    latency_timer: u8,
}

impl ConfigSpaceType0EmulatorState {
    fn new() -> Self {
        Self {
            latency_timer: 0,
            command: cfg_space::Command::new(),
            base_addresses: [0; 6],
            interrupt_line: 0,
        }
    }
}

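/// Emulates the configuration space of a PCI device with a type 0 (endpoint)
/// header: hardware IDs, the command and status registers, up to three 64-bit
/// memory BARs, a capability list, and legacy INTx interrupt plumbing.
///
/// A rough construction sketch (the hardware IDs, capability list, and BAR
/// layout below are placeholders, not a real device):
///
/// ```ignore
/// let cfg_space = ConfigSpaceType0Emulator::new(
///     hardware_ids,     // `HardwareIds` describing the emulated device
///     Vec::new(),       // no PCI capabilities
///     DeviceBars::new().bar0(0x1000, BarMemoryKind::Dummy),
/// );
/// ```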
#[derive(Inspect)]
pub struct ConfigSpaceType0Emulator {
    #[inspect(with = "inspect_helpers::bars")]
    bar_masks: [u32; 6],
    hardware_ids: HardwareIds,
    multi_function_bit: bool,

    #[inspect(with = r#"|x| inspect::iter_by_index(x).prefix("bar")"#)]
    mapped_memory: [Option<BarMemoryKind>; 6],
    #[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
    capabilities: Vec<Box<dyn PciCapability>>,
    intx_interrupt: Option<Arc<IntxInterrupt>>,

    active_bars: BarMappings,

    state: ConfigSpaceType0EmulatorState,
}

mod inspect_helpers {
    use super::*;

    pub(crate) fn bars(bars: &[u32; 6]) -> impl Inspect + '_ {
        inspect::AsHex(inspect::iter_by_index(bars).prefix("bar"))
    }
}

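/// The backing for a memory BAR: an MMIO intercept region, directly mapped
/// guest memory, or a dummy BAR with no backing at all.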
#[derive(Inspect)]
#[inspect(tag = "kind")]
pub enum BarMemoryKind {
    Intercept(#[inspect(rename = "handle")] Box<dyn ControlMmioIntercept>),
    SharedMem(#[inspect(skip)] Box<dyn MappableGuestMemory>),
    Dummy,
}

impl std::fmt::Debug for BarMemoryKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Intercept(control) => {
                write!(f, "Intercept(region_name: {}, ..)", control.region_name())
            }
            Self::SharedMem(_) => write!(f, "Mmap(..)"),
            Self::Dummy => write!(f, "Dummy"),
        }
    }
}

impl BarMemoryKind {
    fn map_to_guest(&mut self, gpa: u64) -> std::io::Result<()> {
        match self {
            BarMemoryKind::Intercept(control) => {
                control.map(gpa);
                Ok(())
            }
            BarMemoryKind::SharedMem(control) => control.map_to_guest(gpa, true),
            BarMemoryKind::Dummy => Ok(()),
        }
    }

    fn unmap_from_guest(&mut self) {
        match self {
            BarMemoryKind::Intercept(control) => control.unmap(),
            BarMemoryKind::SharedMem(control) => control.unmap_from_guest(),
            BarMemoryKind::Dummy => {}
        }
    }
}

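/// Builder for declaring the memory BARs a device exposes.
///
/// Every BAR is registered as a 64-bit memory BAR, so each one occupies two
/// BAR slots and only the even-numbered slots (0, 2, and 4) are available.
///
/// A minimal sketch (the lengths and backings are placeholders):
///
/// ```ignore
/// let bars = DeviceBars::new()
///     .bar0(0x1000, BarMemoryKind::Dummy)
///     .bar2(0x4000, BarMemoryKind::Dummy);
/// ```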
#[derive(Debug)]
pub struct DeviceBars {
    bars: [Option<(u64, BarMemoryKind)>; 6],
}

impl DeviceBars {
    pub fn new() -> DeviceBars {
        DeviceBars {
            bars: Default::default(),
        }
    }

    pub fn bar0(mut self, len: u64, memory: BarMemoryKind) -> Self {
        self.bars[0] = Some((len, memory));
        self
    }

    pub fn bar2(mut self, len: u64, memory: BarMemoryKind) -> Self {
        self.bars[2] = Some((len, memory));
        self
    }

    pub fn bar4(mut self, len: u64, memory: BarMemoryKind) -> Self {
        self.bars[4] = Some((len, memory));
        self
    }
}

impl ConfigSpaceType0Emulator {
    pub fn new(
        hardware_ids: HardwareIds,
        capabilities: Vec<Box<dyn PciCapability>>,
        bars: DeviceBars,
    ) -> Self {
        let mut bar_masks = [0; 6];
        let mut mapped_memory = {
            const NONE: Option<BarMemoryKind> = None;
            [NONE; 6]
        };
        for (bar_index, bar) in bars.bars.into_iter().enumerate() {
            let (len, mapped) = match bar {
                Some(bar) => bar,
                None => continue,
            };
            assert!(bar_index < 5);
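            // Round each BAR up to a power-of-two size of at least one page.
            // The guest sizes a BAR by writing all ones and reading back the
            // result, so the mask computed below leaves the size bits reading
            // as zero while the low bits encode a 64-bit memory BAR.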
            const MIN_BAR_SIZE: u64 = 4096;
            let len = std::cmp::max(len.next_power_of_two(), MIN_BAR_SIZE);
            let mask64 = !(len - 1);
            bar_masks[bar_index] = cfg_space::BarEncodingBits::from_bits(mask64 as u32)
                .with_type_64_bit(true)
                .into_bits();
            bar_masks[bar_index + 1] = (mask64 >> 32) as u32;
            mapped_memory[bar_index] = Some(mapped);
        }

        Self {
            bar_masks,
            hardware_ids,
            multi_function_bit: false,

            active_bars: Default::default(),

            mapped_memory,
            capabilities,
            intx_interrupt: None,

            state: ConfigSpaceType0EmulatorState {
                command: cfg_space::Command::new(),
                base_addresses: [0; 6],
                interrupt_line: 0,
                latency_timer: 0,
            },
        }
    }

    pub fn with_multi_function_bit(mut self, bit: bool) -> Self {
        self.multi_function_bit = bit;
        self
    }

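    /// Routes legacy INTx assertions for this device to `line`, reported to
    /// the guest via the given interrupt `pin`. Returns a handle the device
    /// can use to raise or lower the interrupt.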
    pub fn set_interrupt_pin(
        &mut self,
        pin: PciInterruptPin,
        line: LineInterrupt,
    ) -> Arc<IntxInterrupt> {
        let intx_interrupt = Arc::new(IntxInterrupt {
            pin,
            line,
            interrupt_disabled: AtomicBool::new(false),
            interrupt_status: AtomicBool::new(false),
        });
        self.intx_interrupt = Some(intx_interrupt.clone());
        intx_interrupt
    }

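    /// Resets the config space to its power-on state: the command register is
    /// cleared (unmapping any BARs), capabilities are reset, and the INTx line
    /// is lowered.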
    pub fn reset(&mut self) {
        self.state = ConfigSpaceType0EmulatorState::new();

        self.sync_command_register(self.state.command);

        for cap in &mut self.capabilities {
            cap.reset();
        }

        if let Some(intx) = &mut self.intx_interrupt {
            intx.set_level(false);
        }
    }

    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
        let mut cap_offset = 0;
        for i in 0..self.capabilities.len() {
            let cap_size = self.capabilities[i].len() as u16;
            if offset < cap_offset + cap_size {
                return Some((i, offset - cap_offset));
            }
            cap_offset += cap_size;
        }
        None
    }

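    /// Handles a 32-bit read of the config space register at `offset`, storing
    /// the result in `value`.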
    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
        use cfg_space::HeaderType00;

        *value = match HeaderType00(offset) {
            HeaderType00::DEVICE_VENDOR => {
                (self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
            }
            HeaderType00::STATUS_COMMAND => {
                let mut status =
                    cfg_space::Status::new().with_capabilities_list(!self.capabilities.is_empty());

                if let Some(intx_interrupt) = &self.intx_interrupt {
                    if intx_interrupt.interrupt_status.load(Ordering::SeqCst) {
                        status.set_interrupt_status(true);
                    }
                }

                (status.into_bits() as u32) << 16 | self.state.command.into_bits() as u32
            }
            HeaderType00::CLASS_REVISION => {
                (u8::from(self.hardware_ids.base_class) as u32) << 24
                    | (u8::from(self.hardware_ids.sub_class) as u32) << 16
                    | (u8::from(self.hardware_ids.prog_if) as u32) << 8
                    | self.hardware_ids.revision_id as u32
            }
            HeaderType00::BIST_HEADER => {
                let mut v = (self.state.latency_timer as u32) << 8;
                if self.multi_function_bit {
                    v |= 0x80 << 16;
                }
                v
            }
            HeaderType00::BAR0
            | HeaderType00::BAR1
            | HeaderType00::BAR2
            | HeaderType00::BAR3
            | HeaderType00::BAR4
            | HeaderType00::BAR5 => {
                self.state.base_addresses[(offset - HeaderType00::BAR0.0) as usize / 4]
            }
            HeaderType00::CARDBUS_CIS_PTR => 0,
            HeaderType00::SUBSYSTEM_ID => {
                (self.hardware_ids.type0_sub_system_id as u32) << 16
                    | self.hardware_ids.type0_sub_vendor_id as u32
            }
            HeaderType00::EXPANSION_ROM_BASE => 0,
            HeaderType00::RESERVED_CAP_PTR => {
                if self.capabilities.is_empty() {
                    0
                } else {
                    0x40
                }
            }
            HeaderType00::RESERVED => 0,
            HeaderType00::LATENCY_INTERRUPT => {
                let interrupt_pin = if let Some(intx_interrupt) = &self.intx_interrupt {
                    match intx_interrupt.pin {
                        PciInterruptPin::IntA => 1,
                        PciInterruptPin::IntB => 2,
                        PciInterruptPin::IntC => 3,
                        PciInterruptPin::IntD => 4,
                    }
                } else {
                    0
                };
                self.state.interrupt_line as u32 | (interrupt_pin as u32) << 8
            }
            _ if (0x40..0x100).contains(&offset) => {
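                // Offsets 0x40..0x100 are serviced by the capability list. Each
                // capability supplies its own ID and body; the next-capability
                // pointer (byte 1 of the header) is patched in here based on
                // where the following capability starts.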
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    let mut value = self.capabilities[cap_index].read_u32(cap_offset);
                    if cap_offset == 0 {
                        let next = if cap_index < self.capabilities.len() - 1 {
                            offset as u32 + self.capabilities[cap_index].len() as u32
                        } else {
                            0
                        };
                        assert!(value & 0xff00 == 0);
                        value |= next << 8;
                    }
                    value
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ if (0x100..0x1000).contains(&offset) => {
                if offset == 0x100 {
                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
                    0x0000_ffff
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, "unexpected config space read");
                return IoResult::Err(IoError::InvalidRegister);
            }
        };

        IoResult::Ok
    }

    fn update_intx_disable(&mut self, command: cfg_space::Command) {
        if let Some(intx_interrupt) = &self.intx_interrupt {
            intx_interrupt.set_disabled(command.intx_disable())
        }
    }

    fn update_mmio_enabled(&mut self, command: cfg_space::Command) {
        if command.mmio_enabled() {
            self.active_bars = BarMappings::parse(&self.state.base_addresses, &self.bar_masks);
            for (bar, mapping) in self.mapped_memory.iter_mut().enumerate() {
                if let Some(mapping) = mapping {
                    let base = self.active_bars.get(bar as u8).expect("bar exists");
                    match mapping.map_to_guest(base) {
                        Ok(_) => {}
                        Err(err) => {
                            tracelimit::error_ratelimited!(
                                error = &err as &dyn std::error::Error,
                                bar,
                                base,
                                "failed to map bar",
                            )
                        }
                    }
                }
            }
        } else {
            self.active_bars = Default::default();
            for mapping in self.mapped_memory.iter_mut().flatten() {
                mapping.unmap_from_guest();
            }
        }
    }

    fn sync_command_register(&mut self, command: cfg_space::Command) {
        self.update_intx_disable(command);
        self.update_mmio_enabled(command);
    }

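    /// Handles a 32-bit write of `val` to the config space register at
    /// `offset`.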
    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
        use cfg_space::HeaderType00;

        match HeaderType00(offset) {
            HeaderType00::STATUS_COMMAND => {
                let mut command = cfg_space::Command::from_bits(val as u16);
                if command.into_bits() & !SUPPORTED_COMMAND_BITS != 0 {
                    tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
                    command =
                        cfg_space::Command::from_bits(command.into_bits() & SUPPORTED_COMMAND_BITS);
                }

                if self.state.command.intx_disable() != command.intx_disable() {
                    self.update_intx_disable(command)
                }

                if self.state.command.mmio_enabled() != command.mmio_enabled() {
                    self.update_mmio_enabled(command)
                }

                self.state.command = command;
            }
            HeaderType00::BIST_HEADER => {
                let timer_val = (val >> 8) as u8;
                self.state.latency_timer = timer_val;
            }
            HeaderType00::BAR0
            | HeaderType00::BAR1
            | HeaderType00::BAR2
            | HeaderType00::BAR3
            | HeaderType00::BAR4
            | HeaderType00::BAR5 => {
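                // BARs are only writable while MMIO decoding is disabled. The
                // written value is masked to the BAR's size so that the guest
                // reads back the size mask when probing the BAR.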
                if !self.state.command.mmio_enabled() {
                    let bar_index = (offset - HeaderType00::BAR0.0) as usize / 4;
                    let mut bar_value = val & self.bar_masks[bar_index];
                    if bar_index & 1 == 0 && self.bar_masks[bar_index] != 0 {
                        bar_value = cfg_space::BarEncodingBits::from_bits(bar_value)
                            .with_type_64_bit(true)
                            .into_bits();
                    }
                    self.state.base_addresses[bar_index] = bar_value;
                }
            }
            HeaderType00::LATENCY_INTERRUPT => {
                // Only the interrupt line (bits 7:0) is guest-writable; keep it
                // in the low byte so it matches the read path above.
                self.state.interrupt_line = (val & 0xff) as u8;
            }
            _ if offset < 0x40 && offset % 4 == 0 => (),
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    self.capabilities[cap_index].write_u32(cap_offset, val);
                } else {
                    tracelimit::warn_ratelimited!(
                        offset,
                        value = val,
                        "unhandled config space write"
                    );
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ if (0x100..0x1000).contains(&offset) => {
                tracelimit::warn_ratelimited!(
                    offset,
                    value = val,
                    "unhandled extended config space write"
                );
                return IoResult::Err(IoError::InvalidRegister);
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
                return IoResult::Err(IoError::InvalidRegister);
            }
        }

        IoResult::Ok
    }

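    /// Returns the BAR index and offset for an address decoded by one of the
    /// currently mapped BARs, if any.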
    pub fn find_bar(&self, address: u64) -> Option<(u8, u16)> {
        self.active_bars.find(address)
    }
}

mod save_restore {
    use super::*;
    use thiserror::Error;
    use vmcore::save_restore::RestoreError;
    use vmcore::save_restore::SaveError;
    use vmcore::save_restore::SaveRestore;

    mod state {
        use mesh::payload::Protobuf;
        use vmcore::save_restore::SavedStateBlob;
        use vmcore::save_restore::SavedStateRoot;

        #[derive(Protobuf, SavedStateRoot)]
        #[mesh(package = "pci.cfg_space_emu")]
        pub struct SavedState {
            #[mesh(1)]
            pub command: u16,
            #[mesh(2)]
            pub base_addresses: [u32; 6],
            #[mesh(3)]
            pub interrupt_line: u8,
            #[mesh(4)]
            pub latency_timer: u8,
            #[mesh(5)]
            pub capabilities: Vec<(String, SavedStateBlob)>,
        }
    }

    #[derive(Debug, Error)]
    enum ConfigSpaceRestoreError {
        #[error("found invalid config bits in saved state")]
        InvalidConfigBits,
        #[error("found unexpected capability {0}")]
        InvalidCap(String),
    }

    impl SaveRestore for ConfigSpaceType0Emulator {
        type SavedState = state::SavedState;

        fn save(&mut self) -> Result<Self::SavedState, SaveError> {
            let ConfigSpaceType0EmulatorState {
                command,
                base_addresses,
                interrupt_line,
                latency_timer,
            } = self.state;

            let saved_state = state::SavedState {
                command: command.into_bits(),
                base_addresses,
                interrupt_line,
                latency_timer,
                capabilities: self
                    .capabilities
                    .iter_mut()
                    .map(|cap| {
                        let id = cap.label().to_owned();
                        Ok((id, cap.save()?))
                    })
                    .collect::<Result<_, _>>()?,
            };

            Ok(saved_state)
        }

        fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
            let state::SavedState {
                command,
                base_addresses,
                interrupt_line,
                latency_timer,
                capabilities,
            } = state;

            self.state = ConfigSpaceType0EmulatorState {
                command: cfg_space::Command::from_bits(command),
                base_addresses,
                interrupt_line,
                latency_timer,
            };

            if command & !SUPPORTED_COMMAND_BITS != 0 {
                return Err(RestoreError::InvalidSavedState(
                    ConfigSpaceRestoreError::InvalidConfigBits.into(),
                ));
            }

            self.sync_command_register(self.state.command);
            for (id, entry) in capabilities {
                tracing::debug!(save_id = id.as_str(), "restoring pci capability");

                let mut restored = false;
                for cap in self.capabilities.iter_mut() {
                    if cap.label() == id {
                        cap.restore(entry)?;
                        restored = true;
                        break;
                    }
                }

                if !restored {
                    return Err(RestoreError::InvalidSavedState(
                        ConfigSpaceRestoreError::InvalidCap(id).into(),
                    ));
                }
            }

            Ok(())
        }
    }
}