1use crate::PciInterruptPin;
10use crate::bar_mapping::BarMappings;
11use crate::capabilities::PciCapability;
12use crate::spec::cfg_space;
13use crate::spec::hwid::HardwareIds;
14use chipset_device::io::IoError;
15use chipset_device::io::IoResult;
16use chipset_device::mmio::ControlMmioIntercept;
17use guestmem::MappableGuestMemory;
18use inspect::Inspect;
19use std::ops::RangeInclusive;
20use std::sync::Arc;
21use std::sync::atomic::AtomicBool;
22use std::sync::atomic::Ordering;
23use vmcore::line_interrupt::LineInterrupt;
24
/// Bitmask of every Command-register bit these emulators implement. Guest
/// writes are checked against this mask; unsupported bits are logged and
/// stripped before the value is stored.
const SUPPORTED_COMMAND_BITS: u16 = cfg_space::Command::new()
    .with_pio_enabled(true)
    .with_mmio_enabled(true)
    .with_bus_master(true)
    .with_special_cycles(true)
    .with_enable_memory_write_invalidate(true)
    .with_vga_palette_snoop(true)
    .with_parity_error_response(true)
    .with_enable_serr(true)
    .with_enable_fast_b2b(true)
    .with_intx_disable(true)
    .into_bits();
37
/// Emulated legacy INTx (level-triggered) interrupt for a PCI function.
///
/// Tracks both the level requested by the device and the command register's
/// INTx-disable bit, so the underlying line is only asserted when the device
/// requests it *and* delivery is not masked.
#[derive(Debug, Inspect)]
pub struct IntxInterrupt {
    // Which INTx pin (A-D) this function reports in its config space.
    pin: PciInterruptPin,
    // The underlying interrupt line that is raised/lowered.
    line: LineInterrupt,
    // Mirrors the command register's INTx-disable bit.
    interrupt_disabled: AtomicBool,
    // The level most recently requested via `set_level` (also reported in
    // the config-space Status register).
    interrupt_status: AtomicBool,
}
47
48impl IntxInterrupt {
49 pub fn set_level(&self, high: bool) {
54 tracing::debug!(
55 disabled = ?self.interrupt_disabled,
56 status = ?self.interrupt_status,
57 ?high,
58 %self.line,
59 "set_level"
60 );
61
62 self.interrupt_status.store(high, Ordering::SeqCst);
64
65 if self.interrupt_disabled.load(Ordering::SeqCst) {
67 self.line.set_level(false);
68 } else {
69 self.line.set_level(high);
70 }
71 }
72
73 fn set_disabled(&self, disabled: bool) {
74 tracing::debug!(
75 disabled = ?self.interrupt_disabled,
76 status = ?self.interrupt_status,
77 ?disabled,
78 %self.line,
79 "set_disabled"
80 );
81
82 self.interrupt_disabled.store(disabled, Ordering::SeqCst);
83 if disabled {
84 self.line.set_level(false)
85 } else {
86 if self.interrupt_status.load(Ordering::SeqCst) {
87 self.line.set_level(true)
88 }
89 }
90 }
91}
92
/// Guest-mutable register state for [`ConfigSpaceType0Emulator`], kept
/// separate so it can be saved/restored as a unit.
#[derive(Debug, Inspect)]
struct ConfigSpaceType0EmulatorState {
    // The command register.
    command: cfg_space::Command,
    // Programmed BAR values (already masked to each BAR's size mask).
    #[inspect(with = "inspect_helpers::bars")]
    base_addresses: [u32; 6],
    // The interrupt line register (routing hint written by firmware/OS).
    interrupt_line: u8,
    // The latency timer register; stored on write and read back, otherwise
    // unused by the emulator.
    latency_timer: u8,
}
108
109impl ConfigSpaceType0EmulatorState {
110 fn new() -> Self {
111 Self {
112 latency_timer: 0,
113 command: cfg_space::Command::new(),
114 base_addresses: [0; 6],
115 interrupt_line: 0,
116 }
117 }
118}
119
/// Emulator for a PCI type-0 (endpoint) configuration space header, plus a
/// linear capability list starting at offset 0x40.
#[derive(Inspect)]
pub struct ConfigSpaceType0Emulator {
    // Per-register BAR size masks computed at construction; a mask of 0
    // means the BAR is not implemented.
    #[inspect(with = "inspect_helpers::bars")]
    bar_masks: [u32; 6],
    // Static identification registers (vendor/device/class/subsystem ids).
    hardware_ids: HardwareIds,
    // When set, bit 7 of the header-type field reads as 1.
    multi_function_bit: bool,

    // Backing memory for each implemented BAR, mapped/unmapped as the
    // command register's MMIO-enable bit toggles.
    #[inspect(with = r#"|x| inspect::iter_by_index(x).prefix("bar")"#)]
    mapped_memory: [Option<BarMemoryKind>; 6],
    // Capabilities exposed contiguously from offset 0x40, in order.
    #[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
    capabilities: Vec<Box<dyn PciCapability>>,
    // Legacy INTx plumbing, if an interrupt pin has been assigned.
    intx_interrupt: Option<Arc<IntxInterrupt>>,

    // Decoded BAR mappings; populated only while MMIO decoding is enabled.
    active_bars: BarMappings,

    // Guest-mutable register state (saved/restored).
    state: ConfigSpaceType0EmulatorState,
}
145
146mod inspect_helpers {
147 use super::*;
148
149 pub(crate) fn bars(bars: &[u32; 6]) -> impl Inspect + '_ {
150 inspect::AsHex(inspect::iter_by_index(bars).prefix("bar"))
151 }
152}
153
/// The backing for a PCI BAR region.
#[derive(Inspect)]
#[inspect(tag = "kind")]
pub enum BarMemoryKind {
    /// Guest MMIO accesses are intercepted and handled by the device.
    Intercept(#[inspect(rename = "handle")] Box<dyn ControlMmioIntercept>),
    /// The BAR is backed by memory mapped directly into the guest.
    SharedMem(#[inspect(skip)] Box<dyn MappableGuestMemory>),
    /// No backing at all; map/unmap operations are no-ops.
    Dummy,
}
165
166impl std::fmt::Debug for BarMemoryKind {
167 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
168 match self {
169 Self::Intercept(control) => {
170 write!(f, "Intercept(region_name: {}, ..)", control.region_name())
171 }
172 Self::SharedMem(_) => write!(f, "Mmap(..)"),
173 Self::Dummy => write!(f, "Dummy"),
174 }
175 }
176}
177
impl BarMemoryKind {
    /// Maps this BAR's backing at the given guest physical address.
    ///
    /// Intercept and dummy backings cannot fail; only shared-memory
    /// mappings can return an I/O error.
    fn map_to_guest(&mut self, gpa: u64) -> std::io::Result<()> {
        match self {
            BarMemoryKind::Intercept(control) => {
                control.map(gpa);
                Ok(())
            }
            // NOTE(review): the `true` flag presumably requests a writable
            // mapping — confirm against `MappableGuestMemory::map_to_guest`.
            BarMemoryKind::SharedMem(control) => control.map_to_guest(gpa, true),
            BarMemoryKind::Dummy => Ok(()),
        }
    }

    /// Removes any active guest mapping for this BAR.
    fn unmap_from_guest(&mut self) {
        match self {
            BarMemoryKind::Intercept(control) => control.unmap(),
            BarMemoryKind::SharedMem(control) => control.unmap_from_guest(),
            BarMemoryKind::Dummy => {}
        }
    }
}
198
/// Builder describing a device's BARs: for each slot, an optional
/// (length in bytes, backing memory) pair.
#[derive(Debug)]
pub struct DeviceBars {
    // Indexed by BAR register number. Only even slots are populated by the
    // builder methods, since each BAR is exposed as 64-bit and consumes the
    // following odd register too.
    bars: [Option<(u64, BarMemoryKind)>; 6],
}
207
208impl DeviceBars {
209 pub fn new() -> DeviceBars {
211 DeviceBars {
212 bars: Default::default(),
213 }
214 }
215
216 pub fn bar0(mut self, len: u64, memory: BarMemoryKind) -> Self {
218 self.bars[0] = Some((len, memory));
219 self
220 }
221
222 pub fn bar2(mut self, len: u64, memory: BarMemoryKind) -> Self {
224 self.bars[2] = Some((len, memory));
225 self
226 }
227
228 pub fn bar4(mut self, len: u64, memory: BarMemoryKind) -> Self {
230 self.bars[4] = Some((len, memory));
231 self
232 }
233}
234
impl ConfigSpaceType0Emulator {
    /// Creates a new type-0 config space emulator.
    ///
    /// Each provided BAR length is rounded up to a power of two (minimum
    /// 4K) and registered as a 64-bit memory BAR, which also consumes the
    /// following odd-numbered BAR register for the high address bits.
    pub fn new(
        hardware_ids: HardwareIds,
        capabilities: Vec<Box<dyn PciCapability>>,
        bars: DeviceBars,
    ) -> Self {
        let mut bar_masks = [0; 6];
        let mut mapped_memory = {
            // `Option<BarMemoryKind>` is not `Copy`, so seed the array
            // repetition with a const item instead of a plain expression.
            const NONE: Option<BarMemoryKind> = None;
            [NONE; 6]
        };
        for (bar_index, bar) in bars.bars.into_iter().enumerate() {
            let (len, mapped) = match bar {
                Some(bar) => bar,
                None => continue,
            };
            // A 64-bit BAR occupies a register pair, so index 5 would have
            // no room for its upper half.
            assert!(bar_index < 5);
            // Enforce page alignment/size so mappings are always valid.
            const MIN_BAR_SIZE: u64 = 4096;
            let len = std::cmp::max(len.next_power_of_two(), MIN_BAR_SIZE);
            // Size mask: ones in the address bits, zeros in the low
            // (size-1) bits, split across the low/high register pair.
            let mask64 = !(len - 1);
            bar_masks[bar_index] = cfg_space::BarEncodingBits::from_bits(mask64 as u32)
                .with_type_64_bit(true)
                .into_bits();
            bar_masks[bar_index + 1] = (mask64 >> 32) as u32;
            mapped_memory[bar_index] = Some(mapped);
        }

        Self {
            bar_masks,
            hardware_ids,
            multi_function_bit: false,

            active_bars: Default::default(),

            mapped_memory,
            capabilities,
            intx_interrupt: None,

            state: ConfigSpaceType0EmulatorState {
                command: cfg_space::Command::new(),
                base_addresses: [0; 6],
                interrupt_line: 0,
                latency_timer: 0,
            },
        }
    }

    /// Sets whether the header-type register reports this function as part
    /// of a multi-function device (bit 7 of the header type field).
    pub fn with_multi_function_bit(mut self, bit: bool) -> Self {
        self.multi_function_bit = bit;
        self
    }

    /// Assigns a legacy INTx pin and its backing line.
    ///
    /// Returns a shared [`IntxInterrupt`] handle the device can use to
    /// raise/lower the interrupt; the emulator keeps a clone so the command
    /// register's INTx-disable bit and the Status register stay in sync.
    pub fn set_interrupt_pin(
        &mut self,
        pin: PciInterruptPin,
        line: LineInterrupt,
    ) -> Arc<IntxInterrupt> {
        let intx_interrupt = Arc::new(IntxInterrupt {
            pin,
            line,
            interrupt_disabled: AtomicBool::new(false),
            interrupt_status: AtomicBool::new(false),
        });
        self.intx_interrupt = Some(intx_interrupt.clone());
        intx_interrupt
    }

    /// Resets the config space to power-on defaults, unmapping any BARs and
    /// lowering any pending INTx interrupt.
    pub fn reset(&mut self) {
        self.state = ConfigSpaceType0EmulatorState::new();

        // Re-apply the (now cleared) command register side effects: INTx
        // unmasked, BARs unmapped.
        self.sync_command_register(self.state.command);

        for cap in &mut self.capabilities {
            cap.reset();
        }

        if let Some(intx) = &mut self.intx_interrupt {
            intx.set_level(false);
        }
    }

    // Translates an offset relative to the capability list's base (0x40)
    // into (capability index, offset within that capability). Capabilities
    // are laid out contiguously in declaration order.
    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
        let mut cap_offset = 0;
        for i in 0..self.capabilities.len() {
            let cap_size = self.capabilities[i].len() as u16;
            if offset < cap_offset + cap_size {
                return Some((i, offset - cap_offset));
            }
            cap_offset += cap_size;
        }
        None
    }

    /// Reads a 32-bit config-space register at the given (dword-aligned)
    /// offset, storing the result in `value`.
    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
        use cfg_space::HeaderType00;

        *value = match HeaderType00(offset) {
            HeaderType00::DEVICE_VENDOR => {
                (self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
            }
            HeaderType00::STATUS_COMMAND => {
                let mut status =
                    cfg_space::Status::new().with_capabilities_list(!self.capabilities.is_empty());

                // Surface a pending (possibly masked) INTx interrupt in the
                // Status register.
                if let Some(intx_interrupt) = &self.intx_interrupt {
                    if intx_interrupt.interrupt_status.load(Ordering::SeqCst) {
                        status.set_interrupt_status(true);
                    }
                }

                (status.into_bits() as u32) << 16 | self.state.command.into_bits() as u32
            }
            HeaderType00::CLASS_REVISION => {
                (u8::from(self.hardware_ids.base_class) as u32) << 24
                    | (u8::from(self.hardware_ids.sub_class) as u32) << 16
                    | (u8::from(self.hardware_ids.prog_if) as u32) << 8
                    | self.hardware_ids.revision_id as u32
            }
            HeaderType00::BIST_HEADER => {
                // Latency timer in bits 15:8; header type 0x00 with the
                // multi-function flag (bit 7 of the header-type byte) set
                // if configured.
                let mut v = (self.state.latency_timer as u32) << 8;
                if self.multi_function_bit {
                    v |= 0x80 << 16;
                }
                v
            }
            HeaderType00::BAR0
            | HeaderType00::BAR1
            | HeaderType00::BAR2
            | HeaderType00::BAR3
            | HeaderType00::BAR4
            | HeaderType00::BAR5 => {
                self.state.base_addresses[(offset - HeaderType00::BAR0.0) as usize / 4]
            }
            HeaderType00::CARDBUS_CIS_PTR => 0,
            HeaderType00::SUBSYSTEM_ID => {
                (self.hardware_ids.type0_sub_system_id as u32) << 16
                    | self.hardware_ids.type0_sub_vendor_id as u32
            }
            HeaderType00::EXPANSION_ROM_BASE => 0,
            HeaderType00::RESERVED_CAP_PTR => {
                // Capability list (when present) always starts at 0x40.
                if self.capabilities.is_empty() {
                    0
                } else {
                    0x40
                }
            }
            HeaderType00::RESERVED => 0,
            HeaderType00::LATENCY_INTERRUPT => {
                // Interrupt pin register encodes INTA..INTD as 1..4, with 0
                // meaning "no interrupt pin".
                let interrupt_pin = if let Some(intx_interrupt) = &self.intx_interrupt {
                    match intx_interrupt.pin {
                        PciInterruptPin::IntA => 1,
                        PciInterruptPin::IntB => 2,
                        PciInterruptPin::IntC => 3,
                        PciInterruptPin::IntD => 4,
                    }
                } else {
                    0
                };
                self.state.interrupt_line as u32 | (interrupt_pin as u32) << 8
            }
            // Capability list region.
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    let mut value = self.capabilities[cap_index].read_u32(cap_offset);
                    if cap_offset == 0 {
                        // Patch the next-capability pointer (byte 1 of the
                        // capability header) in, since capabilities are laid
                        // out contiguously and don't know their neighbors.
                        let next = if cap_index < self.capabilities.len() - 1 {
                            offset as u32 + self.capabilities[cap_index].len() as u32
                        } else {
                            0
                        };
                        // The capability must leave the pointer byte zero.
                        assert!(value & 0xff00 == 0);
                        value |= next << 8;
                    }
                    value
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            // Extended (PCI Express) config space is not implemented.
            _ if (0x100..0x1000).contains(&offset) => {
                if offset == 0x100 {
                    // NOTE(review): 7-digit literal (== 0xffff); presumably
                    // an all-ones-style "no extended capabilities" response
                    // to an express probe — confirm intended width.
                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
                    0x000ffff
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, "unexpected config space read");
                return IoResult::Err(IoError::InvalidRegister);
            }
        };

        IoResult::Ok
    }

    // Propagates the command register's INTx-disable bit into the interrupt
    // object (masking/unmasking the line).
    fn update_intx_disable(&mut self, command: cfg_space::Command) {
        if let Some(intx_interrupt) = &self.intx_interrupt {
            intx_interrupt.set_disabled(command.intx_disable())
        }
    }

    // Applies the command register's MMIO-enable bit: decodes the BARs and
    // maps each backing when enabled; unmaps everything when disabled.
    fn update_mmio_enabled(&mut self, command: cfg_space::Command) {
        if command.mmio_enabled() {
            self.active_bars = BarMappings::parse(&self.state.base_addresses, &self.bar_masks);
            for (bar, mapping) in self.mapped_memory.iter_mut().enumerate() {
                if let Some(mapping) = mapping {
                    let base = self.active_bars.get(bar as u8).expect("bar exists");
                    match mapping.map_to_guest(base) {
                        Ok(_) => {}
                        Err(err) => {
                            // Best-effort: a failed mapping is logged, not
                            // propagated, since this runs in guest register
                            // write context.
                            tracelimit::error_ratelimited!(
                                error = &err as &dyn std::error::Error,
                                bar,
                                base,
                                "failed to map bar",
                            )
                        }
                    }
                }
            }
        } else {
            self.active_bars = Default::default();
            for mapping in self.mapped_memory.iter_mut().flatten() {
                mapping.unmap_from_guest();
            }
        }
    }

    // Applies all side effects implied by a command register value.
    fn sync_command_register(&mut self, command: cfg_space::Command) {
        self.update_intx_disable(command);
        self.update_mmio_enabled(command);
    }

    /// Writes a 32-bit config-space register at the given (dword-aligned)
    /// offset.
    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
        use cfg_space::HeaderType00;

        match HeaderType00(offset) {
            HeaderType00::STATUS_COMMAND => {
                // Status (upper 16 bits) is read-only; only the command half
                // is honored, masked to the supported bits.
                let mut command = cfg_space::Command::from_bits(val as u16);
                if command.into_bits() & !SUPPORTED_COMMAND_BITS != 0 {
                    tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
                    command =
                        cfg_space::Command::from_bits(command.into_bits() & SUPPORTED_COMMAND_BITS);
                };

                // Only touch the interrupt/mapping machinery on actual bit
                // transitions.
                if self.state.command.intx_disable() != command.intx_disable() {
                    self.update_intx_disable(command)
                }

                if self.state.command.mmio_enabled() != command.mmio_enabled() {
                    self.update_mmio_enabled(command)
                }

                self.state.command = command;
            }
            HeaderType00::BIST_HEADER => {
                // Only the latency timer (bits 15:8) is writable here.
                let timer_val = (val >> 8) as u8;
                self.state.latency_timer = timer_val;
            }
            HeaderType00::BAR0
            | HeaderType00::BAR1
            | HeaderType00::BAR2
            | HeaderType00::BAR3
            | HeaderType00::BAR4
            | HeaderType00::BAR5 => {
                // BARs are only reprogrammable while MMIO decoding is off.
                if !self.state.command.mmio_enabled() {
                    let bar_index = (offset - HeaderType00::BAR0.0) as usize / 4;
                    let mut bar_value = val & self.bar_masks[bar_index];
                    // Re-apply the read-only 64-bit type encoding on the low
                    // register of each implemented BAR pair.
                    if bar_index & 1 == 0 && self.bar_masks[bar_index] != 0 {
                        bar_value = cfg_space::BarEncodingBits::from_bits(bar_value)
                            .with_type_64_bit(true)
                            .into_bits();
                    }
                    self.state.base_addresses[bar_index] = bar_value;
                }
            }
            HeaderType00::LATENCY_INTERRUPT => {
                // Only the interrupt line byte (bits 15:8) is writable.
                self.state.interrupt_line = ((val & 0xff00) >> 8) as u8;
            }
            // Remaining aligned header registers are read-only; ignore.
            _ if offset < 0x40 && offset.is_multiple_of(4) => (),
            // Capability list region.
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    self.capabilities[cap_index].write_u32(cap_offset, val);
                } else {
                    tracelimit::warn_ratelimited!(
                        offset,
                        value = val,
                        "unhandled config space write"
                    );
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ if (0x100..0x1000).contains(&offset) => {
                tracelimit::warn_ratelimited!(
                    offset,
                    value = val,
                    "unhandled extended config space write"
                );
                return IoResult::Err(IoError::InvalidRegister);
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
                return IoResult::Err(IoError::InvalidRegister);
            }
        }

        IoResult::Ok
    }

    /// Resolves a guest physical address against the currently mapped BARs,
    /// returning the matching BAR index and offset (see `BarMappings::find`)
    /// if the address falls inside one.
    pub fn find_bar(&self, address: u64) -> Option<(u8, u16)> {
        self.active_bars.find(address)
    }
}
574
/// Guest-mutable register state for [`ConfigSpaceType1Emulator`]
/// (PCI-to-PCI bridge header).
#[derive(Debug, Inspect)]
struct ConfigSpaceType1EmulatorState {
    // The command register.
    command: cfg_space::Command,
    // Highest bus number downstream of this bridge.
    subordinate_bus_number: u8,
    // Bus number immediately downstream of this bridge.
    secondary_bus_number: u8,
    // Bus number on the upstream side of this bridge.
    primary_bus_number: u8,
    // Non-prefetchable memory window base register (address bits 31:20 in
    // the register's upper 12 bits; low 4 bits are flags).
    memory_base: u16,
    // Non-prefetchable memory window limit register (same encoding).
    memory_limit: u16,
    // Prefetchable window base register, low 32 bits' upper half.
    prefetch_base: u16,
    // Prefetchable window limit register, low 32 bits' upper half.
    prefetch_limit: u16,
    // Upper 32 bits of the prefetchable window base address.
    prefetch_base_upper: u32,
    // Upper 32 bits of the prefetchable window limit address.
    prefetch_limit_upper: u32,
}
618
619impl ConfigSpaceType1EmulatorState {
620 fn new() -> Self {
621 Self {
622 command: cfg_space::Command::new(),
623 subordinate_bus_number: 0,
624 secondary_bus_number: 0,
625 primary_bus_number: 0,
626 memory_base: 0,
627 memory_limit: 0,
628 prefetch_base: 0,
629 prefetch_limit: 0,
630 prefetch_base_upper: 0,
631 prefetch_limit_upper: 0,
632 }
633 }
634}
635
/// Emulator for a PCI type-1 (PCI-to-PCI bridge) configuration space
/// header, plus a linear capability list starting at offset 0x40.
#[derive(Inspect)]
pub struct ConfigSpaceType1Emulator {
    // Static identification registers.
    hardware_ids: HardwareIds,
    // Capabilities exposed contiguously from offset 0x40, in order.
    #[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
    capabilities: Vec<Box<dyn PciCapability>>,
    // Guest-mutable register state.
    state: ConfigSpaceType1EmulatorState,
}
648
impl ConfigSpaceType1Emulator {
    /// Creates a new type-1 (bridge) config space emulator.
    pub fn new(hardware_ids: HardwareIds, capabilities: Vec<Box<dyn PciCapability>>) -> Self {
        Self {
            hardware_ids,
            capabilities,
            state: ConfigSpaceType1EmulatorState::new(),
        }
    }

    /// Resets the config space to power-on defaults.
    pub fn reset(&mut self) {
        self.state = ConfigSpaceType1EmulatorState::new();

        for cap in &mut self.capabilities {
            cap.reset();
        }
    }

    /// Returns the downstream bus range (secondary..=subordinate) currently
    /// programmed into the bridge, or `0..=0` if the programmed range is
    /// inverted (i.e. not validly configured).
    pub fn assigned_bus_range(&self) -> RangeInclusive<u8> {
        let secondary = self.state.secondary_bus_number;
        let subordinate = self.state.subordinate_bus_number;
        if secondary <= subordinate {
            secondary..=subordinate
        } else {
            0..=0
        }
    }

    // Decodes a base/limit register pair into inclusive window bounds: the
    // upper 12 bits of each register hold address bits 31:20 (1MB
    // granularity), the low 4 bits are flags and are masked off, and the
    // limit is extended with 0xF_FFFF to cover its full megabyte.
    fn decode_memory_range(&self, base_register: u16, limit_register: u16) -> (u32, u32) {
        let base_addr = ((base_register & !0b1111) as u32) << 16;
        let limit_addr = ((limit_register & !0b1111) as u32) << 16 | 0xF_FFFF;
        (base_addr, limit_addr)
    }

    /// Returns the bridge's non-prefetchable memory window, if MMIO decoding
    /// is enabled and the window is validly programmed (base <= limit).
    pub fn assigned_memory_range(&self) -> Option<RangeInclusive<u32>> {
        let (base_addr, limit_addr) =
            self.decode_memory_range(self.state.memory_base, self.state.memory_limit);
        if self.state.command.mmio_enabled() && base_addr <= limit_addr {
            Some(base_addr..=limit_addr)
        } else {
            None
        }
    }

    /// Returns the bridge's 64-bit prefetchable memory window, if MMIO
    /// decoding is enabled and the window is validly programmed.
    pub fn assigned_prefetch_range(&self) -> Option<RangeInclusive<u64>> {
        let (base_low, limit_low) =
            self.decode_memory_range(self.state.prefetch_base, self.state.prefetch_limit);
        // Combine with the upper-32-bit registers for full 64-bit bounds.
        let base_addr = (self.state.prefetch_base_upper as u64) << 32 | base_low as u64;
        let limit_addr = (self.state.prefetch_limit_upper as u64) << 32 | limit_low as u64;
        if self.state.command.mmio_enabled() && base_addr <= limit_addr {
            Some(base_addr..=limit_addr)
        } else {
            None
        }
    }

    // Translates an offset relative to the capability list's base (0x40)
    // into (capability index, offset within that capability). Capabilities
    // are laid out contiguously in declaration order.
    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
        let mut cap_offset = 0;
        for i in 0..self.capabilities.len() {
            let cap_size = self.capabilities[i].len() as u16;
            if offset < cap_offset + cap_size {
                return Some((i, offset - cap_offset));
            }
            cap_offset += cap_size;
        }
        None
    }

    /// Reads a 32-bit config-space register at the given (dword-aligned)
    /// offset, storing the result in `value`.
    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
        use cfg_space::HeaderType01;

        *value = match HeaderType01(offset) {
            HeaderType01::DEVICE_VENDOR => {
                (self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
            }
            HeaderType01::STATUS_COMMAND => {
                let status =
                    cfg_space::Status::new().with_capabilities_list(!self.capabilities.is_empty());

                (status.into_bits() as u32) << 16 | self.state.command.into_bits() as u32
            }
            HeaderType01::CLASS_REVISION => {
                (u8::from(self.hardware_ids.base_class) as u32) << 24
                    | (u8::from(self.hardware_ids.sub_class) as u32) << 16
                    | (u8::from(self.hardware_ids.prog_if) as u32) << 8
                    | self.hardware_ids.revision_id as u32
            }
            HeaderType01::BIST_HEADER => {
                // Header type 0x01 (bits 23:16) identifies a PCI-to-PCI
                // bridge; all other fields read as zero.
                0x00010000
            }
            // The bridge implements no BARs of its own.
            HeaderType01::BAR0 => 0,
            HeaderType01::BAR1 => 0,
            HeaderType01::LATENCY_BUS_NUMBERS => {
                (self.state.subordinate_bus_number as u32) << 16
                    | (self.state.secondary_bus_number as u32) << 8
                    | self.state.primary_bus_number as u32
            }
            // I/O window is not implemented.
            HeaderType01::SEC_STATUS_IO_RANGE => 0,
            HeaderType01::MEMORY_RANGE => {
                (self.state.memory_limit as u32) << 16 | self.state.memory_base as u32
            }
            HeaderType01::PREFETCH_RANGE => {
                // The low nibble of each half reads back as 0b0001,
                // advertising 64-bit prefetchable addressing (the upper
                // base/limit registers below are implemented).
                ((self.state.prefetch_limit | 0b0001) as u32) << 16
                    | (self.state.prefetch_base | 0b0001) as u32
            }
            HeaderType01::PREFETCH_BASE_UPPER => self.state.prefetch_base_upper,
            HeaderType01::PREFETCH_LIMIT_UPPER => self.state.prefetch_limit_upper,
            HeaderType01::IO_RANGE_UPPER => 0,
            HeaderType01::RESERVED_CAP_PTR => {
                // Capability list (when present) always starts at 0x40.
                if self.capabilities.is_empty() {
                    0
                } else {
                    0x40
                }
            }
            HeaderType01::EXPANSION_ROM_BASE => 0,
            HeaderType01::BRDIGE_CTRL_INTERRUPT => 0,
            // Capability list region.
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    let mut value = self.capabilities[cap_index].read_u32(cap_offset);
                    if cap_offset == 0 {
                        // Patch the next-capability pointer (byte 1 of the
                        // capability header) in, since capabilities are laid
                        // out contiguously and don't know their neighbors.
                        let next = if cap_index < self.capabilities.len() - 1 {
                            offset as u32 + self.capabilities[cap_index].len() as u32
                        } else {
                            0
                        };
                        // The capability must leave the pointer byte zero.
                        assert!(value & 0xff00 == 0);
                        value |= next << 8;
                    }
                    value
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            // Extended (PCI Express) config space is not implemented.
            _ if (0x100..0x1000).contains(&offset) => {
                if offset == 0x100 {
                    // NOTE(review): 7-digit literal (== 0xffff); presumably
                    // an all-ones-style "no extended capabilities" response
                    // to an express probe — confirm intended width.
                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
                    0x000ffff
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, "unexpected config space read");
                return IoResult::Err(IoError::InvalidRegister);
            }
        };

        IoResult::Ok
    }

    /// Writes a 32-bit config-space register at the given (dword-aligned)
    /// offset.
    ///
    /// Unlike the type-0 emulator, command writes have no immediate side
    /// effects here; window state is queried via the `assigned_*` helpers.
    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
        use cfg_space::HeaderType01;

        match HeaderType01(offset) {
            HeaderType01::STATUS_COMMAND => {
                // Status (upper 16 bits) is read-only; only the command half
                // is honored, masked to the supported bits.
                let mut command = cfg_space::Command::from_bits(val as u16);
                if command.into_bits() & !SUPPORTED_COMMAND_BITS != 0 {
                    tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
                    command =
                        cfg_space::Command::from_bits(command.into_bits() & SUPPORTED_COMMAND_BITS);
                };

                self.state.command = command;
            }
            HeaderType01::LATENCY_BUS_NUMBERS => {
                // Layout: subordinate (23:16), secondary (15:8), primary (7:0).
                self.state.subordinate_bus_number = (val >> 16) as u8;
                self.state.secondary_bus_number = (val >> 8) as u8;
                self.state.primary_bus_number = val as u8;
            }
            HeaderType01::MEMORY_RANGE => {
                self.state.memory_base = val as u16;
                self.state.memory_limit = (val >> 16) as u16;
            }
            HeaderType01::PREFETCH_RANGE => {
                self.state.prefetch_base = val as u16;
                self.state.prefetch_limit = (val >> 16) as u16;
            }
            HeaderType01::PREFETCH_BASE_UPPER => {
                self.state.prefetch_base_upper = val;
            }
            HeaderType01::PREFETCH_LIMIT_UPPER => {
                self.state.prefetch_limit_upper = val;
            }
            // Remaining aligned header registers are read-only; ignore.
            _ if offset < 0x40 && offset.is_multiple_of(4) => (),
            // Capability list region.
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    self.capabilities[cap_index].write_u32(cap_offset, val);
                } else {
                    tracelimit::warn_ratelimited!(
                        offset,
                        value = val,
                        "unhandled config space write"
                    );
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ if (0x100..0x1000).contains(&offset) => {
                tracelimit::warn_ratelimited!(
                    offset,
                    value = val,
                    "unhandled extended config space write"
                );
                return IoResult::Err(IoError::InvalidRegister);
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
                return IoResult::Err(IoError::InvalidRegister);
            }
        }

        IoResult::Ok
    }
}
889
890mod save_restore {
891 use super::*;
892 use thiserror::Error;
893 use vmcore::save_restore::RestoreError;
894 use vmcore::save_restore::SaveError;
895 use vmcore::save_restore::SaveRestore;
896
897 mod state {
898 use mesh::payload::Protobuf;
899 use vmcore::save_restore::SavedStateBlob;
900 use vmcore::save_restore::SavedStateRoot;
901
902 #[derive(Protobuf, SavedStateRoot)]
903 #[mesh(package = "pci.cfg_space_emu")]
904 pub struct SavedState {
905 #[mesh(1)]
906 pub command: u16,
907 #[mesh(2)]
908 pub base_addresses: [u32; 6],
909 #[mesh(3)]
910 pub interrupt_line: u8,
911 #[mesh(4)]
912 pub latency_timer: u8,
913 #[mesh(5)]
914 pub capabilities: Vec<(String, SavedStateBlob)>,
915 }
916 }
917
918 #[derive(Debug, Error)]
919 enum ConfigSpaceRestoreError {
920 #[error("found invalid config bits in saved state")]
921 InvalidConfigBits,
922 #[error("found unexpected capability {0}")]
923 InvalidCap(String),
924 }
925
926 impl SaveRestore for ConfigSpaceType0Emulator {
927 type SavedState = state::SavedState;
928
929 fn save(&mut self) -> Result<Self::SavedState, SaveError> {
930 let ConfigSpaceType0EmulatorState {
931 command,
932 base_addresses,
933 interrupt_line,
934 latency_timer,
935 } = self.state;
936
937 let saved_state = state::SavedState {
938 command: command.into_bits(),
939 base_addresses,
940 interrupt_line,
941 latency_timer,
942 capabilities: self
943 .capabilities
944 .iter_mut()
945 .map(|cap| {
946 let id = cap.label().to_owned();
947 Ok((id, cap.save()?))
948 })
949 .collect::<Result<_, _>>()?,
950 };
951
952 Ok(saved_state)
953 }
954
955 fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
956 let state::SavedState {
957 command,
958 base_addresses,
959 interrupt_line,
960 latency_timer,
961 capabilities,
962 } = state;
963
964 self.state = ConfigSpaceType0EmulatorState {
965 command: cfg_space::Command::from_bits(command),
966 base_addresses,
967 interrupt_line,
968 latency_timer,
969 };
970
971 if command & !SUPPORTED_COMMAND_BITS != 0 {
972 return Err(RestoreError::InvalidSavedState(
973 ConfigSpaceRestoreError::InvalidConfigBits.into(),
974 ));
975 }
976
977 self.sync_command_register(self.state.command);
978 for (id, entry) in capabilities {
979 tracing::debug!(save_id = id.as_str(), "restoring pci capability");
980
981 let mut restored = false;
984 for cap in self.capabilities.iter_mut() {
985 if cap.label() == id {
986 cap.restore(entry)?;
987 restored = true;
988 break;
989 }
990 }
991
992 if !restored {
993 return Err(RestoreError::InvalidSavedState(
994 ConfigSpaceRestoreError::InvalidCap(id).into(),
995 ));
996 }
997 }
998
999 Ok(())
1000 }
1001 }
1002}
1003
#[cfg(test)]
mod tests {
    use super::*;
    use crate::capabilities::read_only::ReadOnlyCapability;
    use crate::spec::hwid::ClassCode;
    use crate::spec::hwid::ProgrammingInterface;
    use crate::spec::hwid::Subclass;

    /// Builds a type-1 (bridge) emulator with fixed hardware IDs and the
    /// given capability list.
    fn create_type1_emulator(caps: Vec<Box<dyn PciCapability>>) -> ConfigSpaceType1Emulator {
        ConfigSpaceType1Emulator::new(
            HardwareIds {
                vendor_id: 0x1111,
                device_id: 0x2222,
                revision_id: 1,
                prog_if: ProgrammingInterface::NONE,
                sub_class: Subclass::BRIDGE_PCI_TO_PCI,
                base_class: ClassCode::BRIDGE,
                type0_sub_vendor_id: 0,
                type0_sub_system_id: 0,
            },
            caps,
        )
    }

    /// Reads a config-space dword, panicking on error.
    fn read_cfg(emulator: &ConfigSpaceType1Emulator, offset: u16) -> u32 {
        let mut val = 0;
        emulator.read_u32(offset, &mut val).unwrap();
        val
    }

    #[test]
    fn test_type1_probe() {
        // With no capabilities, the capabilities-list status bit (bit 20 of
        // the STATUS_COMMAND dword at offset 4) must be clear.
        let emu = create_type1_emulator(vec![]);
        assert_eq!(read_cfg(&emu, 0), 0x2222_1111);
        assert_eq!(read_cfg(&emu, 4) & 0x10_0000, 0);
        // With a capability present, the bit must read as set.
        let emu = create_type1_emulator(vec![Box::new(ReadOnlyCapability::new("foo", 0))]);
        assert_eq!(read_cfg(&emu, 0), 0x2222_1111);
        assert_eq!(read_cfg(&emu, 4) & 0x10_0000, 0x10_0000);
    }

    #[test]
    fn test_type1_bus_number_assignment() {
        let mut emu = create_type1_emulator(vec![]);

        // Register 0x18 layout: subordinate << 16 | secondary << 8 | primary.
        assert_eq!(read_cfg(&emu, 0x18), 0);
        assert_eq!(emu.assigned_bus_range(), 0..=0);

        // Secondary set but subordinate still 0: invalid range reads as 0..=0
        // until the subordinate bus is programmed too.
        emu.write_u32(0x18, 0x0000_1000).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0000_1000);
        assert_eq!(emu.assigned_bus_range(), 0..=0);
        emu.write_u32(0x18, 0x0012_1000).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0012_1000);
        assert_eq!(emu.assigned_bus_range(), 0x10..=0x12);

        // The primary bus number does not affect the downstream range.
        emu.write_u32(0x18, 0x0012_1033).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0012_1033);
        assert_eq!(emu.assigned_bus_range(), 0x10..=0x12);

        emu.write_u32(0x18, 0x0047_4411).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0047_4411);
        assert_eq!(emu.assigned_bus_range(), 0x44..=0x47);

        // secondary == subordinate is a valid single-bus range.
        emu.write_u32(0x18, 0x0088_8800).unwrap();
        assert_eq!(emu.assigned_bus_range(), 0x88..=0x88);

        // Inverted range (secondary > subordinate) reads as 0..=0.
        emu.write_u32(0x18, 0x0087_8800).unwrap();
        assert_eq!(emu.assigned_bus_range(), 0..=0);
    }

    #[test]
    fn test_type1_memory_assignment() {
        // Command-register values: bit 1 is MMIO (memory space) enable.
        const MMIO_ENABLED: u32 = 0x0000_0002;
        const MMIO_DISABLED: u32 = 0x0000_0000;

        let mut emu = create_type1_emulator(vec![]);
        assert!(emu.assigned_memory_range().is_none());

        // Base > limit: window never considered assigned.
        emu.write_u32(0x20, 0xDEAD_BEEF).unwrap();
        assert!(emu.assigned_memory_range().is_none());

        // Valid window, but only reported while MMIO decoding is enabled.
        // Register 0x20 layout: limit << 16 | base; 1MB granularity.
        emu.write_u32(0x20, 0xFFF0_FF00).unwrap();
        assert!(emu.assigned_memory_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(emu.assigned_memory_range(), Some(0xFF00_0000..=0xFFFF_FFFF));
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());

        // base == limit covers a single megabyte.
        emu.write_u32(0x20, 0xBBB0_BBB0).unwrap();
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(emu.assigned_memory_range(), Some(0xBBB0_0000..=0xBBBF_FFFF));
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());

        // Inverted window (base > limit) is never assigned, regardless of
        // the MMIO-enable bit.
        emu.write_u32(0x20, 0xAA00_BB00).unwrap();
        assert!(emu.assigned_memory_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());
    }

    #[test]
    fn test_type1_prefetch_assignment() {
        // Command-register values: bit 1 is MMIO (memory space) enable.
        const MMIO_ENABLED: u32 = 0x0000_0002;
        const MMIO_DISABLED: u32 = 0x0000_0000;

        let mut emu = create_type1_emulator(vec![]);
        assert!(emu.assigned_prefetch_range().is_none());

        // 0x24 = low base/limit, 0x28 = base upper 32, 0x2C = limit upper 32.
        emu.write_u32(0x24, 0xFFF0_FF00).unwrap();
        emu.write_u32(0x28, 0x00AA_BBCC).unwrap();
        emu.write_u32(0x2C, 0x00DD_EEFF).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            emu.assigned_prefetch_range(),
            Some(0x00AA_BBCC_FF00_0000..=0x00DD_EEFF_FFFF_FFFF)
        );
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());

        // Low halves inverted, but upper halves make the full 64-bit range
        // valid (base < limit overall).
        emu.write_u32(0x24, 0xFF00_FFF0).unwrap();
        emu.write_u32(0x28, 0x00AA_BBCC).unwrap();
        emu.write_u32(0x2C, 0x00DD_EEFF).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            emu.assigned_prefetch_range(),
            Some(0x00AA_BBCC_FFF0_0000..=0x00DD_EEFF_FF0F_FFFF)
        );
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());

        // Identical base and limit: window covers a single megabyte.
        emu.write_u32(0x24, 0xDD00_DD00).unwrap();
        emu.write_u32(0x28, 0x00AA_BBCC).unwrap();
        emu.write_u32(0x2C, 0x00AA_BBCC).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            emu.assigned_prefetch_range(),
            Some(0x00AA_BBCC_DD00_0000..=0x00AA_BBCC_DD0F_FFFF)
        );
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());
    }
}