1use crate::PciInterruptPin;
10use crate::bar_mapping::BarMappings;
11use crate::capabilities::PciCapability;
12use crate::spec::cfg_space;
13use crate::spec::hwid::HardwareIds;
14use chipset_device::io::IoError;
15use chipset_device::io::IoResult;
16use chipset_device::mmio::ControlMmioIntercept;
17use guestmem::MappableGuestMemory;
18use inspect::Inspect;
19use std::ops::RangeInclusive;
20use std::sync::Arc;
21use std::sync::atomic::AtomicBool;
22use std::sync::atomic::Ordering;
23use vmcore::line_interrupt::LineInterrupt;
24
/// Mask of the command-register bits this emulator lets a guest modify.
/// Writes that set bits outside this mask are logged and stripped (see
/// `write_u32` in the emulators below).
const SUPPORTED_COMMAND_BITS: u16 = cfg_space::Command::new()
    .with_pio_enabled(true)
    .with_mmio_enabled(true)
    .with_bus_master(true)
    .with_special_cycles(true)
    .with_enable_memory_write_invalidate(true)
    .with_vga_palette_snoop(true)
    .with_parity_error_response(true)
    .with_enable_serr(true)
    .with_enable_fast_b2b(true)
    .with_intx_disable(true)
    .into_bits();
37
/// A legacy PCI INTx interrupt: a level interrupt line gated by the
/// config-space command register's INTx-disable bit.
#[derive(Debug, Inspect)]
pub struct IntxInterrupt {
    /// Which INTx pin (A-D) the interrupt is routed to, as reported in the
    /// interrupt-pin config register.
    pin: PciInterruptPin,
    /// The underlying line interrupt that is raised/lowered.
    line: LineInterrupt,
    /// Mirrors the command register's INTx-disable bit.
    interrupt_disabled: AtomicBool,
    /// Latched interrupt status, reported in the status register even while
    /// the line is masked.
    interrupt_status: AtomicBool,
}
47
48impl IntxInterrupt {
49 pub fn set_level(&self, high: bool) {
54 tracing::debug!(
55 disabled = ?self.interrupt_disabled,
56 status = ?self.interrupt_status,
57 ?high,
58 %self.line,
59 "set_level"
60 );
61
62 self.interrupt_status.store(high, Ordering::SeqCst);
64
65 if self.interrupt_disabled.load(Ordering::SeqCst) {
67 self.line.set_level(false);
68 } else {
69 self.line.set_level(high);
70 }
71 }
72
73 fn set_disabled(&self, disabled: bool) {
74 tracing::debug!(
75 disabled = ?self.interrupt_disabled,
76 status = ?self.interrupt_status,
77 ?disabled,
78 %self.line,
79 "set_disabled"
80 );
81
82 self.interrupt_disabled.store(disabled, Ordering::SeqCst);
83 if disabled {
84 self.line.set_level(false)
85 } else {
86 if self.interrupt_status.load(Ordering::SeqCst) {
87 self.line.set_level(true)
88 }
89 }
90 }
91}
92
/// The guest-mutable register state of a type-0 config space.
#[derive(Debug, Inspect)]
struct ConfigSpaceType0EmulatorState {
    /// The command register (offset 0x04, low word).
    command: cfg_space::Command,
    /// Raw BAR register values as programmed by the guest.
    #[inspect(with = "inspect_helpers::bars")]
    base_addresses: [u32; 6],
    /// The interrupt-line register (offset 0x3C, byte 0); informational only.
    interrupt_line: u8,
    /// The latency-timer register (offset 0x0C, byte 1); stored but otherwise
    /// unused by the emulator.
    latency_timer: u8,
}
108
109impl ConfigSpaceType0EmulatorState {
110 fn new() -> Self {
111 Self {
112 latency_timer: 0,
113 command: cfg_space::Command::new(),
114 base_addresses: [0; 6],
115 interrupt_line: 0,
116 }
117 }
118}
119
/// Emulator for the configuration space of a type-0 (endpoint) PCI function.
#[derive(Inspect)]
pub struct ConfigSpaceType0Emulator {
    /// Per-BAR address masks derived from each BAR's power-of-two length.
    #[inspect(with = "inspect_helpers::bars")]
    bar_masks: [u32; 6],
    /// Immutable identification registers (vendor/device/class/subsystem).
    hardware_ids: HardwareIds,
    /// When set, bit 7 of the header-type byte is reported.
    multi_function_bit: bool,

    /// Backing memory (or MMIO intercept control) for each implemented BAR.
    #[inspect(with = r#"|x| inspect::iter_by_index(x).prefix("bar")"#)]
    mapped_memory: [Option<BarMemoryKind>; 6],
    /// Capabilities exposed in the capability list starting at offset 0x40.
    #[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
    capabilities: Vec<Box<dyn PciCapability>>,
    /// Legacy INTx interrupt, if one has been wired up.
    intx_interrupt: Option<Arc<IntxInterrupt>>,

    /// Decoded BAR mappings; populated only while MMIO decoding is enabled.
    active_bars: BarMappings,

    /// Guest-mutable register state.
    state: ConfigSpaceType0EmulatorState,
}
145
mod inspect_helpers {
    use super::*;

    /// Renders a BAR register array as hex values keyed "bar0".."bar5".
    pub(crate) fn bars(bars: &[u32; 6]) -> impl Inspect + '_ {
        inspect::AsHex(inspect::iter_by_index(bars).prefix("bar"))
    }
}
153
/// The kind of backing behind a memory BAR.
#[derive(Inspect)]
#[inspect(tag = "kind")]
pub enum BarMemoryKind {
    /// Accesses to the BAR are routed through an MMIO intercept.
    Intercept(#[inspect(rename = "handle")] Box<dyn ControlMmioIntercept>),
    /// The BAR is backed by memory mapped directly into the guest.
    SharedMem(#[inspect(skip)] Box<dyn MappableGuestMemory>),
    /// The BAR has no backing; map/unmap operations are no-ops.
    Dummy,
}
165
166impl std::fmt::Debug for BarMemoryKind {
167 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
168 match self {
169 Self::Intercept(control) => {
170 write!(f, "Intercept(region_name: {}, ..)", control.region_name())
171 }
172 Self::SharedMem(_) => write!(f, "Mmap(..)"),
173 Self::Dummy => write!(f, "Dummy"),
174 }
175 }
176}
177
178impl BarMemoryKind {
179 fn map_to_guest(&mut self, gpa: u64) -> std::io::Result<()> {
180 match self {
181 BarMemoryKind::Intercept(control) => {
182 control.map(gpa);
183 Ok(())
184 }
185 BarMemoryKind::SharedMem(control) => control.map_to_guest(gpa, true),
186 BarMemoryKind::Dummy => Ok(()),
187 }
188 }
189
190 fn unmap_from_guest(&mut self) {
191 match self {
192 BarMemoryKind::Intercept(control) => control.unmap(),
193 BarMemoryKind::SharedMem(control) => control.unmap_from_guest(),
194 BarMemoryKind::Dummy => {}
195 }
196 }
197}
198
/// Builder describing the BARs a device exposes: a `(length, backing)` pair
/// per implemented BAR slot.
#[derive(Debug)]
pub struct DeviceBars {
    // Only even indices are populated by the builder methods: each BAR is
    // encoded as 64-bit, consuming the following odd slot for its high bits.
    bars: [Option<(u64, BarMemoryKind)>; 6],
}
207
208impl DeviceBars {
209 pub fn new() -> DeviceBars {
211 DeviceBars {
212 bars: Default::default(),
213 }
214 }
215
216 pub fn bar0(mut self, len: u64, memory: BarMemoryKind) -> Self {
218 self.bars[0] = Some((len, memory));
219 self
220 }
221
222 pub fn bar2(mut self, len: u64, memory: BarMemoryKind) -> Self {
224 self.bars[2] = Some((len, memory));
225 self
226 }
227
228 pub fn bar4(mut self, len: u64, memory: BarMemoryKind) -> Self {
230 self.bars[4] = Some((len, memory));
231 self
232 }
233}
234
impl ConfigSpaceType0Emulator {
    /// Creates a new type-0 config space emulator from the device's hardware
    /// IDs, capability list, and BAR layout.
    pub fn new(
        hardware_ids: HardwareIds,
        capabilities: Vec<Box<dyn PciCapability>>,
        bars: DeviceBars,
    ) -> Self {
        let mut bar_masks = [0; 6];
        let mut mapped_memory = {
            // `Option<BarMemoryKind>` is not `Copy`, so seed the array from a
            // const to allow the `[NONE; 6]` repeat syntax.
            const NONE: Option<BarMemoryKind> = None;
            [NONE; 6]
        };
        for (bar_index, bar) in bars.bars.into_iter().enumerate() {
            let (len, mapped) = match bar {
                Some(bar) => bar,
                None => continue,
            };
            // Each BAR is encoded as 64-bit and takes two slots, so the last
            // usable index is 4.
            assert!(bar_index < 5);
            const MIN_BAR_SIZE: u64 = 4096;
            // BAR sizing requires a power-of-two length; clamp to at least 4K.
            let len = std::cmp::max(len.next_power_of_two(), MIN_BAR_SIZE);
            let mask64 = !(len - 1);
            // Low dword: address mask plus the 64-bit type encoding.
            bar_masks[bar_index] = cfg_space::BarEncodingBits::from_bits(mask64 as u32)
                .with_type_64_bit(true)
                .into_bits();
            // High dword: upper half of the address mask.
            bar_masks[bar_index + 1] = (mask64 >> 32) as u32;
            mapped_memory[bar_index] = Some(mapped);
        }

        Self {
            bar_masks,
            hardware_ids,
            multi_function_bit: false,

            active_bars: Default::default(),

            mapped_memory,
            capabilities,
            intx_interrupt: None,

            state: ConfigSpaceType0EmulatorState {
                command: cfg_space::Command::new(),
                base_addresses: [0; 6],
                interrupt_line: 0,
                latency_timer: 0,
            },
        }
    }

    /// Sets whether the multi-function bit is reported in the header-type
    /// byte.
    pub fn with_multi_function_bit(mut self, bit: bool) -> Self {
        self.multi_function_bit = bit;
        self
    }

    /// Wires up a legacy INTx interrupt on the given pin, returning a handle
    /// the device uses to raise/lower the interrupt.
    pub fn set_interrupt_pin(
        &mut self,
        pin: PciInterruptPin,
        line: LineInterrupt,
    ) -> Arc<IntxInterrupt> {
        let intx_interrupt = Arc::new(IntxInterrupt {
            pin,
            line,
            interrupt_disabled: AtomicBool::new(false),
            interrupt_status: AtomicBool::new(false),
        });
        self.intx_interrupt = Some(intx_interrupt.clone());
        intx_interrupt
    }

    /// Resets the config space and all capabilities to power-on state,
    /// unmapping BARs and lowering any pending INTx interrupt.
    pub fn reset(&mut self) {
        self.state = ConfigSpaceType0EmulatorState::new();

        // Re-apply the (now cleared) command register's side effects: unmaps
        // all BARs and unmasks INTx.
        self.sync_command_register(self.state.command);

        for cap in &mut self.capabilities {
            cap.reset();
        }

        if let Some(intx) = &mut self.intx_interrupt {
            intx.set_level(false);
        }
    }

    /// Maps an offset relative to the start of the capability list (0x40) to
    /// the index of the capability it falls in plus the offset within that
    /// capability. Capabilities are laid out back-to-back in vector order.
    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
        let mut cap_offset = 0;
        for i in 0..self.capabilities.len() {
            let cap_size = self.capabilities[i].len() as u16;
            if offset < cap_offset + cap_size {
                return Some((i, offset - cap_offset));
            }
            cap_offset += cap_size;
        }
        None
    }

    /// Emulates a 32-bit config space read at `offset` (dword aligned).
    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
        use cfg_space::HeaderType00;

        *value = match HeaderType00(offset) {
            HeaderType00::DEVICE_VENDOR => {
                (self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
            }
            HeaderType00::STATUS_COMMAND => {
                let mut status =
                    cfg_space::Status::new().with_capabilities_list(!self.capabilities.is_empty());

                // Reflect the latched INTx status in the status register.
                if let Some(intx_interrupt) = &self.intx_interrupt {
                    if intx_interrupt.interrupt_status.load(Ordering::SeqCst) {
                        status.set_interrupt_status(true);
                    }
                }

                (status.into_bits() as u32) << 16 | self.state.command.into_bits() as u32
            }
            HeaderType00::CLASS_REVISION => {
                (u8::from(self.hardware_ids.base_class) as u32) << 24
                    | (u8::from(self.hardware_ids.sub_class) as u32) << 16
                    | (u8::from(self.hardware_ids.prog_if) as u32) << 8
                    | self.hardware_ids.revision_id as u32
            }
            HeaderType00::BIST_HEADER => {
                // Latency timer in byte 1; multi-function flag is bit 7 of
                // the header-type byte (byte 2).
                let mut v = (self.state.latency_timer as u32) << 8;
                if self.multi_function_bit {
                    v |= 0x80 << 16;
                }
                v
            }
            HeaderType00::BAR0
            | HeaderType00::BAR1
            | HeaderType00::BAR2
            | HeaderType00::BAR3
            | HeaderType00::BAR4
            | HeaderType00::BAR5 => {
                self.state.base_addresses[(offset - HeaderType00::BAR0.0) as usize / 4]
            }
            HeaderType00::CARDBUS_CIS_PTR => 0,
            HeaderType00::SUBSYSTEM_ID => {
                (self.hardware_ids.type0_sub_system_id as u32) << 16
                    | self.hardware_ids.type0_sub_vendor_id as u32
            }
            HeaderType00::EXPANSION_ROM_BASE => 0,
            HeaderType00::RESERVED_CAP_PTR => {
                // The capability list (when present) starts at offset 0x40.
                if self.capabilities.is_empty() {
                    0
                } else {
                    0x40
                }
            }
            HeaderType00::RESERVED => 0,
            HeaderType00::LATENCY_INTERRUPT => {
                // Interrupt line in byte 0, interrupt pin (1-4 = INTA-INTD,
                // 0 = none) in byte 1.
                let interrupt_pin = if let Some(intx_interrupt) = &self.intx_interrupt {
                    match intx_interrupt.pin {
                        PciInterruptPin::IntA => 1,
                        PciInterruptPin::IntB => 2,
                        PciInterruptPin::IntC => 3,
                        PciInterruptPin::IntD => 4,
                    }
                } else {
                    0
                };
                self.state.interrupt_line as u32 | (interrupt_pin as u32) << 8
            }
            // Capability list region.
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    let mut value = self.capabilities[cap_index].read_u32(cap_offset);
                    if cap_offset == 0 {
                        // Patch the next-capability pointer into byte 1 of
                        // the capability header.
                        let next = if cap_index < self.capabilities.len() - 1 {
                            offset as u32 + self.capabilities[cap_index].len() as u32
                        } else {
                            0
                        };
                        assert!(value & 0xff00 == 0);
                        value |= next << 8;
                    }
                    value
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            // Extended (PCI Express) config space; not emulated.
            _ if (0x100..0x1000).contains(&offset) => {
                if offset == 0x100 {
                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
                    // NOTE(review): 0x000ffff == 0xffff; presumably meant as
                    // an all-ones "no extended capabilities" response —
                    // confirm the intended digit count.
                    0x000ffff
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, "unexpected config space read");
                return IoResult::Err(IoError::InvalidRegister);
            }
        };

        IoResult::Ok
    }

    /// Propagates the command register's INTx-disable bit to the interrupt
    /// object, if one is wired up.
    fn update_intx_disable(&mut self, command: cfg_space::Command) {
        if let Some(intx_interrupt) = &self.intx_interrupt {
            intx_interrupt.set_disabled(command.intx_disable())
        }
    }

    /// Applies the command register's MMIO-enable bit: maps all BARs at their
    /// programmed addresses when enabled, unmaps them when disabled.
    fn update_mmio_enabled(&mut self, command: cfg_space::Command) {
        if command.mmio_enabled() {
            self.active_bars = BarMappings::parse(&self.state.base_addresses, &self.bar_masks);
            for (bar, mapping) in self.mapped_memory.iter_mut().enumerate() {
                if let Some(mapping) = mapping {
                    let base = self.active_bars.get(bar as u8).expect("bar exists");
                    match mapping.map_to_guest(base) {
                        Ok(_) => {}
                        Err(err) => {
                            // A mapping failure is logged but not fatal.
                            tracelimit::error_ratelimited!(
                                error = &err as &dyn std::error::Error,
                                bar,
                                base,
                                "failed to map bar",
                            )
                        }
                    }
                }
            }
        } else {
            self.active_bars = Default::default();
            for mapping in self.mapped_memory.iter_mut().flatten() {
                mapping.unmap_from_guest();
            }
        }
    }

    /// Applies all side effects of a (possibly restored) command register
    /// value.
    fn sync_command_register(&mut self, command: cfg_space::Command) {
        self.update_intx_disable(command);
        self.update_mmio_enabled(command);
    }

    /// Emulates a 32-bit config space write at `offset` (dword aligned).
    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
        use cfg_space::HeaderType00;

        match HeaderType00(offset) {
            HeaderType00::STATUS_COMMAND => {
                let mut command = cfg_space::Command::from_bits(val as u16);
                // Strip (and log) any unsupported command bits.
                if command.into_bits() & !SUPPORTED_COMMAND_BITS != 0 {
                    tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
                    command =
                        cfg_space::Command::from_bits(command.into_bits() & SUPPORTED_COMMAND_BITS);
                };

                // Only apply side effects for bits that actually changed.
                if self.state.command.intx_disable() != command.intx_disable() {
                    self.update_intx_disable(command)
                }

                if self.state.command.mmio_enabled() != command.mmio_enabled() {
                    self.update_mmio_enabled(command)
                }

                self.state.command = command;
            }
            HeaderType00::BIST_HEADER => {
                // Only the latency timer (byte 1) is writable.
                let timer_val = (val >> 8) as u8;
                self.state.latency_timer = timer_val;
            }
            HeaderType00::BAR0
            | HeaderType00::BAR1
            | HeaderType00::BAR2
            | HeaderType00::BAR3
            | HeaderType00::BAR4
            | HeaderType00::BAR5 => {
                // BARs are only reprogrammable while MMIO decoding is off.
                if !self.state.command.mmio_enabled() {
                    let bar_index = (offset - HeaderType00::BAR0.0) as usize / 4;
                    let mut bar_value = val & self.bar_masks[bar_index];
                    // Re-assert the 64-bit type encoding in the low dword of
                    // each implemented BAR pair.
                    if bar_index & 1 == 0 && self.bar_masks[bar_index] != 0 {
                        bar_value = cfg_space::BarEncodingBits::from_bits(bar_value)
                            .with_type_64_bit(true)
                            .into_bits();
                    }
                    self.state.base_addresses[bar_index] = bar_value;
                }
            }
            HeaderType00::LATENCY_INTERRUPT => {
                // Only the interrupt-line byte is writable.
                self.state.interrupt_line = ((val & 0xff00) >> 8) as u8;
            }
            // Writes to other header registers are silently ignored.
            _ if offset < 0x40 && offset.is_multiple_of(4) => (),
            // Capability list region.
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    self.capabilities[cap_index].write_u32(cap_offset, val);
                } else {
                    tracelimit::warn_ratelimited!(
                        offset,
                        value = val,
                        "unhandled config space write"
                    );
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ if (0x100..0x1000).contains(&offset) => {
                tracelimit::warn_ratelimited!(
                    offset,
                    value = val,
                    "unhandled extended config space write"
                );
                return IoResult::Err(IoError::InvalidRegister);
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
                return IoResult::Err(IoError::InvalidRegister);
            }
        }

        IoResult::Ok
    }

    /// Looks up which (mapped) BAR contains the given guest physical address,
    /// returning the BAR index and the offset within it.
    pub fn find_bar(&self, address: u64) -> Option<(u8, u16)> {
        self.active_bars.find(address)
    }
}
574
/// The guest-mutable register state of a type-1 (bridge) config space.
#[derive(Debug, Inspect)]
struct ConfigSpaceType1EmulatorState {
    /// The command register.
    command: cfg_space::Command,
    /// Highest bus number downstream of (behind) this bridge.
    subordinate_bus_number: u8,
    /// Bus number of the bus immediately behind this bridge.
    secondary_bus_number: u8,
    /// Bus number on the upstream side of this bridge.
    primary_bus_number: u8,
    /// Non-prefetchable memory window base register; bits 15:4 hold address
    /// bits 31:20 (see `decode_memory_range`).
    memory_base: u16,
    /// Non-prefetchable memory window limit register, same encoding.
    memory_limit: u16,
    /// Prefetchable window base register (low 32 bits of the address).
    prefetch_base: u16,
    /// Prefetchable window limit register (low 32 bits of the address).
    prefetch_limit: u16,
    /// Upper 32 bits of the prefetchable window base.
    prefetch_base_upper: u32,
    /// Upper 32 bits of the prefetchable window limit.
    prefetch_limit_upper: u32,
}
618
619impl ConfigSpaceType1EmulatorState {
620 fn new() -> Self {
621 Self {
622 command: cfg_space::Command::new(),
623 subordinate_bus_number: 0,
624 secondary_bus_number: 0,
625 primary_bus_number: 0,
626 memory_base: 0,
627 memory_limit: 0,
628 prefetch_base: 0,
629 prefetch_limit: 0,
630 prefetch_base_upper: 0,
631 prefetch_limit_upper: 0,
632 }
633 }
634}
635
/// Emulator for the configuration space of a type-1 (PCI-to-PCI bridge)
/// function.
#[derive(Inspect)]
pub struct ConfigSpaceType1Emulator {
    /// Immutable identification registers (vendor/device/class).
    hardware_ids: HardwareIds,
    /// Capabilities exposed in the capability list starting at offset 0x40.
    #[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
    capabilities: Vec<Box<dyn PciCapability>>,
    /// When set, bit 7 of the header-type byte is reported.
    multi_function_bit: bool,
    /// Guest-mutable register state.
    state: ConfigSpaceType1EmulatorState,
}
649
impl ConfigSpaceType1Emulator {
    /// Creates a new bridge config space emulator with the given hardware IDs
    /// and capability list.
    pub fn new(hardware_ids: HardwareIds, capabilities: Vec<Box<dyn PciCapability>>) -> Self {
        Self {
            hardware_ids,
            capabilities,
            multi_function_bit: false,
            state: ConfigSpaceType1EmulatorState::new(),
        }
    }

    /// Resets the config space and all capabilities to power-on state.
    pub fn reset(&mut self) {
        self.state = ConfigSpaceType1EmulatorState::new();

        for cap in &mut self.capabilities {
            cap.reset();
        }
    }

    /// Sets whether the multi-function bit is reported in the header-type
    /// byte.
    pub fn with_multi_function_bit(mut self, multi_function: bool) -> Self {
        self.multi_function_bit = multi_function;
        self
    }

    /// Returns the downstream bus range [secondary, subordinate] assigned to
    /// this bridge, or `0..=0` when the programmed values are inverted (and
    /// therefore invalid).
    pub fn assigned_bus_range(&self) -> RangeInclusive<u8> {
        let secondary = self.state.secondary_bus_number;
        let subordinate = self.state.subordinate_bus_number;
        if secondary <= subordinate {
            secondary..=subordinate
        } else {
            0..=0
        }
    }

    /// Decodes a base/limit register pair into 32-bit addresses. Bits 15:4 of
    /// each register hold address bits 31:20; the limit covers a full 1MB
    /// granule, hence the trailing 0xF_FFFF.
    fn decode_memory_range(&self, base_register: u16, limit_register: u16) -> (u32, u32) {
        let base_addr = ((base_register & !0b1111) as u32) << 16;
        let limit_addr = ((limit_register & !0b1111) as u32) << 16 | 0xF_FFFF;
        (base_addr, limit_addr)
    }

    /// Returns the non-prefetchable memory window, if MMIO decoding is
    /// enabled and the programmed window is valid (base <= limit).
    pub fn assigned_memory_range(&self) -> Option<RangeInclusive<u32>> {
        let (base_addr, limit_addr) =
            self.decode_memory_range(self.state.memory_base, self.state.memory_limit);
        if self.state.command.mmio_enabled() && base_addr <= limit_addr {
            Some(base_addr..=limit_addr)
        } else {
            None
        }
    }

    /// Returns the 64-bit prefetchable memory window (combining the upper
    /// base/limit registers), if MMIO decoding is enabled and the window is
    /// valid (base <= limit).
    pub fn assigned_prefetch_range(&self) -> Option<RangeInclusive<u64>> {
        let (base_low, limit_low) =
            self.decode_memory_range(self.state.prefetch_base, self.state.prefetch_limit);
        let base_addr = (self.state.prefetch_base_upper as u64) << 32 | base_low as u64;
        let limit_addr = (self.state.prefetch_limit_upper as u64) << 32 | limit_low as u64;
        if self.state.command.mmio_enabled() && base_addr <= limit_addr {
            Some(base_addr..=limit_addr)
        } else {
            None
        }
    }

    /// Maps an offset relative to the start of the capability list (0x40) to
    /// the index of the capability it falls in plus the offset within that
    /// capability. Capabilities are laid out back-to-back in vector order.
    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
        let mut cap_offset = 0;
        for i in 0..self.capabilities.len() {
            let cap_size = self.capabilities[i].len() as u16;
            if offset < cap_offset + cap_size {
                return Some((i, offset - cap_offset));
            }
            cap_offset += cap_size;
        }
        None
    }

    /// Emulates a 32-bit config space read at `offset` (dword aligned).
    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
        use cfg_space::HeaderType01;

        *value = match HeaderType01(offset) {
            HeaderType01::DEVICE_VENDOR => {
                (self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
            }
            HeaderType01::STATUS_COMMAND => {
                let status =
                    cfg_space::Status::new().with_capabilities_list(!self.capabilities.is_empty());

                (status.into_bits() as u32) << 16 | self.state.command.into_bits() as u32
            }
            HeaderType01::CLASS_REVISION => {
                (u8::from(self.hardware_ids.base_class) as u32) << 24
                    | (u8::from(self.hardware_ids.sub_class) as u32) << 16
                    | (u8::from(self.hardware_ids.prog_if) as u32) << 8
                    | self.hardware_ids.revision_id as u32
            }
            HeaderType01::BIST_HEADER => {
                // Header type 0x01 (PCI-to-PCI bridge) in byte 2; bit 7 marks
                // a multi-function device.
                if self.multi_function_bit {
                    0x00810000
                } else {
                    0x00010000
                }
            }
            // The bridge implements no BARs.
            HeaderType01::BAR0 => 0,
            HeaderType01::BAR1 => 0,
            HeaderType01::LATENCY_BUS_NUMBERS => {
                (self.state.subordinate_bus_number as u32) << 16
                    | (self.state.secondary_bus_number as u32) << 8
                    | self.state.primary_bus_number as u32
            }
            // No I/O window is implemented.
            HeaderType01::SEC_STATUS_IO_RANGE => 0,
            HeaderType01::MEMORY_RANGE => {
                (self.state.memory_limit as u32) << 16 | self.state.memory_base as u32
            }
            HeaderType01::PREFETCH_RANGE => {
                // The low nibble of each half reads back as 0b0001 to
                // advertise 64-bit prefetchable addressing.
                ((self.state.prefetch_limit | 0b0001) as u32) << 16
                    | (self.state.prefetch_base | 0b0001) as u32
            }
            HeaderType01::PREFETCH_BASE_UPPER => self.state.prefetch_base_upper,
            HeaderType01::PREFETCH_LIMIT_UPPER => self.state.prefetch_limit_upper,
            HeaderType01::IO_RANGE_UPPER => 0,
            HeaderType01::RESERVED_CAP_PTR => {
                // The capability list (when present) starts at offset 0x40.
                if self.capabilities.is_empty() {
                    0
                } else {
                    0x40
                }
            }
            HeaderType01::EXPANSION_ROM_BASE => 0,
            HeaderType01::BRDIGE_CTRL_INTERRUPT => 0,
            // Capability list region.
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    let mut value = self.capabilities[cap_index].read_u32(cap_offset);
                    if cap_offset == 0 {
                        // Patch the next-capability pointer into byte 1 of
                        // the capability header.
                        let next = if cap_index < self.capabilities.len() - 1 {
                            offset as u32 + self.capabilities[cap_index].len() as u32
                        } else {
                            0
                        };
                        assert!(value & 0xff00 == 0);
                        value |= next << 8;
                    }
                    value
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            // Extended (PCI Express) config space; not emulated.
            _ if (0x100..0x1000).contains(&offset) => {
                if offset == 0x100 {
                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
                    // NOTE(review): 0x000ffff == 0xffff; presumably meant as
                    // an all-ones "no extended capabilities" response —
                    // confirm the intended digit count.
                    0x000ffff
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, "unexpected config space read");
                return IoResult::Err(IoError::InvalidRegister);
            }
        };

        IoResult::Ok
    }

    /// Emulates a 32-bit config space write at `offset` (dword aligned).
    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
        use cfg_space::HeaderType01;

        match HeaderType01(offset) {
            HeaderType01::STATUS_COMMAND => {
                let mut command = cfg_space::Command::from_bits(val as u16);
                // Strip (and log) any unsupported command bits.
                if command.into_bits() & !SUPPORTED_COMMAND_BITS != 0 {
                    tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
                    command =
                        cfg_space::Command::from_bits(command.into_bits() & SUPPORTED_COMMAND_BITS);
                };

                // The bridge has no BARs or INTx, so no further side effects
                // are applied here; the stored bits gate the window queries.
                self.state.command = command;
            }
            HeaderType01::LATENCY_BUS_NUMBERS => {
                self.state.subordinate_bus_number = (val >> 16) as u8;
                self.state.secondary_bus_number = (val >> 8) as u8;
                self.state.primary_bus_number = val as u8;
            }
            HeaderType01::MEMORY_RANGE => {
                self.state.memory_base = val as u16;
                self.state.memory_limit = (val >> 16) as u16;
            }
            HeaderType01::PREFETCH_RANGE => {
                self.state.prefetch_base = val as u16;
                self.state.prefetch_limit = (val >> 16) as u16;
            }
            HeaderType01::PREFETCH_BASE_UPPER => {
                self.state.prefetch_base_upper = val;
            }
            HeaderType01::PREFETCH_LIMIT_UPPER => {
                self.state.prefetch_limit_upper = val;
            }
            // Writes to other header registers are silently ignored.
            _ if offset < 0x40 && offset.is_multiple_of(4) => (),
            // Capability list region.
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    self.capabilities[cap_index].write_u32(cap_offset, val);
                } else {
                    tracelimit::warn_ratelimited!(
                        offset,
                        value = val,
                        "unhandled config space write"
                    );
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ if (0x100..0x1000).contains(&offset) => {
                tracelimit::warn_ratelimited!(
                    offset,
                    value = val,
                    "unhandled extended config space write"
                );
                return IoResult::Err(IoError::InvalidRegister);
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
                return IoResult::Err(IoError::InvalidRegister);
            }
        }

        IoResult::Ok
    }
}
901
mod save_restore {
    //! Saved-state (save/restore) support for the type-0 config space
    //! emulator.
    use super::*;
    use thiserror::Error;
    use vmcore::save_restore::RestoreError;
    use vmcore::save_restore::SaveError;
    use vmcore::save_restore::SaveRestore;

    mod state {
        use mesh::payload::Protobuf;
        use vmcore::save_restore::SavedStateBlob;
        use vmcore::save_restore::SavedStateRoot;

        /// Saved state for the type-0 config space emulator.
        #[derive(Protobuf, SavedStateRoot)]
        #[mesh(package = "pci.cfg_space_emu")]
        pub struct SavedState {
            /// Raw command register bits.
            #[mesh(1)]
            pub command: u16,
            /// Raw BAR register values.
            #[mesh(2)]
            pub base_addresses: [u32; 6],
            /// The interrupt-line register.
            #[mesh(3)]
            pub interrupt_line: u8,
            /// The latency-timer register.
            #[mesh(4)]
            pub latency_timer: u8,
            /// Per-capability saved state, keyed by capability label.
            #[mesh(5)]
            pub capabilities: Vec<(String, SavedStateBlob)>,
        }
    }

    /// Errors that can occur while restoring config space saved state.
    #[derive(Debug, Error)]
    enum ConfigSpaceRestoreError {
        #[error("found invalid config bits in saved state")]
        InvalidConfigBits,
        #[error("found unexpected capability {0}")]
        InvalidCap(String),
    }

    impl SaveRestore for ConfigSpaceType0Emulator {
        type SavedState = state::SavedState;

        fn save(&mut self) -> Result<Self::SavedState, SaveError> {
            // Destructure so a new state field forces an update here.
            let ConfigSpaceType0EmulatorState {
                command,
                base_addresses,
                interrupt_line,
                latency_timer,
            } = self.state;

            let saved_state = state::SavedState {
                command: command.into_bits(),
                base_addresses,
                interrupt_line,
                latency_timer,
                capabilities: self
                    .capabilities
                    .iter_mut()
                    .map(|cap| {
                        let id = cap.label().to_owned();
                        Ok((id, cap.save()?))
                    })
                    .collect::<Result<_, _>>()?,
            };

            Ok(saved_state)
        }

        fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
            let state::SavedState {
                command,
                base_addresses,
                interrupt_line,
                latency_timer,
                capabilities,
            } = state;

            self.state = ConfigSpaceType0EmulatorState {
                command: cfg_space::Command::from_bits(command),
                base_addresses,
                interrupt_line,
                latency_timer,
            };

            // NOTE(review): `self.state` is assigned before this validation,
            // so on error the emulator keeps the partially-restored state —
            // presumably callers discard the emulator on restore failure;
            // confirm.
            if command & !SUPPORTED_COMMAND_BITS != 0 {
                return Err(RestoreError::InvalidSavedState(
                    ConfigSpaceRestoreError::InvalidConfigBits.into(),
                ));
            }

            // Re-apply command side effects (BAR mappings, INTx mask).
            self.sync_command_register(self.state.command);
            for (id, entry) in capabilities {
                tracing::debug!(save_id = id.as_str(), "restoring pci capability");

                // Match saved capability blobs to live capabilities by label.
                let mut restored = false;
                for cap in self.capabilities.iter_mut() {
                    if cap.label() == id {
                        cap.restore(entry)?;
                        restored = true;
                        break;
                    }
                }

                if !restored {
                    return Err(RestoreError::InvalidSavedState(
                        ConfigSpaceRestoreError::InvalidCap(id).into(),
                    ));
                }
            }

            Ok(())
        }
    }
}
1015
#[cfg(test)]
mod tests {
    use super::*;
    use crate::capabilities::read_only::ReadOnlyCapability;
    use crate::spec::hwid::ClassCode;
    use crate::spec::hwid::ProgrammingInterface;
    use crate::spec::hwid::Subclass;

    /// Builds a bridge emulator with fixed hardware IDs and the given
    /// capability list.
    fn create_type1_emulator(caps: Vec<Box<dyn PciCapability>>) -> ConfigSpaceType1Emulator {
        let hwids = HardwareIds {
            vendor_id: 0x1111,
            device_id: 0x2222,
            revision_id: 1,
            prog_if: ProgrammingInterface::NONE,
            sub_class: Subclass::BRIDGE_PCI_TO_PCI,
            base_class: ClassCode::BRIDGE,
            type0_sub_vendor_id: 0,
            type0_sub_system_id: 0,
        };
        ConfigSpaceType1Emulator::new(hwids, caps)
    }

    /// Reads a config space dword, panicking on any emulation error.
    fn read_cfg(emulator: &ConfigSpaceType1Emulator, offset: u16) -> u32 {
        let mut value = 0;
        emulator.read_u32(offset, &mut value).unwrap();
        value
    }

    #[test]
    fn test_type1_probe() {
        // With no capabilities, the status register's capabilities-list bit
        // (bit 20 of the dword at offset 4) must be clear.
        let emu = create_type1_emulator(Vec::new());
        assert_eq!(read_cfg(&emu, 0), 0x2222_1111);
        assert_eq!(read_cfg(&emu, 4) & 0x10_0000, 0);

        // With a capability present, the bit must be set.
        let emu = create_type1_emulator(vec![Box::new(ReadOnlyCapability::new("foo", 0))]);
        assert_eq!(read_cfg(&emu, 0), 0x2222_1111);
        assert_eq!(read_cfg(&emu, 4) & 0x10_0000, 0x10_0000);
    }

    #[test]
    fn test_type1_bus_number_assignment() {
        let mut emu = create_type1_emulator(Vec::new());

        // Power-on: nothing assigned.
        assert_eq!(read_cfg(&emu, 0x18), 0);
        assert_eq!(emu.assigned_bus_range(), 0..=0);

        // Secondary set while subordinate is still zero: range is invalid.
        emu.write_u32(0x18, 0x0000_1000).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0000_1000);
        assert_eq!(emu.assigned_bus_range(), 0..=0);

        // Secondary 0x10, subordinate 0x12.
        emu.write_u32(0x18, 0x0012_1000).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0012_1000);
        assert_eq!(emu.assigned_bus_range(), 0x10..=0x12);

        // The primary bus number does not affect the assigned range.
        emu.write_u32(0x18, 0x0012_1033).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0012_1033);
        assert_eq!(emu.assigned_bus_range(), 0x10..=0x12);

        emu.write_u32(0x18, 0x0047_4411).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0047_4411);
        assert_eq!(emu.assigned_bus_range(), 0x44..=0x47);

        // Secondary == subordinate is a single-bus range.
        emu.write_u32(0x18, 0x0088_8800).unwrap();
        assert_eq!(emu.assigned_bus_range(), 0x88..=0x88);

        // Secondary > subordinate is treated as unassigned.
        emu.write_u32(0x18, 0x0087_8800).unwrap();
        assert_eq!(emu.assigned_bus_range(), 0..=0);
    }

    #[test]
    fn test_type1_memory_assignment() {
        const MMIO_ENABLED: u32 = 0x0000_0002;
        const MMIO_DISABLED: u32 = 0x0000_0000;

        let mut emu = create_type1_emulator(Vec::new());
        assert!(emu.assigned_memory_range().is_none());

        // MMIO decoding is still disabled, so no range is reported.
        emu.write_u32(0x20, 0xDEAD_BEEF).unwrap();
        assert!(emu.assigned_memory_range().is_none());

        // A valid window is only reported while decoding is enabled.
        emu.write_u32(0x20, 0xFFF0_FF00).unwrap();
        assert!(emu.assigned_memory_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(emu.assigned_memory_range(), Some(0xFF00_0000..=0xFFFF_FFFF));
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());

        // Base == limit yields a single 1MB granule.
        emu.write_u32(0x20, 0xBBB0_BBB0).unwrap();
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(emu.assigned_memory_range(), Some(0xBBB0_0000..=0xBBBF_FFFF));
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());

        // Base above limit is never reported, even with decoding enabled.
        emu.write_u32(0x20, 0xAA00_BB00).unwrap();
        assert!(emu.assigned_memory_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());
    }

    #[test]
    fn test_type1_prefetch_assignment() {
        const MMIO_ENABLED: u32 = 0x0000_0002;
        const MMIO_DISABLED: u32 = 0x0000_0000;

        let mut emu = create_type1_emulator(Vec::new());
        assert!(emu.assigned_prefetch_range().is_none());

        // Program the 32-bit window plus the 64-bit upper base/limit; only
        // reported once decoding is enabled.
        emu.write_u32(0x24, 0xFFF0_FF00).unwrap();
        emu.write_u32(0x28, 0x00AA_BBCC).unwrap();
        emu.write_u32(0x2C, 0x00DD_EEFF).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            emu.assigned_prefetch_range(),
            Some(0x00AA_BBCC_FF00_0000..=0x00DD_EEFF_FFFF_FFFF)
        );
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());

        // Swapped low halves: base/limit low bits come from the register's
        // respective halves.
        emu.write_u32(0x24, 0xFF00_FFF0).unwrap();
        emu.write_u32(0x28, 0x00AA_BBCC).unwrap();
        emu.write_u32(0x2C, 0x00DD_EEFF).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            emu.assigned_prefetch_range(),
            Some(0x00AA_BBCC_FFF0_0000..=0x00DD_EEFF_FF0F_FFFF)
        );
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());

        // Equal upper halves: a window within a single 4GB region.
        emu.write_u32(0x24, 0xDD00_DD00).unwrap();
        emu.write_u32(0x28, 0x00AA_BBCC).unwrap();
        emu.write_u32(0x2C, 0x00AA_BBCC).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            emu.assigned_prefetch_range(),
            Some(0x00AA_BBCC_DD00_0000..=0x00AA_BBCC_DD0F_FFFF)
        );
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());
    }
}