pci_core/
cfg_space_emu.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Helpers that implement standardized PCI configuration space functionality.
5//!
6//! To be clear: PCI devices are not required to use these helpers, and may
7//! choose to implement configuration space accesses manually.
8
9use crate::PciInterruptPin;
10use crate::bar_mapping::BarMappings;
11use crate::capabilities::PciCapability;
12use crate::spec::cfg_space;
13use crate::spec::hwid::HardwareIds;
14use chipset_device::io::IoError;
15use chipset_device::io::IoResult;
16use chipset_device::mmio::ControlMmioIntercept;
17use guestmem::MappableGuestMemory;
18use inspect::Inspect;
19use std::ops::RangeInclusive;
20use std::sync::Arc;
21use std::sync::atomic::AtomicBool;
22use std::sync::atomic::Ordering;
23use vmcore::line_interrupt::LineInterrupt;
24
/// The set of command-register bits this emulator allows guests to control.
/// Writes containing any other bits are masked down to this set (see the
/// `STATUS_COMMAND` handling in `ConfigSpaceType0Emulator::write_u32`).
const SUPPORTED_COMMAND_BITS: u16 = cfg_space::Command::new()
    .with_pio_enabled(true)
    .with_mmio_enabled(true)
    .with_bus_master(true)
    .with_special_cycles(true)
    .with_enable_memory_write_invalidate(true)
    .with_vga_palette_snoop(true)
    .with_parity_error_response(true)
    .with_enable_serr(true)
    .with_enable_fast_b2b(true)
    .with_intx_disable(true)
    .into_bits();
37
/// A wrapper around a [`LineInterrupt`] that considers PCI configuration space
/// interrupt control bits.
#[derive(Debug, Inspect)]
pub struct IntxInterrupt {
    /// Which INTx# pin (A-D) this interrupt is wired to.
    pin: PciInterruptPin,
    /// The underlying line interrupt that is asserted/deasserted.
    line: LineInterrupt,
    /// Mirrors the command register's "INTx disable" bit (set via
    /// `set_disabled`).
    interrupt_disabled: AtomicBool,
    /// Mirrors the status register's "interrupt status" bit (set via
    /// `set_level`).
    interrupt_status: AtomicBool,
}
47
48impl IntxInterrupt {
49    /// Sets the line level high or low.
50    ///
51    /// NOTE: whether or not this will actually trigger an interrupt will depend
52    /// the status of the Interrupt Disabled bit in the PCI configuration space.
53    pub fn set_level(&self, high: bool) {
54        tracing::debug!(
55            disabled = ?self.interrupt_disabled,
56            status = ?self.interrupt_status,
57            ?high,
58            %self.line,
59            "set_level"
60        );
61
62        // the actual config space bit is set unconditionally
63        self.interrupt_status.store(high, Ordering::SeqCst);
64
65        // ...but whether it also fires an interrupt is a different story
66        if self.interrupt_disabled.load(Ordering::SeqCst) {
67            self.line.set_level(false);
68        } else {
69            self.line.set_level(high);
70        }
71    }
72
73    fn set_disabled(&self, disabled: bool) {
74        tracing::debug!(
75            disabled = ?self.interrupt_disabled,
76            status = ?self.interrupt_status,
77            ?disabled,
78            %self.line,
79            "set_disabled"
80        );
81
82        self.interrupt_disabled.store(disabled, Ordering::SeqCst);
83        if disabled {
84            self.line.set_level(false)
85        } else {
86            if self.interrupt_status.load(Ordering::SeqCst) {
87                self.line.set_level(true)
88            }
89        }
90    }
91}
92
/// Volatile, guest-visible register state for [`ConfigSpaceType0Emulator`].
#[derive(Debug, Inspect)]
struct ConfigSpaceType0EmulatorState {
    /// The command register
    command: cfg_space::Command,
    /// OS-configured BARs
    #[inspect(with = "inspect_helpers::bars")]
    base_addresses: [u32; 6],
    /// The PCI device doesn't actually care about what value is stored here -
    /// this register is just a bit of standardized "scratch space", ostensibly
    /// for firmware to communicate IRQ assignments to the OS, but it can really
    /// be used for just about anything.
    interrupt_line: u8,
    /// A read/write register that doesn't matter in virtualized contexts
    latency_timer: u8,
}
108
109impl ConfigSpaceType0EmulatorState {
110    fn new() -> Self {
111        Self {
112            latency_timer: 0,
113            command: cfg_space::Command::new(),
114            base_addresses: [0; 6],
115            interrupt_line: 0,
116        }
117    }
118}
119
/// Emulator for the standard Type 0 PCI configuration space header.
//
// TODO: Figure out how to split this up and share the handling of common
// registers (hardware IDs, command, status, etc.) with the type 1 emulator.
#[derive(Inspect)]
pub struct ConfigSpaceType0Emulator {
    // Fixed configuration
    /// Write-masks for each 32-bit BAR register (0 = slot not implemented).
    #[inspect(with = "inspect_helpers::bars")]
    bar_masks: [u32; 6],
    /// Static IDs reported through the hardware ID registers.
    hardware_ids: HardwareIds,
    /// Whether to report the multi-function bit in the header-type register.
    multi_function_bit: bool,

    // Runtime glue
    /// Memory backing each implemented BAR.
    #[inspect(with = r#"|x| inspect::iter_by_index(x).prefix("bar")"#)]
    mapped_memory: [Option<BarMemoryKind>; 6],
    /// Capability structures, exposed starting at config offset 0x40.
    #[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
    capabilities: Vec<Box<dyn PciCapability>>,
    /// Legacy INTx wiring, if configured via `set_interrupt_pin`.
    intx_interrupt: Option<Arc<IntxInterrupt>>,

    // Runtime book-keeping
    /// Decoded BAR placements, populated while MMIO decoding is enabled.
    active_bars: BarMappings,

    // Volatile state
    state: ConfigSpaceType0EmulatorState,
}
145
/// Helpers for rendering fields in `inspect` output.
mod inspect_helpers {
    use super::*;

    /// Renders a set of BAR registers as hex values keyed `bar0`..`bar5`.
    pub(crate) fn bars(bars: &[u32; 6]) -> impl Inspect + '_ {
        inspect::AsHex(inspect::iter_by_index(bars).prefix("bar"))
    }
}
153
/// Different kinds of memory that a BAR can be backed by
#[derive(Inspect)]
#[inspect(tag = "kind")]
pub enum BarMemoryKind {
    /// BAR memory is routed to the device's `MmioIntercept` handler
    Intercept(#[inspect(rename = "handle")] Box<dyn ControlMmioIntercept>),
    /// BAR memory is routed to a shared memory region
    // (the mapping object is skipped in inspect output)
    SharedMem(#[inspect(skip)] Box<dyn MappableGuestMemory>),
    /// **TESTING ONLY** BAR memory isn't backed by anything!
    Dummy,
}
165
166impl std::fmt::Debug for BarMemoryKind {
167    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
168        match self {
169            Self::Intercept(control) => {
170                write!(f, "Intercept(region_name: {}, ..)", control.region_name())
171            }
172            Self::SharedMem(_) => write!(f, "Mmap(..)"),
173            Self::Dummy => write!(f, "Dummy"),
174        }
175    }
176}
177
178impl BarMemoryKind {
179    fn map_to_guest(&mut self, gpa: u64) -> std::io::Result<()> {
180        match self {
181            BarMemoryKind::Intercept(control) => {
182                control.map(gpa);
183                Ok(())
184            }
185            BarMemoryKind::SharedMem(control) => control.map_to_guest(gpa, true),
186            BarMemoryKind::Dummy => Ok(()),
187        }
188    }
189
190    fn unmap_from_guest(&mut self) {
191        match self {
192            BarMemoryKind::Intercept(control) => control.unmap(),
193            BarMemoryKind::SharedMem(control) => control.unmap_from_guest(),
194            BarMemoryKind::Dummy => {}
195        }
196    }
197}
198
/// Container type that describes a device's available BARs
// TODO: support more advanced BAR configurations
// e.g: mixed 32-bit and 64-bit
// e.g: IO space BARs
#[derive(Debug)]
pub struct DeviceBars {
    /// For each BAR slot: `Some((len, backing memory))` if implemented.
    bars: [Option<(u64, BarMemoryKind)>; 6],
}
207
208impl DeviceBars {
209    /// Create a new instance of [`DeviceBars`]
210    pub fn new() -> DeviceBars {
211        DeviceBars {
212            bars: Default::default(),
213        }
214    }
215
216    /// Set BAR0
217    pub fn bar0(mut self, len: u64, memory: BarMemoryKind) -> Self {
218        self.bars[0] = Some((len, memory));
219        self
220    }
221
222    /// Set BAR2
223    pub fn bar2(mut self, len: u64, memory: BarMemoryKind) -> Self {
224        self.bars[2] = Some((len, memory));
225        self
226    }
227
228    /// Set BAR4
229    pub fn bar4(mut self, len: u64, memory: BarMemoryKind) -> Self {
230        self.bars[4] = Some((len, memory));
231        self
232    }
233}
234
235impl ConfigSpaceType0Emulator {
236    /// Create a new [`ConfigSpaceType0Emulator`]
237    pub fn new(
238        hardware_ids: HardwareIds,
239        capabilities: Vec<Box<dyn PciCapability>>,
240        bars: DeviceBars,
241    ) -> Self {
242        let mut bar_masks = [0; 6];
243        let mut mapped_memory = {
244            const NONE: Option<BarMemoryKind> = None;
245            [NONE; 6]
246        };
247        for (bar_index, bar) in bars.bars.into_iter().enumerate() {
248            let (len, mapped) = match bar {
249                Some(bar) => bar,
250                None => continue,
251            };
252            // use 64-bit aware BARs
253            assert!(bar_index < 5);
254            // Round up regions to a power of 2, as required by PCI (and
255            // inherently required by the BAR representation). Round up to at
256            // least one page to avoid various problems in guest OSes.
257            const MIN_BAR_SIZE: u64 = 4096;
258            let len = std::cmp::max(len.next_power_of_two(), MIN_BAR_SIZE);
259            let mask64 = !(len - 1);
260            bar_masks[bar_index] = cfg_space::BarEncodingBits::from_bits(mask64 as u32)
261                .with_type_64_bit(true)
262                .into_bits();
263            bar_masks[bar_index + 1] = (mask64 >> 32) as u32;
264            mapped_memory[bar_index] = Some(mapped);
265        }
266
267        Self {
268            bar_masks,
269            hardware_ids,
270            multi_function_bit: false,
271
272            active_bars: Default::default(),
273
274            mapped_memory,
275            capabilities,
276            intx_interrupt: None,
277
278            state: ConfigSpaceType0EmulatorState {
279                command: cfg_space::Command::new(),
280                base_addresses: [0; 6],
281                interrupt_line: 0,
282                latency_timer: 0,
283            },
284        }
285    }
286
287    /// If the device is multi-function, enable bit 7 in the Header register.
288    pub fn with_multi_function_bit(mut self, bit: bool) -> Self {
289        self.multi_function_bit = bit;
290        self
291    }
292
293    /// If using legacy INT#x interrupts: wire a LineInterrupt to one of the 4
294    /// INT#x pins, returning an object that manages configuration space bits
295    /// when the device sets the interrupt level.
296    pub fn set_interrupt_pin(
297        &mut self,
298        pin: PciInterruptPin,
299        line: LineInterrupt,
300    ) -> Arc<IntxInterrupt> {
301        let intx_interrupt = Arc::new(IntxInterrupt {
302            pin,
303            line,
304            interrupt_disabled: AtomicBool::new(false),
305            interrupt_status: AtomicBool::new(false),
306        });
307        self.intx_interrupt = Some(intx_interrupt.clone());
308        intx_interrupt
309    }
310
311    /// Resets the configuration space state.
312    pub fn reset(&mut self) {
313        self.state = ConfigSpaceType0EmulatorState::new();
314
315        self.sync_command_register(self.state.command);
316
317        for cap in &mut self.capabilities {
318            cap.reset();
319        }
320
321        if let Some(intx) = &mut self.intx_interrupt {
322            intx.set_level(false);
323        }
324    }
325
326    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
327        let mut cap_offset = 0;
328        for i in 0..self.capabilities.len() {
329            let cap_size = self.capabilities[i].len() as u16;
330            if offset < cap_offset + cap_size {
331                return Some((i, offset - cap_offset));
332            }
333            cap_offset += cap_size;
334        }
335        None
336    }
337
338    /// Read from the config space. `offset` must be 32-bit aligned.
339    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
340        use cfg_space::HeaderType00;
341
342        *value = match HeaderType00(offset) {
343            HeaderType00::DEVICE_VENDOR => {
344                (self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
345            }
346            HeaderType00::STATUS_COMMAND => {
347                let mut status =
348                    cfg_space::Status::new().with_capabilities_list(!self.capabilities.is_empty());
349
350                if let Some(intx_interrupt) = &self.intx_interrupt {
351                    if intx_interrupt.interrupt_status.load(Ordering::SeqCst) {
352                        status.set_interrupt_status(true);
353                    }
354                }
355
356                (status.into_bits() as u32) << 16 | self.state.command.into_bits() as u32
357            }
358            HeaderType00::CLASS_REVISION => {
359                (u8::from(self.hardware_ids.base_class) as u32) << 24
360                    | (u8::from(self.hardware_ids.sub_class) as u32) << 16
361                    | (u8::from(self.hardware_ids.prog_if) as u32) << 8
362                    | self.hardware_ids.revision_id as u32
363            }
364            HeaderType00::BIST_HEADER => {
365                let mut v = (self.state.latency_timer as u32) << 8;
366                if self.multi_function_bit {
367                    // enable top-most bit of the header register
368                    v |= 0x80 << 16;
369                }
370                v
371            }
372            HeaderType00::BAR0
373            | HeaderType00::BAR1
374            | HeaderType00::BAR2
375            | HeaderType00::BAR3
376            | HeaderType00::BAR4
377            | HeaderType00::BAR5 => {
378                self.state.base_addresses[(offset - HeaderType00::BAR0.0) as usize / 4]
379            }
380            HeaderType00::CARDBUS_CIS_PTR => 0,
381            HeaderType00::SUBSYSTEM_ID => {
382                (self.hardware_ids.type0_sub_system_id as u32) << 16
383                    | self.hardware_ids.type0_sub_vendor_id as u32
384            }
385            HeaderType00::EXPANSION_ROM_BASE => 0,
386            HeaderType00::RESERVED_CAP_PTR => {
387                if self.capabilities.is_empty() {
388                    0
389                } else {
390                    0x40
391                }
392            }
393            HeaderType00::RESERVED => 0,
394            HeaderType00::LATENCY_INTERRUPT => {
395                let interrupt_pin = if let Some(intx_interrupt) = &self.intx_interrupt {
396                    match intx_interrupt.pin {
397                        PciInterruptPin::IntA => 1,
398                        PciInterruptPin::IntB => 2,
399                        PciInterruptPin::IntC => 3,
400                        PciInterruptPin::IntD => 4,
401                    }
402                } else {
403                    0
404                };
405                self.state.interrupt_line as u32 | (interrupt_pin as u32) << 8
406            }
407            // rest of the range is reserved for extended device capabilities
408            _ if (0x40..0x100).contains(&offset) => {
409                if let Some((cap_index, cap_offset)) =
410                    self.get_capability_index_and_offset(offset - 0x40)
411                {
412                    let mut value = self.capabilities[cap_index].read_u32(cap_offset);
413                    if cap_offset == 0 {
414                        let next = if cap_index < self.capabilities.len() - 1 {
415                            offset as u32 + self.capabilities[cap_index].len() as u32
416                        } else {
417                            0
418                        };
419                        assert!(value & 0xff00 == 0);
420                        value |= next << 8;
421                    }
422                    value
423                } else {
424                    tracelimit::warn_ratelimited!(offset, "unhandled config space read");
425                    return IoResult::Err(IoError::InvalidRegister);
426                }
427            }
428            _ if (0x100..0x1000).contains(&offset) => {
429                // TODO: properly support extended pci express configuration space
430                if offset == 0x100 {
431                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
432                    0x000ffff
433                } else {
434                    tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
435                    return IoResult::Err(IoError::InvalidRegister);
436                }
437            }
438            _ => {
439                tracelimit::warn_ratelimited!(offset, "unexpected config space read");
440                return IoResult::Err(IoError::InvalidRegister);
441            }
442        };
443
444        IoResult::Ok
445    }
446
447    fn update_intx_disable(&mut self, command: cfg_space::Command) {
448        if let Some(intx_interrupt) = &self.intx_interrupt {
449            intx_interrupt.set_disabled(command.intx_disable())
450        }
451    }
452
453    fn update_mmio_enabled(&mut self, command: cfg_space::Command) {
454        if command.mmio_enabled() {
455            self.active_bars = BarMappings::parse(&self.state.base_addresses, &self.bar_masks);
456            for (bar, mapping) in self.mapped_memory.iter_mut().enumerate() {
457                if let Some(mapping) = mapping {
458                    let base = self.active_bars.get(bar as u8).expect("bar exists");
459                    match mapping.map_to_guest(base) {
460                        Ok(_) => {}
461                        Err(err) => {
462                            tracelimit::error_ratelimited!(
463                                error = &err as &dyn std::error::Error,
464                                bar,
465                                base,
466                                "failed to map bar",
467                            )
468                        }
469                    }
470                }
471            }
472        } else {
473            self.active_bars = Default::default();
474            for mapping in self.mapped_memory.iter_mut().flatten() {
475                mapping.unmap_from_guest();
476            }
477        }
478    }
479
480    fn sync_command_register(&mut self, command: cfg_space::Command) {
481        self.update_intx_disable(command);
482        self.update_mmio_enabled(command);
483    }
484
485    /// Write to the config space. `offset` must be 32-bit aligned.
486    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
487        use cfg_space::HeaderType00;
488
489        match HeaderType00(offset) {
490            HeaderType00::STATUS_COMMAND => {
491                let mut command = cfg_space::Command::from_bits(val as u16);
492                if command.into_bits() & !SUPPORTED_COMMAND_BITS != 0 {
493                    tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
494                    // still do our best
495                    command =
496                        cfg_space::Command::from_bits(command.into_bits() & SUPPORTED_COMMAND_BITS);
497                };
498
499                if self.state.command.intx_disable() != command.intx_disable() {
500                    self.update_intx_disable(command)
501                }
502
503                if self.state.command.mmio_enabled() != command.mmio_enabled() {
504                    self.update_mmio_enabled(command)
505                }
506
507                self.state.command = command;
508            }
509            HeaderType00::BIST_HEADER => {
510                // allow writes to the latency timer
511                let timer_val = (val >> 8) as u8;
512                self.state.latency_timer = timer_val;
513            }
514            HeaderType00::BAR0
515            | HeaderType00::BAR1
516            | HeaderType00::BAR2
517            | HeaderType00::BAR3
518            | HeaderType00::BAR4
519            | HeaderType00::BAR5 => {
520                if !self.state.command.mmio_enabled() {
521                    let bar_index = (offset - HeaderType00::BAR0.0) as usize / 4;
522                    let mut bar_value = val & self.bar_masks[bar_index];
523                    if bar_index & 1 == 0 && self.bar_masks[bar_index] != 0 {
524                        bar_value = cfg_space::BarEncodingBits::from_bits(bar_value)
525                            .with_type_64_bit(true)
526                            .into_bits();
527                    }
528                    self.state.base_addresses[bar_index] = bar_value;
529                }
530            }
531            HeaderType00::LATENCY_INTERRUPT => {
532                self.state.interrupt_line = ((val & 0xff00) >> 8) as u8;
533            }
534            // all other base regs are noops
535            _ if offset < 0x40 && offset.is_multiple_of(4) => (),
536            // rest of the range is reserved for extended device capabilities
537            _ if (0x40..0x100).contains(&offset) => {
538                if let Some((cap_index, cap_offset)) =
539                    self.get_capability_index_and_offset(offset - 0x40)
540                {
541                    self.capabilities[cap_index].write_u32(cap_offset, val);
542                } else {
543                    tracelimit::warn_ratelimited!(
544                        offset,
545                        value = val,
546                        "unhandled config space write"
547                    );
548                    return IoResult::Err(IoError::InvalidRegister);
549                }
550            }
551            _ if (0x100..0x1000).contains(&offset) => {
552                // TODO: properly support extended pci express configuration space
553                tracelimit::warn_ratelimited!(
554                    offset,
555                    value = val,
556                    "unhandled extended config space write"
557                );
558                return IoResult::Err(IoError::InvalidRegister);
559            }
560            _ => {
561                tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
562                return IoResult::Err(IoError::InvalidRegister);
563            }
564        }
565
566        IoResult::Ok
567    }
568
569    /// Finds a BAR + offset by address.
570    pub fn find_bar(&self, address: u64) -> Option<(u8, u16)> {
571        self.active_bars.find(address)
572    }
573}
574
/// Volatile register state for [`ConfigSpaceType1Emulator`].
#[derive(Debug, Inspect)]
struct ConfigSpaceType1EmulatorState {
    /// The command register
    command: cfg_space::Command,
    /// The subordinate bus number register. Software programs
    /// this register with the highest bus number below the bridge.
    subordinate_bus_number: u8,
    /// The secondary bus number register. Software programs
    /// this register with the bus number assigned to the secondary
    /// side of the bridge.
    secondary_bus_number: u8,
    /// The primary bus number register. This is unused for PCI Express but
    /// is supposed to be read/write for compatibility with legacy software.
    primary_bus_number: u8,
    /// The memory base register. Software programs the upper 12 bits of this
    /// register with the upper 12 bits of a 32-bit base address of MMIO assigned
    /// to the hierarchy under the bridge (the lower 20 bits are assumed to be 0s).
    memory_base: u16,
    /// The memory limit register. Software programs the upper 12 bits of this
    /// register with the upper 12 bits of a 32-bit limit address of MMIO assigned
    /// to the hierarchy under the bridge (the lower 20 bits are assumed to be 1s).
    memory_limit: u16,
    /// The prefetchable memory base register. Software programs the upper 12 bits of
    /// this register with bits 20:31 of the base address of the prefetchable MMIO
    /// window assigned to the hierarchy under the bridge. Bits 0:19 are assumed to
    /// be 0s.
    prefetch_base: u16,
    /// The prefetchable memory limit register. Software programs the upper 12 bits of
    /// this register with bits 20:31 of the limit address of the prefetchable MMIO
    /// window assigned to the hierarchy under the bridge. Bits 0:19 are assumed to
    /// be 1s.
    prefetch_limit: u16,
    /// The prefetchable memory base upper 32 bits register. When the bridge supports
    /// 64-bit addressing for prefetchable memory, software programs this register
    /// with the upper 32 bits of the base address of the prefetchable MMIO window
    /// assigned to the hierarchy under the bridge.
    prefetch_base_upper: u32,
    /// The prefetchable memory limit upper 32 bits register. When the bridge supports
    /// 64-bit addressing for prefetchable memory, software programs this register
    /// with the upper 32 bits of the base address of the prefetchable MMIO window
    /// assigned to the hierarchy under the bridge.
    prefetch_limit_upper: u32,
}
618
619impl ConfigSpaceType1EmulatorState {
620    fn new() -> Self {
621        Self {
622            command: cfg_space::Command::new(),
623            subordinate_bus_number: 0,
624            secondary_bus_number: 0,
625            primary_bus_number: 0,
626            memory_base: 0,
627            memory_limit: 0,
628            prefetch_base: 0,
629            prefetch_limit: 0,
630            prefetch_base_upper: 0,
631            prefetch_limit_upper: 0,
632        }
633    }
634}
635
/// Emulator for the standard Type 1 PCI configuration space header.
//
// TODO: Figure out how to split this up and share the handling of common
// registers (hardware IDs, command, status, etc.) with the type 0 emulator.
// TODO: Support type 1 BARs (only two)
#[derive(Inspect)]
pub struct ConfigSpaceType1Emulator {
    /// Static IDs reported through the hardware ID registers.
    hardware_ids: HardwareIds,
    /// Capability structures, exposed starting at config offset 0x40.
    #[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
    capabilities: Vec<Box<dyn PciCapability>>,
    /// Whether to report the multi-function bit in the header-type register.
    multi_function_bit: bool,
    /// Volatile register state.
    state: ConfigSpaceType1EmulatorState,
}
649
650impl ConfigSpaceType1Emulator {
651    /// Create a new [`ConfigSpaceType1Emulator`]
652    pub fn new(hardware_ids: HardwareIds, capabilities: Vec<Box<dyn PciCapability>>) -> Self {
653        Self {
654            hardware_ids,
655            capabilities,
656            multi_function_bit: false,
657            state: ConfigSpaceType1EmulatorState::new(),
658        }
659    }
660
661    /// Resets the configuration space state.
662    pub fn reset(&mut self) {
663        self.state = ConfigSpaceType1EmulatorState::new();
664
665        for cap in &mut self.capabilities {
666            cap.reset();
667        }
668    }
669
670    /// Set the multi-function bit for this device.
671    pub fn with_multi_function_bit(mut self, multi_function: bool) -> Self {
672        self.multi_function_bit = multi_function;
673        self
674    }
675
676    /// Returns the range of bus numbers the bridge is programmed to decode.
677    pub fn assigned_bus_range(&self) -> RangeInclusive<u8> {
678        let secondary = self.state.secondary_bus_number;
679        let subordinate = self.state.subordinate_bus_number;
680        if secondary <= subordinate {
681            secondary..=subordinate
682        } else {
683            0..=0
684        }
685    }
686
687    fn decode_memory_range(&self, base_register: u16, limit_register: u16) -> (u32, u32) {
688        let base_addr = ((base_register & !0b1111) as u32) << 16;
689        let limit_addr = ((limit_register & !0b1111) as u32) << 16 | 0xF_FFFF;
690        (base_addr, limit_addr)
691    }
692
693    /// If memory decoding is currently enabled, and the memory window assignment is valid,
694    /// returns the 32-bit memory addresses the bridge is programmed to decode.
695    pub fn assigned_memory_range(&self) -> Option<RangeInclusive<u32>> {
696        let (base_addr, limit_addr) =
697            self.decode_memory_range(self.state.memory_base, self.state.memory_limit);
698        if self.state.command.mmio_enabled() && base_addr <= limit_addr {
699            Some(base_addr..=limit_addr)
700        } else {
701            None
702        }
703    }
704
705    /// If memory decoding is currently enabled, and the prefetchable memory window assignment
706    /// is valid, returns the 64-bit prefetchable memory addresses the bridge is programmed to decode.
707    pub fn assigned_prefetch_range(&self) -> Option<RangeInclusive<u64>> {
708        let (base_low, limit_low) =
709            self.decode_memory_range(self.state.prefetch_base, self.state.prefetch_limit);
710        let base_addr = (self.state.prefetch_base_upper as u64) << 32 | base_low as u64;
711        let limit_addr = (self.state.prefetch_limit_upper as u64) << 32 | limit_low as u64;
712        if self.state.command.mmio_enabled() && base_addr <= limit_addr {
713            Some(base_addr..=limit_addr)
714        } else {
715            None
716        }
717    }
718
719    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
720        let mut cap_offset = 0;
721        for i in 0..self.capabilities.len() {
722            let cap_size = self.capabilities[i].len() as u16;
723            if offset < cap_offset + cap_size {
724                return Some((i, offset - cap_offset));
725            }
726            cap_offset += cap_size;
727        }
728        None
729    }
730
731    /// Read from the config space. `offset` must be 32-bit aligned.
732    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
733        use cfg_space::HeaderType01;
734
735        *value = match HeaderType01(offset) {
736            HeaderType01::DEVICE_VENDOR => {
737                (self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
738            }
739            HeaderType01::STATUS_COMMAND => {
740                let status =
741                    cfg_space::Status::new().with_capabilities_list(!self.capabilities.is_empty());
742
743                (status.into_bits() as u32) << 16 | self.state.command.into_bits() as u32
744            }
745            HeaderType01::CLASS_REVISION => {
746                (u8::from(self.hardware_ids.base_class) as u32) << 24
747                    | (u8::from(self.hardware_ids.sub_class) as u32) << 16
748                    | (u8::from(self.hardware_ids.prog_if) as u32) << 8
749                    | self.hardware_ids.revision_id as u32
750            }
751            HeaderType01::BIST_HEADER => {
752                // Header type 01 with optional multi-function bit
753                if self.multi_function_bit {
754                    0x00810000 // Header type 01 with multi-function bit (bit 23)
755                } else {
756                    0x00010000 // Header type 01 without multi-function bit
757                }
758            }
759            HeaderType01::BAR0 => 0,
760            HeaderType01::BAR1 => 0,
761            HeaderType01::LATENCY_BUS_NUMBERS => {
762                (self.state.subordinate_bus_number as u32) << 16
763                    | (self.state.secondary_bus_number as u32) << 8
764                    | self.state.primary_bus_number as u32
765            }
766            HeaderType01::SEC_STATUS_IO_RANGE => 0,
767            HeaderType01::MEMORY_RANGE => {
768                (self.state.memory_limit as u32) << 16 | self.state.memory_base as u32
769            }
770            HeaderType01::PREFETCH_RANGE => {
771                // Set the low bit in both the limit and base registers to indicate
772                // support for 64-bit addressing.
773                ((self.state.prefetch_limit | 0b0001) as u32) << 16
774                    | (self.state.prefetch_base | 0b0001) as u32
775            }
776            HeaderType01::PREFETCH_BASE_UPPER => self.state.prefetch_base_upper,
777            HeaderType01::PREFETCH_LIMIT_UPPER => self.state.prefetch_limit_upper,
778            HeaderType01::IO_RANGE_UPPER => 0,
779            HeaderType01::RESERVED_CAP_PTR => {
780                if self.capabilities.is_empty() {
781                    0
782                } else {
783                    0x40
784                }
785            }
786            HeaderType01::EXPANSION_ROM_BASE => 0,
787            HeaderType01::BRDIGE_CTRL_INTERRUPT => 0,
788            // rest of the range is reserved for device capabilities
789            _ if (0x40..0x100).contains(&offset) => {
790                if let Some((cap_index, cap_offset)) =
791                    self.get_capability_index_and_offset(offset - 0x40)
792                {
793                    let mut value = self.capabilities[cap_index].read_u32(cap_offset);
794                    if cap_offset == 0 {
795                        let next = if cap_index < self.capabilities.len() - 1 {
796                            offset as u32 + self.capabilities[cap_index].len() as u32
797                        } else {
798                            0
799                        };
800                        assert!(value & 0xff00 == 0);
801                        value |= next << 8;
802                    }
803                    value
804                } else {
805                    tracelimit::warn_ratelimited!(offset, "unhandled config space read");
806                    return IoResult::Err(IoError::InvalidRegister);
807                }
808            }
809            _ if (0x100..0x1000).contains(&offset) => {
810                // TODO: properly support extended pci express configuration space
811                if offset == 0x100 {
812                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
813                    0x000ffff
814                } else {
815                    tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
816                    return IoResult::Err(IoError::InvalidRegister);
817                }
818            }
819            _ => {
820                tracelimit::warn_ratelimited!(offset, "unexpected config space read");
821                return IoResult::Err(IoError::InvalidRegister);
822            }
823        };
824
825        IoResult::Ok
826    }
827
828    /// Write to the config space. `offset` must be 32-bit aligned.
829    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
830        use cfg_space::HeaderType01;
831
832        match HeaderType01(offset) {
833            HeaderType01::STATUS_COMMAND => {
834                let mut command = cfg_space::Command::from_bits(val as u16);
835                if command.into_bits() & !SUPPORTED_COMMAND_BITS != 0 {
836                    tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
837                    // still do our best
838                    command =
839                        cfg_space::Command::from_bits(command.into_bits() & SUPPORTED_COMMAND_BITS);
840                };
841
842                // TODO: when the memory space enable bit is written, sanity check the programmed
843                // memory and prefetch ranges...
844
845                self.state.command = command;
846            }
847            HeaderType01::LATENCY_BUS_NUMBERS => {
848                self.state.subordinate_bus_number = (val >> 16) as u8;
849                self.state.secondary_bus_number = (val >> 8) as u8;
850                self.state.primary_bus_number = val as u8;
851            }
852            HeaderType01::MEMORY_RANGE => {
853                self.state.memory_base = val as u16;
854                self.state.memory_limit = (val >> 16) as u16;
855            }
856            HeaderType01::PREFETCH_RANGE => {
857                self.state.prefetch_base = val as u16;
858                self.state.prefetch_limit = (val >> 16) as u16;
859            }
860            HeaderType01::PREFETCH_BASE_UPPER => {
861                self.state.prefetch_base_upper = val;
862            }
863            HeaderType01::PREFETCH_LIMIT_UPPER => {
864                self.state.prefetch_limit_upper = val;
865            }
866            // all other base regs are noops
867            _ if offset < 0x40 && offset.is_multiple_of(4) => (),
868            // rest of the range is reserved for extended device capabilities
869            _ if (0x40..0x100).contains(&offset) => {
870                if let Some((cap_index, cap_offset)) =
871                    self.get_capability_index_and_offset(offset - 0x40)
872                {
873                    self.capabilities[cap_index].write_u32(cap_offset, val);
874                } else {
875                    tracelimit::warn_ratelimited!(
876                        offset,
877                        value = val,
878                        "unhandled config space write"
879                    );
880                    return IoResult::Err(IoError::InvalidRegister);
881                }
882            }
883            _ if (0x100..0x1000).contains(&offset) => {
884                // TODO: properly support extended pci express configuration space
885                tracelimit::warn_ratelimited!(
886                    offset,
887                    value = val,
888                    "unhandled extended config space write"
889                );
890                return IoResult::Err(IoError::InvalidRegister);
891            }
892            _ => {
893                tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
894                return IoResult::Err(IoError::InvalidRegister);
895            }
896        }
897
898        IoResult::Ok
899    }
900}
901
902mod save_restore {
903    use super::*;
904    use thiserror::Error;
905    use vmcore::save_restore::RestoreError;
906    use vmcore::save_restore::SaveError;
907    use vmcore::save_restore::SaveRestore;
908
909    mod state {
910        use mesh::payload::Protobuf;
911        use vmcore::save_restore::SavedStateBlob;
912        use vmcore::save_restore::SavedStateRoot;
913
914        #[derive(Protobuf, SavedStateRoot)]
915        #[mesh(package = "pci.cfg_space_emu")]
916        pub struct SavedState {
917            #[mesh(1)]
918            pub command: u16,
919            #[mesh(2)]
920            pub base_addresses: [u32; 6],
921            #[mesh(3)]
922            pub interrupt_line: u8,
923            #[mesh(4)]
924            pub latency_timer: u8,
925            #[mesh(5)]
926            pub capabilities: Vec<(String, SavedStateBlob)>,
927        }
928    }
929
930    #[derive(Debug, Error)]
931    enum ConfigSpaceRestoreError {
932        #[error("found invalid config bits in saved state")]
933        InvalidConfigBits,
934        #[error("found unexpected capability {0}")]
935        InvalidCap(String),
936    }
937
938    impl SaveRestore for ConfigSpaceType0Emulator {
939        type SavedState = state::SavedState;
940
941        fn save(&mut self) -> Result<Self::SavedState, SaveError> {
942            let ConfigSpaceType0EmulatorState {
943                command,
944                base_addresses,
945                interrupt_line,
946                latency_timer,
947            } = self.state;
948
949            let saved_state = state::SavedState {
950                command: command.into_bits(),
951                base_addresses,
952                interrupt_line,
953                latency_timer,
954                capabilities: self
955                    .capabilities
956                    .iter_mut()
957                    .map(|cap| {
958                        let id = cap.label().to_owned();
959                        Ok((id, cap.save()?))
960                    })
961                    .collect::<Result<_, _>>()?,
962            };
963
964            Ok(saved_state)
965        }
966
967        fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
968            let state::SavedState {
969                command,
970                base_addresses,
971                interrupt_line,
972                latency_timer,
973                capabilities,
974            } = state;
975
976            self.state = ConfigSpaceType0EmulatorState {
977                command: cfg_space::Command::from_bits(command),
978                base_addresses,
979                interrupt_line,
980                latency_timer,
981            };
982
983            if command & !SUPPORTED_COMMAND_BITS != 0 {
984                return Err(RestoreError::InvalidSavedState(
985                    ConfigSpaceRestoreError::InvalidConfigBits.into(),
986                ));
987            }
988
989            self.sync_command_register(self.state.command);
990            for (id, entry) in capabilities {
991                tracing::debug!(save_id = id.as_str(), "restoring pci capability");
992
993                // yes, yes, this is O(n^2), but devices never have more than a
994                // handful of caps, so it's totally fine.
995                let mut restored = false;
996                for cap in self.capabilities.iter_mut() {
997                    if cap.label() == id {
998                        cap.restore(entry)?;
999                        restored = true;
1000                        break;
1001                    }
1002                }
1003
1004                if !restored {
1005                    return Err(RestoreError::InvalidSavedState(
1006                        ConfigSpaceRestoreError::InvalidCap(id).into(),
1007                    ));
1008                }
1009            }
1010
1011            Ok(())
1012        }
1013    }
1014}
1015
#[cfg(test)]
mod tests {
    use super::*;
    use crate::capabilities::read_only::ReadOnlyCapability;
    use crate::spec::hwid::ClassCode;
    use crate::spec::hwid::ProgrammingInterface;
    use crate::spec::hwid::Subclass;

    /// Builds a type 1 (PCI-to-PCI bridge) config space emulator with fixed
    /// hardware IDs (vendor 0x1111, device 0x2222, revision 1) and the given
    /// capability list.
    fn create_type1_emulator(caps: Vec<Box<dyn PciCapability>>) -> ConfigSpaceType1Emulator {
        ConfigSpaceType1Emulator::new(
            HardwareIds {
                vendor_id: 0x1111,
                device_id: 0x2222,
                revision_id: 1,
                prog_if: ProgrammingInterface::NONE,
                sub_class: Subclass::BRIDGE_PCI_TO_PCI,
                base_class: ClassCode::BRIDGE,
                type0_sub_vendor_id: 0,
                type0_sub_system_id: 0,
            },
            caps,
        )
    }

    /// Reads the 32-bit config space register at `offset`, panicking if the
    /// emulator reports an access error.
    fn read_cfg(emulator: &ConfigSpaceType1Emulator, offset: u16) -> u32 {
        let mut val = 0;
        emulator.read_u32(offset, &mut val).unwrap();
        val
    }

    #[test]
    fn test_type1_probe() {
        // With no capabilities, the device/vendor ID dword reads back and the
        // capabilities-list bit (bit 20 of the status/command dword) is clear.
        let emu = create_type1_emulator(vec![]);
        assert_eq!(read_cfg(&emu, 0), 0x2222_1111);
        assert_eq!(read_cfg(&emu, 4) & 0x10_0000, 0); // Capabilities list bit

        // Adding a capability sets the capabilities-list bit.
        let emu = create_type1_emulator(vec![Box::new(ReadOnlyCapability::new("foo", 0))]);
        assert_eq!(read_cfg(&emu, 0), 0x2222_1111);
        assert_eq!(read_cfg(&emu, 4) & 0x10_0000, 0x10_0000); // Capabilities list bit
    }

    #[test]
    fn test_type1_bus_number_assignment() {
        let mut emu = create_type1_emulator(vec![]);

        // The bus number (and latency timer) registers are
        // all default 0.
        assert_eq!(read_cfg(&emu, 0x18), 0);
        assert_eq!(emu.assigned_bus_range(), 0..=0);

        // The bus numbers can be programmed one by one,
        // and the range may not be valid during the middle
        // of allocation.
        emu.write_u32(0x18, 0x0000_1000).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0000_1000);
        assert_eq!(emu.assigned_bus_range(), 0..=0);
        emu.write_u32(0x18, 0x0012_1000).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0012_1000);
        assert_eq!(emu.assigned_bus_range(), 0x10..=0x12);

        // The primary bus number register is read/write for compatibility
        // but unused.
        emu.write_u32(0x18, 0x0012_1033).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0012_1033);
        assert_eq!(emu.assigned_bus_range(), 0x10..=0x12);

        // Software can also just write the entire 4byte value at once
        emu.write_u32(0x18, 0x0047_4411).unwrap();
        assert_eq!(read_cfg(&emu, 0x18), 0x0047_4411);
        assert_eq!(emu.assigned_bus_range(), 0x44..=0x47);

        // The subordinate bus number can equal the secondary bus number...
        emu.write_u32(0x18, 0x0088_8800).unwrap();
        assert_eq!(emu.assigned_bus_range(), 0x88..=0x88);

        // ... but it cannot be less, that's a confused guest OS.
        emu.write_u32(0x18, 0x0087_8800).unwrap();
        assert_eq!(emu.assigned_bus_range(), 0..=0);
    }

    #[test]
    fn test_type1_memory_assignment() {
        // Command register values: bit 1 is the MMIO (memory space) enable.
        const MMIO_ENABLED: u32 = 0x0000_0002;
        const MMIO_DISABLED: u32 = 0x0000_0000;

        let mut emu = create_type1_emulator(vec![]);
        assert!(emu.assigned_memory_range().is_none());

        // The guest can write whatever it wants while MMIO
        // is disabled.
        emu.write_u32(0x20, 0xDEAD_BEEF).unwrap();
        assert!(emu.assigned_memory_range().is_none());

        // The guest can program a valid resource assignment...
        emu.write_u32(0x20, 0xFFF0_FF00).unwrap();
        assert!(emu.assigned_memory_range().is_none());
        // ... enable memory decoding...
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(emu.assigned_memory_range(), Some(0xFF00_0000..=0xFFFF_FFFF));
        // ... then disable memory decoding again.
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());

        // Setting memory base equal to memory limit is a valid 1MB range.
        emu.write_u32(0x20, 0xBBB0_BBB0).unwrap();
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(emu.assigned_memory_range(), Some(0xBBB0_0000..=0xBBBF_FFFF));
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());

        // The guest can try to program an invalid assignment (base > limit), we
        // just won't decode it.
        emu.write_u32(0x20, 0xAA00_BB00).unwrap();
        assert!(emu.assigned_memory_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_memory_range().is_none());
    }

    #[test]
    fn test_type1_prefetch_assignment() {
        // Command register values: bit 1 is the MMIO (memory space) enable.
        const MMIO_ENABLED: u32 = 0x0000_0002;
        const MMIO_DISABLED: u32 = 0x0000_0000;

        let mut emu = create_type1_emulator(vec![]);
        assert!(emu.assigned_prefetch_range().is_none());

        // The guest can program a valid prefetch range...
        emu.write_u32(0x24, 0xFFF0_FF00).unwrap(); // limit + base
        emu.write_u32(0x28, 0x00AA_BBCC).unwrap(); // base upper
        emu.write_u32(0x2C, 0x00DD_EEFF).unwrap(); // limit upper
        assert!(emu.assigned_prefetch_range().is_none());
        // ... enable memory decoding...
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            emu.assigned_prefetch_range(),
            Some(0x00AA_BBCC_FF00_0000..=0x00DD_EEFF_FFFF_FFFF)
        );
        // ... then disable memory decoding again.
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());

        // The validity of the assignment is determined using the combined 64-bit
        // address, not the lower bits or the upper bits in isolation.

        // Lower bits of the limit are greater than the lower bits of the
        // base, but the upper bits make that valid.
        emu.write_u32(0x24, 0xFF00_FFF0).unwrap(); // limit + base
        emu.write_u32(0x28, 0x00AA_BBCC).unwrap(); // base upper
        emu.write_u32(0x2C, 0x00DD_EEFF).unwrap(); // limit upper
        assert!(emu.assigned_prefetch_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            emu.assigned_prefetch_range(),
            Some(0x00AA_BBCC_FFF0_0000..=0x00DD_EEFF_FF0F_FFFF)
        );
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());

        // The base can equal the limit, which is a valid 1MB range.
        emu.write_u32(0x24, 0xDD00_DD00).unwrap(); // limit + base
        emu.write_u32(0x28, 0x00AA_BBCC).unwrap(); // base upper
        emu.write_u32(0x2C, 0x00AA_BBCC).unwrap(); // limit upper
        assert!(emu.assigned_prefetch_range().is_none());
        emu.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            emu.assigned_prefetch_range(),
            Some(0x00AA_BBCC_DD00_0000..=0x00AA_BBCC_DD0F_FFFF)
        );
        emu.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(emu.assigned_prefetch_range().is_none());
    }
}