pci_core/
cfg_space_emu.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Helpers that implement standardized PCI configuration space functionality.
5//!
6//! To be clear: PCI devices are not required to use these helpers, and may
7//! choose to implement configuration space accesses manually.
8
9use crate::PciInterruptPin;
10use crate::bar_mapping::BarMappings;
11use crate::capabilities::PciCapability;
12use crate::spec::cfg_space;
13use crate::spec::hwid::HardwareIds;
14use chipset_device::io::IoError;
15use chipset_device::io::IoResult;
16use chipset_device::mmio::ControlMmioIntercept;
17use guestmem::MappableGuestMemory;
18use inspect::Inspect;
19use std::ops::RangeInclusive;
20use std::sync::Arc;
21use std::sync::atomic::AtomicBool;
22use std::sync::atomic::Ordering;
23use vmcore::line_interrupt::LineInterrupt;
24
/// Bitmask of Command-register bits that this emulator lets the guest set.
/// Writes attempting to set any bit outside this mask are logged and masked
/// down to the supported subset (see `write_u32`).
const SUPPORTED_COMMAND_BITS: u16 = cfg_space::Command::new()
    .with_pio_enabled(true)
    .with_mmio_enabled(true)
    .with_bus_master(true)
    .with_special_cycles(true)
    .with_enable_memory_write_invalidate(true)
    .with_vga_palette_snoop(true)
    .with_parity_error_response(true)
    .with_enable_serr(true)
    .with_enable_fast_b2b(true)
    .with_intx_disable(true)
    .into_bits();
37
38/// A wrapper around a [`LineInterrupt`] that considers PCI configuration space
39/// interrupt control bits.
/// A wrapper around a [`LineInterrupt`] that considers PCI configuration space
/// interrupt control bits.
#[derive(Debug, Inspect)]
pub struct IntxInterrupt {
    /// Which INTx pin (A through D) this interrupt is wired to.
    pin: PciInterruptPin,
    /// The underlying line interrupt that gets asserted/deasserted.
    line: LineInterrupt,
    /// Mirrors the Command register's "INTx disable" bit (see `set_disabled`).
    interrupt_disabled: AtomicBool,
    /// Mirrors the Status register's "interrupt status" bit (see `set_level`).
    interrupt_status: AtomicBool,
}
47
48impl IntxInterrupt {
49    /// Sets the line level high or low.
50    ///
51    /// NOTE: whether or not this will actually trigger an interrupt will depend
52    /// the status of the Interrupt Disabled bit in the PCI configuration space.
53    pub fn set_level(&self, high: bool) {
54        tracing::debug!(
55            disabled = ?self.interrupt_disabled,
56            status = ?self.interrupt_status,
57            ?high,
58            %self.line,
59            "set_level"
60        );
61
62        // the actual config space bit is set unconditionally
63        self.interrupt_status.store(high, Ordering::SeqCst);
64
65        // ...but whether it also fires an interrupt is a different story
66        if self.interrupt_disabled.load(Ordering::SeqCst) {
67            self.line.set_level(false);
68        } else {
69            self.line.set_level(high);
70        }
71    }
72
73    fn set_disabled(&self, disabled: bool) {
74        tracing::debug!(
75            disabled = ?self.interrupt_disabled,
76            status = ?self.interrupt_status,
77            ?disabled,
78            %self.line,
79            "set_disabled"
80        );
81
82        self.interrupt_disabled.store(disabled, Ordering::SeqCst);
83        if disabled {
84            self.line.set_level(false)
85        } else {
86            if self.interrupt_status.load(Ordering::SeqCst) {
87                self.line.set_level(true)
88            }
89        }
90    }
91}
92
/// Volatile (guest-mutable) register state backing [`ConfigSpaceType0Emulator`].
#[derive(Debug, Inspect)]
struct ConfigSpaceType0EmulatorState {
    /// The command register
    command: cfg_space::Command,
    /// OS-configured BARs
    #[inspect(with = "inspect_helpers::bars")]
    base_addresses: [u32; 6],
    /// The PCI device doesn't actually care about what value is stored here -
    /// this register is just a bit of standardized "scratch space", ostensibly
    /// for firmware to communicate IRQ assignments to the OS, but it can really
    /// be used for just about anything.
    interrupt_line: u8,
    /// A read/write register that doesn't matter in virtualized contexts
    latency_timer: u8,
}
108
109impl ConfigSpaceType0EmulatorState {
110    fn new() -> Self {
111        Self {
112            latency_timer: 0,
113            command: cfg_space::Command::new(),
114            base_addresses: [0; 6],
115            interrupt_line: 0,
116        }
117    }
118}
119
/// Emulator for the standard Type 0 PCI configuration space header.
//
// TODO: Figure out how to split this up and share the handling of common
// registers (hardware IDs, command, status, etc.) with the type 1 emulator.
#[derive(Inspect)]
pub struct ConfigSpaceType0Emulator {
    // Fixed configuration
    /// Per-register masks of writable BAR bits (0 for unimplemented BARs).
    #[inspect(with = "inspect_helpers::bars")]
    bar_masks: [u32; 6],
    /// Hardware IDs (vendor/device/class/etc.) reported to the guest.
    hardware_ids: HardwareIds,
    /// Whether to report bit 7 of the Header register.
    multi_function_bit: bool,

    // Runtime glue
    /// Backing memory for each implemented BAR.
    #[inspect(with = r#"|x| inspect::iter_by_index(x).prefix("bar")"#)]
    mapped_memory: [Option<BarMemoryKind>; 6],
    /// Capability structures, exposed back-to-back starting at offset 0x40.
    #[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
    capabilities: Vec<Box<dyn PciCapability>>,
    /// Legacy INTx wiring, if configured via `set_interrupt_pin`.
    intx_interrupt: Option<Arc<IntxInterrupt>>,

    // Runtime book-keeping
    /// Decoded BAR mappings, rebuilt whenever MMIO decoding is enabled.
    active_bars: BarMappings,

    // Volatile state
    state: ConfigSpaceType0EmulatorState,
}
145
mod inspect_helpers {
    use super::*;

    /// Renders a BAR register array as hex values keyed "bar0".."bar5".
    pub(crate) fn bars(bars: &[u32; 6]) -> impl Inspect + '_ {
        inspect::AsHex(inspect::iter_by_index(bars).prefix("bar"))
    }
}
153
/// Different kinds of memory that a BAR can be backed by
#[derive(Inspect)]
#[inspect(tag = "kind")]
pub enum BarMemoryKind {
    /// BAR memory is routed to the device's `MmioIntercept` handler
    Intercept(#[inspect(rename = "handle")] Box<dyn ControlMmioIntercept>),
    /// BAR memory is routed to a shared memory region
    SharedMem(#[inspect(skip)] Box<dyn MappableGuestMemory>),
    /// **TESTING ONLY** BAR memory isn't backed by anything!
    ///
    /// Mapping/unmapping is a no-op that reports success.
    Dummy,
}
165
166impl std::fmt::Debug for BarMemoryKind {
167    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
168        match self {
169            Self::Intercept(control) => {
170                write!(f, "Intercept(region_name: {}, ..)", control.region_name())
171            }
172            Self::SharedMem(_) => write!(f, "Mmap(..)"),
173            Self::Dummy => write!(f, "Dummy"),
174        }
175    }
176}
177
178impl BarMemoryKind {
179    fn map_to_guest(&mut self, gpa: u64) -> std::io::Result<()> {
180        match self {
181            BarMemoryKind::Intercept(control) => {
182                control.map(gpa);
183                Ok(())
184            }
185            BarMemoryKind::SharedMem(control) => control.map_to_guest(gpa, true),
186            BarMemoryKind::Dummy => Ok(()),
187        }
188    }
189
190    fn unmap_from_guest(&mut self) {
191        match self {
192            BarMemoryKind::Intercept(control) => control.unmap(),
193            BarMemoryKind::SharedMem(control) => control.unmap_from_guest(),
194            BarMemoryKind::Dummy => {}
195        }
196    }
197}
198
/// Container type that describes a device's available BARs
// TODO: support more advanced BAR configurations
// e.g: mixed 32-bit and 64-bit
// e.g: IO space BARs
#[derive(Debug)]
pub struct DeviceBars {
    /// `(length in bytes, backing memory)` for each BAR slot; `None` = unused.
    bars: [Option<(u64, BarMemoryKind)>; 6],
}
207
208impl DeviceBars {
209    /// Create a new instance of [`DeviceBars`]
210    pub fn new() -> DeviceBars {
211        DeviceBars {
212            bars: Default::default(),
213        }
214    }
215
216    /// Set BAR0
217    pub fn bar0(mut self, len: u64, memory: BarMemoryKind) -> Self {
218        self.bars[0] = Some((len, memory));
219        self
220    }
221
222    /// Set BAR2
223    pub fn bar2(mut self, len: u64, memory: BarMemoryKind) -> Self {
224        self.bars[2] = Some((len, memory));
225        self
226    }
227
228    /// Set BAR4
229    pub fn bar4(mut self, len: u64, memory: BarMemoryKind) -> Self {
230        self.bars[4] = Some((len, memory));
231        self
232    }
233}
234
235impl ConfigSpaceType0Emulator {
236    /// Create a new [`ConfigSpaceType0Emulator`]
237    pub fn new(
238        hardware_ids: HardwareIds,
239        capabilities: Vec<Box<dyn PciCapability>>,
240        bars: DeviceBars,
241    ) -> Self {
242        let mut bar_masks = [0; 6];
243        let mut mapped_memory = {
244            const NONE: Option<BarMemoryKind> = None;
245            [NONE; 6]
246        };
247        for (bar_index, bar) in bars.bars.into_iter().enumerate() {
248            let (len, mapped) = match bar {
249                Some(bar) => bar,
250                None => continue,
251            };
252            // use 64-bit aware BARs
253            assert!(bar_index < 5);
254            // Round up regions to a power of 2, as required by PCI (and
255            // inherently required by the BAR representation). Round up to at
256            // least one page to avoid various problems in guest OSes.
257            const MIN_BAR_SIZE: u64 = 4096;
258            let len = std::cmp::max(len.next_power_of_two(), MIN_BAR_SIZE);
259            let mask64 = !(len - 1);
260            bar_masks[bar_index] = cfg_space::BarEncodingBits::from_bits(mask64 as u32)
261                .with_type_64_bit(true)
262                .into_bits();
263            bar_masks[bar_index + 1] = (mask64 >> 32) as u32;
264            mapped_memory[bar_index] = Some(mapped);
265        }
266
267        Self {
268            bar_masks,
269            hardware_ids,
270            multi_function_bit: false,
271
272            active_bars: Default::default(),
273
274            mapped_memory,
275            capabilities,
276            intx_interrupt: None,
277
278            state: ConfigSpaceType0EmulatorState {
279                command: cfg_space::Command::new(),
280                base_addresses: [0; 6],
281                interrupt_line: 0,
282                latency_timer: 0,
283            },
284        }
285    }
286
287    /// If the device is multi-function, enable bit 7 in the Header register.
288    pub fn with_multi_function_bit(mut self, bit: bool) -> Self {
289        self.multi_function_bit = bit;
290        self
291    }
292
293    /// If using legacy INT#x interrupts: wire a LineInterrupt to one of the 4
294    /// INT#x pins, returning an object that manages configuration space bits
295    /// when the device sets the interrupt level.
296    pub fn set_interrupt_pin(
297        &mut self,
298        pin: PciInterruptPin,
299        line: LineInterrupt,
300    ) -> Arc<IntxInterrupt> {
301        let intx_interrupt = Arc::new(IntxInterrupt {
302            pin,
303            line,
304            interrupt_disabled: AtomicBool::new(false),
305            interrupt_status: AtomicBool::new(false),
306        });
307        self.intx_interrupt = Some(intx_interrupt.clone());
308        intx_interrupt
309    }
310
311    /// Resets the configuration space state.
312    pub fn reset(&mut self) {
313        self.state = ConfigSpaceType0EmulatorState::new();
314
315        self.sync_command_register(self.state.command);
316
317        for cap in &mut self.capabilities {
318            cap.reset();
319        }
320
321        if let Some(intx) = &mut self.intx_interrupt {
322            intx.set_level(false);
323        }
324    }
325
326    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
327        let mut cap_offset = 0;
328        for i in 0..self.capabilities.len() {
329            let cap_size = self.capabilities[i].len() as u16;
330            if offset < cap_offset + cap_size {
331                return Some((i, offset - cap_offset));
332            }
333            cap_offset += cap_size;
334        }
335        None
336    }
337
    /// Read from the config space. `offset` must be 32-bit aligned.
    ///
    /// On success the register value is written through `value`; unknown
    /// offsets return [`IoError::InvalidRegister`].
    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
        use cfg_space::HeaderType00;

        *value = match HeaderType00(offset) {
            HeaderType00::DEVICE_VENDOR => {
                (self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
            }
            HeaderType00::STATUS_COMMAND => {
                // Only advertise a capabilities list when one exists.
                let mut status =
                    cfg_space::Status::new().with_capabilities_list(!self.capabilities.is_empty());

                // Reflect a pending INTx in the status register, even while
                // delivery is masked via the command register.
                if let Some(intx_interrupt) = &self.intx_interrupt {
                    if intx_interrupt.interrupt_status.load(Ordering::SeqCst) {
                        status.set_interrupt_status(true);
                    }
                }

                (status.into_bits() as u32) << 16 | self.state.command.into_bits() as u32
            }
            HeaderType00::CLASS_REVISION => {
                (u8::from(self.hardware_ids.base_class) as u32) << 24
                    | (u8::from(self.hardware_ids.sub_class) as u32) << 16
                    | (u8::from(self.hardware_ids.prog_if) as u32) << 8
                    | self.hardware_ids.revision_id as u32
            }
            HeaderType00::BIST_HEADER => {
                let mut v = (self.state.latency_timer as u32) << 8;
                if self.multi_function_bit {
                    // enable top-most bit of the header register
                    v |= 0x80 << 16;
                }
                v
            }
            HeaderType00::BAR0
            | HeaderType00::BAR1
            | HeaderType00::BAR2
            | HeaderType00::BAR3
            | HeaderType00::BAR4
            | HeaderType00::BAR5 => {
                // Return the last guest-programmed value (masked at write time).
                self.state.base_addresses[(offset - HeaderType00::BAR0.0) as usize / 4]
            }
            HeaderType00::CARDBUS_CIS_PTR => 0,
            HeaderType00::SUBSYSTEM_ID => {
                (self.hardware_ids.type0_sub_system_id as u32) << 16
                    | self.hardware_ids.type0_sub_vendor_id as u32
            }
            HeaderType00::EXPANSION_ROM_BASE => 0,
            HeaderType00::RESERVED_CAP_PTR => {
                // Capabilities, when present, always start at offset 0x40.
                if self.capabilities.is_empty() {
                    0
                } else {
                    0x40
                }
            }
            HeaderType00::RESERVED => 0,
            HeaderType00::LATENCY_INTERRUPT => {
                let interrupt_pin = if let Some(intx_interrupt) = &self.intx_interrupt {
                    match intx_interrupt.pin {
                        PciInterruptPin::IntA => 1,
                        PciInterruptPin::IntB => 2,
                        PciInterruptPin::IntC => 3,
                        PciInterruptPin::IntD => 4,
                    }
                } else {
                    0
                };
                self.state.interrupt_line as u32 | (interrupt_pin as u32) << 8
            }
            // rest of the range is reserved for extended device capabilities
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    let mut value = self.capabilities[cap_index].read_u32(cap_offset);
                    if cap_offset == 0 {
                        // Patch the "next capability" pointer (byte 1 of the
                        // capability header) into the first dword — individual
                        // capabilities don't know about their neighbors.
                        let next = if cap_index < self.capabilities.len() - 1 {
                            offset as u32 + self.capabilities[cap_index].len() as u32
                        } else {
                            0
                        };
                        assert!(value & 0xff00 == 0);
                        value |= next << 8;
                    }
                    value
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ if (0x100..0x1000).contains(&offset) => {
                // TODO: properly support extended pci express configuration space
                if offset == 0x100 {
                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
                    0x000ffff
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, "unexpected config space read");
                return IoResult::Err(IoError::InvalidRegister);
            }
        };

        IoResult::Ok
    }
446
447    fn update_intx_disable(&mut self, command: cfg_space::Command) {
448        if let Some(intx_interrupt) = &self.intx_interrupt {
449            intx_interrupt.set_disabled(command.intx_disable())
450        }
451    }
452
    /// Maps or unmaps all BAR backing memory in response to the Command
    /// register's MMIO-decode bit.
    fn update_mmio_enabled(&mut self, command: cfg_space::Command) {
        if command.mmio_enabled() {
            // Decode the guest-programmed BAR registers into concrete ranges.
            self.active_bars = BarMappings::parse(&self.state.base_addresses, &self.bar_masks);
            for (bar, mapping) in self.mapped_memory.iter_mut().enumerate() {
                if let Some(mapping) = mapping {
                    let base = self.active_bars.get(bar as u8).expect("bar exists");
                    match mapping.map_to_guest(base) {
                        Ok(_) => {}
                        Err(err) => {
                            // Log and continue: a failed mapping leaves that
                            // one BAR unmapped but doesn't abort the update.
                            tracelimit::error_ratelimited!(
                                error = &err as &dyn std::error::Error,
                                bar,
                                base,
                                "failed to map bar",
                            )
                        }
                    }
                }
            }
        } else {
            // Decode disabled: tear down all mappings.
            self.active_bars = Default::default();
            for mapping in self.mapped_memory.iter_mut().flatten() {
                mapping.unmap_from_guest();
            }
        }
    }
479
    /// Applies all side effects implied by `command` (INTx masking, BAR
    /// mapping), unconditionally — unlike `write_u32`, which only applies
    /// side effects for bits that changed.
    fn sync_command_register(&mut self, command: cfg_space::Command) {
        self.update_intx_disable(command);
        self.update_mmio_enabled(command);
    }
484
    /// Write to the config space. `offset` must be 32-bit aligned.
    ///
    /// Unknown offsets return [`IoError::InvalidRegister`]; read-only
    /// registers in the base header are silently ignored.
    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
        use cfg_space::HeaderType00;

        match HeaderType00(offset) {
            HeaderType00::STATUS_COMMAND => {
                let mut command = cfg_space::Command::from_bits(val as u16);
                if command.into_bits() & !SUPPORTED_COMMAND_BITS != 0 {
                    tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
                    // still do our best
                    command =
                        cfg_space::Command::from_bits(command.into_bits() & SUPPORTED_COMMAND_BITS);
                };

                // Only perform (potentially expensive) side effects for bits
                // that actually changed.
                if self.state.command.intx_disable() != command.intx_disable() {
                    self.update_intx_disable(command)
                }

                if self.state.command.mmio_enabled() != command.mmio_enabled() {
                    self.update_mmio_enabled(command)
                }

                self.state.command = command;
            }
            HeaderType00::BIST_HEADER => {
                // allow writes to the latency timer
                let timer_val = (val >> 8) as u8;
                self.state.latency_timer = timer_val;
            }
            HeaderType00::BAR0
            | HeaderType00::BAR1
            | HeaderType00::BAR2
            | HeaderType00::BAR3
            | HeaderType00::BAR4
            | HeaderType00::BAR5 => {
                // BARs are only reprogrammable while MMIO decoding is disabled.
                if !self.state.command.mmio_enabled() {
                    let bar_index = (offset - HeaderType00::BAR0.0) as usize / 4;
                    let mut bar_value = val & self.bar_masks[bar_index];
                    // Even (low-dword) registers of an implemented BAR pair
                    // always report the 64-bit type encoding in the low bits.
                    if bar_index & 1 == 0 && self.bar_masks[bar_index] != 0 {
                        bar_value = cfg_space::BarEncodingBits::from_bits(bar_value)
                            .with_type_64_bit(true)
                            .into_bits();
                    }
                    self.state.base_addresses[bar_index] = bar_value;
                }
            }
            HeaderType00::LATENCY_INTERRUPT => {
                // NOTE(review): this stores byte 1 of the dword, but the read
                // path reports `interrupt_line` in byte 0 (with the pin in
                // byte 1) — guest writes to the interrupt-line byte won't
                // round-trip. Verify whether `val as u8` was intended.
                self.state.interrupt_line = ((val & 0xff00) >> 8) as u8;
            }
            // all other base regs are noops
            _ if offset < 0x40 && offset.is_multiple_of(4) => (),
            // rest of the range is reserved for extended device capabilities
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    self.capabilities[cap_index].write_u32(cap_offset, val);
                } else {
                    tracelimit::warn_ratelimited!(
                        offset,
                        value = val,
                        "unhandled config space write"
                    );
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ if (0x100..0x1000).contains(&offset) => {
                // TODO: properly support extended pci express configuration space
                tracelimit::warn_ratelimited!(
                    offset,
                    value = val,
                    "unhandled extended config space write"
                );
                return IoResult::Err(IoError::InvalidRegister);
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
                return IoResult::Err(IoError::InvalidRegister);
            }
        }

        IoResult::Ok
    }
568
    /// Finds a BAR + offset by address.
    ///
    /// Delegates to [`BarMappings::find`] over the currently-active (i.e.
    /// MMIO-decode-enabled) BAR mappings.
    pub fn find_bar(&self, address: u64) -> Option<(u8, u16)> {
        self.active_bars.find(address)
    }
573}
574
/// Volatile (guest-mutable) register state backing [`ConfigSpaceType1Emulator`].
#[derive(Debug, Inspect)]
struct ConfigSpaceType1EmulatorState {
    /// The command register
    command: cfg_space::Command,
    /// The subordinate bus number register. Software programs
    /// this register with the highest bus number below the bridge.
    subordinate_bus_number: u8,
    /// The secondary bus number register. Software programs
    /// this register with the bus number assigned to the secondary
    /// side of the bridge.
    secondary_bus_number: u8,
    /// The primary bus number register. This is unused for PCI Express but
    /// is supposed to be read/write for compatibility with legacy software.
    primary_bus_number: u8,
    /// The memory base register. Software programs the upper 12 bits of this
    /// register with the upper 12 bits of a 32-bit base address of MMIO assigned
    /// to the hierarchy under the bridge (the lower 20 bits are assumed to be 0s).
    memory_base: u16,
    /// The memory limit register. Software programs the upper 12 bits of this
    /// register with the upper 12 bits of a 32-bit limit address of MMIO assigned
    /// to the hierarchy under the bridge (the lower 20 bits are assumed to be 1s).
    memory_limit: u16,
    /// The prefetchable memory base register. Software programs the upper 12 bits of
    /// this register with bits 20:31 of the base address of the prefetchable MMIO
    /// window assigned to the hierarchy under the bridge. Bits 0:19 are assumed to
    /// be 0s.
    prefetch_base: u16,
    /// The prefetchable memory limit register. Software programs the upper 12 bits of
    /// this register with bits 20:31 of the limit address of the prefetchable MMIO
    /// window assigned to the hierarchy under the bridge. Bits 0:19 are assumed to
    /// be 1s.
    prefetch_limit: u16,
    /// The prefetchable memory base upper 32 bits register. When the bridge supports
    /// 64-bit addressing for prefetchable memory, software programs this register
    /// with the upper 32 bits of the base address of the prefetchable MMIO window
    /// assigned to the hierarchy under the bridge.
    prefetch_base_upper: u32,
    /// The prefetchable memory limit upper 32 bits register. When the bridge supports
    /// 64-bit addressing for prefetchable memory, software programs this register
    /// with the upper 32 bits of the base address of the prefetchable MMIO window
    /// assigned to the hierarchy under the bridge.
    prefetch_limit_upper: u32,
}
618
619impl ConfigSpaceType1EmulatorState {
620    fn new() -> Self {
621        Self {
622            command: cfg_space::Command::new(),
623            subordinate_bus_number: 0,
624            secondary_bus_number: 0,
625            primary_bus_number: 0,
626            memory_base: 0,
627            memory_limit: 0,
628            prefetch_base: 0,
629            prefetch_limit: 0,
630            prefetch_base_upper: 0,
631            prefetch_limit_upper: 0,
632        }
633    }
634}
635
/// Emulator for the standard Type 1 PCI configuration space header.
//
// TODO: Figure out how to split this up and share the handling of common
// registers (hardware IDs, command, status, etc.) with the type 0 emulator.
// TODO: Support type 1 BARs (only two)
#[derive(Inspect)]
pub struct ConfigSpaceType1Emulator {
    /// Hardware IDs (vendor/device/class/etc.) reported to the guest.
    hardware_ids: HardwareIds,
    /// Capability structures, exposed back-to-back starting at offset 0x40.
    #[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
    capabilities: Vec<Box<dyn PciCapability>>,
    /// Guest-mutable register state.
    state: ConfigSpaceType1EmulatorState,
}
648
649impl ConfigSpaceType1Emulator {
650    /// Create a new [`ConfigSpaceType1Emulator`]
651    pub fn new(hardware_ids: HardwareIds, capabilities: Vec<Box<dyn PciCapability>>) -> Self {
652        Self {
653            hardware_ids,
654            capabilities,
655            state: ConfigSpaceType1EmulatorState::new(),
656        }
657    }
658
659    /// Resets the configuration space state.
660    pub fn reset(&mut self) {
661        self.state = ConfigSpaceType1EmulatorState::new();
662
663        for cap in &mut self.capabilities {
664            cap.reset();
665        }
666    }
667
668    /// Returns the range of bus numbers the bridge is programmed to decode.
669    pub fn assigned_bus_range(&self) -> RangeInclusive<u8> {
670        let secondary = self.state.secondary_bus_number;
671        let subordinate = self.state.subordinate_bus_number;
672        if secondary <= subordinate {
673            secondary..=subordinate
674        } else {
675            0..=0
676        }
677    }
678
679    fn decode_memory_range(&self, base_register: u16, limit_register: u16) -> (u32, u32) {
680        let base_addr = ((base_register & !0b1111) as u32) << 16;
681        let limit_addr = ((limit_register & !0b1111) as u32) << 16 | 0xF_FFFF;
682        (base_addr, limit_addr)
683    }
684
685    /// If memory decoding is currently enabled, and the memory window assignment is valid,
686    /// returns the 32-bit memory addresses the bridge is programmed to decode.
687    pub fn assigned_memory_range(&self) -> Option<RangeInclusive<u32>> {
688        let (base_addr, limit_addr) =
689            self.decode_memory_range(self.state.memory_base, self.state.memory_limit);
690        if self.state.command.mmio_enabled() && base_addr <= limit_addr {
691            Some(base_addr..=limit_addr)
692        } else {
693            None
694        }
695    }
696
697    /// If memory decoding is currently enabled, and the prefetchable memory window assignment
698    /// is valid, returns the 64-bit prefetchable memory addresses the bridge is programmed to decode.
699    pub fn assigned_prefetch_range(&self) -> Option<RangeInclusive<u64>> {
700        let (base_low, limit_low) =
701            self.decode_memory_range(self.state.prefetch_base, self.state.prefetch_limit);
702        let base_addr = (self.state.prefetch_base_upper as u64) << 32 | base_low as u64;
703        let limit_addr = (self.state.prefetch_limit_upper as u64) << 32 | limit_low as u64;
704        if self.state.command.mmio_enabled() && base_addr <= limit_addr {
705            Some(base_addr..=limit_addr)
706        } else {
707            None
708        }
709    }
710
711    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
712        let mut cap_offset = 0;
713        for i in 0..self.capabilities.len() {
714            let cap_size = self.capabilities[i].len() as u16;
715            if offset < cap_offset + cap_size {
716                return Some((i, offset - cap_offset));
717            }
718            cap_offset += cap_size;
719        }
720        None
721    }
722
    /// Read from the config space. `offset` must be 32-bit aligned.
    ///
    /// On success the register value is written through `value`; unknown
    /// offsets return [`IoError::InvalidRegister`].
    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
        use cfg_space::HeaderType01;

        *value = match HeaderType01(offset) {
            HeaderType01::DEVICE_VENDOR => {
                (self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
            }
            HeaderType01::STATUS_COMMAND => {
                // Only advertise a capabilities list when one exists.
                let status =
                    cfg_space::Status::new().with_capabilities_list(!self.capabilities.is_empty());

                (status.into_bits() as u32) << 16 | self.state.command.into_bits() as u32
            }
            HeaderType01::CLASS_REVISION => {
                (u8::from(self.hardware_ids.base_class) as u32) << 24
                    | (u8::from(self.hardware_ids.sub_class) as u32) << 16
                    | (u8::from(self.hardware_ids.prog_if) as u32) << 8
                    | self.hardware_ids.revision_id as u32
            }
            HeaderType01::BIST_HEADER => {
                // Header type 01
                0x00010000
            }
            // Type 1 BARs are not implemented (see the TODO on the struct).
            HeaderType01::BAR0 => 0,
            HeaderType01::BAR1 => 0,
            HeaderType01::LATENCY_BUS_NUMBERS => {
                (self.state.subordinate_bus_number as u32) << 16
                    | (self.state.secondary_bus_number as u32) << 8
                    | self.state.primary_bus_number as u32
            }
            HeaderType01::SEC_STATUS_IO_RANGE => 0,
            HeaderType01::MEMORY_RANGE => {
                (self.state.memory_limit as u32) << 16 | self.state.memory_base as u32
            }
            HeaderType01::PREFETCH_RANGE => {
                // Set the low bit in both the limit and base registers to indicate
                // support for 64-bit addressing.
                ((self.state.prefetch_limit | 0b0001) as u32) << 16
                    | (self.state.prefetch_base | 0b0001) as u32
            }
            HeaderType01::PREFETCH_BASE_UPPER => self.state.prefetch_base_upper,
            HeaderType01::PREFETCH_LIMIT_UPPER => self.state.prefetch_limit_upper,
            HeaderType01::IO_RANGE_UPPER => 0,
            HeaderType01::RESERVED_CAP_PTR => {
                // Capabilities, when present, always start at offset 0x40.
                if self.capabilities.is_empty() {
                    0
                } else {
                    0x40
                }
            }
            HeaderType01::EXPANSION_ROM_BASE => 0,
            HeaderType01::BRDIGE_CTRL_INTERRUPT => 0,
            // rest of the range is reserved for device capabilities
            _ if (0x40..0x100).contains(&offset) => {
                if let Some((cap_index, cap_offset)) =
                    self.get_capability_index_and_offset(offset - 0x40)
                {
                    let mut value = self.capabilities[cap_index].read_u32(cap_offset);
                    if cap_offset == 0 {
                        // Patch the "next capability" pointer (byte 1 of the
                        // capability header) into the first dword — individual
                        // capabilities don't know about their neighbors.
                        let next = if cap_index < self.capabilities.len() - 1 {
                            offset as u32 + self.capabilities[cap_index].len() as u32
                        } else {
                            0
                        };
                        assert!(value & 0xff00 == 0);
                        value |= next << 8;
                    }
                    value
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ if (0x100..0x1000).contains(&offset) => {
                // TODO: properly support extended pci express configuration space
                if offset == 0x100 {
                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
                    0x000ffff
                } else {
                    tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
                    return IoResult::Err(IoError::InvalidRegister);
                }
            }
            _ => {
                tracelimit::warn_ratelimited!(offset, "unexpected config space read");
                return IoResult::Err(IoError::InvalidRegister);
            }
        };

        IoResult::Ok
    }
815
816    /// Write to the config space. `offset` must be 32-bit aligned.
817    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
818        use cfg_space::HeaderType01;
819
820        match HeaderType01(offset) {
821            HeaderType01::STATUS_COMMAND => {
822                let mut command = cfg_space::Command::from_bits(val as u16);
823                if command.into_bits() & !SUPPORTED_COMMAND_BITS != 0 {
824                    tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
825                    // still do our best
826                    command =
827                        cfg_space::Command::from_bits(command.into_bits() & SUPPORTED_COMMAND_BITS);
828                };
829
830                // TODO: when the memory space enable bit is written, sanity check the programmed
831                // memory and prefetch ranges...
832
833                self.state.command = command;
834            }
835            HeaderType01::LATENCY_BUS_NUMBERS => {
836                self.state.subordinate_bus_number = (val >> 16) as u8;
837                self.state.secondary_bus_number = (val >> 8) as u8;
838                self.state.primary_bus_number = val as u8;
839            }
840            HeaderType01::MEMORY_RANGE => {
841                self.state.memory_base = val as u16;
842                self.state.memory_limit = (val >> 16) as u16;
843            }
844            HeaderType01::PREFETCH_RANGE => {
845                self.state.prefetch_base = val as u16;
846                self.state.prefetch_limit = (val >> 16) as u16;
847            }
848            HeaderType01::PREFETCH_BASE_UPPER => {
849                self.state.prefetch_base_upper = val;
850            }
851            HeaderType01::PREFETCH_LIMIT_UPPER => {
852                self.state.prefetch_limit_upper = val;
853            }
854            // all other base regs are noops
855            _ if offset < 0x40 && offset.is_multiple_of(4) => (),
856            // rest of the range is reserved for extended device capabilities
857            _ if (0x40..0x100).contains(&offset) => {
858                if let Some((cap_index, cap_offset)) =
859                    self.get_capability_index_and_offset(offset - 0x40)
860                {
861                    self.capabilities[cap_index].write_u32(cap_offset, val);
862                } else {
863                    tracelimit::warn_ratelimited!(
864                        offset,
865                        value = val,
866                        "unhandled config space write"
867                    );
868                    return IoResult::Err(IoError::InvalidRegister);
869                }
870            }
871            _ if (0x100..0x1000).contains(&offset) => {
872                // TODO: properly support extended pci express configuration space
873                tracelimit::warn_ratelimited!(
874                    offset,
875                    value = val,
876                    "unhandled extended config space write"
877                );
878                return IoResult::Err(IoError::InvalidRegister);
879            }
880            _ => {
881                tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
882                return IoResult::Err(IoError::InvalidRegister);
883            }
884        }
885
886        IoResult::Ok
887    }
888}
889
890mod save_restore {
891    use super::*;
892    use thiserror::Error;
893    use vmcore::save_restore::RestoreError;
894    use vmcore::save_restore::SaveError;
895    use vmcore::save_restore::SaveRestore;
896
897    mod state {
898        use mesh::payload::Protobuf;
899        use vmcore::save_restore::SavedStateBlob;
900        use vmcore::save_restore::SavedStateRoot;
901
902        #[derive(Protobuf, SavedStateRoot)]
903        #[mesh(package = "pci.cfg_space_emu")]
904        pub struct SavedState {
905            #[mesh(1)]
906            pub command: u16,
907            #[mesh(2)]
908            pub base_addresses: [u32; 6],
909            #[mesh(3)]
910            pub interrupt_line: u8,
911            #[mesh(4)]
912            pub latency_timer: u8,
913            #[mesh(5)]
914            pub capabilities: Vec<(String, SavedStateBlob)>,
915        }
916    }
917
918    #[derive(Debug, Error)]
919    enum ConfigSpaceRestoreError {
920        #[error("found invalid config bits in saved state")]
921        InvalidConfigBits,
922        #[error("found unexpected capability {0}")]
923        InvalidCap(String),
924    }
925
926    impl SaveRestore for ConfigSpaceType0Emulator {
927        type SavedState = state::SavedState;
928
929        fn save(&mut self) -> Result<Self::SavedState, SaveError> {
930            let ConfigSpaceType0EmulatorState {
931                command,
932                base_addresses,
933                interrupt_line,
934                latency_timer,
935            } = self.state;
936
937            let saved_state = state::SavedState {
938                command: command.into_bits(),
939                base_addresses,
940                interrupt_line,
941                latency_timer,
942                capabilities: self
943                    .capabilities
944                    .iter_mut()
945                    .map(|cap| {
946                        let id = cap.label().to_owned();
947                        Ok((id, cap.save()?))
948                    })
949                    .collect::<Result<_, _>>()?,
950            };
951
952            Ok(saved_state)
953        }
954
955        fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
956            let state::SavedState {
957                command,
958                base_addresses,
959                interrupt_line,
960                latency_timer,
961                capabilities,
962            } = state;
963
964            self.state = ConfigSpaceType0EmulatorState {
965                command: cfg_space::Command::from_bits(command),
966                base_addresses,
967                interrupt_line,
968                latency_timer,
969            };
970
971            if command & !SUPPORTED_COMMAND_BITS != 0 {
972                return Err(RestoreError::InvalidSavedState(
973                    ConfigSpaceRestoreError::InvalidConfigBits.into(),
974                ));
975            }
976
977            self.sync_command_register(self.state.command);
978            for (id, entry) in capabilities {
979                tracing::debug!(save_id = id.as_str(), "restoring pci capability");
980
981                // yes, yes, this is O(n^2), but devices never have more than a
982                // handful of caps, so it's totally fine.
983                let mut restored = false;
984                for cap in self.capabilities.iter_mut() {
985                    if cap.label() == id {
986                        cap.restore(entry)?;
987                        restored = true;
988                        break;
989                    }
990                }
991
992                if !restored {
993                    return Err(RestoreError::InvalidSavedState(
994                        ConfigSpaceRestoreError::InvalidCap(id).into(),
995                    ));
996                }
997            }
998
999            Ok(())
1000        }
1001    }
1002}
1003
#[cfg(test)]
mod tests {
    use super::*;
    use crate::capabilities::read_only::ReadOnlyCapability;
    use crate::spec::hwid::ClassCode;
    use crate::spec::hwid::ProgrammingInterface;
    use crate::spec::hwid::Subclass;

    /// Constructs a type 1 (PCI-to-PCI bridge) emulator with fixed hardware
    /// IDs and the provided capability list.
    fn create_type1_emulator(caps: Vec<Box<dyn PciCapability>>) -> ConfigSpaceType1Emulator {
        let hardware_ids = HardwareIds {
            vendor_id: 0x1111,
            device_id: 0x2222,
            revision_id: 1,
            prog_if: ProgrammingInterface::NONE,
            sub_class: Subclass::BRIDGE_PCI_TO_PCI,
            base_class: ClassCode::BRIDGE,
            type0_sub_vendor_id: 0,
            type0_sub_system_id: 0,
        };
        ConfigSpaceType1Emulator::new(hardware_ids, caps)
    }

    /// Reads a 32-bit config space register, panicking on failure.
    fn read_cfg(emulator: &ConfigSpaceType1Emulator, offset: u16) -> u32 {
        let mut value = 0;
        emulator.read_u32(offset, &mut value).unwrap();
        value
    }

    #[test]
    fn test_type1_probe() {
        // With no capabilities, the capabilities-list status bit reads clear.
        let bridge = create_type1_emulator(vec![]);
        assert_eq!(read_cfg(&bridge, 0), 0x2222_1111);
        assert_eq!(read_cfg(&bridge, 4) & 0x10_0000, 0); // Capabilities pointer

        // With at least one capability, the bit reads set.
        let bridge = create_type1_emulator(vec![Box::new(ReadOnlyCapability::new("foo", 0))]);
        assert_eq!(read_cfg(&bridge, 0), 0x2222_1111);
        assert_eq!(read_cfg(&bridge, 4) & 0x10_0000, 0x10_0000); // Capabilities pointer
    }

    #[test]
    fn test_type1_bus_number_assignment() {
        let mut bridge = create_type1_emulator(vec![]);

        // All bus number registers (and the latency timer) default to 0.
        assert_eq!(read_cfg(&bridge, 0x18), 0);
        assert_eq!(bridge.assigned_bus_range(), 0..=0);

        // Bus numbers can be programmed piecemeal; the range may be invalid
        // while the guest is mid-allocation.
        bridge.write_u32(0x18, 0x0000_1000).unwrap();
        assert_eq!(read_cfg(&bridge, 0x18), 0x0000_1000);
        assert_eq!(bridge.assigned_bus_range(), 0..=0);
        bridge.write_u32(0x18, 0x0012_1000).unwrap();
        assert_eq!(read_cfg(&bridge, 0x18), 0x0012_1000);
        assert_eq!(bridge.assigned_bus_range(), 0x10..=0x12);

        // The primary bus number register is read/write for compatibility,
        // but does not affect the assigned range.
        bridge.write_u32(0x18, 0x0012_1033).unwrap();
        assert_eq!(read_cfg(&bridge, 0x18), 0x0012_1033);
        assert_eq!(bridge.assigned_bus_range(), 0x10..=0x12);

        // Software may also program the whole 4-byte register in one shot.
        bridge.write_u32(0x18, 0x0047_4411).unwrap();
        assert_eq!(read_cfg(&bridge, 0x18), 0x0047_4411);
        assert_eq!(bridge.assigned_bus_range(), 0x44..=0x47);

        // Subordinate == secondary is a legal single-bus range...
        bridge.write_u32(0x18, 0x0088_8800).unwrap();
        assert_eq!(bridge.assigned_bus_range(), 0x88..=0x88);

        // ...but subordinate < secondary is a confused guest OS.
        bridge.write_u32(0x18, 0x0087_8800).unwrap();
        assert_eq!(bridge.assigned_bus_range(), 0..=0);
    }

    #[test]
    fn test_type1_memory_assignment() {
        const MMIO_ENABLED: u32 = 0x0000_0002;
        const MMIO_DISABLED: u32 = 0x0000_0000;

        let mut bridge = create_type1_emulator(vec![]);
        assert!(bridge.assigned_memory_range().is_none());

        // Arbitrary garbage is accepted while MMIO decoding is off.
        bridge.write_u32(0x20, 0xDEAD_BEEF).unwrap();
        assert!(bridge.assigned_memory_range().is_none());

        // Program a valid resource assignment...
        bridge.write_u32(0x20, 0xFFF0_FF00).unwrap();
        assert!(bridge.assigned_memory_range().is_none());
        // ...turn on memory decoding...
        bridge.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            bridge.assigned_memory_range(),
            Some(0xFF00_0000..=0xFFFF_FFFF)
        );
        // ...then turn it back off.
        bridge.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(bridge.assigned_memory_range().is_none());

        // Memory base == memory limit is a valid 1MB range.
        bridge.write_u32(0x20, 0xBBB0_BBB0).unwrap();
        bridge.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            bridge.assigned_memory_range(),
            Some(0xBBB0_0000..=0xBBBF_FFFF)
        );
        bridge.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(bridge.assigned_memory_range().is_none());

        // An invalid assignment (base > limit) is accepted, but never decoded.
        bridge.write_u32(0x20, 0xAA00_BB00).unwrap();
        assert!(bridge.assigned_memory_range().is_none());
        bridge.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert!(bridge.assigned_memory_range().is_none());
        bridge.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(bridge.assigned_memory_range().is_none());
    }

    #[test]
    fn test_type1_prefetch_assignment() {
        const MMIO_ENABLED: u32 = 0x0000_0002;
        const MMIO_DISABLED: u32 = 0x0000_0000;

        let mut bridge = create_type1_emulator(vec![]);
        assert!(bridge.assigned_prefetch_range().is_none());

        // Program a valid prefetch range...
        bridge.write_u32(0x24, 0xFFF0_FF00).unwrap(); // limit + base
        bridge.write_u32(0x28, 0x00AA_BBCC).unwrap(); // base upper
        bridge.write_u32(0x2C, 0x00DD_EEFF).unwrap(); // limit upper
        assert!(bridge.assigned_prefetch_range().is_none());
        // ...turn on memory decoding...
        bridge.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            bridge.assigned_prefetch_range(),
            Some(0x00AA_BBCC_FF00_0000..=0x00DD_EEFF_FFFF_FFFF)
        );
        // ...then turn it back off.
        bridge.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(bridge.assigned_prefetch_range().is_none());

        // Validity is judged on the combined 64-bit addresses, not on the
        // upper or lower halves in isolation.

        // Here the low bits of the limit are below the low bits of the base,
        // but the upper bits make the full range valid.
        bridge.write_u32(0x24, 0xFF00_FFF0).unwrap(); // limit + base
        bridge.write_u32(0x28, 0x00AA_BBCC).unwrap(); // base upper
        bridge.write_u32(0x2C, 0x00DD_EEFF).unwrap(); // limit upper
        assert!(bridge.assigned_prefetch_range().is_none());
        bridge.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            bridge.assigned_prefetch_range(),
            Some(0x00AA_BBCC_FFF0_0000..=0x00DD_EEFF_FF0F_FFFF)
        );
        bridge.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(bridge.assigned_prefetch_range().is_none());

        // Base == limit is a valid 1MB range.
        bridge.write_u32(0x24, 0xDD00_DD00).unwrap(); // limit + base
        bridge.write_u32(0x28, 0x00AA_BBCC).unwrap(); // base upper
        bridge.write_u32(0x2C, 0x00AA_BBCC).unwrap(); // limit upper
        assert!(bridge.assigned_prefetch_range().is_none());
        bridge.write_u32(0x4, MMIO_ENABLED).unwrap();
        assert_eq!(
            bridge.assigned_prefetch_range(),
            Some(0x00AA_BBCC_DD00_0000..=0x00AA_BBCC_DD0F_FFFF)
        );
        bridge.write_u32(0x4, MMIO_DISABLED).unwrap();
        assert!(bridge.assigned_prefetch_range().is_none());
    }
}