//! Emulation of the configuration space of a PCI type 0 (endpoint) function,
//! including BAR mapping, capability chaining, and legacy INTx interrupts.

#![warn(missing_docs)]
use crate::bar_mapping::BarMappings;
use crate::capabilities::PciCapability;
use crate::spec::cfg_space;
use crate::spec::hwid::HardwareIds;
use crate::PciInterruptPin;
use chipset_device::io::IoError;
use chipset_device::io::IoResult;
use chipset_device::mmio::ControlMmioIntercept;
use guestmem::MappableGuestMemory;
use inspect::Inspect;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use vmcore::line_interrupt::LineInterrupt;
/// A handle for raising or lowering the legacy INTx interrupt line associated
/// with a PCI function's configuration space.
///
/// The requested level is always latched into the interrupt-status bit, but
/// the underlying [`LineInterrupt`] is only driven high while the guest has
/// not set the INTx-disable bit in the command register.
#[derive(Debug, Inspect)]
pub struct IntxInterrupt {
pin: PciInterruptPin,
line: LineInterrupt,
interrupt_disabled: AtomicBool,
interrupt_status: AtomicBool,
}
impl IntxInterrupt {
    /// Sets the logical level of the interrupt.
    ///
    /// The level is recorded as the pending interrupt status; the underlying
    /// line is only asserted if the guest has not disabled INTx via the
    /// command register.
    pub fn set_level(&self, high: bool) {
tracing::debug!(
disabled = ?self.interrupt_disabled,
status = ?self.interrupt_status,
?high,
%self.line,
"set_level"
);
self.interrupt_status.store(high, Ordering::SeqCst);
if self.interrupt_disabled.load(Ordering::SeqCst) {
self.line.set_level(false);
} else {
self.line.set_level(high);
}
}
fn set_disabled(&self, disabled: bool) {
tracing::debug!(
disabled = ?self.interrupt_disabled,
status = ?self.interrupt_status,
?disabled,
%self.line,
"set_disabled"
);
self.interrupt_disabled.store(disabled, Ordering::SeqCst);
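        // When the guest clears the disable bit, re-assert the line if an
        // interrupt was latched while it was masked.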
        if disabled {
            self.line.set_level(false)
        } else if self.interrupt_status.load(Ordering::SeqCst) {
            self.line.set_level(true)
        }
}
}
/// Guest-visible, mutable configuration space state, preserved across
/// save/restore operations.
#[derive(Debug, Inspect)]
struct ConfigSpaceType0EmulatorState {
command: cfg_space::Command,
#[inspect(with = "inspect_helpers::bars")]
base_addresses: [u32; 6],
interrupt_line: u8,
latency_timer: u8,
}
impl ConfigSpaceType0EmulatorState {
fn new() -> Self {
Self {
latency_timer: 0,
command: cfg_space::Command::empty(),
base_addresses: [0; 6],
interrupt_line: 0,
}
}
}
/// Emulator for the configuration space of a PCI type 0 (endpoint) function,
/// including BAR programming and an optional chain of capability structures.
#[derive(Inspect)]
pub struct ConfigSpaceType0Emulator {
#[inspect(with = "inspect_helpers::bars")]
bar_masks: [u32; 6],
hardware_ids: HardwareIds,
multi_function_bit: bool,
#[inspect(with = r#"|x| inspect::iter_by_index(x).prefix("bar")"#)]
mapped_memory: [Option<BarMemoryKind>; 6],
#[inspect(with = "|x| inspect::iter_by_key(x.iter().map(|cap| (cap.label(), cap)))")]
capabilities: Vec<Box<dyn PciCapability>>,
intx_interrupt: Option<Arc<IntxInterrupt>>,
active_bars: BarMappings,
state: ConfigSpaceType0EmulatorState,
}
mod inspect_helpers {
use super::*;
pub(crate) fn bars(bars: &[u32; 6]) -> impl Inspect + '_ {
inspect::iter_by_index(bars)
.prefix("bar")
.map_value(inspect::AsHex)
}
}
/// The kind of memory backing a BAR.
#[derive(Inspect)]
#[inspect(tag = "kind")]
pub enum BarMemoryKind {
    /// The BAR is backed by an MMIO intercept region, controlled through the
    /// provided handle.
    Intercept(#[inspect(rename = "handle")] Box<dyn ControlMmioIntercept>),
    /// The BAR is backed by memory that is mapped directly into the guest's
    /// address space.
    SharedMem(#[inspect(skip)] Box<dyn MappableGuestMemory>),
    /// The BAR is not backed by anything; mapping and unmapping it are no-ops.
    Dummy,
}
impl std::fmt::Debug for BarMemoryKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Intercept(control) => {
write!(f, "Intercept(region_name: {}, ..)", control.region_name())
}
Self::SharedMem(_) => write!(f, "Mmap(..)"),
Self::Dummy => write!(f, "Dummy"),
}
}
}
impl BarMemoryKind {
fn map_to_guest(&mut self, gpa: u64) -> std::io::Result<()> {
match self {
BarMemoryKind::Intercept(control) => {
control.map(gpa);
Ok(())
}
BarMemoryKind::SharedMem(control) => control.map_to_guest(gpa, true),
BarMemoryKind::Dummy => Ok(()),
}
}
fn unmap_from_guest(&mut self) {
match self {
BarMemoryKind::Intercept(control) => control.unmap(),
BarMemoryKind::SharedMem(control) => control.unmap_from_guest(),
BarMemoryKind::Dummy => {}
}
}
}
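/// Builder describing the memory BARs a device exposes.
///
/// Every BAR is emulated as a 64-bit memory BAR and therefore consumes two
/// consecutive 32-bit BAR registers, so only the even-numbered slots (0, 2,
/// and 4) can be populated. Lengths are rounded up to a power of two of at
/// least 4 KiB by [`ConfigSpaceType0Emulator::new`].
///
/// A minimal construction sketch (the 4 KiB length and the `Dummy` backing
/// are illustrative, not taken from any particular device):
///
/// ```ignore
/// let bars = DeviceBars::new().bar0(0x1000, BarMemoryKind::Dummy);
/// ```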
#[derive(Debug)]
pub struct DeviceBars {
bars: [Option<(u64, BarMemoryKind)>; 6],
}
impl DeviceBars {
    /// Creates a new, empty set of device BARs.
    pub fn new() -> DeviceBars {
DeviceBars {
bars: Default::default(),
}
}
    /// Defines BAR0 as `len` bytes backed by `memory`.
    pub fn bar0(mut self, len: u64, memory: BarMemoryKind) -> Self {
self.bars[0] = Some((len, memory));
self
}
    /// Defines BAR2 as `len` bytes backed by `memory`.
    pub fn bar2(mut self, len: u64, memory: BarMemoryKind) -> Self {
self.bars[2] = Some((len, memory));
self
}
    /// Defines BAR4 as `len` bytes backed by `memory`.
    pub fn bar4(mut self, len: u64, memory: BarMemoryKind) -> Self {
self.bars[4] = Some((len, memory));
self
}
}
impl ConfigSpaceType0Emulator {
    /// Creates a new configuration space emulator for a device with the given
    /// hardware IDs, capabilities, and BARs.
    pub fn new(
hardware_ids: HardwareIds,
capabilities: Vec<Box<dyn PciCapability>>,
bars: DeviceBars,
) -> Self {
let mut bar_masks = [0; 6];
let mut mapped_memory = {
const NONE: Option<BarMemoryKind> = None;
[NONE; 6]
};
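        // Each BAR is exposed as a 64-bit memory BAR occupying two consecutive
        // registers, with its size rounded up to a power of two of at least
        // 4 KiB.
        //
        // Illustrative arithmetic (not from any particular device): a requested
        // length of 0x2345 rounds up to 0x4000, giving
        // mask64 = !(0x4000 - 1) = 0xffff_ffff_ffff_c000. The low register then
        // reports 0xffff_c000 plus the 64-bit type encoding, and the high
        // register reports 0xffff_ffff.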
for (bar_index, bar) in bars.bars.into_iter().enumerate() {
let (len, mapped) = match bar {
Some(bar) => bar,
None => continue,
};
            // 64-bit BARs consume two registers, so the last valid index is 4.
            assert!(bar_index < 5);
const MIN_BAR_SIZE: u64 = 4096;
let len = std::cmp::max(len.next_power_of_two(), MIN_BAR_SIZE);
let mask64 = !(len - 1);
bar_masks[bar_index] = mask64 as u32 | cfg_space::BarEncodingBits::TYPE_64_BIT.bits();
bar_masks[bar_index + 1] = (mask64 >> 32) as u32;
mapped_memory[bar_index] = Some(mapped);
}
Self {
bar_masks,
hardware_ids,
multi_function_bit: false,
active_bars: Default::default(),
mapped_memory,
capabilities,
intx_interrupt: None,
            state: ConfigSpaceType0EmulatorState::new(),
}
}
    /// Sets whether the multi-function flag is reported in the header-type
    /// register.
    pub fn with_multi_function_bit(mut self, bit: bool) -> Self {
self.multi_function_bit = bit;
self
}
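    /// Wires up a legacy INTx interrupt on the given pin, driven through
    /// `line`, and returns a handle the device can use to assert it.
    ///
    /// A minimal wiring sketch (the `cfg_space` emulator and the `line`
    /// interrupt are assumed to be created by the caller):
    ///
    /// ```ignore
    /// let intx = cfg_space.set_interrupt_pin(PciInterruptPin::IntA, line);
    /// intx.set_level(true); // asserted only while INTx is not disabled
    /// ```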
pub fn set_interrupt_pin(
&mut self,
pin: PciInterruptPin,
line: LineInterrupt,
) -> Arc<IntxInterrupt> {
let intx_interrupt = Arc::new(IntxInterrupt {
pin,
line,
interrupt_disabled: AtomicBool::new(false),
interrupt_status: AtomicBool::new(false),
});
self.intx_interrupt = Some(intx_interrupt.clone());
intx_interrupt
}
    /// Resets the configuration space and all capabilities to their power-on
    /// state, lowering the INTx line if one is wired up.
    pub fn reset(&mut self) {
self.state = ConfigSpaceType0EmulatorState::new();
self.sync_command_register(self.state.command);
for cap in &mut self.capabilities {
cap.reset();
}
if let Some(intx) = &mut self.intx_interrupt {
intx.set_level(false);
}
}
    /// Translates an offset relative to the start of the capability list
    /// (configuration space offset 0x40) into the index of the owning
    /// capability and the offset within that capability.
    fn get_capability_index_and_offset(&self, offset: u16) -> Option<(usize, u16)> {
let mut cap_offset = 0;
for i in 0..self.capabilities.len() {
let cap_size = self.capabilities[i].len() as u16;
if offset < cap_offset + cap_size {
return Some((i, offset - cap_offset));
}
cap_offset += cap_size;
}
None
}
    /// Emulates a 32-bit read from configuration space at `offset`.
    pub fn read_u32(&self, offset: u16, value: &mut u32) -> IoResult {
use cfg_space::HeaderType00;
*value = match HeaderType00(offset) {
HeaderType00::DEVICE_VENDOR => {
(self.hardware_ids.device_id as u32) << 16 | self.hardware_ids.vendor_id as u32
}
HeaderType00::STATUS_COMMAND => {
let mut status = cfg_space::Status::empty();
if !self.capabilities.is_empty() {
status |= cfg_space::Status::CAPABILITIES_LIST;
}
if let Some(intx_interrupt) = &self.intx_interrupt {
if intx_interrupt.interrupt_status.load(Ordering::SeqCst) {
status |= cfg_space::Status::INTERRUPT_STATUS;
}
}
(status.bits() as u32) << 16 | self.state.command.bits() as u32
}
HeaderType00::CLASS_REVISION => {
(u8::from(self.hardware_ids.base_class) as u32) << 24
| (u8::from(self.hardware_ids.sub_class) as u32) << 16
| (u8::from(self.hardware_ids.prog_if) as u32) << 8
| self.hardware_ids.revision_id as u32
}
HeaderType00::BIST_HEADER => {
let mut v = (self.state.latency_timer as u32) << 8;
                if self.multi_function_bit {
                    // Report the multi-function flag: bit 7 of the header-type
                    // byte, which occupies bits 23:16 of this register.
                    v |= 0x80 << 16;
                }
v
}
HeaderType00::BAR0
| HeaderType00::BAR1
| HeaderType00::BAR2
| HeaderType00::BAR3
| HeaderType00::BAR4
| HeaderType00::BAR5 => {
self.state.base_addresses[(offset - HeaderType00::BAR0.0) as usize / 4]
}
HeaderType00::CARDBUS_CIS_PTR => 0,
HeaderType00::SUBSYSTEM_ID => {
(self.hardware_ids.type0_sub_system_id as u32) << 16
| self.hardware_ids.type0_sub_vendor_id as u32
}
HeaderType00::EXPANSION_ROM_BASE => 0,
HeaderType00::RESERVED_CAP_PTR => {
if self.capabilities.is_empty() {
0
} else {
0x40
}
}
HeaderType00::RESERVED => 0,
HeaderType00::LATENCY_INTERRUPT => {
let interrupt_pin = if let Some(intx_interrupt) = &self.intx_interrupt {
match intx_interrupt.pin {
PciInterruptPin::IntA => 1,
PciInterruptPin::IntB => 2,
PciInterruptPin::IntC => 3,
PciInterruptPin::IntD => 4,
}
} else {
0
};
self.state.interrupt_line as u32 | (interrupt_pin as u32) << 8
}
_ if (0x40..0x100).contains(&offset) => {
if let Some((cap_index, cap_offset)) =
self.get_capability_index_and_offset(offset - 0x40)
{
let mut value = self.capabilities[cap_index].read_u32(cap_offset);
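                    // Bits 15:8 of a capability's first dword hold the
                    // next-capability pointer. Capabilities are chained in
                    // registration order starting at 0x40; e.g. if the first
                    // capability were 0x10 bytes long, its next pointer would
                    // read back as 0x50 (illustrative sizes only).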
if cap_offset == 0 {
let next = if cap_index < self.capabilities.len() - 1 {
offset as u32 + self.capabilities[cap_index].len() as u32
} else {
0
};
assert!(value & 0xff00 == 0);
value |= next << 8;
}
value
} else {
tracelimit::warn_ratelimited!(offset, "unhandled config space read");
return IoResult::Err(IoError::InvalidRegister);
}
}
_ if (0x100..0x1000).contains(&offset) => {
                if offset == 0x100 {
                    tracelimit::warn_ratelimited!(offset, "unexpected pci express probe");
                    // Return an extended capability header whose capability ID
                    // reads as all ones and whose next pointer is zero, so
                    // probing guests conclude there are no extended
                    // capabilities.
                    0x0000_ffff
                } else {
} else {
tracelimit::warn_ratelimited!(offset, "unhandled extended config space read");
return IoResult::Err(IoError::InvalidRegister);
}
}
_ => {
tracelimit::warn_ratelimited!(offset, "unexpected config space read");
return IoResult::Err(IoError::InvalidRegister);
}
};
IoResult::Ok
}
fn update_intx_disable(&mut self, command: cfg_space::Command) {
if let Some(intx_interrupt) = &self.intx_interrupt {
intx_interrupt.set_disabled(command.contains(cfg_space::Command::INTX_DISABLE))
}
}
fn update_mmio_enabled(&mut self, command: cfg_space::Command) {
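        // Setting Command::MMIO_ENABLED parses the BAR registers into concrete
        // guest-physical ranges and maps each backing object at its programmed
        // base; clearing it tears those mappings back down.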
if command.contains(cfg_space::Command::MMIO_ENABLED) {
self.active_bars = BarMappings::parse(&self.state.base_addresses, &self.bar_masks);
for (bar, mapping) in self.mapped_memory.iter_mut().enumerate() {
if let Some(mapping) = mapping {
let base = self.active_bars.get(bar as u8).expect("bar exists");
match mapping.map_to_guest(base) {
Ok(_) => {}
Err(err) => {
tracelimit::error_ratelimited!(
error = &err as &dyn std::error::Error,
bar,
base,
"failed to map bar",
)
}
}
}
}
} else {
self.active_bars = Default::default();
for mapping in self.mapped_memory.iter_mut().flatten() {
mapping.unmap_from_guest();
}
}
}
fn sync_command_register(&mut self, command: cfg_space::Command) {
self.update_intx_disable(command);
self.update_mmio_enabled(command);
}
    /// Emulates a 32-bit write of `val` to configuration space at `offset`.
    pub fn write_u32(&mut self, offset: u16, val: u32) -> IoResult {
use cfg_space::HeaderType00;
match HeaderType00(offset) {
HeaderType00::STATUS_COMMAND => {
let command = match cfg_space::Command::from_bits(val as u16) {
Some(command) => command,
None => {
tracelimit::warn_ratelimited!(offset, val, "setting invalid command bits");
cfg_space::Command::from_bits_truncate(val as u16)
}
};
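                // Only react to bits that actually changed, so rewriting the
                // same command value does not needlessly remap BARs or touch
                // the interrupt line.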
if self
.state
.command
.contains(cfg_space::Command::INTX_DISABLE)
!= command.contains(cfg_space::Command::INTX_DISABLE)
{
self.update_intx_disable(command)
}
if self
.state
.command
.contains(cfg_space::Command::MMIO_ENABLED)
!= command.contains(cfg_space::Command::MMIO_ENABLED)
{
self.update_mmio_enabled(command)
}
self.state.command = command;
}
            HeaderType00::BIST_HEADER => {
                // Only the latency-timer byte (bits 15:8) is writable here.
                let timer_val = (val >> 8) as u8;
                self.state.latency_timer = timer_val;
            }
HeaderType00::BAR0
| HeaderType00::BAR1
| HeaderType00::BAR2
| HeaderType00::BAR3
| HeaderType00::BAR4
| HeaderType00::BAR5 => {
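                // BAR registers only latch new values while MMIO decoding is
                // disabled. Guests size a BAR by writing all ones and reading
                // the value back: for an illustrative 16 KiB BAR, writing
                // 0xffff_ffff to the low register reads back as 0xffff_c000
                // plus the 64-bit type encoding, and the cleared low bits give
                // the size.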
if !self
.state
.command
.contains(cfg_space::Command::MMIO_ENABLED)
{
let bar_index = (offset - HeaderType00::BAR0.0) as usize / 4;
let mut bar_value = val & self.bar_masks[bar_index];
if bar_index & 1 == 0 && self.bar_masks[bar_index] != 0 {
bar_value |= cfg_space::BarEncodingBits::TYPE_64_BIT.bits();
}
self.state.base_addresses[bar_index] = bar_value;
}
}
            HeaderType00::LATENCY_INTERRUPT => {
                // Only the interrupt-line byte (bits 7:0) is writable; the
                // interrupt pin reported in bits 15:8 is read-only.
                self.state.interrupt_line = (val & 0xff) as u8;
            }
            // Writes to the remaining (read-only) header registers are
            // silently ignored.
            _ if offset < 0x40 && offset % 4 == 0 => (),
_ if (0x40..0x100).contains(&offset) => {
if let Some((cap_index, cap_offset)) =
self.get_capability_index_and_offset(offset - 0x40)
{
self.capabilities[cap_index].write_u32(cap_offset, val);
} else {
tracelimit::warn_ratelimited!(
offset,
value = val,
"unhandled config space write"
);
return IoResult::Err(IoError::InvalidRegister);
}
}
_ if (0x100..0x1000).contains(&offset) => {
tracelimit::warn_ratelimited!(
offset,
value = val,
"unhandled extended config space write"
);
return IoResult::Err(IoError::InvalidRegister);
}
_ => {
tracelimit::warn_ratelimited!(offset, value = val, "unexpected config space write");
return IoResult::Err(IoError::InvalidRegister);
}
}
IoResult::Ok
}
    /// Finds the BAR, if any, containing the given guest physical address
    /// while MMIO decoding is enabled.
    pub fn find_bar(&self, address: u64) -> Option<(u8, u16)> {
self.active_bars.find(address)
}
}
mod save_restore {
use super::*;
use thiserror::Error;
use vmcore::save_restore::RestoreError;
use vmcore::save_restore::SaveError;
use vmcore::save_restore::SaveRestore;
mod state {
use mesh::payload::Protobuf;
use vmcore::save_restore::SavedStateBlob;
use vmcore::save_restore::SavedStateRoot;
#[derive(Protobuf, SavedStateRoot)]
#[mesh(package = "pci.cfg_space_emu")]
pub struct SavedState {
#[mesh(1)]
pub command: u16,
#[mesh(2)]
pub base_addresses: [u32; 6],
#[mesh(3)]
pub interrupt_line: u8,
#[mesh(4)]
pub latency_timer: u8,
#[mesh(5)]
pub capabilities: Vec<(String, SavedStateBlob)>,
}
}
#[derive(Debug, Error)]
enum ConfigSpaceRestoreError {
#[error("found invalid config bits in saved state")]
InvalidConfigBits,
#[error("found unexpected capability {0}")]
InvalidCap(String),
}
impl SaveRestore for ConfigSpaceType0Emulator {
type SavedState = state::SavedState;
fn save(&mut self) -> Result<Self::SavedState, SaveError> {
let ConfigSpaceType0EmulatorState {
command,
base_addresses,
interrupt_line,
latency_timer,
} = self.state;
let saved_state = state::SavedState {
command: command.bits(),
base_addresses,
interrupt_line,
latency_timer,
capabilities: self
.capabilities
.iter_mut()
.map(|cap| {
let id = cap.label().to_owned();
Ok((id, cap.save()?))
})
.collect::<Result<_, _>>()?,
};
Ok(saved_state)
}
fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
let state::SavedState {
command,
base_addresses,
interrupt_line,
latency_timer,
capabilities,
} = state;
self.state = ConfigSpaceType0EmulatorState {
command: cfg_space::Command::from_bits(command).ok_or(
RestoreError::InvalidSavedState(
ConfigSpaceRestoreError::InvalidConfigBits.into(),
),
)?,
base_addresses,
interrupt_line,
latency_timer,
};
self.sync_command_register(self.state.command);
for (id, entry) in capabilities {
tracing::debug!(save_id = id.as_str(), "restoring pci capability");
let mut restored = false;
for cap in self.capabilities.iter_mut() {
if cap.label() == id {
cap.restore(entry)?;
restored = true;
break;
}
}
if !restored {
return Err(RestoreError::InvalidSavedState(
ConfigSpaceRestoreError::InvalidCap(id).into(),
));
}
}
Ok(())
}
}
}