vmotherboard/chipset/io_ranges/mod.rs
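
//! Tracks the IO ranges (e.g. MMIO or port IO addresses) registered by
//! chipset devices, and resolves accesses to the device that owns them.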

use address_filter::AddressFilter;
use address_filter::RangeKey;
use chipset_device::ChipsetDevice;
use closeable_mutex::CloseableMutex;
use inspect::Inspect;
use inspect_counters::SharedCounter;
use parking_lot::RwLock;
use range_map_vec::RangeMap;
use std::ops::RangeInclusive;
use std::sync::Arc;
use std::sync::OnceLock;
use std::sync::Weak;

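/// Interior state shared by all clones of [`IoRanges`].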
struct IoRangesInner<T> {
    map: RangeMap<T, RangeEntry>,
    trace_on: AddressFilter<T>,
    break_on: AddressFilter<T>,
    static_registration_conflicts: Option<Vec<IoRangeConflict<T>>>,
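    /// Device that handles accesses which don't resolve to a live registered
    /// device.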
    fallback_device: Option<Arc<CloseableMutex<dyn ChipsetDevice>>>,
}

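/// Error returned when a newly registered IO range overlaps an existing
/// registration.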
#[derive(Debug, Clone)]
pub struct IoRangeConflict<T> {
    existing_dev_region: (Arc<str>, Arc<str>, RangeInclusive<T>),
    conflict_dev_region: (Arc<str>, Arc<str>, RangeInclusive<T>),
}

impl<T> std::error::Error for IoRangeConflict<T> where T: std::fmt::LowerHex + core::fmt::Debug {}
impl<T> std::fmt::Display for IoRangeConflict<T>
where
    T: std::fmt::LowerHex + core::fmt::Debug,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}/{}:{:#x?} conflicts with existing {}/{}:{:#x?}",
            self.conflict_dev_region.0,
            self.conflict_dev_region.1,
            self.conflict_dev_region.2,
            self.existing_dev_region.0,
            self.existing_dev_region.1,
            self.existing_dev_region.2,
        )
    }
}

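/// Bookkeeping for a single registered range.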
#[derive(Inspect)]
struct RangeEntry {
    region_name: Arc<str>,
    dev_name: Arc<str>,
    #[inspect(rename = "device_is_init", with = "|x| x.upgrade().is_some()")]
    dev: Weak<CloseableMutex<dyn ChipsetDevice>>,
    read_count: SharedCounter,
    write_count: SharedCounter,
}

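/// A cloneable, shared map from IO address ranges to the devices registered
/// to handle them.
///
/// A minimal usage sketch (marked `ignore`, not compiled here; it assumes
/// `u64` implements [`RangeKey`] and that `dev` is an existing
/// `Arc<CloseableMutex<dyn ChipsetDevice>>`):
///
/// ```ignore
/// let ranges: IoRanges<u64> = IoRanges::new(false, None);
/// ranges
///     .register(0x1000, 0x1fff, "my-region".into(), Arc::downgrade(&dev), "my-dev".into())
///     .unwrap();
/// let hit = ranges.lookup(0x1004, /* is_read */ true);
/// assert_eq!(&*hit.dev_name, "my-dev");
/// ```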
#[derive(Clone)]
pub struct IoRanges<T> {
    inner: Arc<RwLock<IoRangesInner<T>>>,
}

impl<T: RangeKey> IoRanges<T> {
    pub fn new(
        trace_on_unknown: bool,
        fallback_device: Option<Arc<CloseableMutex<dyn ChipsetDevice>>>,
    ) -> Self {
        Self {
            inner: Arc::new(RwLock::new(IoRangesInner {
                map: RangeMap::new(),
                trace_on: AddressFilter::new(trace_on_unknown),
                break_on: AddressFilter::new(false),
                static_registration_conflicts: Some(Vec::new()),
                fallback_device,
            })),
        }
    }

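    /// Registers `start..=end` as a region handled by `dev`, failing if it
    /// overlaps an existing registration.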
    pub fn register(
        &self,
        start: T,
        end: T,
        region_name: Arc<str>,
        dev: Weak<CloseableMutex<dyn ChipsetDevice>>,
        dev_name: Arc<str>,
    ) -> Result<(), IoRangeConflict<T>> {
        let mut inner = self.inner.write();
        match inner.map.entry(start..=end) {
            range_map_vec::Entry::Vacant(entry) => {
                entry.insert(RangeEntry {
                    region_name,
                    dev,
                    dev_name,
                    read_count: Default::default(),
                    write_count: Default::default(),
                });
                Ok(())
            }
            range_map_vec::Entry::Overlapping(entry) => {
                let existing_dev_region = {
                    let (start, end, entry) = entry.get();
                    (
                        entry.dev_name.clone(),
                        entry.region_name.clone(),
                        *start..=*end,
                    )
                };
                let conflict = IoRangeConflict {
                    existing_dev_region,
                    conflict_dev_region: (dev_name, region_name, start..=end),
                };

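                // While static registration conflicts are still being
                // collected, record this one so it can be reported in bulk
                // via `take_static_registration_conflicts`.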
                if let Some(v) = inner.static_registration_conflicts.as_mut() {
                    v.push(conflict.clone())
                }

                Err(conflict)
            }
        }
    }

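    /// Removes the registered range containing `start`.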
    pub fn revoke(&self, start: T) {
        let mut inner = self.inner.write();
        inner.map.remove(&start);
    }

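    /// Resolves the device handling `addr`, incrementing the matching range's
    /// read or write access counter.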
    pub fn lookup(&self, addr: T, is_read: bool) -> LookupResult {
        static UNKNOWN_DEVICE: OnceLock<Arc<CloseableMutex<dyn ChipsetDevice>>> = OnceLock::new();
        static UNKNOWN_DEVICE_NAME: OnceLock<Arc<str>> = OnceLock::new();
        static UNKNOWN_RANGE: OnceLock<Arc<str>> = OnceLock::new();

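        // Find the entry covering `addr` and bump its read/write counter.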
        let inner = self.inner.read();
        let entry = inner.map.get(&addr);
        if let Some(entry) = entry {
            if is_read {
                entry.read_count.increment()
            } else {
                entry.write_count.increment()
            }
        }

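        // Prefer the registered device if it is still alive; otherwise fall
        // back to the configured fallback device, and finally to a shared
        // placeholder `MissingDev`.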
        let (dev, dev_name) = entry
            .and_then(|e| e.dev.upgrade().map(|d| (d, e.dev_name.clone())))
            .unwrap_or_else(|| {
                (
                    inner.fallback_device.clone().unwrap_or_else(|| {
                        UNKNOWN_DEVICE
                            .get_or_init(|| {
                                Arc::new(CloseableMutex::new(
                                    missing_dev::MissingDev::from_manifest(
                                        missing_dev::MissingDevManifest::new(),
                                        &mut chipset_device::mmio::ExternallyManagedMmioIntercepts,
                                        &mut chipset_device::pio::ExternallyManagedPortIoIntercepts,
                                    ),
                                ))
                            })
                            .clone()
                    }),
                    UNKNOWN_DEVICE_NAME
                        .get_or_init(|| "<unknown>".into())
                        .clone(),
                )
            });

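        // Consult the configured address filters to decide whether this
        // access should be traced and whether it should trigger a debug
        // break.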
        let trace = inner.trace_on.filtered(&addr, entry.is_some());
        let trace = trace.then(|| {
            entry.map_or_else(
                || UNKNOWN_RANGE.get_or_init(|| "<unknown>".into()).clone(),
                |e| e.region_name.clone(),
            )
        });
        let debug_break = inner.break_on.filtered(&addr, entry.is_some());
        LookupResult {
            dev,
            dev_name,
            trace,
            debug_break,
        }
    }

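    /// Takes the conflicts recorded during initial (static) registration.
    ///
    /// Panics if called more than once.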
    pub fn take_static_registration_conflicts(&mut self) -> Vec<IoRangeConflict<T>> {
        self.inner
            .write()
            .static_registration_conflicts
            .take()
            .expect("must only be called once")
    }

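    /// Returns `true` if a range containing `addr` is registered.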
    pub fn is_occupied(&self, addr: T) -> bool {
        self.inner.read().map.contains(&addr)
    }
}

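/// The result of an [`IoRanges::lookup`].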
pub struct LookupResult {
    pub dev: Arc<CloseableMutex<dyn ChipsetDevice>>,
    pub dev_name: Arc<str>,
    pub trace: Option<Arc<str>>,
    pub debug_break: bool,
}

impl<T: RangeKey> Inspect for IoRanges<T> {
    fn inspect(&self, req: inspect::Request<'_>) {
        let mut resp = req.respond();
        let mut inner = self.inner.write();
        resp.field_mut("trace_on", &mut inner.trace_on)
            .field_mut("break_on", &mut inner.break_on);
        for (range, entry) in inner.map.iter() {
            resp.field(&format!("{:#x}-{:#x}", range.start(), range.end()), entry);
        }
    }
}