1#![forbid(unsafe_code)]
7
8use hvdef::HV_PAGE_SIZE;
9use hvdef::HvMonitorPage;
10use hvdef::HvMonitorPageSmall;
11use inspect::Inspect;
12use std::mem::offset_of;
13use std::sync::Arc;
14use std::sync::atomic::AtomicU32;
15use std::sync::atomic::AtomicU64;
16use std::sync::atomic::Ordering;
17use zerocopy::FromZeros;
18use zerocopy::IntoBytes;
19
/// Number of monitor IDs supported (4 trigger groups x 32 pending bits each,
/// per the decoding in `MonitorPage::write_bit`).
const MAX_MONITORS: usize = 128;
/// Sentinel stored in `MonitorPage::gpa` when no monitor page is configured.
const INVALID_MONITOR_GPA: u64 = u64::MAX;
/// Sentinel stored in a `MonitorList` slot that has no registered connection.
const INVALID_CONNECTION_ID: u32 = !0;
24
/// A monitor ID: an index into the monitor page's trigger bits.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct MonitorId(pub u8);

impl MonitorId {
    /// Sentinel value marking an invalid/unassigned monitor ID.
    pub const INVALID: MonitorId = MonitorId(u8::MAX);
}
33
/// Tracks the guest physical address of the monitor page and the set of
/// registered monitors.
#[derive(Debug)]
pub struct MonitorPage {
    // GPA of the monitor page, or INVALID_MONITOR_GPA when unset.
    gpa: AtomicU64,
    // Monitor ID -> connection ID table, shared with registration guards so
    // dropping a guard can clear its slot.
    monitors: Arc<MonitorList>,
}
40
41impl Inspect for MonitorPage {
42 fn inspect(&self, req: inspect::Request<'_>) {
43 let mut resp = req.respond();
44 if let Some(gpa) = self.gpa() {
45 resp.hex("gpa", gpa);
46 }
47 resp.field("monitors", &self.monitors);
48 }
49}
50
/// Fixed-size table mapping each monitor ID to a connection ID; unassigned
/// slots hold `INVALID_CONNECTION_ID`.
#[derive(Debug)]
struct MonitorList([AtomicU32; MAX_MONITORS]);
53
54impl MonitorList {
55 fn new() -> Self {
56 Self([INVALID_CONNECTION_ID; MAX_MONITORS].map(Into::into))
57 }
58
59 fn set(&self, monitor_id: MonitorId, connection_id: Option<u32>) {
60 let old_connection_id = self.0[monitor_id.0 as usize].swap(
61 connection_id.unwrap_or(INVALID_CONNECTION_ID),
62 Ordering::Relaxed,
63 );
64 assert!(
65 old_connection_id == INVALID_CONNECTION_ID || connection_id.is_none(),
66 "requested monitor ID {} already in use",
67 monitor_id.0
68 );
69 }
70
71 fn get(&self, monitor_id: MonitorId) -> Option<u32> {
72 let connection_id = self.0[monitor_id.0 as usize].load(Ordering::Relaxed);
73 if connection_id != INVALID_CONNECTION_ID {
74 Some(connection_id)
75 } else {
76 None
77 }
78 }
79}
80
81impl Inspect for MonitorList {
82 fn inspect(&self, req: inspect::Request<'_>) {
83 let mut resp: inspect::Response<'_> = req.respond();
84 for monitor_id in 0..MAX_MONITORS {
85 if let Some(connection_id) = self.get(MonitorId(monitor_id as u8)) {
86 resp.hex(&monitor_id.to_string(), connection_id);
87 }
88 }
89 }
90}
91
92impl MonitorPage {
93 pub fn new() -> Self {
95 Self {
96 gpa: AtomicU64::new(INVALID_MONITOR_GPA),
97 monitors: Arc::new(MonitorList::new()),
98 }
99 }
100
101 pub fn set_gpa(&self, gpa: Option<u64>) -> Option<u64> {
103 assert!(gpa.is_none_or(|gpa| gpa % HV_PAGE_SIZE == 0));
104 let old = self
105 .gpa
106 .swap(gpa.unwrap_or(INVALID_MONITOR_GPA), Ordering::Relaxed);
107
108 (old != INVALID_MONITOR_GPA).then_some(old)
109 }
110
111 pub fn gpa(&self) -> Option<u64> {
113 let gpa = self.gpa.load(Ordering::Relaxed);
114 (gpa != INVALID_MONITOR_GPA).then_some(gpa)
115 }
116
117 pub fn register_monitor(
124 &self,
125 monitor_id: MonitorId,
126 connection_id: u32,
127 ) -> Box<dyn Sync + Send> {
128 self.monitors.set(monitor_id, Some(connection_id));
129
130 tracing::trace!(monitor_id = monitor_id.0, "registered monitor");
131 Box::new(RegisteredMonitor {
132 monitors: self.monitors.clone(),
133 monitor_id,
134 })
135 }
136
137 pub fn write_bit(&self, page_bit: u32) -> Option<u32> {
140 const TRIGGER_GROUP_OFFSET: u32 = offset_of!(HvMonitorPage, trigger_group) as u32 * 8;
141 let trigger_bit = page_bit.checked_sub(TRIGGER_GROUP_OFFSET)?;
142 let group = trigger_bit / 64;
143 let trigger = trigger_bit % 64;
144 if group >= 4 || trigger >= 32 {
145 return None;
146 }
147 let monitor_id = group * 32 + trigger;
148 if let Some(connection_id) = self.monitors.get(MonitorId(monitor_id as u8)) {
149 Some(connection_id)
150 } else {
151 tracelimit::warn_ratelimited!(monitor_id, "monitor write for unknown id");
152 None
153 }
154 }
155
156 pub fn check_write(&self, gpa: u64, bytes: &[u8], mut signal: impl FnMut(u32)) -> bool {
159 let page_gpa = self.gpa.load(Ordering::Relaxed);
160 if page_gpa != gpa & !(HV_PAGE_SIZE - 1) {
161 return false;
162 }
163
164 if gpa + bytes.len() as u64 > page_gpa + size_of::<HvMonitorPageSmall>() as u64 {
165 tracelimit::warn_ratelimited!(gpa, "write to unused portion of monitor page");
166 return true;
168 }
169
170 let mut page = HvMonitorPageSmall::new_zeroed();
171 let offset = (gpa - page_gpa) as usize;
172 page.as_mut_bytes()[offset..offset + bytes.len()].copy_from_slice(bytes);
173 for (group_index, group) in page.trigger_group.iter().enumerate() {
174 let mut value = group.pending;
175 while value != 0 {
176 let index = value.trailing_zeros();
177 value &= !(1 << index);
178 let monitor_id = group_index * 32 + (index as usize);
179 if let Some(connection_id) = &self.monitors.get(MonitorId(monitor_id as u8)) {
180 signal(*connection_id);
181 } else {
182 tracelimit::warn_ratelimited!(monitor_id, "monitor write for unknown id");
183 }
184 }
185 }
186
187 true
188 }
189}
190
/// Guard returned by `MonitorPage::register_monitor`; while alive, the
/// monitor ID remains mapped to its connection ID.
struct RegisteredMonitor {
    // Shared table to clear this monitor's slot from on drop.
    monitors: Arc<MonitorList>,
    monitor_id: MonitorId,
}
196
197impl Drop for RegisteredMonitor {
198 fn drop(&mut self) {
199 tracing::trace!(monitor_id = self.monitor_id.0, "unregistered monitor");
200 self.monitors.set(self.monitor_id, None);
201 }
202}
203
#[cfg(test)]
mod tests {
    use super::*;
    use std::mem::offset_of;

    #[test]
    fn test_set_gpa() {
        let monitor = MonitorPage::new();
        // First set returns no previous GPA; clearing returns the old one.
        assert!(monitor.set_gpa(Some(0x123f000)).is_none());
        assert_eq!(monitor.set_gpa(None), Some(0x123f000));
        assert!(monitor.set_gpa(None).is_none());
    }

    #[test]
    fn test_write() {
        let monitor = MonitorPage::new();
        monitor.set_gpa(Some(HV_PAGE_SIZE));
        // Each guard must stay alive for the whole test; dropping one would
        // unregister its monitor ID. Bind them to distinct names (previously
        // three guards shadowed a single `_reg1` binding, which only worked
        // because shadowed values live until end of scope).
        let _reg5 = monitor.register_monitor(MonitorId(5), 42);
        let _reg7 = monitor.register_monitor(MonitorId(7), 47);
        let _reg9 = monitor.register_monitor(MonitorId(9), 49);
        let _reg127 = monitor.register_monitor(MonitorId(127), 500);
        let mut page = HvMonitorPageSmall::new_zeroed();
        page.trigger_group[0].pending = 1 << 5;

        // Writes outside the monitor page are not claimed.
        assert!(
            !monitor.check_write(HV_PAGE_SIZE * 2, page.as_bytes(), |_| panic!(
                "Should not be called."
            ))
        );

        assert!(
            !monitor.check_write(HV_PAGE_SIZE - 1, page.as_bytes(), |_| panic!(
                "Should not be called."
            ))
        );

        // A full-page write signals the registered monitor's connection ID.
        let mut triggered = Vec::new();
        assert!(monitor.check_write(HV_PAGE_SIZE, page.as_bytes(), |id| triggered.push(id)));
        assert_eq!(triggered, vec![42]);

        // Multiple pending bits across groups; the unregistered ID (bit 6)
        // is skipped, the rest signal in ascending bit order.
        page.trigger_state.set_group_enable(2);
        page.trigger_group[0].pending = (1 << 5) | (1 << 6) | (1 << 7);
        page.trigger_group[3].pending = 1 << 31;
        triggered.clear();
        assert!(monitor.check_write(HV_PAGE_SIZE, page.as_bytes(), |id| triggered.push(id)));
        assert_eq!(triggered, vec![42, 47, 500]);

        // A partial write starting at the trigger groups also signals.
        // Explicit u32 matches the `pending` field's width.
        let pending: u32 = 1 << 9;
        triggered.clear();
        assert!(monitor.check_write(
            HV_PAGE_SIZE + offset_of!(HvMonitorPageSmall, trigger_group) as u64,
            pending.as_bytes(),
            |id| triggered.push(id),
        ));

        assert_eq!(triggered, vec![49]);
    }
}