vmm_core/
synic.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4use hvdef::HvError;
5use hvdef::HvResult;
6use hvdef::Vtl;
7use inspect::Inspect;
8use parking_lot::Mutex;
9use std::collections::HashMap;
10use std::collections::hash_map;
11use std::fmt::Debug;
12use std::sync::Arc;
13use std::sync::Weak;
14use std::task::Context;
15use std::task::Poll;
16use virt::Synic;
17use virt::VpIndex;
18use vmcore::synic::EventPort;
19use vmcore::synic::GuestEventPort;
20use vmcore::synic::GuestMessagePort;
21use vmcore::synic::MessagePort;
22use vmcore::synic::MonitorInfo;
23use vmcore::synic::MonitorPageGpas;
24use vmcore::synic::SynicMonitorAccess;
25use vmcore::synic::SynicPortAccess;
26
/// Routes guest synic (synthetic interrupt controller) messages and events to
/// host-side ports registered by connection ID, backed by a partition's
/// [`Synic`] implementation.
pub struct SynicPorts {
    // Used to post messages/events into the guest and to create
    // hypervisor-level port objects.
    partition: Arc<dyn Synic>,
    // Registered host ports by connection ID. Shared (weakly) with each
    // `PortHandle` so a handle can unregister its port on drop.
    ports: Arc<PortMap>,
}

/// Map from connection ID to the registered host-side port.
type PortMap = Mutex<HashMap<u32, Port>>;
33
34impl SynicPorts {
35    pub fn new(partition: Arc<dyn Synic>) -> Self {
36        Self {
37            partition,
38            ports: Default::default(),
39        }
40    }
41
42    pub fn on_post_message(
43        &self,
44        vtl: Vtl,
45        connection_id: u32,
46        secure: bool,
47        message: &[u8],
48    ) -> HvResult<()> {
49        let port = self.ports.lock().get(&connection_id).cloned();
50        if let Some(Port {
51            port_type: PortType::Message(port),
52            minimum_vtl,
53        }) = port
54        {
55            if vtl < minimum_vtl {
56                Err(HvError::OperationDenied)
57            } else if port.poll_handle_message(
58                &mut Context::from_waker(std::task::Waker::noop()),
59                message,
60                secure,
61            ) == Poll::Ready(())
62            {
63                Ok(())
64            } else {
65                // TODO: VMBus sometimes (in Azure?) returns HV_STATUS_TIMEOUT
66                //       here instead to force the guest to retry. Should we do
67                //       the same? Perhaps only for Linux VMs?
68                Err(HvError::InsufficientBuffers)
69            }
70        } else {
71            Err(HvError::InvalidConnectionId)
72        }
73    }
74
75    pub fn on_signal_event(&self, vtl: Vtl, connection_id: u32, flag_number: u16) -> HvResult<()> {
76        let port = self.ports.lock().get(&connection_id).cloned();
77        if let Some(Port {
78            port_type: PortType::Event(port),
79            minimum_vtl,
80        }) = port
81        {
82            if vtl < minimum_vtl {
83                Err(HvError::OperationDenied)
84            } else {
85                port.handle_event(flag_number);
86                Ok(())
87            }
88        } else {
89            Err(HvError::InvalidConnectionId)
90        }
91    }
92}
93
94impl SynicPortAccess for SynicPorts {
95    fn add_message_port(
96        &self,
97        connection_id: u32,
98        minimum_vtl: Vtl,
99        port: Arc<dyn MessagePort>,
100    ) -> Result<Box<dyn Sync + Send>, vmcore::synic::Error> {
101        match self.ports.lock().entry(connection_id) {
102            hash_map::Entry::Occupied(_) => {
103                return Err(vmcore::synic::Error::ConnectionIdInUse(connection_id));
104            }
105            hash_map::Entry::Vacant(e) => {
106                e.insert(Port {
107                    port_type: PortType::Message(port),
108                    minimum_vtl,
109                });
110            }
111        }
112        Ok(Box::new(PortHandle {
113            ports: Arc::downgrade(&self.ports),
114            connection_id,
115            _inner_handle: None,
116            _monitor: None,
117        }))
118    }
119
120    fn add_event_port(
121        &self,
122        connection_id: u32,
123        minimum_vtl: Vtl,
124        port: Arc<dyn EventPort>,
125        monitor_info: Option<MonitorInfo>,
126    ) -> Result<Box<dyn Sync + Send>, vmcore::synic::Error> {
127        // Create a direct port mapping in the hypervisor if an event was provided.
128        let inner_handle = if let Some(event) = port.os_event() {
129            self.partition
130                .new_host_event_port(connection_id, minimum_vtl, event)?
131        } else {
132            None
133        };
134
135        match self.ports.lock().entry(connection_id) {
136            hash_map::Entry::Occupied(_) => {
137                return Err(vmcore::synic::Error::ConnectionIdInUse(connection_id));
138            }
139            hash_map::Entry::Vacant(e) => {
140                e.insert(Port {
141                    port_type: PortType::Event(port),
142                    minimum_vtl,
143                });
144            }
145        }
146
147        let monitor = monitor_info.as_ref().and_then(|info| {
148            self.partition
149                .monitor_support()
150                .map(|monitor| monitor.register_monitor(info.monitor_id, connection_id))
151        });
152
153        Ok(Box::new(PortHandle {
154            ports: Arc::downgrade(&self.ports),
155            connection_id,
156            _inner_handle: inner_handle,
157            _monitor: monitor,
158        }))
159    }
160
161    fn new_guest_message_port(
162        &self,
163        vtl: Vtl,
164        vp: u32,
165        sint: u8,
166    ) -> Result<Box<(dyn GuestMessagePort)>, vmcore::synic::HypervisorError> {
167        Ok(Box::new(DirectGuestMessagePort {
168            partition: Arc::clone(&self.partition),
169            vtl,
170            vp: VpIndex::new(vp),
171            sint,
172        }))
173    }
174
175    fn new_guest_event_port(
176        &self,
177        _port_id: u32,
178        vtl: Vtl,
179        vp: u32,
180        sint: u8,
181        flag: u16,
182        _monitor_info: Option<MonitorInfo>,
183    ) -> Result<Box<(dyn GuestEventPort)>, vmcore::synic::HypervisorError> {
184        Ok(self.partition.new_guest_event_port(vtl, vp, sint, flag))
185    }
186
187    fn prefer_os_events(&self) -> bool {
188        self.partition.prefer_os_events()
189    }
190
191    fn monitor_support(&self) -> Option<&dyn SynicMonitorAccess> {
192        self.partition.monitor_support().and(Some(self))
193    }
194}
195
196impl SynicMonitorAccess for SynicPorts {
197    fn set_monitor_page(&self, vtl: Vtl, gpa: Option<MonitorPageGpas>) -> anyhow::Result<()> {
198        self.partition
199            .monitor_support()
200            .unwrap()
201            .set_monitor_page(vtl, gpa.map(|mp| mp.child_to_parent))
202    }
203}
204
/// Handle returned from `add_message_port`/`add_event_port`; unregisters the
/// port from the map when dropped.
struct PortHandle {
    // Weak so an outstanding handle does not keep the port map alive after
    // the owning `SynicPorts` is gone.
    ports: Weak<PortMap>,
    connection_id: u32,
    // Held only for its drop behavior (hypervisor-level host event port, when
    // one was created).
    _inner_handle: Option<Box<dyn Sync + Send>>,
    // Held only for its drop behavior (monitor registration, when one was
    // made).
    _monitor: Option<Box<dyn Sync + Send>>,
}
211
212impl Drop for PortHandle {
213    fn drop(&mut self) {
214        if let Some(ports) = self.ports.upgrade() {
215            let entry = ports.lock().remove(&self.connection_id);
216            entry.expect("port was previously added");
217        }
218    }
219}
220
/// A registered host-side port plus the policy used to validate guest access.
#[derive(Debug, Clone)]
struct Port {
    port_type: PortType,
    // Requests from VTLs below this are rejected with
    // `HvError::OperationDenied`.
    minimum_vtl: Vtl,
}
226
/// The kind of host listener behind a connection ID.
#[derive(Clone)]
enum PortType {
    /// Target of `on_post_message`.
    Message(Arc<dyn MessagePort>),
    /// Target of `on_signal_event`.
    Event(Arc<dyn EventPort>),
}
232
233impl Debug for PortType {
234    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
235        f.pad(match self {
236            Self::Message(_) => "Port::Message",
237            Self::Event(_) => "Port::Event",
238        })
239    }
240}
241
/// A `GuestMessagePort` that posts messages synchronously through the
/// partition's `Synic` implementation.
struct DirectGuestMessagePort {
    partition: Arc<dyn Synic>,
    vtl: Vtl,
    // Target virtual processor; retargetable via `set_target_vp`.
    vp: VpIndex,
    sint: u8,
}
248
249impl GuestMessagePort for DirectGuestMessagePort {
250    fn poll_post_message(&mut self, _cx: &mut Context<'_>, typ: u32, payload: &[u8]) -> Poll<()> {
251        self.partition
252            .post_message(self.vtl, self.vp, self.sint, typ, payload);
253
254        Poll::Ready(())
255    }
256
257    fn set_target_vp(&mut self, vp: u32) -> Result<(), vmcore::synic::HypervisorError> {
258        self.vp = VpIndex::new(vp);
259        Ok(())
260    }
261}
262
impl Inspect for DirectGuestMessagePort {
    fn inspect(&self, req: inspect::Request<'_>) {
        // Expose the current target VP for diagnostics.
        req.respond().field("message_port_vp", self.vp.index());
    }
}