virt/
generic.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4mod partition_memory_map;
5
6pub use partition_memory_map::PartitionMemoryMap;
7pub use vm_topology::processor::VpIndex;
8
9use crate::CpuidLeaf;
10use crate::PartitionCapabilities;
11use crate::io::CpuIo;
12use crate::irqcon::ControlGic;
13use crate::irqcon::IoApicRouting;
14use crate::irqcon::MsiRequest;
15use crate::x86::DebugState;
16use crate::x86::HardwareBreakpoint;
17use guestmem::DoorbellRegistration;
18use guestmem::GuestMemory;
19use hvdef::Vtl;
20use inspect::Inspect;
21use inspect::InspectMut;
22use memory_range::MemoryRange;
23use pci_core::msi::SignalMsi;
24use std::cell::Cell;
25use std::convert::Infallible;
26use std::fmt::Debug;
27use std::future::Future;
28use std::future::poll_fn;
29use std::pin::pin;
30use std::sync::Arc;
31use std::sync::atomic::AtomicBool;
32use std::sync::atomic::Ordering;
33use std::task::Poll;
34use std::task::Waker;
35use vm_topology::memory::MemoryLayout;
36use vm_topology::processor::ProcessorTopology;
37use vmcore::monitor::MonitorId;
38use vmcore::reference_time::ReferenceTimeSource;
39use vmcore::synic::GuestEventPort;
40use vmcore::vmtime::VmTimeSource;
41use vmcore::vpci_msi::MapVpciInterrupt;
42use vmcore::vpci_msi::MsiAddressData;
43use vmcore::vpci_msi::RegisterInterruptError;
44use vmcore::vpci_msi::VpciInterruptParameters;
45
/// Trait for a hypervisor backend that can instantiate VM partitions.
pub trait Hypervisor: 'static {
    /// The prototype partition type.
    type ProtoPartition<'a>: ProtoPartition<Partition = Self::Partition>;
    /// The partition type.
    type Partition;
    /// The error type when creating the partition.
    type Error: std::error::Error + Send + Sync + 'static;

    /// Returns whether this hypervisor is available on this machine.
    fn is_available(&self) -> Result<bool, Self::Error>;

    /// Returns a new prototype partition from the given configuration.
    ///
    /// The prototype is finalized into a full partition via
    /// [`ProtoPartition::build`].
    fn new_partition<'a>(
        &'a mut self,
        config: ProtoPartitionConfig<'a>,
    ) -> Result<Self::ProtoPartition<'a>, Self::Error>;
}
63
64/// Isolation type for a partition.
65#[derive(Eq, PartialEq, Debug, Copy, Clone, Inspect)]
66pub enum IsolationType {
67    /// No isolation.
68    None,
69    /// Hypervisor based isolation.
70    Vbs,
71    /// Secure nested paging (AMD SEV-SNP) - hardware based isolation.
72    Snp,
73    /// Trust domain extensions (Intel TDX) - hardware based isolation.
74    Tdx,
75}
76
77impl IsolationType {
78    /// Returns true if the isolation type is not `None`.
79    pub fn is_isolated(&self) -> bool {
80        !matches!(self, Self::None)
81    }
82
83    /// Returns whether the isolation type is hardware-backed.
84    pub fn is_hardware_isolated(&self) -> bool {
85        matches!(self, Self::Snp | Self::Tdx)
86    }
87}
88
/// An unexpected isolation type was provided.
#[derive(Debug)]
pub struct UnexpectedIsolationType;

impl IsolationType {
    /// Converts from the hypervisor definition of the isolation type.
    ///
    /// Returns [`UnexpectedIsolationType`] if `value` is not one of the known
    /// isolation types.
    pub const fn from_hv(
        value: hvdef::HvPartitionIsolationType,
    ) -> Result<Self, UnexpectedIsolationType> {
        match value {
            hvdef::HvPartitionIsolationType::NONE => Ok(IsolationType::None),
            hvdef::HvPartitionIsolationType::VBS => Ok(IsolationType::Vbs),
            hvdef::HvPartitionIsolationType::SNP => Ok(IsolationType::Snp),
            hvdef::HvPartitionIsolationType::TDX => Ok(IsolationType::Tdx),
            _ => Err(UnexpectedIsolationType),
        }
    }

    /// Converts to the hypervisor definition of the isolation type.
    pub const fn to_hv(self) -> hvdef::HvPartitionIsolationType {
        match self {
            IsolationType::None => hvdef::HvPartitionIsolationType::NONE,
            IsolationType::Vbs => hvdef::HvPartitionIsolationType::VBS,
            IsolationType::Snp => hvdef::HvPartitionIsolationType::SNP,
            IsolationType::Tdx => hvdef::HvPartitionIsolationType::TDX,
        }
    }
}
115
/// Page visibility types for isolated partitions.
///
/// Describes who may access a given page of guest memory.
#[derive(Eq, PartialEq, Debug, Copy, Clone, Inspect)]
pub enum PageVisibility {
    /// The guest has exclusive access to the page, and no access from the host.
    Exclusive,
    /// The page has shared access with the guest and host.
    Shared,
}
124
/// Prototype partition creation configuration.
///
/// Passed to [`Hypervisor::new_partition`] to create a [`ProtoPartition`].
pub struct ProtoPartitionConfig<'a> {
    /// The set of VPs to create.
    pub processor_topology: &'a ProcessorTopology,
    /// Microsoft hypervisor guest interface configuration, if enabled.
    pub hv_config: Option<HvConfig>,
    /// VM time access.
    pub vmtime: &'a VmTimeSource,
    /// Use the user-mode APIC emulator, if supported.
    pub user_mode_apic: bool,
    /// Isolation type for this partition.
    pub isolation: IsolationType,
}
138
/// Partition creation configuration.
///
/// Passed to [`ProtoPartition::build`] to finalize the partition.
pub struct PartitionConfig<'a> {
    /// The guest memory layout.
    pub mem_layout: &'a MemoryLayout,
    /// Guest memory access.
    pub guest_memory: &'a GuestMemory,
    /// Cpuid leaves to add to the default CPUID results.
    pub cpuid: &'a [CpuidLeaf],
    /// The offset of the VTL0 alias map. This maps VTL0's view of memory into
    /// VTL2 at the specified offset (which must be a power of 2).
    pub vtl0_alias_map: Option<u64>,
}
151
/// Trait for a prototype partition, one that is partially created but still
/// needs final configuration.
///
/// This is separate from the partition so that it can be queried to determine
/// the final partition configuration.
pub trait ProtoPartition {
    /// The partition type.
    type Partition: Partition;
    /// The VP binder type.
    type ProcessorBinder: 'static + BindProcessor + Send;
    /// The error type when creating the partition.
    type Error: std::error::Error + Send + Sync + 'static;

    /// Gets the default guest cpuid value for inputs `eax` and `ecx`.
    ///
    /// Only available when compiled for an x86-64 guest architecture.
    #[cfg(guest_arch = "x86_64")]
    fn cpuid(&self, eax: u32, ecx: u32) -> [u32; 4];

    /// The maximum physical address width that processors and devices for this
    /// partition can access.
    ///
    /// This may be smaller than what is reported to the guest via architectural
    /// interfaces by default, and it may be larger or smaller than what the VMM
    /// ultimately chooses to report to the guest.
    fn max_physical_address_size(&self) -> u8;

    /// Constructs the full partition, returning the partition object along
    /// with the processor binders used to bind each VP to its run thread.
    fn build(
        self,
        config: PartitionConfig<'_>,
    ) -> Result<(Self::Partition, Vec<Self::ProcessorBinder>), Self::Error>;
}
183
/// Trait used to bind a processor to the current thread.
pub trait BindProcessor {
    /// The processor object, which borrows from the binder.
    type Processor<'a>: Processor
    where
        Self: 'a;

    /// A binding error.
    type Error: std::error::Error + Send + Sync + 'static;

    /// Binds the processor to the current thread.
    ///
    /// The returned object is used to run and manage the VP on this thread.
    fn bind(&mut self) -> Result<Self::Processor<'_>, Self::Error>;
}
197
/// Policy for the partition when mapping VTL0 memory late.
///
/// Determines how a VTL0 memory access is handled before the memory has been
/// mapped (see [`LateMapVtl0MemoryConfig`]).
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub enum LateMapVtl0MemoryPolicy {
    /// Halt execution of the VP if VTL0 memory is accessed.
    Halt,
    /// Log the error but emulate the access with the instruction emulator.
    Log,
    /// Inject an exception into the guest.
    InjectException,
}
208
/// Which ranges VTL2 is allowed to access before VTL0 ram is mapped.
#[derive(Debug, Clone)]
pub enum LateMapVtl0AllowedRanges {
    /// Ask the memory layout what the vtl2_ram ranges are.
    MemoryLayout,
    /// These specific ranges are allowed.
    Ranges(Vec<MemoryRange>),
}
217
/// Config used to determine late mapping VTL0 memory.
#[derive(Debug, Clone)]
pub struct LateMapVtl0MemoryConfig {
    /// What ranges VTL2 is allowed to access before VTL0 memory is mapped.
    /// Generally this consists of the ranges representing VTL2 ram.
    pub allowed_ranges: LateMapVtl0AllowedRanges,
    /// The policy for the partition mapping VTL0 memory late.
    pub policy: LateMapVtl0MemoryPolicy,
}
227
/// VTL2 configuration.
#[derive(Debug)]
pub struct Vtl2Config {
    /// If set, map VTL0 memory late after VTL2 has started. The current
    /// heuristic is to defer mapping VTL0 memory until the first
    /// [`hvdef::HypercallCode::HvCallModifyVtlProtectionMask`] hypercall is
    /// made.
    ///
    /// Accesses before memory is mapped are handled according to the specified
    /// config.
    pub late_map_vtl0_memory: Option<LateMapVtl0MemoryConfig>,
}
239
/// Hypervisor configuration.
#[derive(Debug)]
pub struct HvConfig {
    /// Use the hypervisor's in-built enlightenment support if available.
    pub offload_enlightenments: bool,
    /// Allow device assignment on the partition.
    pub allow_device_assignment: bool,
    /// Enable VTL2 support if set. Additional options are described by
    /// [`Vtl2Config`].
    pub vtl2: Option<Vtl2Config>,
}
251
/// Methods for manipulating a VM partition.
pub trait Partition: 'static + Hv1 + Inspect + Send + Sync {
    /// Returns a trait object to accept pages on behalf of the guest during the
    /// initial start import flow.
    ///
    /// The default implementation returns `None` (not supported).
    fn supports_initial_accept_pages(
        &self,
    ) -> Option<&dyn AcceptInitialPages<Error = <Self as Hv1>::Error>> {
        None
    }

    /// Returns a trait object to reset the partition, if supported.
    fn supports_reset(&self) -> Option<&dyn ResetPartition<Error = <Self as Hv1>::Error>>;

    /// Returns a trait object to reset VTL state, if supported.
    ///
    /// The default implementation returns `None` (not supported).
    fn supports_vtl_scrub(&self) -> Option<&dyn ScrubVtl<Error = <Self as Hv1>::Error>> {
        None
    }

    /// Returns an interface for registering MMIO doorbells for this partition.
    ///
    /// Not all partitions support this; the default implementation returns
    /// `None`.
    fn doorbell_registration(
        self: &Arc<Self>,
        minimum_vtl: Vtl,
    ) -> Option<Arc<dyn DoorbellRegistration>> {
        let _ = minimum_vtl;
        None
    }

    /// Requests an MSI for the specified VTL.
    ///
    /// On x86, the MSI format is the architectural APIC format.
    ///
    /// On ARM64, the MSI format is currently not defined, since we only support
    /// Hyper-V-style VMs (which use synthetic MSIs via VPCI). In the future, we
    /// may want to support either or both SPI- and ITS+LPI-based MSIs.
    fn request_msi(&self, vtl: Vtl, request: MsiRequest);

    /// Returns an MSI interrupt target for this partition, which can be used to
    /// create MSI interrupts.
    ///
    /// Not all partitions support this; the default implementation returns
    /// `None`.
    fn as_signal_msi(self: &Arc<Self>, vtl: Vtl) -> Option<Arc<dyn SignalMsi>> {
        let _ = vtl;
        None
    }

    /// Get the partition capabilities for this partition.
    fn caps(&self) -> &PartitionCapabilities;

    /// Forces the run_vp call to yield to the scheduler (i.e. return
    /// Poll::Pending).
    fn request_yield(&self, vp_index: VpIndex);
}
306
/// X86-specific partition methods.
pub trait X86Partition: Partition {
    /// Gets the IO-APIC routing control for VTL0.
    fn ioapic_routing(&self) -> Arc<dyn IoApicRouting>;

    /// Pulses the specified APIC's local interrupt line (0 or 1).
    fn pulse_lint(&self, vp_index: VpIndex, vtl: Vtl, lint: u8);
}
315
/// ARM64-specific partition methods.
pub trait Aarch64Partition: Partition {
    /// Returns an interface for accessing the GIC interrupt controller for `vtl`.
    fn control_gic(&self, vtl: Vtl) -> Arc<dyn ControlGic>;
}
321
/// Extension trait for accepting initial pages.
pub trait AcceptInitialPages {
    /// The error type returned when accepting pages fails.
    type Error: std::error::Error;

    /// Accepts initial pages on behalf of the guest.
    ///
    /// This can only be used during the load path during partition start to
    /// accept pages on behalf of the guest that were set as part of the load
    /// process. The host virtstack cannot accept pages on behalf of the guest
    /// once it has started running.
    fn accept_initial_pages(
        &self,
        pages: &[(MemoryRange, PageVisibility)],
    ) -> Result<(), Self::Error>;
}
337
/// Extension trait for resetting the partition.
pub trait ResetPartition {
    /// The error type returned when reset fails.
    type Error: std::error::Error;

    /// Resets the partition, restoring all partition state to the initial
    /// state.
    ///
    /// The caller must ensure that no VPs are running when this is called.
    ///
    /// This resets partition-level (VM-wide) state. After this completes,
    /// the caller dispatches [`Processor::reset`] to each VP's thread to
    /// reset per-VP state (registers, APIC, synic message queues, etc.).
    ///
    /// If this fails, the partition is in a bad state and cannot be resumed
    /// until a subsequent reset call succeeds.
    fn reset(&self) -> Result<(), Self::Error>;
}
355
/// Extension trait for scrubbing higher VTL state while leaving lower VTLs
/// untouched.
pub trait ScrubVtl {
    /// The error type returned when scrubbing fails.
    type Error: std::error::Error;

    /// Scrubs partition and VP state for `vtl`. This is useful for servicing
    /// and restarting a higher VTL without touching the lower VTL.
    ///
    /// The caller must ensure that no VPs are running when this is called.
    ///
    /// This scrubs partition-level state. After this completes, the caller
    /// dispatches [`Processor::scrub`] to each VP's thread to scrub per-VP
    /// state for the specified VTL.
    ///
    /// Note that this does not reset page protections. This is necessary
    /// because there may be devices assigned to lower VTLs, and they should not
    /// be able to DMA to higher VTL memory during servicing.
    fn scrub(&self, vtl: Vtl) -> Result<(), Self::Error>;
}
375
/// Provides access to partition state for save, restore, and reset.
///
/// This is not part of [`Partition`] because some scenarios do not require such
/// access.
pub trait PartitionAccessState {
    /// The VM state accessor type.
    type StateAccess<'a>: crate::vm::AccessVmState
    where
        Self: 'a;

    /// Returns an object to access VM state for the specified VTL.
    fn access_state(&self, vtl: Vtl) -> Self::StateAccess<'_>;
}
388
/// Change memory protections for lower VTLs. This can be used to share memory
/// with a lower VTL or make memory accesses trigger an intercept. This is
/// intended for dynamic state as initial memory protections are applied at VM
/// start.
pub trait VtlMemoryProtection {
    /// Sets lower VTL permissions on a physical page, identified by its page
    /// frame number `pfn`.
    ///
    /// TODO: To remain generic may want to replace hvdef::HvMapGpaFlags with
    ///       something else.
    fn modify_vtl_page_setting(&self, pfn: u64, flags: hvdef::HvMapGpaFlags) -> anyhow::Result<()>;
}
400
/// Methods for running and managing a virtual processor that has been bound to
/// the current thread via [`BindProcessor::bind`].
pub trait Processor: InspectMut {
    /// The VP state accessor type.
    type StateAccess<'a>: crate::vp::AccessVpState
    where
        Self: 'a;

    /// Sets the debug state: conditions under which the VP should exit for
    /// debugging the guest. This includes single stepping and hardware
    /// breakpoints.
    ///
    /// TODO: generalize for non-x86 architectures.
    fn set_debug_state(
        &mut self,
        vtl: Vtl,
        state: Option<&DebugState>,
    ) -> Result<(), <Self::StateAccess<'_> as crate::vp::AccessVpState>::Error>;

    /// Runs the VP.
    ///
    /// Although this is an async function, it may block synchronously until
    /// [`Partition::request_yield`] is called for this VP. Then its future must
    /// return [`Poll::Pending`] at least once.
    ///
    /// Returns when an error occurs, the VP halts, or the VP is requested to
    /// stop via `stop`.
    #[expect(async_fn_in_trait)] // don't need or want Send bound
    async fn run_vp(
        &mut self,
        stop: StopVp<'_>,
        dev: &impl CpuIo,
    ) -> Result<Infallible, VpHaltReason>;

    /// Without running the VP, flushes any asynchronous requests from other
    /// processors or objects that might affect this state, so that the object
    /// can be saved/restored correctly.
    fn flush_async_requests(&mut self);

    /// Returns whether the specified VTL can be inspected on this processor.
    ///
    /// VTL0 is always inspectable.
    fn vtl_inspectable(&self, vtl: Vtl) -> bool {
        vtl == Vtl::Vtl0
    }

    /// Resets per-VP state after a partition-level reset.
    ///
    /// Called on each VP's thread while VPs are stopped, after
    /// [`ResetPartition::reset`] has completed.
    ///
    /// The default implementation panics. Backends that support
    /// [`ResetPartition`] must override this.
    #[allow(unreachable_code)]
    fn reset(&mut self) -> Result<(), impl std::error::Error + Send + Sync + 'static> {
        // The `Ok` is never constructed; `unimplemented!` panics first. It
        // exists only to pin the error type to `Infallible`.
        Ok::<(), Infallible>(unimplemented!(
            "Processor::reset not implemented for this backend"
        ))
    }

    /// Scrubs per-VP state for a specific VTL.
    ///
    /// Called on each VP's thread while VPs are stopped, after
    /// [`ScrubVtl::scrub`] has completed.
    ///
    /// The default implementation panics. Backends that support
    /// [`ScrubVtl`] must override this.
    #[allow(unreachable_code)]
    fn scrub(&mut self, _vtl: Vtl) -> Result<(), impl std::error::Error + Send + Sync + 'static> {
        Ok::<(), Infallible>(unimplemented!(
            "Processor::scrub not implemented for this backend"
        ))
    }

    /// Returns an object to access VP state for the specified VTL.
    fn access_state(&mut self, vtl: Vtl) -> Self::StateAccess<'_>;
}
474
/// A source for [`StopVp`].
pub struct StopVpSource {
    stop: Cell<bool>,
    waker: Cell<Option<Waker>>,
}

impl StopVpSource {
    /// Creates a new source, initially not stopping.
    pub fn new() -> Self {
        StopVpSource {
            stop: Cell::new(false),
            waker: Cell::new(None),
        }
    }

    /// Returns an object to wait for stops.
    pub fn checker(&self) -> StopVp<'_> {
        StopVp { source: self }
    }

    /// Initiates a VP stop.
    ///
    /// After this, calls to [`StopVp::check`] or [`StopVp::until_stop`] will
    /// fail.
    pub fn stop(&self) {
        // Record the stop first so any waiter woken below observes the flag.
        self.stop.set(true);
        let waiter = self.waker.take();
        if let Some(waiter) = waiter {
            waiter.wake();
        }
    }

    /// Returns whether [`Self::stop`] has been called.
    pub fn is_stopping(&self) -> bool {
        self.stop.get()
    }
}

/// Object to check for VP stop requests.
pub struct StopVp<'a> {
    source: &'a StopVpSource,
}

/// An error result that the VP stopped due to request.
#[derive(Debug)]
pub struct VpStopped(());

impl StopVp<'_> {
    /// Returns `Err(VpStopped(_))` if the VP should stop.
    pub fn check(&self) -> Result<(), VpStopped> {
        match self.source.stop.get() {
            true => Err(VpStopped(())),
            false => Ok(()),
        }
    }

    /// Runs `fut` until it completes or the VP should stop.
    pub async fn until_stop<Fut: Future>(&mut self, fut: Fut) -> Result<Fut::Output, VpStopped> {
        let mut fut = pin!(fut);
        poll_fn(|cx| {
            if let Poll::Ready(output) = fut.as_mut().poll(cx) {
                return Poll::Ready(Ok(output));
            }
            // Fail if a stop has been requested; otherwise register our waker
            // so a later `stop()` call wakes this task.
            self.check()?;
            self.source.waker.set(Some(cx.waker().clone()));
            Poll::Pending
        })
        .await
    }
}
545
/// An object that can be polled to see if a yield has been requested.
#[derive(Debug)]
pub struct NeedsYield {
    yield_requested: AtomicBool,
}

impl NeedsYield {
    /// Creates a new object with no yield requested.
    pub fn new() -> Self {
        NeedsYield {
            yield_requested: AtomicBool::new(false),
        }
    }

    /// Requests a yield.
    ///
    /// Returns whether a signal is necessary to ensure that the task yields
    /// soon, i.e. whether this is the first request since the last yield.
    pub fn request_yield(&self) -> bool {
        let already_requested = self.yield_requested.swap(true, Ordering::Release);
        !already_requested
    }

    /// Yields execution to the executor if `request_yield` has been called
    /// since the last call to `maybe_yield`.
    pub async fn maybe_yield(&self) {
        poll_fn(|cx| {
            if !self.yield_requested.load(Ordering::Acquire) {
                Poll::Ready(())
            } else {
                // Wake this task again to ensure it runs again.
                cx.waker().wake_by_ref();
                self.yield_requested.store(false, Ordering::Relaxed);
                Poll::Pending
            }
        })
        .await
    }
}
584
/// The reason that [`Processor::run_vp`] returned.
#[derive(Debug)]
pub enum VpHaltReason {
    /// The processor was requested to stop.
    Stop(VpStopped),
    /// The processor task should be restarted, possibly on a different thread.
    Cancel,
    /// The processor initiated a power off.
    PowerOff,
    /// The processor initiated a reboot.
    Reset,
    /// The processor initiated a hibernation.
    Hibernate,
    /// The processor triple faulted.
    TripleFault {
        /// The faulting VTL.
        // FUTURE: move VTL state into `AccessVpState`.
        vtl: Vtl,
    },
    /// Debugger single step.
    SingleStep,
    /// Debugger hardware breakpoint.
    HwBreak(HardwareBreakpoint),
}

/// Allows `?` to propagate a [`VpStopped`] out of `run_vp` as
/// [`VpHaltReason::Stop`].
impl From<VpStopped> for VpHaltReason {
    fn from(stop: VpStopped) -> Self {
        Self::Stop(stop)
    }
}
615
/// Provides per-VTL memory mappers for a partition.
pub trait PartitionMemoryMapper {
    /// Returns a memory mapper for the partition backing `vtl`.
    fn memory_mapper(&self, vtl: Vtl) -> Arc<dyn PartitionMemoryMap>;
}
620
/// Hyper-V guest interface services provided by a partition.
pub trait Hv1 {
    /// The error type for partition operations.
    type Error: std::error::Error + Send + Sync + 'static;
    /// The virtual device type, which supports VPCI interrupt mapping and MSI
    /// signaling.
    type Device: MapVpciInterrupt + SignalMsi;

    /// Returns the partition's reference time source, if one is available.
    fn reference_time_source(&self) -> Option<ReferenceTimeSource>;

    /// Returns a builder for creating virtual devices, or `None` if virtual
    /// devices are not supported.
    fn new_virtual_device(
        &self,
    ) -> Option<&dyn DeviceBuilder<Device = Self::Device, Error = Self::Error>>;
}
631
/// Builder for virtual devices (see [`Hv1::new_virtual_device`]).
pub trait DeviceBuilder: Hv1 {
    /// Builds a new virtual device for `vtl` with the given device ID.
    fn build(&self, vtl: Vtl, device_id: u64) -> Result<Self::Device, Self::Error>;
}
635
/// A device type for backends that do not support virtual devices.
///
/// This is an uninhabited enum: no value of it can ever be constructed, so the
/// trait method bodies below are statically unreachable.
pub enum UnimplementedDevice {}

impl MapVpciInterrupt for UnimplementedDevice {
    async fn register_interrupt(
        &self,
        _vector_count: u32,
        _params: &VpciInterruptParameters<'_>,
    ) -> Result<MsiAddressData, RegisterInterruptError> {
        // Matching on an uninhabited type proves this can never execute.
        match *self {}
    }

    async fn unregister_interrupt(&self, _address: u64, _data: u32) {
        match *self {}
    }
}

impl SignalMsi for UnimplementedDevice {
    fn signal_msi(&self, _rid: u32, _address: u64, _data: u32) {
        match *self {}
    }
}
657
/// Access to a partition's synthetic interrupt controller (synic) for posting
/// messages and signaling events to the guest.
pub trait Synic: Send + Sync {
    /// Adds a fast path to signal `event` when the guest signals
    /// `connection_id` from VTL >= `minimum_vtl`.
    ///
    /// Returns `Ok(None)` if this acceleration is not supported (the default).
    fn new_host_event_port(
        &self,
        connection_id: u32,
        minimum_vtl: Vtl,
        event: &pal_event::Event,
    ) -> Result<Option<Box<dyn Sync + Send>>, vmcore::synic::Error> {
        let _ = (connection_id, minimum_vtl, event);
        Ok(None)
    }

    /// Posts a message of type `typ` with `payload` to the guest, targeting
    /// `sint` on the given VP and VTL.
    fn post_message(&self, vtl: Vtl, vp: VpIndex, sint: u8, typ: u32, payload: &[u8]);

    /// Creates a [`GuestEventPort`] for signaling VMBus channels in the guest.
    fn new_guest_event_port(
        &self,
        vtl: Vtl,
        vp: u32,
        sint: u8,
        flag: u16,
    ) -> Box<dyn GuestEventPort>;

    /// Returns whether callers should pass an OS event when creating event
    /// ports, as opposed to passing a function to call.
    ///
    /// This is true when the hypervisor can more quickly dispatch an OS event
    /// and resume the VP than it can take an intercept into user mode and call
    /// a function.
    fn prefer_os_events(&self) -> bool;

    /// Returns an object for manipulating the monitor page, or `None` if
    /// monitor pages aren't supported (the default).
    fn monitor_support(&self) -> Option<&dyn SynicMonitor> {
        None
    }
}
699
/// Provides monitor page functionality for a [`Synic`] implementation.
pub trait SynicMonitor: Synic {
    /// Registers a monitored interrupt. The returned struct will unregister
    /// the ID when dropped.
    ///
    /// # Panics
    ///
    /// Panics if `monitor_id` is already in use.
    fn register_monitor(&self, monitor_id: MonitorId, connection_id: u32) -> Box<dyn Sync + Send>;

    /// Sets the GPA of the monitor page currently in use.
    fn set_monitor_page(&self, vtl: Vtl, gpa: Option<u64>) -> anyhow::Result<()>;

    /// Allocates a monitor page and sets it as the monitor page currently in
    /// use. If allocating monitor pages is not supported, returns `Ok(None)`
    /// (the default).
    ///
    /// The page will be deallocated if the monitor page is subsequently
    /// changed or cleared using [`SynicMonitor::set_monitor_page`].
    fn allocate_monitor_page(&self, vtl: Vtl) -> anyhow::Result<Option<u64>> {
        let _ = vtl;
        Ok(None)
    }
}
722
/// MNF support routines for the emulator
pub trait EmulatorMonitorSupport {
    /// Check if the specified write is inside the monitor page, and signal the
    /// associated connection ID if it is.
    ///
    /// Returns `true` if the write targeted the monitor page and was handled.
    #[must_use]
    fn check_write(&self, gpa: u64, bytes: &[u8]) -> bool;

    /// Check if the specified read is inside the monitor page, and fill the
    /// provided buffer if it is.
    ///
    /// Returns `true` if the read targeted the monitor page and was handled.
    #[must_use]
    fn check_read(&self, gpa: u64, bytes: &mut [u8]) -> bool;
}