virt_mshv_vtl/
lib.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Implementation of the Underhill hypervisor backend, which uses
//! `/dev/mshv_vtl` to interact with the Microsoft hypervisor while running in
//! VTL2.

#![cfg(all(guest_is_native, target_os = "linux"))]

mod devmsr;

cfg_if::cfg_if!(
    if #[cfg(guest_arch = "x86_64")] {
        mod cvm_cpuid;
        pub use processor::snp::SnpBacked;
        pub use processor::tdx::TdxBacked;
        use crate::processor::HardwareIsolatedBacking;
        pub use crate::processor::mshv::x64::HypervisorBackedX86 as HypervisorBacked;
        use crate::processor::mshv::x64::HypervisorBackedX86Shared as HypervisorBackedShared;
        use bitvec::prelude::BitArray;
        use bitvec::prelude::Lsb0;
        use devmsr::MsrDevice;
        use hv1_emulator::hv::ProcessorVtlHv;
        use processor::LapicState;
        use processor::snp::SnpBackedShared;
        use processor::tdx::TdxBackedShared;
        use std::arch::x86_64::CpuidResult;
        use virt::CpuidLeaf;
        use virt::state::StateElement;
        use virt::vp::MpState;
        /// Bit array type for representing IRR bits in an x86-64 APIC.
        /// Each bit represents one of the 256 possible vectors.
        type IrrBitmap = BitArray<[u32; 8], Lsb0>;
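
        // A minimal sketch (test-only) of how the bitmap is used: each set bit
        // marks one of the 256 vectors as pending. The vector number 0x30 is
        // arbitrary, chosen for illustration.
        #[cfg(test)]
        mod irr_bitmap_example {
            use super::IrrBitmap;

            #[test]
            fn one_bit_per_vector() {
                let mut irr = IrrBitmap::new([0; 8]);
                // Mark vector 0x30 as pending.
                irr.set(0x30, true);
                // 8 x u32 = 256 bits, one per possible vector.
                assert_eq!(irr.len(), 256);
                assert_eq!(irr.iter_ones().collect::<Vec<_>>(), vec![0x30]);
            }
        }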
    } else if #[cfg(guest_arch = "aarch64")] {
        pub use crate::processor::mshv::arm64::HypervisorBackedArm64 as HypervisorBacked;
        use crate::processor::mshv::arm64::HypervisorBackedArm64Shared as HypervisorBackedShared;
    }
);

mod processor;
pub use processor::Backing;
pub use processor::UhProcessor;

use anyhow::Context as AnyhowContext;
use bitfield_struct::bitfield;
use bitvec::boxed::BitBox;
use bitvec::vec::BitVec;
use cvm_tracing::CVM_ALLOWED;
use guestmem::GuestMemory;
use guestmem::GuestMemoryBackingError;
use hcl::GuestVtl;
use hcl::ioctl::Hcl;
use hcl::ioctl::SetVsmPartitionConfigError;
use hv1_emulator::hv::GlobalHv;
use hv1_emulator::message_queues::MessageQueues;
use hv1_emulator::synic::GlobalSynic;
use hv1_emulator::synic::SintProxied;
use hv1_structs::VtlArray;
use hvdef::GuestCrashCtl;
use hvdef::HV_PAGE_SHIFT;
use hvdef::HV_PAGE_SIZE;
use hvdef::HV_PAGE_SIZE_USIZE;
use hvdef::HvError;
use hvdef::HvMapGpaFlags;
use hvdef::HvPartitionPrivilege;
use hvdef::HvRegisterName;
use hvdef::HvRegisterVsmPartitionConfig;
use hvdef::HvRegisterVsmPartitionStatus;
use hvdef::Vtl;
use hvdef::hypercall::HV_INTERCEPT_ACCESS_MASK_EXECUTE;
use hvdef::hypercall::HV_INTERCEPT_ACCESS_MASK_NONE;
use hvdef::hypercall::HV_INTERCEPT_ACCESS_MASK_READ_WRITE;
use hvdef::hypercall::HV_INTERCEPT_ACCESS_MASK_WRITE;
use hvdef::hypercall::HostVisibilityType;
use hvdef::hypercall::HvGuestOsId;
use hvdef::hypercall::HvInputVtl;
use hvdef::hypercall::HvInterceptParameters;
use hvdef::hypercall::HvInterceptType;
use inspect::Inspect;
use inspect::InspectMut;
use memory_range::MemoryRange;
use pal::unix::affinity;
use pal::unix::affinity::CpuSet;
use pal_async::driver::Driver;
use pal_async::driver::SpawnDriver;
use pal_uring::IdleControl;
use parking_lot::Mutex;
use parking_lot::RwLock;
use processor::BackingSharedParams;
use processor::SidecarExitReason;
use sidecar_client::NewSidecarClientError;
use std::collections::HashMap;
use std::ops::RangeInclusive;
use std::os::fd::AsRawFd;
use std::sync::Arc;
use std::sync::Weak;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicU8;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use std::task::Waker;
use thiserror::Error;
use user_driver::DmaClient;
use virt::IsolationType;
use virt::PartitionCapabilities;
use virt::VpIndex;
use virt::X86Partition;
use virt::irqcon::IoApicRouting;
use virt::irqcon::MsiRequest;
use virt::x86::apic_software_device::ApicSoftwareDevices;
use virt_support_apic::LocalApicSet;
use vm_topology::memory::MemoryLayout;
use vm_topology::processor::ProcessorTopology;
use vm_topology::processor::TargetVpInfo;
use vmcore::monitor::MonitorPage;
use vmcore::reference_time::GetReferenceTime;
use vmcore::reference_time::ReferenceTimeResult;
use vmcore::reference_time::ReferenceTimeSource;
use vmcore::vmtime::VmTimeSource;
use x86defs::snp::REG_TWEAK_BITMAP_OFFSET;
use x86defs::snp::REG_TWEAK_BITMAP_SIZE;
use x86defs::tdx::TdCallResult;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

/// General error returned by operations.
#[derive(Error, Debug)]
#[expect(missing_docs)]
pub enum Error {
    #[error("hcl error")]
    Hcl(#[source] hcl::ioctl::Error),
    #[error("failed to open sidecar client")]
    Sidecar(#[source] NewSidecarClientError),
    #[error("failed to install {0:?} intercept: {1:?}")]
    InstallIntercept(HvInterceptType, HvError),
    #[error("failed to query hypervisor register {0:#x?}")]
    Register(HvRegisterName, #[source] HvError),
    #[error("failed to set vsm partition config register")]
    VsmPartitionConfig(#[source] SetVsmPartitionConfigError),
    #[error("failed to create virtual device")]
    NewDevice(#[source] virt::x86::apic_software_device::DeviceIdInUse),
    #[error("failed to create cpuid tables for cvm")]
    #[cfg(guest_arch = "x86_64")]
    CvmCpuid(#[source] cvm_cpuid::CpuidResultsError),
    #[error("failed to update hypercall msr")]
    UpdateHypercallMsr,
    #[error("failed to update reference tsc msr")]
    UpdateReferenceTsc,
    #[error("failed to map overlay page")]
    MapOverlay(#[source] std::io::Error),
    #[error("failed to allocate shared visibility pages for overlay")]
    AllocateSharedVisOverlay(#[source] anyhow::Error),
    #[error("failed to open msr device")]
    OpenMsr(#[source] std::io::Error),
    #[error("cpuid did not contain valid TSC frequency information")]
    BadCpuidTsc,
    #[error("failed to read tsc frequency")]
    ReadTscFrequency(#[source] std::io::Error),
    #[error(
        "tsc frequency mismatch between hypervisor ({hv}) and hardware ({hw}), exceeds allowed error {allowed_error}"
    )]
    TscFrequencyMismatch {
        hv: u64,
        hw: u64,
        allowed_error: u64,
    },
    #[error("failed to set vsm partition config: {0:?}")]
    FailedToSetL2Ctls(TdCallResult),
    #[error("debugging is configured but the binary does not have the gdb feature")]
    InvalidDebugConfiguration,
    #[error("failed to allocate TLB flush page")]
    AllocateTlbFlushPage(#[source] anyhow::Error),
    #[error("host does not support required cpu capabilities")]
    Capabilities(virt::PartitionCapabilitiesError),
    #[error("failed to get register")]
    GetReg(#[source] hcl::ioctl::register::GetRegError),
    #[error("failed to set register")]
    SetReg(#[source] hcl::ioctl::register::SetRegError),
}

/// Error revoking guest VSM.
#[derive(Error, Debug)]
#[expect(missing_docs)]
pub enum RevokeGuestVsmError {
    #[error("failed to set vsm config")]
    SetGuestVsmConfig(#[source] hcl::ioctl::register::SetRegError),
    #[error("VTL 1 is already enabled")]
    Vtl1AlreadyEnabled,
}

/// Underhill partition.
#[derive(Inspect)]
pub struct UhPartition {
    #[inspect(flatten)]
    inner: Arc<UhPartitionInner>,
    // TODO: remove this extra indirection by refactoring some traits.
    #[inspect(skip)]
    interrupt_targets: VtlArray<Arc<UhInterruptTarget>, 2>,
}

/// Underhill partition.
#[derive(Inspect)]
#[inspect(extra = "UhPartitionInner::inspect_extra")]
struct UhPartitionInner {
    #[inspect(skip)]
    hcl: Hcl,
    #[inspect(skip)] // inspected separately
    vps: Vec<UhVpInner>,
    irq_routes: virt::irqcon::IrqRoutes,
    caps: PartitionCapabilities,
    #[inspect(skip)] // handled in `inspect_extra`
    enter_modes: Mutex<EnterModes>,
    #[inspect(skip)]
    enter_modes_atomic: AtomicU8,
    #[cfg(guest_arch = "x86_64")]
    cpuid: virt::CpuidLeafSet,
    lower_vtl_memory_layout: MemoryLayout,
    gm: VtlArray<GuestMemory, 2>,
    vtl0_kernel_exec_gm: GuestMemory,
    vtl0_user_exec_gm: GuestMemory,
    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    #[inspect(skip)]
    crash_notification_send: mesh::Sender<VtlCrash>,
    monitor_page: MonitorPage,
    #[inspect(skip)]
    allocated_monitor_page: Mutex<Option<user_driver::memory::MemoryBlock>>,
    software_devices: Option<ApicSoftwareDevices>,
    #[inspect(skip)]
    vmtime: VmTimeSource,
    isolation: IsolationType,
    #[inspect(with = "inspect::AtomicMut")]
    no_sidecar_hotplug: AtomicBool,
    use_mmio_hypercalls: bool,
    backing_shared: BackingShared,
    intercept_debug_exceptions: bool,
    #[cfg(guest_arch = "x86_64")]
    // N.B. For now there is only one device vector table, i.e. for VTL 0 only.
    #[inspect(hex, with = "|x| inspect::iter_by_index(x.read().into_inner())")]
    device_vector_table: RwLock<IrrBitmap>,
    vmbus_relay: bool,
}

#[derive(Inspect)]
#[inspect(untagged)]
enum BackingShared {
    Hypervisor(#[inspect(flatten)] HypervisorBackedShared),
    #[cfg(guest_arch = "x86_64")]
    Snp(#[inspect(flatten)] SnpBackedShared),
    #[cfg(guest_arch = "x86_64")]
    Tdx(#[inspect(flatten)] TdxBackedShared),
}

impl BackingShared {
    fn new(
        isolation: IsolationType,
        partition_params: &UhPartitionNewParams<'_>,
        backing_shared_params: BackingSharedParams<'_>,
    ) -> Result<BackingShared, Error> {
        Ok(match isolation {
            IsolationType::None | IsolationType::Vbs => {
                assert!(backing_shared_params.cvm_state.is_none());
                BackingShared::Hypervisor(HypervisorBackedShared::new(
                    partition_params,
                    backing_shared_params,
                )?)
            }
            #[cfg(guest_arch = "x86_64")]
            IsolationType::Snp => BackingShared::Snp(SnpBackedShared::new(
                partition_params,
                backing_shared_params,
            )?),
            #[cfg(guest_arch = "x86_64")]
            IsolationType::Tdx => BackingShared::Tdx(TdxBackedShared::new(
                partition_params,
                backing_shared_params,
            )?),
            #[cfg(not(guest_arch = "x86_64"))]
            _ => unreachable!(),
        })
    }

    fn cvm_state(&self) -> Option<&UhCvmPartitionState> {
        match self {
            BackingShared::Hypervisor(_) => None,
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(SnpBackedShared { cvm, .. })
            | BackingShared::Tdx(TdxBackedShared { cvm, .. }) => Some(cvm),
        }
    }

    fn untrusted_synic(&self) -> Option<&GlobalSynic> {
        match self {
            BackingShared::Hypervisor(_) => None,
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(_) => None,
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Tdx(s) => s.untrusted_synic.as_ref(),
        }
    }
}

#[derive(InspectMut, Copy, Clone)]
struct EnterModes {
    #[inspect(mut)]
    first: EnterMode,
    #[inspect(mut)]
    second: EnterMode,
}

impl Default for EnterModes {
    fn default() -> Self {
        Self {
            first: EnterMode::Fast,
            second: EnterMode::IdleToVtl0,
        }
    }
}

impl From<EnterModes> for hcl::protocol::EnterModes {
    fn from(value: EnterModes) -> Self {
        Self::new()
            .with_first(value.first.into())
            .with_second(value.second.into())
    }
}

#[derive(InspectMut, Copy, Clone)]
enum EnterMode {
    Fast,
    PlayIdle,
    IdleToVtl0,
}

impl From<EnterMode> for hcl::protocol::EnterMode {
    fn from(value: EnterMode) -> Self {
        match value {
            EnterMode::Fast => Self::FAST,
            EnterMode::PlayIdle => Self::PLAY_IDLE,
            EnterMode::IdleToVtl0 => Self::IDLE_TO_VTL0,
        }
    }
}

#[cfg(guest_arch = "x86_64")]
#[derive(Inspect)]
struct GuestVsmVpState {
    /// The pending event that VTL 1 wants to inject into VTL 0. Injected on
    /// next exit to VTL 0.
    #[inspect(with = "|x| x.as_ref().map(inspect::AsDebug)")]
    vtl0_exit_pending_event: Option<hvdef::HvX64PendingExceptionEvent>,
    reg_intercept: SecureRegisterInterceptState,
}

#[cfg(guest_arch = "x86_64")]
impl GuestVsmVpState {
    fn new() -> Self {
        GuestVsmVpState {
            vtl0_exit_pending_event: None,
            reg_intercept: Default::default(),
        }
    }
}

#[cfg(guest_arch = "x86_64")]
#[derive(Inspect)]
/// VP state for CVMs.
struct UhCvmVpState {
    // Allocation handle for direct overlays
    #[inspect(debug)]
    direct_overlay_handle: user_driver::memory::MemoryBlock,
    /// Used in VTL 2 exit code to determine which VTL to exit to.
    exit_vtl: GuestVtl,
    /// Hypervisor enlightenment emulator state.
    hv: VtlArray<ProcessorVtlHv, 2>,
    /// LAPIC state.
    lapics: VtlArray<LapicState, 2>,
    /// Guest VSM state for this vp. Some when VTL 1 is enabled.
    vtl1: Option<GuestVsmVpState>,
}

#[cfg(guest_arch = "x86_64")]
impl UhCvmVpState {
    /// Creates a new CVM VP state.
    pub(crate) fn new(
        cvm_partition: &UhCvmPartitionState,
        inner: &UhPartitionInner,
        vp_info: &TargetVpInfo,
        overlay_pages_required: usize,
    ) -> Result<Self, Error> {
        let direct_overlay_handle = cvm_partition
            .shared_dma_client
            .allocate_dma_buffer(overlay_pages_required * HV_PAGE_SIZE as usize)
            .map_err(Error::AllocateSharedVisOverlay)?;

        let apic_base = virt::vp::Apic::at_reset(&inner.caps, vp_info).apic_base;
        let lapics = VtlArray::from_fn(|vtl| {
            let apic_set = &cvm_partition.lapic[vtl];

            // The APIC is software-enabled after reset for secure VTLs, to
            // maintain compatibility with released versions of the secure kernel.
            let mut lapic = apic_set.add_apic(vp_info, vtl == Vtl::Vtl1);
            // Initialize APIC base to match the reset VM state.
            lapic.set_apic_base(apic_base).unwrap();
            // Only the VTL 0 non-BSP LAPICs should be in the WaitForSipi state.
            let activity = if vtl == Vtl::Vtl0 && !vp_info.base.is_bsp() {
                MpState::WaitForSipi
            } else {
                MpState::Running
            };
            LapicState::new(lapic, activity)
        });

        let hv = VtlArray::from_fn(|vtl| cvm_partition.hv.add_vp(vp_info.base.vp_index, vtl));

        Ok(Self {
            direct_overlay_handle,
            exit_vtl: GuestVtl::Vtl0,
            hv,
            lapics,
            vtl1: None,
        })
    }
}

#[cfg(guest_arch = "x86_64")]
#[derive(Inspect, Default)]
#[inspect(hex)]
/// Configuration of VTL 1 registration for intercepts on certain registers
pub struct SecureRegisterInterceptState {
    #[inspect(with = "|&x| u64::from(x)")]
    intercept_control: hvdef::HvRegisterCrInterceptControl,
    cr0_mask: u64,
    cr4_mask: u64,
    // Writes to X86X_IA32_MSR_MISC_ENABLE are dropped, so this is tracked only
    // so that get_vp_register returns the value most recently set via set_vp_register.
    ia32_misc_enable_mask: u64,
}

/// Information about a redirected interrupt for a specific vector.
/// Stored per-processor, indexed by the redirected vector number in VTL2.
#[derive(Clone, Inspect)]
struct ProxyRedirectVectorInfo {
    /// Device ID that owns this interrupt
    device_id: u64,
    /// Original interrupt vector from the device
    original_vector: u32,
}

#[derive(Inspect)]
/// Partition-wide state for CVMs.
struct UhCvmPartitionState {
    #[cfg(guest_arch = "x86_64")]
    vps_per_socket: u32,
    /// VPs that have locked their TLB.
    #[inspect(
        with = "|arr| inspect::iter_by_index(arr.iter()).map_value(|bb| inspect::iter_by_index(bb.iter().map(|v| *v)))"
    )]
    tlb_locked_vps: VtlArray<BitBox<AtomicU64>, 2>,
    #[inspect(with = "inspect::iter_by_index")]
    vps: Vec<UhCvmVpInner>,
    shared_memory: GuestMemory,
    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    #[inspect(skip)]
    isolated_memory_protector: Arc<dyn ProtectIsolatedMemory>,
    /// The emulated local APIC set.
    lapic: VtlArray<LocalApicSet, 2>,
    /// The emulated hypervisor state.
    hv: GlobalHv<2>,
    /// Guest VSM state.
    guest_vsm: RwLock<GuestVsmState<CvmVtl1State>>,
    /// Dma client for shared visibility pages.
    shared_dma_client: Arc<dyn DmaClient>,
    /// Dma client for private visibility pages.
    private_dma_client: Arc<dyn DmaClient>,
    hide_isolation: bool,
    proxy_interrupt_redirect: bool,
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
impl UhCvmPartitionState {
    fn vp_inner(&self, vp_index: u32) -> &UhCvmVpInner {
        &self.vps[vp_index as usize]
    }

    fn is_lower_vtl_startup_denied(&self) -> bool {
        matches!(
            *self.guest_vsm.read(),
            GuestVsmState::Enabled {
                vtl1: CvmVtl1State {
                    deny_lower_vtl_startup: true,
                    ..
                }
            }
        )
    }
}

#[derive(Inspect)]
/// Per-vp state for CVMs.
struct UhCvmVpInner {
    /// The current status of TLB locks
    tlb_lock_info: VtlArray<TlbLockInfo, 2>,
    /// Whether EnableVpVtl for VTL 1 has been called on this VP.
    vtl1_enable_called: Mutex<bool>,
    /// Whether the VP has been started via the StartVp hypercall.
    started: AtomicBool,
    /// Start context for StartVp and EnableVpVtl calls.
    #[inspect(with = "|arr| inspect::iter_by_index(arr.iter().map(|v| v.lock().is_some()))")]
    hv_start_enable_vtl_vp: VtlArray<Mutex<Option<Box<VpStartEnableVtl>>>, 2>,
    /// Tracking of proxy redirect interrupts mapped on this VP.
    #[inspect(with = "|x| inspect::adhoc(|req| inspect::iter_by_key(&*x.lock()).inspect(req))")]
    proxy_redirect_interrupts: Mutex<HashMap<u32, ProxyRedirectVectorInfo>>,
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
#[derive(Inspect)]
#[inspect(tag = "guest_vsm_state")]
/// Partition-wide state for guest vsm.
enum GuestVsmState<T: Inspect> {
    NotPlatformSupported,
    NotGuestEnabled,
    Enabled {
        #[inspect(flatten)]
        vtl1: T,
    },
}

impl<T: Inspect> GuestVsmState<T> {
    pub fn from_availability(guest_vsm_available: bool) -> Self {
        if guest_vsm_available {
            GuestVsmState::NotGuestEnabled
        } else {
            GuestVsmState::NotPlatformSupported
        }
    }
}

#[derive(Inspect)]
struct CvmVtl1State {
    /// Whether VTL 1 has been enabled on any vp
    enabled_on_any_vp: bool,
    /// Whether guest memory should be zeroed when the partition resets.
    zero_memory_on_reset: bool,
    /// Whether a vp can be started or reset by a lower vtl.
    deny_lower_vtl_startup: bool,
    /// Whether Mode-Based Execution Control should be enforced on lower VTLs.
    pub mbec_enabled: bool,
    /// Whether shadow supervisor stack is enabled.
    pub shadow_supervisor_stack_enabled: bool,
    #[inspect(with = "|bb| inspect::iter_by_index(bb.iter().map(|v| *v))")]
    io_read_intercepts: BitBox<u64>,
    #[inspect(with = "|bb| inspect::iter_by_index(bb.iter().map(|v| *v))")]
    io_write_intercepts: BitBox<u64>,
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
impl CvmVtl1State {
    fn new(mbec_enabled: bool) -> Self {
        Self {
            enabled_on_any_vp: false,
            zero_memory_on_reset: false,
            deny_lower_vtl_startup: false,
            mbec_enabled,
            shadow_supervisor_stack_enabled: false,
            io_read_intercepts: BitVec::repeat(false, u16::MAX as usize + 1).into_boxed_bitslice(),
            io_write_intercepts: BitVec::repeat(false, u16::MAX as usize + 1).into_boxed_bitslice(),
        }
    }
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
struct TscReferenceTimeSource {
    tsc_scale: u64,
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
impl TscReferenceTimeSource {
    fn new(tsc_frequency: u64) -> Self {
        TscReferenceTimeSource {
            tsc_scale: (((10_000_000_u128) << 64) / tsc_frequency as u128) as u64,
        }
    }
}

/// A time implementation based on TSC.
impl GetReferenceTime for TscReferenceTimeSource {
    fn now(&self) -> ReferenceTimeResult {
        #[cfg(guest_arch = "x86_64")]
        {
            let tsc = safe_intrinsics::rdtsc();
            let ref_time = ((self.tsc_scale as u128 * tsc as u128) >> 64) as u64;
            ReferenceTimeResult {
                ref_time,
                system_time: None,
            }
        }

        #[cfg(guest_arch = "aarch64")]
        {
            todo!("AARCH64_TODO");
        }
    }
}
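
// A worked sketch (test-only) of the fixed-point conversion above: `tsc_scale`
// holds the number of 100ns reference ticks per TSC tick in 64.64 fixed point,
// so `now` computes `(tsc_scale * tsc) >> 64`. The 2.56 GHz frequency below is
// hypothetical, chosen so the division in `new` is exact; with it, one second
// of TSC ticks converts to exactly 10,000,000 reference-time units.
#[cfg(test)]
mod tsc_reference_time_example {
    use super::TscReferenceTimeSource;

    #[test]
    fn one_second_of_tsc_is_ten_million_ref_ticks() {
        let tsc_frequency: u64 = 2_560_000_000;
        let source = TscReferenceTimeSource::new(tsc_frequency);
        // The same multiply-then-shift performed by `now`, applied to a
        // one-second TSC delta instead of a live rdtsc reading.
        let ref_time = ((source.tsc_scale as u128 * tsc_frequency as u128) >> 64) as u64;
        assert_eq!(ref_time, 10_000_000);
    }
}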

impl virt::irqcon::ControlGic for UhPartitionInner {
    fn set_spi_irq(&self, irq_id: u32, high: bool) {
        if let Err(err) = self.hcl.request_interrupt(
            hvdef::HvInterruptControl::new()
                .with_arm64_asserted(high)
                .with_interrupt_type(hvdef::HvInterruptType::HvArm64InterruptTypeFixed),
            0,
            irq_id,
            GuestVtl::Vtl0,
        ) {
            tracelimit::warn_ratelimited!(
                error = &err as &dyn std::error::Error,
                irq = irq_id,
                asserted = high,
                "failed to request spi"
            );
        }
    }
}

impl virt::Aarch64Partition for UhPartition {
    fn control_gic(&self, vtl: Vtl) -> Arc<dyn virt::irqcon::ControlGic> {
        debug_assert!(vtl == Vtl::Vtl0);
        self.inner.clone()
    }
}

/// A wrapper around [`UhProcessor`] that is [`Send`].
///
/// This is used to instantiate the processor object on the correct thread,
/// since all lower VTL processor state accesses must occur from the same
/// processor at VTL2.
pub struct UhProcessorBox {
    partition: Arc<UhPartitionInner>,
    vp_info: TargetVpInfo,
}

impl UhProcessorBox {
    /// Returns the VP index.
    pub fn vp_index(&self) -> VpIndex {
        self.vp_info.base.vp_index
    }

    /// Returns the base CPU that manages this processor, when it is a sidecar
    /// VP.
    pub fn sidecar_base_cpu(&self) -> Option<u32> {
        self.partition
            .hcl
            .sidecar_base_cpu(self.vp_info.base.vp_index.index())
    }

    /// Returns the processor object, bound to this thread.
    ///
    /// If `control` is provided, then this must be called on the VP's
    /// associated thread pool thread, and it will dispatch the VP directly.
    /// Otherwise, the returned object will control the processor via the
    /// sidecar kernel.
    pub fn bind_processor<'a, T: Backing>(
        &'a mut self,
        driver: &impl Driver,
        control: Option<&'a mut IdleControl>,
    ) -> Result<UhProcessor<'a, T>, Error> {
        if let Some(control) = &control {
            let vp_index = self.vp_info.base.vp_index;

            let mut current = Default::default();
            affinity::get_current_thread_affinity(&mut current).unwrap();
            assert_eq!(&current, CpuSet::new().set(vp_index.index()));

            self.partition
                .hcl
                .set_poll_file(
                    self.partition.vp(vp_index).unwrap().cpu_index,
                    control.ring_fd().as_raw_fd(),
                )
                .map_err(Error::Hcl)?;
        }

        UhProcessor::new(driver, &self.partition, self.vp_info, control)
    }

    /// Sets the sidecar exit reason for the processor, indicating that it was
    /// removed due to a task running with the given name.
    ///
    /// This is useful for diagnostics.
    pub fn set_sidecar_exit_due_to_task(&self, task: Arc<str>) {
        self.partition
            .vp(self.vp_info.base.vp_index)
            .unwrap()
            .set_sidecar_exit_reason(SidecarExitReason::TaskRequest(task))
    }
}

#[derive(Debug, Inspect)]
struct UhVpInner {
    /// 32 bits per VTL: top bits are VTL 1, bottom bits are VTL 0.
    wake_reasons: AtomicU64,
    #[inspect(skip)]
    waker: RwLock<Option<Waker>>,
    message_queues: VtlArray<MessageQueues, 2>,
    #[inspect(skip)]
    vp_info: TargetVpInfo,
    /// The Linux kernel's CPU index for this VP. This should be used instead of VpIndex
    /// when interacting with non-MSHV kernel interfaces.
    cpu_index: u32,
    sidecar_exit_reason: Mutex<Option<SidecarExitReason>>,
}

impl UhVpInner {
    pub fn vp_index(&self) -> VpIndex {
        self.vp_info.base.vp_index
    }
}

#[cfg_attr(not(guest_arch = "x86_64"), expect(dead_code))]
#[derive(Debug, Inspect)]
/// Which operation is setting the initial vp context
enum InitialVpContextOperation {
    /// The VP is being started via the StartVp hypercall.
    StartVp,
    /// The VP is being started via the EnableVpVtl hypercall.
    EnableVpVtl,
}

#[cfg_attr(not(guest_arch = "x86_64"), expect(dead_code))]
#[derive(Debug, Inspect)]
/// State for handling StartVp/EnableVpVtl hypercalls.
struct VpStartEnableVtl {
    /// Which operation, startvp or enablevpvtl, is setting the initial vp
    /// context
    operation: InitialVpContextOperation,
    #[inspect(skip)]
    context: hvdef::hypercall::InitialVpContextX64,
}

#[derive(Debug, Inspect)]
struct TlbLockInfo {
    /// The set of VPs that are waiting for this VP to release the TLB lock.
    #[inspect(with = "|bb| inspect::iter_by_index(bb.iter().map(|v| *v))")]
    blocked_vps: BitBox<AtomicU64>,
    /// The set of VPs that are holding the TLB lock and preventing this VP
    /// from proceeding.
    #[inspect(with = "|bb| inspect::iter_by_index(bb.iter().map(|v| *v))")]
    blocking_vps: BitBox<AtomicU64>,
    /// The count of blocking VPs. This should always be equivalent to
    /// `blocking_vps.count_ones()`; however, it is accessible in a single
    /// atomic operation, while counting is not.
    blocking_vp_count: AtomicU32,
    /// Whether the VP is sleeping due to a TLB lock.
    sleeping: AtomicBool,
}

#[cfg_attr(not(guest_arch = "x86_64"), expect(dead_code))]
impl TlbLockInfo {
    fn new(vp_count: usize) -> Self {
        Self {
            blocked_vps: BitVec::repeat(false, vp_count).into_boxed_bitslice(),
            blocking_vps: BitVec::repeat(false, vp_count).into_boxed_bitslice(),
            blocking_vp_count: AtomicU32::new(0),
            sleeping: false.into(),
        }
    }
}
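
// A minimal sketch (test-only) of the invariant documented on
// `blocking_vp_count`: callers update the counter alongside the bitmap so
// that readers can observe the count with a single atomic load.
#[cfg(test)]
mod tlb_lock_info_example {
    use super::TlbLockInfo;
    use std::sync::atomic::Ordering;

    #[test]
    fn count_mirrors_bitmap() {
        let mut info = TlbLockInfo::new(4);
        // Record that VP 2 is blocking this VP, maintaining both forms.
        info.blocking_vps.set(2, true);
        info.blocking_vp_count.fetch_add(1, Ordering::Relaxed);
        assert_eq!(
            info.blocking_vps.count_ones(),
            info.blocking_vp_count.load(Ordering::Relaxed) as usize
        );
    }
}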

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
struct WakeReason {
    extint: bool,
    message_queues: bool,
    hv_start_enable_vtl_vp: bool,
    intcon: bool,
    update_proxy_irr_filter: bool,
    #[bits(27)]
    _reserved: u32,
}

impl WakeReason {
    // Convenient constants.
    const EXTINT: Self = Self::new().with_extint(true);
    const MESSAGE_QUEUES: Self = Self::new().with_message_queues(true);
    #[cfg(guest_arch = "x86_64")]
    const HV_START_ENABLE_VP_VTL: Self = Self::new().with_hv_start_enable_vtl_vp(true); // StartVp/EnableVpVtl handling
    const INTCON: Self = Self::new().with_intcon(true);
    #[cfg(guest_arch = "x86_64")]
    const UPDATE_PROXY_IRR_FILTER: Self = Self::new().with_update_proxy_irr_filter(true);
}
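
// A minimal sketch (test-only) of how these reasons pack into
// `UhVpInner::wake_reasons`: each VTL gets 32 bits of the atomic u64, with
// VTL 1 in the high half and VTL 0 in the low half.
#[cfg(test)]
mod wake_reason_packing_example {
    use super::WakeReason;

    #[test]
    fn vtl1_reasons_occupy_the_high_half() {
        let reason = u32::from(WakeReason::EXTINT);
        let packed_for_vtl0 = reason as u64; // VTL 0: low 32 bits
        let packed_for_vtl1 = (reason as u64) << 32; // VTL 1: high 32 bits
        assert_eq!(packed_for_vtl0, 0x1);
        assert_eq!(packed_for_vtl1, 0x1_0000_0000);
    }
}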

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
struct ExitActivity {
    pending_event: bool,
    #[bits(31)]
    _reserved: u32,
}

/// Immutable access to useful bits of Partition state.
impl UhPartition {
    /// Revokes guest VSM.
    pub fn revoke_guest_vsm(&self) -> Result<(), RevokeGuestVsmError> {
        fn revoke<T: Inspect>(vsm_state: &mut GuestVsmState<T>) -> Result<(), RevokeGuestVsmError> {
            if matches!(vsm_state, GuestVsmState::Enabled { .. }) {
                return Err(RevokeGuestVsmError::Vtl1AlreadyEnabled);
            }
            *vsm_state = GuestVsmState::NotPlatformSupported;
            Ok(())
        }

        match &self.inner.backing_shared {
            BackingShared::Hypervisor(s) => {
                revoke(&mut *s.guest_vsm.write())?;
                self.inner
                    .hcl
                    .set_guest_vsm_partition_config(false)
                    .map_err(RevokeGuestVsmError::SetGuestVsmConfig)?;
            }
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(SnpBackedShared { cvm, .. })
            | BackingShared::Tdx(TdxBackedShared { cvm, .. }) => {
                revoke(&mut *cvm.guest_vsm.write())?;
            }
        };

        Ok(())
    }

    /// Returns the current hypervisor reference time, in 100ns units.
    pub fn reference_time(&self) -> u64 {
        if let Some(hv) = self.inner.hv() {
            hv.ref_time_source().now().ref_time
        } else {
            self.inner
                .hcl
                .reference_time()
                .expect("should not fail to get the reference time")
        }
    }
}

impl virt::Partition for UhPartition {
    fn supports_reset(&self) -> Option<&dyn virt::ResetPartition<Error = Self::Error>> {
        None
    }

    fn caps(&self) -> &PartitionCapabilities {
        &self.inner.caps
    }

    fn request_msi(&self, vtl: Vtl, request: MsiRequest) {
        self.inner
            .request_msi(vtl.try_into().expect("higher vtl not configured"), request)
    }

    fn request_yield(&self, _vp_index: VpIndex) {
        unimplemented!()
    }
}

impl X86Partition for UhPartition {
    fn ioapic_routing(&self) -> Arc<dyn IoApicRouting> {
        self.inner.clone()
    }

    fn pulse_lint(&self, vp_index: VpIndex, vtl: Vtl, lint: u8) {
        let vtl = GuestVtl::try_from(vtl).expect("higher vtl not configured");
        if let Some(apic) = &self.inner.lapic(vtl) {
            apic.lint(vp_index, lint.into(), |vp_index| {
                self.inner
                    .vp(vp_index)
                    .unwrap()
                    .wake(vtl, WakeReason::INTCON);
            });
        } else if lint == 0 {
            self.inner
                .vp(vp_index)
                .unwrap()
                .wake(vtl, WakeReason::EXTINT);
        } else {
            unimplemented!()
        }
    }
}

impl UhPartitionInner {
    fn vp(&self, index: VpIndex) -> Option<&'_ UhVpInner> {
        self.vps.get(index.index() as usize)
    }

    fn lapic(&self, vtl: GuestVtl) -> Option<&LocalApicSet> {
        self.backing_shared.cvm_state().map(|x| &x.lapic[vtl])
    }

    fn hv(&self) -> Option<&GlobalHv<2>> {
        self.backing_shared.cvm_state().map(|x| &x.hv)
    }

    /// For requester VP to issue `proxy_irr_blocked` update to other VPs
    #[cfg(guest_arch = "x86_64")]
    fn request_proxy_irr_filter_update(
        &self,
        vtl: GuestVtl,
        device_vector: u8,
        req_vp_index: VpIndex,
    ) {
        tracing::debug!(
            ?vtl,
            device_vector,
            req_vp_index = req_vp_index.index(),
            "request_proxy_irr_filter_update"
        );

        // Add given vector to partition global device vector table (VTL0 only for now)
        {
            let mut device_vector_table = self.device_vector_table.write();
            device_vector_table.set(device_vector as usize, true);
        }

        // Wake all other VPs for their `proxy_irr_blocked` filter update
        for vp in self.vps.iter() {
            if vp.vp_index() != req_vp_index {
                vp.wake(vtl, WakeReason::UPDATE_PROXY_IRR_FILTER);
            }
        }
    }

    /// Get current partition global device irr vectors (VTL0 for now)
    #[cfg(guest_arch = "x86_64")]
    fn fill_device_vectors(&self, _vtl: GuestVtl, irr_vectors: &mut IrrBitmap) {
        let device_vector_table = self.device_vector_table.read();
        for idx in device_vector_table.iter_ones() {
            irr_vectors.set(idx, true);
        }
    }

    fn inspect_extra(&self, resp: &mut inspect::Response<'_>) {
        let mut wake_vps = false;
        resp.field_mut(
            "enter_modes",
            &mut inspect::adhoc_mut(|req| {
                let update = req.is_update();
                {
                    let mut modes = self.enter_modes.lock();
                    modes.inspect_mut(req);
                    if update {
                        self.enter_modes_atomic.store(
                            hcl::protocol::EnterModes::from(*modes).into(),
                            Ordering::Relaxed,
                        );
                        wake_vps = true;
                    }
                }
            }),
        );

        // Wake VPs to propagate updates.
        if wake_vps {
            for vp in self.vps.iter() {
                vp.wake_vtl2();
            }
        }
    }

    // TODO VBS GUEST VSM: enable for aarch64
    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    fn vsm_status(
        &self,
    ) -> Result<HvRegisterVsmPartitionStatus, hcl::ioctl::register::GetRegError> {
        // TODO: It might be possible to cache VsmPartitionStatus.
        self.hcl.get_vsm_partition_status()
    }
}

impl virt::Synic for UhPartition {
    fn post_message(&self, vtl: Vtl, vp_index: VpIndex, sint: u8, typ: u32, payload: &[u8]) {
        let vtl = GuestVtl::try_from(vtl).expect("higher vtl not configured");
        let Some(vp) = self.inner.vp(vp_index) else {
            tracelimit::warn_ratelimited!(
                CVM_ALLOWED,
                vp = vp_index.index(),
                "invalid vp target for post_message"
            );
            return;
        };

        vp.post_message(
            vtl,
            sint,
            &hvdef::HvMessage::new(hvdef::HvMessageType(typ), 0, payload),
        );
    }

    fn new_guest_event_port(
        &self,
        vtl: Vtl,
        vp: u32,
        sint: u8,
        flag: u16,
    ) -> Box<dyn vmcore::synic::GuestEventPort> {
        let vtl = GuestVtl::try_from(vtl).expect("higher vtl not configured");
        Box::new(UhEventPort {
            partition: Arc::downgrade(&self.inner),
            params: Arc::new(Mutex::new(UhEventPortParams {
                vp: VpIndex::new(vp),
                sint,
                flag,
                vtl,
            })),
        })
    }

    fn prefer_os_events(&self) -> bool {
        false
    }

    fn monitor_support(&self) -> Option<&dyn virt::SynicMonitor> {
        Some(self)
    }
}

impl virt::SynicMonitor for UhPartition {
    fn set_monitor_page(&self, vtl: Vtl, gpa: Option<u64>) -> anyhow::Result<()> {
        // Keep this locked the whole function to avoid racing with allocate_monitor_page.
        let mut allocated_block = self.inner.allocated_monitor_page.lock();
        let old_gpa = self.inner.monitor_page.set_gpa(gpa);

        // Take ownership of any allocated monitor page so it will be freed on function exit.
        let allocated_page = allocated_block.take();
        if let Some(old_gpa) = old_gpa {
            let allocated_gpa = allocated_page
                .as_ref()
                .map(|b| b.pfns()[0] << HV_PAGE_SHIFT);

            // Revert the old page's permissions, using the appropriate method depending on
            // whether it was allocated or guest-supplied.
            let result = if allocated_gpa == Some(old_gpa) {
                let vtl = GuestVtl::try_from(vtl).unwrap();
                self.unregister_cvm_dma_overlay_page(vtl, old_gpa >> HV_PAGE_SHIFT)
            } else {
                self.inner
                    .hcl
                    .modify_vtl_protection_mask(
                        MemoryRange::new(old_gpa..old_gpa + HV_PAGE_SIZE),
                        hvdef::HV_MAP_GPA_PERMISSIONS_ALL,
                        HvInputVtl::CURRENT_VTL,
                    )
                    .map_err(|err| anyhow::anyhow!(err))
            };

            result
                .context("failed to unregister old monitor page")
                .inspect_err(|_| {
                    // Leave the page unset if returning a failure.
                    self.inner.monitor_page.set_gpa(None);
                })?;

            tracing::debug!(old_gpa, "unregistered monitor page");
        }

        if let Some(gpa) = gpa {
            // Disallow VTL0 from writing to the page, so we'll get an intercept. Note that read
            // permissions must be enabled or this doesn't work correctly.
            self.inner
                .hcl
                .modify_vtl_protection_mask(
                    MemoryRange::new(gpa..gpa + HV_PAGE_SIZE),
                    HvMapGpaFlags::new().with_readable(true),
                    HvInputVtl::CURRENT_VTL,
                )
                .context("failed to register monitor page")
                .inspect_err(|_| {
                    // Leave the page unset if returning a failure.
                    self.inner.monitor_page.set_gpa(None);
                })?;

            tracing::debug!(gpa, "registered monitor page");
        }

        Ok(())
    }

    fn register_monitor(
        &self,
        monitor_id: vmcore::monitor::MonitorId,
        connection_id: u32,
    ) -> Box<dyn Sync + Send> {
        self.inner
            .monitor_page
            .register_monitor(monitor_id, connection_id)
    }

    fn allocate_monitor_page(&self, vtl: Vtl) -> anyhow::Result<Option<u64>> {
        let vtl = GuestVtl::try_from(vtl).unwrap();

        // Allocating a monitor page is only supported for CVMs.
        let Some(state) = self.inner.backing_shared.cvm_state() else {
            return Ok(None);
        };

        let mut allocated_block = self.inner.allocated_monitor_page.lock();
        if let Some(block) = allocated_block.as_ref() {
            // An allocated monitor page is already in use; no need to change it.
            let gpa = block.pfns()[0] << HV_PAGE_SHIFT;
            assert_eq!(self.inner.monitor_page.gpa(), Some(gpa));
            return Ok(Some(gpa));
        }

        let block = state
            .private_dma_client
            .allocate_dma_buffer(HV_PAGE_SIZE_USIZE)
            .context("failed to allocate monitor page")?;

        let gpn = block.pfns()[0];
        *allocated_block = Some(block);
        let gpa = gpn << HV_PAGE_SHIFT;
        let old_gpa = self.inner.monitor_page.set_gpa(Some(gpa));
        if let Some(old_gpa) = old_gpa {
            // The old GPA is guaranteed not to be allocated, since that was checked above, so
            // revert its permissions using the method for guest-supplied memory.
            self.inner
                .hcl
                .modify_vtl_protection_mask(
                    MemoryRange::new(old_gpa..old_gpa + HV_PAGE_SIZE),
                    hvdef::HV_MAP_GPA_PERMISSIONS_ALL,
                    HvInputVtl::CURRENT_VTL,
                )
                .context("failed to unregister old monitor page")
                .inspect_err(|_| {
                    // Leave the page unset if returning a failure.
                    self.inner.monitor_page.set_gpa(None);
                })?;

            tracing::debug!(old_gpa, "unregistered monitor page");
        }

        // Disallow VTL0 from writing to the page, so we'll get an intercept. Note that read
        // permissions must be enabled or this doesn't work correctly.
        self.register_cvm_dma_overlay_page(vtl, gpn, HvMapGpaFlags::new().with_readable(true))
            .context("failed to register monitor page")
            .inspect_err(|_| {
                // Leave the page unset if returning a failure.
                self.inner.monitor_page.set_gpa(None);
            })?;

        tracing::debug!(gpa, "registered allocated monitor page");

        Ok(Some(gpa))
    }
}

impl UhPartitionInner {
    #[cfg(guest_arch = "x86_64")]
    pub(crate) fn synic_interrupt(
        &self,
        vp_index: VpIndex,
        vtl: GuestVtl,
    ) -> impl '_ + hv1_emulator::RequestInterrupt {
        // TODO CVM: optimize for SNP with secure avic to avoid internal wake
        // and for TDX to avoid trip to user mode
        move |vector, auto_eoi| {
            self.lapic(vtl).unwrap().synic_interrupt(
                vp_index,
                vector as u8,
                auto_eoi,
                |vp_index| self.vp(vp_index).unwrap().wake(vtl, WakeReason::INTCON),
            );
        }
    }

    #[cfg(guest_arch = "aarch64")]
    fn synic_interrupt(
        &self,
        _vp_index: VpIndex,
        _vtl: GuestVtl,
    ) -> impl '_ + hv1_emulator::RequestInterrupt {
        move |_, _| {}
    }
}

#[derive(Debug)]
struct UhEventPort {
    partition: Weak<UhPartitionInner>,
    params: Arc<Mutex<UhEventPortParams>>,
}

#[derive(Debug, Copy, Clone)]
struct UhEventPortParams {
    vp: VpIndex,
    sint: u8,
    flag: u16,
    vtl: GuestVtl,
}

impl vmcore::synic::GuestEventPort for UhEventPort {
    fn interrupt(&self) -> vmcore::interrupt::Interrupt {
        let partition = self.partition.clone();
        let params = self.params.clone();
        vmcore::interrupt::Interrupt::from_fn(move || {
            let UhEventPortParams {
                vp,
                sint,
                flag,
                vtl,
            } = *params.lock();
            let Some(partition) = partition.upgrade() else {
                return;
            };
            tracing::trace!(vp = vp.index(), sint, flag, "signal_event");
            if let Some(hv) = partition.hv() {
                match hv.synic[vtl].signal_event(
                    vp,
                    sint,
                    flag,
                    &mut partition.synic_interrupt(vp, vtl),
                ) {
                    Ok(_) => {}
                    Err(SintProxied) => {
                        tracing::trace!(
                            vp = vp.index(),
                            sint,
                            flag,
                            "forwarding event to untrusted synic"
                        );
                        if let Some(synic) = partition.backing_shared.untrusted_synic() {
                            synic
                                .signal_event(
                                    vp,
                                    sint,
                                    flag,
                                    &mut partition.synic_interrupt(vp, vtl),
                                )
                                .ok();
                        } else {
                            partition.hcl.signal_event_direct(vp.index(), sint, flag)
                        }
                    }
                }
            } else {
                partition.hcl.signal_event_direct(vp.index(), sint, flag);
            }
        })
    }

    fn set_target_vp(&mut self, vp: u32) -> Result<(), vmcore::synic::HypervisorError> {
        self.params.lock().vp = VpIndex::new(vp);
        Ok(())
    }
}

impl virt::Hv1 for UhPartition {
    type Error = Error;
    type Device = virt::x86::apic_software_device::ApicSoftwareDevice;

    fn reference_time_source(&self) -> Option<ReferenceTimeSource> {
        Some(if let Some(hv) = self.inner.hv() {
            hv.ref_time_source().clone()
        } else {
            ReferenceTimeSource::from(self.inner.clone() as Arc<_>)
        })
    }

    fn new_virtual_device(
        &self,
    ) -> Option<&dyn virt::DeviceBuilder<Device = Self::Device, Error = Self::Error>> {
        self.inner.software_devices.is_some().then_some(self)
    }
}

impl GetReferenceTime for UhPartitionInner {
    fn now(&self) -> ReferenceTimeResult {
        ReferenceTimeResult {
            ref_time: self.hcl.reference_time().unwrap(),
            system_time: None,
        }
    }
}

impl virt::DeviceBuilder for UhPartition {
    fn build(&self, vtl: Vtl, device_id: u64) -> Result<Self::Device, Self::Error> {
        let vtl = GuestVtl::try_from(vtl).expect("higher vtl not configured");
        let device = self
            .inner
            .software_devices
            .as_ref()
            .expect("checked in new_virtual_device")
            .new_device(self.interrupt_targets[vtl].clone(), device_id)
            .map_err(Error::NewDevice)?;

        Ok(device)
    }
}

struct UhInterruptTarget {
    partition: Arc<UhPartitionInner>,
    vtl: GuestVtl,
}

impl pci_core::msi::MsiInterruptTarget for UhInterruptTarget {
    fn new_interrupt(&self) -> Box<dyn pci_core::msi::MsiControl> {
        let partition = self.partition.clone();
        let vtl = self.vtl;
        Box::new(move |address, data| partition.request_msi(vtl, MsiRequest { address, data }))
    }
}

impl UhPartitionInner {
    fn request_msi(&self, vtl: GuestVtl, request: MsiRequest) {
        if let Some(lapic) = self.lapic(vtl) {
            tracing::trace!(?request, "interrupt");
            lapic.request_interrupt(request.address, request.data, |vp_index| {
                self.vp(vp_index).unwrap().wake(vtl, WakeReason::INTCON)
            });
        } else {
            let (address, data) = request.as_x86();
            if let Err(err) = self.hcl.request_interrupt(
                request.hv_x86_interrupt_control(),
                address.virt_destination().into(),
                data.vector().into(),
                vtl,
            ) {
                tracelimit::warn_ratelimited!(
                    CVM_ALLOWED,
                    error = &err as &dyn std::error::Error,
                    address = request.address,
                    data = request.data,
                    "failed to request msi"
                );
            }
        }
    }
}

impl IoApicRouting for UhPartitionInner {
    fn set_irq_route(&self, irq: u8, request: Option<MsiRequest>) {
        self.irq_routes.set_irq_route(irq, request)
    }

    // The IO-APIC is always hooked up to VTL0.
    fn assert_irq(&self, irq: u8) {
        self.irq_routes
            .assert_irq(irq, |request| self.request_msi(GuestVtl::Vtl0, request))
    }
}

/// Configure the [`hvdef::HvRegisterVsmPartitionConfig`] register with the
/// values used by Underhill.
fn set_vtl2_vsm_partition_config(hcl: &Hcl) -> Result<(), Error> {
    // Read available capabilities to determine what to enable.
    let caps = hcl.get_vsm_capabilities().map_err(Error::GetReg)?;
    let hardware_isolated = hcl.isolation().is_hardware_isolated();
    let isolated = hcl.isolation().is_isolated();

    let config = HvRegisterVsmPartitionConfig::new()
        .with_default_vtl_protection_mask(0xF)
        .with_enable_vtl_protection(!hardware_isolated)
        .with_zero_memory_on_reset(!hardware_isolated)
        .with_intercept_cpuid_unimplemented(!hardware_isolated)
        .with_intercept_page(caps.intercept_page_available())
        .with_intercept_unrecoverable_exception(true)
        .with_intercept_not_present(caps.intercept_not_present_available() && !isolated)
        .with_intercept_acceptance(isolated)
        .with_intercept_enable_vtl_protection(isolated && !hardware_isolated)
        .with_intercept_system_reset(caps.intercept_system_reset_available());

    hcl.set_vtl2_vsm_partition_config(config)
        .map_err(Error::SetReg)
}

/// Configuration parameters supplied to [`UhProtoPartition::new`].
///
/// These do not include runtime resources.
pub struct UhPartitionNewParams<'a> {
    /// The isolation type for the partition.
    pub isolation: IsolationType,
    /// Hide isolation from the guest. The guest will run as if it is not
    /// isolated.
    pub hide_isolation: bool,
    /// The memory layout for lower VTLs.
    pub lower_vtl_memory_layout: &'a MemoryLayout,
    /// The guest processor topology.
    pub topology: &'a ProcessorTopology,
    /// The unparsed CVM cpuid info.
    // TODO: move parsing up a layer.
    pub cvm_cpuid_info: Option<&'a [u8]>,
    /// The unparsed CVM secrets page.
    pub snp_secrets: Option<&'a [u8]>,
    /// The virtual top of memory for hardware-isolated VMs.
    ///
    /// Must be a power of two.
    pub vtom: Option<u64>,
    /// Handle synic messages and events.
    ///
    /// On TDX, this prevents the hypervisor from getting vmtdcall exits.
    pub handle_synic: bool,
    /// Do not hotplug sidecar VPs on their first exit. Just continue running
    /// the VP remotely.
    pub no_sidecar_hotplug: bool,
    /// Use MMIO access hypercalls.
    pub use_mmio_hypercalls: bool,
    /// Intercept guest debug exceptions to support gdbstub.
    pub intercept_debug_exceptions: bool,
    /// Disable proxy interrupt redirection.
    pub disable_proxy_redirect: bool,
    /// Disable lower VTL timer virtualization.
    pub disable_lower_vtl_timer_virt: bool,
}

/// Parameters to [`UhProtoPartition::build`].
pub struct UhLateParams<'a> {
    /// Guest memory for lower VTLs.
    pub gm: VtlArray<GuestMemory, 2>,
    /// Guest memory for VTL 0 kernel execute access.
    pub vtl0_kernel_exec_gm: GuestMemory,
    /// Guest memory for VTL 0 user execute access.
    pub vtl0_user_exec_gm: GuestMemory,
    /// The CPUID leaves to expose to the guest.
    #[cfg(guest_arch = "x86_64")]
    pub cpuid: Vec<CpuidLeaf>,
    /// The mesh sender to use for crash notifications.
    // FUTURE: remove mesh dependency from this layer.
    pub crash_notification_send: mesh::Sender<VtlCrash>,
    /// The VM time source.
    pub vmtime: &'a VmTimeSource,
    /// Parameters for CVMs only.
    pub cvm_params: Option<CvmLateParams>,
    /// Whether the vmbus relay is enabled and active for the partition.
1433    pub vmbus_relay: bool,
1434}
1435
1436/// CVM-only parameters to [`UhProtoPartition::build`].
1437pub struct CvmLateParams {
1438    /// Guest memory for untrusted devices, like overlay pages.
1439    pub shared_gm: GuestMemory,
1440    /// An object to call to change host visibility on guest memory.
1441    pub isolated_memory_protector: Arc<dyn ProtectIsolatedMemory>,
1442    /// Dma client for shared visibility pages.
1443    pub shared_dma_client: Arc<dyn DmaClient>,
1444    /// Allocator for private visibility pages.
1445    pub private_dma_client: Arc<dyn DmaClient>,
1446}
1447
1448/// Represents a GPN that is either in guest memory or was allocated by dma_client.
1449#[derive(Debug, Copy, Clone, PartialEq, Eq)]
1450pub enum GpnSource {
1451    /// The GPN is in regular guest RAM.
1452    GuestMemory,
1453    /// The GPN was allocated by dma_client and is not in guest RAM.
1454    Dma,
1455}
1456
1457/// Trait for CVM-related protections on guest memory.
pub trait ProtectIsolatedMemory: Send + Sync {
    /// Changes host visibility on guest memory.
    fn change_host_visibility(
        &self,
        vtl: GuestVtl,
        shared: bool,
        gpns: &[u64],
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), (HvError, usize)>;

    /// Queries host visibility on guest memory.
    fn query_host_visibility(
        &self,
        gpns: &[u64],
        host_visibility: &mut [HostVisibilityType],
    ) -> Result<(), (HvError, usize)>;

    /// Gets the default protections/permissions for VTL 0.
    fn default_vtl0_protections(&self) -> HvMapGpaFlags;

    /// Changes the default protections/permissions for a VTL. For VBS-isolated
    /// VMs, the protections apply to all VTLs lower than the specified one. For
    /// hardware-isolated VMs, they apply just to the given VTL.
    fn change_default_vtl_protections(
        &self,
        target_vtl: GuestVtl,
        protections: HvMapGpaFlags,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), HvError>;

    /// Changes the VTL protections on a range of guest memory.
    fn change_vtl_protections(
        &self,
        target_vtl: GuestVtl,
        gpns: &[u64],
        protections: HvMapGpaFlags,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), (HvError, usize)>;

    /// Registers a page as an overlay page by first validating that it has the
    /// required permissions, optionally modifying them, and then locking them.
    fn register_overlay_page(
        &self,
        vtl: GuestVtl,
        gpn: u64,
        gpn_source: GpnSource,
        check_perms: HvMapGpaFlags,
        new_perms: Option<HvMapGpaFlags>,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), HvError>;

    /// Unregisters an overlay page, removing its permission lock and restoring
    /// the previous permissions.
    fn unregister_overlay_page(
        &self,
        vtl: GuestVtl,
        gpn: u64,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), HvError>;

    /// Checks whether a page is currently registered as an overlay page.
    fn is_overlay_page(&self, vtl: GuestVtl, gpn: u64) -> bool;

    /// Locks the permissions and mappings for a set of guest pages.
    fn lock_gpns(&self, vtl: GuestVtl, gpns: &[u64]) -> Result<(), GuestMemoryBackingError>;

    /// Unlocks the permissions and mappings for a set of guest pages.
    ///
    /// Panics if asked to unlock a page that was not previously locked. The
    /// caller must ensure that the given slice has the same ordering as the
    /// one passed to `lock_gpns`.
    fn unlock_gpns(&self, vtl: GuestVtl, gpns: &[u64]);

    /// Alerts the memory protector that VTL 1 is ready to set VTL protections
    /// on lower-VTL memory, and that these protections should be enforced.
    fn set_vtl1_protections_enabled(&self);

    /// Whether VTL 1 is prepared to modify VTL protections on lower-VTL memory,
    /// and therefore whether these protections should be enforced.
    fn vtl1_protections_enabled(&self) -> bool;
}

/// Trait for access to TLB flush and lock machinery.
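///
/// # Example
///
/// A hedged sketch of the typical pattern after changing memory protections
/// (the function name is illustrative):
///
/// ```ignore
/// fn after_protection_change(tlb: &mut dyn TlbFlushLockAccess, vtl: GuestVtl) {
///     // Invalidate stale translations on all VPs for this VTL...
///     tlb.flush(vtl);
///     // ...and make the current VP wait for outstanding TLB locks.
///     tlb.set_wait_for_tlb_locks(vtl);
/// }
/// ```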
pub trait TlbFlushLockAccess {
    /// Flush the entire TLB for all VPs for the given VTL.
    fn flush(&mut self, vtl: GuestVtl);

    /// Flush the entire TLB for all VPs for all VTLs.
    fn flush_entire(&mut self);

    /// Causes the specified VTL on the current VP to wait on all TLB locks.
    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl);
}

/// A partially built partition. Used to allow querying partition capabilities
/// before fully instantiating the partition.
pub struct UhProtoPartition<'a> {
    params: UhPartitionNewParams<'a>,
    hcl: Hcl,
    guest_vsm_available: bool,
    create_partition_available: bool,
    #[cfg(guest_arch = "x86_64")]
    cpuid: virt::CpuidLeafSet,
}

impl<'a> UhProtoPartition<'a> {
    /// Creates a new prototype partition.
    ///
    /// `driver(cpu)` returns the driver to use for polling the sidecar device
    /// whose base CPU is `cpu`.
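    ///
    /// # Example
    ///
    /// A sketch of the two-phase construction; `params`, `late_params`, and
    /// `spawner` are assumed to be provided by the caller:
    ///
    /// ```ignore
    /// let proto = UhProtoPartition::new(params, |_cpu| spawner.clone())?;
    /// // Capabilities can be queried before the partition is fully built.
    /// let guest_vsm = proto.guest_vsm_available();
    /// let (partition, vps) = proto.build(late_params).await?;
    /// ```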
    pub fn new<T: SpawnDriver>(
        params: UhPartitionNewParams<'a>,
        driver: impl FnMut(u32) -> T,
    ) -> Result<Self, Error> {
        let hcl_isolation = match params.isolation {
            IsolationType::None => hcl::ioctl::IsolationType::None,
            IsolationType::Vbs => hcl::ioctl::IsolationType::Vbs,
            IsolationType::Snp => hcl::ioctl::IsolationType::Snp,
            IsolationType::Tdx => hcl::ioctl::IsolationType::Tdx,
        };

        // Try to open the sidecar device, if it is present.
        let sidecar = sidecar_client::SidecarClient::new(driver).map_err(Error::Sidecar)?;

        let hcl = Hcl::new(hcl_isolation, sidecar).map_err(Error::Hcl)?;

        // Set the hypercalls that this process will use.
        let mut allowed_hypercalls = vec![
            hvdef::HypercallCode::HvCallGetVpRegisters,
            hvdef::HypercallCode::HvCallSetVpRegisters,
            hvdef::HypercallCode::HvCallInstallIntercept,
            hvdef::HypercallCode::HvCallTranslateVirtualAddress,
            hvdef::HypercallCode::HvCallPostMessageDirect,
            hvdef::HypercallCode::HvCallSignalEventDirect,
            hvdef::HypercallCode::HvCallModifyVtlProtectionMask,
            hvdef::HypercallCode::HvCallTranslateVirtualAddressEx,
            hvdef::HypercallCode::HvCallCheckSparseGpaPageVtlAccess,
            hvdef::HypercallCode::HvCallAssertVirtualInterrupt,
            hvdef::HypercallCode::HvCallGetVpIndexFromApicId,
            hvdef::HypercallCode::HvCallAcceptGpaPages,
            hvdef::HypercallCode::HvCallModifySparseGpaPageHostVisibility,
        ];

        if params.isolation.is_hardware_isolated() {
            allowed_hypercalls.extend(vec![
                hvdef::HypercallCode::HvCallEnablePartitionVtl,
                hvdef::HypercallCode::HvCallRetargetDeviceInterrupt,
                hvdef::HypercallCode::HvCallEnableVpVtl,
            ]);
        }

        if params.use_mmio_hypercalls {
            allowed_hypercalls.extend(vec![
                hvdef::HypercallCode::HvCallMemoryMappedIoRead,
                hvdef::HypercallCode::HvCallMemoryMappedIoWrite,
            ]);
        }

        hcl.set_allowed_hypercalls(allowed_hypercalls.as_slice());

        set_vtl2_vsm_partition_config(&hcl)?;

        let privs = hcl
            .get_privileges_and_features_info()
            .map_err(Error::GetReg)?;
        let guest_vsm_available = Self::check_guest_vsm_support(privs, &hcl)?;

        #[cfg(guest_arch = "x86_64")]
        let cpuid = match params.isolation {
            IsolationType::Snp => cvm_cpuid::CpuidResultsIsolationType::Snp {
                cpuid_pages: params.cvm_cpuid_info.unwrap(),
                vtom: params.vtom.unwrap(),
                access_vsm: guest_vsm_available,
            }
            .build()
            .map_err(Error::CvmCpuid)?,

            IsolationType::Tdx => cvm_cpuid::CpuidResultsIsolationType::Tdx {
                topology: params.topology,
                vtom: params.vtom.unwrap(),
                access_vsm: guest_vsm_available,
            }
            .build()
            .map_err(Error::CvmCpuid)?,
            IsolationType::Vbs | IsolationType::None => Default::default(),
        };

        Ok(UhProtoPartition {
            hcl,
            params,
            guest_vsm_available,
            create_partition_available: privs.create_partitions(),
            #[cfg(guest_arch = "x86_64")]
            cpuid,
        })
    }

    /// Returns whether VSM support will be available to the guest.
    pub fn guest_vsm_available(&self) -> bool {
        self.guest_vsm_available
    }

    /// Returns whether this partition has the create partitions hypercall
    /// available.
    pub fn create_partition_available(&self) -> bool {
        self.create_partition_available
    }

    /// Returns a new Underhill partition.
    pub async fn build(
        self,
        late_params: UhLateParams<'_>,
    ) -> Result<(UhPartition, Vec<UhProcessorBox>), Error> {
        let Self {
            mut hcl,
            params,
            guest_vsm_available,
            create_partition_available: _,
            #[cfg(guest_arch = "x86_64")]
            cpuid,
        } = self;
        let isolation = params.isolation;
        let is_hardware_isolated = isolation.is_hardware_isolated();

        // Intercept debug exceptions.
        //
        // On TDX, because all OpenHCL TDs today have the debug policy bit set,
        // OpenHCL registers for the intercept itself. On non-TDX platforms,
        // the hypervisor installs the intercept on behalf of the guest.
        if params.intercept_debug_exceptions {
            if !cfg!(feature = "gdb") {
                return Err(Error::InvalidDebugConfiguration);
            }

            cfg_if::cfg_if! {
                if #[cfg(guest_arch = "x86_64")] {
                    if isolation != IsolationType::Tdx {
                        let debug_exception_vector = 0x1;
                        hcl.register_intercept(
                            HvInterceptType::HvInterceptTypeException,
                            HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                            HvInterceptParameters::new_exception(debug_exception_vector),
                        )
                        .map_err(|err| Error::InstallIntercept(HvInterceptType::HvInterceptTypeException, err))?;
                    }
                } else {
                    return Err(Error::InvalidDebugConfiguration);
                }
            }
        }

        if !is_hardware_isolated {
            if cfg!(guest_arch = "x86_64") {
                hcl.register_intercept(
                    HvInterceptType::HvInterceptTypeX64Msr,
                    HV_INTERCEPT_ACCESS_MASK_READ_WRITE,
                    HvInterceptParameters::new_zeroed(),
                )
                .map_err(|err| {
                    Error::InstallIntercept(HvInterceptType::HvInterceptTypeX64Msr, err)
                })?;

                hcl.register_intercept(
                    HvInterceptType::HvInterceptTypeX64ApicEoi,
                    HV_INTERCEPT_ACCESS_MASK_WRITE,
                    HvInterceptParameters::new_zeroed(),
                )
                .map_err(|err| {
                    Error::InstallIntercept(HvInterceptType::HvInterceptTypeX64ApicEoi, err)
                })?;
            } else {
                if false {
                    todo!("AARCH64_TODO");
                }
            }
        }

        if isolation == IsolationType::Snp {
            // SNP VMs register for the #VC exception to support reflect-VC.
            hcl.register_intercept(
                HvInterceptType::HvInterceptTypeException,
                HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                HvInterceptParameters::new_exception(0x1D),
            )
            .map_err(|err| {
                Error::InstallIntercept(HvInterceptType::HvInterceptTypeException, err)
            })?;

            // Get the register tweak bitmap from the secrets page.
            let mut bitmap = [0u8; 64];
            if let Some(secrets) = params.snp_secrets {
                bitmap.copy_from_slice(
                    &secrets
                        [REG_TWEAK_BITMAP_OFFSET..REG_TWEAK_BITMAP_OFFSET + REG_TWEAK_BITMAP_SIZE],
                );
            }
            hcl.set_snp_register_bitmap(bitmap);
        }

        // Do per-VP HCL initialization.
        hcl.add_vps(
            params.topology.vp_count(),
            late_params
                .cvm_params
                .as_ref()
                .map(|x| &x.private_dma_client),
        )
        .map_err(Error::Hcl)?;

        let vps: Vec<_> = params
            .topology
            .vps_arch()
            .map(|vp_info| {
                // TODO: determine the CPU index, which in theory could be
                // different from the VP index, though this hasn't happened yet.
                let cpu_index = vp_info.base.vp_index.index();
                UhVpInner::new(cpu_index, vp_info)
            })
            .collect();

        // Enable support for VPCI devices if the hypervisor supports it.
        #[cfg(guest_arch = "x86_64")]
        let software_devices = {
            let res = if !is_hardware_isolated {
                hcl.register_intercept(
                    HvInterceptType::HvInterceptTypeRetargetInterruptWithUnknownDeviceId,
                    HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                    HvInterceptParameters::new_zeroed(),
                )
            } else {
                Ok(())
            };
            match res {
                Ok(()) => Some(ApicSoftwareDevices::new(
                    params.topology.vps_arch().map(|vp| vp.apic_id).collect(),
                )),
                Err(HvError::InvalidParameter | HvError::AccessDenied) => None,
                Err(err) => {
                    return Err(Error::InstallIntercept(
                        HvInterceptType::HvInterceptTypeRetargetInterruptWithUnknownDeviceId,
                        err,
                    ));
                }
            }
        };

        #[cfg(guest_arch = "aarch64")]
        let software_devices = None;

        #[cfg(guest_arch = "aarch64")]
        let caps = virt::aarch64::Aarch64PartitionCapabilities {};

        #[cfg(guest_arch = "x86_64")]
        let cpuid = UhPartition::construct_cpuid_results(
            cpuid,
            &late_params.cpuid,
            params.topology,
            isolation,
            params.hide_isolation,
        );

        #[cfg(guest_arch = "x86_64")]
        let caps = UhPartition::construct_capabilities(
            params.topology,
            &cpuid,
            isolation,
            params.hide_isolation,
        )
        .map_err(Error::Capabilities)?;

        if params.handle_synic && !matches!(isolation, IsolationType::Tdx) {
            // The hypervisor will manage the untrusted SINTs (or the whole
            // synic for non-hardware-isolated VMs), but some event ports
            // and message ports are implemented here. Register an intercept
            // to handle HvSignalEvent and HvPostMessage hypercalls when the
            // hypervisor doesn't recognize the connection ID.
            //
            // TDX manages this locally instead of through the hypervisor.
            hcl.register_intercept(
                HvInterceptType::HvInterceptTypeUnknownSynicConnection,
                HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                HvInterceptParameters::new_zeroed(),
            )
            .expect("registering synic intercept cannot fail");
        }

        #[cfg(guest_arch = "x86_64")]
        let cvm_state = if is_hardware_isolated {
            let vsm_caps = hcl.get_vsm_capabilities().map_err(Error::GetReg)?;
            let proxy_interrupt_redirect_available =
                vsm_caps.proxy_interrupt_redirect_available() && !params.disable_proxy_redirect;

            Some(Self::construct_cvm_state(
                &params,
                late_params.cvm_params.unwrap(),
                &caps,
                guest_vsm_available,
                proxy_interrupt_redirect_available,
            )?)
        } else {
            None
        };
        #[cfg(guest_arch = "aarch64")]
        let cvm_state = None;

        let lower_vtl_timer_virt_available =
            hcl.supports_lower_vtl_timer_virt() && !params.disable_lower_vtl_timer_virt;

        let backing_shared = BackingShared::new(
            isolation,
            &params,
            BackingSharedParams {
                cvm_state,
                #[cfg(guest_arch = "x86_64")]
                cpuid: &cpuid,
                hcl: &hcl,
                guest_vsm_available,
                lower_vtl_timer_virt_available,
            },
        )?;

        let enter_modes = EnterModes::default();

        let partition = Arc::new(UhPartitionInner {
            hcl,
            vps,
            irq_routes: Default::default(),
            caps,
            enter_modes: Mutex::new(enter_modes),
            enter_modes_atomic: u8::from(hcl::protocol::EnterModes::from(enter_modes)).into(),
            gm: late_params.gm,
            vtl0_kernel_exec_gm: late_params.vtl0_kernel_exec_gm,
            vtl0_user_exec_gm: late_params.vtl0_user_exec_gm,
            #[cfg(guest_arch = "x86_64")]
            cpuid,
            crash_notification_send: late_params.crash_notification_send,
            monitor_page: MonitorPage::new(),
            allocated_monitor_page: Mutex::new(None),
            software_devices,
            lower_vtl_memory_layout: params.lower_vtl_memory_layout.clone(),
            vmtime: late_params.vmtime.clone(),
            isolation,
            no_sidecar_hotplug: params.no_sidecar_hotplug.into(),
            use_mmio_hypercalls: params.use_mmio_hypercalls,
            backing_shared,
            #[cfg(guest_arch = "x86_64")]
            device_vector_table: RwLock::new(IrrBitmap::new(Default::default())),
            intercept_debug_exceptions: params.intercept_debug_exceptions,
            vmbus_relay: late_params.vmbus_relay,
        });

        if cfg!(guest_arch = "x86_64") {
            // Intercept all IO ports unless opted out.
            partition.manage_io_port_intercept_region(0, !0, true);
        }

        let vps = params
            .topology
            .vps_arch()
            .map(|vp_info| UhProcessorBox {
                partition: partition.clone(),
                vp_info,
            })
            .collect();

        Ok((
            UhPartition {
                inner: partition.clone(),
                interrupt_targets: VtlArray::from_fn(|vtl| {
                    Arc::new(UhInterruptTarget {
                        partition: partition.clone(),
                        vtl: vtl.try_into().unwrap(),
                    })
                }),
            },
            vps,
        ))
    }
}

impl UhPartition {
    /// Gets the guest OS ID for VTL0.
    pub fn vtl0_guest_os_id(&self) -> Result<HvGuestOsId, hcl::ioctl::register::GetRegError> {
        // If Underhill is emulating the hypervisor interfaces, get this value
        // from the emulator. This happens when running under hardware isolation
        // or when configured for testing.
        let id = if let Some(hv) = self.inner.hv() {
            hv.guest_os_id(Vtl::Vtl0)
        } else {
            // Ask the hypervisor for this value.
            self.inner.hcl.get_guest_os_id(GuestVtl::Vtl0)?
        };
        Ok(id)
    }

    /// Configures guest accesses to IO ports in `range` to go directly to the
    /// host.
    ///
    /// When the return value is dropped, the ports will be unregistered.
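    ///
    /// # Example
    ///
    /// A hedged sketch; the port range is illustrative:
    ///
    /// ```ignore
    /// // Let the guest talk to the host's COM1 directly.
    /// let handle = partition.register_host_io_port_fast_path(0x3f8..=0x3ff);
    /// // Dropping the handle re-registers the intercepts.
    /// drop(handle);
    /// ```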
    pub fn register_host_io_port_fast_path(
        &self,
        range: RangeInclusive<u16>,
    ) -> HostIoPortFastPathHandle {
        // There is no way to provide a fast path for some hardware-isolated
        // VM architectures. The devices that do use this facility are not
        // enabled on hardware-isolated VMs.
        assert!(!self.inner.isolation.is_hardware_isolated());

        self.inner
            .manage_io_port_intercept_region(*range.start(), *range.end(), false);
        HostIoPortFastPathHandle {
            inner: Arc::downgrade(&self.inner),
            begin: *range.start(),
            end: *range.end(),
        }
    }

    /// Triggers the LINT1 interrupt vector on the LAPIC of the BSP.
    pub fn assert_debug_interrupt(&self, _vtl: u8) {
        #[cfg(guest_arch = "x86_64")]
        const LINT_INDEX_1: u8 = 1;
        #[cfg(guest_arch = "x86_64")]
        match self.inner.isolation {
            IsolationType::Snp => {
                tracing::error!(?_vtl, "Debug interrupts cannot be injected into SNP VMs");
            }
            _ => {
                let bsp_index = VpIndex::new(0);
                self.pulse_lint(bsp_index, Vtl::try_from(_vtl).unwrap(), LINT_INDEX_1)
            }
        }
    }

    /// Enables or disables the PM timer assist.
    pub fn set_pm_timer_assist(
        &self,
        port: Option<u16>,
    ) -> Result<(), hcl::ioctl::register::SetRegError> {
        self.inner.hcl.set_pm_timer_assist(port)
    }

    /// Registers a DMA-allocated overlay page, such as the monitor page,
    /// applying the given guest memory protections.
    fn register_cvm_dma_overlay_page(
        &self,
        vtl: GuestVtl,
        gpn: u64,
        new_perms: HvMapGpaFlags,
    ) -> anyhow::Result<()> {
        // How the monitor page is protected depends on the isolation type of the VM.
        match &self.inner.backing_shared {
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(snp_backed_shared) => snp_backed_shared
                .cvm
                .isolated_memory_protector
                .register_overlay_page(
                    vtl,
                    gpn,
                    // On a CVM, the monitor page is always DMA-allocated.
                    GpnSource::Dma,
                    HvMapGpaFlags::new(),
                    Some(new_perms),
                    &mut SnpBacked::tlb_flush_lock_access(
                        None,
                        self.inner.as_ref(),
                        snp_backed_shared,
                    ),
                )
                .map_err(|e| anyhow::anyhow!(e)),
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Tdx(tdx_backed_shared) => tdx_backed_shared
                .cvm
                .isolated_memory_protector
                .register_overlay_page(
                    vtl,
                    gpn,
                    GpnSource::Dma,
                    HvMapGpaFlags::new(),
                    Some(new_perms),
                    &mut TdxBacked::tlb_flush_lock_access(
                        None,
                        self.inner.as_ref(),
                        tdx_backed_shared,
                    ),
                )
                .map_err(|e| anyhow::anyhow!(e)),
            BackingShared::Hypervisor(_) => {
                let _ = (vtl, gpn, new_perms);
                unreachable!()
            }
        }
    }

    /// Unregisters a DMA-allocated overlay page, such as the monitor page,
    /// restoring its previous guest memory protections.
    fn unregister_cvm_dma_overlay_page(&self, vtl: GuestVtl, gpn: u64) -> anyhow::Result<()> {
        // How the monitor page is protected depends on the isolation type of the VM.
        match &self.inner.backing_shared {
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(snp_backed_shared) => snp_backed_shared
                .cvm
                .isolated_memory_protector
                .unregister_overlay_page(
                    vtl,
                    gpn,
                    &mut SnpBacked::tlb_flush_lock_access(
                        None,
                        self.inner.as_ref(),
                        snp_backed_shared,
                    ),
                )
                .map_err(|e| anyhow::anyhow!(e)),
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Tdx(tdx_backed_shared) => tdx_backed_shared
                .cvm
                .isolated_memory_protector
                .unregister_overlay_page(
                    vtl,
                    gpn,
                    &mut TdxBacked::tlb_flush_lock_access(
                        None,
                        self.inner.as_ref(),
                        tdx_backed_shared,
                    ),
                )
                .map_err(|e| anyhow::anyhow!(e)),
            BackingShared::Hypervisor(_) => {
                let _ = (vtl, gpn);
                unreachable!()
            }
        }
    }
}

impl UhProtoPartition<'_> {
    /// Whether Guest VSM is available to the guest. If so, for hardware CVMs,
    /// it is safe to expose Guest VSM support via cpuid.
    fn check_guest_vsm_support(privs: HvPartitionPrivilege, hcl: &Hcl) -> Result<bool, Error> {
        if !privs.access_vsm() {
            return Ok(false);
        }

        let guest_vsm_config = hcl
            .get_guest_vsm_partition_config()
            .map_err(Error::GetReg)?;
        Ok(guest_vsm_config.maximum_vtl() >= u8::from(GuestVtl::Vtl1))
    }

    #[cfg(guest_arch = "x86_64")]
    /// Constructs partition-wide CVM state.
    fn construct_cvm_state(
        params: &UhPartitionNewParams<'_>,
        late_params: CvmLateParams,
        caps: &PartitionCapabilities,
        guest_vsm_available: bool,
        proxy_interrupt_redirect_available: bool,
    ) -> Result<UhCvmPartitionState, Error> {
        use vmcore::reference_time::ReferenceTimeSource;

        let vp_count = params.topology.vp_count() as usize;
        let vps = (0..vp_count)
            .map(|vp_index| UhCvmVpInner {
                tlb_lock_info: VtlArray::from_fn(|_| TlbLockInfo::new(vp_count)),
                vtl1_enable_called: Mutex::new(false),
                started: AtomicBool::new(vp_index == 0),
                hv_start_enable_vtl_vp: VtlArray::from_fn(|_| Mutex::new(None)),
                proxy_redirect_interrupts: Mutex::new(HashMap::new()),
            })
            .collect();
        let tlb_locked_vps =
            VtlArray::from_fn(|_| BitVec::repeat(false, vp_count).into_boxed_bitslice());

        let lapic = VtlArray::from_fn(|_| {
            LocalApicSet::builder()
                .x2apic_capable(caps.x2apic)
                .hyperv_enlightenments(true)
                .build()
        });

        let tsc_frequency = get_tsc_frequency(params.isolation)?;
        let ref_time = ReferenceTimeSource::new(TscReferenceTimeSource::new(tsc_frequency));

        // If we're emulating the APIC, then we also must emulate the hypervisor
        // enlightenments, since the hypervisor can't support enlightenments
        // without also providing an APIC.
        //
        // Additionally, TDX provides hardware APIC emulation but we still need
        // to emulate the hypervisor enlightenments.
        let hv = GlobalHv::new(hv1_emulator::hv::GlobalHvParams {
            max_vp_count: params.topology.vp_count(),
            vendor: caps.vendor,
            tsc_frequency,
            ref_time,
            is_ref_time_backed_by_tsc: true,
        });

        Ok(UhCvmPartitionState {
            vps_per_socket: params.topology.reserved_vps_per_socket(),
            tlb_locked_vps,
            vps,
            shared_memory: late_params.shared_gm,
            isolated_memory_protector: late_params.isolated_memory_protector,
            lapic,
            hv,
            guest_vsm: RwLock::new(GuestVsmState::from_availability(guest_vsm_available)),
            shared_dma_client: late_params.shared_dma_client,
            private_dma_client: late_params.private_dma_client,
            hide_isolation: params.hide_isolation,
            proxy_interrupt_redirect: proxy_interrupt_redirect_available,
        })
    }
}

impl UhPartition {
    #[cfg(guest_arch = "x86_64")]
    /// Constructs the set of cpuid results to show to the guest.
    fn construct_cpuid_results(
        cpuid: virt::CpuidLeafSet,
        initial_cpuid: &[CpuidLeaf],
        topology: &ProcessorTopology<vm_topology::processor::x86::X86Topology>,
        isolation: IsolationType,
        hide_isolation: bool,
    ) -> virt::CpuidLeafSet {
        let mut cpuid = cpuid.into_leaves();
        if isolation.is_hardware_isolated() {
            // Update the x2apic leaf based on the topology.
            let x2apic = match topology.apic_mode() {
                vm_topology::processor::x86::ApicMode::XApic => false,
                vm_topology::processor::x86::ApicMode::X2ApicSupported => true,
                vm_topology::processor::x86::ApicMode::X2ApicEnabled => true,
            };
            let ecx = x86defs::cpuid::VersionAndFeaturesEcx::new().with_x2_apic(x2apic);
            let ecx_mask = x86defs::cpuid::VersionAndFeaturesEcx::new().with_x2_apic(true);
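            // The mask limits the override to the x2apic bit, so all other
            // VersionAndFeatures ECX bits pass through from the underlying
            // leaf unchanged.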
            cpuid.push(
                CpuidLeaf::new(
                    x86defs::cpuid::CpuidFunction::VersionAndFeatures.0,
                    [0, 0, ecx.into(), 0],
                )
                .masked([0, 0, ecx_mask.into(), 0]),
            );

            // Get the hypervisor version from the host. This is just for
            // reporting purposes, so it is safe even if the hypervisor is not
            // trusted.
            let hv_version = safe_intrinsics::cpuid(hvdef::HV_CPUID_FUNCTION_MS_HV_VERSION, 0);

            // Perform final processing steps for synthetic leaves.
            hv1_emulator::cpuid::process_hv_cpuid_leaves(
                &mut cpuid,
                hide_isolation,
                [
                    hv_version.eax,
                    hv_version.ebx,
                    hv_version.ecx,
                    hv_version.edx,
                ],
            );
        }
        cpuid.extend(initial_cpuid);
        virt::CpuidLeafSet::new(cpuid)
    }

    #[cfg(guest_arch = "x86_64")]
    /// Computes the partition capabilities.
    fn construct_capabilities(
        topology: &ProcessorTopology,
        cpuid: &virt::CpuidLeafSet,
        isolation: IsolationType,
        hide_isolation: bool,
    ) -> Result<virt::x86::X86PartitionCapabilities, virt::x86::X86PartitionCapabilitiesError> {
        // Declared before the borrow below so that whichever closure is chosen
        // lives long enough to be passed as `&mut dyn FnMut`.
        let mut native_cpuid_fn;
        let mut cvm_cpuid_fn;

        // Determine the method to get cpuid results for the guest when
        // computing partition capabilities.
        let cpuid_fn: &mut dyn FnMut(u32, u32) -> [u32; 4] = if isolation.is_hardware_isolated() {
            // Use the filtered CPUID to determine capabilities.
            cvm_cpuid_fn = move |leaf, sub_leaf| cpuid.result(leaf, sub_leaf, &[0, 0, 0, 0]);
            &mut cvm_cpuid_fn
        } else {
            // Just use the native cpuid.
            native_cpuid_fn = |leaf, sub_leaf| {
                let CpuidResult { eax, ebx, ecx, edx } = safe_intrinsics::cpuid(leaf, sub_leaf);
                cpuid.result(leaf, sub_leaf, &[eax, ebx, ecx, edx])
            };
            &mut native_cpuid_fn
        };

        // Compute and validate capabilities.
        let mut caps = virt::x86::X86PartitionCapabilities::from_cpuid(topology, cpuid_fn)?;
        match isolation {
            IsolationType::Tdx => {
                assert_eq!(caps.vtom.is_some(), !hide_isolation);
                // TDX 1.5 requires EFER.NXE to be set to 1, so set it at RESET/INIT.
                caps.nxe_forced_on = true;
            }
            IsolationType::Snp => {
                assert_eq!(caps.vtom.is_some(), !hide_isolation);
            }
            _ => {
                assert!(caps.vtom.is_none());
            }
        }

        Ok(caps)
    }
}

#[cfg(guest_arch = "x86_64")]
/// Gets the TSC frequency for the current platform.
fn get_tsc_frequency(isolation: IsolationType) -> Result<u64, Error> {
    // Always get the frequency from the hypervisor. It's believed that, as
    // long as the hypervisor is behaving, it will provide the most precise and
    // accurate frequency.
    let msr = MsrDevice::new(0).map_err(Error::OpenMsr)?;
    let hv_frequency = msr
        .read_msr(hvdef::HV_X64_MSR_TSC_FREQUENCY)
        .map_err(Error::ReadTscFrequency)?;

    // Get the hardware-advertised frequency and validate that the hypervisor
    // frequency is not too far off.
    let hw_info = match isolation {
        IsolationType::Tdx => {
            // TDX provides the TSC frequency via cpuid.
            let max_function =
                safe_intrinsics::cpuid(x86defs::cpuid::CpuidFunction::VendorAndMaxFunction.0, 0)
                    .eax;

            if max_function < x86defs::cpuid::CpuidFunction::CoreCrystalClockInformation.0 {
                return Err(Error::BadCpuidTsc);
            }
            let result = safe_intrinsics::cpuid(
                x86defs::cpuid::CpuidFunction::CoreCrystalClockInformation.0,
                0,
            );
            let ratio_denom = result.eax;
            let ratio_num = result.ebx;
            let clock = result.ecx;
            if ratio_num == 0 || ratio_denom == 0 || clock == 0 {
                return Err(Error::BadCpuidTsc);
            }
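            // For example (illustrative numbers only): a 25 MHz crystal
            // (ecx = 25_000_000) with a ratio of 100/1 (ebx/eax) implies a
            // 2.5 GHz TSC: 25_000_000 * 100 / 1 = 2_500_000_000 Hz.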
            // TDX TSC is configurable in units of 25 MHz, so allow up to
            // 12.5 MHz of error.
            let allowed_error = 12_500_000;
            Some((
                clock as u64 * ratio_num as u64 / ratio_denom as u64,
                allowed_error,
            ))
        }
        IsolationType::Snp => {
            // SNP currently does not provide the frequency.
            None
        }
        IsolationType::Vbs | IsolationType::None => None,
    };

    if let Some((hw_frequency, allowed_error)) = hw_info {
        // Don't allow the frequencies to be different by more than the
        // hardware precision.
        let delta = hw_frequency.abs_diff(hv_frequency);
        if delta > allowed_error {
            return Err(Error::TscFrequencyMismatch {
                hv: hv_frequency,
                hw: hw_frequency,
                allowed_error,
            });
        }
    }

    Ok(hv_frequency)
}

impl UhPartitionInner {
    fn manage_io_port_intercept_region(&self, begin: u16, end: u16, active: bool) {
        if self.isolation.is_hardware_isolated() {
            return;
        }

        static SKIP_RANGE: AtomicBool = AtomicBool::new(false);

        let access_type_mask = if active {
            HV_INTERCEPT_ACCESS_MASK_READ_WRITE
        } else {
            HV_INTERCEPT_ACCESS_MASK_NONE
        };

        // Try to register the whole range at once.
        if !SKIP_RANGE.load(Ordering::Relaxed) {
            match self.hcl.register_intercept(
                HvInterceptType::HvInterceptTypeX64IoPortRange,
                access_type_mask,
                HvInterceptParameters::new_io_port_range(begin..=end),
            ) {
                Ok(()) => return,
                Err(HvError::InvalidParameter) => {
                    // Probably a build that doesn't support range wrapping yet.
                    // Don't try again.
                    SKIP_RANGE.store(true, Ordering::Relaxed);
                    tracing::warn!(
                        CVM_ALLOWED,
                        "old hypervisor build; using slow path for intercept ranges"
                    );
                }
                Err(err) => {
                    panic!("io port range registration failure: {err:?}");
                }
            }
        }

        // Fall back to registering one port at a time.
        for port in begin..=end {
            self.hcl
                .register_intercept(
                    HvInterceptType::HvInterceptTypeX64IoPort,
                    access_type_mask,
                    HvInterceptParameters::new_io_port(port),
                )
                .expect("registering io intercept cannot fail");
        }
    }

    fn is_gpa_lower_vtl_ram(&self, gpa: u64) -> bool {
        // TODO: this probably should reflect changes to the memory map via PAM
        // registers. Right now this isn't an issue because the relevant region,
        // VGA, is handled on the host.
        self.lower_vtl_memory_layout
            .ram()
            .iter()
            .any(|m| m.range.contains_addr(gpa))
    }

    fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool {
        // TODO: this probably should reflect changes to the memory map via PAM
        // registers. Right now this isn't an issue because the relevant region,
        // VGA, is handled on the host.
        if self.is_gpa_lower_vtl_ram(gpa) {
            // The monitor page is protected against lower VTL writes.
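            // Masking with `!(HV_PAGE_SIZE - 1)` rounds the GPA down to its
            // containing page before comparing against the monitor page GPA.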
            !write || self.monitor_page.gpa() != Some(gpa & !(HV_PAGE_SIZE - 1))
        } else {
            false
        }
    }
}

/// Handle returned by [`UhPartition::register_host_io_port_fast_path`].
///
/// When dropped, unregisters the IO ports so that they are no longer forwarded
/// to the host.
#[must_use]
pub struct HostIoPortFastPathHandle {
    inner: Weak<UhPartitionInner>,
    begin: u16,
    end: u16,
}

impl Drop for HostIoPortFastPathHandle {
    fn drop(&mut self) {
        if let Some(inner) = self.inner.upgrade() {
            inner.manage_io_port_intercept_region(self.begin, self.end, true);
        }
    }
}

/// Application-level VTL crash data that is not suitable for putting on the
/// wire.
///
/// FUTURE: move/remove this to standardize across virt backends.
#[derive(Copy, Clone, Debug)]
pub struct VtlCrash {
    /// The VP that crashed.
    pub vp_index: VpIndex,
    /// The VTL that crashed.
    pub last_vtl: GuestVtl,
    /// The crash control information.
    pub control: GuestCrashCtl,
    /// The crash parameters.
    pub parameters: [u64; 5],
}

/// Validates that `flags` is a valid setting for VTL memory protection when
/// applied to VTL 1.
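///
/// # Example
///
/// A hedged sketch, assuming the usual bitfield-style `with_*` setters on
/// [`HvMapGpaFlags`]:
///
/// ```ignore
/// // Read+write is valid.
/// let rw = HvMapGpaFlags::new().with_readable(true).with_writable(true);
/// assert!(validate_vtl_gpa_flags(rw, false, false));
///
/// // Write without read is invalid: read must accompany any other access.
/// let w = HvMapGpaFlags::new().with_writable(true);
/// assert!(!validate_vtl_gpa_flags(w, false, false));
/// ```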
#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
fn validate_vtl_gpa_flags(
    flags: HvMapGpaFlags,
    mbec_enabled: bool,
    shadow_supervisor_stack_enabled: bool,
) -> bool {
    // Adjust is not allowed for VTL 1.
    if flags.adjustable() {
        return false;
    }

    // KX must equal UX unless MBEC is enabled. KX && !UX is invalid.
    if flags.kernel_executable() != flags.user_executable() {
        if (flags.kernel_executable() && !flags.user_executable()) || !mbec_enabled {
            return false;
        }
    }

    // Read must be specified if anything else is specified.
    if flags.writable()
        || flags.kernel_executable()
        || flags.user_executable()
        || flags.supervisor_shadow_stack()
        || flags.paging_writability()
        || flags.verify_paging_writability()
    {
        if !flags.readable() {
            return false;
        }
    }

    // Supervisor shadow stack protection is invalid if shadow stacks are
    // disabled or if execute is not specified.
    if flags.supervisor_shadow_stack()
        && ((!flags.kernel_executable() && !flags.user_executable())
            || !shadow_supervisor_stack_enabled)
    {
        return false;
    }

    true
}