//! Implements the `virt` partition backend for hypervisor-backed and
//! hardware-isolated (SNP/TDX) partitions running in VTL2, built on the HCL
//! kernel interfaces.

#![cfg(all(guest_is_native, target_os = "linux"))]

mod devmsr;

cfg_if::cfg_if!(
    if #[cfg(guest_arch = "x86_64")] {
        mod cvm_cpuid;
        pub use processor::snp::SnpBacked;
        pub use processor::tdx::TdxBacked;
        use crate::processor::HardwareIsolatedBacking;
        pub use crate::processor::mshv::x64::HypervisorBackedX86 as HypervisorBacked;
        use crate::processor::mshv::x64::HypervisorBackedX86Shared as HypervisorBackedShared;
        use bitvec::prelude::BitArray;
        use bitvec::prelude::Lsb0;
        use devmsr::MsrDevice;
        use hv1_emulator::hv::ProcessorVtlHv;
        use processor::LapicState;
        use processor::snp::SnpBackedShared;
        use processor::tdx::TdxBackedShared;
        use std::arch::x86_64::CpuidResult;
        use virt::CpuidLeaf;
        use virt::state::StateElement;
        use virt::vp::MpState;
        /// A 256-bit bitmap of interrupt vectors, stored as eight little-endian u32s.
        type IrrBitmap = BitArray<[u32; 8], Lsb0>;
    } else if #[cfg(guest_arch = "aarch64")] {
        pub use crate::processor::mshv::arm64::HypervisorBackedArm64 as HypervisorBacked;
        use crate::processor::mshv::arm64::HypervisorBackedArm64Shared as HypervisorBackedShared;
    }
);

mod processor;
pub use processor::Backing;
pub use processor::UhProcessor;

use anyhow::Context as AnyhowContext;
use bitfield_struct::bitfield;
use bitvec::boxed::BitBox;
use bitvec::vec::BitVec;
use cvm_tracing::CVM_ALLOWED;
use guestmem::GuestMemory;
use guestmem::GuestMemoryBackingError;
use hcl::GuestVtl;
use hcl::ioctl::Hcl;
use hcl::ioctl::SetVsmPartitionConfigError;
use hv1_emulator::hv::GlobalHv;
use hv1_emulator::message_queues::MessageQueues;
use hv1_emulator::synic::GlobalSynic;
use hv1_emulator::synic::SintProxied;
use hv1_structs::VtlArray;
use hvdef::GuestCrashCtl;
use hvdef::HV_PAGE_SHIFT;
use hvdef::HV_PAGE_SIZE;
use hvdef::HV_PAGE_SIZE_USIZE;
use hvdef::HvError;
use hvdef::HvMapGpaFlags;
use hvdef::HvPartitionPrivilege;
use hvdef::HvRegisterName;
use hvdef::HvRegisterVsmPartitionConfig;
use hvdef::HvRegisterVsmPartitionStatus;
use hvdef::Vtl;
use hvdef::hypercall::HV_INTERCEPT_ACCESS_MASK_EXECUTE;
use hvdef::hypercall::HV_INTERCEPT_ACCESS_MASK_NONE;
use hvdef::hypercall::HV_INTERCEPT_ACCESS_MASK_READ_WRITE;
use hvdef::hypercall::HV_INTERCEPT_ACCESS_MASK_WRITE;
use hvdef::hypercall::HostVisibilityType;
use hvdef::hypercall::HvGuestOsId;
use hvdef::hypercall::HvInputVtl;
use hvdef::hypercall::HvInterceptParameters;
use hvdef::hypercall::HvInterceptType;
use inspect::Inspect;
use inspect::InspectMut;
use memory_range::MemoryRange;
use pal::unix::affinity;
use pal::unix::affinity::CpuSet;
use pal_async::driver::Driver;
use pal_async::driver::SpawnDriver;
use pal_uring::IdleControl;
use parking_lot::Mutex;
use parking_lot::RwLock;
use processor::BackingSharedParams;
use processor::SidecarExitReason;
use sidecar_client::NewSidecarClientError;
use std::collections::HashMap;
use std::ops::RangeInclusive;
use std::os::fd::AsRawFd;
use std::sync::Arc;
use std::sync::Weak;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicU8;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use std::task::Waker;
use thiserror::Error;
use user_driver::DmaClient;
use virt::IsolationType;
use virt::PartitionCapabilities;
use virt::VpIndex;
use virt::X86Partition;
use virt::irqcon::IoApicRouting;
use virt::irqcon::MsiRequest;
use virt::x86::apic_software_device::ApicSoftwareDevices;
use virt_support_apic::LocalApicSet;
use vm_topology::memory::MemoryLayout;
use vm_topology::processor::ProcessorTopology;
use vm_topology::processor::TargetVpInfo;
use vmcore::monitor::MonitorPage;
use vmcore::reference_time::GetReferenceTime;
use vmcore::reference_time::ReferenceTimeResult;
use vmcore::reference_time::ReferenceTimeSource;
use vmcore::vmtime::VmTimeSource;
use x86defs::snp::REG_TWEAK_BITMAP_OFFSET;
use x86defs::snp::REG_TWEAK_BITMAP_SIZE;
use x86defs::tdx::TdCallResult;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

#[derive(Error, Debug)]
#[expect(missing_docs)]
pub enum Error {
    #[error("hcl error")]
    Hcl(#[source] hcl::ioctl::Error),
    #[error("failed to open sidecar client")]
    Sidecar(#[source] NewSidecarClientError),
    #[error("failed to install {0:?} intercept: {1:?}")]
    InstallIntercept(HvInterceptType, HvError),
    #[error("failed to query hypervisor register {0:#x?}")]
    Register(HvRegisterName, #[source] HvError),
    #[error("failed to set vsm partition config register")]
    VsmPartitionConfig(#[source] SetVsmPartitionConfigError),
    #[error("failed to create virtual device")]
    NewDevice(#[source] virt::x86::apic_software_device::DeviceIdInUse),
    #[error("failed to create cpuid tables for cvm")]
    #[cfg(guest_arch = "x86_64")]
    CvmCpuid(#[source] cvm_cpuid::CpuidResultsError),
    #[error("failed to update hypercall msr")]
    UpdateHypercallMsr,
    #[error("failed to update reference tsc msr")]
    UpdateReferenceTsc,
    #[error("failed to map overlay page")]
    MapOverlay(#[source] std::io::Error),
    #[error("failed to allocate shared visibility pages for overlay")]
    AllocateSharedVisOverlay(#[source] anyhow::Error),
    #[error("failed to open msr device")]
    OpenMsr(#[source] std::io::Error),
    #[error("cpuid did not contain valid TSC frequency information")]
    BadCpuidTsc,
    #[error("failed to read tsc frequency")]
    ReadTscFrequency(#[source] std::io::Error),
    #[error(
        "tsc frequency mismatch between hypervisor ({hv}) and hardware ({hw}), exceeds allowed error ({allowed_error})"
    )]
    TscFrequencyMismatch {
        hv: u64,
        hw: u64,
        allowed_error: u64,
    },
    #[error("failed to set L2 controls: {0:?}")]
    FailedToSetL2Ctls(TdCallResult),
    #[error("debugging is configured but the binary does not have the gdb feature")]
    InvalidDebugConfiguration,
    #[error("failed to allocate TLB flush page")]
    AllocateTlbFlushPage(#[source] anyhow::Error),
    #[error("host does not support required cpu capabilities")]
    Capabilities(virt::PartitionCapabilitiesError),
    #[error("failed to get register")]
    GetReg(#[source] hcl::ioctl::register::GetRegError),
    #[error("failed to set register")]
    SetReg(#[source] hcl::ioctl::register::SetRegError),
}

#[derive(Error, Debug)]
#[expect(missing_docs)]
pub enum RevokeGuestVsmError {
    #[error("failed to set vsm config")]
    SetGuestVsmConfig(#[source] hcl::ioctl::register::SetRegError),
    #[error("VTL 1 is already enabled")]
    Vtl1AlreadyEnabled,
}

/// A hypervisor-backed or hardware-isolated partition.
#[derive(Inspect)]
pub struct UhPartition {
    #[inspect(flatten)]
    inner: Arc<UhPartitionInner>,
    #[inspect(skip)]
    interrupt_targets: VtlArray<Arc<UhInterruptTarget>, 2>,
}

#[derive(Inspect)]
#[inspect(extra = "UhPartitionInner::inspect_extra")]
struct UhPartitionInner {
    #[inspect(skip)]
    hcl: Hcl,
    #[inspect(skip)]
    vps: Vec<UhVpInner>,
    irq_routes: virt::irqcon::IrqRoutes,
    caps: PartitionCapabilities,
    #[inspect(skip)]
    enter_modes: Mutex<EnterModes>,
    #[inspect(skip)]
    enter_modes_atomic: AtomicU8,
    #[cfg(guest_arch = "x86_64")]
    cpuid: virt::CpuidLeafSet,
    lower_vtl_memory_layout: MemoryLayout,
    gm: VtlArray<GuestMemory, 2>,
    vtl0_kernel_exec_gm: GuestMemory,
    vtl0_user_exec_gm: GuestMemory,
    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    #[inspect(skip)]
    crash_notification_send: mesh::Sender<VtlCrash>,
    monitor_page: MonitorPage,
    #[inspect(skip)]
    allocated_monitor_page: Mutex<Option<user_driver::memory::MemoryBlock>>,
    software_devices: Option<ApicSoftwareDevices>,
    #[inspect(skip)]
    vmtime: VmTimeSource,
    isolation: IsolationType,
    #[inspect(with = "inspect::AtomicMut")]
    no_sidecar_hotplug: AtomicBool,
    use_mmio_hypercalls: bool,
    backing_shared: BackingShared,
    intercept_debug_exceptions: bool,
    #[cfg(guest_arch = "x86_64")]
    #[inspect(hex, with = "|x| inspect::iter_by_index(x.read().into_inner())")]
    device_vector_table: RwLock<IrrBitmap>,
    vmbus_relay: bool,
}

#[derive(Inspect)]
#[inspect(untagged)]
enum BackingShared {
    Hypervisor(#[inspect(flatten)] HypervisorBackedShared),
    #[cfg(guest_arch = "x86_64")]
    Snp(#[inspect(flatten)] SnpBackedShared),
    #[cfg(guest_arch = "x86_64")]
    Tdx(#[inspect(flatten)] TdxBackedShared),
}

impl BackingShared {
    fn new(
        isolation: IsolationType,
        partition_params: &UhPartitionNewParams<'_>,
        backing_shared_params: BackingSharedParams<'_>,
    ) -> Result<BackingShared, Error> {
        Ok(match isolation {
            IsolationType::None | IsolationType::Vbs => {
                assert!(backing_shared_params.cvm_state.is_none());
                BackingShared::Hypervisor(HypervisorBackedShared::new(
                    partition_params,
                    backing_shared_params,
                )?)
            }
            #[cfg(guest_arch = "x86_64")]
            IsolationType::Snp => BackingShared::Snp(SnpBackedShared::new(
                partition_params,
                backing_shared_params,
            )?),
            #[cfg(guest_arch = "x86_64")]
            IsolationType::Tdx => BackingShared::Tdx(TdxBackedShared::new(
                partition_params,
                backing_shared_params,
            )?),
            #[cfg(not(guest_arch = "x86_64"))]
            _ => unreachable!(),
        })
    }

    fn cvm_state(&self) -> Option<&UhCvmPartitionState> {
        match self {
            BackingShared::Hypervisor(_) => None,
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(SnpBackedShared { cvm, .. })
            | BackingShared::Tdx(TdxBackedShared { cvm, .. }) => Some(cvm),
        }
    }

    fn untrusted_synic(&self) -> Option<&GlobalSynic> {
        match self {
            BackingShared::Hypervisor(_) => None,
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(_) => None,
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Tdx(s) => s.untrusted_synic.as_ref(),
        }
    }
}

#[derive(InspectMut, Copy, Clone)]
struct EnterModes {
    #[inspect(mut)]
    first: EnterMode,
    #[inspect(mut)]
    second: EnterMode,
}

impl Default for EnterModes {
    fn default() -> Self {
        Self {
            first: EnterMode::Fast,
            second: EnterMode::IdleToVtl0,
        }
    }
}

impl From<EnterModes> for hcl::protocol::EnterModes {
    fn from(value: EnterModes) -> Self {
        Self::new()
            .with_first(value.first.into())
            .with_second(value.second.into())
    }
}

#[derive(InspectMut, Copy, Clone)]
enum EnterMode {
    Fast,
    PlayIdle,
    IdleToVtl0,
}

impl From<EnterMode> for hcl::protocol::EnterMode {
    fn from(value: EnterMode) -> Self {
        match value {
            EnterMode::Fast => Self::FAST,
            EnterMode::PlayIdle => Self::PLAY_IDLE,
            EnterMode::IdleToVtl0 => Self::IDLE_TO_VTL0,
        }
    }
}

#[cfg(guest_arch = "x86_64")]
#[derive(Inspect)]
struct GuestVsmVpState {
    #[inspect(with = "|x| x.as_ref().map(inspect::AsDebug)")]
    vtl0_exit_pending_event: Option<hvdef::HvX64PendingExceptionEvent>,
    reg_intercept: SecureRegisterInterceptState,
}

#[cfg(guest_arch = "x86_64")]
impl GuestVsmVpState {
    fn new() -> Self {
        GuestVsmVpState {
            vtl0_exit_pending_event: None,
            reg_intercept: Default::default(),
        }
    }
}

/// Per-VP state for a hardware-isolated (CVM) partition.
#[cfg(guest_arch = "x86_64")]
#[derive(Inspect)]
struct UhCvmVpState {
    /// The allocation backing this VP's direct overlay pages.
    #[inspect(debug)]
    direct_overlay_handle: user_driver::memory::MemoryBlock,
    exit_vtl: GuestVtl,
    hv: VtlArray<ProcessorVtlHv, 2>,
    lapics: VtlArray<LapicState, 2>,
    /// VTL 1 state, present once VTL 1 has been enabled on this VP.
    vtl1: Option<GuestVsmVpState>,
}

#[cfg(guest_arch = "x86_64")]
impl UhCvmVpState {
    /// Creates a new CVM VP state.
    pub(crate) fn new(
        cvm_partition: &UhCvmPartitionState,
        inner: &UhPartitionInner,
        vp_info: &TargetVpInfo,
        overlay_pages_required: usize,
    ) -> Result<Self, Error> {
        let direct_overlay_handle = cvm_partition
            .shared_dma_client
            .allocate_dma_buffer(overlay_pages_required * HV_PAGE_SIZE as usize)
            .map_err(Error::AllocateSharedVisOverlay)?;

        let apic_base = virt::vp::Apic::at_reset(&inner.caps, vp_info).apic_base;
        let lapics = VtlArray::from_fn(|vtl| {
            let apic_set = &cvm_partition.lapic[vtl];

            let mut lapic = apic_set.add_apic(vp_info, vtl == Vtl::Vtl1);
            lapic.set_apic_base(apic_base).unwrap();
            // VTL 0 APs start in the wait-for-SIPI state.
            let activity = if vtl == Vtl::Vtl0 && !vp_info.base.is_bsp() {
                MpState::WaitForSipi
            } else {
                MpState::Running
            };
            LapicState::new(lapic, activity)
        });

        let hv = VtlArray::from_fn(|vtl| cvm_partition.hv.add_vp(vp_info.base.vp_index, vtl));

        Ok(Self {
            direct_overlay_handle,
            exit_vtl: GuestVtl::Vtl0,
            hv,
            lapics,
            vtl1: None,
        })
    }
}

/// Configuration of VTL 1's intercepts on secure register accesses.
#[cfg(guest_arch = "x86_64")]
#[derive(Inspect, Default)]
#[inspect(hex)]
pub struct SecureRegisterInterceptState {
    #[inspect(with = "|&x| u64::from(x)")]
    intercept_control: hvdef::HvRegisterCrInterceptControl,
    cr0_mask: u64,
    cr4_mask: u64,
    ia32_misc_enable_mask: u64,
}

/// Information about a proxy-redirected interrupt vector.
#[derive(Clone, Inspect)]
struct ProxyRedirectVectorInfo {
    /// The device that owns the vector.
    device_id: u64,
    /// The original (pre-redirection) vector.
    original_vector: u32,
}

#[derive(Inspect)]
struct UhCvmPartitionState {
    #[cfg(guest_arch = "x86_64")]
    vps_per_socket: u32,
    #[inspect(
        with = "|arr| inspect::iter_by_index(arr.iter()).map_value(|bb| inspect::iter_by_index(bb.iter().map(|v| *v)))"
    )]
    tlb_locked_vps: VtlArray<BitBox<AtomicU64>, 2>,
    #[inspect(with = "inspect::iter_by_index")]
    vps: Vec<UhCvmVpInner>,
    shared_memory: GuestMemory,
    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    #[inspect(skip)]
    isolated_memory_protector: Arc<dyn ProtectIsolatedMemory>,
    lapic: VtlArray<LocalApicSet, 2>,
    hv: GlobalHv<2>,
    guest_vsm: RwLock<GuestVsmState<CvmVtl1State>>,
    shared_dma_client: Arc<dyn DmaClient>,
    private_dma_client: Arc<dyn DmaClient>,
    hide_isolation: bool,
    proxy_interrupt_redirect: bool,
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
impl UhCvmPartitionState {
    fn vp_inner(&self, vp_index: u32) -> &UhCvmVpInner {
        &self.vps[vp_index as usize]
    }

    fn is_lower_vtl_startup_denied(&self) -> bool {
        matches!(
            *self.guest_vsm.read(),
            GuestVsmState::Enabled {
                vtl1: CvmVtl1State {
                    deny_lower_vtl_startup: true,
                    ..
                }
            }
        )
    }
}

#[derive(Inspect)]
struct UhCvmVpInner {
    tlb_lock_info: VtlArray<TlbLockInfo, 2>,
    vtl1_enable_called: Mutex<bool>,
    started: AtomicBool,
    #[inspect(with = "|arr| inspect::iter_by_index(arr.iter().map(|v| v.lock().is_some()))")]
    hv_start_enable_vtl_vp: VtlArray<Mutex<Option<Box<VpStartEnableVtl>>>, 2>,
    #[inspect(with = "|x| inspect::adhoc(|req| inspect::iter_by_key(&*x.lock()).inspect(req))")]
    proxy_redirect_interrupts: Mutex<HashMap<u32, ProxyRedirectVectorInfo>>,
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
#[derive(Inspect)]
#[inspect(tag = "guest_vsm_state")]
enum GuestVsmState<T: Inspect> {
    NotPlatformSupported,
    NotGuestEnabled,
    Enabled {
        #[inspect(flatten)]
        vtl1: T,
    },
}

impl<T: Inspect> GuestVsmState<T> {
    pub fn from_availability(guest_vsm_available: bool) -> Self {
        if guest_vsm_available {
            GuestVsmState::NotGuestEnabled
        } else {
            GuestVsmState::NotPlatformSupported
        }
    }
}
541
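// A tiny check of the availability mapping above. This is a test sketch added
// for illustration; `CvmVtl1State` is used as the type parameter only because
// it is the concrete VTL 1 state type defined below.
#[cfg(test)]
mod guest_vsm_state_tests {
    use super::*;

    #[test]
    fn availability_maps_to_initial_state() {
        assert!(matches!(
            GuestVsmState::<CvmVtl1State>::from_availability(true),
            GuestVsmState::NotGuestEnabled
        ));
        assert!(matches!(
            GuestVsmState::<CvmVtl1State>::from_availability(false),
            GuestVsmState::NotPlatformSupported
        ));
    }
}
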
#[derive(Inspect)]
struct CvmVtl1State {
    enabled_on_any_vp: bool,
    zero_memory_on_reset: bool,
    deny_lower_vtl_startup: bool,
    pub mbec_enabled: bool,
    pub shadow_supervisor_stack_enabled: bool,
    #[inspect(with = "|bb| inspect::iter_by_index(bb.iter().map(|v| *v))")]
    io_read_intercepts: BitBox<u64>,
    #[inspect(with = "|bb| inspect::iter_by_index(bb.iter().map(|v| *v))")]
    io_write_intercepts: BitBox<u64>,
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
impl CvmVtl1State {
    fn new(mbec_enabled: bool) -> Self {
        Self {
            enabled_on_any_vp: false,
            zero_memory_on_reset: false,
            deny_lower_vtl_startup: false,
            mbec_enabled,
            shadow_supervisor_stack_enabled: false,
            io_read_intercepts: BitVec::repeat(false, u16::MAX as usize + 1).into_boxed_bitslice(),
            io_write_intercepts: BitVec::repeat(false, u16::MAX as usize + 1).into_boxed_bitslice(),
        }
    }
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
struct TscReferenceTimeSource {
    tsc_scale: u64,
}

#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
impl TscReferenceTimeSource {
    fn new(tsc_frequency: u64) -> Self {
        TscReferenceTimeSource {
            tsc_scale: (((10_000_000_u128) << 64) / tsc_frequency as u128) as u64,
        }
    }
}
588
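// A minimal sanity check of the 64.64 fixed-point conversion above: the scale
// is (10_000_000 << 64) / tsc_frequency, so (tsc_scale * tsc) >> 64 converts
// TSC ticks to 100ns units. The 2.5 GHz frequency here is illustrative, not a
// real platform value.
#[cfg(test)]
mod tsc_scale_tests {
    use super::TscReferenceTimeSource;

    #[test]
    fn one_second_of_tsc_is_ten_million_ref_ticks() {
        let source = TscReferenceTimeSource::new(2_500_000_000);
        let tsc = 2_500_000_000_u64; // one second of cycles at 2.5 GHz
        let ref_time = ((source.tsc_scale as u128 * tsc as u128) >> 64) as u64;
        // Allow for fixed-point truncation of the scale.
        assert!(ref_time.abs_diff(10_000_000) <= 1);
    }
}
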
impl GetReferenceTime for TscReferenceTimeSource {
    fn now(&self) -> ReferenceTimeResult {
        #[cfg(guest_arch = "x86_64")]
        {
            let tsc = safe_intrinsics::rdtsc();
            let ref_time = ((self.tsc_scale as u128 * tsc as u128) >> 64) as u64;
            ReferenceTimeResult {
                ref_time,
                system_time: None,
            }
        }

        #[cfg(guest_arch = "aarch64")]
        {
            todo!("AARCH64_TODO");
        }
    }
}

impl virt::irqcon::ControlGic for UhPartitionInner {
    fn set_spi_irq(&self, irq_id: u32, high: bool) {
        if let Err(err) = self.hcl.request_interrupt(
            hvdef::HvInterruptControl::new()
                .with_arm64_asserted(high)
                .with_interrupt_type(hvdef::HvInterruptType::HvArm64InterruptTypeFixed),
            0,
            irq_id,
            GuestVtl::Vtl0,
        ) {
            tracelimit::warn_ratelimited!(
                error = &err as &dyn std::error::Error,
                irq = irq_id,
                asserted = high,
                "failed to request spi"
            );
        }
    }
}

impl virt::Aarch64Partition for UhPartition {
    fn control_gic(&self, vtl: Vtl) -> Arc<dyn virt::irqcon::ControlGic> {
        debug_assert!(vtl == Vtl::Vtl0);
        self.inner.clone()
    }
}

/// An unbound virtual processor, which can later be bound to a thread with
/// [`UhProcessorBox::bind_processor`].
pub struct UhProcessorBox {
    partition: Arc<UhPartitionInner>,
    vp_info: TargetVpInfo,
}

impl UhProcessorBox {
    /// Returns the VP index.
    pub fn vp_index(&self) -> VpIndex {
        self.vp_info.base.vp_index
    }

    /// Returns the base CPU of the sidecar node hosting this VP, if it is a
    /// sidecar VP.
    pub fn sidecar_base_cpu(&self) -> Option<u32> {
        self.partition
            .hcl
            .sidecar_base_cpu(self.vp_info.base.vp_index.index())
    }

    /// Binds the processor to the current thread.
    ///
    /// If `control` is provided, the current thread must already be
    /// affinitized to this VP's CPU.
    pub fn bind_processor<'a, T: Backing>(
        &'a mut self,
        driver: &impl Driver,
        control: Option<&'a mut IdleControl>,
    ) -> Result<UhProcessor<'a, T>, Error> {
        if let Some(control) = &control {
            let vp_index = self.vp_info.base.vp_index;

            let mut current = Default::default();
            affinity::get_current_thread_affinity(&mut current).unwrap();
            assert_eq!(&current, CpuSet::new().set(vp_index.index()));

            self.partition
                .hcl
                .set_poll_file(
                    self.partition.vp(vp_index).unwrap().cpu_index,
                    control.ring_fd().as_raw_fd(),
                )
                .map_err(Error::Hcl)?;
        }

        UhProcessor::new(driver, &self.partition, self.vp_info, control)
    }

    /// Sets the VP's sidecar exit reason to a task request, for diagnostics.
    pub fn set_sidecar_exit_due_to_task(&self, task: Arc<str>) {
        self.partition
            .vp(self.vp_info.base.vp_index)
            .unwrap()
            .set_sidecar_exit_reason(SidecarExitReason::TaskRequest(task))
    }
}

#[derive(Debug, Inspect)]
struct UhVpInner {
    wake_reasons: AtomicU64,
    #[inspect(skip)]
    waker: RwLock<Option<Waker>>,
    message_queues: VtlArray<MessageQueues, 2>,
    #[inspect(skip)]
    vp_info: TargetVpInfo,
    cpu_index: u32,
    sidecar_exit_reason: Mutex<Option<SidecarExitReason>>,
}

impl UhVpInner {
    pub fn vp_index(&self) -> VpIndex {
        self.vp_info.base.vp_index
    }
}

#[cfg_attr(not(guest_arch = "x86_64"), expect(dead_code))]
#[derive(Debug, Inspect)]
enum InitialVpContextOperation {
    StartVp,
    EnableVpVtl,
}

#[cfg_attr(not(guest_arch = "x86_64"), expect(dead_code))]
#[derive(Debug, Inspect)]
struct VpStartEnableVtl {
    operation: InitialVpContextOperation,
    #[inspect(skip)]
    context: hvdef::hypercall::InitialVpContextX64,
}

#[derive(Debug, Inspect)]
struct TlbLockInfo {
    /// The set of VPs that are waiting on this VP to release the TLB lock.
    #[inspect(with = "|bb| inspect::iter_by_index(bb.iter().map(|v| *v))")]
    blocked_vps: BitBox<AtomicU64>,
    /// The set of VPs that this VP is waiting on to release the TLB lock.
    #[inspect(with = "|bb| inspect::iter_by_index(bb.iter().map(|v| *v))")]
    blocking_vps: BitBox<AtomicU64>,
    /// The number of VPs currently blocking this VP.
    blocking_vp_count: AtomicU32,
    /// Whether this VP is sleeping while waiting for the TLB lock.
    sleeping: AtomicBool,
}

#[cfg_attr(not(guest_arch = "x86_64"), expect(dead_code))]
impl TlbLockInfo {
    fn new(vp_count: usize) -> Self {
        Self {
            blocked_vps: BitVec::repeat(false, vp_count).into_boxed_bitslice(),
            blocking_vps: BitVec::repeat(false, vp_count).into_boxed_bitslice(),
            blocking_vp_count: AtomicU32::new(0),
            sleeping: false.into(),
        }
    }
}

#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
struct WakeReason {
    extint: bool,
    message_queues: bool,
    hv_start_enable_vtl_vp: bool,
    intcon: bool,
    update_proxy_irr_filter: bool,
    #[bits(27)]
    _reserved: u32,
}

impl WakeReason {
    const EXTINT: Self = Self::new().with_extint(true);
    const MESSAGE_QUEUES: Self = Self::new().with_message_queues(true);
    #[cfg(guest_arch = "x86_64")]
    const HV_START_ENABLE_VP_VTL: Self = Self::new().with_hv_start_enable_vtl_vp(true);
    const INTCON: Self = Self::new().with_intcon(true);
    #[cfg(guest_arch = "x86_64")]
    const UPDATE_PROXY_IRR_FILTER: Self = Self::new().with_update_proxy_irr_filter(true);
}
795
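// A quick check of the wake-reason bit layout. These packed u32s are stored
// per-VTL in `UhVpInner::wake_reasons` (an `AtomicU64`), so the bit positions
// below simply restate the field order of the bitfield definition above.
#[cfg(test)]
mod wake_reason_tests {
    use super::WakeReason;

    #[test]
    fn bit_positions_match_field_order() {
        assert_eq!(u32::from(WakeReason::EXTINT), 1 << 0);
        assert_eq!(u32::from(WakeReason::MESSAGE_QUEUES), 1 << 1);
        assert_eq!(u32::from(WakeReason::INTCON), 1 << 3);
    }
}
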
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
struct ExitActivity {
    pending_event: bool,
    #[bits(31)]
    _reserved: u32,
}

impl UhPartition {
    /// Revokes guest VSM support. Fails if VTL 1 has already been enabled.
    pub fn revoke_guest_vsm(&self) -> Result<(), RevokeGuestVsmError> {
        fn revoke<T: Inspect>(vsm_state: &mut GuestVsmState<T>) -> Result<(), RevokeGuestVsmError> {
            if matches!(vsm_state, GuestVsmState::Enabled { .. }) {
                return Err(RevokeGuestVsmError::Vtl1AlreadyEnabled);
            }
            *vsm_state = GuestVsmState::NotPlatformSupported;
            Ok(())
        }

        match &self.inner.backing_shared {
            BackingShared::Hypervisor(s) => {
                revoke(&mut *s.guest_vsm.write())?;
                self.inner
                    .hcl
                    .set_guest_vsm_partition_config(false)
                    .map_err(RevokeGuestVsmError::SetGuestVsmConfig)?;
            }
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(SnpBackedShared { cvm, .. })
            | BackingShared::Tdx(TdxBackedShared { cvm, .. }) => {
                revoke(&mut *cvm.guest_vsm.write())?;
            }
        };

        Ok(())
    }

    /// Returns the current hypervisor reference time, in 100ns units.
    pub fn reference_time(&self) -> u64 {
        if let Some(hv) = self.inner.hv() {
            hv.ref_time_source().now().ref_time
        } else {
            self.inner
                .hcl
                .reference_time()
                .expect("should not fail to get the reference time")
        }
    }
}

impl virt::Partition for UhPartition {
    fn supports_reset(&self) -> Option<&dyn virt::ResetPartition<Error = Self::Error>> {
        None
    }

    fn caps(&self) -> &PartitionCapabilities {
        &self.inner.caps
    }

    fn request_msi(&self, vtl: Vtl, request: MsiRequest) {
        self.inner
            .request_msi(vtl.try_into().expect("higher vtl not configured"), request)
    }

    fn request_yield(&self, _vp_index: VpIndex) {
        unimplemented!()
    }
}

impl X86Partition for UhPartition {
    fn ioapic_routing(&self) -> Arc<dyn IoApicRouting> {
        self.inner.clone()
    }

    fn pulse_lint(&self, vp_index: VpIndex, vtl: Vtl, lint: u8) {
        let vtl = GuestVtl::try_from(vtl).expect("higher vtl not configured");
        if let Some(apic) = &self.inner.lapic(vtl) {
            apic.lint(vp_index, lint.into(), |vp_index| {
                self.inner
                    .vp(vp_index)
                    .unwrap()
                    .wake(vtl, WakeReason::INTCON);
            });
        } else if lint == 0 {
            self.inner
                .vp(vp_index)
                .unwrap()
                .wake(vtl, WakeReason::EXTINT);
        } else {
            unimplemented!()
        }
    }
}

impl UhPartitionInner {
    fn vp(&self, index: VpIndex) -> Option<&'_ UhVpInner> {
        self.vps.get(index.index() as usize)
    }

    fn lapic(&self, vtl: GuestVtl) -> Option<&LocalApicSet> {
        self.backing_shared.cvm_state().map(|x| &x.lapic[vtl])
    }

    fn hv(&self) -> Option<&GlobalHv<2>> {
        self.backing_shared.cvm_state().map(|x| &x.hv)
    }

    #[cfg(guest_arch = "x86_64")]
    fn request_proxy_irr_filter_update(
        &self,
        vtl: GuestVtl,
        device_vector: u8,
        req_vp_index: VpIndex,
    ) {
        tracing::debug!(
            ?vtl,
            device_vector,
            req_vp_index = req_vp_index.index(),
            "request_proxy_irr_filter_update"
        );

        {
            let mut device_vector_table = self.device_vector_table.write();
            device_vector_table.set(device_vector as usize, true);
        }

        for vp in self.vps.iter() {
            if vp.vp_index() != req_vp_index {
                vp.wake(vtl, WakeReason::UPDATE_PROXY_IRR_FILTER);
            }
        }
    }

    #[cfg(guest_arch = "x86_64")]
    fn fill_device_vectors(&self, _vtl: GuestVtl, irr_vectors: &mut IrrBitmap) {
        let device_vector_table = self.device_vector_table.read();
        for idx in device_vector_table.iter_ones() {
            irr_vectors.set(idx, true);
        }
    }

    fn inspect_extra(&self, resp: &mut inspect::Response<'_>) {
        let mut wake_vps = false;
        resp.field_mut(
            "enter_modes",
            &mut inspect::adhoc_mut(|req| {
                let update = req.is_update();
                {
                    let mut modes = self.enter_modes.lock();
                    modes.inspect_mut(req);
                    if update {
                        self.enter_modes_atomic.store(
                            hcl::protocol::EnterModes::from(*modes).into(),
                            Ordering::Relaxed,
                        );
                        wake_vps = true;
                    }
                }
            }),
        );

        if wake_vps {
            for vp in self.vps.iter() {
                vp.wake_vtl2();
            }
        }
    }

    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    fn vsm_status(
        &self,
    ) -> Result<HvRegisterVsmPartitionStatus, hcl::ioctl::register::GetRegError> {
        self.hcl.get_vsm_partition_status()
    }
}

impl virt::Synic for UhPartition {
    fn post_message(&self, vtl: Vtl, vp_index: VpIndex, sint: u8, typ: u32, payload: &[u8]) {
        let vtl = GuestVtl::try_from(vtl).expect("higher vtl not configured");
        let Some(vp) = self.inner.vp(vp_index) else {
            tracelimit::warn_ratelimited!(
                CVM_ALLOWED,
                vp = vp_index.index(),
                "invalid vp target for post_message"
            );
            return;
        };

        vp.post_message(
            vtl,
            sint,
            &hvdef::HvMessage::new(hvdef::HvMessageType(typ), 0, payload),
        );
    }

    fn new_guest_event_port(
        &self,
        vtl: Vtl,
        vp: u32,
        sint: u8,
        flag: u16,
    ) -> Box<dyn vmcore::synic::GuestEventPort> {
        let vtl = GuestVtl::try_from(vtl).expect("higher vtl not configured");
        Box::new(UhEventPort {
            partition: Arc::downgrade(&self.inner),
            params: Arc::new(Mutex::new(UhEventPortParams {
                vp: VpIndex::new(vp),
                sint,
                flag,
                vtl,
            })),
        })
    }

    fn prefer_os_events(&self) -> bool {
        false
    }

    fn monitor_support(&self) -> Option<&dyn virt::SynicMonitor> {
        Some(self)
    }
}

impl virt::SynicMonitor for UhPartition {
    fn set_monitor_page(&self, vtl: Vtl, gpa: Option<u64>) -> anyhow::Result<()> {
        let mut allocated_block = self.inner.allocated_monitor_page.lock();
        let old_gpa = self.inner.monitor_page.set_gpa(gpa);

        let allocated_page = allocated_block.take();
        if let Some(old_gpa) = old_gpa {
            let allocated_gpa = allocated_page
                .as_ref()
                .map(|b| b.pfns()[0] << HV_PAGE_SHIFT);

            let result = if allocated_gpa == Some(old_gpa) {
                let vtl = GuestVtl::try_from(vtl).unwrap();
                self.unregister_cvm_dma_overlay_page(vtl, old_gpa >> HV_PAGE_SHIFT)
            } else {
                self.inner
                    .hcl
                    .modify_vtl_protection_mask(
                        MemoryRange::new(old_gpa..old_gpa + HV_PAGE_SIZE),
                        hvdef::HV_MAP_GPA_PERMISSIONS_ALL,
                        HvInputVtl::CURRENT_VTL,
                    )
                    .map_err(|err| anyhow::anyhow!(err))
            };

            result
                .context("failed to unregister old monitor page")
                .inspect_err(|_| {
                    self.inner.monitor_page.set_gpa(None);
                })?;

            tracing::debug!(old_gpa, "unregistered monitor page");
        }

        if let Some(gpa) = gpa {
            self.inner
                .hcl
                .modify_vtl_protection_mask(
                    MemoryRange::new(gpa..gpa + HV_PAGE_SIZE),
                    HvMapGpaFlags::new().with_readable(true),
                    HvInputVtl::CURRENT_VTL,
                )
                .context("failed to register monitor page")
                .inspect_err(|_| {
                    self.inner.monitor_page.set_gpa(None);
                })?;

            tracing::debug!(gpa, "registered monitor page");
        }

        Ok(())
    }

    fn register_monitor(
        &self,
        monitor_id: vmcore::monitor::MonitorId,
        connection_id: u32,
    ) -> Box<dyn Sync + Send> {
        self.inner
            .monitor_page
            .register_monitor(monitor_id, connection_id)
    }

    fn allocate_monitor_page(&self, vtl: Vtl) -> anyhow::Result<Option<u64>> {
        let vtl = GuestVtl::try_from(vtl).unwrap();

        let Some(state) = self.inner.backing_shared.cvm_state() else {
            return Ok(None);
        };

        let mut allocated_block = self.inner.allocated_monitor_page.lock();
        if let Some(block) = allocated_block.as_ref() {
            let gpa = block.pfns()[0] << HV_PAGE_SHIFT;
            assert_eq!(self.inner.monitor_page.gpa(), Some(gpa));
            return Ok(Some(gpa));
        }

        let block = state
            .private_dma_client
            .allocate_dma_buffer(HV_PAGE_SIZE_USIZE)
            .context("failed to allocate monitor page")?;

        let gpn = block.pfns()[0];
        *allocated_block = Some(block);
        let gpa = gpn << HV_PAGE_SHIFT;
        let old_gpa = self.inner.monitor_page.set_gpa(Some(gpa));
        if let Some(old_gpa) = old_gpa {
            self.inner
                .hcl
                .modify_vtl_protection_mask(
                    MemoryRange::new(old_gpa..old_gpa + HV_PAGE_SIZE),
                    hvdef::HV_MAP_GPA_PERMISSIONS_ALL,
                    HvInputVtl::CURRENT_VTL,
                )
                .context("failed to unregister old monitor page")
                .inspect_err(|_| {
                    self.inner.monitor_page.set_gpa(None);
                })?;

            tracing::debug!(old_gpa, "unregistered monitor page");
        }

        self.register_cvm_dma_overlay_page(vtl, gpn, HvMapGpaFlags::new().with_readable(true))
            .context("failed to register monitor page")
            .inspect_err(|_| {
                self.inner.monitor_page.set_gpa(None);
            })?;

        tracing::debug!(gpa, "registered allocated monitor page");

        Ok(Some(gpa))
    }
}

impl UhPartitionInner {
    #[cfg(guest_arch = "x86_64")]
    pub(crate) fn synic_interrupt(
        &self,
        vp_index: VpIndex,
        vtl: GuestVtl,
    ) -> impl '_ + hv1_emulator::RequestInterrupt {
        move |vector, auto_eoi| {
            self.lapic(vtl).unwrap().synic_interrupt(
                vp_index,
                vector as u8,
                auto_eoi,
                |vp_index| self.vp(vp_index).unwrap().wake(vtl, WakeReason::INTCON),
            );
        }
    }

    #[cfg(guest_arch = "aarch64")]
    fn synic_interrupt(
        &self,
        _vp_index: VpIndex,
        _vtl: GuestVtl,
    ) -> impl '_ + hv1_emulator::RequestInterrupt {
        move |_, _| {}
    }
}

#[derive(Debug)]
struct UhEventPort {
    partition: Weak<UhPartitionInner>,
    params: Arc<Mutex<UhEventPortParams>>,
}

#[derive(Debug, Copy, Clone)]
struct UhEventPortParams {
    vp: VpIndex,
    sint: u8,
    flag: u16,
    vtl: GuestVtl,
}

impl vmcore::synic::GuestEventPort for UhEventPort {
    fn interrupt(&self) -> vmcore::interrupt::Interrupt {
        let partition = self.partition.clone();
        let params = self.params.clone();
        vmcore::interrupt::Interrupt::from_fn(move || {
            let UhEventPortParams {
                vp,
                sint,
                flag,
                vtl,
            } = *params.lock();
            let Some(partition) = partition.upgrade() else {
                return;
            };
            tracing::trace!(vp = vp.index(), sint, flag, "signal_event");
            if let Some(hv) = partition.hv() {
                match hv.synic[vtl].signal_event(
                    vp,
                    sint,
                    flag,
                    &mut partition.synic_interrupt(vp, vtl),
                ) {
                    Ok(_) => {}
                    Err(SintProxied) => {
                        tracing::trace!(
                            vp = vp.index(),
                            sint,
                            flag,
                            "forwarding event to untrusted synic"
                        );
                        if let Some(synic) = partition.backing_shared.untrusted_synic() {
                            synic
                                .signal_event(
                                    vp,
                                    sint,
                                    flag,
                                    &mut partition.synic_interrupt(vp, vtl),
                                )
                                .ok();
                        } else {
                            partition.hcl.signal_event_direct(vp.index(), sint, flag);
                        }
                    }
                }
            } else {
                partition.hcl.signal_event_direct(vp.index(), sint, flag);
            }
        })
    }

    fn set_target_vp(&mut self, vp: u32) -> Result<(), vmcore::synic::HypervisorError> {
        self.params.lock().vp = VpIndex::new(vp);
        Ok(())
    }
}

impl virt::Hv1 for UhPartition {
    type Error = Error;
    type Device = virt::x86::apic_software_device::ApicSoftwareDevice;

    fn reference_time_source(&self) -> Option<ReferenceTimeSource> {
        Some(if let Some(hv) = self.inner.hv() {
            hv.ref_time_source().clone()
        } else {
            ReferenceTimeSource::from(self.inner.clone() as Arc<_>)
        })
    }

    fn new_virtual_device(
        &self,
    ) -> Option<&dyn virt::DeviceBuilder<Device = Self::Device, Error = Self::Error>> {
        self.inner.software_devices.is_some().then_some(self)
    }
}

impl GetReferenceTime for UhPartitionInner {
    fn now(&self) -> ReferenceTimeResult {
        ReferenceTimeResult {
            ref_time: self.hcl.reference_time().unwrap(),
            system_time: None,
        }
    }
}

impl virt::DeviceBuilder for UhPartition {
    fn build(&self, vtl: Vtl, device_id: u64) -> Result<Self::Device, Self::Error> {
        let vtl = GuestVtl::try_from(vtl).expect("higher vtl not configured");
        let device = self
            .inner
            .software_devices
            .as_ref()
            .expect("checked in new_virtual_device")
            .new_device(self.interrupt_targets[vtl].clone(), device_id)
            .map_err(Error::NewDevice)?;

        Ok(device)
    }
}

struct UhInterruptTarget {
    partition: Arc<UhPartitionInner>,
    vtl: GuestVtl,
}

impl pci_core::msi::MsiInterruptTarget for UhInterruptTarget {
    fn new_interrupt(&self) -> Box<dyn pci_core::msi::MsiControl> {
        let partition = self.partition.clone();
        let vtl = self.vtl;
        Box::new(move |address, data| partition.request_msi(vtl, MsiRequest { address, data }))
    }
}

impl UhPartitionInner {
    fn request_msi(&self, vtl: GuestVtl, request: MsiRequest) {
        if let Some(lapic) = self.lapic(vtl) {
            tracing::trace!(?request, "interrupt");
            lapic.request_interrupt(request.address, request.data, |vp_index| {
                self.vp(vp_index).unwrap().wake(vtl, WakeReason::INTCON)
            });
        } else {
            let (address, data) = request.as_x86();
            if let Err(err) = self.hcl.request_interrupt(
                request.hv_x86_interrupt_control(),
                address.virt_destination().into(),
                data.vector().into(),
                vtl,
            ) {
                tracelimit::warn_ratelimited!(
                    CVM_ALLOWED,
                    error = &err as &dyn std::error::Error,
                    address = request.address,
                    data = request.data,
                    "failed to request msi"
                );
            }
        }
    }
}

impl IoApicRouting for UhPartitionInner {
    fn set_irq_route(&self, irq: u8, request: Option<MsiRequest>) {
        self.irq_routes.set_irq_route(irq, request)
    }

    fn assert_irq(&self, irq: u8) {
        self.irq_routes
            .assert_irq(irq, |request| self.request_msi(GuestVtl::Vtl0, request))
    }
}

fn set_vtl2_vsm_partition_config(hcl: &Hcl) -> Result<(), Error> {
    let caps = hcl.get_vsm_capabilities().map_err(Error::GetReg)?;
    let hardware_isolated = hcl.isolation().is_hardware_isolated();
    let isolated = hcl.isolation().is_isolated();

    let config = HvRegisterVsmPartitionConfig::new()
        .with_default_vtl_protection_mask(0xF)
        .with_enable_vtl_protection(!hardware_isolated)
        .with_zero_memory_on_reset(!hardware_isolated)
        .with_intercept_cpuid_unimplemented(!hardware_isolated)
        .with_intercept_page(caps.intercept_page_available())
        .with_intercept_unrecoverable_exception(true)
        .with_intercept_not_present(caps.intercept_not_present_available() && !isolated)
        .with_intercept_acceptance(isolated)
        .with_intercept_enable_vtl_protection(isolated && !hardware_isolated)
        .with_intercept_system_reset(caps.intercept_system_reset_available());

    hcl.set_vtl2_vsm_partition_config(config)
        .map_err(Error::SetReg)
}

/// Early parameters for constructing a [`UhProtoPartition`].
pub struct UhPartitionNewParams<'a> {
    /// The isolation type of the partition.
    pub isolation: IsolationType,
    /// Whether to hide the hardware isolation type from the guest.
    pub hide_isolation: bool,
    /// The memory layout of the lower VTLs.
    pub lower_vtl_memory_layout: &'a MemoryLayout,
    /// The processor topology for the partition.
    pub topology: &'a ProcessorTopology,
    /// The SNP CPUID pages, if any.
    pub cvm_cpuid_info: Option<&'a [u8]>,
    /// The SNP secrets page, if any.
    pub snp_secrets: Option<&'a [u8]>,
    /// The virtual top of memory (vTOM) address, if any.
    pub vtom: Option<u64>,
    /// Whether VTL 2 should register to handle unknown synic connections.
    pub handle_synic: bool,
    /// Whether sidecar VP hotplug should be disabled.
    pub no_sidecar_hotplug: bool,
    /// Whether to use hypercalls for MMIO accesses.
    pub use_mmio_hypercalls: bool,
    /// Whether to intercept guest debug exceptions (requires the `gdb` feature).
    pub intercept_debug_exceptions: bool,
    /// Whether to disable proxy interrupt redirection even when available.
    pub disable_proxy_redirect: bool,
    /// Whether to disable lower-VTL timer virtualization even when available.
    pub disable_lower_vtl_timer_virt: bool,
}

/// Parameters for completing partition construction with [`UhProtoPartition::build`].
pub struct UhLateParams<'a> {
    /// Guest memory for the lower VTLs.
    pub gm: VtlArray<GuestMemory, 2>,
    /// Guest memory for VTL 0 kernel-mode execute access.
    pub vtl0_kernel_exec_gm: GuestMemory,
    /// Guest memory for VTL 0 user-mode execute access.
    pub vtl0_user_exec_gm: GuestMemory,
    /// The CPUID leaves to expose to the guest.
    #[cfg(guest_arch = "x86_64")]
    pub cpuid: Vec<CpuidLeaf>,
    /// The sender for guest crash notifications.
    pub crash_notification_send: mesh::Sender<VtlCrash>,
    /// The VM time source.
    pub vmtime: &'a VmTimeSource,
    /// Parameters specific to hardware-isolated (CVM) partitions.
    pub cvm_params: Option<CvmLateParams>,
    /// Whether a vmbus relay is configured for this partition.
    pub vmbus_relay: bool,
}

/// CVM-specific parameters for partition construction.
pub struct CvmLateParams {
    /// Guest memory for shared (host-visible) pages.
    pub shared_gm: GuestMemory,
    /// The protector for isolated memory.
    pub isolated_memory_protector: Arc<dyn ProtectIsolatedMemory>,
    /// The DMA client for shared-visibility pages.
    pub shared_dma_client: Arc<dyn DmaClient>,
    /// The DMA client for private (encrypted) pages.
    pub private_dma_client: Arc<dyn DmaClient>,
}

/// Where the memory backing a guest page number comes from.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum GpnSource {
    /// The page is backed by regular guest memory.
    GuestMemory,
    /// The page is backed by a DMA buffer allocation.
    Dma,
}

/// Manages protections and host visibility for isolated (CVM) memory.
pub trait ProtectIsolatedMemory: Send + Sync {
    /// Changes the host visibility of the given guest pages.
    fn change_host_visibility(
        &self,
        vtl: GuestVtl,
        shared: bool,
        gpns: &[u64],
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), (HvError, usize)>;

    /// Queries the host visibility of the given guest pages.
    fn query_host_visibility(
        &self,
        gpns: &[u64],
        host_visibility: &mut [HostVisibilityType],
    ) -> Result<(), (HvError, usize)>;

    /// Gets the default protections for VTL 0 memory.
    fn default_vtl0_protections(&self) -> HvMapGpaFlags;

    /// Changes the default protections applied to the target VTL's memory.
    fn change_default_vtl_protections(
        &self,
        target_vtl: GuestVtl,
        protections: HvMapGpaFlags,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), HvError>;

    /// Changes the protections on the given guest pages for the target VTL.
    fn change_vtl_protections(
        &self,
        target_vtl: GuestVtl,
        gpns: &[u64],
        protections: HvMapGpaFlags,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), (HvError, usize)>;

    /// Registers a page as an overlay page, checking that it currently has
    /// the permissions in `check_perms` and optionally applying `new_perms`.
    fn register_overlay_page(
        &self,
        vtl: GuestVtl,
        gpn: u64,
        gpn_source: GpnSource,
        check_perms: HvMapGpaFlags,
        new_perms: Option<HvMapGpaFlags>,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), HvError>;

    /// Unregisters a previously registered overlay page.
    fn unregister_overlay_page(
        &self,
        vtl: GuestVtl,
        gpn: u64,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), HvError>;

    /// Returns whether the given page is registered as an overlay page.
    fn is_overlay_page(&self, vtl: GuestVtl, gpn: u64) -> bool;

    /// Locks the given guest pages for the target VTL.
    fn lock_gpns(&self, vtl: GuestVtl, gpns: &[u64]) -> Result<(), GuestMemoryBackingError>;

    /// Unlocks guest pages previously locked with [`Self::lock_gpns`].
    fn unlock_gpns(&self, vtl: GuestVtl, gpns: &[u64]);

    /// Marks that VTL 1 protections are enabled.
    fn set_vtl1_protections_enabled(&self);

    /// Returns whether VTL 1 protections are enabled.
    fn vtl1_protections_enabled(&self) -> bool;
}

/// Provides access to TLB flush and lock operations.
pub trait TlbFlushLockAccess {
    /// Flushes the TLB for the given VTL.
    fn flush(&mut self, vtl: GuestVtl);

    /// Flushes the entire TLB for all VTLs.
    fn flush_entire(&mut self);

    /// Waits for pending TLB locks on the given VTL.
    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl);
}
1551
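// A minimal no-op `TlbFlushLockAccess` implementation, sketched here for test
// and illustration purposes only. The real implementations come from the
// backing-specific processor code and perform actual TLB maintenance.
#[cfg(test)]
#[allow(dead_code)]
struct NoOpTlbFlushLockAccess;

#[cfg(test)]
impl TlbFlushLockAccess for NoOpTlbFlushLockAccess {
    fn flush(&mut self, _vtl: GuestVtl) {}
    fn flush_entire(&mut self) {}
    fn set_wait_for_tlb_locks(&mut self, _vtl: GuestVtl) {}
}
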
/// A partially created partition, with enough state to answer basic
/// configuration questions before the full partition is built.
pub struct UhProtoPartition<'a> {
    params: UhPartitionNewParams<'a>,
    hcl: Hcl,
    guest_vsm_available: bool,
    create_partition_available: bool,
    #[cfg(guest_arch = "x86_64")]
    cpuid: virt::CpuidLeafSet,
}

impl<'a> UhProtoPartition<'a> {
    /// Creates a new prototype partition.
    pub fn new<T: SpawnDriver>(
        params: UhPartitionNewParams<'a>,
        driver: impl FnMut(u32) -> T,
    ) -> Result<Self, Error> {
        let hcl_isolation = match params.isolation {
            IsolationType::None => hcl::ioctl::IsolationType::None,
            IsolationType::Vbs => hcl::ioctl::IsolationType::Vbs,
            IsolationType::Snp => hcl::ioctl::IsolationType::Snp,
            IsolationType::Tdx => hcl::ioctl::IsolationType::Tdx,
        };

        let sidecar = sidecar_client::SidecarClient::new(driver).map_err(Error::Sidecar)?;

        let hcl = Hcl::new(hcl_isolation, sidecar).map_err(Error::Hcl)?;

        let mut allowed_hypercalls = vec![
            hvdef::HypercallCode::HvCallGetVpRegisters,
            hvdef::HypercallCode::HvCallSetVpRegisters,
            hvdef::HypercallCode::HvCallInstallIntercept,
            hvdef::HypercallCode::HvCallTranslateVirtualAddress,
            hvdef::HypercallCode::HvCallPostMessageDirect,
            hvdef::HypercallCode::HvCallSignalEventDirect,
            hvdef::HypercallCode::HvCallModifyVtlProtectionMask,
            hvdef::HypercallCode::HvCallTranslateVirtualAddressEx,
            hvdef::HypercallCode::HvCallCheckSparseGpaPageVtlAccess,
            hvdef::HypercallCode::HvCallAssertVirtualInterrupt,
            hvdef::HypercallCode::HvCallGetVpIndexFromApicId,
            hvdef::HypercallCode::HvCallAcceptGpaPages,
            hvdef::HypercallCode::HvCallModifySparseGpaPageHostVisibility,
        ];

        if params.isolation.is_hardware_isolated() {
            allowed_hypercalls.extend(vec![
                hvdef::HypercallCode::HvCallEnablePartitionVtl,
                hvdef::HypercallCode::HvCallRetargetDeviceInterrupt,
                hvdef::HypercallCode::HvCallEnableVpVtl,
            ]);
        }

        if params.use_mmio_hypercalls {
            allowed_hypercalls.extend(vec![
                hvdef::HypercallCode::HvCallMemoryMappedIoRead,
                hvdef::HypercallCode::HvCallMemoryMappedIoWrite,
            ]);
        }

        hcl.set_allowed_hypercalls(allowed_hypercalls.as_slice());

        set_vtl2_vsm_partition_config(&hcl)?;

        let privs = hcl
            .get_privileges_and_features_info()
            .map_err(Error::GetReg)?;
        let guest_vsm_available = Self::check_guest_vsm_support(privs, &hcl)?;

        #[cfg(guest_arch = "x86_64")]
        let cpuid = match params.isolation {
            IsolationType::Snp => cvm_cpuid::CpuidResultsIsolationType::Snp {
                cpuid_pages: params.cvm_cpuid_info.unwrap(),
                vtom: params.vtom.unwrap(),
                access_vsm: guest_vsm_available,
            }
            .build()
            .map_err(Error::CvmCpuid)?,

            IsolationType::Tdx => cvm_cpuid::CpuidResultsIsolationType::Tdx {
                topology: params.topology,
                vtom: params.vtom.unwrap(),
                access_vsm: guest_vsm_available,
            }
            .build()
            .map_err(Error::CvmCpuid)?,
            IsolationType::Vbs | IsolationType::None => Default::default(),
        };

        Ok(UhProtoPartition {
            hcl,
            params,
            guest_vsm_available,
            create_partition_available: privs.create_partitions(),
            #[cfg(guest_arch = "x86_64")]
            cpuid,
        })
    }

    /// Returns whether guest VSM will be available to the guest.
    pub fn guest_vsm_available(&self) -> bool {
        self.guest_vsm_available
    }

    /// Returns whether the hypervisor reports the create-partitions privilege.
    pub fn create_partition_available(&self) -> bool {
        self.create_partition_available
    }

    /// Builds the partition, returning it along with its unbound processors.
    pub async fn build(
        self,
        late_params: UhLateParams<'_>,
    ) -> Result<(UhPartition, Vec<UhProcessorBox>), Error> {
        let Self {
            mut hcl,
            params,
            guest_vsm_available,
            create_partition_available: _,
            #[cfg(guest_arch = "x86_64")]
            cpuid,
        } = self;
        let isolation = params.isolation;
        let is_hardware_isolated = isolation.is_hardware_isolated();

        if params.intercept_debug_exceptions {
            if !cfg!(feature = "gdb") {
                return Err(Error::InvalidDebugConfiguration);
            }

            cfg_if::cfg_if! {
                if #[cfg(guest_arch = "x86_64")] {
                    if isolation != IsolationType::Tdx {
                        let debug_exception_vector = 0x1;
                        hcl.register_intercept(
                            HvInterceptType::HvInterceptTypeException,
                            HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                            HvInterceptParameters::new_exception(debug_exception_vector),
                        )
                        .map_err(|err| Error::InstallIntercept(HvInterceptType::HvInterceptTypeException, err))?;
                    }
                } else {
                    return Err(Error::InvalidDebugConfiguration);
                }
            }
        }

        if !is_hardware_isolated {
            if cfg!(guest_arch = "x86_64") {
                hcl.register_intercept(
                    HvInterceptType::HvInterceptTypeX64Msr,
                    HV_INTERCEPT_ACCESS_MASK_READ_WRITE,
                    HvInterceptParameters::new_zeroed(),
                )
                .map_err(|err| {
                    Error::InstallIntercept(HvInterceptType::HvInterceptTypeX64Msr, err)
                })?;

                hcl.register_intercept(
                    HvInterceptType::HvInterceptTypeX64ApicEoi,
                    HV_INTERCEPT_ACCESS_MASK_WRITE,
                    HvInterceptParameters::new_zeroed(),
                )
                .map_err(|err| {
                    Error::InstallIntercept(HvInterceptType::HvInterceptTypeX64ApicEoi, err)
                })?;
            } else {
                if false {
                    todo!("AARCH64_TODO");
                }
            }
        }

        if isolation == IsolationType::Snp {
            hcl.register_intercept(
                HvInterceptType::HvInterceptTypeException,
                HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                HvInterceptParameters::new_exception(0x1D),
            )
            .map_err(|err| {
                Error::InstallIntercept(HvInterceptType::HvInterceptTypeException, err)
            })?;

            let mut bitmap = [0u8; 64];
            if let Some(secrets) = params.snp_secrets {
                bitmap.copy_from_slice(
                    &secrets
                        [REG_TWEAK_BITMAP_OFFSET..REG_TWEAK_BITMAP_OFFSET + REG_TWEAK_BITMAP_SIZE],
                );
            }
            hcl.set_snp_register_bitmap(bitmap);
        }

        hcl.add_vps(
            params.topology.vp_count(),
            late_params
                .cvm_params
                .as_ref()
                .map(|x| &x.private_dma_client),
        )
        .map_err(Error::Hcl)?;

        let vps: Vec<_> = params
            .topology
            .vps_arch()
            .map(|vp_info| {
                // Assume the CPU index matches the VP index.
                let cpu_index = vp_info.base.vp_index.index();
                UhVpInner::new(cpu_index, vp_info)
            })
            .collect();

        #[cfg(guest_arch = "x86_64")]
        let software_devices = {
            let res = if !is_hardware_isolated {
                hcl.register_intercept(
                    HvInterceptType::HvInterceptTypeRetargetInterruptWithUnknownDeviceId,
                    HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                    HvInterceptParameters::new_zeroed(),
                )
            } else {
                Ok(())
            };
            match res {
                Ok(()) => Some(ApicSoftwareDevices::new(
                    params.topology.vps_arch().map(|vp| vp.apic_id).collect(),
                )),
                Err(HvError::InvalidParameter | HvError::AccessDenied) => None,
                Err(err) => {
                    return Err(Error::InstallIntercept(
                        HvInterceptType::HvInterceptTypeRetargetInterruptWithUnknownDeviceId,
                        err,
                    ));
                }
            }
        };

        #[cfg(guest_arch = "aarch64")]
        let software_devices = None;

        #[cfg(guest_arch = "aarch64")]
        let caps = virt::aarch64::Aarch64PartitionCapabilities {};

        #[cfg(guest_arch = "x86_64")]
        let cpuid = UhPartition::construct_cpuid_results(
            cpuid,
            &late_params.cpuid,
            params.topology,
            isolation,
            params.hide_isolation,
        );

        #[cfg(guest_arch = "x86_64")]
        let caps = UhPartition::construct_capabilities(
            params.topology,
            &cpuid,
            isolation,
            params.hide_isolation,
        )
        .map_err(Error::Capabilities)?;

        if params.handle_synic && !matches!(isolation, IsolationType::Tdx) {
            hcl.register_intercept(
                HvInterceptType::HvInterceptTypeUnknownSynicConnection,
                HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                HvInterceptParameters::new_zeroed(),
            )
            .expect("registering synic intercept cannot fail");
        }

        #[cfg(guest_arch = "x86_64")]
        let cvm_state = if is_hardware_isolated {
            let vsm_caps = hcl.get_vsm_capabilities().map_err(Error::GetReg)?;
            let proxy_interrupt_redirect_available =
                vsm_caps.proxy_interrupt_redirect_available() && !params.disable_proxy_redirect;

            Some(Self::construct_cvm_state(
                &params,
                late_params.cvm_params.unwrap(),
                &caps,
                guest_vsm_available,
                proxy_interrupt_redirect_available,
            )?)
        } else {
            None
        };
        #[cfg(guest_arch = "aarch64")]
        let cvm_state = None;

        let lower_vtl_timer_virt_available =
            hcl.supports_lower_vtl_timer_virt() && !params.disable_lower_vtl_timer_virt;

        let backing_shared = BackingShared::new(
            isolation,
            &params,
            BackingSharedParams {
                cvm_state,
                #[cfg(guest_arch = "x86_64")]
                cpuid: &cpuid,
                hcl: &hcl,
                guest_vsm_available,
                lower_vtl_timer_virt_available,
            },
        )?;

        let enter_modes = EnterModes::default();

        let partition = Arc::new(UhPartitionInner {
            hcl,
            vps,
            irq_routes: Default::default(),
            caps,
            enter_modes: Mutex::new(enter_modes),
            enter_modes_atomic: u8::from(hcl::protocol::EnterModes::from(enter_modes)).into(),
            gm: late_params.gm,
            vtl0_kernel_exec_gm: late_params.vtl0_kernel_exec_gm,
            vtl0_user_exec_gm: late_params.vtl0_user_exec_gm,
            #[cfg(guest_arch = "x86_64")]
            cpuid,
            crash_notification_send: late_params.crash_notification_send,
            monitor_page: MonitorPage::new(),
            allocated_monitor_page: Mutex::new(None),
            software_devices,
            lower_vtl_memory_layout: params.lower_vtl_memory_layout.clone(),
            vmtime: late_params.vmtime.clone(),
            isolation,
            no_sidecar_hotplug: params.no_sidecar_hotplug.into(),
            use_mmio_hypercalls: params.use_mmio_hypercalls,
            backing_shared,
            #[cfg(guest_arch = "x86_64")]
            device_vector_table: RwLock::new(IrrBitmap::new(Default::default())),
            intercept_debug_exceptions: params.intercept_debug_exceptions,
            vmbus_relay: late_params.vmbus_relay,
        });

        if cfg!(guest_arch = "x86_64") {
            partition.manage_io_port_intercept_region(0, !0, true);
        }

        let vps = params
            .topology
            .vps_arch()
            .map(|vp_info| UhProcessorBox {
                partition: partition.clone(),
                vp_info,
            })
            .collect();

        Ok((
            UhPartition {
                inner: partition.clone(),
                interrupt_targets: VtlArray::from_fn(|vtl| {
                    Arc::new(UhInterruptTarget {
                        partition: partition.clone(),
                        vtl: vtl.try_into().unwrap(),
                    })
                }),
            },
            vps,
        ))
    }
}

impl UhPartition {
    /// Gets the guest OS ID registered by VTL 0.
    pub fn vtl0_guest_os_id(&self) -> Result<HvGuestOsId, hcl::ioctl::register::GetRegError> {
        let id = if let Some(hv) = self.inner.hv() {
            hv.guest_os_id(Vtl::Vtl0)
        } else {
            self.inner.hcl.get_guest_os_id(GuestVtl::Vtl0)?
        };
        Ok(id)
    }

    /// Configures guest accesses to the given I/O port range to go directly
    /// to the host, bypassing VTL 2.
    ///
    /// The range is re-intercepted when the returned handle is dropped.
    pub fn register_host_io_port_fast_path(
        &self,
        range: RangeInclusive<u16>,
    ) -> HostIoPortFastPathHandle {
        assert!(!self.inner.isolation.is_hardware_isolated());

        self.inner
            .manage_io_port_intercept_region(*range.start(), *range.end(), false);
        HostIoPortFastPathHandle {
            inner: Arc::downgrade(&self.inner),
            begin: *range.start(),
            end: *range.end(),
        }
    }

    /// Asserts a debug interrupt (LINT1) on the BSP of the given VTL.
    pub fn assert_debug_interrupt(&self, _vtl: u8) {
        #[cfg(guest_arch = "x86_64")]
        const LINT_INDEX_1: u8 = 1;
        #[cfg(guest_arch = "x86_64")]
        match self.inner.isolation {
            IsolationType::Snp => {
                tracing::error!(?_vtl, "debug interrupts cannot be injected into SNP VMs");
            }
            _ => {
                let bsp_index = VpIndex::new(0);
                self.pulse_lint(bsp_index, Vtl::try_from(_vtl).unwrap(), LINT_INDEX_1)
            }
        }
    }

    /// Enables or disables the hypervisor's PM timer assist for the given port.
    pub fn set_pm_timer_assist(
        &self,
        port: Option<u16>,
    ) -> Result<(), hcl::ioctl::register::SetRegError> {
        self.inner.hcl.set_pm_timer_assist(port)
    }

    fn register_cvm_dma_overlay_page(
        &self,
        vtl: GuestVtl,
        gpn: u64,
        new_perms: HvMapGpaFlags,
    ) -> anyhow::Result<()> {
        match &self.inner.backing_shared {
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(snp_backed_shared) => snp_backed_shared
                .cvm
                .isolated_memory_protector
                .register_overlay_page(
                    vtl,
                    gpn,
                    GpnSource::Dma,
                    HvMapGpaFlags::new(),
                    Some(new_perms),
                    &mut SnpBacked::tlb_flush_lock_access(
                        None,
                        self.inner.as_ref(),
                        snp_backed_shared,
                    ),
                )
                .map_err(|e| anyhow::anyhow!(e)),
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Tdx(tdx_backed_shared) => tdx_backed_shared
                .cvm
                .isolated_memory_protector
                .register_overlay_page(
                    vtl,
                    gpn,
                    GpnSource::Dma,
                    HvMapGpaFlags::new(),
                    Some(new_perms),
                    &mut TdxBacked::tlb_flush_lock_access(
                        None,
                        self.inner.as_ref(),
                        tdx_backed_shared,
                    ),
                )
                .map_err(|e| anyhow::anyhow!(e)),
            BackingShared::Hypervisor(_) => {
                let _ = (vtl, gpn, new_perms);
                unreachable!()
            }
        }
    }

    fn unregister_cvm_dma_overlay_page(&self, vtl: GuestVtl, gpn: u64) -> anyhow::Result<()> {
        match &self.inner.backing_shared {
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Snp(snp_backed_shared) => snp_backed_shared
                .cvm
                .isolated_memory_protector
                .unregister_overlay_page(
                    vtl,
                    gpn,
                    &mut SnpBacked::tlb_flush_lock_access(
                        None,
                        self.inner.as_ref(),
                        snp_backed_shared,
                    ),
                )
                .map_err(|e| anyhow::anyhow!(e)),
            #[cfg(guest_arch = "x86_64")]
            BackingShared::Tdx(tdx_backed_shared) => tdx_backed_shared
                .cvm
                .isolated_memory_protector
                .unregister_overlay_page(
                    vtl,
                    gpn,
                    &mut TdxBacked::tlb_flush_lock_access(
                        None,
                        self.inner.as_ref(),
                        tdx_backed_shared,
                    ),
                )
                .map_err(|e| anyhow::anyhow!(e)),
            BackingShared::Hypervisor(_) => {
                let _ = (vtl, gpn);
                unreachable!()
            }
        }
    }
}

impl UhProtoPartition<'_> {
    fn check_guest_vsm_support(privs: HvPartitionPrivilege, hcl: &Hcl) -> Result<bool, Error> {
        if !privs.access_vsm() {
            return Ok(false);
        }

        let guest_vsm_config = hcl
            .get_guest_vsm_partition_config()
            .map_err(Error::GetReg)?;
        Ok(guest_vsm_config.maximum_vtl() >= u8::from(GuestVtl::Vtl1))
    }

    #[cfg(guest_arch = "x86_64")]
    fn construct_cvm_state(
        params: &UhPartitionNewParams<'_>,
        late_params: CvmLateParams,
        caps: &PartitionCapabilities,
        guest_vsm_available: bool,
        proxy_interrupt_redirect_available: bool,
    ) -> Result<UhCvmPartitionState, Error> {
        use vmcore::reference_time::ReferenceTimeSource;

        let vp_count = params.topology.vp_count() as usize;
        let vps = (0..vp_count)
            .map(|vp_index| UhCvmVpInner {
                tlb_lock_info: VtlArray::from_fn(|_| TlbLockInfo::new(vp_count)),
                vtl1_enable_called: Mutex::new(false),
                started: AtomicBool::new(vp_index == 0),
                hv_start_enable_vtl_vp: VtlArray::from_fn(|_| Mutex::new(None)),
                proxy_redirect_interrupts: Mutex::new(HashMap::new()),
            })
            .collect();
        let tlb_locked_vps =
            VtlArray::from_fn(|_| BitVec::repeat(false, vp_count).into_boxed_bitslice());

        let lapic = VtlArray::from_fn(|_| {
            LocalApicSet::builder()
                .x2apic_capable(caps.x2apic)
                .hyperv_enlightenments(true)
                .build()
        });

        let tsc_frequency = get_tsc_frequency(params.isolation)?;
        let ref_time = ReferenceTimeSource::new(TscReferenceTimeSource::new(tsc_frequency));

        let hv = GlobalHv::new(hv1_emulator::hv::GlobalHvParams {
            max_vp_count: params.topology.vp_count(),
            vendor: caps.vendor,
            tsc_frequency,
            ref_time,
            is_ref_time_backed_by_tsc: true,
        });

        Ok(UhCvmPartitionState {
            vps_per_socket: params.topology.reserved_vps_per_socket(),
            tlb_locked_vps,
            vps,
            shared_memory: late_params.shared_gm,
            isolated_memory_protector: late_params.isolated_memory_protector,
            lapic,
            hv,
            guest_vsm: RwLock::new(GuestVsmState::from_availability(guest_vsm_available)),
            shared_dma_client: late_params.shared_dma_client,
            private_dma_client: late_params.private_dma_client,
            hide_isolation: params.hide_isolation,
            proxy_interrupt_redirect: proxy_interrupt_redirect_available,
        })
    }
}

impl UhPartition {
    #[cfg(guest_arch = "x86_64")]
    fn construct_cpuid_results(
        cpuid: virt::CpuidLeafSet,
        initial_cpuid: &[CpuidLeaf],
        topology: &ProcessorTopology<vm_topology::processor::x86::X86Topology>,
        isolation: IsolationType,
        hide_isolation: bool,
    ) -> virt::CpuidLeafSet {
        let mut cpuid = cpuid.into_leaves();
        if isolation.is_hardware_isolated() {
            let x2apic = match topology.apic_mode() {
                vm_topology::processor::x86::ApicMode::XApic => false,
                vm_topology::processor::x86::ApicMode::X2ApicSupported => true,
                vm_topology::processor::x86::ApicMode::X2ApicEnabled => true,
            };
            let ecx = x86defs::cpuid::VersionAndFeaturesEcx::new().with_x2_apic(x2apic);
            let ecx_mask = x86defs::cpuid::VersionAndFeaturesEcx::new().with_x2_apic(true);
            cpuid.push(
                CpuidLeaf::new(
                    x86defs::cpuid::CpuidFunction::VersionAndFeatures.0,
                    [0, 0, ecx.into(), 0],
                )
                .masked([0, 0, ecx_mask.into(), 0]),
            );

            let hv_version = safe_intrinsics::cpuid(hvdef::HV_CPUID_FUNCTION_MS_HV_VERSION, 0);

            hv1_emulator::cpuid::process_hv_cpuid_leaves(
                &mut cpuid,
                hide_isolation,
                [
                    hv_version.eax,
                    hv_version.ebx,
                    hv_version.ecx,
                    hv_version.edx,
                ],
            );
        }
        cpuid.extend(initial_cpuid);
        virt::CpuidLeafSet::new(cpuid)
    }

    #[cfg(guest_arch = "x86_64")]
    fn construct_capabilities(
        topology: &ProcessorTopology,
        cpuid: &virt::CpuidLeafSet,
        isolation: IsolationType,
        hide_isolation: bool,
    ) -> Result<virt::x86::X86PartitionCapabilities, virt::x86::X86PartitionCapabilitiesError> {
        let mut native_cpuid_fn;
        let mut cvm_cpuid_fn;

        let cpuid_fn: &mut dyn FnMut(u32, u32) -> [u32; 4] = if isolation.is_hardware_isolated() {
            cvm_cpuid_fn = move |leaf, sub_leaf| cpuid.result(leaf, sub_leaf, &[0, 0, 0, 0]);
            &mut cvm_cpuid_fn
        } else {
            native_cpuid_fn = |leaf, sub_leaf| {
                let CpuidResult { eax, ebx, ecx, edx } = safe_intrinsics::cpuid(leaf, sub_leaf);
                cpuid.result(leaf, sub_leaf, &[eax, ebx, ecx, edx])
            };
            &mut native_cpuid_fn
        };

        let mut caps = virt::x86::X86PartitionCapabilities::from_cpuid(topology, cpuid_fn)?;
        match isolation {
            IsolationType::Tdx => {
                assert_eq!(caps.vtom.is_some(), !hide_isolation);
                // TDX forces EFER.NXE to be set.
                caps.nxe_forced_on = true;
            }
            IsolationType::Snp => {
                assert_eq!(caps.vtom.is_some(), !hide_isolation);
            }
            _ => {
                assert!(caps.vtom.is_none());
            }
        }

        Ok(caps)
    }
}

#[cfg(guest_arch = "x86_64")]
fn get_tsc_frequency(isolation: IsolationType) -> Result<u64, Error> {
    // Read the TSC frequency reported by the hypervisor.
    let msr = MsrDevice::new(0).map_err(Error::OpenMsr)?;
    let hv_frequency = msr
        .read_msr(hvdef::HV_X64_MSR_TSC_FREQUENCY)
        .map_err(Error::ReadTscFrequency)?;

    // Cross-check against a hardware-reported value where one is available.
    let hw_info = match isolation {
        IsolationType::Tdx => {
            // TDX reports the TSC frequency via CPUID leaf 0x15.
            let max_function =
                safe_intrinsics::cpuid(x86defs::cpuid::CpuidFunction::VendorAndMaxFunction.0, 0)
                    .eax;

            if max_function < x86defs::cpuid::CpuidFunction::CoreCrystalClockInformation.0 {
                return Err(Error::BadCpuidTsc);
            }
            let result = safe_intrinsics::cpuid(
                x86defs::cpuid::CpuidFunction::CoreCrystalClockInformation.0,
                0,
            );
            let ratio_denom = result.eax;
            let ratio_num = result.ebx;
            let clock = result.ecx;
            if ratio_num == 0 || ratio_denom == 0 || clock == 0 {
                return Err(Error::BadCpuidTsc);
            }
            // Allow a small difference between the hypervisor- and
            // hardware-reported frequencies.
            let allowed_error = 12_500_000;
            Some((
                clock as u64 * ratio_num as u64 / ratio_denom as u64,
                allowed_error,
            ))
        }
        IsolationType::Snp | IsolationType::Vbs | IsolationType::None => None,
    };

    if let Some((hw_frequency, allowed_error)) = hw_info {
        let delta = hw_frequency.abs_diff(hv_frequency);
        if delta > allowed_error {
            return Err(Error::TscFrequencyMismatch {
                hv: hv_frequency,
                hw: hw_frequency,
                allowed_error,
            });
        }
    }

    Ok(hv_frequency)
}
2326
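// A small sanity check of the crystal-clock arithmetic above: CPUID leaf 0x15
// reports the TSC/crystal ratio as ebx/eax and the crystal frequency in ecx,
// so the TSC frequency is ecx * ebx / eax. The values below are illustrative
// (a 25 MHz crystal with a 100/1 ratio), not taken from real hardware.
#[cfg(test)]
mod tsc_cpuid_tests {
    #[test]
    fn crystal_clock_ratio_math() {
        let ratio_denom = 1_u32; // eax
        let ratio_num = 100_u32; // ebx
        let clock = 25_000_000_u32; // ecx
        let tsc_hz = clock as u64 * ratio_num as u64 / ratio_denom as u64;
        assert_eq!(tsc_hz, 2_500_000_000);
    }
}
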
impl UhPartitionInner {
    fn manage_io_port_intercept_region(&self, begin: u16, end: u16, active: bool) {
        if self.isolation.is_hardware_isolated() {
            return;
        }

        static SKIP_RANGE: AtomicBool = AtomicBool::new(false);

        let access_type_mask = if active {
            HV_INTERCEPT_ACCESS_MASK_READ_WRITE
        } else {
            HV_INTERCEPT_ACCESS_MASK_NONE
        };

        if !SKIP_RANGE.load(Ordering::Relaxed) {
            match self.hcl.register_intercept(
                HvInterceptType::HvInterceptTypeX64IoPortRange,
                access_type_mask,
                HvInterceptParameters::new_io_port_range(begin..=end),
            ) {
                Ok(()) => return,
                Err(HvError::InvalidParameter) => {
                    // The hypervisor does not support range-based intercepts;
                    // fall back to registering one intercept per port.
                    SKIP_RANGE.store(true, Ordering::Relaxed);
                    tracing::warn!(
                        CVM_ALLOWED,
                        "old hypervisor build; using slow path for intercept ranges"
                    );
                }
                Err(err) => {
                    panic!("io port range registration failure: {err:?}");
                }
            }
        }

        for port in begin..=end {
            self.hcl
                .register_intercept(
                    HvInterceptType::HvInterceptTypeX64IoPort,
                    access_type_mask,
                    HvInterceptParameters::new_io_port(port),
                )
                .expect("registering io intercept cannot fail");
        }
    }

    fn is_gpa_lower_vtl_ram(&self, gpa: u64) -> bool {
        self.lower_vtl_memory_layout
            .ram()
            .iter()
            .any(|m| m.range.contains_addr(gpa))
    }

    fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool {
        // The monitor page is protected against guest writes, so treat it as
        // unmapped for write accesses.
        if self.is_gpa_lower_vtl_ram(gpa) {
            !write || self.monitor_page.gpa() != Some(gpa & !(HV_PAGE_SIZE - 1))
        } else {
            false
        }
    }
}

/// A handle returned by [`UhPartition::register_host_io_port_fast_path`].
///
/// When dropped, the I/O port range is re-intercepted by VTL 2.
#[must_use]
pub struct HostIoPortFastPathHandle {
    inner: Weak<UhPartitionInner>,
    begin: u16,
    end: u16,
}

impl Drop for HostIoPortFastPathHandle {
    fn drop(&mut self) {
        if let Some(inner) = self.inner.upgrade() {
            inner.manage_io_port_intercept_region(self.begin, self.end, true);
        }
    }
}

/// A crash notification from a lower VTL.
#[derive(Copy, Clone, Debug)]
pub struct VtlCrash {
    /// The VP that reported the crash.
    pub vp_index: VpIndex,
    /// The VTL that crashed.
    pub last_vtl: GuestVtl,
    /// The crash control value.
    pub control: GuestCrashCtl,
    /// The crash parameters.
    pub parameters: [u64; 5],
}

/// Validates that the given VTL GPA protection flags are a valid combination
/// for the partition's current configuration.
#[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
fn validate_vtl_gpa_flags(
    flags: HvMapGpaFlags,
    mbec_enabled: bool,
    shadow_supervisor_stack_enabled: bool,
) -> bool {
    // The adjustable flag may not be set by the guest.
    if flags.adjustable() {
        return false;
    }

    // Kernel and user execute permissions may only differ when MBEC is
    // enabled, and kernel-executable without user-executable is never valid.
    if flags.kernel_executable() != flags.user_executable() {
        if (flags.kernel_executable() && !flags.user_executable()) || !mbec_enabled {
            return false;
        }
    }

    // All other permissions require read access.
    if flags.writable()
        || flags.kernel_executable()
        || flags.user_executable()
        || flags.supervisor_shadow_stack()
        || flags.paging_writability()
        || flags.verify_paging_writability()
    {
        if !flags.readable() {
            return false;
        }
    }

    // Supervisor shadow stack requires execute permission and cannot be set
    // once shadow supervisor stacks are enabled.
    if flags.supervisor_shadow_stack()
        && ((!flags.kernel_executable() && !flags.user_executable())
            || shadow_supervisor_stack_enabled)
    {
        return false;
    }

    true
}
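
// Test sketches exercising the rules implemented by `validate_vtl_gpa_flags`
// above. The `with_*` setters are assumed to mirror the getter names used in
// the function, per the usual bitfield convention in this codebase.
#[cfg(test)]
mod vtl_gpa_flags_tests {
    use super::*;

    #[test]
    fn read_only_is_valid() {
        let flags = HvMapGpaFlags::new().with_readable(true);
        assert!(validate_vtl_gpa_flags(flags, false, false));
    }

    #[test]
    fn write_requires_read() {
        let flags = HvMapGpaFlags::new().with_writable(true);
        assert!(!validate_vtl_gpa_flags(flags, false, false));
    }

    #[test]
    fn split_execute_requires_mbec() {
        // User-executable-only is the MBEC-style split; kernel-executable-only
        // is never valid.
        let user_only = HvMapGpaFlags::new()
            .with_readable(true)
            .with_user_executable(true);
        assert!(!validate_vtl_gpa_flags(user_only, false, false));
        assert!(validate_vtl_gpa_flags(user_only, true, false));

        let kernel_only = HvMapGpaFlags::new()
            .with_readable(true)
            .with_kernel_executable(true);
        assert!(!validate_vtl_gpa_flags(kernel_only, true, false));
    }
}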