hvdef/
lib.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Microsoft hypervisor definitions.
5
6#![expect(missing_docs)]
7#![forbid(unsafe_code)]
8#![no_std]
9
10use bitfield_struct::bitfield;
11use core::fmt::Debug;
12use core::mem::size_of;
13use open_enum::open_enum;
14use static_assertions::const_assert;
15use zerocopy::FromBytes;
16use zerocopy::FromZeros;
17use zerocopy::Immutable;
18use zerocopy::IntoBytes;
19use zerocopy::KnownLayout;
20
/// Size in bytes of a hypervisor page.
pub const HV_PAGE_SIZE: u64 = 4096;
/// Same value as [`HV_PAGE_SIZE`], typed `usize` for indexing/allocation math.
pub const HV_PAGE_SIZE_USIZE: usize = 4096;
/// log2 of [`HV_PAGE_SIZE`].
pub const HV_PAGE_SHIFT: u64 = 12;

/// Partition ID alias referring to the calling partition.
pub const HV_PARTITION_ID_SELF: u64 = u64::MAX;
/// VP index alias referring to the calling virtual processor.
pub const HV_VP_INDEX_SELF: u32 = 0xfffffffe;

// CPUID leaves used to discover and describe the hypervisor interface.
pub const HV_CPUID_FUNCTION_VERSION_AND_FEATURES: u32 = 0x00000001;
pub const HV_CPUID_FUNCTION_HV_VENDOR_AND_MAX_FUNCTION: u32 = 0x40000000;
pub const HV_CPUID_FUNCTION_HV_INTERFACE: u32 = 0x40000001;
pub const HV_CPUID_FUNCTION_MS_HV_VERSION: u32 = 0x40000002;
pub const HV_CPUID_FUNCTION_MS_HV_FEATURES: u32 = 0x40000003;
pub const HV_CPUID_FUNCTION_MS_HV_ENLIGHTENMENT_INFORMATION: u32 = 0x40000004;
pub const HV_CPUID_FUNCTION_MS_HV_IMPLEMENTATION_LIMITS: u32 = 0x40000005;
pub const HV_CPUID_FUNCTION_MS_HV_HARDWARE_FEATURES: u32 = 0x40000006;
pub const HV_CPUID_FUNCTION_MS_HV_ISOLATION_CONFIGURATION: u32 = 0x4000000C;

// CPUID leaves exposed by the virtualization stack (not the hypervisor).
pub const VIRTUALIZATION_STACK_CPUID_VENDOR: u32 = 0x40000080;
pub const VIRTUALIZATION_STACK_CPUID_INTERFACE: u32 = 0x40000081;
pub const VIRTUALIZATION_STACK_CPUID_PROPERTIES: u32 = 0x40000082;
41
/// The result of querying the VIRTUALIZATION_STACK_CPUID_PROPERTIES leaf.
///
/// The current partition is considered "portable": the virtualization stack may
/// attempt to bring up the partition on another physical machine.
// Fixed: literals were written with nine hex digits (0x000000001); normalized
// to the file's eight-digit convention. Values are unchanged.
pub const VS1_PARTITION_PROPERTIES_EAX_IS_PORTABLE: u32 = 0x00000001;
/// The current partition has a synthetic debug device available to it.
pub const VS1_PARTITION_PROPERTIES_EAX_DEBUG_DEVICE_PRESENT: u32 = 0x00000002;
/// Extended I/O APIC RTEs are supported for the current partition.
pub const VS1_PARTITION_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE: u32 = 0x00000004;

/// SMCCC UID for the Microsoft Hypervisor.
pub const VENDOR_HYP_UID_MS_HYPERVISOR: [u32; 4] = [0x4d32ba58, 0xcd244764, 0x8eef6c75, 0x16597024];
54
/// Partition privilege flags (the low 64 bits of [`HvFeatures`]).
///
/// Bits are assigned from the LSB upward, per the `bitfield` macro.
#[bitfield(u64)]
pub struct HvPartitionPrivilege {
    // Bits 0-15: access to virtual MSRs.
    pub access_vp_runtime_msr: bool,
    pub access_partition_reference_counter: bool,
    pub access_synic_msrs: bool,
    pub access_synthetic_timer_msrs: bool,
    pub access_apic_msrs: bool,
    pub access_hypercall_msrs: bool,
    pub access_vp_index: bool,
    pub access_reset_msr: bool,
    pub access_stats_msr: bool,
    pub access_partition_reference_tsc: bool,
    pub access_guest_idle_msr: bool,
    pub access_frequency_msrs: bool,
    pub access_debug_msrs: bool,
    pub access_reenlightenment_ctrls: bool,
    pub access_root_scheduler_msr: bool,
    pub access_tsc_invariant_controls: bool,
    _reserved1: u16,

    // Bits 32+: access to hypercalls.
    pub create_partitions: bool,
    pub access_partition_id: bool,
    pub access_memory_pool: bool,
    pub adjust_message_buffers: bool,
    pub post_messages: bool,
    pub signal_events: bool,
    pub create_port: bool,
    pub connect_port: bool,
    pub access_stats: bool,
    #[bits(2)]
    _reserved2: u64,
    pub debugging: bool,
    pub cpu_management: bool,
    pub configure_profiler: bool,
    pub access_vp_exit_tracing: bool,
    pub enable_extended_gva_ranges_flush_va_list: bool,
    pub access_vsm: bool,
    pub access_vp_registers: bool,
    _unused_bit: bool,
    pub fast_hypercall_output: bool,
    pub enable_extended_hypercalls: bool,
    pub start_virtual_processor: bool,
    pub isolation: bool,
    #[bits(9)]
    _reserved3: u64,
}
103
open_enum! {
    // The isolation architecture of a partition, as reported by the
    // HV_CPUID_FUNCTION_MS_HV_ISOLATION_CONFIGURATION leaf.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvPartitionIsolationType: u8 {
        NONE = 0,
        // Virtualization-based security (software) isolation.
        VBS = 1,
        // AMD SEV-SNP hardware isolation.
        SNP = 2,
        // Intel TDX hardware isolation.
        TDX = 3,
    }
}
113
/// The contents of the HV_CPUID_FUNCTION_MS_HV_FEATURES CPUID leaf:
/// EAX..=EDX packed into 128 bits, with the partition privileges
/// occupying the low 64 bits (EAX:EBX).
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvFeatures {
    #[bits(64)]
    pub privileges: HvPartitionPrivilege,

    // ECX: power-management related bits.
    #[bits(4)]
    pub max_supported_cstate: u32,
    pub hpet_needed_for_c3_power_state_deprecated: bool,
    pub invariant_mperf_available: bool,
    pub supervisor_shadow_stack_available: bool,
    pub arch_pmu_available: bool,
    pub exception_trap_intercept_available: bool,
    #[bits(23)]
    reserved: u32,

    // EDX: miscellaneous feature bits.
    pub mwait_available_deprecated: bool,
    pub guest_debugging_available: bool,
    pub performance_monitors_available: bool,
    pub cpu_dynamic_partitioning_available: bool,
    pub xmm_registers_for_fast_hypercall_available: bool,
    pub guest_idle_available: bool,
    pub hypervisor_sleep_state_support_available: bool,
    pub numa_distance_query_available: bool,
    pub frequency_regs_available: bool,
    pub synthetic_machine_check_available: bool,
    pub guest_crash_regs_available: bool,
    pub debug_regs_available: bool,
    pub npiep1_available: bool,
    pub disable_hypervisor_available: bool,
    pub extended_gva_ranges_for_flush_virtual_address_list_available: bool,
    pub fast_hypercall_output_available: bool,
    pub svm_features_available: bool,
    pub sint_polling_mode_available: bool,
    pub hypercall_msr_lock_available: bool,
    pub direct_synthetic_timers: bool,
    pub register_pat_available: bool,
    pub register_bndcfgs_available: bool,
    pub watchdog_timer_available: bool,
    pub synthetic_time_unhalted_timer_available: bool,
    pub device_domains_available: bool,    // HDK only.
    pub s1_device_domains_available: bool, // HDK only.
    pub lbr_available: bool,
    pub ipt_available: bool,
    pub cross_vtl_flush_available: bool,
    pub idle_spec_ctrl_available: bool,
    pub translate_gva_flags_available: bool,
    pub apic_eoi_intercept_available: bool,
}
163
impl HvFeatures {
    /// Reinterprets the `[eax, ebx, ecx, edx]` output of the features CPUID
    /// leaf as an `HvFeatures` value (a byte-for-byte transmute, no
    /// validation).
    pub fn from_cpuid(cpuid: [u32; 4]) -> Self {
        zerocopy::transmute!(cpuid)
    }

    /// Converts back into the `[eax, ebx, ecx, edx]` CPUID register layout.
    pub fn into_cpuid(self) -> [u32; 4] {
        zerocopy::transmute!(self)
    }
}
173
/// Enlightenment recommendations, as reported by the
/// HV_CPUID_FUNCTION_MS_HV_ENLIGHTENMENT_INFORMATION CPUID leaf
/// (EAX..=EDX packed into 128 bits).
#[bitfield(u128)]
pub struct HvEnlightenmentInformation {
    pub use_hypercall_for_address_space_switch: bool,
    pub use_hypercall_for_local_flush: bool,
    pub use_hypercall_for_remote_flush_and_local_flush_entire: bool,
    pub use_apic_msrs: bool,
    pub use_hv_register_for_reset: bool,
    pub use_relaxed_timing: bool,
    pub use_dma_remapping_deprecated: bool,
    pub use_interrupt_remapping_deprecated: bool,
    pub use_x2_apic_msrs: bool,
    pub deprecate_auto_eoi: bool,
    pub use_synthetic_cluster_ipi: bool,
    pub use_ex_processor_masks: bool,
    pub nested: bool,
    pub use_int_for_mbec_system_calls: bool,
    pub use_vmcs_enlightenments: bool,
    pub use_synced_timeline: bool,
    pub core_scheduler_requested: bool,
    pub use_direct_local_flush_entire: bool,
    pub no_non_architectural_core_sharing: bool,
    pub use_x2_apic: bool,
    pub restore_time_on_resume: bool,
    pub use_hypercall_for_mmio_access: bool,
    pub use_gpa_pinning_hypercall: bool,
    pub wake_vps: bool,
    _reserved: u8,
    // EBX: recommended number of spinlock retries before notifying the
    // hypervisor (per the field name; semantics defined by the TLFS).
    pub long_spin_wait_count: u32,
    pub implemented_physical_address_bits: u32,
    #[bits(25)]
    _reserved1: u32,
    _reserved2: u32,
}
208
/// Hardware feature usage, as reported by the
/// HV_CPUID_FUNCTION_MS_HV_HARDWARE_FEATURES CPUID leaf
/// (EAX..=EDX packed into 128 bits).
#[bitfield(u128)]
pub struct HvHardwareFeatures {
    pub apic_overlay_assist_in_use: bool,
    pub msr_bitmaps_in_use: bool,
    pub architectural_performance_counters_in_use: bool,
    pub second_level_address_translation_in_use: bool,
    pub dma_remapping_in_use: bool,
    pub interrupt_remapping_in_use: bool,
    pub memory_patrol_scrubber_present: bool,
    pub dma_protection_in_use: bool,
    pub hpet_requested: bool,
    pub synthetic_timers_volatile: bool,
    #[bits(4)]
    pub hypervisor_level: u32,
    pub physical_destination_mode_required: bool,
    pub use_vmfunc_for_alias_map_switch: bool,
    pub hv_register_for_memory_zeroing_supported: bool,
    pub unrestricted_guest_supported: bool,
    pub rdt_afeatures_supported: bool,
    pub rdt_mfeatures_supported: bool,
    pub child_perfmon_pmu_supported: bool,
    pub child_perfmon_lbr_supported: bool,
    pub child_perfmon_ipt_supported: bool,
    pub apic_emulation_supported: bool,
    pub child_x2_apic_recommended: bool,
    pub hardware_watchdog_reserved: bool,
    pub device_access_tracking_supported: bool,
    pub hardware_gpa_access_tracking_supported: bool,
    #[bits(4)]
    _reserved: u32,

    pub device_domain_input_width: u8,
    #[bits(24)]
    _reserved1: u32,
    _reserved2: u32,
    _reserved3: u32,
}
246
/// Isolation configuration, as reported by the
/// HV_CPUID_FUNCTION_MS_HV_ISOLATION_CONFIGURATION CPUID leaf
/// (EAX..=EDX packed into 128 bits).
#[bitfield(u128)]
pub struct HvIsolationConfiguration {
    pub paravisor_present: bool,
    #[bits(31)]
    pub _reserved0: u32,

    // Matches the values of [`HvPartitionIsolationType`].
    #[bits(4)]
    pub isolation_type: u8,
    _reserved11: bool,
    pub shared_gpa_boundary_active: bool,
    // Position of the shared-GPA boundary, expressed as a bit index
    // (i.e. the boundary is at 1 << shared_gpa_boundary_bits).
    #[bits(6)]
    pub shared_gpa_boundary_bits: u8,
    #[bits(20)]
    _reserved12: u32,
    _reserved2: u32,
    _reserved3: u32,
}
264
open_enum! {
    // Hypercall call codes, passed in the `code` field of the hypercall
    // input value (see `hypercall::Control`). Codes >= 0x8000 are extended
    // hypercalls implemented by the virtualization stack.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HypercallCode: u16 {
        #![expect(non_upper_case_globals)]

        HvCallSwitchVirtualAddressSpace = 0x0001,
        HvCallFlushVirtualAddressSpace = 0x0002,
        HvCallFlushVirtualAddressList = 0x0003,
        HvCallNotifyLongSpinWait = 0x0008,
        HvCallInvokeHypervisorDebugger = 0x000a,
        HvCallSendSyntheticClusterIpi = 0x000b,
        HvCallModifyVtlProtectionMask = 0x000c,
        HvCallEnablePartitionVtl = 0x000d,
        HvCallEnableVpVtl = 0x000f,
        HvCallVtlCall = 0x0011,
        HvCallVtlReturn = 0x0012,
        HvCallFlushVirtualAddressSpaceEx = 0x0013,
        HvCallFlushVirtualAddressListEx = 0x0014,
        HvCallSendSyntheticClusterIpiEx = 0x0015,
        HvCallInstallIntercept = 0x004d,
        HvCallGetVpRegisters = 0x0050,
        HvCallSetVpRegisters = 0x0051,
        HvCallTranslateVirtualAddress = 0x0052,
        HvCallPostMessage = 0x005C,
        HvCallSignalEvent = 0x005D,
        HvCallOutputDebugCharacter = 0x0071,
        HvCallRetargetDeviceInterrupt = 0x007e,
        HvCallNotifyPartitionEvent = 0x0087,
        HvCallAssertVirtualInterrupt = 0x0094,
        HvCallStartVirtualProcessor = 0x0099,
        HvCallGetVpIndexFromApicId = 0x009A,
        HvCallTranslateVirtualAddressEx = 0x00AC,
        HvCallCheckForIoIntercept = 0x00ad,
        HvCallFlushGuestPhysicalAddressSpace = 0x00AF,
        HvCallFlushGuestPhysicalAddressList = 0x00B0,
        HvCallSignalEventDirect = 0x00C0,
        HvCallPostMessageDirect = 0x00C1,
        HvCallCheckSparseGpaPageVtlAccess = 0x00D4,
        HvCallAcceptGpaPages = 0x00D9,
        HvCallModifySparseGpaPageHostVisibility = 0x00DB,
        HvCallMemoryMappedIoRead = 0x0106,
        HvCallMemoryMappedIoWrite = 0x0107,
        HvCallPinGpaPageRanges = 0x0112,
        HvCallUnpinGpaPageRanges = 0x0113,
        HvCallQuerySparseGpaPageHostVisibility = 0x011C,

        // Extended hypercalls.
        HvExtCallQueryCapabilities = 0x8001,
    }
}
315
// Synthetic MSRs exposed by the hypervisor on x64.

// Identification and hypercall setup.
pub const HV_X64_MSR_GUEST_OS_ID: u32 = 0x40000000;
pub const HV_X64_MSR_HYPERCALL: u32 = 0x40000001;
pub const HV_X64_MSR_VP_INDEX: u32 = 0x40000002;

// Reference time and frequencies.
pub const HV_X64_MSR_TIME_REF_COUNT: u32 = 0x40000020;
pub const HV_X64_MSR_REFERENCE_TSC: u32 = 0x40000021;
pub const HV_X64_MSR_TSC_FREQUENCY: u32 = 0x40000022;
pub const HV_X64_MSR_APIC_FREQUENCY: u32 = 0x40000023;

// APIC access.
pub const HV_X64_MSR_EOI: u32 = 0x40000070;
pub const HV_X64_MSR_ICR: u32 = 0x40000071;
pub const HV_X64_MSR_TPR: u32 = 0x40000072;
pub const HV_X64_MSR_VP_ASSIST_PAGE: u32 = 0x40000073;

// Synthetic interrupt controller (SynIC).
pub const HV_X64_MSR_SCONTROL: u32 = 0x40000080;
pub const HV_X64_MSR_SVERSION: u32 = 0x40000081;
pub const HV_X64_MSR_SIEFP: u32 = 0x40000082;
pub const HV_X64_MSR_SIMP: u32 = 0x40000083;
pub const HV_X64_MSR_EOM: u32 = 0x40000084;
// The 16 synthetic interrupt source (SINT) registers.
pub const HV_X64_MSR_SINT0: u32 = 0x40000090;
pub const HV_X64_MSR_SINT1: u32 = 0x40000091;
pub const HV_X64_MSR_SINT2: u32 = 0x40000092;
pub const HV_X64_MSR_SINT3: u32 = 0x40000093;
pub const HV_X64_MSR_SINT4: u32 = 0x40000094;
pub const HV_X64_MSR_SINT5: u32 = 0x40000095;
pub const HV_X64_MSR_SINT6: u32 = 0x40000096;
pub const HV_X64_MSR_SINT7: u32 = 0x40000097;
pub const HV_X64_MSR_SINT8: u32 = 0x40000098;
pub const HV_X64_MSR_SINT9: u32 = 0x40000099;
pub const HV_X64_MSR_SINT10: u32 = 0x4000009a;
pub const HV_X64_MSR_SINT11: u32 = 0x4000009b;
pub const HV_X64_MSR_SINT12: u32 = 0x4000009c;
pub const HV_X64_MSR_SINT13: u32 = 0x4000009d;
pub const HV_X64_MSR_SINT14: u32 = 0x4000009e;
pub const HV_X64_MSR_SINT15: u32 = 0x4000009f;

// Synthetic timers: a config/count MSR pair per timer.
pub const HV_X64_MSR_STIMER0_CONFIG: u32 = 0x400000b0;
pub const HV_X64_MSR_STIMER0_COUNT: u32 = 0x400000b1;
pub const HV_X64_MSR_STIMER1_CONFIG: u32 = 0x400000b2;
pub const HV_X64_MSR_STIMER1_COUNT: u32 = 0x400000b3;
pub const HV_X64_MSR_STIMER2_CONFIG: u32 = 0x400000b4;
pub const HV_X64_MSR_STIMER2_COUNT: u32 = 0x400000b5;
pub const HV_X64_MSR_STIMER3_CONFIG: u32 = 0x400000b6;
pub const HV_X64_MSR_STIMER3_COUNT: u32 = 0x400000b7;

// Guest idle and guest crash reporting.
pub const HV_X64_MSR_GUEST_IDLE: u32 = 0x400000F0;
pub const HV_X64_MSR_GUEST_CRASH_P0: u32 = 0x40000100;
pub const HV_X64_MSR_GUEST_CRASH_P1: u32 = 0x40000101;
pub const HV_X64_MSR_GUEST_CRASH_P2: u32 = 0x40000102;
pub const HV_X64_MSR_GUEST_CRASH_P3: u32 = 0x40000103;
pub const HV_X64_MSR_GUEST_CRASH_P4: u32 = 0x40000104;
pub const HV_X64_MSR_GUEST_CRASH_CTL: u32 = 0x40000105;

/// Number of guest crash parameter MSRs (P0..=P4).
pub const HV_X64_GUEST_CRASH_PARAMETER_MSRS: usize = 5;
365
/// A hypervisor status code.
///
/// The non-success status codes are defined in [`HvError`].
///
/// `repr(transparent)` over the raw 16-bit value so it can be used directly
/// in wire-format structures.
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
#[repr(transparent)]
pub struct HvStatus(pub u16);
372
373impl HvStatus {
374    /// The success status code.
375    pub const SUCCESS: Self = Self(0);
376
377    /// Returns `Ok(())` if this is `HvStatus::SUCCESS`, otherwise returns an
378    /// `Err(err)` where `err` is the corresponding `HvError`.
379    pub fn result(self) -> HvResult<()> {
380        if let Ok(err) = self.0.try_into() {
381            Err(HvError(err))
382        } else {
383            Ok(())
384        }
385    }
386
387    /// Returns true if this is `HvStatus::SUCCESS`.
388    pub fn is_ok(self) -> bool {
389        self == Self::SUCCESS
390    }
391
392    /// Returns true if this is not `HvStatus::SUCCESS`.
393    pub fn is_err(self) -> bool {
394        self != Self::SUCCESS
395    }
396
397    const fn from_bits(bits: u16) -> Self {
398        Self(bits)
399    }
400
401    const fn into_bits(self) -> u16 {
402        self.0
403    }
404}
405
406impl From<Result<(), HvError>> for HvStatus {
407    fn from(err: Result<(), HvError>) -> Self {
408        err.err().map_or(Self::SUCCESS, |err| Self(err.0.get()))
409    }
410}
411
412impl Debug for HvStatus {
413    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
414        match self.result() {
415            Ok(()) => f.write_str("Success"),
416            Err(err) => Debug::fmt(&err, f),
417        }
418    }
419}
420
/// An [`HvStatus`] value representing an error.
//
// DEVNOTE: use `NonZeroU16` to get a niche optimization, since 0 is reserved
// for success. This makes `Option<HvError>` (and `HvResult<()>`) the same
// size as the raw code.
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout)]
#[repr(transparent)]
pub struct HvError(core::num::NonZeroU16);
428
impl From<core::num::NonZeroU16> for HvError {
    // Wraps a raw nonzero status code without checking that it is one of the
    // known error values; unknown codes still Display/Debug via the raw
    // number.
    fn from(err: core::num::NonZeroU16) -> Self {
        Self(err)
    }
}
434
435impl Debug for HvError {
436    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
437        match self.debug_name() {
438            Some(name) => f.pad(name),
439            None => Debug::fmt(&self.0.get(), f),
440        }
441    }
442}
443
444impl core::fmt::Display for HvError {
445    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
446        match self.doc_str() {
447            Some(s) => f.write_str(s),
448            None => write!(f, "Hypervisor error {:#06x}", self.0),
449        }
450    }
451}
452
453impl core::error::Error for HvError {}
454
/// Defines the named error constants on `$ty`, plus private `debug_name` and
/// `doc_str` lookup functions that map a raw 16-bit code back to its symbolic
/// name and doc string respectively (returning `None` for unknown codes).
macro_rules! hv_error {
    ($ty:ty, $(#[doc = $doc:expr] $ident:ident = $val:expr),* $(,)?) => {

        #[expect(non_upper_case_globals)]
        impl $ty {
            $(
                #[doc = $doc]
                // `$val` is a nonzero literal, so `unwrap()` folds away at
                // compile time.
                pub const $ident: Self = Self(core::num::NonZeroU16::new($val).unwrap());
            )*

            fn debug_name(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        $val => stringify!($ident),
                    )*
                    _ => return None,
                })
            }

            fn doc_str(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        // Doc comments carry a leading space; trim it for the
                        // runtime (Display) string.
                        $val => const { $doc.trim_ascii() },
                    )*
                    _ => return None,
                })
            }
        }
    };
}
485
// DEVNOTE: the doc comments here are also used as the runtime error strings.
// (Do not reword them: `Display` returns them verbatim via `doc_str`.)
// The values are the HV_STATUS_* codes defined by the hypervisor interface.
hv_error! {
    HvError,
    /// Invalid hypercall code
    InvalidHypercallCode = 0x0002,
    /// Invalid hypercall input
    InvalidHypercallInput = 0x0003,
    /// Invalid alignment
    InvalidAlignment = 0x0004,
    /// Invalid parameter
    InvalidParameter = 0x0005,
    /// Access denied
    AccessDenied = 0x0006,
    /// Invalid partition state
    InvalidPartitionState = 0x0007,
    /// Operation denied
    OperationDenied = 0x0008,
    /// Unknown property
    UnknownProperty = 0x0009,
    /// Property value out of range
    PropertyValueOutOfRange = 0x000A,
    /// Insufficient memory
    InsufficientMemory = 0x000B,
    /// Partition too deep
    PartitionTooDeep = 0x000C,
    /// Invalid partition ID
    InvalidPartitionId = 0x000D,
    /// Invalid VP index
    InvalidVpIndex = 0x000E,
    /// Not found
    NotFound = 0x0010,
    /// Invalid port ID
    InvalidPortId = 0x0011,
    /// Invalid connection ID
    InvalidConnectionId = 0x0012,
    /// Insufficient buffers
    InsufficientBuffers = 0x0013,
    /// Not acknowledged
    NotAcknowledged = 0x0014,
    /// Invalid VP state
    InvalidVpState = 0x0015,
    /// Acknowledged
    Acknowledged = 0x0016,
    /// Invalid save restore state
    InvalidSaveRestoreState = 0x0017,
    /// Invalid SynIC state
    InvalidSynicState = 0x0018,
    /// Object in use
    ObjectInUse = 0x0019,
    /// Invalid proximity domain info
    InvalidProximityDomainInfo = 0x001A,
    /// No data
    NoData = 0x001B,
    /// Inactive
    Inactive = 0x001C,
    /// No resources
    NoResources = 0x001D,
    /// Feature unavailable
    FeatureUnavailable = 0x001E,
    /// Partial packet
    PartialPacket = 0x001F,
    /// Processor feature not supported
    ProcessorFeatureNotSupported = 0x0020,
    /// Processor cache line flush size incompatible
    ProcessorCacheLineFlushSizeIncompatible = 0x0030,
    /// Insufficient buffer
    InsufficientBuffer = 0x0033,
    /// Incompatible processor
    IncompatibleProcessor = 0x0037,
    /// Insufficient device domains
    InsufficientDeviceDomains = 0x0038,
    /// CPUID feature validation error
    CpuidFeatureValidationError = 0x003C,
    /// CPUID XSAVE feature validation error
    CpuidXsaveFeatureValidationError = 0x003D,
    /// Processor startup timeout
    ProcessorStartupTimeout = 0x003E,
    /// SMX enabled
    SmxEnabled = 0x003F,
    /// Invalid LP index
    InvalidLpIndex = 0x0041,
    /// Invalid register value
    InvalidRegisterValue = 0x0050,
    /// Invalid VTL state
    InvalidVtlState = 0x0051,
    /// NX not detected
    NxNotDetected = 0x0055,
    /// Invalid device ID
    InvalidDeviceId = 0x0057,
    /// Invalid device state
    InvalidDeviceState = 0x0058,
    /// Pending page requests
    PendingPageRequests = 0x0059,
    /// Page request invalid
    PageRequestInvalid = 0x0060,
    /// Key already exists
    KeyAlreadyExists = 0x0065,
    /// Device already in domain
    DeviceAlreadyInDomain = 0x0066,
    /// Invalid CPU group ID
    InvalidCpuGroupId = 0x006F,
    /// Invalid CPU group state
    InvalidCpuGroupState = 0x0070,
    /// Operation failed
    OperationFailed = 0x0071,
    /// Not allowed with nested virtualization active
    NotAllowedWithNestedVirtActive = 0x0072,
    /// Insufficient root memory
    InsufficientRootMemory = 0x0073,
    /// Event buffer already freed
    EventBufferAlreadyFreed = 0x0074,
    /// The specified timeout expired before the operation completed.
    Timeout = 0x0078,
    /// The VTL specified for the operation is already in an enabled state.
    VtlAlreadyEnabled = 0x0086,
    /// Unknown register name
    UnknownRegisterName = 0x0087,
}
604
/// A useful result type for hypervisor operations: `Ok(T)` on success,
/// `Err(HvError)` carrying the raw status code otherwise.
pub type HvResult<T> = Result<T, HvError>;
607
/// A Virtual Trust Level (VTL).
///
/// The `Ord` derive compares by numeric level (Vtl0 < Vtl1 < Vtl2).
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum Vtl {
    Vtl0 = 0,
    Vtl1 = 1,
    Vtl2 = 2,
}
615
616impl TryFrom<u8> for Vtl {
617    type Error = HvError;
618
619    fn try_from(value: u8) -> Result<Self, Self::Error> {
620        Ok(match value {
621            0 => Self::Vtl0,
622            1 => Self::Vtl1,
623            2 => Self::Vtl2,
624            _ => return Err(HvError::InvalidParameter),
625        })
626    }
627}
628
629impl From<Vtl> for u8 {
630    fn from(value: Vtl) -> Self {
631        value as u8
632    }
633}
634
/// The contents of `HV_X64_MSR_GUEST_CRASH_CTL`
///
/// Fields are allocated from the LSB, so the flags below occupy the top
/// bits (58..=63) of the MSR.
#[bitfield(u64)]
pub struct GuestCrashCtl {
    #[bits(58)]
    _reserved: u64,
    // ID of the pre-OS environment (bits 58..=60)
    #[bits(3)]
    pub pre_os_id: u8,
    // Crash dump will not be captured (bit 61)
    #[bits(1)]
    pub no_crash_dump: bool,
    // `HV_X64_MSR_GUEST_CRASH_P3` is the GPA of the message,
    // `HV_X64_MSR_GUEST_CRASH_P4` is its length in bytes (bit 62)
    #[bits(1)]
    pub crash_message: bool,
    // Log contents of crash parameter system registers (bit 63)
    #[bits(1)]
    pub crash_notify: bool,
}
654
/// A 128-bit value stored as native-endian bytes with guaranteed 16-byte
/// alignment.
#[repr(C, align(16))]
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct AlignedU128([u8; 16]);
658
impl AlignedU128 {
    /// Returns the value as native-endian bytes.
    pub fn as_ne_bytes(&self) -> [u8; 16] {
        self.0
    }

    /// Constructs a value from native-endian bytes.
    pub fn from_ne_bytes(val: [u8; 16]) -> Self {
        Self(val)
    }
}
668
669impl Debug for AlignedU128 {
670    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
671        Debug::fmt(&u128::from_ne_bytes(self.0), f)
672    }
673}
674
675impl From<u128> for AlignedU128 {
676    fn from(v: u128) -> Self {
677        Self(v.to_ne_bytes())
678    }
679}
680
681impl From<u64> for AlignedU128 {
682    fn from(v: u64) -> Self {
683        (v as u128).into()
684    }
685}
686
687impl From<u32> for AlignedU128 {
688    fn from(v: u32) -> Self {
689        (v as u128).into()
690    }
691}
692
693impl From<u16> for AlignedU128 {
694    fn from(v: u16) -> Self {
695        (v as u128).into()
696    }
697}
698
699impl From<u8> for AlignedU128 {
700    fn from(v: u8) -> Self {
701        (v as u128).into()
702    }
703}
704
705impl From<AlignedU128> for u128 {
706    fn from(v: AlignedU128) -> Self {
707        u128::from_ne_bytes(v.0)
708    }
709}
710
open_enum! {
    // SynIC message types. Values with the top bit set (0x8000_0000) are
    // hypervisor-originated; the 0x8001_xxxx range is architecture-specific.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvMessageType: u32 {
        #![expect(non_upper_case_globals)]

        HvMessageTypeNone = 0x00000000,

        HvMessageTypeUnmappedGpa = 0x80000000,
        HvMessageTypeGpaIntercept = 0x80000001,
        HvMessageTypeUnacceptedGpa = 0x80000003,
        HvMessageTypeGpaAttributeIntercept = 0x80000004,
        HvMessageTypeEnablePartitionVtlIntercept = 0x80000005,
        HvMessageTypeTimerExpired = 0x80000010,
        HvMessageTypeInvalidVpRegisterValue = 0x80000020,
        HvMessageTypeUnrecoverableException = 0x80000021,
        HvMessageTypeUnsupportedFeature = 0x80000022,
        HvMessageTypeTlbPageSizeMismatch = 0x80000023,
        HvMessageTypeIommuFault = 0x80000024,
        HvMessageTypeEventLogBufferComplete = 0x80000040,
        HvMessageTypeHypercallIntercept = 0x80000050,
        HvMessageTypeSynicEventIntercept = 0x80000060,
        HvMessageTypeSynicSintIntercept = 0x80000061,
        HvMessageTypeSynicSintDeliverable = 0x80000062,
        HvMessageTypeAsyncCallCompletion = 0x80000070,
        HvMessageTypeX64IoPortIntercept = 0x80010000,
        HvMessageTypeMsrIntercept = 0x80010001,
        HvMessageTypeX64CpuidIntercept = 0x80010002,
        HvMessageTypeExceptionIntercept = 0x80010003,
        HvMessageTypeX64ApicEoi = 0x80010004,
        HvMessageTypeX64IommuPrq = 0x80010005,
        HvMessageTypeRegisterIntercept = 0x80010006,
        HvMessageTypeX64Halt = 0x80010007,
        HvMessageTypeX64InterruptionDeliverable = 0x80010008,
        HvMessageTypeX64SipiIntercept = 0x80010009,
        HvMessageTypeX64RdtscIntercept = 0x8001000a,
        HvMessageTypeX64ApicSmiIntercept = 0x8001000b,
        HvMessageTypeArm64ResetIntercept = 0x8001000c,
        HvMessageTypeX64ApicInitSipiIntercept = 0x8001000d,
        HvMessageTypeX64ApicWriteIntercept = 0x8001000e,
        HvMessageTypeX64ProxyInterruptIntercept = 0x8001000f,
        HvMessageTypeX64IsolationCtrlRegIntercept = 0x80010010,
        HvMessageTypeX64SnpGuestRequestIntercept = 0x80010011,
        HvMessageTypeX64ExceptionTrapIntercept = 0x80010012,
        HvMessageTypeX64SevVmgexitIntercept = 0x80010013,
    }
}
757
758impl Default for HvMessageType {
759    fn default() -> Self {
760        HvMessageType::HvMessageTypeNone
761    }
762}
763
/// The SINT index reserved for intercept messages.
pub const HV_SYNIC_INTERCEPTION_SINT_INDEX: u8 = 0;

/// Number of synthetic interrupt sources per VP.
pub const NUM_SINTS: usize = 16;
/// Number of synthetic timers per VP.
pub const NUM_TIMERS: usize = 4;
768
/// The fixed 16-byte header at the start of every [`HvMessage`].
#[repr(C)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageHeader {
    pub typ: HvMessageType,
    // Valid payload length in bytes (<= HV_MESSAGE_PAYLOAD_SIZE).
    pub len: u8,
    pub flags: HvMessageFlags,
    pub rsvd: u16,
    pub id: u64,
}
778
/// Flags byte in [`HvMessageHeader`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageFlags {
    // Set when another message is queued behind this one.
    pub message_pending: bool,
    #[bits(7)]
    _reserved: u8,
}
786
/// Size in bytes of an [`HvMessage`]; fixed at 256 by the message ABI
/// (asserted below).
pub const HV_MESSAGE_SIZE: usize = size_of::<HvMessage>();
const_assert!(HV_MESSAGE_SIZE == 256);
/// Maximum payload size: 256-byte message minus the 16-byte header.
pub const HV_MESSAGE_PAYLOAD_SIZE: usize = 240;
790
/// A SynIC message: a fixed header followed by a 240-byte payload buffer.
///
/// 16-byte aligned so payload types up to that alignment can be
/// reinterpreted in place (see [`MessagePayload`]).
#[repr(C, align(16))]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessage {
    pub header: HvMessageHeader,
    pub payload_buffer: [u8; HV_MESSAGE_PAYLOAD_SIZE],
}
797
798impl Default for HvMessage {
799    fn default() -> Self {
800        Self {
801            header: FromZeros::new_zeroed(),
802            payload_buffer: [0; 240],
803        }
804    }
805}
806
807impl HvMessage {
808    /// Constructs a new message. `payload` must fit into the payload field (240
809    /// bytes limit).
810    pub fn new(typ: HvMessageType, id: u64, payload: &[u8]) -> Self {
811        let mut msg = HvMessage {
812            header: HvMessageHeader {
813                typ,
814                len: payload.len() as u8,
815                flags: HvMessageFlags::new(),
816                rsvd: 0,
817                id,
818            },
819            payload_buffer: [0; 240],
820        };
821        msg.payload_buffer[..payload.len()].copy_from_slice(payload);
822        msg
823    }
824
825    pub fn payload(&self) -> &[u8] {
826        &self.payload_buffer[..self.header.len as usize]
827    }
828
829    pub fn as_message<T: MessagePayload>(&self) -> &T {
830        // Ensure invariants are met.
831        let () = T::CHECK;
832        T::ref_from_prefix(&self.payload_buffer).unwrap().0
833    }
834
835    pub fn as_message_mut<T: MessagePayload>(&mut self) -> &T {
836        // Ensure invariants are met.
837        let () = T::CHECK;
838        T::mut_from_prefix(&mut self.payload_buffer).unwrap().0
839    }
840}
841
/// Marker trait for plain-data types that can be reinterpreted in place as an
/// [`HvMessage`] payload.
pub trait MessagePayload: KnownLayout + Immutable + IntoBytes + FromBytes + Sized {
    /// Used to ensure this trait is only implemented on messages of the proper
    /// size and alignment.
    ///
    /// Evaluating this constant (`let () = T::CHECK;`) fails compilation if
    /// `Self` is larger than the payload buffer or more aligned than
    /// `HvMessage`.
    #[doc(hidden)]
    const CHECK: () = {
        assert!(size_of::<Self>() <= HV_MESSAGE_PAYLOAD_SIZE);
        assert!(align_of::<Self>() <= align_of::<HvMessage>());
    };
}
851
/// Payload for synthetic timer expiration messages
/// (`HvMessageTypeTimerExpired`).
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct TimerMessagePayload {
    pub timer_index: u32,
    pub reserved: u32,
    // Times are in reference-timer units — NOTE(review): presumably 100ns
    // ticks per the TLFS; confirm.
    pub expiration_time: u64,
    pub delivery_time: u64,
}
860
861pub mod hypercall {
862    use super::*;
863    use core::ops::RangeInclusive;
864    use zerocopy::Unalign;
865
    /// The hypercall input value.
    #[bitfield(u64)]
    pub struct Control {
        /// The hypercall code.
        pub code: u16,
        /// If this hypercall is a fast hypercall.
        pub fast: bool,
        /// The variable header size, in qwords.
        #[bits(10)]
        pub variable_header_size: usize,
        // Reserved — NOTE(review): presumably must be zero; confirm against
        // the TLFS.
        #[bits(4)]
        _rsvd0: u8,
        /// Specifies that the hypercall should be handled by the L0 hypervisor in a nested environment.
        pub nested: bool,
        /// The element count for rep hypercalls.
        #[bits(12)]
        pub rep_count: usize,
        #[bits(4)]
        _rsvd1: u8,
        /// The first element to start processing in a rep hypercall.
        #[bits(12)]
        pub rep_start: usize,
        #[bits(4)]
        _rsvd2: u8,
    }
891
    /// The hypercall output value returned to the guest.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[must_use]
    pub struct HypercallOutput {
        /// The status of the call (zero on success).
        #[bits(16)]
        pub call_status: HvStatus,
        pub rsvd: u16,
        /// For rep hypercalls, the number of elements completed.
        #[bits(12)]
        pub elements_processed: usize,
        #[bits(20)]
        pub rsvd2: u32,
    }
905
906    impl From<HvError> for HypercallOutput {
907        fn from(e: HvError) -> Self {
908            Self::new().with_call_status(Err(e).into())
909        }
910    }
911
    impl HypercallOutput {
        /// A success output with zero elements processed.
        pub const SUCCESS: Self = Self::new();

        /// Returns `Ok(())` if the call status is success, otherwise the
        /// corresponding [`HvError`].
        pub fn result(&self) -> Result<(), HvError> {
            self.call_status().result()
        }
    }
920
    /// A register name/value pair, as used by the get/set VP registers
    /// hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvRegisterAssoc {
        pub name: HvRegisterName,
        // Pads the 4-byte name so `value` lands at offset 16 — NOTE(review):
        // presumably to satisfy the value's 16-byte alignment; confirm.
        pub pad: [u32; 3],
        pub value: HvRegisterValue,
    }
928
929    impl<N: Into<HvRegisterName>, T: Into<HvRegisterValue>> From<(N, T)> for HvRegisterAssoc {
930        fn from((name, value): (N, T)) -> Self {
931            Self {
932                name: name.into(),
933                pad: [0; 3],
934                value: value.into(),
935            }
936        }
937    }
938
939    impl<N: Copy + Into<HvRegisterName>, T: Copy + Into<HvRegisterValue>> From<&(N, T)>
940        for HvRegisterAssoc
941    {
942        fn from(&(name, value): &(N, T)) -> Self {
943            Self {
944                name: name.into(),
945                pad: [0; 3],
946                value: value.into(),
947            }
948        }
949    }
950
    /// Contents of the guest hypercall MSR.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MsrHypercallContents {
        /// Enables the hypercall page.
        pub enable: bool,
        /// If set, the MSR is locked against further writes.
        pub locked: bool,
        /// Reserved.
        #[bits(10)]
        pub reserved_p: u64,
        /// Guest page number of the hypercall page.
        #[bits(52)]
        pub gpn: u64,
    }
961
    /// Input layout for the PostMessage hypercall.
    #[repr(C, align(8))]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessage {
        /// The connection to post the message to.
        pub connection_id: u32,
        /// Reserved padding.
        pub padding: u32,
        /// The type of the message.
        pub message_type: u32,
        /// The number of valid bytes in `payload`.
        pub payload_size: u32,
        /// The message payload (up to 240 bytes).
        pub payload: [u8; 240],
    }
971
    /// Input layout for the SignalEvent hypercall.
    #[repr(C, align(8))]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEvent {
        /// The connection whose event should be signaled.
        pub connection_id: u32,
        /// The event flag number to signal.
        pub flag_number: u16,
        /// Reserved.
        pub rsvd: u16,
    }
979
    /// Input layout for the PostMessageDirect hypercall, which posts a
    /// message directly to a target VP's SINT.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessageDirect {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The target VTL.
        pub vtl: u8,
        pub padding0: [u8; 3],
        /// The target synthetic interrupt source.
        pub sint: u8,
        pub padding1: [u8; 3],
        /// The message to deliver, unaligned within this input layout.
        pub message: Unalign<HvMessage>,
        pub padding2: u32,
    }
992
    /// Input layout for the SignalEventDirect hypercall, which signals an
    /// event flag directly on a target VP's SINT.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirect {
        /// The target partition.
        pub target_partition: u64,
        /// The target virtual processor index.
        pub target_vp: u32,
        /// The target VTL.
        pub target_vtl: u8,
        /// The target synthetic interrupt source.
        pub target_sint: u8,
        /// The event flag number to signal.
        pub flag_number: u16,
    }
1002
    /// Output layout for the SignalEventDirect hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirectOutput {
        /// Nonzero if the event flag was newly signaled by this call.
        pub newly_signaled: u8,
        /// Reserved.
        pub rsvd: [u8; 7],
    }
1009
    /// A device interrupt description, used by [`RetargetDeviceInterrupt`].
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptEntry {
        /// The interrupt source kind (MSI or I/O APIC).
        pub source: HvInterruptSource,
        /// Reserved.
        pub rsvd: u32,
        /// Source-specific interrupt data.
        pub data: [u32; 2],
    }
1017
    open_enum! {
        /// The source of a device interrupt.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterruptSource: u32 {
            MSI = 1,
            IO_APIC = 2,
        }
    }
1025
    /// The target of a device interrupt, used by [`RetargetDeviceInterrupt`].
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptTarget {
        /// The interrupt vector.
        pub vector: u32,
        /// Flags describing how to interpret `mask_or_format`.
        pub flags: HvInterruptTargetFlags,
        /// Processor mask, or the set format when `flags.processor_set()` is set.
        pub mask_or_format: u64,
    }
1033
    /// Flags for [`InterruptTarget`].
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInterruptTargetFlags {
        /// Deliver the interrupt as a multicast.
        pub multicast: bool,
        /// `mask_or_format` describes a processor set rather than a mask.
        pub processor_set: bool,
        /// Reserved.
        #[bits(30)]
        pub reserved: u32,
    }
1042
    /// Raw bit values matching [`HvInterruptTargetFlags`].
    pub const HV_DEVICE_INTERRUPT_TARGET_MULTICAST: u32 = 1;
    pub const HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET: u32 = 2;

    /// Generic set (processor set) formats.
    pub const HV_GENERIC_SET_SPARSE_4K: u64 = 0;
    pub const HV_GENERIC_SET_ALL: u64 = 1;
1048
    /// Input layout for the RetargetDeviceInterrupt hypercall.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct RetargetDeviceInterrupt {
        /// The partition owning the device.
        pub partition_id: u64,
        /// The device whose interrupt is being retargeted.
        pub device_id: u64,
        /// The interrupt to retarget.
        pub entry: InterruptEntry,
        /// Reserved.
        pub rsvd: u64,
        /// The new interrupt target.
        pub target_header: InterruptTarget,
    }
1058
    /// A VTL selector used in many hypercall inputs. When `use_target_vtl`
    /// is clear the caller's current VTL is targeted.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInputVtl {
        /// The explicitly targeted VTL; only meaningful when `use_target_vtl` is set.
        #[bits(4)]
        pub target_vtl_value: u8,
        /// Whether `target_vtl_value` should be used instead of the current VTL.
        pub use_target_vtl: bool,
        /// Reserved.
        #[bits(3)]
        pub reserved: u8,
    }
1068
1069    impl From<Vtl> for HvInputVtl {
1070        fn from(value: Vtl) -> Self {
1071            Self::from(Some(value))
1072        }
1073    }
1074
1075    impl From<Option<Vtl>> for HvInputVtl {
1076        fn from(value: Option<Vtl>) -> Self {
1077            Self::new()
1078                .with_use_target_vtl(value.is_some())
1079                .with_target_vtl_value(value.map_or(0, Into::into))
1080        }
1081    }
1082
1083    impl HvInputVtl {
1084        /// None = target current vtl
1085        pub fn target_vtl(&self) -> Result<Option<Vtl>, HvError> {
1086            if self.reserved() != 0 {
1087                return Err(HvError::InvalidParameter);
1088            }
1089            if self.use_target_vtl() {
1090                Ok(Some(self.target_vtl_value().try_into()?))
1091            } else {
1092                Ok(None)
1093            }
1094        }
1095
1096        pub const CURRENT_VTL: Self = Self::new();
1097    }
1098
    /// Common input header for the get/set VP registers hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetSetVpRegisters {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The VTL whose registers are accessed.
        pub target_vtl: HvInputVtl,
        /// Reserved.
        pub rsvd: [u8; 3],
    }
1107
    open_enum::open_enum! {
        /// OS identifiers for the Microsoft-vendor encoding of the guest OS
        /// ID MSR. See [`HvGuestOsMicrosoft`].
        #[derive(Default)]
        pub enum HvGuestOsMicrosoftIds: u8 {
            UNDEFINED = 0x00,
            MSDOS = 0x01,
            WINDOWS_3X = 0x02,
            WINDOWS_9X = 0x03,
            WINDOWS_NT = 0x04,
            WINDOWS_CE = 0x05,
        }
    }
1119
    /// The Microsoft-vendor encoding of the guest OS ID, valid when the top
    /// (open-source) bit of [`HvGuestOsId`] is clear.
    #[bitfield(u64)]
    pub struct HvGuestOsMicrosoft {
        #[bits(40)]
        _rsvd: u64,
        /// The OS identifier; see [`HvGuestOsMicrosoftIds`].
        #[bits(8)]
        pub os_id: u8,
        // The top bit must be zero and the least significant 15 bits hold the value of the vendor id.
        #[bits(16)]
        pub vendor_id: u16,
    }
1130
    open_enum::open_enum! {
        /// OS type identifiers for the open-source encoding of the guest OS
        /// ID MSR. See [`HvGuestOsOpenSource`].
        #[derive(Default)]
        pub enum HvGuestOsOpenSourceType: u8 {
            UNDEFINED = 0x00,
            LINUX = 0x01,
            FREEBSD = 0x02,
            XEN = 0x03,
            ILLUMOS = 0x04,
        }
    }
1141
    /// The open-source encoding of the guest OS ID, valid when the top
    /// (open-source) bit of [`HvGuestOsId`] is set.
    #[bitfield(u64)]
    pub struct HvGuestOsOpenSource {
        /// The OS build number.
        #[bits(16)]
        pub build_no: u16,
        /// The OS version.
        #[bits(32)]
        pub version: u32,
        /// OS-specific identifier.
        #[bits(8)]
        pub os_id: u8,
        /// The OS type; see [`HvGuestOsOpenSourceType`].
        #[bits(7)]
        pub os_type: u8,
        /// Set for the open-source encoding.
        #[bits(1)]
        pub is_open_source: bool,
    }
1155
    /// The guest OS ID MSR value. The top bit selects between the
    /// Microsoft-vendor and open-source encodings of the remaining bits.
    #[bitfield(u64)]
    pub struct HvGuestOsId {
        #[bits(63)]
        _rsvd: u64,
        /// Discriminates the encoding; see [`Self::microsoft`] and [`Self::open_source`].
        is_open_source: bool,
    }
1162
1163    impl HvGuestOsId {
1164        pub fn microsoft(&self) -> Option<HvGuestOsMicrosoft> {
1165            (!self.is_open_source()).then(|| HvGuestOsMicrosoft::from(u64::from(*self)))
1166        }
1167
1168        pub fn open_source(&self) -> Option<HvGuestOsOpenSource> {
1169            (self.is_open_source()).then(|| HvGuestOsOpenSource::from(u64::from(*self)))
1170        }
1171
1172        pub fn as_u64(&self) -> u64 {
1173            self.0
1174        }
1175    }
1176
    /// Access type masks for [`InstallIntercept`].
    pub const HV_INTERCEPT_ACCESS_MASK_NONE: u32 = 0x00;
    pub const HV_INTERCEPT_ACCESS_MASK_READ: u32 = 0x01;
    pub const HV_INTERCEPT_ACCESS_MASK_WRITE: u32 = 0x02;
    pub const HV_INTERCEPT_ACCESS_MASK_READ_WRITE: u32 =
        HV_INTERCEPT_ACCESS_MASK_READ | HV_INTERCEPT_ACCESS_MASK_WRITE;
    pub const HV_INTERCEPT_ACCESS_MASK_EXECUTE: u32 = 0x04;
1183
    open_enum::open_enum! {
        /// The kind of event to intercept; determines how the parameters in
        /// [`HvInterceptParameters`] are interpreted.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterceptType: u32 {
            #![expect(non_upper_case_globals)]
            HvInterceptTypeX64IoPort = 0x00000000,
            HvInterceptTypeX64Msr = 0x00000001,
            HvInterceptTypeX64Cpuid = 0x00000002,
            HvInterceptTypeException = 0x00000003,
            HvInterceptTypeHypercall = 0x00000008,
            HvInterceptTypeUnknownSynicConnection = 0x0000000D,
            HvInterceptTypeX64ApicEoi = 0x0000000E,
            HvInterceptTypeRetargetInterruptWithUnknownDeviceId = 0x0000000F,
            HvInterceptTypeX64IoPortRange = 0x00000011,
        }
    }
1199
    /// Type-specific parameters for [`InstallIntercept`]; interpretation
    /// depends on the [`HvInterceptType`].
    #[repr(transparent)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct HvInterceptParameters(u64);
1203
1204    impl HvInterceptParameters {
1205        pub fn new_io_port(port: u16) -> Self {
1206            Self(port as u64)
1207        }
1208
1209        pub fn new_io_port_range(ports: RangeInclusive<u16>) -> Self {
1210            let base = *ports.start() as u64;
1211            let end = *ports.end() as u64;
1212            Self(base | (end << 16))
1213        }
1214
1215        pub fn new_exception(vector: u16) -> Self {
1216            Self(vector as u64)
1217        }
1218
1219        pub fn io_port(&self) -> u16 {
1220            self.0 as u16
1221        }
1222
1223        pub fn io_port_range(&self) -> RangeInclusive<u16> {
1224            let base = self.0 as u16;
1225            let end = (self.0 >> 16) as u16;
1226            base..=end
1227        }
1228
1229        pub fn cpuid_index(&self) -> u32 {
1230            self.0 as u32
1231        }
1232
1233        pub fn exception(&self) -> u16 {
1234            self.0 as u16
1235        }
1236    }
1237
    /// Input layout for the InstallIntercept hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct InstallIntercept {
        /// The target partition.
        pub partition_id: u64,
        /// The accesses to intercept; see the `HV_INTERCEPT_ACCESS_MASK_*` constants.
        pub access_type_mask: u32,
        /// The kind of event to intercept.
        pub intercept_type: HvInterceptType,
        /// Type-specific parameters.
        pub intercept_parameters: HvInterceptParameters,
    }
1246
    /// Input layout for the AssertVirtualInterrupt hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct AssertVirtualInterrupt {
        /// The target partition.
        pub partition_id: u64,
        /// Delivery control for the interrupt.
        pub interrupt_control: HvInterruptControl,
        /// The interrupt destination.
        pub destination_address: u64,
        /// The vector to assert.
        pub requested_vector: u32,
        /// The target VTL.
        pub target_vtl: u8,
        /// Reserved.
        pub rsvd0: u8,
        /// Reserved.
        pub rsvd1: u16,
    }
1258
    /// Input layout for the StartVirtualProcessor hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorX64 {
        /// The target partition.
        pub partition_id: u64,
        /// The virtual processor to start.
        pub vp_index: u32,
        /// The VTL to start the processor in.
        pub target_vtl: u8,
        /// Reserved.
        pub rsvd0: u8,
        /// Reserved.
        pub rsvd1: u16,
        /// The initial register state for the processor.
        pub vp_context: InitialVpContextX64,
    }
1269
    /// Initial x64 register state for a virtual processor, used when
    /// starting a VP or enabling a VTL on it.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextX64 {
        pub rip: u64,
        pub rsp: u64,
        pub rflags: u64,
        pub cs: HvX64SegmentRegister,
        pub ds: HvX64SegmentRegister,
        pub es: HvX64SegmentRegister,
        pub fs: HvX64SegmentRegister,
        pub gs: HvX64SegmentRegister,
        pub ss: HvX64SegmentRegister,
        pub tr: HvX64SegmentRegister,
        pub ldtr: HvX64SegmentRegister,
        pub idtr: HvX64TableRegister,
        pub gdtr: HvX64TableRegister,
        pub efer: u64,
        pub cr0: u64,
        pub cr3: u64,
        pub cr4: u64,
        // The PAT MSR value.
        pub msr_cr_pat: u64,
    }
1292
    /// Input layout for the StartVirtualProcessor hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorArm64 {
        /// The target partition.
        pub partition_id: u64,
        /// The virtual processor to start.
        pub vp_index: u32,
        /// The VTL to start the processor in.
        pub target_vtl: u8,
        /// Reserved.
        pub rsvd0: u8,
        /// Reserved.
        pub rsvd1: u16,
        /// The initial register state for the processor.
        pub vp_context: InitialVpContextArm64,
    }
1303
    /// Initial ARM64 register state for a virtual processor, used when
    /// starting a VP or enabling a VTL on it.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextArm64 {
        pub pc: u64,
        pub sp_elh: u64,
        pub sctlr_el1: u64,
        pub mair_el1: u64,
        pub tcr_el1: u64,
        pub vbar_el1: u64,
        pub ttbr0_el1: u64,
        pub ttbr1_el1: u64,
        pub x18: u64,
    }
1317
1318    impl InitialVpContextX64 {
1319        pub fn as_hv_register_assocs(&self) -> impl Iterator<Item = HvRegisterAssoc> + '_ {
1320            let regs = [
1321                (HvX64RegisterName::Rip, HvRegisterValue::from(self.rip)).into(),
1322                (HvX64RegisterName::Rsp, HvRegisterValue::from(self.rsp)).into(),
1323                (
1324                    HvX64RegisterName::Rflags,
1325                    HvRegisterValue::from(self.rflags),
1326                )
1327                    .into(),
1328                (HvX64RegisterName::Cs, HvRegisterValue::from(self.cs)).into(),
1329                (HvX64RegisterName::Ds, HvRegisterValue::from(self.ds)).into(),
1330                (HvX64RegisterName::Es, HvRegisterValue::from(self.es)).into(),
1331                (HvX64RegisterName::Fs, HvRegisterValue::from(self.fs)).into(),
1332                (HvX64RegisterName::Gs, HvRegisterValue::from(self.gs)).into(),
1333                (HvX64RegisterName::Ss, HvRegisterValue::from(self.ss)).into(),
1334                (HvX64RegisterName::Tr, HvRegisterValue::from(self.tr)).into(),
1335                (HvX64RegisterName::Ldtr, HvRegisterValue::from(self.ldtr)).into(),
1336                (HvX64RegisterName::Idtr, HvRegisterValue::from(self.idtr)).into(),
1337                (HvX64RegisterName::Gdtr, HvRegisterValue::from(self.gdtr)).into(),
1338                (HvX64RegisterName::Efer, HvRegisterValue::from(self.efer)).into(),
1339                (HvX64RegisterName::Cr0, HvRegisterValue::from(self.cr0)).into(),
1340                (HvX64RegisterName::Cr3, HvRegisterValue::from(self.cr3)).into(),
1341                (HvX64RegisterName::Cr4, HvRegisterValue::from(self.cr4)).into(),
1342                (
1343                    HvX64RegisterName::Pat,
1344                    HvRegisterValue::from(self.msr_cr_pat),
1345                )
1346                    .into(),
1347            ];
1348            regs.into_iter()
1349        }
1350    }
1351
1352    #[bitfield(u64)]
1353    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
1354    pub struct TranslateGvaControlFlagsX64 {
1355        /// Request data read access
1356        pub validate_read: bool,
1357        /// Request data write access
1358        pub validate_write: bool,
1359        /// Request instruction fetch access.
1360        pub validate_execute: bool,
1361        /// Don't enforce any checks related to access mode (supervisor vs. user; SMEP and SMAP are treated
1362        /// as disabled).
1363        pub privilege_exempt: bool,
1364        /// Set the appropriate page table bits (i.e. access/dirty bit)
1365        pub set_page_table_bits: bool,
1366        /// Lock the TLB
1367        pub tlb_flush_inhibit: bool,
1368        /// Treat the access as a supervisor mode access irrespective of current mode.
1369        pub supervisor_access: bool,
1370        /// Treat the access as a user mode access irrespective of current mode.
1371        pub user_access: bool,
1372        /// Enforce the SMAP restriction on supervisor data access to user mode addresses if CR4.SMAP=1
1373        /// irrespective of current EFLAGS.AC i.e. the behavior for "implicit supervisor-mode accesses"
1374        /// (e.g. to the GDT, etc.) and when EFLAGS.AC=0. Does nothing if CR4.SMAP=0.
1375        pub enforce_smap: bool,
1376        /// Don't enforce the SMAP restriction on supervisor data access to user mode addresses irrespective
1377        /// of current EFLAGS.AC i.e. the behavior when EFLAGS.AC=1.
1378        pub override_smap: bool,
1379        /// Treat the access as a shadow stack access.
1380        pub shadow_stack: bool,
1381        #[bits(45)]
1382        _unused: u64,
1383        /// Target vtl
1384        input_vtl_value: u8,
1385    }
1386
1387    impl TranslateGvaControlFlagsX64 {
1388        pub fn input_vtl(&self) -> HvInputVtl {
1389            self.input_vtl_value().into()
1390        }
1391
1392        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
1393            self.with_input_vtl_value(input_vtl.into())
1394        }
1395
1396        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
1397            self.set_input_vtl_value(input_vtl.into())
1398        }
1399    }
1400
    /// Control flags for the TranslateVirtualAddress hypercall (ARM64).
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaControlFlagsArm64 {
        /// Request data read access
        pub validate_read: bool,
        /// Request data write access
        pub validate_write: bool,
        /// Request instruction fetch access.
        pub validate_execute: bool,
        /// Reserved.
        _reserved0: bool,
        /// Set the appropriate page table bits (i.e. access/dirty bit)
        pub set_page_table_bits: bool,
        /// Lock the TLB
        pub tlb_flush_inhibit: bool,
        /// Treat the access as a supervisor mode access irrespective of current mode.
        pub supervisor_access: bool,
        /// Treat the access as a user mode access irrespective of current mode.
        pub user_access: bool,
        /// Restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN i.e.
        /// the behavior when PSTATE.PAN=1.
        pub pan_set: bool,
        /// Don't restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN
        /// i.e. the behavior when PSTATE.PAN=0.
        pub pan_clear: bool,
        #[bits(46)]
        _unused: u64,
        /// Target vtl
        #[bits(8)]
        input_vtl_value: u8,
    }
1431
1432    impl TranslateGvaControlFlagsArm64 {
1433        pub fn input_vtl(&self) -> HvInputVtl {
1434            self.input_vtl_value().into()
1435        }
1436
1437        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
1438            self.with_input_vtl_value(input_vtl.into())
1439        }
1440
1441        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
1442            self.set_input_vtl_value(input_vtl.into())
1443        }
1444    }
1445
    /// Input layout for the TranslateVirtualAddress hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressX64 {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        /// Flags controlling the translation.
        pub control_flags: TranslateGvaControlFlagsX64,
        /// The guest virtual page number to translate.
        pub gva_page: u64,
    }
1456
    /// Input layout for the TranslateVirtualAddress hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressArm64 {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        /// Flags controlling the translation.
        pub control_flags: TranslateGvaControlFlagsArm64,
        /// The guest virtual page number to translate.
        pub gva_page: u64,
    }
1467
    open_enum::open_enum! {
        /// Result code of a TranslateVirtualAddress hypercall; see
        /// [`TranslateGvaResult`].
        pub enum TranslateGvaResultCode: u32 {
            SUCCESS = 0,

            // Translation Failures
            PAGE_NOT_PRESENT = 1,
            PRIVILEGE_VIOLATION = 2,
            INVALID_PAGE_TABLE_FLAGS = 3,

            // GPA access failures
            GPA_UNMAPPED = 4,
            GPA_NO_READ_ACCESS = 5,
            GPA_NO_WRITE_ACCESS = 6,
            GPA_ILLEGAL_OVERLAY_ACCESS = 7,

            /// Intercept of the memory access by either
            /// - a higher VTL
            /// - a nested hypervisor (due to a violation of the nested page table)
            INTERCEPT = 8,

            GPA_UNACCEPTED = 9,
        }
    }
1491
    /// Result of a TranslateVirtualAddress hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResult {
        /// A [`TranslateGvaResultCode`] value.
        pub result_code: u32,
        /// The memory cache type of the translated page.
        pub cache_type: u8,
        /// Whether the translation hit an overlay page.
        pub overlay_page: bool,
        /// Reserved.
        #[bits(23)]
        pub reserved: u32,
    }
1501
    /// Output layout for the TranslateVirtualAddress hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressOutput {
        /// The result of the translation.
        pub translation_result: TranslateGvaResult,
        /// The resulting guest physical page number, on success.
        pub gpa_page: u64,
    }
1508
    /// Extended translation result (x64), including any pending event
    /// produced by the translation.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExX64 {
        /// The basic translation result.
        pub result: TranslateGvaResult,
        /// Reserved.
        pub reserved: u64,
        /// Event information associated with the translation.
        pub event_info: HvX64PendingEvent,
    }

    const_assert!(size_of::<TranslateGvaResultExX64>() == 0x30);
1518
    /// Extended translation result (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExArm64 {
        /// The basic translation result.
        pub result: TranslateGvaResult,
    }

    const_assert!(size_of::<TranslateGvaResultExArm64>() == 0x8);
1526
    /// Output layout for the extended TranslateVirtualAddress hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputX64 {
        /// The extended translation result.
        pub translation_result: TranslateGvaResultExX64,
        /// The resulting guest physical page number, on success.
        pub gpa_page: u64,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u64,
    }

    const_assert!(size_of::<TranslateVirtualAddressExOutputX64>() == 0x40);
1537
    /// Output layout for the extended TranslateVirtualAddress hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputArm64 {
        /// The extended translation result.
        pub translation_result: TranslateGvaResultExArm64,
        /// The resulting guest physical page number, on success.
        pub gpa_page: u64,
    }

    const_assert!(size_of::<TranslateVirtualAddressExOutputArm64>() == 0x10);
1546
    /// Input header for the GetVpIndexFromApicId hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetVpIndexFromApicId {
        /// The target partition.
        pub partition_id: u64,
        /// The target VTL.
        pub target_vtl: u8,
        /// Reserved.
        pub reserved: [u8; 7],
    }
1554
    /// Input layout for the EnableVpVtl hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlX64 {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The VTL to enable.
        pub target_vtl: u8,
        /// Reserved.
        pub reserved: [u8; 3],
        /// The initial register state for the newly enabled VTL.
        pub vp_vtl_context: InitialVpContextX64,
    }
1564
    /// Input layout for the EnableVpVtl hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlArm64 {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The VTL to enable.
        pub target_vtl: u8,
        /// Reserved.
        pub reserved: [u8; 3],
        /// The initial register state for the newly enabled VTL.
        pub vp_vtl_context: InitialVpContextArm64,
    }
1574
    /// Input header for the ModifyVtlProtectionMask hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyVtlProtectionMask {
        /// The target partition.
        pub partition_id: u64,
        /// The protections to apply.
        pub map_flags: HvMapGpaFlags,
        /// The VTL whose protections are modified.
        pub target_vtl: HvInputVtl,
        /// Reserved.
        pub reserved: [u8; 3],
    }
1583
    /// Input header for the CheckSparseGpaPageVtlAccess hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccess {
        /// The target partition.
        pub partition_id: u64,
        /// The VTL whose access is checked.
        pub target_vtl: HvInputVtl,
        /// The access type(s) to check for.
        pub desired_access: u8,
        /// Reserved.
        pub reserved0: u16,
        /// Reserved.
        pub reserved1: u32,
    }
    const_assert!(size_of::<CheckSparseGpaPageVtlAccess>() == 0x10);
1594
    /// Per-page output for the CheckSparseGpaPageVtlAccess hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccessOutput {
        /// A [`CheckGpaPageVtlAccessResultCode`] value.
        pub result_code: u8,
        /// The access types that were denied.
        pub denied_access: u8,
        /// The VTL that would intercept the access.
        #[bits(4)]
        pub intercepting_vtl: u32,
        /// Reserved.
        #[bits(12)]
        _reserved0: u32,
        /// Reserved.
        _reserved1: u32,
    }
    const_assert!(size_of::<CheckSparseGpaPageVtlAccessOutput>() == 0x8);
1607
    open_enum::open_enum! {
        /// Result code for [`CheckSparseGpaPageVtlAccessOutput`].
        pub enum CheckGpaPageVtlAccessResultCode: u32 {
            SUCCESS = 0,
            MEMORY_INTERCEPT = 1,
        }
    }
1614
    /// The number of VTLs for which permissions can be specified in a VTL permission set.
    pub const HV_VTL_PERMISSION_SET_SIZE: usize = 2;

    /// A set of per-VTL page permissions, used by the page-acceptance
    /// hypercalls below.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VtlPermissionSet {
        /// VTL permissions for the GPA page, starting from VTL 1.
        pub vtl_permission_from_1: [u16; HV_VTL_PERMISSION_SET_SIZE],
    }
1624
    open_enum::open_enum! {
        /// The expected memory type when accepting pages; see
        /// [`AcceptPagesAttributes`].
        pub enum AcceptMemoryType: u32 {
            ANY = 0,
            RAM = 1,
        }
    }
1631
    open_enum! {
        /// Host visibility used in hypercall inputs.
        ///
        /// NOTE: While this is a 2 bit set with the lower bit representing host
        /// read access and upper bit representing host write access, hardware
        /// platforms do not support that form of isolation. Only support
        /// private or full shared in this definition.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HostVisibilityType: u8 {
            PRIVATE = 0,
            SHARED = 3,
        }
    }
1645
1646    // Used by bitfield-struct implicitly.
1647    impl HostVisibilityType {
1648        const fn from_bits(value: u8) -> Self {
1649            Self(value)
1650        }
1651
1652        const fn into_bits(value: Self) -> u8 {
1653            value.0
1654        }
1655    }
1656
    /// Attributes for accepting pages. See [`AcceptGpaPages`]
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptPagesAttributes {
        #[bits(6)]
        /// Supplies the expected memory type [`AcceptMemoryType`].
        pub memory_type: u32,
        #[bits(2)]
        /// Supplies the initial host visibility (exclusive, shared read-only, shared read-write).
        pub host_visibility: HostVisibilityType,
        #[bits(3)]
        /// Supplies the set of VTLs for which initial VTL permissions will be set.
        pub vtl_set: u32,
        #[bits(21)]
        /// Reserved.
        _reserved: u32,
    }
1673
    /// Input header for the AcceptGpaPages hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptGpaPages {
        /// Supplies the partition ID of the partition this request is for.
        pub partition_id: u64,
        /// Supplies attributes of the pages being accepted, such as whether
        /// they should be made host visible.
        pub page_attributes: AcceptPagesAttributes,
        /// Supplies the set of initial VTL permissions.
        pub vtl_permission_set: VtlPermissionSet,
        /// Supplies the GPA page number of the first page to modify.
        pub gpa_page_base: u64,
    }
    const_assert!(size_of::<AcceptGpaPages>() == 0x18);
1688
    /// Attributes for unaccepting pages. See [`UnacceptGpaPages`]
    #[bitfield(u32)]
    pub struct UnacceptPagesAttributes {
        /// The set of VTLs for which VTL permissions will be checked.
        #[bits(3)]
        pub vtl_set: u32,
        /// Reserved.
        #[bits(29)]
        _reserved: u32,
    }
1697
1698    #[repr(C)]
1699    pub struct UnacceptGpaPages {
1700        /// Supplies the partition ID of the partition this request is for.
1701        pub partition_id: u64,
1702        /// Supplies the set of VTLs for which VTL permissions will be checked.
1703        pub page_attributes: UnacceptPagesAttributes,
1704        ///  Supplies the set of VTL permissions to check against.
1705        pub vtl_permission_set: VtlPermissionSet,
1706        /// Supplies the GPA page number of the first page to modify.
1707        pub gpa_page_base: u64,
1708    }
1709    const_assert!(size_of::<UnacceptGpaPages>() == 0x18);
1710
    /// The host visibility to apply; see [`ModifySparsePageVisibility`].
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyHostVisibility {
        /// The new host visibility for the pages.
        #[bits(2)]
        pub host_visibility: HostVisibilityType,
        /// Reserved.
        #[bits(30)]
        _reserved: u32,
    }
1719
    /// Input header for the ModifySparsePageVisibility hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifySparsePageVisibility {
        /// The target partition.
        pub partition_id: u64,
        /// The new host visibility for the pages.
        pub host_visibility: ModifyHostVisibility,
        /// Reserved.
        pub reserved: u32,
    }
1727
    /// Input header for the QuerySparsePageVisibility hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct QuerySparsePageVisibility {
        /// The target partition.
        pub partition_id: u64,
    }
1733
    /// Flags for the EnablePartitionVtl hypercall.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtlFlags {
        /// Enable mode-based execution control for the VTL.
        pub enable_mbec: bool,
        /// Enable supervisor shadow stack support for the VTL.
        pub enable_supervisor_shadow_stack: bool,
        /// Enable hardware HVPT support for the VTL.
        pub enable_hardware_hvpt: bool,
        /// Reserved.
        #[bits(5)]
        pub reserved: u8,
    }
1743
    /// Input layout for the EnablePartitionVtl hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtl {
        /// The target partition.
        pub partition_id: u64,
        /// The VTL to enable for the partition.
        pub target_vtl: u8,
        /// Options for the newly enabled VTL.
        pub flags: EnablePartitionVtlFlags,
        /// Reserved, must be zero.
        pub reserved_z0: u16,
        /// Reserved, must be zero.
        pub reserved_z1: u32,
    }
1753
    /// Input layout for the FlushVirtualAddressSpace/List hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpace {
        /// The address space to flush.
        pub address_space: u64,
        /// Flags controlling the flush.
        pub flags: HvFlushFlags,
        /// The set of processors to flush on.
        pub processor_mask: u64,
    }
1761
    /// Input layout for the extended FlushVirtualAddressSpace/List
    /// hypercalls, which take a sparse VP set instead of a simple mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpaceEx {
        /// The address space to flush.
        pub address_space: u64,
        /// Flags controlling the flush.
        pub flags: HvFlushFlags,
        /// The format of the VP set; see the `HV_GENERIC_SET_*` constants.
        pub vp_set_format: u64,
        /// The valid banks of the VP set.
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }
1771
    /// Input header for the pin/unpin GPA page ranges hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PinUnpinGpaPageRangesHeader {
        /// Reserved.
        pub reserved: u64,
    }
1777
    /// Input layout for the SendSyntheticClusterIpi hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpi {
        /// The vector to send.
        pub vector: u32,
        /// The target VTL.
        pub target_vtl: HvInputVtl,
        /// Flags for the IPI.
        pub flags: u8,
        /// Reserved.
        pub reserved: u16,
        /// The set of processors to send the IPI to.
        pub processor_mask: u64,
    }
1787
    /// Input layout for the extended SendSyntheticClusterIpi hypercall,
    /// which takes a sparse VP set instead of a simple mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpiEx {
        /// The vector to send.
        pub vector: u32,
        /// The target VTL.
        pub target_vtl: HvInputVtl,
        /// Flags for the IPI.
        pub flags: u8,
        /// Reserved.
        pub reserved: u16,
        /// The format of the VP set; see the `HV_GENERIC_SET_*` constants.
        pub vp_set_format: u64,
        /// The valid banks of the VP set.
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }
1799
    /// Flags for the TLB flush hypercalls.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvFlushFlags {
        /// Flush on all processors, ignoring the processor set.
        pub all_processors: bool,
        /// Flush all address spaces, ignoring the input address space.
        pub all_virtual_address_spaces: bool,
        /// Flush only non-global mappings.
        pub non_global_mappings_only: bool,
        /// The GVA ranges use the extended format.
        pub use_extended_range_format: bool,
        /// Use the `target_vtl0`/`target_vtl1` bits to select the VTL(s).
        pub use_target_vtl: bool,

        /// Reserved.
        #[bits(3)]
        _reserved: u8,

        /// Flush VTL 0 (when `use_target_vtl` is set).
        pub target_vtl0: bool,
        /// Flush VTL 1 (when `use_target_vtl` is set).
        pub target_vtl1: bool,

        /// Reserved.
        #[bits(54)]
        _reserved2: u64,
    }
1818
    /// An encoded guest virtual address range, as passed to the flush
    /// hypercalls. The bit layout depends on the flush flags; use the
    /// `as_*` views on this type to decode it.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGvaRange(pub u64);

    impl From<u64> for HvGvaRange {
        fn from(value: u64) -> Self {
            Self(value)
        }
    }

    impl From<HvGvaRange> for u64 {
        fn from(value: HvGvaRange) -> Self {
            value.0
        }
    }
1834
    impl HvGvaRange {
        /// Reinterprets the range in the simple (4 KB pages only) format.
        pub fn as_simple(self) -> HvGvaRangeSimple {
            HvGvaRangeSimple(self.0)
        }

        /// Reinterprets the range in the extended format (4 KB pages with a
        /// large-page flag).
        pub fn as_extended(self) -> HvGvaRangeExtended {
            HvGvaRangeExtended(self.0)
        }

        /// Reinterprets the range in the extended format when the large-page
        /// bit is set.
        pub fn as_extended_large_page(self) -> HvGvaRangeExtendedLargePage {
            HvGvaRangeExtendedLargePage(self.0)
        }
    }
1848
    /// [`HvGvaRange`] in the simple format: a page count packed below a
    /// 4 KB page number.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest virtual address.
        #[bits(52)]
        pub gva_page_number: u64,
    }
1859
    /// [`HvGvaRange`] in the extended format with the large-page bit clear.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest virtual address when `large_page` is clear.
        #[bits(52)]
        pub gva_page_number: u64,
    }
1872
    /// [`HvGvaRange`] in the extended format with the large-page bit set.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest virtual address when `large_page` is set.
        #[bits(43)]
        pub gva_large_page_number: u64,
    }
1891
    /// An encoded guest physical address range. The bit layout mirrors
    /// [`HvGvaRange`]; use the `as_*` views to decode it.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGpaRange(pub u64);

    impl HvGpaRange {
        /// Reinterprets the range in the simple (4 KB pages only) format.
        pub fn as_simple(self) -> HvGpaRangeSimple {
            HvGpaRangeSimple(self.0)
        }

        /// Reinterprets the range in the extended format (4 KB pages with a
        /// large-page flag).
        pub fn as_extended(self) -> HvGpaRangeExtended {
            HvGpaRangeExtended(self.0)
        }

        /// Reinterprets the range in the extended format when the large-page
        /// bit is set.
        pub fn as_extended_large_page(self) -> HvGpaRangeExtendedLargePage {
            HvGpaRangeExtendedLargePage(self.0)
        }
    }
1909
    /// [`HvGpaRange`] in the simple format: a page count packed below a
    /// 4 KB page number.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest physical address.
        #[bits(52)]
        pub gpa_page_number: u64,
    }
1920
    /// [`HvGpaRange`] in the extended format with the large-page bit clear.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest physical address when `large_page` is clear.
        #[bits(52)]
        pub gpa_page_number: u64,
    }
1933
    /// [`HvGpaRange`] in the extended format with the large-page bit set.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest physical address when `large_page` is set.
        #[bits(43)]
        pub gpa_large_page_number: u64,
    }
1952
    /// Maximum data payload, in bytes, for the MMIO read/write hypercalls.
    pub const HV_HYPERCALL_MMIO_MAX_DATA_LENGTH: usize = 64;
1954
    /// Input for the MMIO read hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoRead {
        /// Guest physical address to read.
        pub gpa: u64,
        /// Width of the access, in bytes; at most
        /// [`HV_HYPERCALL_MMIO_MAX_DATA_LENGTH`].
        pub access_width: u32,
        /// Must be zero.
        pub reserved_z0: u32,
    }
1962
    /// Output of the MMIO read hypercall; only the first `access_width`
    /// bytes of `data` are meaningful.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoReadOutput {
        /// The bytes that were read.
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }
1968
    /// Input for the MMIO write hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoWrite {
        /// Guest physical address to write.
        pub gpa: u64,
        /// Width of the access, in bytes; only the first `access_width`
        /// bytes of `data` are written.
        pub access_width: u32,
        /// Must be zero.
        pub reserved_z0: u32,
        /// The bytes to write.
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }
1977}
1978
/// Declares an architecture-specific register-name `open_enum` over `u32`.
///
/// The caller supplies the arch-specific variants; the macro appends the
/// variants that are common to every architecture (crash registers, synic
/// registers, synthetic timers, VSM registers, ...), so those values exist in
/// each generated enum without being repeated at every call site. It also
/// generates lossless conversions to and from the untyped [`HvRegisterName`].
macro_rules! registers {
    ($name:ident {
        $(
            $(#[$vattr:meta])*
            $variant:ident = $value:expr
        ),*
        $(,)?
    }) => {
        open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
            pub enum $name: u32 {
        #![expect(non_upper_case_globals)]
                // Caller-provided, architecture-specific variants.
                $($variant = $value,)*
                // Variants below are shared across all architectures.
                InstructionEmulationHints = 0x00000002,
                InternalActivityState = 0x00000004,

        // Guest Crash Registers
                GuestCrashP0  = 0x00000210,
                GuestCrashP1  = 0x00000211,
                GuestCrashP2  = 0x00000212,
                GuestCrashP3  = 0x00000213,
                GuestCrashP4  = 0x00000214,
                GuestCrashCtl = 0x00000215,

                PendingInterruption = 0x00010002,
                InterruptState = 0x00010003,
                PendingEvent0 = 0x00010004,
                PendingEvent1 = 0x00010005,
                DeliverabilityNotifications = 0x00010006,

                GicrBaseGpa = 0x00063000,

                VpRuntime = 0x00090000,
                GuestOsId = 0x00090002,
                VpIndex = 0x00090003,
                TimeRefCount = 0x00090004,
                CpuManagementVersion = 0x00090007,
                VpAssistPage = 0x00090013,
                VpRootSignalCount = 0x00090014,
                ReferenceTsc = 0x00090017,
                VpConfig = 0x00090018,
                Ghcb = 0x00090019,
                ReferenceTscSequence = 0x0009001A,
                GuestSchedulerEvent = 0x0009001B,

        // Synthetic interrupt controller (synic) registers.
                Sint0 = 0x000A0000,
                Sint1 = 0x000A0001,
                Sint2 = 0x000A0002,
                Sint3 = 0x000A0003,
                Sint4 = 0x000A0004,
                Sint5 = 0x000A0005,
                Sint6 = 0x000A0006,
                Sint7 = 0x000A0007,
                Sint8 = 0x000A0008,
                Sint9 = 0x000A0009,
                Sint10 = 0x000A000A,
                Sint11 = 0x000A000B,
                Sint12 = 0x000A000C,
                Sint13 = 0x000A000D,
                Sint14 = 0x000A000E,
                Sint15 = 0x000A000F,
                Scontrol = 0x000A0010,
                Sversion = 0x000A0011,
                Sifp = 0x000A0012,
                Sipp = 0x000A0013,
                Eom = 0x000A0014,
                Sirbp = 0x000A0015,

        // Synthetic timer registers.
                Stimer0Config = 0x000B0000,
                Stimer0Count = 0x000B0001,
                Stimer1Config = 0x000B0002,
                Stimer1Count = 0x000B0003,
                Stimer2Config = 0x000B0004,
                Stimer2Count = 0x000B0005,
                Stimer3Config = 0x000B0006,
                Stimer3Count = 0x000B0007,
                StimeUnhaltedTimerConfig = 0x000B0100,
                StimeUnhaltedTimerCount = 0x000B0101,

        // Virtual Secure Mode (VSM) registers.
                VsmCodePageOffsets = 0x000D0002,
                VsmVpStatus = 0x000D0003,
                VsmPartitionStatus = 0x000D0004,
                VsmVina = 0x000D0005,
                VsmCapabilities = 0x000D0006,
                VsmPartitionConfig = 0x000D0007,
                GuestVsmPartitionConfig = 0x000D0008,
                VsmVpSecureConfigVtl0 = 0x000D0010,
                VsmVpSecureConfigVtl1 = 0x000D0011,
                VsmVpSecureConfigVtl2 = 0x000D0012,
                VsmVpSecureConfigVtl3 = 0x000D0013,
                VsmVpSecureConfigVtl4 = 0x000D0014,
                VsmVpSecureConfigVtl5 = 0x000D0015,
                VsmVpSecureConfigVtl6 = 0x000D0016,
                VsmVpSecureConfigVtl7 = 0x000D0017,
                VsmVpSecureConfigVtl8 = 0x000D0018,
                VsmVpSecureConfigVtl9 = 0x000D0019,
                VsmVpSecureConfigVtl10 = 0x000D001A,
                VsmVpSecureConfigVtl11 = 0x000D001B,
                VsmVpSecureConfigVtl12 = 0x000D001C,
                VsmVpSecureConfigVtl13 = 0x000D001D,
                VsmVpSecureConfigVtl14 = 0x000D001E,
                VsmVpWaitForTlbLock = 0x000D0020,
            }
        }

        // Conversions to/from the untyped register name are value-preserving
        // in both directions (both sides wrap the same u32).
        impl From<HvRegisterName> for $name {
            fn from(name: HvRegisterName) -> Self {
                Self(name.0)
            }
        }

        impl From<$name> for HvRegisterName {
            fn from(name: $name) -> Self {
                Self(name.0)
            }
        }
    };
}
2097
/// A hypervisor register for any architecture.
///
/// This exists only to pass registers through layers where the architecture
/// type has been lost. In general, you should use the arch-specific registers.
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterName(pub u32);
2105
registers! {
    // Typed enum for registers that are shared across architectures. The
    // common variants are supplied by the `registers!` macro itself, so the
    // body here is intentionally empty.
    HvAllArchRegisterName {}
}
2110
// An architecture-neutral register name widens into each arch-specific
// register enum unchanged; the underlying u32 identifiers are shared.
impl From<HvAllArchRegisterName> for HvX64RegisterName {
    fn from(name: HvAllArchRegisterName) -> Self {
        Self(name.0)
    }
}

impl From<HvAllArchRegisterName> for HvArm64RegisterName {
    fn from(name: HvAllArchRegisterName) -> Self {
        Self(name.0)
    }
}
2122
registers! {
    // Register names specific to x86_64; the cross-architecture variants are
    // appended automatically by the `registers!` macro.
    HvX64RegisterName {
        // X64 User-Mode Registers
        Rax = 0x00020000,
        Rcx = 0x00020001,
        Rdx = 0x00020002,
        Rbx = 0x00020003,
        Rsp = 0x00020004,
        Rbp = 0x00020005,
        Rsi = 0x00020006,
        Rdi = 0x00020007,
        R8 = 0x00020008,
        R9 = 0x00020009,
        R10 = 0x0002000a,
        R11 = 0x0002000b,
        R12 = 0x0002000c,
        R13 = 0x0002000d,
        R14 = 0x0002000e,
        R15 = 0x0002000f,
        Rip = 0x00020010,
        Rflags = 0x00020011,

        // X64 Floating Point and Vector Registers
        Xmm0 = 0x00030000,
        Xmm1 = 0x00030001,
        Xmm2 = 0x00030002,
        Xmm3 = 0x00030003,
        Xmm4 = 0x00030004,
        Xmm5 = 0x00030005,
        Xmm6 = 0x00030006,
        Xmm7 = 0x00030007,
        Xmm8 = 0x00030008,
        Xmm9 = 0x00030009,
        Xmm10 = 0x0003000A,
        Xmm11 = 0x0003000B,
        Xmm12 = 0x0003000C,
        Xmm13 = 0x0003000D,
        Xmm14 = 0x0003000E,
        Xmm15 = 0x0003000F,
        FpMmx0 = 0x00030010,
        FpMmx1 = 0x00030011,
        FpMmx2 = 0x00030012,
        FpMmx3 = 0x00030013,
        FpMmx4 = 0x00030014,
        FpMmx5 = 0x00030015,
        FpMmx6 = 0x00030016,
        FpMmx7 = 0x00030017,
        FpControlStatus = 0x00030018,
        XmmControlStatus = 0x00030019,

        // X64 Control Registers
        Cr0 = 0x00040000,
        Cr2 = 0x00040001,
        Cr3 = 0x00040002,
        Cr4 = 0x00040003,
        Cr8 = 0x00040004,
        Xfem = 0x00040005,
        // X64 Intermediate Control Registers
        IntermediateCr0 = 0x00041000,
        IntermediateCr3 = 0x00041002,
        IntermediateCr4 = 0x00041003,
        IntermediateCr8 = 0x00041004,
        // X64 Debug Registers
        Dr0 = 0x00050000,
        Dr1 = 0x00050001,
        Dr2 = 0x00050002,
        Dr3 = 0x00050003,
        Dr6 = 0x00050004,
        Dr7 = 0x00050005,
        // X64 Segment Registers
        Es = 0x00060000,
        Cs = 0x00060001,
        Ss = 0x00060002,
        Ds = 0x00060003,
        Fs = 0x00060004,
        Gs = 0x00060005,
        Ldtr = 0x00060006,
        Tr = 0x00060007,
        // X64 Table Registers
        Idtr = 0x00070000,
        Gdtr = 0x00070001,
        // X64 Virtualized MSRs
        Tsc = 0x00080000,
        Efer = 0x00080001,
        KernelGsBase = 0x00080002,
        ApicBase = 0x00080003,
        Pat = 0x00080004,
        SysenterCs = 0x00080005,
        SysenterEip = 0x00080006,
        SysenterEsp = 0x00080007,
        Star = 0x00080008,
        Lstar = 0x00080009,
        Cstar = 0x0008000a,
        Sfmask = 0x0008000b,
        InitialApicId = 0x0008000c,
        // X64 Cache control MSRs
        MsrMtrrCap = 0x0008000d,
        MsrMtrrDefType = 0x0008000e,
        MsrMtrrPhysBase0 = 0x00080010,
        MsrMtrrPhysBase1 = 0x00080011,
        MsrMtrrPhysBase2 = 0x00080012,
        MsrMtrrPhysBase3 = 0x00080013,
        MsrMtrrPhysBase4 = 0x00080014,
        MsrMtrrPhysBase5 = 0x00080015,
        MsrMtrrPhysBase6 = 0x00080016,
        MsrMtrrPhysBase7 = 0x00080017,
        MsrMtrrPhysBase8 = 0x00080018,
        MsrMtrrPhysBase9 = 0x00080019,
        MsrMtrrPhysBaseA = 0x0008001a,
        MsrMtrrPhysBaseB = 0x0008001b,
        MsrMtrrPhysBaseC = 0x0008001c,
        MsrMtrrPhysBaseD = 0x0008001d,
        MsrMtrrPhysBaseE = 0x0008001e,
        MsrMtrrPhysBaseF = 0x0008001f,
        MsrMtrrPhysMask0 = 0x00080040,
        MsrMtrrPhysMask1 = 0x00080041,
        MsrMtrrPhysMask2 = 0x00080042,
        MsrMtrrPhysMask3 = 0x00080043,
        MsrMtrrPhysMask4 = 0x00080044,
        MsrMtrrPhysMask5 = 0x00080045,
        MsrMtrrPhysMask6 = 0x00080046,
        MsrMtrrPhysMask7 = 0x00080047,
        MsrMtrrPhysMask8 = 0x00080048,
        MsrMtrrPhysMask9 = 0x00080049,
        MsrMtrrPhysMaskA = 0x0008004a,
        MsrMtrrPhysMaskB = 0x0008004b,
        MsrMtrrPhysMaskC = 0x0008004c,
        MsrMtrrPhysMaskD = 0x0008004d,
        MsrMtrrPhysMaskE = 0x0008004e,
        MsrMtrrPhysMaskF = 0x0008004f,
        MsrMtrrFix64k00000 = 0x00080070,
        MsrMtrrFix16k80000 = 0x00080071,
        MsrMtrrFix16kA0000 = 0x00080072,
        MsrMtrrFix4kC0000 = 0x00080073,
        MsrMtrrFix4kC8000 = 0x00080074,
        MsrMtrrFix4kD0000 = 0x00080075,
        MsrMtrrFix4kD8000 = 0x00080076,
        MsrMtrrFix4kE0000 = 0x00080077,
        MsrMtrrFix4kE8000 = 0x00080078,
        MsrMtrrFix4kF0000 = 0x00080079,
        MsrMtrrFix4kF8000 = 0x0008007a,

        TscAux = 0x0008007B,
        Bndcfgs = 0x0008007C,
        DebugCtl = 0x0008007D,
        MCount = 0x0008007E,
        ACount = 0x0008007F,

        SgxLaunchControl0 = 0x00080080,
        SgxLaunchControl1 = 0x00080081,
        SgxLaunchControl2 = 0x00080082,
        SgxLaunchControl3 = 0x00080083,
        SpecCtrl = 0x00080084,
        PredCmd = 0x00080085,
        VirtSpecCtrl = 0x00080086,
        TscVirtualOffset = 0x00080087,
        TsxCtrl = 0x00080088,
        MsrMcUpdatePatchLevel = 0x00080089,
        Available1 = 0x0008008A,
        Xss = 0x0008008B,
        UCet = 0x0008008C,
        SCet = 0x0008008D,
        Ssp = 0x0008008E,
        Pl0Ssp = 0x0008008F,
        Pl1Ssp = 0x00080090,
        Pl2Ssp = 0x00080091,
        Pl3Ssp = 0x00080092,
        InterruptSspTableAddr = 0x00080093,
        TscVirtualMultiplier = 0x00080094,
        TscDeadline = 0x00080095,
        TscAdjust = 0x00080096,
        Pasid = 0x00080097,
        UmwaitControl = 0x00080098,
        Xfd = 0x00080099,
        XfdErr = 0x0008009A,

        Hypercall = 0x00090001,
        RegisterPage = 0x0009001C,

        // Partition Timer Assist Registers
        EmulatedTimerPeriod = 0x00090030,
        EmulatedTimerControl = 0x00090031,
        PmTimerAssist = 0x00090032,

        // AMD SEV configuration MSRs
        SevControl = 0x00090040,

        CrInterceptControl = 0x000E0000,
        CrInterceptCr0Mask = 0x000E0001,
        CrInterceptCr4Mask = 0x000E0002,
        CrInterceptIa32MiscEnableMask = 0x000E0003,
    }
}
2316
registers! {
    // Register names specific to aarch64; the cross-architecture variants
    // are appended automatically by the `registers!` macro.
    HvArm64RegisterName {
        HypervisorVersion = 0x00000100,
        PrivilegesAndFeaturesInfo = 0x00000200,
        FeaturesInfo = 0x00000201,
        ImplementationLimitsInfo = 0x00000202,
        HardwareFeaturesInfo = 0x00000203,
        CpuManagementFeaturesInfo = 0x00000204,
        PasidFeaturesInfo = 0x00000205,
        SkipLevelFeaturesInfo = 0x00000206,
        NestedVirtFeaturesInfo = 0x00000207,
        IptFeaturesInfo = 0x00000208,
        IsolationConfiguration = 0x00000209,

        // General-purpose registers.
        X0 = 0x00020000,
        X1 = 0x00020001,
        X2 = 0x00020002,
        X3 = 0x00020003,
        X4 = 0x00020004,
        X5 = 0x00020005,
        X6 = 0x00020006,
        X7 = 0x00020007,
        X8 = 0x00020008,
        X9 = 0x00020009,
        X10 = 0x0002000A,
        X11 = 0x0002000B,
        X12 = 0x0002000C,
        X13 = 0x0002000D,
        X14 = 0x0002000E,
        X15 = 0x0002000F,
        X16 = 0x00020010,
        X17 = 0x00020011,
        X18 = 0x00020012,
        X19 = 0x00020013,
        X20 = 0x00020014,
        X21 = 0x00020015,
        X22 = 0x00020016,
        X23 = 0x00020017,
        X24 = 0x00020018,
        X25 = 0x00020019,
        X26 = 0x0002001A,
        X27 = 0x0002001B,
        X28 = 0x0002001C,
        XFp = 0x0002001D,
        XLr = 0x0002001E,
        XSp = 0x0002001F, // alias for either El0/x depending on Cpsr.SPSel
        XSpEl0 = 0x00020020,
        XSpElx = 0x00020021,
        XPc = 0x00020022,
        Cpsr = 0x00020023,
        SpsrEl2 = 0x00021002,

        // EL1 system registers.
        SctlrEl1 = 0x00040002,
        Ttbr0El1 = 0x00040005,
        Ttbr1El1 = 0x00040006,
        TcrEl1 = 0x00040007,
        EsrEl1 = 0x00040008,
        FarEl1 = 0x00040009,
        MairEl1 = 0x0004000b,
        VbarEl1 = 0x0004000c,
        ElrEl1 = 0x00040015,
    }
}
2380
/// A 128-bit register value, aligned for direct use in register get/set
/// hypercall buffers.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterValue(pub AlignedU128);
2384
impl HvRegisterValue {
    /// Returns the full 128-bit value.
    pub fn as_u128(&self) -> u128 {
        self.0.into()
    }

    /// Returns the low 64 bits, discarding the rest.
    pub fn as_u64(&self) -> u64 {
        self.as_u128() as u64
    }

    /// Returns the low 32 bits, discarding the rest.
    pub fn as_u32(&self) -> u32 {
        self.as_u128() as u32
    }

    /// Returns the low 16 bits, discarding the rest.
    pub fn as_u16(&self) -> u16 {
        self.as_u128() as u16
    }

    /// Returns the low 8 bits, discarding the rest.
    pub fn as_u8(&self) -> u8 {
        self.as_u128() as u8
    }

    /// Reinterprets the value as a table register (GDTR/IDTR format).
    pub fn as_table(&self) -> HvX64TableRegister {
        HvX64TableRegister::read_from_prefix(self.as_bytes())
            .unwrap()
            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }

    /// Reinterprets the value as a segment register.
    pub fn as_segment(&self) -> HvX64SegmentRegister {
        HvX64SegmentRegister::read_from_prefix(self.as_bytes())
            .unwrap()
            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}
2418
// Widening conversions: each unsigned integer zero-extends into the 128-bit
// register value.
impl From<u8> for HvRegisterValue {
    fn from(val: u8) -> Self {
        (val as u128).into()
    }
}

impl From<u16> for HvRegisterValue {
    fn from(val: u16) -> Self {
        (val as u128).into()
    }
}

impl From<u32> for HvRegisterValue {
    fn from(val: u32) -> Self {
        (val as u128).into()
    }
}

impl From<u64> for HvRegisterValue {
    fn from(val: u64) -> Self {
        (val as u128).into()
    }
}

impl From<u128> for HvRegisterValue {
    fn from(val: u128) -> Self {
        Self(val.into())
    }
}
2448
/// An x64 descriptor-table register (GDTR/IDTR) in the layout used within an
/// [`HvRegisterValue`].
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64TableRegister {
    /// Must be zero.
    pub pad: [u16; 3],
    pub limit: u16,
    pub base: u64,
}

impl From<HvX64TableRegister> for HvRegisterValue {
    fn from(val: HvX64TableRegister) -> Self {
        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}

impl From<HvRegisterValue> for HvX64TableRegister {
    fn from(val: HvRegisterValue) -> Self {
        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}
2468
/// An x64 segment register in the layout used within an [`HvRegisterValue`]
/// and in intercept message headers.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SegmentRegister {
    pub base: u64,
    pub limit: u32,
    pub selector: u16,
    /// Packed segment attribute bits.
    pub attributes: u16,
}

impl From<HvX64SegmentRegister> for HvRegisterValue {
    fn from(val: HvX64SegmentRegister) -> Self {
        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}

impl From<HvRegisterValue> for HvX64SegmentRegister {
    fn from(val: HvRegisterValue) -> Self {
        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}
2489
/// Value of the `DeliverabilityNotifications` register, which requests
/// notifications when delivery of certain event classes becomes possible.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
pub struct HvDeliverabilityNotificationsRegister {
    /// x86_64 only.
    pub nmi_notification: bool,
    /// x86_64 only.
    pub interrupt_notification: bool,
    /// x86_64 only.
    #[bits(4)]
    pub interrupt_priority: u8,
    #[bits(42)]
    pub reserved: u64,
    /// Per-SINT notification bits.
    pub sints: u16,
}
2505
open_enum! {
    /// Why a VTL was entered on a virtual processor; reported in
    /// [`HvVpVtlControl::entry_reason`].
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvVtlEntryReason: u32 {
        /// This reason is reserved and is not used.
        RESERVED = 0,

        /// Indicates entry due to a VTL call from a lower VTL.
        VTL_CALL = 1,

        /// Indicates entry due to an interrupt targeted to the VTL.
        INTERRUPT = 2,

        /// Indicates an entry due to an intercept delivered via the intercept page.
        INTERCEPT = 3,
    }
}
2522
/// Per-VP VTL control block, embedded in the VP assist page.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpVtlControl {
    /// The hypervisor updates the entry reason with an indication as to why
    /// the VTL was entered on the virtual processor.
    pub entry_reason: HvVtlEntryReason,

    /// This flag determines whether the VINA interrupt line is asserted.
    pub vina_status: u8,
    pub reserved_z0: u8,
    pub reserved_z1: u16,

    /// A guest updates the VtlReturn* fields to provide the register values to
    /// restore on VTL return.  The specific register values that are restored
    /// will vary based on whether the VTL is 32-bit or 64-bit: rax and rcx or
    /// eax, ecx, and edx.
    pub registers: [u64; 2],
}
2543
/// Value of the `VsmVina` register, configuring the Virtual Interrupt
/// Notification Assist.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterVsmVina {
    /// Interrupt vector used for VINA assertions.
    pub vector: u8,
    pub enabled: bool,
    pub auto_reset: bool,
    pub auto_eoi: bool,
    #[bits(53)]
    pub reserved: u64,
}
2554
/// Layout of the per-VP assist page shared between the guest and the
/// hypervisor (mapped via the `VpAssistPage` register).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPage {
    /// APIC assist for optimized EOI processing.
    pub apic_assist: u32,
    pub reserved_z0: u32,

    /// VP-VTL control information
    pub vtl_control: HvVpVtlControl,

    pub nested_enlightenments_control: u64,
    pub enlighten_vm_entry: u8,
    pub reserved_z1: [u8; 7],
    pub current_nested_vmcs: u64,
    pub synthetic_time_unhalted_timer_expired: u8,
    pub reserved_z2: [u8; 7],
    pub virtualization_fault_information: [u8; 40],
    pub reserved_z3: u64,
    /// The intercept message delivered via the intercept page.
    pub intercept_message: HvMessage,
    /// Action slots processed on VTL return.
    pub vtl_return_actions: [u8; 256],
}
2576
/// A "signal event" action entry within [`HvVpAssistPage::vtl_return_actions`].
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPageActionSignalEvent {
    /// Discriminates the action entry type.
    pub action_type: u64,
    /// The virtual processor to signal.
    pub target_vp: u32,
    /// The VTL to signal.
    pub target_vtl: u8,
    /// The synthetic interrupt source to signal.
    pub target_sint: u8,
    /// The event flag number to set.
    pub flag_number: u16,
}
2586
open_enum! {
    /// The kind of access that triggered an intercept, as reported in
    /// intercept message headers.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterceptAccessType: u8 {
        READ = 0,
        WRITE = 1,
        EXECUTE = 2,
    }
}
2595
/// Snapshot of x64 processor execution state at intercept time, carried in
/// [`HvX64InterceptMessageHeader`].
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VpExecutionState {
    /// Current privilege level.
    #[bits(2)]
    pub cpl: u8,
    /// CR0.PE (protected mode enabled).
    pub cr0_pe: bool,
    /// CR0.AM (alignment mask).
    pub cr0_am: bool,
    /// EFER.LMA (long mode active).
    pub efer_lma: bool,
    pub debug_active: bool,
    pub interruption_pending: bool,
    /// The VTL the processor was executing in.
    #[bits(4)]
    pub vtl: u8,
    pub enclave_mode: bool,
    pub interrupt_shadow: bool,
    pub virtualization_fault_active: bool,
    #[bits(2)]
    pub reserved: u8,
}
2614
/// Snapshot of aarch64 processor execution state at intercept time, carried
/// in [`HvArm64InterceptMessageHeader`].
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64VpExecutionState {
    /// Current privilege level.
    #[bits(2)]
    pub cpl: u8,
    pub debug_active: bool,
    pub interruption_pending: bool,
    /// The VTL the processor was executing in.
    #[bits(4)]
    pub vtl: u8,
    pub virtualization_fault_active: bool,
    #[bits(7)]
    pub reserved: u8,
}
2628
/// Common header that begins every x64 intercept message.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterceptMessageHeader {
    /// Index of the virtual processor that hit the intercept.
    pub vp_index: u32,
    /// Packed field: instruction length in the low nibble, CR8 in the high
    /// nibble; decode via `instruction_len()`/`cr8()`.
    pub instruction_length_and_cr8: u8,
    /// Whether the access was a read, write, or execute.
    pub intercept_access_type: HvInterceptAccessType,
    /// Processor execution state at intercept time.
    pub execution_state: HvX64VpExecutionState,
    /// CS at intercept time.
    pub cs_segment: HvX64SegmentRegister,
    pub rip: u64,
    pub rflags: u64,
}

impl MessagePayload for HvX64InterceptMessageHeader {}
2642
impl HvX64InterceptMessageHeader {
    /// The length, in bytes, of the intercepted instruction (low nibble of
    /// `instruction_length_and_cr8`).
    pub fn instruction_len(&self) -> u8 {
        self.instruction_length_and_cr8 & 0xf
    }

    /// The CR8 value at intercept time (high nibble of
    /// `instruction_length_and_cr8`).
    pub fn cr8(&self) -> u8 {
        self.instruction_length_and_cr8 >> 4
    }
}
2652
/// Common header that begins every aarch64 intercept message.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64InterceptMessageHeader {
    /// Index of the virtual processor that hit the intercept.
    pub vp_index: u32,
    /// Length, in bytes, of the intercepted instruction.
    pub instruction_length: u8,
    /// Whether the access was a read, write, or execute.
    pub intercept_access_type: HvInterceptAccessType,
    /// Processor execution state at intercept time.
    pub execution_state: HvArm64VpExecutionState,
    pub pc: u64,
    // NOTE(review): name looks like a transposition of "cpsr" (the register
    // is named Cpsr elsewhere in this file); renaming would break callers,
    // so it is left as-is.
    pub cspr: u64,
}
const_assert!(size_of::<HvArm64InterceptMessageHeader>() == 0x18);

impl MessagePayload for HvArm64InterceptMessageHeader {}
2666
/// Packed access information for an x64 I/O port intercept: access size in
/// bits 0..=2, string-op flag in bit 3, rep-prefix flag in bit 4.
#[repr(transparent)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortAccessInfo(pub u8);
2670
2671impl HvX64IoPortAccessInfo {
2672    pub fn new(access_size: u8, string_op: bool, rep_prefix: bool) -> Self {
2673        let mut info = access_size & 0x7;
2674
2675        if string_op {
2676            info |= 0x8;
2677        }
2678
2679        if rep_prefix {
2680            info |= 0x10;
2681        }
2682
2683        Self(info)
2684    }
2685
2686    pub fn access_size(&self) -> u8 {
2687        self.0 & 0x7
2688    }
2689
2690    pub fn string_op(&self) -> bool {
2691        self.0 & 0x8 != 0
2692    }
2693
2694    pub fn rep_prefix(&self) -> bool {
2695        self.0 & 0x10 != 0
2696    }
2697}
2698
/// Intercept message delivered for an x64 I/O port access.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    /// The I/O port that was accessed.
    pub port_number: u16,
    /// Access size and string/rep flags.
    pub access_info: HvX64IoPortAccessInfo,
    /// Number of valid bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    pub reserved: u32,
    pub rax: u64,
    /// Raw bytes of the intercepted instruction.
    pub instruction_bytes: [u8; 16],
    pub ds_segment: HvX64SegmentRegister,
    pub es_segment: HvX64SegmentRegister,
    pub rcx: u64,
    pub rsi: u64,
    pub rdi: u64,
}

impl MessagePayload for HvX64IoPortInterceptMessage {}
2717
/// Access flags for an x64 memory intercept message.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MemoryAccessInfo {
    /// The guest virtual address field of the message is valid.
    pub gva_valid: bool,
    pub gva_gpa_valid: bool,
    pub hypercall_output_pending: bool,
    pub tlb_locked: bool,
    pub supervisor_shadow_stack: bool,
    #[bits(3)]
    pub reserved1: u8,
}
2729
/// Access flags for an aarch64 memory intercept message.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64MemoryAccessInfo {
    /// The guest virtual address field of the message is valid.
    pub gva_valid: bool,
    pub gva_gpa_valid: bool,
    pub hypercall_output_pending: bool,
    #[bits(5)]
    pub reserved1: u8,
}
2739
open_enum! {
    /// Memory cache type, as reported in memory intercept messages.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvCacheType: u32 {
        #![expect(non_upper_case_globals)]
        HvCacheTypeUncached = 0,
        HvCacheTypeWriteCombining = 1,
        HvCacheTypeWriteThrough = 4,
        HvCacheTypeWriteProtected = 5,
        HvCacheTypeWriteBack = 6,
    }
}
2751
/// Intercept message delivered for an x64 memory access (GPA intercept).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MemoryInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    /// Cache type of the access.
    pub cache_type: HvCacheType,
    /// Number of valid bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    /// Flags qualifying the address fields below.
    pub memory_access_info: HvX64MemoryAccessInfo,
    pub tpr_priority: u8,
    pub reserved: u8,
    /// Valid only when `memory_access_info.gva_valid()` is set.
    pub guest_virtual_address: u64,
    pub guest_physical_address: u64,
    /// Raw bytes of the intercepted instruction.
    pub instruction_bytes: [u8; 16],
}

impl MessagePayload for HvX64MemoryInterceptMessage {}
const_assert!(size_of::<HvX64MemoryInterceptMessage>() == 0x50);
2768
2769#[repr(C)]
2770#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
2771pub struct HvArm64MemoryInterceptMessage {
2772    pub header: HvArm64InterceptMessageHeader,
2773    pub cache_type: HvCacheType,
2774    pub instruction_byte_count: u8,
2775    pub memory_access_info: HvArm64MemoryAccessInfo,
2776    pub reserved1: u16,
2777    pub instruction_bytes: [u8; 4],
2778    pub reserved2: u32,
2779    pub guest_virtual_address: u64,
2780    pub guest_physical_address: u64,
2781    pub syndrome: u64,
2782}
2783
2784impl MessagePayload for HvArm64MemoryInterceptMessage {}
2785const_assert!(size_of::<HvArm64MemoryInterceptMessage>() == 0x40);
2786
/// Intercept message describing an ARM64 MMIO access.
#[repr(C)]
#[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)]
pub struct HvArm64MmioInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub guest_physical_address: u64,
    /// Number of valid bytes in `data`.
    pub access_size: u32,
    pub data: [u8; 32],
    pub padding: u32,
}

impl MessagePayload for HvArm64MmioInterceptMessage {}
// Layout is part of the hypervisor ABI; guard the size at compile time.
const_assert!(size_of::<HvArm64MmioInterceptMessage>() == 0x48);

/// Intercept message describing an x64 MSR access.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MsrInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub msr_number: u32,
    pub reserved: u32,
    // rdx/rax presumably carry the MSR value halves as in rdmsr/wrmsr —
    // NOTE(review): confirm against the TLFS.
    pub rdx: u64,
    pub rax: u64,
}

impl MessagePayload for HvX64MsrInterceptMessage {}
2811
/// Intercept message delivered when a SIPI is sent to a virtual processor.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SipiInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub target_vp_index: u32,
    pub vector: u32,
}

impl MessagePayload for HvX64SipiInterceptMessage {}

/// Message indicating which x64 synthetic interrupt sources can now accept a
/// message (one bit per SINT in `deliverable_sints`).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SynicSintDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvX64SynicSintDeliverableMessage {}

/// ARM64 variant of [`HvX64SynicSintDeliverableMessage`].
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64SynicSintDeliverableMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvArm64SynicSintDeliverableMessage {}
2843
/// Message indicating that an interruption of the given type can now be
/// delivered to the virtual processor.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptionDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    pub deliverable_type: HvX64PendingInterruptionType,
    pub rsvd: [u8; 3],
    pub rsvd2: u32,
}

impl MessagePayload for HvX64InterruptionDeliverableMessage {}

// Types of pending x64 interruptions. Value 1 is intentionally unassigned.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvX64PendingInterruptionType: u8 {
        HV_X64_PENDING_INTERRUPT = 0,
        HV_X64_PENDING_NMI = 2,
        HV_X64_PENDING_EXCEPTION = 3,
        HV_X64_PENDING_SOFTWARE_INTERRUPT = 4,
        HV_X64_PENDING_PRIVILEGED_SOFTWARE_EXCEPTION = 5,
        HV_X64_PENDING_SOFTWARE_EXCEPTION = 6,
    }
}
2866
/// Intercept message describing an x64 hypercall, carrying the registers used
/// by the hypercall calling convention.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HypercallInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rbx: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub r8: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub xmm_registers: [AlignedU128; 6],
    pub flags: HvHypercallInterceptMessageFlags,
    pub rsvd2: [u32; 3],
}

impl MessagePayload for HvX64HypercallInterceptMessage {}

/// Intercept message describing an ARM64 hypercall (HVC), carrying the
/// immediate and general purpose registers x0-x17.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64HypercallInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub immediate: u16,
    pub reserved: u16,
    pub flags: HvHypercallInterceptMessageFlags,
    pub x: [u64; 18],
}

impl MessagePayload for HvArm64HypercallInterceptMessage {}

/// Flags common to the hypercall intercept messages.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvHypercallInterceptMessageFlags {
    pub is_isolated: bool,
    #[bits(31)]
    _reserved: u32,
}
2904
/// Intercept message describing a CPUID instruction, including the result the
/// hypervisor would return by default.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64CpuidInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub default_result_rax: u64,
    pub default_result_rcx: u64,
    pub default_result_rdx: u64,
    pub default_result_rbx: u64,
}

impl MessagePayload for HvX64CpuidInterceptMessage {}

/// Details about an intercepted x64 exception.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInfo {
    pub error_code_valid: bool,
    pub software_exception: bool,
    #[bits(6)]
    reserved: u8,
}
2929
/// Intercept message describing an x64 exception, including a full snapshot of
/// the general purpose registers.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub vector: u16,
    pub exception_info: HvX64ExceptionInfo,
    /// Number of valid bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    /// Only meaningful when `exception_info.error_code_valid()` is set.
    pub error_code: u32,
    pub exception_parameter: u64,
    pub reserved: u64,
    pub instruction_bytes: [u8; 16],
    pub ds_segment: HvX64SegmentRegister,
    pub ss_segment: HvX64SegmentRegister,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub rsp: u64,
    pub rbp: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,
}

impl MessagePayload for HvX64ExceptionInterceptMessage {}
2962
/// Message indicating that a virtual processor's register state is invalid.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInvalidVpRegisterMessage {
    pub vp_index: u32,
    pub reserved: u32,
}

impl MessagePayload for HvInvalidVpRegisterMessage {}

/// Message indicating an EOI was issued for the given vector on the given VP's
/// APIC.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ApicEoiMessage {
    pub vp_index: u32,
    pub interrupt_vector: u32,
}

impl MessagePayload for HvX64ApicEoiMessage {}

/// Message indicating the VP hit an unrecoverable exception (e.g. triple
/// fault); carries only the common intercept header.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64UnrecoverableExceptionMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64UnrecoverableExceptionMessage {}

/// Message indicating the VP executed HLT; carries only the common header.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HaltMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64HaltMessage {}
2996
/// Intercept message describing an ARM64 reset (PSCI power-off or reboot)
/// request.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64ResetInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub reset_type: HvArm64ResetType,
    pub padding: u32,
}

impl MessagePayload for HvArm64ResetInterceptMessage {}

// Kind of reset requested in an ARM64 reset intercept.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvArm64ResetType: u32 {
        POWER_OFF = 0,
        REBOOT = 1,
    }
}
3014
/// Flags for [`HvX64RegisterInterceptMessage`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessageFlags {
    pub is_memory_op: bool,
    #[bits(7)]
    _rsvd: u8,
}

/// Intercept message describing an access to an intercepted register.
#[repr(C)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub flags: HvX64RegisterInterceptMessageFlags,
    pub rsvd: u8,
    pub rsvd2: u16,
    pub register_name: HvX64RegisterName,
    pub access_info: HvX64RegisterAccessInfo,
}

/// Access info for a register intercept: the raw 128-bit source value being
/// written.
#[repr(transparent)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterAccessInfo(u128);

impl HvX64RegisterAccessInfo {
    /// Wraps `source_value` as register access info.
    pub fn new_source_value(source_value: HvRegisterValue) -> Self {
        Self(source_value.as_u128())
    }
}
3043
// Interrupt delivery modes. The x64 values match the APIC delivery mode
// encodings; fixed delivery (0) is shared with ARM64.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterruptType : u32  {
        #![expect(non_upper_case_globals)]
        HvArm64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeLowestPriority = 0x0001,
        HvX64InterruptTypeSmi = 0x0002,
        HvX64InterruptTypeRemoteRead = 0x0003,
        HvX64InterruptTypeNmi = 0x0004,
        HvX64InterruptTypeInit = 0x0005,
        HvX64InterruptTypeSipi = 0x0006,
        HvX64InterruptTypeExtInt = 0x0007,
        HvX64InterruptTypeLocalInt0 = 0x0008,
        HvX64InterruptTypeLocalInt1 = 0x0009,
    }
}

/// Interrupt control, mixing x64 and ARM64 flags in one layout.
///
/// The declaration relies on the fact that the bits for the different
/// architectures don't intersect. When (if ever) they do, a more elaborate
/// abstraction will be needed. The other downside is the lack of compile-time
/// checks, as adding those would require `guest_arch` support and a large
/// refactoring. To sum up: choosing expediency.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInterruptControl {
    // Raw value; use `interrupt_type`/`set_interrupt_type` for typed access.
    interrupt_type_value: u32,
    pub x86_level_triggered: bool,
    pub x86_logical_destination_mode: bool,
    pub arm64_asserted: bool,
    #[bits(29)]
    pub unused: u32,
}

impl HvInterruptControl {
    /// Returns the interrupt type as an [`HvInterruptType`].
    pub fn interrupt_type(&self) -> HvInterruptType {
        HvInterruptType(self.interrupt_type_value())
    }

    /// Sets the interrupt type in place.
    pub fn set_interrupt_type(&mut self, ty: HvInterruptType) {
        self.set_interrupt_type_value(ty.0)
    }

    /// Builder-style variant of [`Self::set_interrupt_type`].
    pub fn with_interrupt_type(self, ty: HvInterruptType) -> Self {
        self.with_interrupt_type_value(ty.0)
    }
}
3092
/// Value of the VSM capabilities register, describing which VSM features the
/// hypervisor supports.
#[bitfield(u64)]
pub struct HvRegisterVsmCapabilities {
    pub dr6_shared: bool,
    pub mbec_vtl_mask: u16,
    pub deny_lower_vtl_startup: bool,
    pub supervisor_shadow_stack: bool,
    pub hardware_hvpt_available: bool,
    pub software_hvpt_available: bool,
    #[bits(6)]
    pub hardware_hvpt_range_bits: u8,
    pub intercept_page_available: bool,
    pub return_action_available: bool,
    /// If the VTL0 view of memory is mapped to the high address space, which is
    /// the highest legal physical address bit.
    ///
    /// Only available in VTL2.
    pub vtl0_alias_map_available: bool,
    /// If the [`HvRegisterVsmPartitionConfig`] register has support for
    /// `intercept_not_present`.
    ///
    /// Only available in VTL2.
    pub intercept_not_present_available: bool,
    pub install_intercept_ex: bool,
    /// Only available in VTL2.
    pub intercept_system_reset_available: bool,
    #[bits(31)]
    pub reserved: u64,
}
3121
/// Value of the VSM partition configuration register, controlling which
/// events a higher VTL intercepts for the partition.
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionConfig {
    pub enable_vtl_protection: bool,
    #[bits(4)]
    pub default_vtl_protection_mask: u8,
    pub zero_memory_on_reset: bool,
    pub deny_lower_vtl_startup: bool,
    pub intercept_acceptance: bool,
    pub intercept_enable_vtl_protection: bool,
    pub intercept_vp_startup: bool,
    pub intercept_cpuid_unimplemented: bool,
    pub intercept_unrecoverable_exception: bool,
    pub intercept_page: bool,
    pub intercept_restore_partition_time: bool,
    /// The hypervisor will send all unmapped GPA intercepts to VTL2 rather than
    /// the host.
    pub intercept_not_present: bool,
    pub intercept_system_reset: bool,
    #[bits(48)]
    pub reserved: u64,
}

/// Value of the (read-only) VSM partition status register.
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionStatus {
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(4)]
    pub maximum_vtl: u8,
    #[bits(16)]
    pub mbec_enabled_vtl_set: u16,
    #[bits(4)]
    pub supervisor_shadow_stack_enabled_vtl_set: u8,
    #[bits(24)]
    pub reserved: u64,
}
3157
/// Value of the guest VSM partition configuration register.
#[bitfield(u64)]
pub struct HvRegisterGuestVsmPartitionConfig {
    #[bits(4)]
    pub maximum_vtl: u8,
    #[bits(60)]
    pub reserved: u64,
}

/// Value of the per-VP VSM status register.
#[bitfield(u64)]
pub struct HvRegisterVsmVpStatus {
    #[bits(4)]
    pub active_vtl: u8,
    pub active_mbec_enabled: bool,
    #[bits(11)]
    pub reserved_mbz0: u16,
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(32)]
    pub reserved_mbz1: u32,
}

/// Offsets within the VSM code page of the VTL call and return sequences.
#[bitfield(u64)]
pub struct HvRegisterVsmCodePageOffsets {
    #[bits(12)]
    pub call_offset: u16,
    #[bits(12)]
    pub return_offset: u16,
    #[bits(40)]
    pub reserved: u64,
}
3188
/// Saved state of a single synthetic timer.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvStimerState {
    pub undelivered_message_pending: u32,
    pub reserved: u32,
    pub config: u64,
    pub count: u64,
    pub adjustment: u64,
    pub undelivered_expiration_time: u64,
}

/// Saved state of all four synthetic timers of a virtual processor.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvSyntheticTimersState {
    pub timers: [HvStimerState; 4],
    pub reserved: [u64; 5],
}

/// Value of the internal activity register, describing why a VP is suspended.
#[bitfield(u64)]
pub struct HvInternalActivityRegister {
    pub startup_suspend: bool,
    pub halt_suspend: bool,
    pub idle_suspend: bool,
    #[bits(61)]
    pub reserved: u64,
}
3215
3216#[bitfield(u64)]
3217pub struct HvSynicSint {
3218    pub vector: u8,
3219    _reserved: u8,
3220    pub masked: bool,
3221    pub auto_eoi: bool,
3222    pub polling: bool,
3223    _reserved2: bool,
3224    pub proxy: bool,
3225    #[bits(43)]
3226    _reserved2: u64,
3227}
3228
/// Value of the SCONTROL register, enabling the synthetic interrupt
/// controller.
#[bitfield(u64)]
pub struct HvSynicScontrol {
    pub enabled: bool,
    #[bits(63)]
    _reserved: u64,
}

/// Value of the SIMP and SIEFP registers: an enable bit plus the page-aligned
/// GPA of the message/event flags page.
#[bitfield(u64)]
pub struct HvSynicSimpSiefp {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    #[bits(52)]
    pub base_gpn: u64,
}

/// Value of a synthetic timer configuration register.
#[bitfield(u64)]
pub struct HvSynicStimerConfig {
    pub enabled: bool,
    pub periodic: bool,
    pub lazy: bool,
    pub auto_enable: bool,
    // Note: On ARM64 the top 3 bits of apic_vector are reserved.
    pub apic_vector: u8,
    pub direct_mode: bool,
    #[bits(3)]
    pub _reserved1: u8,
    #[bits(4)]
    pub sint: u8,
    #[bits(44)]
    pub _reserved2: u64,
}
3261
// Values for the 3-bit `event_type` field of the x64 pending event registers.
pub const HV_X64_PENDING_EVENT_EXCEPTION: u8 = 0;
pub const HV_X64_PENDING_EVENT_MEMORY_INTERCEPT: u8 = 1;
pub const HV_X64_PENDING_EVENT_NESTED_MEMORY_INTERCEPT: u8 = 2;
pub const HV_X64_PENDING_EVENT_VIRTUALIZATION_FAULT: u8 = 3;
pub const HV_X64_PENDING_EVENT_HYPERCALL_OUTPUT: u8 = 4;
pub const HV_X64_PENDING_EVENT_EXT_INT: u8 = 5;
pub const HV_X64_PENDING_EVENT_SHADOW_IPT: u8 = 6;

/// Provides information about an exception
/// (`event_type == HV_X64_PENDING_EVENT_EXCEPTION`).
#[bitfield(u128)]
pub struct HvX64PendingExceptionEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    pub deliver_error_code: bool,
    #[bits(7)]
    pub reserved1: u8,
    pub vector: u16,
    pub error_code: u32,
    pub exception_parameter: u64,
}

/// Provides information about a virtualization fault
/// (`event_type == HV_X64_PENDING_EVENT_VIRTUALIZATION_FAULT`).
#[bitfield(u128)]
pub struct HvX64PendingVirtualizationFaultEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    pub reserved1: u8,
    pub parameter0: u16,
    pub code: u32,
    pub parameter1: u64,
}
3301
/// Part of [`HvX64PendingEventMemoryIntercept`]: the common pending event
/// header (pending bit plus 3-bit event type).
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptPendingEventHeader {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    _reserved0: u8,
}

/// Part of [`HvX64PendingEventMemoryIntercept`]
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptAccessFlags {
    /// Indicates if the guest linear address is valid.
    pub guest_linear_address_valid: bool,
    /// Indicates that the memory intercept was caused by an access to a guest physical address
    /// (instead of a page table as part of a page table walk).
    pub caused_by_gpa_access: bool,
    #[bits(6)]
    _reserved1: u8,
}

/// Provides information about a memory intercept.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryIntercept {
    pub event_header: HvX64PendingEventMemoryInterceptPendingEventHeader,
    /// VTL at which the memory intercept is targeted.
    /// Note: This field must be in Reg0.
    pub target_vtl: u8,
    /// Type of the memory access.
    pub access_type: HvInterceptAccessType,
    pub access_flags: HvX64PendingEventMemoryInterceptAccessFlags,
    pub _reserved2: u32,
    /// The guest linear address that caused the fault.
    pub guest_linear_address: u64,
    /// The guest physical address that caused the memory intercept.
    pub guest_physical_address: u64,
    pub _reserved3: u64,
}
// Must span exactly the two 128-bit pending event registers.
const_assert!(size_of::<HvX64PendingEventMemoryIntercept>() == 0x20);
3345
3346//
3347// Provides information about pending hypercall output.
3348//
3349#[bitfield(u128)]
3350pub struct HvX64PendingHypercallOutputEvent {
3351    pub event_pending: bool,
3352    #[bits(3)]
3353    pub event_type: u8,
3354    #[bits(4)]
3355    pub reserved0: u8,
3356
3357    // Whether the hypercall has been retired.
3358    pub retired: bool,
3359
3360    #[bits(23)]
3361    pub reserved1: u32,
3362
3363    // Indicates the number of bytes to be written starting from OutputGpa.
3364    pub output_size: u32,
3365
3366    // Indicates the output GPA, which is not required to be page-aligned.
3367    pub output_gpa: u64,
3368}
3369
3370// Provides information about a directly asserted ExtInt.
3371#[bitfield(u128)]
3372pub struct HvX64PendingExtIntEvent {
3373    pub event_pending: bool,
3374    #[bits(3)]
3375    pub event_type: u8,
3376    #[bits(4)]
3377    pub reserved0: u8,
3378    pub vector: u8,
3379    #[bits(48)]
3380    pub reserved1: u64,
3381    pub reserved2: u64,
3382}
3383
3384// Provides information about pending IPT shadowing.
3385#[bitfield(u128)]
3386pub struct HvX64PendingShadowIptEvent {
3387    pub event_pending: bool,
3388    #[bits(4)]
3389    pub event_type: u8,
3390    #[bits(59)]
3391    pub reserved0: u64,
3392
3393    pub reserved1: u64,
3394}
3395
/// Generic view of the first x64 pending event register: the common header
/// plus the event-type-specific payload bits.
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventReg0 {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved: u8,
    #[bits(120)]
    pub data: u128,
}

/// An x64 pending event: the pair of 128-bit pending event registers.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEvent {
    pub reg_0: HvX64PendingEventReg0,
    pub reg_1: AlignedU128,
}
const_assert!(size_of::<HvX64PendingEvent>() == 0x20);

impl From<HvX64PendingExceptionEvent> for HvX64PendingEvent {
    /// An exception event fits entirely in the first register; the second is
    /// zeroed.
    fn from(exception_event: HvX64PendingExceptionEvent) -> Self {
        HvX64PendingEvent {
            reg_0: HvX64PendingEventReg0::from(u128::from(exception_event)),
            reg_1: 0u128.into(),
        }
    }
}
3424
/// Value of the pending interruption register.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingInterruptionRegister {
    pub interruption_pending: bool,
    #[bits(3)]
    pub interruption_type: u8,
    pub deliver_error_code: bool,
    #[bits(4)]
    pub instruction_length: u8,
    pub nested_event: bool,
    #[bits(6)]
    pub reserved: u8,
    pub interruption_vector: u16,
    pub error_code: u32,
}

/// Value of the interrupt state register (interrupt shadow and NMI masking).
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptStateRegister {
    pub interrupt_shadow: bool,
    pub nmi_masked: bool,
    #[bits(62)]
    pub reserved: u64,
}

/// Value of the instruction emulator hints register.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInstructionEmulatorHintsRegister {
    /// Indicates whether any secure VTL is enabled for the partition.
    pub partition_secure_vtl_enabled: bool,
    /// Indicates whether kernel or user execute control architecturally
    /// applies to execute accesses.
    pub mbec_user_execute_control: bool,
    #[bits(62)]
    pub _padding: u64,
}
3461
// Types of ARM64 pending events, stored in the 3-bit `event_type` field of
// the pending event header.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvAarch64PendingEventType: u8 {
        EXCEPTION = 0,
        SYNTHETIC_EXCEPTION = 1,
        HYPERCALL_OUTPUT = 2,
    }
}

// Support for bitfield structures.
impl HvAarch64PendingEventType {
    /// Converts raw bits into the open enum (used by `#[bits]` fields).
    const fn from_bits(val: u8) -> Self {
        HvAarch64PendingEventType(val)
    }

    /// Converts the open enum into raw bits (used by `#[bits]` fields).
    const fn into_bits(self) -> u8 {
        self.0
    }
}
3481
3482#[bitfield[u8]]
3483#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
3484pub struct HvAarch64PendingEventHeader {
3485    #[bits(1)]
3486    pub event_pending: bool,
3487    #[bits(3)]
3488    pub event_type: HvAarch64PendingEventType,
3489    #[bits(4)]
3490    pub reserved: u8,
3491}
3492
/// An ARM64 pending exception event.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingExceptionEvent {
    pub header: HvAarch64PendingEventHeader,
    pub _padding: [u8; 7],
    pub syndrome: u64,
    pub fault_address: u64,
}
3501
3502#[bitfield[u8]]
3503#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
3504pub struct HvAarch64PendingHypercallOutputEventFlags {
3505    #[bits(1)]
3506    pub retired: u8,
3507    #[bits(7)]
3508    pub reserved: u8,
3509}
3510
/// An ARM64 pending hypercall output event.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingHypercallOutputEvent {
    pub header: HvAarch64PendingEventHeader,
    pub flags: HvAarch64PendingHypercallOutputEventFlags,
    pub reserved: u16,
    pub output_size: u32,
    pub output_gpa: u64,
}

/// Generic view of an ARM64 pending event: the header plus untyped payload
/// bytes.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingEvent {
    pub header: HvAarch64PendingEventHeader,
    pub event_data: [u8; 15],
    pub _padding: [u64; 2],
}
3528
/// Protection/mapping flags for a guest physical page.
#[bitfield(u32)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMapGpaFlags {
    pub readable: bool,
    pub writable: bool,
    pub kernel_executable: bool,
    pub user_executable: bool,
    pub supervisor_shadow_stack: bool,
    pub paging_writability: bool,
    pub verify_paging_writability: bool,
    #[bits(8)]
    _padding0: u32,
    pub adjustable: bool,
    #[bits(16)]
    _padding1: u32,
}

/// [`HvMapGpaFlags`] with no permissions set
pub const HV_MAP_GPA_PERMISSIONS_NONE: HvMapGpaFlags = HvMapGpaFlags::new();
/// [`HvMapGpaFlags`] with read, write, and both execute permissions set.
pub const HV_MAP_GPA_PERMISSIONS_ALL: HvMapGpaFlags = HvMapGpaFlags::new()
    .with_readable(true)
    .with_writable(true)
    .with_kernel_executable(true)
    .with_user_executable(true);
3553
/// The monitor page used for monitored notifications. The fields sum to
/// exactly 4096 bytes (one HV page).
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPage {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    pub trigger_group: [HvMonitorTriggerGroup; 4],
    pub reserved2: [u64; 3],
    pub next_check_time: [[u32; 32]; 4],
    pub latency: [[u16; 32]; 4],
    pub reserved3: [u64; 32],
    pub parameter: [[HvMonitorParameter; 32]; 4],
    pub reserved4: [u8; 1984],
}

/// The leading portion of [`HvMonitorPage`]: trigger state and groups only.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPageSmall {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    pub trigger_group: [HvMonitorTriggerGroup; 4],
}

/// A group of monitored-notification trigger bits.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerGroup {
    pub pending: u32,
    pub armed: u32,
}

/// Connection parameter associated with a monitor trigger.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorParameter {
    pub connection_id: u32,
    pub flag_number: u16,
    pub reserved: u16,
}

/// Trigger state of the monitor page (per-group enable bits).
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerState {
    #[bits(4)]
    pub group_enable: u32,
    #[bits(28)]
    pub reserved: u32,
}
3599
/// Describes the ACPI PM timer exposed to the guest (port, width, enabled).
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvPmTimerInfo {
    #[bits(16)]
    pub port: u16,
    #[bits(1)]
    pub width_24: bool,
    #[bits(1)]
    pub enabled: bool,
    #[bits(14)]
    pub reserved1: u32,
    #[bits(32)]
    pub reserved2: u32,
}
3614
/// Value of the SEV control register: enable bit plus the page number of the
/// VMSA.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterSevControl {
    pub enable_encrypted_state: bool,
    #[bits(11)]
    _rsvd1: u64,
    #[bits(52)]
    pub vmsa_gpa_page_number: u64,
}

/// Value of the reference TSC register: enable bit plus the page number of
/// the reference TSC page.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterReferenceTsc {
    pub enable: bool,
    #[bits(11)]
    pub reserved_p: u64,
    #[bits(52)]
    pub gpn: u64,
}

/// Contents of the reference TSC page shared with the guest (one HV page).
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvReferenceTscPage {
    /// Sequence number; [`HV_REFERENCE_TSC_SEQUENCE_INVALID`] means the page
    /// contents are not valid.
    pub tsc_sequence: u32,
    pub reserved1: u32,
    pub tsc_scale: u64,
    pub tsc_offset: i64,
    pub timeline_bias: u64,
    pub tsc_multiplier: u64,
    pub reserved2: [u64; 507],
}

pub const HV_REFERENCE_TSC_SEQUENCE_INVALID: u32 = 0;
3648
/// Flags for [`HvX64VmgexitInterceptMessage`].
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageFlags {
    pub ghcb_page_valid: bool,
    pub ghcb_request_error: bool,
    #[bits(62)]
    _reserved: u64,
}

/// The standard-usage portion of the GHCB page captured in a VMGEXIT
/// intercept.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPageStandard {
    pub ghcb_protocol_version: u16,
    _reserved: [u16; 3],
    pub sw_exit_code: u64,
    pub sw_exit_info1: u64,
    pub sw_exit_info2: u64,
    pub sw_scratch: u64,
}

/// GHCB page contents captured in a VMGEXIT intercept.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPage {
    pub ghcb_usage: u32,
    _reserved: u32,
    pub standard: HvX64VmgexitInterceptMessageGhcbPageStandard,
}

/// Intercept message describing a VMGEXIT from an SEV-ES/SNP guest.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub ghcb_msr: u64,
    pub flags: HvX64VmgexitInterceptMessageFlags,
    /// Only meaningful when `flags.ghcb_page_valid()` is set.
    pub ghcb_page: HvX64VmgexitInterceptMessageGhcbPage,
}

impl MessagePayload for HvX64VmgexitInterceptMessage {}
3687
/// Value of the VP assist page register: enable bit plus the page number of
/// the assist page.
#[bitfield(u64)]
pub struct HvRegisterVpAssistPage {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    #[bits(52)]
    pub gpa_page_number: u64,
}

/// Dirty flags for [`HvX64RegisterPage`], indicating which register groups
/// have been modified.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterPageDirtyFlags {
    pub general_purpose: bool,
    pub instruction_pointer: bool,
    pub xmm: bool,
    pub segments: bool,
    pub flags: bool,
    #[bits(27)]
    reserved: u32,
}

/// The x64 register page shared with a higher VTL (exactly one HV page; see
/// the size assertion below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterPage {
    pub version: u16,
    pub is_valid: u8,
    pub vtl: u8,
    pub dirty: HvX64RegisterPageDirtyFlags,
    pub gp_registers: [u64; 16],
    pub rip: u64,
    pub rflags: u64,
    pub reserved: u64,
    pub xmm: [u128; 6],
    pub segment: [u128; 6],
    // Misc. control registers (cannot be set via this interface).
    pub cr0: u64,
    pub cr3: u64,
    pub cr4: u64,
    pub cr8: u64,
    pub efer: u64,
    pub dr7: u64,
    pub pending_interruption: HvX64PendingInterruptionRegister,
    pub interrupt_state: HvX64InterruptStateRegister,
    pub instruction_emulation_hints: HvInstructionEmulatorHintsRegister,
    pub reserved_end: [u8; 3672],
}

const _: () = assert!(size_of::<HvX64RegisterPage>() == HV_PAGE_SIZE_USIZE);
3736
/// Dirty flags for [`HvAarch64RegisterPage`], indicating which register
/// groups have been modified.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64RegisterPageDirtyFlags {
    _unused: bool,
    pub instruction_pointer: bool,
    pub processor_state: bool,
    pub control_registers: bool,
    #[bits(28)]
    reserved: u32,
}

/// The ARM64 register page shared with a higher VTL (exactly one HV page; see
/// the size assertion below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64RegisterPage {
    pub version: u16,
    pub is_valid: u8,
    pub vtl: u8,
    pub dirty: HvAarch64RegisterPageDirtyFlags,
    // Reserved.
    pub _rsvd: [u64; 33],
    // Instruction pointer.
    pub pc: u64,
    // Processor state.
    pub cpsr: u64,
    // Control registers.
    pub sctlr_el1: u64,
    pub tcr_el1: u64,
    // Reserved.
    pub reserved_end: [u8; 3792],
}

const _: () = assert!(size_of::<HvAarch64RegisterPage>() == HV_PAGE_SIZE_USIZE);
3769
/// Value of the VSM "wait for TLB lock" register.
#[bitfield(u64)]
pub struct HvRegisterVsmWpWaitForTlbLock {
    pub wait: bool,
    #[bits(63)]
    _reserved: u64,
}

/// Value of the per-VP secure VTL configuration register.
#[bitfield(u64)]
pub struct HvRegisterVsmVpSecureVtlConfig {
    pub mbec_enabled: bool,
    pub tlb_locked: bool,
    pub supervisor_shadow_stack_enabled: bool,
    pub hardware_hvpt_enabled: bool,
    #[bits(60)]
    _reserved: u64,
}
3786
/// Value of the control register intercept control register: one enable bit
/// per interceptable control register or MSR access.
#[bitfield(u64)]
pub struct HvRegisterCrInterceptControl {
    pub cr0_write: bool,
    pub cr4_write: bool,
    pub xcr0_write: bool,
    pub ia32_misc_enable_read: bool,
    pub ia32_misc_enable_write: bool,
    pub msr_lstar_read: bool,
    pub msr_lstar_write: bool,
    pub msr_star_read: bool,
    pub msr_star_write: bool,
    pub msr_cstar_read: bool,
    pub msr_cstar_write: bool,
    pub apic_base_msr_read: bool,
    pub apic_base_msr_write: bool,
    pub msr_efer_read: bool,
    pub msr_efer_write: bool,
    pub gdtr_write: bool,
    pub idtr_write: bool,
    pub ldtr_write: bool,
    pub tr_write: bool,
    pub msr_sysenter_cs_write: bool,
    pub msr_sysenter_eip_write: bool,
    pub msr_sysenter_esp_write: bool,
    pub msr_sfmask_write: bool,
    pub msr_tsc_aux_write: bool,
    pub msr_sgx_launch_control_write: bool,
    pub msr_xss_write: bool,
    pub msr_scet_write: bool,
    pub msr_pls_ssp_write: bool,
    pub msr_interrupt_ssp_table_addr_write: bool,
    #[bits(35)]
    _rsvd_z: u64,
}