hvdef/
lib.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Microsoft hypervisor definitions.
5
6#![expect(missing_docs)]
7#![forbid(unsafe_code)]
8#![no_std]
9
10pub mod vbs;
11
12use bitfield_struct::bitfield;
13use core::fmt::Debug;
14use core::mem::size_of;
15use open_enum::open_enum;
16use static_assertions::const_assert;
17use zerocopy::FromBytes;
18use zerocopy::FromZeros;
19use zerocopy::Immutable;
20use zerocopy::IntoBytes;
21use zerocopy::KnownLayout;
22
// Hypervisor page geometry. The hypervisor ABI uses 4 KiB pages regardless of
// the guest's page size.
pub const HV_PAGE_SIZE: u64 = 4096;
// Same value as `HV_PAGE_SIZE`, typed `usize` for indexing and slicing.
pub const HV_PAGE_SIZE_USIZE: usize = 4096;
// log2 of `HV_PAGE_SIZE`.
pub const HV_PAGE_SHIFT: u64 = 12;

// Sentinel identifiers meaning "the caller's own partition/VP".
pub const HV_PARTITION_ID_SELF: u64 = u64::MAX;
pub const HV_VP_INDEX_SELF: u32 = 0xfffffffe;

// CPUID leaves implemented by the Microsoft hypervisor (0x4000_00xx range).
pub const HV_CPUID_FUNCTION_VERSION_AND_FEATURES: u32 = 0x00000001;
pub const HV_CPUID_FUNCTION_HV_VENDOR_AND_MAX_FUNCTION: u32 = 0x40000000;
pub const HV_CPUID_FUNCTION_HV_INTERFACE: u32 = 0x40000001;
pub const HV_CPUID_FUNCTION_MS_HV_VERSION: u32 = 0x40000002;
pub const HV_CPUID_FUNCTION_MS_HV_FEATURES: u32 = 0x40000003;
pub const HV_CPUID_FUNCTION_MS_HV_ENLIGHTENMENT_INFORMATION: u32 = 0x40000004;
pub const HV_CPUID_FUNCTION_MS_HV_IMPLEMENTATION_LIMITS: u32 = 0x40000005;
pub const HV_CPUID_FUNCTION_MS_HV_HARDWARE_FEATURES: u32 = 0x40000006;
pub const HV_CPUID_FUNCTION_MS_HV_ISOLATION_CONFIGURATION: u32 = 0x4000000C;

// CPUID leaves implemented by the virtualization stack (not the hypervisor).
pub const VIRTUALIZATION_STACK_CPUID_VENDOR: u32 = 0x40000080;
pub const VIRTUALIZATION_STACK_CPUID_INTERFACE: u32 = 0x40000081;
pub const VIRTUALIZATION_STACK_CPUID_PROPERTIES: u32 = 0x40000082;

// EAX flag bits returned by the VIRTUALIZATION_STACK_CPUID_PROPERTIES leaf.

/// The result of querying the VIRTUALIZATION_STACK_CPUID_PROPERTIES leaf.
///
/// The current partition is considered "portable": the virtualization stack may
/// attempt to bring up the partition on another physical machine.
pub const VS1_PARTITION_PROPERTIES_EAX_IS_PORTABLE: u32 = 0x000000001;
/// The current partition has a synthetic debug device available to it.
pub const VS1_PARTITION_PROPERTIES_EAX_DEBUG_DEVICE_PRESENT: u32 = 0x000000002;
/// Extended I/O APIC RTEs are supported for the current partition.
pub const VS1_PARTITION_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE: u32 = 0x000000004;
/// Confidential VMBus is available.
pub const VS1_PARTITION_PROPERTIES_EAX_CONFIDENTIAL_VMBUS_AVAILABLE: u32 = 0x000000008;

/// SMCCC UID for the Microsoft Hypervisor.
pub const VENDOR_HYP_UID_MS_HYPERVISOR: [u32; 4] = [0x4d32ba58, 0xcd244764, 0x8eef6c75, 0x16597024];
58
/// Partition privilege flags granted by the hypervisor, embedded as the low
/// 64 bits of [`HvFeatures`].
///
/// NOTE(review): field order defines the bit layout (LSB first); do not
/// reorder or resize fields.
#[bitfield(u64)]
pub struct HvPartitionPrivilege {
    // access to virtual msrs
    pub access_vp_runtime_msr: bool,
    pub access_partition_reference_counter: bool,
    pub access_synic_msrs: bool,
    pub access_synthetic_timer_msrs: bool,
    pub access_apic_msrs: bool,
    pub access_hypercall_msrs: bool,
    pub access_vp_index: bool,
    pub access_reset_msr: bool,
    pub access_stats_msr: bool,
    pub access_partition_reference_tsc: bool,
    pub access_guest_idle_msr: bool,
    pub access_frequency_msrs: bool,
    pub access_debug_msrs: bool,
    pub access_reenlightenment_ctrls: bool,
    pub access_root_scheduler_msr: bool,
    pub access_tsc_invariant_controls: bool,
    _reserved1: u16,

    // Access to hypercalls
    pub create_partitions: bool,
    pub access_partition_id: bool,
    pub access_memory_pool: bool,
    pub adjust_message_buffers: bool,
    pub post_messages: bool,
    pub signal_events: bool,
    pub create_port: bool,
    pub connect_port: bool,
    pub access_stats: bool,
    #[bits(2)]
    _reserved2: u64,
    pub debugging: bool,
    pub cpu_management: bool,
    pub configure_profiler: bool,
    pub access_vp_exit_tracing: bool,
    pub enable_extended_gva_ranges_flush_va_list: bool,
    pub access_vsm: bool,
    pub access_vp_registers: bool,
    _unused_bit: bool,
    pub fast_hypercall_output: bool,
    pub enable_extended_hypercalls: bool,
    pub start_virtual_processor: bool,
    pub isolation: bool,
    #[bits(9)]
    _reserved3: u64,
}
107
open_enum! {
    /// The isolation architecture of a partition.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvPartitionIsolationType: u8 {
        /// Not an isolated partition.
        NONE = 0,
        /// Virtualization-based security (software) isolation.
        VBS = 1,
        /// AMD SEV-SNP hardware isolation.
        SNP = 2,
        /// Intel TDX hardware isolation.
        TDX = 3,
    }
}
117
/// Hypervisor feature flags, laid out as the four 32-bit CPUID result
/// registers (see [`HvFeatures::from_cpuid`]); the low 64 bits carry the
/// partition privileges.
///
/// NOTE(review): field order defines the bit layout; do not reorder.
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvFeatures {
    #[bits(64)]
    pub privileges: HvPartitionPrivilege,

    #[bits(4)]
    pub max_supported_cstate: u32,
    pub hpet_needed_for_c3_power_state_deprecated: bool,
    pub invariant_mperf_available: bool,
    pub supervisor_shadow_stack_available: bool,
    pub arch_pmu_available: bool,
    pub exception_trap_intercept_available: bool,
    #[bits(23)]
    reserved: u32,

    pub mwait_available_deprecated: bool,
    pub guest_debugging_available: bool,
    pub performance_monitors_available: bool,
    pub cpu_dynamic_partitioning_available: bool,
    pub xmm_registers_for_fast_hypercall_available: bool,
    pub guest_idle_available: bool,
    pub hypervisor_sleep_state_support_available: bool,
    pub numa_distance_query_available: bool,
    pub frequency_regs_available: bool,
    pub synthetic_machine_check_available: bool,
    pub guest_crash_regs_available: bool,
    pub debug_regs_available: bool,
    pub npiep1_available: bool,
    pub disable_hypervisor_available: bool,
    pub extended_gva_ranges_for_flush_virtual_address_list_available: bool,
    pub fast_hypercall_output_available: bool,
    pub svm_features_available: bool,
    pub sint_polling_mode_available: bool,
    pub hypercall_msr_lock_available: bool,
    pub direct_synthetic_timers: bool,
    pub register_pat_available: bool,
    pub register_bndcfgs_available: bool,
    pub watchdog_timer_available: bool,
    pub synthetic_time_unhalted_timer_available: bool,
    pub device_domains_available: bool,    // HDK only.
    pub s1_device_domains_available: bool, // HDK only.
    pub lbr_available: bool,
    pub ipt_available: bool,
    pub cross_vtl_flush_available: bool,
    pub idle_spec_ctrl_available: bool,
    pub translate_gva_flags_available: bool,
    pub apic_eoi_intercept_available: bool,
}
167
168impl HvFeatures {
169    pub fn from_cpuid(cpuid: [u32; 4]) -> Self {
170        zerocopy::transmute!(cpuid)
171    }
172
173    pub fn into_cpuid(self) -> [u32; 4] {
174        zerocopy::transmute!(self)
175    }
176}
177
/// Enlightenment recommendations reported to the guest, laid out as the four
/// 32-bit CPUID result registers (see
/// [`HvEnlightenmentInformation::from_cpuid`]).
///
/// NOTE(review): field order defines the bit layout; do not reorder.
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvEnlightenmentInformation {
    pub use_hypercall_for_address_space_switch: bool,
    pub use_hypercall_for_local_flush: bool,
    pub use_hypercall_for_remote_flush_and_local_flush_entire: bool,
    pub use_apic_msrs: bool,
    pub use_hv_register_for_reset: bool,
    pub use_relaxed_timing: bool,
    pub use_dma_remapping_deprecated: bool,
    pub use_interrupt_remapping_deprecated: bool,
    pub use_x2_apic_msrs: bool,
    pub deprecate_auto_eoi: bool,
    pub use_synthetic_cluster_ipi: bool,
    pub use_ex_processor_masks: bool,
    pub nested: bool,
    pub use_int_for_mbec_system_calls: bool,
    pub use_vmcs_enlightenments: bool,
    pub use_synced_timeline: bool,
    pub core_scheduler_requested: bool,
    pub use_direct_local_flush_entire: bool,
    pub no_non_architectural_core_sharing: bool,
    pub use_x2_apic: bool,
    pub restore_time_on_resume: bool,
    pub use_hypercall_for_mmio_access: bool,
    pub use_gpa_pinning_hypercall: bool,
    pub wake_vps: bool,
    _reserved: u8,
    // Recommended number of spins before notifying the hypervisor of a long
    // spin-wait loop.
    pub long_spin_wait_count: u32,
    #[bits(7)]
    pub implemented_physical_address_bits: u32,
    #[bits(25)]
    _reserved1: u32,
    _reserved2: u32,
}
213
214impl HvEnlightenmentInformation {
215    pub fn from_cpuid(cpuid: [u32; 4]) -> Self {
216        zerocopy::transmute!(cpuid)
217    }
218
219    pub fn into_cpuid(self) -> [u32; 4] {
220        zerocopy::transmute!(self)
221    }
222}
223
/// Hardware features in use for the partition, laid out as the four 32-bit
/// CPUID result registers of the hardware-features leaf.
///
/// NOTE(review): field order defines the bit layout; do not reorder.
#[bitfield(u128)]
pub struct HvHardwareFeatures {
    pub apic_overlay_assist_in_use: bool,
    pub msr_bitmaps_in_use: bool,
    pub architectural_performance_counters_in_use: bool,
    pub second_level_address_translation_in_use: bool,
    pub dma_remapping_in_use: bool,
    pub interrupt_remapping_in_use: bool,
    pub memory_patrol_scrubber_present: bool,
    pub dma_protection_in_use: bool,
    pub hpet_requested: bool,
    pub synthetic_timers_volatile: bool,
    #[bits(4)]
    pub hypervisor_level: u32,
    pub physical_destination_mode_required: bool,
    pub use_vmfunc_for_alias_map_switch: bool,
    pub hv_register_for_memory_zeroing_supported: bool,
    pub unrestricted_guest_supported: bool,
    pub rdt_afeatures_supported: bool,
    pub rdt_mfeatures_supported: bool,
    pub child_perfmon_pmu_supported: bool,
    pub child_perfmon_lbr_supported: bool,
    pub child_perfmon_ipt_supported: bool,
    pub apic_emulation_supported: bool,
    pub child_x2_apic_recommended: bool,
    pub hardware_watchdog_reserved: bool,
    pub device_access_tracking_supported: bool,
    pub hardware_gpa_access_tracking_supported: bool,
    #[bits(4)]
    _reserved: u32,

    pub device_domain_input_width: u8,
    #[bits(24)]
    _reserved1: u32,
    _reserved2: u32,
    _reserved3: u32,
}
261
/// Isolation configuration reported by the
/// `HV_CPUID_FUNCTION_MS_HV_ISOLATION_CONFIGURATION` CPUID leaf, laid out as
/// the four 32-bit result registers.
///
/// NOTE(review): field order defines the bit layout; do not reorder.
#[bitfield(u128)]
pub struct HvIsolationConfiguration {
    pub paravisor_present: bool,
    #[bits(31)]
    pub _reserved0: u32,

    // Matches the values of `HvPartitionIsolationType`.
    #[bits(4)]
    pub isolation_type: u8,
    _reserved11: bool,
    pub shared_gpa_boundary_active: bool,
    // Position of the shared-GPA boundary, expressed as a bit index.
    #[bits(6)]
    pub shared_gpa_boundary_bits: u8,
    #[bits(20)]
    _reserved12: u32,
    _reserved2: u32,
    _reserved3: u32,
}
279
open_enum! {
    /// Hypercall call codes, passed in the low 16 bits of the hypercall
    /// input value (see [`hypercall::Control`]).
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HypercallCode: u16 {
        #![expect(non_upper_case_globals)]

        HvCallSwitchVirtualAddressSpace = 0x0001,
        HvCallFlushVirtualAddressSpace = 0x0002,
        HvCallFlushVirtualAddressList = 0x0003,
        HvCallNotifyLongSpinWait = 0x0008,
        HvCallInvokeHypervisorDebugger = 0x000a,
        HvCallSendSyntheticClusterIpi = 0x000b,
        HvCallModifyVtlProtectionMask = 0x000c,
        HvCallEnablePartitionVtl = 0x000d,
        HvCallEnableVpVtl = 0x000f,
        HvCallVtlCall = 0x0011,
        HvCallVtlReturn = 0x0012,
        HvCallFlushVirtualAddressSpaceEx = 0x0013,
        HvCallFlushVirtualAddressListEx = 0x0014,
        HvCallSendSyntheticClusterIpiEx = 0x0015,
        HvCallInstallIntercept = 0x004d,
        HvCallGetVpRegisters = 0x0050,
        HvCallSetVpRegisters = 0x0051,
        HvCallTranslateVirtualAddress = 0x0052,
        HvCallPostMessage = 0x005C,
        HvCallSignalEvent = 0x005D,
        HvCallOutputDebugCharacter = 0x0071,
        HvCallGetSystemProperty = 0x007b,
        HvCallRetargetDeviceInterrupt = 0x007e,
        HvCallNotifyPartitionEvent = 0x0087,
        HvCallAssertVirtualInterrupt = 0x0094,
        HvCallStartVirtualProcessor = 0x0099,
        HvCallGetVpIndexFromApicId = 0x009A,
        HvCallTranslateVirtualAddressEx = 0x00AC,
        HvCallCheckForIoIntercept = 0x00ad,
        HvCallFlushGuestPhysicalAddressSpace = 0x00AF,
        HvCallFlushGuestPhysicalAddressList = 0x00B0,
        HvCallSignalEventDirect = 0x00C0,
        HvCallPostMessageDirect = 0x00C1,
        HvCallCheckSparseGpaPageVtlAccess = 0x00D4,
        HvCallAcceptGpaPages = 0x00D9,
        HvCallModifySparseGpaPageHostVisibility = 0x00DB,
        HvCallMemoryMappedIoRead = 0x0106,
        HvCallMemoryMappedIoWrite = 0x0107,
        HvCallPinGpaPageRanges = 0x0112,
        HvCallUnpinGpaPageRanges = 0x0113,
        HvCallQuerySparseGpaPageHostVisibility = 0x011C,

        // Extended hypercalls.
        HvExtCallQueryCapabilities = 0x8001,

        // VBS guest calls.
        HvCallVbsVmCallReport = 0xC001,
    }
}
334
// Synthetic MSRs exposed by the hypervisor to x64 guests. Values are fixed by
// the hypervisor ABI.

// Guest OS identity and hypercall page setup.
pub const HV_X64_MSR_GUEST_OS_ID: u32 = 0x40000000;
pub const HV_X64_MSR_HYPERCALL: u32 = 0x40000001;
pub const HV_X64_MSR_VP_INDEX: u32 = 0x40000002;
// Reference time and frequency.
pub const HV_X64_MSR_TIME_REF_COUNT: u32 = 0x40000020;
pub const HV_X64_MSR_REFERENCE_TSC: u32 = 0x40000021;
pub const HV_X64_MSR_TSC_FREQUENCY: u32 = 0x40000022;
pub const HV_X64_MSR_APIC_FREQUENCY: u32 = 0x40000023;
// Synthetic APIC access.
pub const HV_X64_MSR_EOI: u32 = 0x40000070;
pub const HV_X64_MSR_ICR: u32 = 0x40000071;
pub const HV_X64_MSR_TPR: u32 = 0x40000072;
pub const HV_X64_MSR_VP_ASSIST_PAGE: u32 = 0x40000073;
// Synthetic interrupt controller (SynIC) control and SINT registers.
pub const HV_X64_MSR_SCONTROL: u32 = 0x40000080;
pub const HV_X64_MSR_SVERSION: u32 = 0x40000081;
pub const HV_X64_MSR_SIEFP: u32 = 0x40000082;
pub const HV_X64_MSR_SIMP: u32 = 0x40000083;
pub const HV_X64_MSR_EOM: u32 = 0x40000084;
pub const HV_X64_MSR_SINT0: u32 = 0x40000090;
pub const HV_X64_MSR_SINT1: u32 = 0x40000091;
pub const HV_X64_MSR_SINT2: u32 = 0x40000092;
pub const HV_X64_MSR_SINT3: u32 = 0x40000093;
pub const HV_X64_MSR_SINT4: u32 = 0x40000094;
pub const HV_X64_MSR_SINT5: u32 = 0x40000095;
pub const HV_X64_MSR_SINT6: u32 = 0x40000096;
pub const HV_X64_MSR_SINT7: u32 = 0x40000097;
pub const HV_X64_MSR_SINT8: u32 = 0x40000098;
pub const HV_X64_MSR_SINT9: u32 = 0x40000099;
pub const HV_X64_MSR_SINT10: u32 = 0x4000009a;
pub const HV_X64_MSR_SINT11: u32 = 0x4000009b;
pub const HV_X64_MSR_SINT12: u32 = 0x4000009c;
pub const HV_X64_MSR_SINT13: u32 = 0x4000009d;
pub const HV_X64_MSR_SINT14: u32 = 0x4000009e;
pub const HV_X64_MSR_SINT15: u32 = 0x4000009f;
// Synthetic timers (config/count pairs).
pub const HV_X64_MSR_STIMER0_CONFIG: u32 = 0x400000b0;
pub const HV_X64_MSR_STIMER0_COUNT: u32 = 0x400000b1;
pub const HV_X64_MSR_STIMER1_CONFIG: u32 = 0x400000b2;
pub const HV_X64_MSR_STIMER1_COUNT: u32 = 0x400000b3;
pub const HV_X64_MSR_STIMER2_CONFIG: u32 = 0x400000b4;
pub const HV_X64_MSR_STIMER2_COUNT: u32 = 0x400000b5;
pub const HV_X64_MSR_STIMER3_CONFIG: u32 = 0x400000b6;
pub const HV_X64_MSR_STIMER3_COUNT: u32 = 0x400000b7;
pub const HV_X64_MSR_GUEST_IDLE: u32 = 0x400000F0;
// Guest crash reporting (see `GuestCrashCtl`).
pub const HV_X64_MSR_GUEST_CRASH_P0: u32 = 0x40000100;
pub const HV_X64_MSR_GUEST_CRASH_P1: u32 = 0x40000101;
pub const HV_X64_MSR_GUEST_CRASH_P2: u32 = 0x40000102;
pub const HV_X64_MSR_GUEST_CRASH_P3: u32 = 0x40000103;
pub const HV_X64_MSR_GUEST_CRASH_P4: u32 = 0x40000104;
pub const HV_X64_MSR_GUEST_CRASH_CTL: u32 = 0x40000105;

// Number of crash parameter MSRs (P0..P4).
pub const HV_X64_GUEST_CRASH_PARAMETER_MSRS: usize = 5;
384
/// A hypervisor status code.
///
/// The non-success status codes are defined in [`HvError`].
///
/// A value of zero indicates success; every other value is an error.
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
#[repr(transparent)]
pub struct HvStatus(pub u16);
391
392impl HvStatus {
393    /// The success status code.
394    pub const SUCCESS: Self = Self(0);
395
396    /// Returns `Ok(())` if this is `HvStatus::SUCCESS`, otherwise returns an
397    /// `Err(err)` where `err` is the corresponding `HvError`.
398    pub fn result(self) -> HvResult<()> {
399        if let Ok(err) = self.0.try_into() {
400            Err(HvError(err))
401        } else {
402            Ok(())
403        }
404    }
405
406    /// Returns true if this is `HvStatus::SUCCESS`.
407    pub fn is_ok(self) -> bool {
408        self == Self::SUCCESS
409    }
410
411    /// Returns true if this is not `HvStatus::SUCCESS`.
412    pub fn is_err(self) -> bool {
413        self != Self::SUCCESS
414    }
415
416    const fn from_bits(bits: u16) -> Self {
417        Self(bits)
418    }
419
420    const fn into_bits(self) -> u16 {
421        self.0
422    }
423}
424
425impl From<Result<(), HvError>> for HvStatus {
426    fn from(err: Result<(), HvError>) -> Self {
427        err.err().map_or(Self::SUCCESS, |err| Self(err.0.get()))
428    }
429}
430
431impl Debug for HvStatus {
432    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
433        match self.result() {
434            Ok(()) => f.write_str("Success"),
435            Err(err) => Debug::fmt(&err, f),
436        }
437    }
438}
439
/// An [`HvStatus`] value representing an error.
//
// DEVNOTE: use `NonZeroU16` to get a niche optimization, since 0 is reserved
// for success. This makes `Option<HvError>` and `HvResult<()>` the same size
// as `u16`.
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout)]
#[repr(transparent)]
pub struct HvError(core::num::NonZeroU16);
447
448impl From<core::num::NonZeroU16> for HvError {
449    fn from(err: core::num::NonZeroU16) -> Self {
450        Self(err)
451    }
452}
453
454impl Debug for HvError {
455    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
456        match self.debug_name() {
457            Some(name) => f.pad(name),
458            None => Debug::fmt(&self.0.get(), f),
459        }
460    }
461}
462
463impl core::fmt::Display for HvError {
464    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
465        match self.doc_str() {
466            Some(s) => f.write_str(s),
467            None => write!(f, "Hypervisor error {:#06x}", self.0),
468        }
469    }
470}
471
// No `source` or custom behavior needed; the default `Error` impl suffices.
impl core::error::Error for HvError {}
473
/// Defines an associated constant on the error type for each status code,
/// plus private `debug_name`/`doc_str` lookups that map a raw code back to
/// the constant's identifier and (trimmed) doc string.
macro_rules! hv_error {
    ($ty:ty, $(#[doc = $doc:expr] $ident:ident = $val:expr),* $(,)?) => {

        #[expect(non_upper_case_globals)]
        impl $ty {
            $(
                #[doc = $doc]
                pub const $ident: Self = Self(core::num::NonZeroU16::new($val).unwrap());
            )*

            // e.g. `0x0002` -> `"InvalidHypercallCode"`; `None` for codes not
            // defined here.
            fn debug_name(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        $val => stringify!($ident),
                    )*
                    _ => return None,
                })
            }

            // e.g. `0x0002` -> `"Invalid hypercall code"`, used as the
            // `Display` text; `None` for codes not defined here.
            fn doc_str(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        $val => const { $doc.trim_ascii() },
                    )*
                    _ => return None,
                })
            }
        }
    };
}
504
// DEVNOTE: the doc comments here are also used as the runtime error strings.
// The numeric values are the hypervisor's HV_STATUS_* codes and must not be
// changed.
hv_error! {
    HvError,
    /// Invalid hypercall code
    InvalidHypercallCode = 0x0002,
    /// Invalid hypercall input
    InvalidHypercallInput = 0x0003,
    /// Invalid alignment
    InvalidAlignment = 0x0004,
    /// Invalid parameter
    InvalidParameter = 0x0005,
    /// Access denied
    AccessDenied = 0x0006,
    /// Invalid partition state
    InvalidPartitionState = 0x0007,
    /// Operation denied
    OperationDenied = 0x0008,
    /// Unknown property
    UnknownProperty = 0x0009,
    /// Property value out of range
    PropertyValueOutOfRange = 0x000A,
    /// Insufficient memory
    InsufficientMemory = 0x000B,
    /// Partition too deep
    PartitionTooDeep = 0x000C,
    /// Invalid partition ID
    InvalidPartitionId = 0x000D,
    /// Invalid VP index
    InvalidVpIndex = 0x000E,
    /// Not found
    NotFound = 0x0010,
    /// Invalid port ID
    InvalidPortId = 0x0011,
    /// Invalid connection ID
    InvalidConnectionId = 0x0012,
    /// Insufficient buffers
    InsufficientBuffers = 0x0013,
    /// Not acknowledged
    NotAcknowledged = 0x0014,
    /// Invalid VP state
    InvalidVpState = 0x0015,
    /// Acknowledged
    Acknowledged = 0x0016,
    /// Invalid save restore state
    InvalidSaveRestoreState = 0x0017,
    /// Invalid SynIC state
    InvalidSynicState = 0x0018,
    /// Object in use
    ObjectInUse = 0x0019,
    /// Invalid proximity domain info
    InvalidProximityDomainInfo = 0x001A,
    /// No data
    NoData = 0x001B,
    /// Inactive
    Inactive = 0x001C,
    /// No resources
    NoResources = 0x001D,
    /// Feature unavailable
    FeatureUnavailable = 0x001E,
    /// Partial packet
    PartialPacket = 0x001F,
    /// Processor feature not supported
    ProcessorFeatureNotSupported = 0x0020,
    /// Processor cache line flush size incompatible
    ProcessorCacheLineFlushSizeIncompatible = 0x0030,
    /// Insufficient buffer
    InsufficientBuffer = 0x0033,
    /// Incompatible processor
    IncompatibleProcessor = 0x0037,
    /// Insufficient device domains
    InsufficientDeviceDomains = 0x0038,
    /// CPUID feature validation error
    CpuidFeatureValidationError = 0x003C,
    /// CPUID XSAVE feature validation error
    CpuidXsaveFeatureValidationError = 0x003D,
    /// Processor startup timeout
    ProcessorStartupTimeout = 0x003E,
    /// SMX enabled
    SmxEnabled = 0x003F,
    /// Invalid LP index
    InvalidLpIndex = 0x0041,
    /// Invalid register value
    InvalidRegisterValue = 0x0050,
    /// Invalid VTL state
    InvalidVtlState = 0x0051,
    /// NX not detected
    NxNotDetected = 0x0055,
    /// Invalid device ID
    InvalidDeviceId = 0x0057,
    /// Invalid device state
    InvalidDeviceState = 0x0058,
    /// Pending page requests
    PendingPageRequests = 0x0059,
    /// Page request invalid
    PageRequestInvalid = 0x0060,
    /// Key already exists
    KeyAlreadyExists = 0x0065,
    /// Device already in domain
    DeviceAlreadyInDomain = 0x0066,
    /// Invalid CPU group ID
    InvalidCpuGroupId = 0x006F,
    /// Invalid CPU group state
    InvalidCpuGroupState = 0x0070,
    /// Operation failed
    OperationFailed = 0x0071,
    /// Not allowed with nested virtualization active
    NotAllowedWithNestedVirtActive = 0x0072,
    /// Insufficient root memory
    InsufficientRootMemory = 0x0073,
    /// Event buffer already freed
    EventBufferAlreadyFreed = 0x0074,
    /// The specified timeout expired before the operation completed.
    Timeout = 0x0078,
    /// The VTL specified for the operation is already in an enabled state.
    VtlAlreadyEnabled = 0x0086,
    /// Unknown register name
    UnknownRegisterName = 0x0087,
}
623
/// A useful result type for hypervisor operations.
///
/// The error type is [`HvError`], a nonzero hypervisor status code.
pub type HvResult<T> = Result<T, HvError>;
626
/// A Virtual Trust Level (VTL).
///
/// `Ord` is derived so that comparisons follow the numeric level
/// (`Vtl0 < Vtl1 < Vtl2`).
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum Vtl {
    /// VTL 0.
    Vtl0 = 0,
    /// VTL 1.
    Vtl1 = 1,
    /// VTL 2.
    Vtl2 = 2,
}
634
635impl TryFrom<u8> for Vtl {
636    type Error = HvError;
637
638    fn try_from(value: u8) -> Result<Self, Self::Error> {
639        Ok(match value {
640            0 => Self::Vtl0,
641            1 => Self::Vtl1,
642            2 => Self::Vtl2,
643            _ => return Err(HvError::InvalidParameter),
644        })
645    }
646}
647
648impl From<Vtl> for u8 {
649    fn from(value: Vtl) -> Self {
650        value as u8
651    }
652}
653
/// The contents of `HV_X64_MSR_GUEST_CRASH_CTL`
///
/// NOTE(review): field order defines the bit layout (LSB first); the flags
/// occupy the top bits of the register.
#[bitfield(u64)]
pub struct GuestCrashCtl {
    #[bits(58)]
    _reserved: u64,
    // ID of the pre-OS environment
    #[bits(3)]
    pub pre_os_id: u8,
    // Crash dump will not be captured
    #[bits(1)]
    pub no_crash_dump: bool,
    // `HV_X64_MSR_GUEST_CRASH_P3` is the GPA of the message,
    // `HV_X64_MSR_GUEST_CRASH_P4` is its length in bytes
    #[bits(1)]
    pub crash_message: bool,
    // Log contents of crash parameter system registers
    #[bits(1)]
    pub crash_notify: bool,
}
673
/// A 128-bit value stored as 16 native-endian bytes with guaranteed 16-byte
/// alignment.
// NOTE(review): presumably used so ABI structures get a fixed 16-byte
// alignment independent of `u128`'s platform alignment — confirm against
// consumers such as `HvRegisterValue`.
#[repr(C, align(16))]
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct AlignedU128([u8; 16]);
677
678impl AlignedU128 {
679    pub fn as_ne_bytes(&self) -> [u8; 16] {
680        self.0
681    }
682
683    pub fn from_ne_bytes(val: [u8; 16]) -> Self {
684        Self(val)
685    }
686}
687
688impl Debug for AlignedU128 {
689    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
690        Debug::fmt(&u128::from_ne_bytes(self.0), f)
691    }
692}
693
694impl From<u128> for AlignedU128 {
695    fn from(v: u128) -> Self {
696        Self(v.to_ne_bytes())
697    }
698}
699
700impl From<u64> for AlignedU128 {
701    fn from(v: u64) -> Self {
702        (v as u128).into()
703    }
704}
705
706impl From<u32> for AlignedU128 {
707    fn from(v: u32) -> Self {
708        (v as u128).into()
709    }
710}
711
712impl From<u16> for AlignedU128 {
713    fn from(v: u16) -> Self {
714        (v as u128).into()
715    }
716}
717
718impl From<u8> for AlignedU128 {
719    fn from(v: u8) -> Self {
720        (v as u128).into()
721    }
722}
723
724impl From<AlignedU128> for u128 {
725    fn from(v: AlignedU128) -> Self {
726        u128::from_ne_bytes(v.0)
727    }
728}
729
open_enum! {
    /// The type of a synthetic (SynIC) message, carried in
    /// [`HvMessageHeader::typ`]. Values with the high bit set are
    /// hypervisor-generated.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvMessageType: u32 {
        #![expect(non_upper_case_globals)]

        HvMessageTypeNone = 0x00000000,

        HvMessageTypeUnmappedGpa = 0x80000000,
        HvMessageTypeGpaIntercept = 0x80000001,
        HvMessageTypeUnacceptedGpa = 0x80000003,
        HvMessageTypeGpaAttributeIntercept = 0x80000004,
        HvMessageTypeEnablePartitionVtlIntercept = 0x80000005,
        HvMessageTypeTimerExpired = 0x80000010,
        HvMessageTypeInvalidVpRegisterValue = 0x80000020,
        HvMessageTypeUnrecoverableException = 0x80000021,
        HvMessageTypeUnsupportedFeature = 0x80000022,
        HvMessageTypeTlbPageSizeMismatch = 0x80000023,
        HvMessageTypeIommuFault = 0x80000024,
        HvMessageTypeEventLogBufferComplete = 0x80000040,
        HvMessageTypeHypercallIntercept = 0x80000050,
        HvMessageTypeSynicEventIntercept = 0x80000060,
        HvMessageTypeSynicSintIntercept = 0x80000061,
        HvMessageTypeSynicSintDeliverable = 0x80000062,
        HvMessageTypeAsyncCallCompletion = 0x80000070,
        HvMessageTypeX64IoPortIntercept = 0x80010000,
        HvMessageTypeMsrIntercept = 0x80010001,
        HvMessageTypeX64CpuidIntercept = 0x80010002,
        HvMessageTypeExceptionIntercept = 0x80010003,
        HvMessageTypeX64ApicEoi = 0x80010004,
        HvMessageTypeX64IommuPrq = 0x80010005,
        HvMessageTypeRegisterIntercept = 0x80010006,
        HvMessageTypeX64Halt = 0x80010007,
        HvMessageTypeX64InterruptionDeliverable = 0x80010008,
        HvMessageTypeX64SipiIntercept = 0x80010009,
        HvMessageTypeX64RdtscIntercept = 0x8001000a,
        HvMessageTypeX64ApicSmiIntercept = 0x8001000b,
        HvMessageTypeArm64ResetIntercept = 0x8001000c,
        HvMessageTypeX64ApicInitSipiIntercept = 0x8001000d,
        HvMessageTypeX64ApicWriteIntercept = 0x8001000e,
        HvMessageTypeX64ProxyInterruptIntercept = 0x8001000f,
        HvMessageTypeX64IsolationCtrlRegIntercept = 0x80010010,
        HvMessageTypeX64SnpGuestRequestIntercept = 0x80010011,
        HvMessageTypeX64ExceptionTrapIntercept = 0x80010012,
        HvMessageTypeX64SevVmgexitIntercept = 0x80010013,
    }
}
776
777impl Default for HvMessageType {
778    fn default() -> Self {
779        HvMessageType::HvMessageTypeNone
780    }
781}
782
// SINT index used by the hypervisor for intercept messages.
pub const HV_SYNIC_INTERCEPTION_SINT_INDEX: u8 = 0;

// Number of synthetic interrupt sources (SINTs) per SynIC.
pub const NUM_SINTS: usize = 16;
// Number of synthetic timers per virtual processor.
pub const NUM_TIMERS: usize = 4;
787
/// The fixed 16-byte header of a synthetic message.
#[repr(C)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageHeader {
    /// The message type; `HvMessageTypeNone` means the slot is empty.
    pub typ: HvMessageType,
    /// Length in bytes of the valid payload portion.
    pub len: u8,
    pub flags: HvMessageFlags,
    /// Reserved; must be zero.
    pub rsvd: u16,
    /// Sender-defined identifier (e.g. port ID).
    // NOTE(review): meaning of `id` depends on the message source — confirm
    // against callers.
    pub id: u64,
}
797
/// Flags byte of a synthetic message header.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageFlags {
    /// Set when another message is pending for the same slot.
    pub message_pending: bool,
    #[bits(7)]
    _reserved: u8,
}
805
// Total size of a synthetic message; the ABI fixes this at 256 bytes
// (16-byte header + 240-byte payload), checked below.
pub const HV_MESSAGE_SIZE: usize = size_of::<HvMessage>();
const_assert!(HV_MESSAGE_SIZE == 256);
// Maximum payload, in bytes, carried by a single synthetic message.
pub const HV_MESSAGE_PAYLOAD_SIZE: usize = 240;
809
/// A 256-byte synthetic message: a 16-byte header followed by up to 240 bytes
/// of payload. 16-byte aligned per the message-page layout.
#[repr(C, align(16))]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessage {
    pub header: HvMessageHeader,
    // Only the first `header.len` bytes are meaningful; see `payload()`.
    pub payload_buffer: [u8; HV_MESSAGE_PAYLOAD_SIZE],
}
816
817impl Default for HvMessage {
818    fn default() -> Self {
819        Self {
820            header: FromZeros::new_zeroed(),
821            payload_buffer: [0; 240],
822        }
823    }
824}
825
826impl HvMessage {
827    /// Constructs a new message. `payload` must fit into the payload field (240
828    /// bytes limit).
829    pub fn new(typ: HvMessageType, id: u64, payload: &[u8]) -> Self {
830        let mut msg = HvMessage {
831            header: HvMessageHeader {
832                typ,
833                len: payload.len() as u8,
834                flags: HvMessageFlags::new(),
835                rsvd: 0,
836                id,
837            },
838            payload_buffer: [0; 240],
839        };
840        msg.payload_buffer[..payload.len()].copy_from_slice(payload);
841        msg
842    }
843
844    pub fn payload(&self) -> &[u8] {
845        &self.payload_buffer[..self.header.len as usize]
846    }
847
848    pub fn as_message<T: MessagePayload>(&self) -> &T {
849        // Ensure invariants are met.
850        let () = T::CHECK;
851        T::ref_from_prefix(&self.payload_buffer).unwrap().0
852    }
853
854    pub fn as_message_mut<T: MessagePayload>(&mut self) -> &T {
855        // Ensure invariants are met.
856        let () = T::CHECK;
857        T::mut_from_prefix(&mut self.payload_buffer).unwrap().0
858    }
859}
860
/// Marker trait for types that can be used as the payload of an
/// [`HvMessage`] via `as_message`/`as_message_mut`.
pub trait MessagePayload: KnownLayout + Immutable + IntoBytes + FromBytes + Sized {
    /// Used to ensure this trait is only implemented on messages of the proper
    /// size and alignment.
    ///
    /// Evaluated at compile time by the `as_message*` accessors; a type that
    /// is too large or over-aligned fails the build rather than panicking.
    #[doc(hidden)]
    const CHECK: () = {
        assert!(size_of::<Self>() <= HV_MESSAGE_PAYLOAD_SIZE);
        assert!(align_of::<Self>() <= align_of::<HvMessage>());
    };
}
870
/// Payload of an `HvMessageTypeTimerExpired` message.
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct TimerMessagePayload {
    /// Index of the synthetic timer that expired.
    pub timer_index: u32,
    pub reserved: u32,
    /// The time at which the timer was due to expire.
    // NOTE(review): presumably in reference-time (100ns) units — confirm.
    pub expiration_time: u64,
    /// The time at which the message was delivered.
    pub delivery_time: u64,
}
879
880pub mod hypercall {
881    use super::*;
882    use core::ops::RangeInclusive;
883    use zerocopy::Unalign;
884
    /// The hypercall input value.
    ///
    /// NOTE(review): field order defines the bit layout (LSB first); do not
    /// reorder.
    #[bitfield(u64)]
    pub struct Control {
        /// The hypercall code.
        pub code: u16,
        /// If this hypercall is a fast hypercall.
        pub fast: bool,
        /// The variable header size, in qwords.
        #[bits(10)]
        pub variable_header_size: usize,
        #[bits(4)]
        _rsvd0: u8,
        /// Specifies that the hypercall should be handled by the L0 hypervisor in a nested environment.
        pub nested: bool,
        /// The element count for rep hypercalls.
        #[bits(12)]
        pub rep_count: usize,
        #[bits(4)]
        _rsvd1: u8,
        /// The first element to start processing in a rep hypercall.
        #[bits(12)]
        pub rep_start: usize,
        #[bits(4)]
        _rsvd2: u8,
    }
910
    /// The hypercall output value returned to the guest.
    ///
    /// NOTE(review): field order defines the bit layout; do not reorder.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[must_use]
    pub struct HypercallOutput {
        /// The completion status; zero is success.
        #[bits(16)]
        pub call_status: HvStatus,
        pub rsvd: u16,
        /// For rep hypercalls, the number of elements completed.
        #[bits(12)]
        pub elements_processed: usize,
        #[bits(20)]
        pub rsvd2: u32,
    }
924
925    impl From<HvError> for HypercallOutput {
926        fn from(e: HvError) -> Self {
927            Self::new().with_call_status(Err(e).into())
928        }
929    }
930
    impl HypercallOutput {
        /// A success output with zero elements processed.
        pub const SUCCESS: Self = Self::new();

        /// Converts the call status into a `Result`, ignoring the
        /// elements-processed count.
        pub fn result(&self) -> Result<(), HvError> {
            self.call_status().result()
        }
    }
939
    /// A register name/value pair, as used by the get/set-VP-registers
    /// hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvRegisterAssoc {
        pub name: HvRegisterName,
        // NOTE(review): explicit pad, presumably so `value` lands at a
        // 16-byte boundary — confirm against HvRegisterValue's layout.
        pub pad: [u32; 3],
        pub value: HvRegisterValue,
    }
947
948    impl<N: Into<HvRegisterName>, T: Into<HvRegisterValue>> From<(N, T)> for HvRegisterAssoc {
949        fn from((name, value): (N, T)) -> Self {
950            Self {
951                name: name.into(),
952                pad: [0; 3],
953                value: value.into(),
954            }
955        }
956    }
957
958    impl<N: Copy + Into<HvRegisterName>, T: Copy + Into<HvRegisterValue>> From<&(N, T)>
959        for HvRegisterAssoc
960    {
961        fn from(&(name, value): &(N, T)) -> Self {
962            Self {
963                name: name.into(),
964                pad: [0; 3],
965                value: value.into(),
966            }
967        }
968    }
969
    /// Layout of the guest hypercall MSR: enable/lock bits plus the guest
    /// page number of the hypercall page.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MsrHypercallContents {
        pub enable: bool,
        pub locked: bool,
        #[bits(10)]
        pub reserved_p: u64,
        /// Guest page number of the hypercall page.
        #[bits(52)]
        pub gpn: u64,
    }

    /// Input for the post-message hypercall: a message directed at a
    /// connection, with up to 240 bytes of payload.
    #[repr(C, align(8))]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessage {
        pub connection_id: u32,
        pub padding: u32,
        pub message_type: u32,
        /// Number of valid bytes in `payload`.
        pub payload_size: u32,
        pub payload: [u8; 240],
    }

    /// Input for the signal-event hypercall.
    #[repr(C, align(8))]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEvent {
        pub connection_id: u32,
        pub flag_number: u16,
        pub rsvd: u16,
    }

    /// Input for the post-message-direct hypercall: a full `HvMessage`
    /// addressed to a specific partition/VP/VTL/SINT.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessageDirect {
        pub partition_id: u64,
        pub vp_index: u32,
        pub vtl: u8,
        pub padding0: [u8; 3],
        pub sint: u8,
        pub padding1: [u8; 3],
        // `message` starts at byte offset 20 in this layout, which is not
        // 8-aligned, hence the `Unalign` wrapper.
        pub message: Unalign<HvMessage>,
        pub padding2: u32,
    }

    /// Input for the signal-event-direct hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirect {
        pub target_partition: u64,
        pub target_vp: u32,
        pub target_vtl: u8,
        pub target_sint: u8,
        pub flag_number: u16,
    }

    /// Output of the signal-event-direct hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirectOutput {
        // NOTE(review): presumably nonzero when the event flag transitioned
        // from clear to set — confirm encoding.
        pub newly_signaled: u8,
        pub rsvd: [u8; 7],
    }

    /// A device interrupt description.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptEntry {
        pub source: HvInterruptSource,
        pub rsvd: u32,
        pub data: [u32; 2],
    }

    open_enum! {
        /// The source of a device interrupt.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterruptSource: u32 {
            MSI = 1,
            IO_APIC = 2,
        }
    }

    /// Describes the target of a device interrupt.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptTarget {
        pub vector: u32,
        pub flags: HvInterruptTargetFlags,
        /// Interpretation depends on `flags` (e.g. `processor_set`).
        pub mask_or_format: u64,
    }
1052
    /// Flags qualifying an [`InterruptTarget`].
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInterruptTargetFlags {
        pub multicast: bool,
        pub processor_set: bool,
        pub proxy_redirect: bool,
        #[bits(29)]
        pub reserved: u32,
    }

    // Raw bit values corresponding to the `HvInterruptTargetFlags` fields.
    pub const HV_DEVICE_INTERRUPT_TARGET_MULTICAST: u32 = 1;
    pub const HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET: u32 = 2;
    pub const HV_DEVICE_INTERRUPT_TARGET_PROXY_REDIRECT: u32 = 4;

    // Generic (VP) set formats used in hypercall inputs.
    pub const HV_GENERIC_SET_SPARSE_4K: u64 = 0;
    pub const HV_GENERIC_SET_ALL: u64 = 1;
1069
    /// Input for the retarget-device-interrupt hypercall.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct RetargetDeviceInterrupt {
        pub partition_id: u64,
        pub device_id: u64,
        pub entry: InterruptEntry,
        pub rsvd: u64,
        pub target_header: InterruptTarget,
    }

    /// A VTL specification byte used in hypercall inputs: all-zero targets
    /// the current VTL, otherwise an explicit VTL.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInputVtl {
        /// The target VTL; only meaningful when `use_target_vtl` is set.
        #[bits(4)]
        pub target_vtl_value: u8,
        /// If set, `target_vtl_value` selects the VTL; otherwise the current
        /// VTL is targeted.
        pub use_target_vtl: bool,
        /// Must be zero (validated by [`HvInputVtl::target_vtl`]).
        #[bits(3)]
        pub reserved: u8,
    }
1089
1090    impl From<Vtl> for HvInputVtl {
1091        fn from(value: Vtl) -> Self {
1092            Self::from(Some(value))
1093        }
1094    }
1095
1096    impl From<Option<Vtl>> for HvInputVtl {
1097        fn from(value: Option<Vtl>) -> Self {
1098            Self::new()
1099                .with_use_target_vtl(value.is_some())
1100                .with_target_vtl_value(value.map_or(0, Into::into))
1101        }
1102    }
1103
    impl HvInputVtl {
        /// Returns the target VTL, or `None` to indicate the current VTL.
        ///
        /// Fails with `HvError::InvalidParameter` if any reserved bit is set.
        pub fn target_vtl(&self) -> Result<Option<Vtl>, HvError> {
            if self.reserved() != 0 {
                return Err(HvError::InvalidParameter);
            }
            if self.use_target_vtl() {
                Ok(Some(self.target_vtl_value().try_into()?))
            } else {
                Ok(None)
            }
        }

        /// The all-zero value: target the current VTL.
        pub const CURRENT_VTL: Self = Self::new();
    }
1119
    /// Common input header for the get/set-VP-registers hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetSetVpRegisters {
        pub partition_id: u64,
        pub vp_index: u32,
        pub target_vtl: HvInputVtl,
        pub rsvd: [u8; 3],
    }

    open_enum::open_enum! {
        /// Microsoft operating system identifiers for the guest OS ID.
        #[derive(Default)]
        pub enum HvGuestOsMicrosoftIds: u8 {
            UNDEFINED = 0x00,
            MSDOS = 0x01,
            WINDOWS_3X = 0x02,
            WINDOWS_9X = 0x03,
            WINDOWS_NT = 0x04,
            WINDOWS_CE = 0x05,
        }
    }

    /// Microsoft-format guest OS identity (used when the open-source bit is
    /// clear).
    #[bitfield(u64)]
    pub struct HvGuestOsMicrosoft {
        #[bits(40)]
        _rsvd: u64,
        #[bits(8)]
        pub os_id: u8,
        // The top bit must be zero and the least significant 15 bits holds the value of the vendor id.
        #[bits(16)]
        pub vendor_id: u16,
    }

    open_enum::open_enum! {
        /// Open-source operating system identifiers for the guest OS ID.
        #[derive(Default)]
        pub enum HvGuestOsOpenSourceType: u8 {
            UNDEFINED = 0x00,
            LINUX = 0x01,
            FREEBSD = 0x02,
            XEN = 0x03,
            ILLUMOS = 0x04,
        }
    }

    /// Open-source-format guest OS identity (used when the open-source bit
    /// is set).
    #[bitfield(u64)]
    pub struct HvGuestOsOpenSource {
        #[bits(16)]
        pub build_no: u16,
        #[bits(32)]
        pub version: u32,
        #[bits(8)]
        pub os_id: u8,
        /// See [`HvGuestOsOpenSourceType`].
        #[bits(7)]
        pub os_type: u8,
        #[bits(1)]
        pub is_open_source: bool,
    }

    /// The raw guest OS ID register value; decode with
    /// [`HvGuestOsId::microsoft`] or [`HvGuestOsId::open_source`].
    #[bitfield(u64)]
    pub struct HvGuestOsId {
        #[bits(63)]
        _rsvd: u64,
        is_open_source: bool,
    }
1183
1184    impl HvGuestOsId {
1185        pub fn microsoft(&self) -> Option<HvGuestOsMicrosoft> {
1186            (!self.is_open_source()).then(|| HvGuestOsMicrosoft::from(u64::from(*self)))
1187        }
1188
1189        pub fn open_source(&self) -> Option<HvGuestOsOpenSource> {
1190            (self.is_open_source()).then(|| HvGuestOsOpenSource::from(u64::from(*self)))
1191        }
1192
1193        pub fn as_u64(&self) -> u64 {
1194            self.0
1195        }
1196    }
1197
    // Access-type masks for install-intercept requests.
    pub const HV_INTERCEPT_ACCESS_MASK_NONE: u32 = 0x00;
    pub const HV_INTERCEPT_ACCESS_MASK_READ: u32 = 0x01;
    pub const HV_INTERCEPT_ACCESS_MASK_WRITE: u32 = 0x02;
    pub const HV_INTERCEPT_ACCESS_MASK_READ_WRITE: u32 =
        HV_INTERCEPT_ACCESS_MASK_READ | HV_INTERCEPT_ACCESS_MASK_WRITE;
    pub const HV_INTERCEPT_ACCESS_MASK_EXECUTE: u32 = 0x04;

    open_enum::open_enum! {
        /// The type of an installed intercept. Variant names match the C
        /// definitions, hence the non-upper-case globals.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterceptType: u32 {
            #![expect(non_upper_case_globals)]
            HvInterceptTypeX64IoPort = 0x00000000,
            HvInterceptTypeX64Msr = 0x00000001,
            HvInterceptTypeX64Cpuid = 0x00000002,
            HvInterceptTypeException = 0x00000003,
            HvInterceptTypeHypercall = 0x00000008,
            HvInterceptTypeUnknownSynicConnection = 0x0000000D,
            HvInterceptTypeX64ApicEoi = 0x0000000E,
            HvInterceptTypeRetargetInterruptWithUnknownDeviceId = 0x0000000F,
            HvInterceptTypeX64IoPortRange = 0x00000011,
        }
    }

    /// Type-specific intercept parameters packed into a `u64`; the meaning
    /// depends on the associated [`HvInterceptType`].
    #[repr(transparent)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct HvInterceptParameters(u64);
1224
1225    impl HvInterceptParameters {
1226        pub fn new_io_port(port: u16) -> Self {
1227            Self(port as u64)
1228        }
1229
1230        pub fn new_io_port_range(ports: RangeInclusive<u16>) -> Self {
1231            let base = *ports.start() as u64;
1232            let end = *ports.end() as u64;
1233            Self(base | (end << 16))
1234        }
1235
1236        pub fn new_exception(vector: u16) -> Self {
1237            Self(vector as u64)
1238        }
1239
1240        pub fn io_port(&self) -> u16 {
1241            self.0 as u16
1242        }
1243
1244        pub fn io_port_range(&self) -> RangeInclusive<u16> {
1245            let base = self.0 as u16;
1246            let end = (self.0 >> 16) as u16;
1247            base..=end
1248        }
1249
1250        pub fn cpuid_index(&self) -> u32 {
1251            self.0 as u32
1252        }
1253
1254        pub fn exception(&self) -> u16 {
1255            self.0 as u16
1256        }
1257    }
1258
    /// Input for the install-intercept hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct InstallIntercept {
        pub partition_id: u64,
        /// One or more `HV_INTERCEPT_ACCESS_MASK_*` bits.
        pub access_type_mask: u32,
        pub intercept_type: HvInterceptType,
        pub intercept_parameters: HvInterceptParameters,
    }

    /// Input for the assert-virtual-interrupt hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct AssertVirtualInterrupt {
        pub partition_id: u64,
        pub interrupt_control: HvInterruptControl,
        pub destination_address: u64,
        pub requested_vector: u32,
        pub target_vtl: u8,
        pub rsvd0: u8,
        pub rsvd1: u16,
    }

    /// Input for the start-virtual-processor hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorX64 {
        pub partition_id: u64,
        pub vp_index: u32,
        pub target_vtl: u8,
        pub rsvd0: u8,
        pub rsvd1: u16,
        pub vp_context: InitialVpContextX64,
    }

    /// The initial register state used to start an x64 virtual processor.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextX64 {
        pub rip: u64,
        pub rsp: u64,
        pub rflags: u64,
        pub cs: HvX64SegmentRegister,
        pub ds: HvX64SegmentRegister,
        pub es: HvX64SegmentRegister,
        pub fs: HvX64SegmentRegister,
        pub gs: HvX64SegmentRegister,
        pub ss: HvX64SegmentRegister,
        pub tr: HvX64SegmentRegister,
        pub ldtr: HvX64SegmentRegister,
        pub idtr: HvX64TableRegister,
        pub gdtr: HvX64TableRegister,
        pub efer: u64,
        pub cr0: u64,
        pub cr3: u64,
        pub cr4: u64,
        pub msr_cr_pat: u64,
    }

    /// Input for the start-virtual-processor hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorArm64 {
        pub partition_id: u64,
        pub vp_index: u32,
        pub target_vtl: u8,
        pub rsvd0: u8,
        pub rsvd1: u16,
        pub vp_context: InitialVpContextArm64,
    }

    /// The initial register state used to start an ARM64 virtual processor.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextArm64 {
        pub pc: u64,
        pub sp_elh: u64,
        pub sctlr_el1: u64,
        pub mair_el1: u64,
        pub tcr_el1: u64,
        pub vbar_el1: u64,
        pub ttbr0_el1: u64,
        pub ttbr1_el1: u64,
        pub x18: u64,
    }
1338
1339    impl InitialVpContextX64 {
1340        pub fn as_hv_register_assocs(&self) -> impl Iterator<Item = HvRegisterAssoc> + '_ {
1341            let regs = [
1342                (HvX64RegisterName::Rip, HvRegisterValue::from(self.rip)).into(),
1343                (HvX64RegisterName::Rsp, HvRegisterValue::from(self.rsp)).into(),
1344                (
1345                    HvX64RegisterName::Rflags,
1346                    HvRegisterValue::from(self.rflags),
1347                )
1348                    .into(),
1349                (HvX64RegisterName::Cs, HvRegisterValue::from(self.cs)).into(),
1350                (HvX64RegisterName::Ds, HvRegisterValue::from(self.ds)).into(),
1351                (HvX64RegisterName::Es, HvRegisterValue::from(self.es)).into(),
1352                (HvX64RegisterName::Fs, HvRegisterValue::from(self.fs)).into(),
1353                (HvX64RegisterName::Gs, HvRegisterValue::from(self.gs)).into(),
1354                (HvX64RegisterName::Ss, HvRegisterValue::from(self.ss)).into(),
1355                (HvX64RegisterName::Tr, HvRegisterValue::from(self.tr)).into(),
1356                (HvX64RegisterName::Ldtr, HvRegisterValue::from(self.ldtr)).into(),
1357                (HvX64RegisterName::Idtr, HvRegisterValue::from(self.idtr)).into(),
1358                (HvX64RegisterName::Gdtr, HvRegisterValue::from(self.gdtr)).into(),
1359                (HvX64RegisterName::Efer, HvRegisterValue::from(self.efer)).into(),
1360                (HvX64RegisterName::Cr0, HvRegisterValue::from(self.cr0)).into(),
1361                (HvX64RegisterName::Cr3, HvRegisterValue::from(self.cr3)).into(),
1362                (HvX64RegisterName::Cr4, HvRegisterValue::from(self.cr4)).into(),
1363                (
1364                    HvX64RegisterName::Pat,
1365                    HvRegisterValue::from(self.msr_cr_pat),
1366                )
1367                    .into(),
1368            ];
1369            regs.into_iter()
1370        }
1371    }
1372
    /// Control flags for the x64 translate-virtual-address hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaControlFlagsX64 {
        /// Request data read access
        pub validate_read: bool,
        /// Request data write access
        pub validate_write: bool,
        /// Request instruction fetch access.
        pub validate_execute: bool,
        /// Don't enforce any checks related to access mode (supervisor vs. user; SMEP and SMAP are treated
        /// as disabled).
        pub privilege_exempt: bool,
        /// Set the appropriate page table bits (i.e. access/dirty bit)
        pub set_page_table_bits: bool,
        /// Lock the TLB
        pub tlb_flush_inhibit: bool,
        /// Treat the access as a supervisor mode access irrespective of current mode.
        pub supervisor_access: bool,
        /// Treat the access as a user mode access irrespective of current mode.
        pub user_access: bool,
        /// Enforce the SMAP restriction on supervisor data access to user mode addresses if CR4.SMAP=1
        /// irrespective of current EFLAGS.AC i.e. the behavior for "implicit supervisor-mode accesses"
        /// (e.g. to the GDT, etc.) and when EFLAGS.AC=0. Does nothing if CR4.SMAP=0.
        pub enforce_smap: bool,
        /// Don't enforce the SMAP restriction on supervisor data access to user mode addresses irrespective
        /// of current EFLAGS.AC i.e. the behavior when EFLAGS.AC=1.
        pub override_smap: bool,
        /// Treat the access as a shadow stack access.
        pub shadow_stack: bool,
        #[bits(45)]
        _unused: u64,
        /// Target vtl
        input_vtl_value: u8,
    }
1407
1408    impl TranslateGvaControlFlagsX64 {
1409        pub fn input_vtl(&self) -> HvInputVtl {
1410            self.input_vtl_value().into()
1411        }
1412
1413        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
1414            self.with_input_vtl_value(input_vtl.into())
1415        }
1416
1417        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
1418            self.set_input_vtl_value(input_vtl.into())
1419        }
1420    }
1421
    /// Control flags for the ARM64 translate-virtual-address hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaControlFlagsArm64 {
        /// Request data read access
        pub validate_read: bool,
        /// Request data write access
        pub validate_write: bool,
        /// Request instruction fetch access.
        pub validate_execute: bool,
        _reserved0: bool,
        /// Set the appropriate page table bits (i.e. access/dirty bit)
        pub set_page_table_bits: bool,
        /// Lock the TLB
        pub tlb_flush_inhibit: bool,
        /// Treat the access as a supervisor mode access irrespective of current mode.
        pub supervisor_access: bool,
        /// Treat the access as a user mode access irrespective of current mode.
        pub user_access: bool,
        /// Restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN i.e.
        /// the behavior when PSTATE.PAN=1.
        pub pan_set: bool,
        /// Don't restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN
        /// i.e. the behavior when PSTATE.PAN=0.
        pub pan_clear: bool,
        #[bits(46)]
        _unused: u64,
        /// Target vtl
        #[bits(8)]
        input_vtl_value: u8,
    }
1452
1453    impl TranslateGvaControlFlagsArm64 {
1454        pub fn input_vtl(&self) -> HvInputVtl {
1455            self.input_vtl_value().into()
1456        }
1457
1458        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
1459            self.with_input_vtl_value(input_vtl.into())
1460        }
1461
1462        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
1463            self.set_input_vtl_value(input_vtl.into())
1464        }
1465    }
1466
    /// Input for the translate-virtual-address hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressX64 {
        pub partition_id: u64,
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        pub control_flags: TranslateGvaControlFlagsX64,
        pub gva_page: u64,
    }

    /// Input for the translate-virtual-address hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressArm64 {
        pub partition_id: u64,
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        pub control_flags: TranslateGvaControlFlagsArm64,
        pub gva_page: u64,
    }

    open_enum::open_enum! {
        /// Result codes for address translation.
        pub enum TranslateGvaResultCode: u32 {
            SUCCESS = 0,

            // Translation Failures
            PAGE_NOT_PRESENT = 1,
            PRIVILEGE_VIOLATION = 2,
            INVALID_PAGE_TABLE_FLAGS = 3,

            // GPA access failures
            GPA_UNMAPPED = 4,
            GPA_NO_READ_ACCESS = 5,
            GPA_NO_WRITE_ACCESS = 6,
            GPA_ILLEGAL_OVERLAY_ACCESS = 7,

            /// Intercept of the memory access by either
            /// - a higher VTL
            /// - a nested hypervisor (due to a violation of the nested page table)
            INTERCEPT = 8,

            GPA_UNACCEPTED = 9,
        }
    }

    /// Packed translation result.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResult {
        /// A [`TranslateGvaResultCode`] value.
        pub result_code: u32,
        pub cache_type: u8,
        pub overlay_page: bool,
        #[bits(23)]
        pub reserved: u32,
    }

    /// Output of the translate-virtual-address hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressOutput {
        pub translation_result: TranslateGvaResult,
        pub gpa_page: u64,
    }

    /// Extended translation result (x64), including pending event details.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExX64 {
        pub result: TranslateGvaResult,
        pub reserved: u64,
        pub event_info: HvX64PendingEvent,
    }

    const_assert!(size_of::<TranslateGvaResultExX64>() == 0x30);

    /// Extended translation result (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExArm64 {
        pub result: TranslateGvaResult,
    }

    const_assert!(size_of::<TranslateGvaResultExArm64>() == 0x8);

    /// Output of the extended translate-virtual-address hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputX64 {
        pub translation_result: TranslateGvaResultExX64,
        pub gpa_page: u64,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u64,
    }

    const_assert!(size_of::<TranslateVirtualAddressExOutputX64>() == 0x40);

    /// Output of the extended translate-virtual-address hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputArm64 {
        pub translation_result: TranslateGvaResultExArm64,
        pub gpa_page: u64,
    }

    const_assert!(size_of::<TranslateVirtualAddressExOutputArm64>() == 0x10);
1567
    /// Input header for the get-VP-index-from-APIC-ID hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetVpIndexFromApicId {
        pub partition_id: u64,
        pub target_vtl: u8,
        pub reserved: [u8; 7],
    }

    /// Input for the enable-VP-VTL hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlX64 {
        pub partition_id: u64,
        pub vp_index: u32,
        pub target_vtl: u8,
        pub reserved: [u8; 3],
        pub vp_vtl_context: InitialVpContextX64,
    }

    /// Input for the enable-VP-VTL hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlArm64 {
        pub partition_id: u64,
        pub vp_index: u32,
        pub target_vtl: u8,
        pub reserved: [u8; 3],
        pub vp_vtl_context: InitialVpContextArm64,
    }

    /// Input header for the modify-VTL-protection-mask hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyVtlProtectionMask {
        pub partition_id: u64,
        pub map_flags: HvMapGpaFlags,
        pub target_vtl: HvInputVtl,
        pub reserved: [u8; 3],
    }
1604
    /// Input header for the check-sparse-GPA-page-VTL-access hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccess {
        pub partition_id: u64,
        pub target_vtl: HvInputVtl,
        pub desired_access: u8,
        pub reserved0: u16,
        pub reserved1: u32,
    }
    const_assert!(size_of::<CheckSparseGpaPageVtlAccess>() == 0x10);

    /// Packed output of the check-sparse-GPA-page-VTL-access hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccessOutput {
        /// A [`CheckGpaPageVtlAccessResultCode`] value.
        pub result_code: u8,
        pub denied_access: u8,
        #[bits(4)]
        pub intercepting_vtl: u32,
        #[bits(12)]
        _reserved0: u32,
        _reserved1: u32,
    }
    const_assert!(size_of::<CheckSparseGpaPageVtlAccessOutput>() == 0x8);

    open_enum::open_enum! {
        /// Result codes for the check-GPA-page-VTL-access operation.
        pub enum CheckGpaPageVtlAccessResultCode: u32 {
            SUCCESS = 0,
            MEMORY_INTERCEPT = 1,
        }
    }
1635
    /// The number of VTLs for which permissions can be specified in a VTL permission set.
    pub const HV_VTL_PERMISSION_SET_SIZE: usize = 2;

    /// Per-VTL permission values for a GPA page.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VtlPermissionSet {
        /// VTL permissions for the GPA page, starting from VTL 1.
        pub vtl_permission_from_1: [u16; HV_VTL_PERMISSION_SET_SIZE],
    }

    open_enum::open_enum! {
        /// The expected memory type when accepting pages.
        pub enum AcceptMemoryType: u32 {
            ANY = 0,
            RAM = 1,
        }
    }

    open_enum! {
        /// Host visibility used in hypercall inputs.
        ///
        /// NOTE: While this is a 2 bit set with the lower bit representing host
        /// read access and upper bit representing host write access, hardware
        /// platforms do not support that form of isolation. Only support
        /// private or full shared in this definition.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HostVisibilityType: u8 {
            PRIVATE = 0,
            SHARED = 3,
        }
    }
1666
    // Used by bitfield-struct implicitly.
    impl HostVisibilityType {
        /// Converts raw bits into the open enum (any value is representable).
        const fn from_bits(value: u8) -> Self {
            Self(value)
        }

        /// Returns the raw bit value.
        const fn into_bits(value: Self) -> u8 {
            value.0
        }
    }
1677
    /// Attributes for accepting pages. See [`AcceptGpaPages`]
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptPagesAttributes {
        #[bits(6)]
        /// Supplies the expected memory type [`AcceptMemoryType`].
        pub memory_type: u32,
        #[bits(2)]
        /// Supplies the initial host visibility (exclusive, shared read-only, shared read-write).
        pub host_visibility: HostVisibilityType,
        #[bits(3)]
        /// Supplies the set of VTLs for which initial VTL permissions will be set.
        pub vtl_set: u32,
        #[bits(21)]
        _reserved: u32,
    }

    /// Input header for the accept-GPA-pages hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptGpaPages {
        /// Supplies the partition ID of the partition this request is for.
        pub partition_id: u64,
        /// Supplies attributes of the pages being accepted, such as whether
        /// they should be made host visible.
        pub page_attributes: AcceptPagesAttributes,
        /// Supplies the set of initial VTL permissions.
        pub vtl_permission_set: VtlPermissionSet,
        /// Supplies the GPA page number of the first page to modify.
        pub gpa_page_base: u64,
    }
    const_assert!(size_of::<AcceptGpaPages>() == 0x18);
1709
1710    /// Attributes for unaccepting pages. See [`UnacceptGpaPages`]
1711    #[bitfield(u32)]
1712    pub struct UnacceptPagesAttributes {
1713        #[bits(3)]
1714        pub vtl_set: u32,
1715        #[bits(29)]
1716        _reserved: u32,
1717    }
1718
1719    #[repr(C)]
1720    pub struct UnacceptGpaPages {
1721        /// Supplies the partition ID of the partition this request is for.
1722        pub partition_id: u64,
1723        /// Supplies the set of VTLs for which VTL permissions will be checked.
1724        pub page_attributes: UnacceptPagesAttributes,
1725        ///  Supplies the set of VTL permissions to check against.
1726        pub vtl_permission_set: VtlPermissionSet,
1727        /// Supplies the GPA page number of the first page to modify.
1728        pub gpa_page_base: u64,
1729    }
1730    const_assert!(size_of::<UnacceptGpaPages>() == 0x18);
1731
    /// Packed host-visibility argument for visibility-modification hypercalls.
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyHostVisibility {
        #[bits(2)]
        pub host_visibility: HostVisibilityType,
        #[bits(30)]
        _reserved: u32,
    }

    /// Input header for the modify-sparse-page-visibility hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifySparsePageVisibility {
        pub partition_id: u64,
        pub host_visibility: ModifyHostVisibility,
        pub reserved: u32,
    }

    /// Input header for the query-sparse-page-visibility hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct QuerySparsePageVisibility {
        pub partition_id: u64,
    }
1754
    /// Size of the caller-supplied report data for a VBS VM report request.
    pub const VBS_VM_REPORT_DATA_SIZE: usize = 64;
    /// Maximum size of a generated VBS VM report.
    pub const VBS_VM_MAX_REPORT_SIZE: usize = 2048;

    /// Input for requesting a VBS VM report.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VbsVmCallReport {
        pub report_data: [u8; VBS_VM_REPORT_DATA_SIZE],
    }

    /// Output buffer holding a generated VBS VM report.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VbsVmCallReportOutput {
        pub report: [u8; VBS_VM_MAX_REPORT_SIZE],
    }
1769
    /// Flags controlling how a partition VTL is enabled.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtlFlags {
        pub enable_mbec: bool,
        pub enable_supervisor_shadow_stack: bool,
        pub enable_hardware_hvpt: bool,
        #[bits(5)]
        pub reserved: u8,
    }

    /// Input for the enable-partition-VTL hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtl {
        pub partition_id: u64,
        pub target_vtl: u8,
        pub flags: EnablePartitionVtlFlags,
        pub reserved_z0: u16,
        pub reserved_z1: u32,
    }
1789
    /// Input for the flush-virtual-address-space hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpace {
        pub address_space: u64,
        pub flags: HvFlushFlags,
        pub processor_mask: u64,
    }

    /// Input header for the extended flush-virtual-address-space hypercall,
    /// which takes a VP set instead of a flat processor mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpaceEx {
        pub address_space: u64,
        pub flags: HvFlushFlags,
        pub vp_set_format: u64,
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }

    /// Input header for the pin/unpin-GPA-page-ranges hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PinUnpinGpaPageRangesHeader {
        pub reserved: u64,
    }
1813
    /// Input for the send-synthetic-cluster-IPI hypercall (fixed 64-bit
    /// processor-mask form).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpi {
        /// The interrupt vector to deliver.
        pub vector: u32,
        /// The VTL to target.
        pub target_vtl: HvInputVtl,
        pub flags: u8,
        /// Reserved.
        pub reserved: u16,
        /// Bitmask of target processors.
        pub processor_mask: u64,
    }

    /// Input for the extended send-synthetic-cluster-IPI hypercall, which
    /// takes a variable-sized VP set instead of a fixed mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpiEx {
        /// The interrupt vector to deliver.
        pub vector: u32,
        /// The VTL to target.
        pub target_vtl: HvInputVtl,
        pub flags: u8,
        /// Reserved.
        pub reserved: u16,
        /// Format of the VP set that follows.
        pub vp_set_format: u64,
        /// Valid-banks mask for the VP set that follows.
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }
1835
    /// Flags for the flush-virtual-address-space/-list hypercalls.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvFlushFlags {
        /// Flush on all processors, ignoring the processor mask / VP set.
        pub all_processors: bool,
        /// Flush all address spaces, ignoring the `address_space` field.
        pub all_virtual_address_spaces: bool,
        /// Flush only non-global mappings.
        pub non_global_mappings_only: bool,
        /// GVA ranges use the extended (large-page-capable) format.
        pub use_extended_range_format: bool,
        /// Honor the `target_vtl0`/`target_vtl1` bits below.
        pub use_target_vtl: bool,

        #[bits(3)]
        _reserved: u8,

        pub target_vtl0: bool,
        pub target_vtl1: bool,

        #[bits(54)]
        _reserved2: u64,
    }
1854
    /// A guest virtual address range packed into 64 bits. Interpret the bits
    /// via [`HvGvaRange::as_simple`], [`HvGvaRange::as_extended`], or
    /// [`HvGvaRange::as_extended_large_page`] depending on the format in use.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGvaRange(pub u64);
1858
1859    impl From<u64> for HvGvaRange {
1860        fn from(value: u64) -> Self {
1861            Self(value)
1862        }
1863    }
1864
1865    impl From<HvGvaRange> for u64 {
1866        fn from(value: HvGvaRange) -> Self {
1867            value.0
1868        }
1869    }
1870
1871    impl HvGvaRange {
1872        pub fn as_simple(self) -> HvGvaRangeSimple {
1873            HvGvaRangeSimple(self.0)
1874        }
1875
1876        pub fn as_extended(self) -> HvGvaRangeExtended {
1877            HvGvaRangeExtended(self.0)
1878        }
1879
1880        pub fn as_extended_large_page(self) -> HvGvaRangeExtendedLargePage {
1881            HvGvaRangeExtendedLargePage(self.0)
1882        }
1883    }
1884
    /// Simple GVA range format: a page count and a 4 KB page number.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest virtual address.
        #[bits(52)]
        pub gva_page_number: u64,
    }

    /// Extended GVA range format: supports flagging large pages.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest virtual address when `large_page` is clear.
        #[bits(52)]
        pub gva_page_number: u64,
    }

    /// Extended GVA range format when `large_page` is set.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest virtual address when `large_page` is set.
        #[bits(43)]
        pub gva_large_page_number: u64,
    }
1927
    /// A guest physical address range packed into 64 bits. Interpret the bits
    /// via [`HvGpaRange::as_simple`], [`HvGpaRange::as_extended`], or
    /// [`HvGpaRange::as_extended_large_page`] depending on the format in use.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGpaRange(pub u64);
1931
1932    impl HvGpaRange {
1933        pub fn as_simple(self) -> HvGpaRangeSimple {
1934            HvGpaRangeSimple(self.0)
1935        }
1936
1937        pub fn as_extended(self) -> HvGpaRangeExtended {
1938            HvGpaRangeExtended(self.0)
1939        }
1940
1941        pub fn as_extended_large_page(self) -> HvGpaRangeExtendedLargePage {
1942            HvGpaRangeExtendedLargePage(self.0)
1943        }
1944    }
1945
    /// Simple GPA range format: a page count and a 4 KB page number.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest physical address.
        #[bits(52)]
        pub gpa_page_number: u64,
    }

    /// Extended GPA range format: supports flagging large pages.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest physical address when `large_page` is clear.
        #[bits(52)]
        pub gpa_page_number: u64,
    }

    /// Extended GPA range format when `large_page` is set.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest physical address when `large_page` is set.
        #[bits(43)]
        pub gpa_large_page_number: u64,
    }
1988
    /// Maximum number of data bytes in a single MMIO hypercall access.
    pub const HV_HYPERCALL_MMIO_MAX_DATA_LENGTH: usize = 64;

    /// Input for the MMIO-read hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoRead {
        /// Guest physical address to read.
        pub gpa: u64,
        /// Width of the access in bytes.
        pub access_width: u32,
        /// Reserved.
        pub reserved_z0: u32,
    }

    /// Output of the MMIO-read hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoReadOutput {
        /// The data that was read.
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }

    /// Input for the MMIO-write hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoWrite {
        /// Guest physical address to write.
        pub gpa: u64,
        /// Width of the access in bytes.
        pub access_width: u32,
        /// Reserved.
        pub reserved_z0: u32,
        /// The data to write.
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }
2013}
2014
/// Declares an `open_enum` register-name type plus conversions to and from
/// the untyped [`HvRegisterName`].
///
/// The registers that are shared across all architectures are appended after
/// the caller-provided, architecture-specific variants, so that every
/// generated enum contains them.
macro_rules! registers {
    ($name:ident {
        $(
            $(#[$vattr:meta])*
            $variant:ident = $value:expr
        ),*
        $(,)?
    }) => {
        open_enum! {
            #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
            pub enum $name: u32 {
                #![expect(non_upper_case_globals)]
                // Architecture-specific registers, with any attributes (such
                // as doc comments) forwarded to the generated variants.
                $(
                    $(#[$vattr])*
                    $variant = $value,
                )*
                InstructionEmulationHints = 0x00000002,
                InternalActivityState = 0x00000004,

                // Guest Crash Registers
                GuestCrashP0  = 0x00000210,
                GuestCrashP1  = 0x00000211,
                GuestCrashP2  = 0x00000212,
                GuestCrashP3  = 0x00000213,
                GuestCrashP4  = 0x00000214,
                GuestCrashCtl = 0x00000215,

                PendingInterruption = 0x00010002,
                InterruptState = 0x00010003,
                PendingEvent0 = 0x00010004,
                PendingEvent1 = 0x00010005,
                DeliverabilityNotifications = 0x00010006,

                GicrBaseGpa = 0x00063000,

                VpRuntime = 0x00090000,
                GuestOsId = 0x00090002,
                VpIndex = 0x00090003,
                TimeRefCount = 0x00090004,
                CpuManagementVersion = 0x00090007,
                VpAssistPage = 0x00090013,
                VpRootSignalCount = 0x00090014,
                ReferenceTsc = 0x00090017,
                VpConfig = 0x00090018,
                Ghcb = 0x00090019,
                ReferenceTscSequence = 0x0009001A,
                GuestSchedulerEvent = 0x0009001B,

                Sint0 = 0x000A0000,
                Sint1 = 0x000A0001,
                Sint2 = 0x000A0002,
                Sint3 = 0x000A0003,
                Sint4 = 0x000A0004,
                Sint5 = 0x000A0005,
                Sint6 = 0x000A0006,
                Sint7 = 0x000A0007,
                Sint8 = 0x000A0008,
                Sint9 = 0x000A0009,
                Sint10 = 0x000A000A,
                Sint11 = 0x000A000B,
                Sint12 = 0x000A000C,
                Sint13 = 0x000A000D,
                Sint14 = 0x000A000E,
                Sint15 = 0x000A000F,
                Scontrol = 0x000A0010,
                Sversion = 0x000A0011,
                Sifp = 0x000A0012,
                Sipp = 0x000A0013,
                Eom = 0x000A0014,
                Sirbp = 0x000A0015,

                Stimer0Config = 0x000B0000,
                Stimer0Count = 0x000B0001,
                Stimer1Config = 0x000B0002,
                Stimer1Count = 0x000B0003,
                Stimer2Config = 0x000B0004,
                Stimer2Count = 0x000B0005,
                Stimer3Config = 0x000B0006,
                Stimer3Count = 0x000B0007,
                StimeUnhaltedTimerConfig = 0x000B0100,
                StimeUnhaltedTimerCount = 0x000B0101,

                VsmCodePageOffsets = 0x000D0002,
                VsmVpStatus = 0x000D0003,
                VsmPartitionStatus = 0x000D0004,
                VsmVina = 0x000D0005,
                VsmCapabilities = 0x000D0006,
                VsmPartitionConfig = 0x000D0007,
                GuestVsmPartitionConfig = 0x000D0008,
                VsmVpSecureConfigVtl0 = 0x000D0010,
                VsmVpSecureConfigVtl1 = 0x000D0011,
                VsmVpSecureConfigVtl2 = 0x000D0012,
                VsmVpSecureConfigVtl3 = 0x000D0013,
                VsmVpSecureConfigVtl4 = 0x000D0014,
                VsmVpSecureConfigVtl5 = 0x000D0015,
                VsmVpSecureConfigVtl6 = 0x000D0016,
                VsmVpSecureConfigVtl7 = 0x000D0017,
                VsmVpSecureConfigVtl8 = 0x000D0018,
                VsmVpSecureConfigVtl9 = 0x000D0019,
                VsmVpSecureConfigVtl10 = 0x000D001A,
                VsmVpSecureConfigVtl11 = 0x000D001B,
                VsmVpSecureConfigVtl12 = 0x000D001C,
                VsmVpSecureConfigVtl13 = 0x000D001D,
                VsmVpSecureConfigVtl14 = 0x000D001E,
                VsmVpWaitForTlbLock = 0x000D0020,
            }
        }

        impl From<HvRegisterName> for $name {
            fn from(name: HvRegisterName) -> Self {
                Self(name.0)
            }
        }

        impl From<$name> for HvRegisterName {
            fn from(name: $name) -> Self {
                Self(name.0)
            }
        }
    };
}
2133
/// A hypervisor register for any architecture.
///
/// This exists only to pass registers through layers where the architecture
/// type has been lost. In general, you should use the arch-specific registers.
///
/// The inner value is the raw 32-bit register name shared with the
/// arch-specific enums generated by the `registers!` macro.
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterName(pub u32);
2141
// `HvAllArchRegisterName` declares no variants of its own; it only carries
// the common registers that the `registers!` macro appends to every
// generated enum.
registers! {
    // Typed enum for registers that are shared across architectures.
    HvAllArchRegisterName {}
}
2146
2147impl From<HvAllArchRegisterName> for HvX64RegisterName {
2148    fn from(name: HvAllArchRegisterName) -> Self {
2149        Self(name.0)
2150    }
2151}
2152
2153impl From<HvAllArchRegisterName> for HvArm64RegisterName {
2154    fn from(name: HvAllArchRegisterName) -> Self {
2155        Self(name.0)
2156    }
2157}
2158
// x64-specific register names; the `registers!` macro appends the registers
// common to all architectures.
registers! {
    HvX64RegisterName {
        // X64 User-Mode Registers
        Rax = 0x00020000,
        Rcx = 0x00020001,
        Rdx = 0x00020002,
        Rbx = 0x00020003,
        Rsp = 0x00020004,
        Rbp = 0x00020005,
        Rsi = 0x00020006,
        Rdi = 0x00020007,
        R8 = 0x00020008,
        R9 = 0x00020009,
        R10 = 0x0002000a,
        R11 = 0x0002000b,
        R12 = 0x0002000c,
        R13 = 0x0002000d,
        R14 = 0x0002000e,
        R15 = 0x0002000f,
        Rip = 0x00020010,
        Rflags = 0x00020011,

        // X64 Floating Point and Vector Registers
        Xmm0 = 0x00030000,
        Xmm1 = 0x00030001,
        Xmm2 = 0x00030002,
        Xmm3 = 0x00030003,
        Xmm4 = 0x00030004,
        Xmm5 = 0x00030005,
        Xmm6 = 0x00030006,
        Xmm7 = 0x00030007,
        Xmm8 = 0x00030008,
        Xmm9 = 0x00030009,
        Xmm10 = 0x0003000A,
        Xmm11 = 0x0003000B,
        Xmm12 = 0x0003000C,
        Xmm13 = 0x0003000D,
        Xmm14 = 0x0003000E,
        Xmm15 = 0x0003000F,
        FpMmx0 = 0x00030010,
        FpMmx1 = 0x00030011,
        FpMmx2 = 0x00030012,
        FpMmx3 = 0x00030013,
        FpMmx4 = 0x00030014,
        FpMmx5 = 0x00030015,
        FpMmx6 = 0x00030016,
        FpMmx7 = 0x00030017,
        FpControlStatus = 0x00030018,
        XmmControlStatus = 0x00030019,

        // X64 Control Registers
        Cr0 = 0x00040000,
        Cr2 = 0x00040001,
        Cr3 = 0x00040002,
        Cr4 = 0x00040003,
        Cr8 = 0x00040004,
        Xfem = 0x00040005,
        // X64 Intermediate Control Registers
        IntermediateCr0 = 0x00041000,
        IntermediateCr3 = 0x00041002,
        IntermediateCr4 = 0x00041003,
        IntermediateCr8 = 0x00041004,
        // X64 Debug Registers
        Dr0 = 0x00050000,
        Dr1 = 0x00050001,
        Dr2 = 0x00050002,
        Dr3 = 0x00050003,
        Dr6 = 0x00050004,
        Dr7 = 0x00050005,
        // X64 Segment Registers
        Es = 0x00060000,
        Cs = 0x00060001,
        Ss = 0x00060002,
        Ds = 0x00060003,
        Fs = 0x00060004,
        Gs = 0x00060005,
        Ldtr = 0x00060006,
        Tr = 0x00060007,
        // X64 Table Registers
        Idtr = 0x00070000,
        Gdtr = 0x00070001,
        // X64 Virtualized MSRs
        Tsc = 0x00080000,
        Efer = 0x00080001,
        KernelGsBase = 0x00080002,
        ApicBase = 0x00080003,
        Pat = 0x00080004,
        SysenterCs = 0x00080005,
        SysenterEip = 0x00080006,
        SysenterEsp = 0x00080007,
        Star = 0x00080008,
        Lstar = 0x00080009,
        Cstar = 0x0008000a,
        Sfmask = 0x0008000b,
        InitialApicId = 0x0008000c,
        // X64 Cache control MSRs
        MsrMtrrCap = 0x0008000d,
        MsrMtrrDefType = 0x0008000e,
        MsrMtrrPhysBase0 = 0x00080010,
        MsrMtrrPhysBase1 = 0x00080011,
        MsrMtrrPhysBase2 = 0x00080012,
        MsrMtrrPhysBase3 = 0x00080013,
        MsrMtrrPhysBase4 = 0x00080014,
        MsrMtrrPhysBase5 = 0x00080015,
        MsrMtrrPhysBase6 = 0x00080016,
        MsrMtrrPhysBase7 = 0x00080017,
        MsrMtrrPhysBase8 = 0x00080018,
        MsrMtrrPhysBase9 = 0x00080019,
        MsrMtrrPhysBaseA = 0x0008001a,
        MsrMtrrPhysBaseB = 0x0008001b,
        MsrMtrrPhysBaseC = 0x0008001c,
        MsrMtrrPhysBaseD = 0x0008001d,
        MsrMtrrPhysBaseE = 0x0008001e,
        MsrMtrrPhysBaseF = 0x0008001f,
        MsrMtrrPhysMask0 = 0x00080040,
        MsrMtrrPhysMask1 = 0x00080041,
        MsrMtrrPhysMask2 = 0x00080042,
        MsrMtrrPhysMask3 = 0x00080043,
        MsrMtrrPhysMask4 = 0x00080044,
        MsrMtrrPhysMask5 = 0x00080045,
        MsrMtrrPhysMask6 = 0x00080046,
        MsrMtrrPhysMask7 = 0x00080047,
        MsrMtrrPhysMask8 = 0x00080048,
        MsrMtrrPhysMask9 = 0x00080049,
        MsrMtrrPhysMaskA = 0x0008004a,
        MsrMtrrPhysMaskB = 0x0008004b,
        MsrMtrrPhysMaskC = 0x0008004c,
        MsrMtrrPhysMaskD = 0x0008004d,
        MsrMtrrPhysMaskE = 0x0008004e,
        MsrMtrrPhysMaskF = 0x0008004f,
        MsrMtrrFix64k00000 = 0x00080070,
        MsrMtrrFix16k80000 = 0x00080071,
        MsrMtrrFix16kA0000 = 0x00080072,
        MsrMtrrFix4kC0000 = 0x00080073,
        MsrMtrrFix4kC8000 = 0x00080074,
        MsrMtrrFix4kD0000 = 0x00080075,
        MsrMtrrFix4kD8000 = 0x00080076,
        MsrMtrrFix4kE0000 = 0x00080077,
        MsrMtrrFix4kE8000 = 0x00080078,
        MsrMtrrFix4kF0000 = 0x00080079,
        MsrMtrrFix4kF8000 = 0x0008007a,

        TscAux = 0x0008007B,
        Bndcfgs = 0x0008007C,
        DebugCtl = 0x0008007D,
        MCount = 0x0008007E,
        ACount = 0x0008007F,

        SgxLaunchControl0 = 0x00080080,
        SgxLaunchControl1 = 0x00080081,
        SgxLaunchControl2 = 0x00080082,
        SgxLaunchControl3 = 0x00080083,
        SpecCtrl = 0x00080084,
        PredCmd = 0x00080085,
        VirtSpecCtrl = 0x00080086,
        TscVirtualOffset = 0x00080087,
        TsxCtrl = 0x00080088,
        MsrMcUpdatePatchLevel = 0x00080089,
        Available1 = 0x0008008A,
        Xss = 0x0008008B,
        UCet = 0x0008008C,
        SCet = 0x0008008D,
        Ssp = 0x0008008E,
        Pl0Ssp = 0x0008008F,
        Pl1Ssp = 0x00080090,
        Pl2Ssp = 0x00080091,
        Pl3Ssp = 0x00080092,
        InterruptSspTableAddr = 0x00080093,
        TscVirtualMultiplier = 0x00080094,
        TscDeadline = 0x00080095,
        TscAdjust = 0x00080096,
        Pasid = 0x00080097,
        UmwaitControl = 0x00080098,
        Xfd = 0x00080099,
        XfdErr = 0x0008009A,

        Hypercall = 0x00090001,
        RegisterPage = 0x0009001C,

        // Partition Timer Assist Registers
        EmulatedTimerPeriod = 0x00090030,
        EmulatedTimerControl = 0x00090031,
        PmTimerAssist = 0x00090032,

        // AMD SEV configuration MSRs
        SevControl = 0x00090040,

        // Control register intercept configuration
        CrInterceptControl = 0x000E0000,
        CrInterceptCr0Mask = 0x000E0001,
        CrInterceptCr4Mask = 0x000E0002,
        CrInterceptIa32MiscEnableMask = 0x000E0003,
    }
}
2352
// ARM64-specific register names; the `registers!` macro appends the registers
// common to all architectures.
registers! {
    HvArm64RegisterName {
        HypervisorVersion = 0x00000100,
        PrivilegesAndFeaturesInfo = 0x00000200,
        FeaturesInfo = 0x00000201,
        ImplementationLimitsInfo = 0x00000202,
        HardwareFeaturesInfo = 0x00000203,
        CpuManagementFeaturesInfo = 0x00000204,
        PasidFeaturesInfo = 0x00000205,
        SkipLevelFeaturesInfo = 0x00000206,
        NestedVirtFeaturesInfo = 0x00000207,
        IptFeaturesInfo = 0x00000208,
        IsolationConfiguration = 0x00000209,

        // General-purpose registers
        X0 = 0x00020000,
        X1 = 0x00020001,
        X2 = 0x00020002,
        X3 = 0x00020003,
        X4 = 0x00020004,
        X5 = 0x00020005,
        X6 = 0x00020006,
        X7 = 0x00020007,
        X8 = 0x00020008,
        X9 = 0x00020009,
        X10 = 0x0002000A,
        X11 = 0x0002000B,
        X12 = 0x0002000C,
        X13 = 0x0002000D,
        X14 = 0x0002000E,
        X15 = 0x0002000F,
        X16 = 0x00020010,
        X17 = 0x00020011,
        X18 = 0x00020012,
        X19 = 0x00020013,
        X20 = 0x00020014,
        X21 = 0x00020015,
        X22 = 0x00020016,
        X23 = 0x00020017,
        X24 = 0x00020018,
        X25 = 0x00020019,
        X26 = 0x0002001A,
        X27 = 0x0002001B,
        X28 = 0x0002001C,
        XFp = 0x0002001D,
        XLr = 0x0002001E,
        XSp = 0x0002001F, // alias for either El0/x depending on Cpsr.SPSel
        XSpEl0 = 0x00020020,
        XSpElx = 0x00020021,
        XPc = 0x00020022,
        Cpsr = 0x00020023,
        SpsrEl2 = 0x00021002,

        // EL1 system registers
        SctlrEl1 = 0x00040002,
        Ttbr0El1 = 0x00040005,
        Ttbr1El1 = 0x00040006,
        TcrEl1 = 0x00040007,
        EsrEl1 = 0x00040008,
        FarEl1 = 0x00040009,
        MairEl1 = 0x0004000b,
        VbarEl1 = 0x0004000c,
        ElrEl1 = 0x00040015,
    }
}
2416
/// A 128-bit hypervisor register value, stored as an [`AlignedU128`].
///
/// Narrower register values occupy the low bits; see the `as_*` accessors and
/// the `From` conversions.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterValue(pub AlignedU128);
2420
2421impl HvRegisterValue {
2422    pub fn as_u128(&self) -> u128 {
2423        self.0.into()
2424    }
2425
2426    pub fn as_u64(&self) -> u64 {
2427        self.as_u128() as u64
2428    }
2429
2430    pub fn as_u32(&self) -> u32 {
2431        self.as_u128() as u32
2432    }
2433
2434    pub fn as_u16(&self) -> u16 {
2435        self.as_u128() as u16
2436    }
2437
2438    pub fn as_u8(&self) -> u8 {
2439        self.as_u128() as u8
2440    }
2441
2442    pub fn as_table(&self) -> HvX64TableRegister {
2443        HvX64TableRegister::read_from_prefix(self.as_bytes())
2444            .unwrap()
2445            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2446    }
2447
2448    pub fn as_segment(&self) -> HvX64SegmentRegister {
2449        HvX64SegmentRegister::read_from_prefix(self.as_bytes())
2450            .unwrap()
2451            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2452    }
2453}
2454
2455impl From<u8> for HvRegisterValue {
2456    fn from(val: u8) -> Self {
2457        (val as u128).into()
2458    }
2459}
2460
2461impl From<u16> for HvRegisterValue {
2462    fn from(val: u16) -> Self {
2463        (val as u128).into()
2464    }
2465}
2466
2467impl From<u32> for HvRegisterValue {
2468    fn from(val: u32) -> Self {
2469        (val as u128).into()
2470    }
2471}
2472
2473impl From<u64> for HvRegisterValue {
2474    fn from(val: u64) -> Self {
2475        (val as u128).into()
2476    }
2477}
2478
2479impl From<u128> for HvRegisterValue {
2480    fn from(val: u128) -> Self {
2481        Self(val.into())
2482    }
2483}
2484
/// An x64 descriptor table register (GDTR/IDTR) as laid out in an
/// [`HvRegisterValue`].
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64TableRegister {
    /// Padding so that `limit` and `base` land at their ABI offsets.
    pub pad: [u16; 3],
    /// The table limit.
    pub limit: u16,
    /// The table base address.
    pub base: u64,
}

impl From<HvX64TableRegister> for HvRegisterValue {
    fn from(val: HvX64TableRegister) -> Self {
        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}

impl From<HvRegisterValue> for HvX64TableRegister {
    fn from(val: HvRegisterValue) -> Self {
        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}

/// An x64 segment register as laid out in an [`HvRegisterValue`].
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SegmentRegister {
    /// The segment base address.
    pub base: u64,
    /// The segment limit.
    pub limit: u32,
    /// The segment selector.
    pub selector: u16,
    /// The segment attributes.
    pub attributes: u16,
}

impl From<HvX64SegmentRegister> for HvRegisterValue {
    fn from(val: HvX64SegmentRegister) -> Self {
        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}

impl From<HvRegisterValue> for HvX64SegmentRegister {
    fn from(val: HvRegisterValue) -> Self {
        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}
2525
/// Value layout for the `DeliverabilityNotifications` synthetic register.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
pub struct HvDeliverabilityNotificationsRegister {
    /// x86_64 only.
    pub nmi_notification: bool,
    /// x86_64 only.
    pub interrupt_notification: bool,
    /// x86_64 only.
    #[bits(4)]
    pub interrupt_priority: u8,
    #[bits(42)]
    pub reserved: u64,
    /// One bit per SINT.
    pub sints: u16,
}
2541
open_enum! {
    /// The reason a VTL was entered, as reported in [`HvVpVtlControl`].
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvVtlEntryReason: u32 {
        /// This reason is reserved and is not used.
        RESERVED = 0,

        /// Indicates entry due to a VTL call from a lower VTL.
        VTL_CALL = 1,

        /// Indicates entry due to an interrupt targeted to the VTL.
        INTERRUPT = 2,

        /// Indicates an entry due to an intercept delivered via the intercept page.
        INTERCEPT = 3,
    }
}
2558
/// Per-VP VTL control information, embedded in [`HvVpAssistPage`].
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpVtlControl {
    /// The hypervisor updates the entry reason with an indication as to why
    /// the VTL was entered on the virtual processor.
    pub entry_reason: HvVtlEntryReason,

    /// This flag determines whether the VINA interrupt line is asserted.
    pub vina_status: u8,
    /// Reserved.
    pub reserved_z0: u8,
    /// Reserved.
    pub reserved_z1: u16,

    /// A guest updates the VtlReturn* fields to provide the register values to
    /// restore on VTL return.  The specific register values that are restored
    /// will vary based on whether the VTL is 32-bit or 64-bit: rax and rcx or
    /// eax, ecx, and edx.
    pub registers: [u64; 2],
}

/// Value layout for the `VsmVina` synthetic register (virtual interrupt
/// notification assist).
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterVsmVina {
    /// The interrupt vector to deliver for VINA.
    pub vector: u8,
    /// Whether VINA is enabled.
    pub enabled: bool,
    pub auto_reset: bool,
    pub auto_eoi: bool,
    /// Reserved.
    #[bits(53)]
    pub reserved: u64,
}
2590
/// Layout of the per-VP assist page shared between the hypervisor and the
/// guest.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPage {
    /// APIC assist for optimized EOI processing.
    pub apic_assist: u32,
    /// Reserved.
    pub reserved_z0: u32,

    /// VP-VTL control information
    pub vtl_control: HvVpVtlControl,

    pub nested_enlightenments_control: u64,
    pub enlighten_vm_entry: u8,
    /// Reserved.
    pub reserved_z1: [u8; 7],
    pub current_nested_vmcs: u64,
    pub synthetic_time_unhalted_timer_expired: u8,
    /// Reserved.
    pub reserved_z2: [u8; 7],
    pub virtualization_fault_information: [u8; 40],
    /// Reserved.
    pub reserved_z3: u64,
    /// The intercept message delivered via the intercept page.
    pub intercept_message: HvMessage,
    /// Actions for the hypervisor to take on VTL return.
    pub vtl_return_actions: [u8; 256],
}

/// A signal-event action in [`HvVpAssistPage::vtl_return_actions`].
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPageActionSignalEvent {
    /// Discriminates the action type.
    pub action_type: u64,
    /// The VP to signal.
    pub target_vp: u32,
    /// The VTL to signal.
    pub target_vtl: u8,
    /// The SINT to signal.
    pub target_sint: u8,
    /// The event flag number to set.
    pub flag_number: u16,
}
2622
open_enum! {
    /// The kind of access that triggered an intercept.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterceptAccessType: u8 {
        READ = 0,
        WRITE = 1,
        EXECUTE = 2,
    }
}

/// Snapshot of x64 processor execution state carried in intercept message
/// headers.
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VpExecutionState {
    /// Current privilege level.
    #[bits(2)]
    pub cpl: u8,
    pub cr0_pe: bool,
    pub cr0_am: bool,
    pub efer_lma: bool,
    pub debug_active: bool,
    pub interruption_pending: bool,
    /// The VTL the processor was running in.
    #[bits(4)]
    pub vtl: u8,
    pub enclave_mode: bool,
    pub interrupt_shadow: bool,
    pub virtualization_fault_active: bool,
    /// Reserved.
    #[bits(2)]
    pub reserved: u8,
}

/// Snapshot of ARM64 processor execution state carried in intercept message
/// headers.
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64VpExecutionState {
    /// Current privilege level.
    #[bits(2)]
    pub cpl: u8,
    pub debug_active: bool,
    pub interruption_pending: bool,
    /// The VTL the processor was running in.
    #[bits(4)]
    pub vtl: u8,
    pub virtualization_fault_active: bool,
    /// Reserved.
    #[bits(7)]
    pub reserved: u8,
}
2664
/// The common header at the start of each x64 intercept message.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterceptMessageHeader {
    /// Index of the VP that hit the intercept.
    pub vp_index: u32,
    /// Instruction length in the low nibble, CR8 in the high nibble; unpack
    /// via `instruction_len()` and `cr8()`.
    pub instruction_length_and_cr8: u8,
    /// Whether the access was a read, write, or execute.
    pub intercept_access_type: HvInterceptAccessType,
    /// Processor execution state at the time of the intercept.
    pub execution_state: HvX64VpExecutionState,
    /// CS segment at the time of the intercept.
    pub cs_segment: HvX64SegmentRegister,
    /// RIP at the time of the intercept.
    pub rip: u64,
    /// RFLAGS at the time of the intercept.
    pub rflags: u64,
}
2676
2677impl MessagePayload for HvX64InterceptMessageHeader {}
2678
2679impl HvX64InterceptMessageHeader {
2680    pub fn instruction_len(&self) -> u8 {
2681        self.instruction_length_and_cr8 & 0xf
2682    }
2683
2684    pub fn cr8(&self) -> u8 {
2685        self.instruction_length_and_cr8 >> 4
2686    }
2687}
2688
/// The common header at the start of each ARM64 intercept message.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64InterceptMessageHeader {
    /// Index of the VP that hit the intercept.
    pub vp_index: u32,
    /// Length in bytes of the intercepted instruction.
    pub instruction_length: u8,
    /// Whether the access was a read, write, or execute.
    pub intercept_access_type: HvInterceptAccessType,
    /// Processor execution state at the time of the intercept.
    pub execution_state: HvArm64VpExecutionState,
    /// Program counter at the time of the intercept.
    pub pc: u64,
    // NOTE(review): `cspr` appears to be a transposition of CPSR (current
    // program status register); renaming the public field would break callers.
    pub cspr: u64,
}
const_assert!(size_of::<HvArm64InterceptMessageHeader>() == 0x18);

impl MessagePayload for HvArm64InterceptMessageHeader {}
2702
/// Packed access information for an I/O port intercept: access size in bits
/// 0-2, string-op flag in bit 3, rep-prefix flag in bit 4.
#[repr(transparent)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortAccessInfo(pub u8);
2706
2707impl HvX64IoPortAccessInfo {
2708    pub fn new(access_size: u8, string_op: bool, rep_prefix: bool) -> Self {
2709        let mut info = access_size & 0x7;
2710
2711        if string_op {
2712            info |= 0x8;
2713        }
2714
2715        if rep_prefix {
2716            info |= 0x10;
2717        }
2718
2719        Self(info)
2720    }
2721
2722    pub fn access_size(&self) -> u8 {
2723        self.0 & 0x7
2724    }
2725
2726    pub fn string_op(&self) -> bool {
2727        self.0 & 0x8 != 0
2728    }
2729
2730    pub fn rep_prefix(&self) -> bool {
2731        self.0 & 0x10 != 0
2732    }
2733}
2734
/// Intercept message delivered for an I/O port access.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortInterceptMessage {
    /// Common intercept header.
    pub header: HvX64InterceptMessageHeader,
    /// The accessed port number.
    pub port_number: u16,
    /// Packed access size / string-op / rep-prefix information.
    pub access_info: HvX64IoPortAccessInfo,
    /// Number of bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    /// Reserved.
    pub reserved: u32,
    /// RAX at the time of the intercept.
    pub rax: u64,
    /// Bytes of the intercepted instruction.
    pub instruction_bytes: [u8; 16],
    /// DS segment at the time of the intercept.
    pub ds_segment: HvX64SegmentRegister,
    /// ES segment at the time of the intercept.
    pub es_segment: HvX64SegmentRegister,
    /// RCX at the time of the intercept.
    pub rcx: u64,
    /// RSI at the time of the intercept.
    pub rsi: u64,
    /// RDI at the time of the intercept.
    pub rdi: u64,
}

impl MessagePayload for HvX64IoPortInterceptMessage {}
2753
2754#[bitfield(u8)]
2755#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
2756pub struct HvX64MemoryAccessInfo {
2757    pub gva_valid: bool,
2758    pub gva_gpa_valid: bool,
2759    pub hypercall_output_pending: bool,
2760    pub tlb_locked: bool,
2761    pub supervisor_shadow_stack: bool,
2762    #[bits(3)]
2763    pub reserved1: u8,
2764}
2765
2766#[bitfield(u8)]
2767#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
2768pub struct HvArm64MemoryAccessInfo {
2769    pub gva_valid: bool,
2770    pub gva_gpa_valid: bool,
2771    pub hypercall_output_pending: bool,
2772    #[bits(5)]
2773    pub reserved1: u8,
2774}
2775
2776open_enum! {
2777    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
2778    pub enum HvCacheType: u32 {
2779        #![expect(non_upper_case_globals)]
2780        HvCacheTypeUncached = 0,
2781        HvCacheTypeWriteCombining = 1,
2782        HvCacheTypeWriteThrough = 4,
2783        HvCacheTypeWriteProtected = 5,
2784        HvCacheTypeWriteBack = 6,
2785    }
2786}
2787
/// Message payload delivered for an x64 memory (GPA) intercept
/// (0x50 bytes, per the `const_assert` below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MemoryInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub cache_type: HvCacheType,
    /// Number of valid bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    pub memory_access_info: HvX64MemoryAccessInfo,
    pub tpr_priority: u8,
    pub reserved: u8,
    /// Valid only when `memory_access_info.gva_valid()` is set.
    pub guest_virtual_address: u64,
    pub guest_physical_address: u64,
    pub instruction_bytes: [u8; 16],
}

impl MessagePayload for HvX64MemoryInterceptMessage {}
const_assert!(size_of::<HvX64MemoryInterceptMessage>() == 0x50);

/// Message payload delivered for an ARM64 memory (GPA) intercept
/// (0x40 bytes, per the `const_assert` below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64MemoryInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub cache_type: HvCacheType,
    /// Number of valid bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    pub memory_access_info: HvArm64MemoryAccessInfo,
    pub reserved1: u16,
    pub instruction_bytes: [u8; 4],
    pub reserved2: u32,
    /// Valid only when `memory_access_info.gva_valid()` is set.
    pub guest_virtual_address: u64,
    pub guest_physical_address: u64,
    /// Fault syndrome reported for the access.
    pub syndrome: u64,
}

impl MessagePayload for HvArm64MemoryInterceptMessage {}
const_assert!(size_of::<HvArm64MemoryInterceptMessage>() == 0x40);

/// Message payload delivered for an ARM64 MMIO intercept
/// (0x48 bytes, per the `const_assert` below).
#[repr(C)]
#[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)]
pub struct HvArm64MmioInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub guest_physical_address: u64,
    /// Size of the access in bytes.
    pub access_size: u32,
    pub data: [u8; 32],
    pub padding: u32,
}

impl MessagePayload for HvArm64MmioInterceptMessage {}
const_assert!(size_of::<HvArm64MmioInterceptMessage>() == 0x48);
2835
/// Message payload delivered for an x64 MSR access intercept.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MsrInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    /// The MSR being accessed.
    pub msr_number: u32,
    pub reserved: u32,
    pub rdx: u64,
    pub rax: u64,
}

impl MessagePayload for HvX64MsrInterceptMessage {}

/// Message payload delivered for an x64 SIPI intercept.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SipiInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    /// VP targeted by the SIPI.
    pub target_vp_index: u32,
    /// Startup vector of the SIPI.
    pub vector: u32,
}

impl MessagePayload for HvX64SipiInterceptMessage {}

/// Message payload indicating which SINTs have become deliverable (x64).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SynicSintDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    /// Bitmask of SINTs that are now deliverable.
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvX64SynicSintDeliverableMessage {}

/// Message payload indicating which SINTs have become deliverable (ARM64).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64SynicSintDeliverableMessage {
    pub header: HvArm64InterceptMessageHeader,
    /// Bitmask of SINTs that are now deliverable.
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvArm64SynicSintDeliverableMessage {}

/// Message payload indicating a pending interruption has become deliverable.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptionDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    /// The class of interruption that is now deliverable.
    pub deliverable_type: HvX64PendingInterruptionType,
    pub rsvd: [u8; 3],
    pub rsvd2: u32,
}

impl MessagePayload for HvX64InterruptionDeliverableMessage {}

/// Classes of pending x64 interruptions.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvX64PendingInterruptionType: u8 {
        HV_X64_PENDING_INTERRUPT = 0,
        HV_X64_PENDING_NMI = 2,
        HV_X64_PENDING_EXCEPTION = 3,
        HV_X64_PENDING_SOFTWARE_INTERRUPT = 4,
        HV_X64_PENDING_PRIVILEGED_SOFTWARE_EXCEPTION = 5,
        HV_X64_PENDING_SOFTWARE_EXCEPTION = 6,
    }
}
2902
/// Message payload delivered for an x64 hypercall intercept, carrying the
/// register state used for hypercall input.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HypercallInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rbx: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub r8: u64,
    pub rsi: u64,
    pub rdi: u64,
    /// XMM registers used for fast hypercall input/output.
    pub xmm_registers: [AlignedU128; 6],
    pub flags: HvHypercallInterceptMessageFlags,
    pub rsvd2: [u32; 3],
}

impl MessagePayload for HvX64HypercallInterceptMessage {}

/// Message payload delivered for an ARM64 hypercall intercept.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64HypercallInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    /// Immediate operand of the intercepted HVC instruction.
    pub immediate: u16,
    pub reserved: u16,
    pub flags: HvHypercallInterceptMessageFlags,
    /// General purpose registers x0-x17.
    pub x: [u64; 18],
}

impl MessagePayload for HvArm64HypercallInterceptMessage {}

/// Flags shared by the x64 and ARM64 hypercall intercept messages.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvHypercallInterceptMessageFlags {
    pub is_isolated: bool,
    #[bits(31)]
    _reserved: u32,
}

/// Message payload delivered for an x64 CPUID intercept, including the
/// result the hypervisor would have returned by default.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64CpuidInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub default_result_rax: u64,
    pub default_result_rcx: u64,
    pub default_result_rdx: u64,
    pub default_result_rbx: u64,
}

impl MessagePayload for HvX64CpuidInterceptMessage {}
2956
/// Describes an intercepted x64 exception.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInfo {
    /// The `error_code` field of the message is valid.
    pub error_code_valid: bool,
    pub software_exception: bool,
    #[bits(6)]
    reserved: u8,
}

/// Message payload delivered for an x64 exception intercept, carrying a
/// full general-purpose register snapshot.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    /// The exception vector.
    pub vector: u16,
    pub exception_info: HvX64ExceptionInfo,
    /// Number of valid bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    /// Valid only when `exception_info.error_code_valid()` is set.
    pub error_code: u32,
    pub exception_parameter: u64,
    pub reserved: u64,
    pub instruction_bytes: [u8; 16],
    pub ds_segment: HvX64SegmentRegister,
    pub ss_segment: HvX64SegmentRegister,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub rsp: u64,
    pub rbp: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,
}

impl MessagePayload for HvX64ExceptionInterceptMessage {}
2998
/// Message payload indicating a VP entered an invalid register state.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInvalidVpRegisterMessage {
    pub vp_index: u32,
    pub reserved: u32,
}

impl MessagePayload for HvInvalidVpRegisterMessage {}

/// Message payload delivered when an APIC EOI is intercepted.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ApicEoiMessage {
    pub vp_index: u32,
    /// The vector being EOIed.
    pub interrupt_vector: u32,
}

impl MessagePayload for HvX64ApicEoiMessage {}

/// Message payload delivered for an unrecoverable exception intercept;
/// only the common header is carried.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64UnrecoverableExceptionMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64UnrecoverableExceptionMessage {}

/// Message payload delivered for a halt intercept; only the common header
/// is carried.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HaltMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64HaltMessage {}

/// Message payload delivered for an ARM64 reset (e.g. PSCI) intercept.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64ResetInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub reset_type: HvArm64ResetType,
    pub reset_code: u32,
}

impl MessagePayload for HvArm64ResetInterceptMessage {}

/// The kind of reset requested in an [`HvArm64ResetInterceptMessage`].
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvArm64ResetType: u32 {
        POWER_OFF = 0,
        REBOOT = 1,
        SYSTEM_RESET = 2,
        HIBERNATE = 3,
    }
}
3052
/// Flags for an x64 register access intercept.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessageFlags {
    pub is_memory_op: bool,
    #[bits(7)]
    _rsvd: u8,
}

/// Message payload delivered for an x64 register access intercept.
#[repr(C)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub flags: HvX64RegisterInterceptMessageFlags,
    pub rsvd: u8,
    pub rsvd2: u16,
    /// The register being accessed.
    pub register_name: HvX64RegisterName,
    pub access_info: HvX64RegisterAccessInfo,
}

/// Access info for a register intercept: the raw 128-bit source value.
#[repr(transparent)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterAccessInfo(u128);

impl HvX64RegisterAccessInfo {
    /// Wraps a register value as the source operand of the access.
    pub fn new_source_value(source_value: HvRegisterValue) -> Self {
        Self(source_value.as_u128())
    }
}
3081
/// Interrupt delivery modes (`HV_INTERRUPT_TYPE`) for both x64 and ARM64.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterruptType : u32  {
        #![expect(non_upper_case_globals)]
        HvArm64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeLowestPriority = 0x0001,
        HvX64InterruptTypeSmi = 0x0002,
        HvX64InterruptTypeRemoteRead = 0x0003,
        HvX64InterruptTypeNmi = 0x0004,
        HvX64InterruptTypeInit = 0x0005,
        HvX64InterruptTypeSipi = 0x0006,
        HvX64InterruptTypeExtInt = 0x0007,
        HvX64InterruptTypeLocalInt0 = 0x0008,
        HvX64InterruptTypeLocalInt1 = 0x0009,
    }
}

/// The declaration uses the fact the bits for the different
/// architectures don't intersect. When (if ever) they do,
/// will need to come up with a more elaborate abstraction.
/// The other possible downside is the lack of the compile-time
/// checks as adding that will require `guest_arch` support and
/// a large refactoring. To sum up, choosing expediency.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInterruptControl {
    // Raw [`HvInterruptType`] value; use the typed accessors below.
    interrupt_type_value: u32,
    pub x86_level_triggered: bool,
    pub x86_logical_destination_mode: bool,
    pub arm64_asserted: bool,
    #[bits(29)]
    pub unused: u32,
}

impl HvInterruptControl {
    /// Returns the interrupt type as the typed open enum.
    pub fn interrupt_type(&self) -> HvInterruptType {
        HvInterruptType(self.interrupt_type_value())
    }

    /// Sets the interrupt type in place.
    pub fn set_interrupt_type(&mut self, ty: HvInterruptType) {
        self.set_interrupt_type_value(ty.0)
    }

    /// Builder-style variant of [`Self::set_interrupt_type`].
    pub fn with_interrupt_type(self, ty: HvInterruptType) -> Self {
        self.with_interrupt_type_value(ty.0)
    }
}
3130
/// Value format of the `HvRegisterVsmCapabilities` register.
#[bitfield(u64)]
pub struct HvRegisterVsmCapabilities {
    pub dr6_shared: bool,
    /// Bitmask of VTLs supporting MBEC.
    pub mbec_vtl_mask: u16,
    pub deny_lower_vtl_startup: bool,
    pub supervisor_shadow_stack: bool,
    pub hardware_hvpt_available: bool,
    pub software_hvpt_available: bool,
    #[bits(6)]
    pub hardware_hvpt_range_bits: u8,
    pub intercept_page_available: bool,
    pub return_action_available: bool,
    /// If the VTL0 view of memory is mapped to the high address space, which is
    /// the highest legal physical address bit.
    ///
    /// Only available in VTL2.
    pub vtl0_alias_map_available: bool,
    /// If the [`HvRegisterVsmPartitionConfig`] register has support for
    /// `intercept_not_present`.
    ///
    /// Only available in VTL2.
    pub intercept_not_present_available: bool,
    pub install_intercept_ex: bool,
    /// Only available in VTL2.
    pub intercept_system_reset_available: bool,
    #[bits(1)]
    pub reserved1: u8,
    pub proxy_interrupt_redirect_available: bool,
    #[bits(29)]
    pub reserved2: u64,
}

/// Value format of the `HvRegisterVsmPartitionConfig` register.
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionConfig {
    pub enable_vtl_protection: bool,
    #[bits(4)]
    pub default_vtl_protection_mask: u8,
    pub zero_memory_on_reset: bool,
    pub deny_lower_vtl_startup: bool,
    pub intercept_acceptance: bool,
    pub intercept_enable_vtl_protection: bool,
    pub intercept_vp_startup: bool,
    pub intercept_cpuid_unimplemented: bool,
    pub intercept_unrecoverable_exception: bool,
    pub intercept_page: bool,
    pub intercept_restore_partition_time: bool,
    /// The hypervisor will send all unmapped GPA intercepts to VTL2 rather than
    /// the host.
    pub intercept_not_present: bool,
    pub intercept_system_reset: bool,
    #[bits(48)]
    pub reserved: u64,
}

/// Value format of the `HvRegisterVsmPartitionStatus` register.
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionStatus {
    /// Bitmask of VTLs enabled for the partition.
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(4)]
    pub maximum_vtl: u8,
    #[bits(16)]
    pub mbec_enabled_vtl_set: u16,
    #[bits(4)]
    pub supervisor_shadow_stack_enabled_vtl_set: u8,
    #[bits(24)]
    pub reserved: u64,
}

/// Value format of the `HvRegisterGuestVsmPartitionConfig` register.
#[bitfield(u64)]
pub struct HvRegisterGuestVsmPartitionConfig {
    #[bits(4)]
    pub maximum_vtl: u8,
    #[bits(60)]
    pub reserved: u64,
}

/// Value format of the `HvRegisterVsmVpStatus` register.
#[bitfield(u64)]
pub struct HvRegisterVsmVpStatus {
    #[bits(4)]
    pub active_vtl: u8,
    pub active_mbec_enabled: bool,
    #[bits(11)]
    pub reserved_mbz0: u16,
    /// Bitmask of VTLs enabled on this VP.
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(32)]
    pub reserved_mbz1: u32,
}

/// Value format of the `HvRegisterVsmCodePageOffsets` register: offsets of
/// the VTL call/return sequences within the hypercall code page.
#[bitfield(u64)]
pub struct HvRegisterVsmCodePageOffsets {
    #[bits(12)]
    pub call_offset: u16,
    #[bits(12)]
    pub return_offset: u16,
    #[bits(40)]
    pub reserved: u64,
}
3229
/// Saved state of a single synthetic timer.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvStimerState {
    /// Nonzero if an expiration message could not yet be delivered.
    pub undelivered_message_pending: u32,
    pub reserved: u32,
    /// Raw stimer config register value.
    pub config: u64,
    /// Raw stimer count register value.
    pub count: u64,
    pub adjustment: u64,
    pub undelivered_expiration_time: u64,
}

/// Saved state of all four synthetic timers of a VP.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvSyntheticTimersState {
    pub timers: [HvStimerState; 4],
    pub reserved: [u64; 5],
}

/// Value format of the internal activity register (VP suspend states).
#[bitfield(u64)]
pub struct HvInternalActivityRegister {
    pub startup_suspend: bool,
    pub halt_suspend: bool,
    pub idle_suspend: bool,
    #[bits(61)]
    pub reserved: u64,
}
3256
3257#[bitfield(u64)]
3258pub struct HvSynicSint {
3259    pub vector: u8,
3260    _reserved: u8,
3261    pub masked: bool,
3262    pub auto_eoi: bool,
3263    pub polling: bool,
3264    _reserved2: bool,
3265    pub proxy: bool,
3266    #[bits(43)]
3267    _reserved2: u64,
3268}
3269
/// Value format of the SynIC control (`SCONTROL`) register.
#[bitfield(u64)]
pub struct HvSynicScontrol {
    /// Enables the SynIC as a whole.
    pub enabled: bool,
    #[bits(63)]
    _reserved: u64,
}

/// Value format of the SynIC message page (`SIMP`) and event flags page
/// (`SIEFP`) registers, which share a layout.
#[bitfield(u64)]
pub struct HvSynicSimpSiefp {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    /// Guest page number of the message/event flags page.
    #[bits(52)]
    pub base_gpn: u64,
}

/// Value format of a synthetic timer configuration register.
#[bitfield(u64)]
pub struct HvSynicStimerConfig {
    pub enabled: bool,
    pub periodic: bool,
    pub lazy: bool,
    pub auto_enable: bool,
    // Note: On ARM64 the top 3 bits of apic_vector are reserved.
    pub apic_vector: u8,
    /// If set, the timer fires an interrupt directly instead of a message.
    pub direct_mode: bool,
    #[bits(3)]
    pub _reserved1: u8,
    /// SINT used for expiration messages (non-direct mode).
    #[bits(4)]
    pub sint: u8,
    #[bits(44)]
    pub _reserved2: u64,
}
3302
// Event type values for the `event_type` field of the pending-event
// structures below.
pub const HV_X64_PENDING_EVENT_EXCEPTION: u8 = 0;
pub const HV_X64_PENDING_EVENT_MEMORY_INTERCEPT: u8 = 1;
pub const HV_X64_PENDING_EVENT_NESTED_MEMORY_INTERCEPT: u8 = 2;
pub const HV_X64_PENDING_EVENT_VIRTUALIZATION_FAULT: u8 = 3;
pub const HV_X64_PENDING_EVENT_HYPERCALL_OUTPUT: u8 = 4;
pub const HV_X64_PENDING_EVENT_EXT_INT: u8 = 5;
pub const HV_X64_PENDING_EVENT_SHADOW_IPT: u8 = 6;
3310
/// Provides information about an exception
/// (`event_type == HV_X64_PENDING_EVENT_EXCEPTION`).
#[bitfield(u128)]
pub struct HvX64PendingExceptionEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    /// The `error_code` field is valid and should be delivered.
    pub deliver_error_code: bool,
    #[bits(7)]
    pub reserved1: u8,
    pub vector: u16,
    pub error_code: u32,
    pub exception_parameter: u64,
}

/// Provides information about a virtualization fault.
#[bitfield(u128)]
pub struct HvX64PendingVirtualizationFaultEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    pub reserved1: u8,
    pub parameter0: u16,
    pub code: u32,
    pub parameter1: u64,
}

/// Part of [`HvX64PendingEventMemoryIntercept`]
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptPendingEventHeader {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    _reserved0: u8,
}

/// Part of [`HvX64PendingEventMemoryIntercept`]
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptAccessFlags {
    /// Indicates if the guest linear address is valid.
    pub guest_linear_address_valid: bool,
    /// Indicates that the memory intercept was caused by an access to a guest physical address
    /// (instead of a page table as part of a page table walk).
    pub caused_by_gpa_access: bool,
    #[bits(6)]
    _reserved1: u8,
}

/// Provides information about a memory intercept.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryIntercept {
    pub event_header: HvX64PendingEventMemoryInterceptPendingEventHeader,
    /// VTL at which the memory intercept is targeted.
    /// Note: This field must be in Reg0.
    pub target_vtl: u8,
    /// Type of the memory access.
    pub access_type: HvInterceptAccessType,
    pub access_flags: HvX64PendingEventMemoryInterceptAccessFlags,
    pub _reserved2: u32,
    /// The guest linear address that caused the fault.
    pub guest_linear_address: u64,
    /// The guest physical address that caused the memory intercept.
    pub guest_physical_address: u64,
    pub _reserved3: u64,
}
const_assert!(size_of::<HvX64PendingEventMemoryIntercept>() == 0x20);
3386
3387//
3388// Provides information about pending hypercall output.
3389//
3390#[bitfield(u128)]
3391pub struct HvX64PendingHypercallOutputEvent {
3392    pub event_pending: bool,
3393    #[bits(3)]
3394    pub event_type: u8,
3395    #[bits(4)]
3396    pub reserved0: u8,
3397
3398    // Whether the hypercall has been retired.
3399    pub retired: bool,
3400
3401    #[bits(23)]
3402    pub reserved1: u32,
3403
3404    // Indicates the number of bytes to be written starting from OutputGpa.
3405    pub output_size: u32,
3406
3407    // Indicates the output GPA, which is not required to be page-aligned.
3408    pub output_gpa: u64,
3409}
3410
3411// Provides information about a directly asserted ExtInt.
3412#[bitfield(u128)]
3413pub struct HvX64PendingExtIntEvent {
3414    pub event_pending: bool,
3415    #[bits(3)]
3416    pub event_type: u8,
3417    #[bits(4)]
3418    pub reserved0: u8,
3419    pub vector: u8,
3420    #[bits(48)]
3421    pub reserved1: u64,
3422    pub reserved2: u64,
3423}
3424
3425// Provides information about pending IPT shadowing.
3426#[bitfield(u128)]
3427pub struct HvX64PendingShadowIptEvent {
3428    pub event_pending: bool,
3429    #[bits(4)]
3430    pub event_type: u8,
3431    #[bits(59)]
3432    pub reserved0: u64,
3433
3434    pub reserved1: u64,
3435}
3436
/// Generic view of the first pending-event register: the common header
/// bits plus the type-specific payload.
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventReg0 {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved: u8,
    #[bits(120)]
    pub data: u128,
}

/// A full pending event: two 128-bit registers
/// (0x20 bytes, per the `const_assert` below).
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEvent {
    pub reg_0: HvX64PendingEventReg0,
    pub reg_1: AlignedU128,
}
const_assert!(size_of::<HvX64PendingEvent>() == 0x20);

impl From<HvX64PendingExceptionEvent> for HvX64PendingEvent {
    /// Wraps an exception event in reg_0; reg_1 is unused (zero).
    fn from(exception_event: HvX64PendingExceptionEvent) -> Self {
        HvX64PendingEvent {
            reg_0: HvX64PendingEventReg0::from(u128::from(exception_event)),
            reg_1: 0u128.into(),
        }
    }
}
3465
/// Value format of the pending interruption register.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingInterruptionRegister {
    pub interruption_pending: bool,
    /// An [`HvX64PendingInterruptionType`] value.
    #[bits(3)]
    pub interruption_type: u8,
    pub deliver_error_code: bool,
    #[bits(4)]
    pub instruction_length: u8,
    pub nested_event: bool,
    #[bits(6)]
    pub reserved: u8,
    pub interruption_vector: u16,
    pub error_code: u32,
}

/// Value format of the interrupt state register.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptStateRegister {
    pub interrupt_shadow: bool,
    pub nmi_masked: bool,
    #[bits(62)]
    pub reserved: u64,
}

/// Hints provided to the instruction emulator.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInstructionEmulatorHintsRegister {
    /// Indicates whether any secure VTL is enabled for the partition.
    pub partition_secure_vtl_enabled: bool,
    /// Indicates whether kernel or user execute control architecturally
    /// applies to execute accesses.
    pub mbec_user_execute_control: bool,
    #[bits(62)]
    pub _padding: u64,
}
3502
/// Kinds of AArch64 pending events.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvAarch64PendingEventType: u8 {
        EXCEPTION = 0,
        SYNTHETIC_EXCEPTION = 1,
        HYPERCALL_OUTPUT = 2,
    }
}
3511
3512// Support for bitfield structures.
3513impl HvAarch64PendingEventType {
3514    const fn from_bits(val: u8) -> Self {
3515        HvAarch64PendingEventType(val)
3516    }
3517
3518    const fn into_bits(self) -> u8 {
3519        self.0
3520    }
3521}
3522
/// Common header byte shared by all AArch64 pending-event layouts.
#[bitfield[u8]]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingEventHeader {
    #[bits(1)]
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: HvAarch64PendingEventType,
    #[bits(4)]
    pub reserved: u8,
}

/// AArch64 pending exception event payload.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingExceptionEvent {
    pub header: HvAarch64PendingEventHeader,
    pub _padding: [u8; 7],
    /// Exception syndrome.
    pub syndrome: u64,
    pub fault_address: u64,
}

/// Flags for [`HvAarch64PendingHypercallOutputEvent`].
#[bitfield[u8]]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingHypercallOutputEventFlags {
    /// Whether the hypercall has been retired.
    #[bits(1)]
    pub retired: u8,
    #[bits(7)]
    pub reserved: u8,
}

/// AArch64 pending hypercall-output event payload.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingHypercallOutputEvent {
    pub header: HvAarch64PendingEventHeader,
    pub flags: HvAarch64PendingHypercallOutputEventFlags,
    pub reserved: u16,
    /// Number of bytes to write starting at `output_gpa`.
    pub output_size: u32,
    pub output_gpa: u64,
}

/// Generic AArch64 pending event: header byte plus raw type-specific data.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingEvent {
    pub header: HvAarch64PendingEventHeader,
    /// Raw event payload; interpretation depends on `header.event_type()`.
    pub event_data: [u8; 15],
    pub _padding: [u64; 2],
}
3569
/// Flags controlling GPA mapping permissions.
#[bitfield(u32)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMapGpaFlags {
    pub readable: bool,
    pub writable: bool,
    pub kernel_executable: bool,
    pub user_executable: bool,
    pub supervisor_shadow_stack: bool,
    pub paging_writability: bool,
    pub verify_paging_writability: bool,
    #[bits(8)]
    _padding0: u32,
    pub adjustable: bool,
    #[bits(16)]
    _padding1: u32,
}

/// [`HvMapGpaFlags`] with no permissions set
pub const HV_MAP_GPA_PERMISSIONS_NONE: HvMapGpaFlags = HvMapGpaFlags::new();
/// [`HvMapGpaFlags`] with read, write, and both execute permissions set.
pub const HV_MAP_GPA_PERMISSIONS_ALL: HvMapGpaFlags = HvMapGpaFlags::new()
    .with_readable(true)
    .with_writable(true)
    .with_kernel_executable(true)
    .with_user_executable(true);
3594
/// Full monitor page layout used for monitored notifications.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPage {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    /// Four groups of 32 trigger flags each.
    pub trigger_group: [HvMonitorTriggerGroup; 4],
    pub reserved2: [u64; 3],
    pub next_check_time: [[u32; 32]; 4],
    pub latency: [[u16; 32]; 4],
    pub reserved3: [u64; 32],
    /// Per-flag connection parameters, indexed [group][flag].
    pub parameter: [[HvMonitorParameter; 32]; 4],
    pub reserved4: [u8; 1984],
}

/// Prefix of [`HvMonitorPage`] containing only the trigger state and groups.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPageSmall {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    pub trigger_group: [HvMonitorTriggerGroup; 4],
}

/// One group of monitor trigger flags.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerGroup {
    /// Bitmask of pending triggers.
    pub pending: u32,
    /// Bitmask of armed triggers.
    pub armed: u32,
}

/// Connection parameters associated with one monitor flag.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorParameter {
    pub connection_id: u32,
    pub flag_number: u16,
    pub reserved: u16,
}

/// Monitor page trigger state: which trigger groups are enabled.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerState {
    #[bits(4)]
    pub group_enable: u32,
    #[bits(28)]
    pub reserved: u32,
}
3640
/// Describes the emulated ACPI PM timer.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvPmTimerInfo {
    /// I/O port of the PM timer.
    #[bits(16)]
    pub port: u16,
    /// True for a 24-bit timer, per the bit name; false otherwise.
    #[bits(1)]
    pub width_24: bool,
    #[bits(1)]
    pub enabled: bool,
    #[bits(14)]
    pub reserved1: u32,
    #[bits(32)]
    pub reserved2: u32,
}

/// Value format of the SEV control register: location of the VMSA.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterSevControl {
    pub enable_encrypted_state: bool,
    #[bits(11)]
    _rsvd1: u64,
    /// Guest page number of the VMSA.
    #[bits(52)]
    pub vmsa_gpa_page_number: u64,
}

/// Value format of the reference TSC register: location of the TSC page.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterReferenceTsc {
    pub enable: bool,
    #[bits(11)]
    pub reserved_p: u64,
    /// Guest page number of the reference TSC page.
    #[bits(52)]
    pub gpn: u64,
}
3675
/// Layout of the reference TSC page shared with the guest.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvReferenceTscPage {
    /// Sequence number; [`HV_REFERENCE_TSC_SEQUENCE_INVALID`] means the
    /// page contents are not valid.
    pub tsc_sequence: u32,
    pub reserved1: u32,
    pub tsc_scale: u64,
    pub tsc_offset: i64,
    pub timeline_bias: u64,
    pub tsc_multiplier: u64,
    pub reserved2: [u64; 507],
}

/// `tsc_sequence` value indicating the reference TSC page is invalid.
pub const HV_REFERENCE_TSC_SEQUENCE_INVALID: u32 = 0;
3689
/// Flags for a VMGEXIT intercept message.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageFlags {
    /// The `ghcb_page` field of the message is valid.
    pub ghcb_page_valid: bool,
    pub ghcb_request_error: bool,
    #[bits(62)]
    _reserved: u64,
}

/// Standard-usage contents of the GHCB page captured at VMGEXIT.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPageStandard {
    pub ghcb_protocol_version: u16,
    _reserved: [u16; 3],
    pub sw_exit_code: u64,
    pub sw_exit_info1: u64,
    pub sw_exit_info2: u64,
    pub sw_scratch: u64,
}

/// GHCB page contents captured at VMGEXIT.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPage {
    /// GHCB usage value; `standard` applies to standard usage.
    pub ghcb_usage: u32,
    _reserved: u32,
    pub standard: HvX64VmgexitInterceptMessageGhcbPageStandard,
}

/// Message payload delivered for a VMGEXIT intercept (SEV-ES/SNP guests).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    /// Raw value of the GHCB MSR at the time of exit.
    pub ghcb_msr: u64,
    pub flags: HvX64VmgexitInterceptMessageFlags,
    /// Valid only when `flags.ghcb_page_valid()` is set.
    pub ghcb_page: HvX64VmgexitInterceptMessageGhcbPage,
}

impl MessagePayload for HvX64VmgexitInterceptMessage {}
3728
/// Value format of the VP assist page register: location of the page.
#[bitfield(u64)]
pub struct HvRegisterVpAssistPage {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    /// Guest page number of the VP assist page.
    #[bits(52)]
    pub gpa_page_number: u64,
}

/// Dirty flags for [`HvX64RegisterPage`], indicating which register groups
/// have been modified.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterPageDirtyFlags {
    pub general_purpose: bool,
    pub instruction_pointer: bool,
    pub xmm: bool,
    pub segments: bool,
    pub flags: bool,
    #[bits(27)]
    reserved: u32,
}

/// Layout of the x64 register page (one guest page, per the assert below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterPage {
    pub version: u16,
    /// Nonzero when the page contents are valid.
    pub is_valid: u8,
    pub vtl: u8,
    pub dirty: HvX64RegisterPageDirtyFlags,
    pub gp_registers: [u64; 16],
    pub rip: u64,
    pub rflags: u64,
    pub reserved: u64,
    pub xmm: [u128; 6],
    pub segment: [u128; 6],
    // Misc. control registers (cannot be set via this interface).
    pub cr0: u64,
    pub cr3: u64,
    pub cr4: u64,
    pub cr8: u64,
    pub efer: u64,
    pub dr7: u64,
    pub pending_interruption: HvX64PendingInterruptionRegister,
    pub interrupt_state: HvX64InterruptStateRegister,
    pub instruction_emulation_hints: HvInstructionEmulatorHintsRegister,
    pub reserved_end: [u8; 3672],
}

const _: () = assert!(size_of::<HvX64RegisterPage>() == HV_PAGE_SIZE_USIZE);
3777
/// Dirty flags for [`HvAarch64RegisterPage`], indicating which register
/// groups have been modified.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64RegisterPageDirtyFlags {
    _unused: bool,
    pub instruction_pointer: bool,
    pub processor_state: bool,
    pub control_registers: bool,
    #[bits(28)]
    reserved: u32,
}

/// Layout of the AArch64 register page (one guest page, per the assert below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64RegisterPage {
    pub version: u16,
    /// Nonzero when the page contents are valid.
    pub is_valid: u8,
    pub vtl: u8,
    pub dirty: HvAarch64RegisterPageDirtyFlags,
    // Reserved.
    pub _rsvd: [u64; 33],
    // Instruction pointer.
    pub pc: u64,
    // Processor state.
    pub cpsr: u64,
    // Control registers.
    pub sctlr_el1: u64,
    pub tcr_el1: u64,
    // Reserved.
    pub reserved_end: [u8; 3792],
}

const _: () = assert!(size_of::<HvAarch64RegisterPage>() == HV_PAGE_SIZE_USIZE);
3810
/// 64-bit value format of the VSM "WP wait for TLB lock" register.
#[bitfield(u64)]
pub struct HvRegisterVsmWpWaitForTlbLock {
    /// Bit 0: the wait flag; all other bits are reserved.
    pub wait: bool,
    #[bits(63)]
    _reserved: u64,
}
3817
/// 64-bit value format of the per-VP VSM secure VTL configuration register.
#[bitfield(u64)]
pub struct HvRegisterVsmVpSecureVtlConfig {
    /// Mode-based execution control (MBEC) is enabled.
    pub mbec_enabled: bool,
    /// The TLB is locked.
    pub tlb_locked: bool,
    /// Supervisor shadow stacks are enabled.
    pub supervisor_shadow_stack_enabled: bool,
    /// Hardware HVPT is enabled.
    pub hardware_hvpt_enabled: bool,
    #[bits(60)]
    _reserved: u64,
}
3827
/// 64-bit value format of the control-register intercept control register.
///
/// Each bit enables interception of the named control-register, descriptor
/// table, or MSR access (read and write intercepts are separate bits where
/// both are interceptable). Bits 29..64 are reserved and must be zero.
#[bitfield(u64)]
pub struct HvRegisterCrInterceptControl {
    pub cr0_write: bool,
    pub cr4_write: bool,
    pub xcr0_write: bool,
    pub ia32_misc_enable_read: bool,
    pub ia32_misc_enable_write: bool,
    pub msr_lstar_read: bool,
    pub msr_lstar_write: bool,
    pub msr_star_read: bool,
    pub msr_star_write: bool,
    pub msr_cstar_read: bool,
    pub msr_cstar_write: bool,
    pub apic_base_msr_read: bool,
    pub apic_base_msr_write: bool,
    pub msr_efer_read: bool,
    pub msr_efer_write: bool,
    pub gdtr_write: bool,
    pub idtr_write: bool,
    pub ldtr_write: bool,
    pub tr_write: bool,
    pub msr_sysenter_cs_write: bool,
    pub msr_sysenter_eip_write: bool,
    pub msr_sysenter_esp_write: bool,
    pub msr_sfmask_write: bool,
    pub msr_tsc_aux_write: bool,
    pub msr_sgx_launch_control_write: bool,
    pub msr_xss_write: bool,
    pub msr_scet_write: bool,
    pub msr_pls_ssp_write: bool,
    pub msr_interrupt_ssp_table_addr_write: bool,
    #[bits(35)]
    _rsvd_z: u64,
}