hvdef/
lib.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Microsoft hypervisor definitions.
5
6#![expect(missing_docs)]
7#![forbid(unsafe_code)]
8#![no_std]
9
10pub mod vbs;
11
12use bitfield_struct::bitfield;
13use core::fmt::Debug;
14use core::mem::size_of;
15use open_enum::open_enum;
16use static_assertions::const_assert;
17use zerocopy::FromBytes;
18use zerocopy::FromZeros;
19use zerocopy::Immutable;
20use zerocopy::IntoBytes;
21use zerocopy::KnownLayout;
22
/// The size, in bytes, of a hypervisor page.
pub const HV_PAGE_SIZE: u64 = 4096;
/// [`HV_PAGE_SIZE`] as a `usize`, for buffer sizing and indexing.
pub const HV_PAGE_SIZE_USIZE: usize = 4096;
/// log2 of [`HV_PAGE_SIZE`].
pub const HV_PAGE_SHIFT: u64 = 12;

/// Sentinel partition ID referring to the caller's own partition.
pub const HV_PARTITION_ID_SELF: u64 = u64::MAX;
/// Sentinel VP index referring to the calling virtual processor.
pub const HV_VP_INDEX_SELF: u32 = 0xfffffffe;

// CPUID leaf numbers for the hypervisor interface.
pub const HV_CPUID_FUNCTION_VERSION_AND_FEATURES: u32 = 0x00000001;
pub const HV_CPUID_FUNCTION_HV_VENDOR_AND_MAX_FUNCTION: u32 = 0x40000000;
pub const HV_CPUID_FUNCTION_HV_INTERFACE: u32 = 0x40000001;
pub const HV_CPUID_FUNCTION_MS_HV_VERSION: u32 = 0x40000002;
pub const HV_CPUID_FUNCTION_MS_HV_FEATURES: u32 = 0x40000003;
pub const HV_CPUID_FUNCTION_MS_HV_ENLIGHTENMENT_INFORMATION: u32 = 0x40000004;
pub const HV_CPUID_FUNCTION_MS_HV_IMPLEMENTATION_LIMITS: u32 = 0x40000005;
pub const HV_CPUID_FUNCTION_MS_HV_HARDWARE_FEATURES: u32 = 0x40000006;
pub const HV_CPUID_FUNCTION_MS_HV_ISOLATION_CONFIGURATION: u32 = 0x4000000C;

// CPUID leaf numbers for the virtualization stack.
pub const VIRTUALIZATION_STACK_CPUID_VENDOR: u32 = 0x40000080;
pub const VIRTUALIZATION_STACK_CPUID_INTERFACE: u32 = 0x40000081;
pub const VIRTUALIZATION_STACK_CPUID_PROPERTIES: u32 = 0x40000082;

/// The result of querying the VIRTUALIZATION_STACK_CPUID_PROPERTIES leaf.
///
/// The current partition is considered "portable": the virtualization stack may
/// attempt to bring up the partition on another physical machine.
pub const VS1_PARTITION_PROPERTIES_EAX_IS_PORTABLE: u32 = 0x000000001;
/// The current partition has a synthetic debug device available to it.
pub const VS1_PARTITION_PROPERTIES_EAX_DEBUG_DEVICE_PRESENT: u32 = 0x000000002;
/// Extended I/O APIC RTEs are supported for the current partition.
pub const VS1_PARTITION_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE: u32 = 0x000000004;
/// Confidential VMBus is available.
pub const VS1_PARTITION_PROPERTIES_EAX_CONFIDENTIAL_VMBUS_AVAILABLE: u32 = 0x000000008;

/// SMCCC UID for the Microsoft Hypervisor.
pub const VENDOR_HYP_UID_MS_HYPERVISOR: [u32; 4] = [0x4d32ba58, 0xcd244764, 0x8eef6c75, 0x16597024];
58
/// Partition privilege flags.
///
/// Per the comments below, the low half gates access to synthetic MSRs and
/// the high half gates access to hypercalls. Do not reorder fields: the
/// declaration order defines the bit layout.
#[bitfield(u64)]
pub struct HvPartitionPrivilege {
    // access to virtual msrs
    pub access_vp_runtime_msr: bool,
    pub access_partition_reference_counter: bool,
    pub access_synic_msrs: bool,
    pub access_synthetic_timer_msrs: bool,
    pub access_apic_msrs: bool,
    pub access_hypercall_msrs: bool,
    pub access_vp_index: bool,
    pub access_reset_msr: bool,
    pub access_stats_msr: bool,
    pub access_partition_reference_tsc: bool,
    pub access_guest_idle_msr: bool,
    pub access_frequency_msrs: bool,
    pub access_debug_msrs: bool,
    pub access_reenlightenment_ctrls: bool,
    pub access_root_scheduler_msr: bool,
    pub access_tsc_invariant_controls: bool,
    _reserved1: u16,

    // Access to hypercalls
    pub create_partitions: bool,
    pub access_partition_id: bool,
    pub access_memory_pool: bool,
    pub adjust_message_buffers: bool,
    pub post_messages: bool,
    pub signal_events: bool,
    pub create_port: bool,
    pub connect_port: bool,
    pub access_stats: bool,
    #[bits(2)]
    _reserved2: u64,
    pub debugging: bool,
    pub cpu_management: bool,
    pub configure_profiler: bool,
    pub access_vp_exit_tracing: bool,
    pub enable_extended_gva_ranges_flush_va_list: bool,
    pub access_vsm: bool,
    pub access_vp_registers: bool,
    _unused_bit: bool,
    pub fast_hypercall_output: bool,
    pub enable_extended_hypercalls: bool,
    pub start_virtual_processor: bool,
    pub isolation: bool,
    #[bits(9)]
    _reserved3: u64,
}
107
open_enum! {
    /// The isolation architecture of a partition.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvPartitionIsolationType: u8 {
        NONE = 0,
        VBS = 1,
        SNP = 2,
        TDX = 3,
    }
}
117
/// Hypervisor feature flags, packed as the four 32-bit CPUID output
/// registers (see [`HvFeatures::from_cpuid`]). Field order defines the bit
/// layout; do not reorder.
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvFeatures {
    /// The privileges granted to this partition.
    #[bits(64)]
    pub privileges: HvPartitionPrivilege,

    #[bits(4)]
    pub max_supported_cstate: u32,
    pub hpet_needed_for_c3_power_state_deprecated: bool,
    pub invariant_mperf_available: bool,
    pub supervisor_shadow_stack_available: bool,
    pub arch_pmu_available: bool,
    pub exception_trap_intercept_available: bool,
    #[bits(23)]
    reserved: u32,

    pub mwait_available_deprecated: bool,
    pub guest_debugging_available: bool,
    pub performance_monitors_available: bool,
    pub cpu_dynamic_partitioning_available: bool,
    pub xmm_registers_for_fast_hypercall_available: bool,
    pub guest_idle_available: bool,
    pub hypervisor_sleep_state_support_available: bool,
    pub numa_distance_query_available: bool,
    pub frequency_regs_available: bool,
    pub synthetic_machine_check_available: bool,
    pub guest_crash_regs_available: bool,
    pub debug_regs_available: bool,
    pub npiep1_available: bool,
    pub disable_hypervisor_available: bool,
    pub extended_gva_ranges_for_flush_virtual_address_list_available: bool,
    pub fast_hypercall_output_available: bool,
    pub svm_features_available: bool,
    pub sint_polling_mode_available: bool,
    pub hypercall_msr_lock_available: bool,
    pub direct_synthetic_timers: bool,
    pub register_pat_available: bool,
    pub register_bndcfgs_available: bool,
    pub watchdog_timer_available: bool,
    pub synthetic_time_unhalted_timer_available: bool,
    pub device_domains_available: bool,    // HDK only.
    pub s1_device_domains_available: bool, // HDK only.
    pub lbr_available: bool,
    pub ipt_available: bool,
    pub cross_vtl_flush_available: bool,
    pub idle_spec_ctrl_available: bool,
    pub translate_gva_flags_available: bool,
    pub apic_eoi_intercept_available: bool,
}
167
impl HvFeatures {
    /// Reinterprets the four raw CPUID output registers as feature flags.
    pub fn from_cpuid(cpuid: [u32; 4]) -> Self {
        zerocopy::transmute!(cpuid)
    }

    /// Converts the feature flags back into the four raw CPUID registers.
    pub fn into_cpuid(self) -> [u32; 4] {
        zerocopy::transmute!(self)
    }
}
177
/// Enlightenment recommendations, packed as the four 32-bit CPUID output
/// registers (see [`HvEnlightenmentInformation::from_cpuid`]). Field order
/// defines the bit layout; do not reorder.
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvEnlightenmentInformation {
    pub use_hypercall_for_address_space_switch: bool,
    pub use_hypercall_for_local_flush: bool,
    pub use_hypercall_for_remote_flush_and_local_flush_entire: bool,
    pub use_apic_msrs: bool,
    pub use_hv_register_for_reset: bool,
    pub use_relaxed_timing: bool,
    pub use_dma_remapping_deprecated: bool,
    pub use_interrupt_remapping_deprecated: bool,
    pub use_x2_apic_msrs: bool,
    pub deprecate_auto_eoi: bool,
    pub use_synthetic_cluster_ipi: bool,
    pub use_ex_processor_masks: bool,
    pub nested: bool,
    pub use_int_for_mbec_system_calls: bool,
    pub use_vmcs_enlightenments: bool,
    pub use_synced_timeline: bool,
    pub core_scheduler_requested: bool,
    pub use_direct_local_flush_entire: bool,
    pub no_non_architectural_core_sharing: bool,
    pub use_x2_apic: bool,
    pub restore_time_on_resume: bool,
    pub use_hypercall_for_mmio_access: bool,
    pub use_gpa_pinning_hypercall: bool,
    pub wake_vps: bool,
    _reserved: u8,
    pub long_spin_wait_count: u32,
    #[bits(7)]
    pub implemented_physical_address_bits: u32,
    #[bits(25)]
    _reserved1: u32,
    _reserved2: u32,
}
213
impl HvEnlightenmentInformation {
    /// Reinterprets the four raw CPUID output registers as enlightenment
    /// information.
    pub fn from_cpuid(cpuid: [u32; 4]) -> Self {
        zerocopy::transmute!(cpuid)
    }

    /// Converts the enlightenment information back into the four raw CPUID
    /// registers.
    pub fn into_cpuid(self) -> [u32; 4] {
        zerocopy::transmute!(self)
    }
}
223
/// Hardware feature flags, packed as four 32-bit CPUID output registers.
/// Field order defines the bit layout; do not reorder.
#[bitfield(u128)]
pub struct HvHardwareFeatures {
    pub apic_overlay_assist_in_use: bool,
    pub msr_bitmaps_in_use: bool,
    pub architectural_performance_counters_in_use: bool,
    pub second_level_address_translation_in_use: bool,
    pub dma_remapping_in_use: bool,
    pub interrupt_remapping_in_use: bool,
    pub memory_patrol_scrubber_present: bool,
    pub dma_protection_in_use: bool,
    pub hpet_requested: bool,
    pub synthetic_timers_volatile: bool,
    #[bits(4)]
    pub hypervisor_level: u32,
    pub physical_destination_mode_required: bool,
    pub use_vmfunc_for_alias_map_switch: bool,
    pub hv_register_for_memory_zeroing_supported: bool,
    pub unrestricted_guest_supported: bool,
    pub rdt_afeatures_supported: bool,
    pub rdt_mfeatures_supported: bool,
    pub child_perfmon_pmu_supported: bool,
    pub child_perfmon_lbr_supported: bool,
    pub child_perfmon_ipt_supported: bool,
    pub apic_emulation_supported: bool,
    pub child_x2_apic_recommended: bool,
    pub hardware_watchdog_reserved: bool,
    pub device_access_tracking_supported: bool,
    pub hardware_gpa_access_tracking_supported: bool,
    #[bits(4)]
    _reserved: u32,

    pub device_domain_input_width: u8,
    #[bits(24)]
    _reserved1: u32,
    _reserved2: u32,
    _reserved3: u32,
}
261
262#[bitfield(u128)]
263pub struct HvIsolationConfiguration {
264    pub paravisor_present: bool,
265    #[bits(31)]
266    pub _reserved0: u32,
267
268    #[bits(4)]
269    pub isolation_type: u8,
270    _reserved11: bool,
271    pub shared_gpa_boundary_active: bool,
272    #[bits(6)]
273    pub shared_gpa_boundary_bits: u8,
274    #[bits(20)]
275    _reserved12: u32,
276    _reserved2: u32,
277    _reserved3: u32,
278}
279
open_enum! {
    /// Hypercall call codes, as passed in the `code` field of the hypercall
    /// input [`hypercall::Control`] value.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HypercallCode: u16 {
        #![expect(non_upper_case_globals)]

        HvCallSwitchVirtualAddressSpace = 0x0001,
        HvCallFlushVirtualAddressSpace = 0x0002,
        HvCallFlushVirtualAddressList = 0x0003,
        HvCallNotifyLongSpinWait = 0x0008,
        HvCallInvokeHypervisorDebugger = 0x000a,
        HvCallSendSyntheticClusterIpi = 0x000b,
        HvCallModifyVtlProtectionMask = 0x000c,
        HvCallEnablePartitionVtl = 0x000d,
        HvCallEnableVpVtl = 0x000f,
        HvCallVtlCall = 0x0011,
        HvCallVtlReturn = 0x0012,
        HvCallFlushVirtualAddressSpaceEx = 0x0013,
        HvCallFlushVirtualAddressListEx = 0x0014,
        HvCallSendSyntheticClusterIpiEx = 0x0015,
        HvCallInstallIntercept = 0x004d,
        HvCallGetVpRegisters = 0x0050,
        HvCallSetVpRegisters = 0x0051,
        HvCallTranslateVirtualAddress = 0x0052,
        HvCallPostMessage = 0x005C,
        HvCallSignalEvent = 0x005D,
        HvCallOutputDebugCharacter = 0x0071,
        HvCallGetSystemProperty = 0x007b,
        HvCallRetargetDeviceInterrupt = 0x007e,
        HvCallNotifyPartitionEvent = 0x0087,
        HvCallAssertVirtualInterrupt = 0x0094,
        HvCallStartVirtualProcessor = 0x0099,
        HvCallGetVpIndexFromApicId = 0x009A,
        HvCallTranslateVirtualAddressEx = 0x00AC,
        HvCallCheckForIoIntercept = 0x00ad,
        HvCallFlushGuestPhysicalAddressSpace = 0x00AF,
        HvCallFlushGuestPhysicalAddressList = 0x00B0,
        HvCallSignalEventDirect = 0x00C0,
        HvCallPostMessageDirect = 0x00C1,
        HvCallCheckSparseGpaPageVtlAccess = 0x00D4,
        HvCallAcceptGpaPages = 0x00D9,
        HvCallModifySparseGpaPageHostVisibility = 0x00DB,
        HvCallMemoryMappedIoRead = 0x0106,
        HvCallMemoryMappedIoWrite = 0x0107,
        HvCallPinGpaPageRanges = 0x0112,
        HvCallUnpinGpaPageRanges = 0x0113,
        HvCallQuerySparseGpaPageHostVisibility = 0x011C,

        // Extended hypercalls.
        HvExtCallQueryCapabilities = 0x8001,

        // VBS guest calls.
        HvCallVbsVmCallReport = 0xC001,
    }
}
334
// Synthetic MSR numbers exposed to x64 guests.
pub const HV_X64_MSR_GUEST_OS_ID: u32 = 0x40000000;
pub const HV_X64_MSR_HYPERCALL: u32 = 0x40000001;
pub const HV_X64_MSR_VP_INDEX: u32 = 0x40000002;
pub const HV_X64_MSR_TIME_REF_COUNT: u32 = 0x40000020;
pub const HV_X64_MSR_REFERENCE_TSC: u32 = 0x40000021;
pub const HV_X64_MSR_TSC_FREQUENCY: u32 = 0x40000022;
pub const HV_X64_MSR_APIC_FREQUENCY: u32 = 0x40000023;
// Synthetic APIC access MSRs.
pub const HV_X64_MSR_EOI: u32 = 0x40000070;
pub const HV_X64_MSR_ICR: u32 = 0x40000071;
pub const HV_X64_MSR_TPR: u32 = 0x40000072;
pub const HV_X64_MSR_VP_ASSIST_PAGE: u32 = 0x40000073;
// Synthetic interrupt controller (SynIC) MSRs.
pub const HV_X64_MSR_SCONTROL: u32 = 0x40000080;
pub const HV_X64_MSR_SVERSION: u32 = 0x40000081;
pub const HV_X64_MSR_SIEFP: u32 = 0x40000082;
pub const HV_X64_MSR_SIMP: u32 = 0x40000083;
pub const HV_X64_MSR_EOM: u32 = 0x40000084;
pub const HV_X64_MSR_SINT0: u32 = 0x40000090;
pub const HV_X64_MSR_SINT1: u32 = 0x40000091;
pub const HV_X64_MSR_SINT2: u32 = 0x40000092;
pub const HV_X64_MSR_SINT3: u32 = 0x40000093;
pub const HV_X64_MSR_SINT4: u32 = 0x40000094;
pub const HV_X64_MSR_SINT5: u32 = 0x40000095;
pub const HV_X64_MSR_SINT6: u32 = 0x40000096;
pub const HV_X64_MSR_SINT7: u32 = 0x40000097;
pub const HV_X64_MSR_SINT8: u32 = 0x40000098;
pub const HV_X64_MSR_SINT9: u32 = 0x40000099;
pub const HV_X64_MSR_SINT10: u32 = 0x4000009a;
pub const HV_X64_MSR_SINT11: u32 = 0x4000009b;
pub const HV_X64_MSR_SINT12: u32 = 0x4000009c;
pub const HV_X64_MSR_SINT13: u32 = 0x4000009d;
pub const HV_X64_MSR_SINT14: u32 = 0x4000009e;
pub const HV_X64_MSR_SINT15: u32 = 0x4000009f;
// Synthetic timer MSRs (one config/count pair per timer, NUM_TIMERS total).
pub const HV_X64_MSR_STIMER0_CONFIG: u32 = 0x400000b0;
pub const HV_X64_MSR_STIMER0_COUNT: u32 = 0x400000b1;
pub const HV_X64_MSR_STIMER1_CONFIG: u32 = 0x400000b2;
pub const HV_X64_MSR_STIMER1_COUNT: u32 = 0x400000b3;
pub const HV_X64_MSR_STIMER2_CONFIG: u32 = 0x400000b4;
pub const HV_X64_MSR_STIMER2_COUNT: u32 = 0x400000b5;
pub const HV_X64_MSR_STIMER3_CONFIG: u32 = 0x400000b6;
pub const HV_X64_MSR_STIMER3_COUNT: u32 = 0x400000b7;
pub const HV_X64_MSR_GUEST_IDLE: u32 = 0x400000F0;
// Guest crash reporting MSRs (see [`GuestCrashCtl`]).
pub const HV_X64_MSR_GUEST_CRASH_P0: u32 = 0x40000100;
pub const HV_X64_MSR_GUEST_CRASH_P1: u32 = 0x40000101;
pub const HV_X64_MSR_GUEST_CRASH_P2: u32 = 0x40000102;
pub const HV_X64_MSR_GUEST_CRASH_P3: u32 = 0x40000103;
pub const HV_X64_MSR_GUEST_CRASH_P4: u32 = 0x40000104;
pub const HV_X64_MSR_GUEST_CRASH_CTL: u32 = 0x40000105;

/// The number of crash parameter MSRs (`HV_X64_MSR_GUEST_CRASH_P0..=P4`).
pub const HV_X64_GUEST_CRASH_PARAMETER_MSRS: usize = 5;
384
/// A hypervisor status code.
///
/// The non-success status codes are defined in [`HvError`]. Zero means
/// success (see [`HvStatus::SUCCESS`]).
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
#[repr(transparent)]
pub struct HvStatus(pub u16);
391
392impl HvStatus {
393    /// The success status code.
394    pub const SUCCESS: Self = Self(0);
395
396    /// Returns `Ok(())` if this is `HvStatus::SUCCESS`, otherwise returns an
397    /// `Err(err)` where `err` is the corresponding `HvError`.
398    pub fn result(self) -> HvResult<()> {
399        if let Ok(err) = self.0.try_into() {
400            Err(HvError(err))
401        } else {
402            Ok(())
403        }
404    }
405
406    /// Returns true if this is `HvStatus::SUCCESS`.
407    pub fn is_ok(self) -> bool {
408        self == Self::SUCCESS
409    }
410
411    /// Returns true if this is not `HvStatus::SUCCESS`.
412    pub fn is_err(self) -> bool {
413        self != Self::SUCCESS
414    }
415
416    const fn from_bits(bits: u16) -> Self {
417        Self(bits)
418    }
419
420    const fn into_bits(self) -> u16 {
421        self.0
422    }
423}
424
425impl From<Result<(), HvError>> for HvStatus {
426    fn from(err: Result<(), HvError>) -> Self {
427        err.err().map_or(Self::SUCCESS, |err| Self(err.0.get()))
428    }
429}
430
431impl Debug for HvStatus {
432    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
433        match self.result() {
434            Ok(()) => f.write_str("Success"),
435            Err(err) => Debug::fmt(&err, f),
436        }
437    }
438}
439
/// An [`HvStatus`] value representing an error.
//
// DEVNOTE: use `NonZeroU16` to get a niche optimization, since 0 is reserved
// for success.
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout)]
#[repr(transparent)]
pub struct HvError(core::num::NonZeroU16);

impl From<core::num::NonZeroU16> for HvError {
    /// Wraps a raw non-zero status code.
    fn from(err: core::num::NonZeroU16) -> Self {
        Self(err)
    }
}
453
454impl Debug for HvError {
455    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
456        match self.debug_name() {
457            Some(name) => f.pad(name),
458            None => Debug::fmt(&self.0.get(), f),
459        }
460    }
461}
462
463impl core::fmt::Display for HvError {
464    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
465        match self.doc_str() {
466            Some(s) => f.write_str(s),
467            None => write!(f, "Hypervisor error {:#06x}", self.0),
468        }
469    }
470}
471
472impl core::error::Error for HvError {}
473
/// Defines one associated constant per `(doc, name, value)` triple on the
/// given error type, plus private helpers that map a raw code back to its
/// identifier (for `Debug`) and its doc string (for `Display`).
macro_rules! hv_error {
    ($ty:ty, $(#[doc = $doc:expr] $ident:ident = $val:expr),* $(,)?) => {

        #[expect(non_upper_case_globals)]
        impl $ty {
            $(
                #[doc = $doc]
                pub const $ident: Self = Self(core::num::NonZeroU16::new($val).unwrap());
            )*

            // Returns the constant's identifier, or `None` for codes not
            // defined above.
            fn debug_name(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        $val => stringify!($ident),
                    )*
                    _ => return None,
                })
            }

            // Returns the (whitespace-trimmed) doc string, or `None` for
            // codes not defined above.
            fn doc_str(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        $val => const { $doc.trim_ascii() },
                    )*
                    _ => return None,
                })
            }
        }
    };
}
504
505// DEVNOTE: the doc comments here are also used as the runtime error strings.
// NOTE(review): the doc text of each entry is surfaced verbatim at runtime
// through `HvError::doc_str` (used by `Display`), so treat these doc comments
// as part of the program's output, not just documentation.
hv_error! {
    HvError,
    /// Invalid hypercall code
    InvalidHypercallCode = 0x0002,
    /// Invalid hypercall input
    InvalidHypercallInput = 0x0003,
    /// Invalid alignment
    InvalidAlignment = 0x0004,
    /// Invalid parameter
    InvalidParameter = 0x0005,
    /// Access denied
    AccessDenied = 0x0006,
    /// Invalid partition state
    InvalidPartitionState = 0x0007,
    /// Operation denied
    OperationDenied = 0x0008,
    /// Unknown property
    UnknownProperty = 0x0009,
    /// Property value out of range
    PropertyValueOutOfRange = 0x000A,
    /// Insufficient memory
    InsufficientMemory = 0x000B,
    /// Partition too deep
    PartitionTooDeep = 0x000C,
    /// Invalid partition ID
    InvalidPartitionId = 0x000D,
    /// Invalid VP index
    InvalidVpIndex = 0x000E,
    /// Not found
    NotFound = 0x0010,
    /// Invalid port ID
    InvalidPortId = 0x0011,
    /// Invalid connection ID
    InvalidConnectionId = 0x0012,
    /// Insufficient buffers
    InsufficientBuffers = 0x0013,
    /// Not acknowledged
    NotAcknowledged = 0x0014,
    /// Invalid VP state
    InvalidVpState = 0x0015,
    /// Acknowledged
    Acknowledged = 0x0016,
    /// Invalid save restore state
    InvalidSaveRestoreState = 0x0017,
    /// Invalid SynIC state
    InvalidSynicState = 0x0018,
    /// Object in use
    ObjectInUse = 0x0019,
    /// Invalid proximity domain info
    InvalidProximityDomainInfo = 0x001A,
    /// No data
    NoData = 0x001B,
    /// Inactive
    Inactive = 0x001C,
    /// No resources
    NoResources = 0x001D,
    /// Feature unavailable
    FeatureUnavailable = 0x001E,
    /// Partial packet
    PartialPacket = 0x001F,
    /// Processor feature not supported
    ProcessorFeatureNotSupported = 0x0020,
    /// Processor cache line flush size incompatible
    ProcessorCacheLineFlushSizeIncompatible = 0x0030,
    /// Insufficient buffer
    InsufficientBuffer = 0x0033,
    /// Incompatible processor
    IncompatibleProcessor = 0x0037,
    /// Insufficient device domains
    InsufficientDeviceDomains = 0x0038,
    /// CPUID feature validation error
    CpuidFeatureValidationError = 0x003C,
    /// CPUID XSAVE feature validation error
    CpuidXsaveFeatureValidationError = 0x003D,
    /// Processor startup timeout
    ProcessorStartupTimeout = 0x003E,
    /// SMX enabled
    SmxEnabled = 0x003F,
    /// Invalid LP index
    InvalidLpIndex = 0x0041,
    /// Invalid register value
    InvalidRegisterValue = 0x0050,
    /// Invalid VTL state
    InvalidVtlState = 0x0051,
    /// NX not detected
    NxNotDetected = 0x0055,
    /// Invalid device ID
    InvalidDeviceId = 0x0057,
    /// Invalid device state
    InvalidDeviceState = 0x0058,
    /// Pending page requests
    PendingPageRequests = 0x0059,
    /// Page request invalid
    PageRequestInvalid = 0x0060,
    /// Key already exists
    KeyAlreadyExists = 0x0065,
    /// Device already in domain
    DeviceAlreadyInDomain = 0x0066,
    /// Invalid CPU group ID
    InvalidCpuGroupId = 0x006F,
    /// Invalid CPU group state
    InvalidCpuGroupState = 0x0070,
    /// Operation failed
    OperationFailed = 0x0071,
    /// Not allowed with nested virtualization active
    NotAllowedWithNestedVirtActive = 0x0072,
    /// Insufficient root memory
    InsufficientRootMemory = 0x0073,
    /// Event buffer already freed
    EventBufferAlreadyFreed = 0x0074,
    /// The specified timeout expired before the operation completed.
    Timeout = 0x0078,
    /// The VTL specified for the operation is already in an enabled state.
    VtlAlreadyEnabled = 0x0086,
    /// Unknown register name
    UnknownRegisterName = 0x0087,
}
623
/// A useful result type for hypervisor operations.
pub type HvResult<T> = Result<T, HvError>;

/// A virtual trust level (VTL).
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum Vtl {
    Vtl0 = 0,
    Vtl1 = 1,
    Vtl2 = 2,
}
634
635impl TryFrom<u8> for Vtl {
636    type Error = HvError;
637
638    fn try_from(value: u8) -> Result<Self, Self::Error> {
639        Ok(match value {
640            0 => Self::Vtl0,
641            1 => Self::Vtl1,
642            2 => Self::Vtl2,
643            _ => return Err(HvError::InvalidParameter),
644        })
645    }
646}
647
impl From<Vtl> for u8 {
    /// Returns the numeric value of the VTL.
    fn from(value: Vtl) -> Self {
        value as u8
    }
}
653
/// The contents of `HV_X64_MSR_GUEST_CRASH_CTL`
#[bitfield(u64)]
pub struct GuestCrashCtl {
    #[bits(58)]
    _reserved: u64,
    /// ID of the pre-OS environment
    #[bits(3)]
    pub pre_os_id: u8,
    /// Crash dump will not be captured
    #[bits(1)]
    pub no_crash_dump: bool,
    /// `HV_X64_MSR_GUEST_CRASH_P3` is the GPA of the message,
    /// `HV_X64_MSR_GUEST_CRASH_P4` is its length in bytes
    #[bits(1)]
    pub crash_message: bool,
    /// Log contents of crash parameter system registers
    #[bits(1)]
    pub crash_notify: bool,
}
673
/// A 16-byte-aligned `u128` value stored as native-endian bytes.
#[repr(C, align(16))]
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct AlignedU128([u8; 16]);

impl AlignedU128 {
    /// Returns the value as native-endian bytes.
    pub fn as_ne_bytes(&self) -> [u8; 16] {
        self.0
    }

    /// Constructs a value from native-endian bytes.
    pub fn from_ne_bytes(val: [u8; 16]) -> Self {
        Self(val)
    }
}

impl Debug for AlignedU128 {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        // Format as the numeric value rather than a raw byte array.
        Debug::fmt(&u128::from_ne_bytes(self.0), f)
    }
}
693
694impl From<u128> for AlignedU128 {
695    fn from(v: u128) -> Self {
696        Self(v.to_ne_bytes())
697    }
698}
699
700impl From<u64> for AlignedU128 {
701    fn from(v: u64) -> Self {
702        (v as u128).into()
703    }
704}
705
706impl From<u32> for AlignedU128 {
707    fn from(v: u32) -> Self {
708        (v as u128).into()
709    }
710}
711
712impl From<u16> for AlignedU128 {
713    fn from(v: u16) -> Self {
714        (v as u128).into()
715    }
716}
717
718impl From<u8> for AlignedU128 {
719    fn from(v: u8) -> Self {
720        (v as u128).into()
721    }
722}
723
724impl From<AlignedU128> for u128 {
725    fn from(v: AlignedU128) -> Self {
726        u128::from_ne_bytes(v.0)
727    }
728}
729
open_enum! {
    /// The type of a synthetic message (see [`HvMessageHeader::typ`]).
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvMessageType: u32 {
        #![expect(non_upper_case_globals)]

        HvMessageTypeNone = 0x00000000,

        HvMessageTypeUnmappedGpa = 0x80000000,
        HvMessageTypeGpaIntercept = 0x80000001,
        HvMessageTypeUnacceptedGpa = 0x80000003,
        HvMessageTypeGpaAttributeIntercept = 0x80000004,
        HvMessageTypeEnablePartitionVtlIntercept = 0x80000005,
        HvMessageTypeTimerExpired = 0x80000010,
        HvMessageTypeInvalidVpRegisterValue = 0x80000020,
        HvMessageTypeUnrecoverableException = 0x80000021,
        HvMessageTypeUnsupportedFeature = 0x80000022,
        HvMessageTypeTlbPageSizeMismatch = 0x80000023,
        HvMessageTypeIommuFault = 0x80000024,
        HvMessageTypeEventLogBufferComplete = 0x80000040,
        HvMessageTypeHypercallIntercept = 0x80000050,
        HvMessageTypeSynicEventIntercept = 0x80000060,
        HvMessageTypeSynicSintIntercept = 0x80000061,
        HvMessageTypeSynicSintDeliverable = 0x80000062,
        HvMessageTypeAsyncCallCompletion = 0x80000070,
        HvMessageTypeX64IoPortIntercept = 0x80010000,
        HvMessageTypeMsrIntercept = 0x80010001,
        HvMessageTypeX64CpuidIntercept = 0x80010002,
        HvMessageTypeExceptionIntercept = 0x80010003,
        HvMessageTypeX64ApicEoi = 0x80010004,
        HvMessageTypeX64IommuPrq = 0x80010005,
        HvMessageTypeRegisterIntercept = 0x80010006,
        HvMessageTypeX64Halt = 0x80010007,
        HvMessageTypeX64InterruptionDeliverable = 0x80010008,
        HvMessageTypeX64SipiIntercept = 0x80010009,
        HvMessageTypeX64RdtscIntercept = 0x8001000a,
        HvMessageTypeX64ApicSmiIntercept = 0x8001000b,
        HvMessageTypeArm64ResetIntercept = 0x8001000c,
        HvMessageTypeX64ApicInitSipiIntercept = 0x8001000d,
        HvMessageTypeX64ApicWriteIntercept = 0x8001000e,
        HvMessageTypeX64ProxyInterruptIntercept = 0x8001000f,
        HvMessageTypeX64IsolationCtrlRegIntercept = 0x80010010,
        HvMessageTypeX64SnpGuestRequestIntercept = 0x80010011,
        HvMessageTypeX64ExceptionTrapIntercept = 0x80010012,
        HvMessageTypeX64SevVmgexitIntercept = 0x80010013,
    }
}
776
impl Default for HvMessageType {
    /// Defaults to `HvMessageTypeNone` (0).
    fn default() -> Self {
        HvMessageType::HvMessageTypeNone
    }
}

/// The SINT index used for interception messages.
pub const HV_SYNIC_INTERCEPTION_SINT_INDEX: u8 = 0;

/// The number of synthetic interrupt sources (SINTs).
pub const NUM_SINTS: usize = 16;
/// The number of synthetic timers.
pub const NUM_TIMERS: usize = 4;
787
/// The header common to all synthetic messages.
#[repr(C)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageHeader {
    pub typ: HvMessageType,
    // Length, in bytes, of the valid portion of the payload.
    pub len: u8,
    pub flags: HvMessageFlags,
    pub rsvd: u16,
    pub id: u64,
}

/// Flags in [`HvMessageHeader`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageFlags {
    pub message_pending: bool,
    #[bits(7)]
    _reserved: u8,
}

/// The total size of a synthetic message (asserted to be 256 bytes below).
pub const HV_MESSAGE_SIZE: usize = size_of::<HvMessage>();
const_assert!(HV_MESSAGE_SIZE == 256);
/// The maximum message payload size: the 256-byte message minus its
/// 16-byte header.
pub const HV_MESSAGE_PAYLOAD_SIZE: usize = 240;

/// A synthetic message: a header plus a fixed-size payload buffer.
#[repr(C, align(16))]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessage {
    pub header: HvMessageHeader,
    pub payload_buffer: [u8; HV_MESSAGE_PAYLOAD_SIZE],
}
816
817impl Default for HvMessage {
818    fn default() -> Self {
819        Self {
820            header: FromZeros::new_zeroed(),
821            payload_buffer: [0; 240],
822        }
823    }
824}
825
826impl HvMessage {
827    /// Constructs a new message. `payload` must fit into the payload field (240
828    /// bytes limit).
829    pub fn new(typ: HvMessageType, id: u64, payload: &[u8]) -> Self {
830        let mut msg = HvMessage {
831            header: HvMessageHeader {
832                typ,
833                len: payload.len() as u8,
834                flags: HvMessageFlags::new(),
835                rsvd: 0,
836                id,
837            },
838            payload_buffer: [0; 240],
839        };
840        msg.payload_buffer[..payload.len()].copy_from_slice(payload);
841        msg
842    }
843
844    pub fn payload(&self) -> &[u8] {
845        &self.payload_buffer[..self.header.len as usize]
846    }
847
848    pub fn as_message<T: MessagePayload>(&self) -> &T {
849        // Ensure invariants are met.
850        let () = T::CHECK;
851        T::ref_from_prefix(&self.payload_buffer).unwrap().0
852    }
853
854    pub fn as_message_mut<T: MessagePayload>(&mut self) -> &T {
855        // Ensure invariants are met.
856        let () = T::CHECK;
857        T::mut_from_prefix(&mut self.payload_buffer).unwrap().0
858    }
859}
860
/// Marker trait for types that can be read out of an [`HvMessage`] payload.
pub trait MessagePayload: KnownLayout + Immutable + IntoBytes + FromBytes + Sized {
    /// Used to ensure this trait is only implemented on messages of the proper
    /// size and alignment.
    #[doc(hidden)]
    const CHECK: () = {
        assert!(size_of::<Self>() <= HV_MESSAGE_PAYLOAD_SIZE);
        assert!(align_of::<Self>() <= align_of::<HvMessage>());
    };
}
870
/// Message payload for timer messages (presumably
/// `HvMessageTypeTimerExpired`, by name — confirm against the TLFS).
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct TimerMessagePayload {
    pub timer_index: u32,
    pub reserved: u32,
    pub expiration_time: u64,
    pub delivery_time: u64,
}
879
880pub mod hypercall {
881    use super::*;
882    use core::ops::RangeInclusive;
883    use zerocopy::Unalign;
884
    /// The hypercall input value.
    #[bitfield(u64)]
    pub struct Control {
        /// The hypercall code.
        pub code: u16,
        /// If this hypercall is a fast hypercall.
        pub fast: bool,
        /// The variable header size, in qwords.
        #[bits(10)]
        pub variable_header_size: usize,
        // Reserved.
        #[bits(4)]
        _rsvd0: u8,
        /// Specifies that the hypercall should be handled by the L0 hypervisor in a nested environment.
        pub nested: bool,
        /// The element count for rep hypercalls.
        #[bits(12)]
        pub rep_count: usize,
        // Reserved.
        #[bits(4)]
        _rsvd1: u8,
        /// The first element to start processing in a rep hypercall.
        #[bits(12)]
        pub rep_start: usize,
        // Reserved.
        #[bits(4)]
        _rsvd2: u8,
    }
910
    /// The hypercall output value returned to the guest.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[must_use]
    pub struct HypercallOutput {
        /// The status of the call ([`HvStatus::SUCCESS`] on success).
        #[bits(16)]
        pub call_status: HvStatus,
        pub rsvd: u16,
        /// The count of elements processed (for rep hypercalls).
        #[bits(12)]
        pub elements_processed: usize,
        #[bits(20)]
        pub rsvd2: u32,
    }
924
925    impl From<HvError> for HypercallOutput {
926        fn from(e: HvError) -> Self {
927            Self::new().with_call_status(Err(e).into())
928        }
929    }
930
    impl HypercallOutput {
        /// A success output with zero elements processed.
        pub const SUCCESS: Self = Self::new();

        /// Returns `Ok(())` if the call status indicates success, or the
        /// corresponding [`HvError`] otherwise.
        pub fn result(&self) -> Result<(), HvError> {
            self.call_status().result()
        }
    }
939
    /// A register name/value pair, as used by the get/set VP registers
    /// hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvRegisterAssoc {
        /// The name of the register.
        pub name: HvRegisterName,
        /// Padding; must be zero.
        pub pad: [u32; 3],
        /// The value of the register.
        pub value: HvRegisterValue,
    }

    impl<N: Into<HvRegisterName>, T: Into<HvRegisterValue>> From<(N, T)> for HvRegisterAssoc {
        fn from((name, value): (N, T)) -> Self {
            Self {
                name: name.into(),
                pad: [0; 3],
                value: value.into(),
            }
        }
    }

    impl<N: Copy + Into<HvRegisterName>, T: Copy + Into<HvRegisterValue>> From<&(N, T)>
        for HvRegisterAssoc
    {
        fn from(&(name, value): &(N, T)) -> Self {
            Self {
                name: name.into(),
                pad: [0; 3],
                value: value.into(),
            }
        }
    }
969
    /// Contents of the guest hypercall MSR, which establishes the hypercall
    /// page.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MsrHypercallContents {
        /// Whether the hypercall page is enabled.
        pub enable: bool,
        /// Whether the MSR contents are locked.
        pub locked: bool,
        /// Reserved.
        #[bits(10)]
        pub reserved_p: u64,
        /// The guest page number of the hypercall page.
        #[bits(52)]
        pub gpn: u64,
    }
980
    /// Hypercall input for posting a message to a connection.
    #[repr(C, align(8))]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessage {
        /// The target connection ID.
        pub connection_id: u32,
        /// Padding; must be zero.
        pub padding: u32,
        /// The message type.
        pub message_type: u32,
        /// The number of valid bytes in `payload`.
        pub payload_size: u32,
        /// The message payload.
        pub payload: [u8; 240],
    }
990
    /// Hypercall input for signaling an event on a connection.
    #[repr(C, align(8))]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEvent {
        /// The target connection ID.
        pub connection_id: u32,
        /// The event flag number to signal.
        pub flag_number: u16,
        /// Reserved.
        pub rsvd: u16,
    }
998
    /// Hypercall input for posting a message directly to a VP's SINT.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessageDirect {
        /// The target partition ID.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The target VTL.
        pub vtl: u8,
        /// Padding; must be zero.
        pub padding0: [u8; 3],
        /// The target synthetic interrupt source.
        pub sint: u8,
        /// Padding; must be zero.
        pub padding1: [u8; 3],
        /// The message to post. `Unalign` is needed since this field's offset
        /// (20 bytes) is not naturally aligned for `HvMessage`.
        pub message: Unalign<HvMessage>,
        /// Padding; must be zero.
        pub padding2: u32,
    }
1011
    /// Hypercall input for signaling an event directly on a VP's SINT.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirect {
        /// The target partition ID.
        pub target_partition: u64,
        /// The target virtual processor index.
        pub target_vp: u32,
        /// The target VTL.
        pub target_vtl: u8,
        /// The target synthetic interrupt source.
        pub target_sint: u8,
        /// The event flag number to signal.
        pub flag_number: u16,
    }
1021
    /// Output of the signal event direct hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirectOutput {
        /// Whether the event was newly signaled (nonzero) or already pending.
        pub newly_signaled: u8,
        /// Reserved.
        pub rsvd: [u8; 7],
    }
1028
    /// A description of a device interrupt source and its data.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptEntry {
        /// The kind of interrupt source.
        pub source: HvInterruptSource,
        /// Reserved.
        pub rsvd: u32,
        /// Source-specific interrupt data.
        pub data: [u32; 2],
    }

    open_enum! {
        /// The kind of source of a device interrupt.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterruptSource: u32 {
            MSI = 1,
            IO_APIC = 2,
        }
    }
1044
    /// The target of a device interrupt.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptTarget {
        /// The interrupt vector.
        pub vector: u32,
        /// Flags controlling how `mask_or_format` is interpreted.
        pub flags: HvInterruptTargetFlags,
        /// A processor mask, or a processor-set format value when
        /// `flags.processor_set()` is set.
        pub mask_or_format: u64,
    }

    /// Flags for [`InterruptTarget`].
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInterruptTargetFlags {
        /// Whether the interrupt is multicast.
        pub multicast: bool,
        /// Whether the target is described by a processor set rather than a mask.
        pub processor_set: bool,
        /// Reserved.
        #[bits(30)]
        pub reserved: u32,
    }
1061
    /// Raw flag value corresponding to [`HvInterruptTargetFlags::multicast`].
    pub const HV_DEVICE_INTERRUPT_TARGET_MULTICAST: u32 = 1;
    /// Raw flag value corresponding to [`HvInterruptTargetFlags::processor_set`].
    pub const HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET: u32 = 2;

    /// Generic set format value indicating a sparse set of 4K banks.
    pub const HV_GENERIC_SET_SPARSE_4K: u64 = 0;
    /// Generic set format value indicating all entries.
    pub const HV_GENERIC_SET_ALL: u64 = 1;
1067
    /// Hypercall input for retargeting a device interrupt.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct RetargetDeviceInterrupt {
        /// The ID of the partition whose interrupt is being retargeted.
        pub partition_id: u64,
        /// The ID of the device whose interrupt is being retargeted.
        pub device_id: u64,
        /// The interrupt to retarget.
        pub entry: InterruptEntry,
        /// Reserved.
        pub rsvd: u64,
        /// The new target for the interrupt.
        pub target_header: InterruptTarget,
    }
1077
    /// A VTL selector used in many hypercall inputs.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInputVtl {
        /// The VTL to target, used only when `use_target_vtl` is set.
        #[bits(4)]
        pub target_vtl_value: u8,
        /// Whether `target_vtl_value` should be used; otherwise the current
        /// VTL is targeted.
        pub use_target_vtl: bool,
        /// Reserved.
        #[bits(3)]
        pub reserved: u8,
    }

    impl From<Vtl> for HvInputVtl {
        fn from(value: Vtl) -> Self {
            Self::from(Some(value))
        }
    }

    impl From<Option<Vtl>> for HvInputVtl {
        /// `None` targets the current VTL.
        fn from(value: Option<Vtl>) -> Self {
            Self::new()
                .with_use_target_vtl(value.is_some())
                .with_target_vtl_value(value.map_or(0, Into::into))
        }
    }

    impl HvInputVtl {
        /// Returns the explicitly targeted VTL.
        ///
        /// None = target current vtl
        pub fn target_vtl(&self) -> Result<Option<Vtl>, HvError> {
            // Reserved bits must be zero for the input to be valid.
            if self.reserved() != 0 {
                return Err(HvError::InvalidParameter);
            }
            if self.use_target_vtl() {
                Ok(Some(self.target_vtl_value().try_into()?))
            } else {
                Ok(None)
            }
        }

        /// A selector that targets the current VTL.
        pub const CURRENT_VTL: Self = Self::new();
    }
1117
    /// Input header for the get/set VP registers hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetSetVpRegisters {
        /// The target partition ID.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The target VTL.
        pub target_vtl: HvInputVtl,
        /// Reserved.
        pub rsvd: [u8; 3],
    }
1126
    open_enum::open_enum! {
        /// Microsoft operating system identifiers for the guest OS ID.
        #[derive(Default)]
        pub enum HvGuestOsMicrosoftIds: u8 {
            UNDEFINED = 0x00,
            MSDOS = 0x01,
            WINDOWS_3X = 0x02,
            WINDOWS_9X = 0x03,
            WINDOWS_NT = 0x04,
            WINDOWS_CE = 0x05,
        }
    }
1138
    /// Guest OS identity in the Microsoft format.
    #[bitfield(u64)]
    pub struct HvGuestOsMicrosoft {
        /// Reserved.
        #[bits(40)]
        _rsvd: u64,
        /// The operating system ID; see [`HvGuestOsMicrosoftIds`].
        #[bits(8)]
        pub os_id: u8,
        // The top bit must be zero and the least significant 15 bits holds the value of the vendor id.
        #[bits(16)]
        pub vendor_id: u16,
    }
1149
    open_enum::open_enum! {
        /// Open-source operating system identifiers for the guest OS ID.
        #[derive(Default)]
        pub enum HvGuestOsOpenSourceType: u8 {
            UNDEFINED = 0x00,
            LINUX = 0x01,
            FREEBSD = 0x02,
            XEN = 0x03,
            ILLUMOS = 0x04,
        }
    }
1160
    /// Guest OS identity in the open-source format.
    #[bitfield(u64)]
    pub struct HvGuestOsOpenSource {
        /// The OS build number.
        #[bits(16)]
        pub build_no: u16,
        /// The OS version.
        #[bits(32)]
        pub version: u32,
        /// The operating system ID; see [`HvGuestOsOpenSourceType`].
        #[bits(8)]
        pub os_id: u8,
        /// The OS type.
        #[bits(7)]
        pub os_type: u8,
        /// Set to indicate the open-source format.
        #[bits(1)]
        pub is_open_source: bool,
    }
1174
    /// The guest OS identity value.
    #[bitfield(u64)]
    pub struct HvGuestOsId {
        #[bits(63)]
        _rsvd: u64,
        // Determines whether the value is interpreted in the Microsoft or the
        // open-source format.
        is_open_source: bool,
    }

    impl HvGuestOsId {
        /// Returns the Microsoft-format view of the ID, if the open-source bit
        /// is clear.
        pub fn microsoft(&self) -> Option<HvGuestOsMicrosoft> {
            (!self.is_open_source()).then(|| HvGuestOsMicrosoft::from(u64::from(*self)))
        }

        /// Returns the open-source-format view of the ID, if the open-source
        /// bit is set.
        pub fn open_source(&self) -> Option<HvGuestOsOpenSource> {
            (self.is_open_source()).then(|| HvGuestOsOpenSource::from(u64::from(*self)))
        }

        /// Returns the raw 64-bit value.
        pub fn as_u64(&self) -> u64 {
            self.0
        }
    }
1195
    /// Intercept no accesses.
    pub const HV_INTERCEPT_ACCESS_MASK_NONE: u32 = 0x00;
    /// Intercept read accesses.
    pub const HV_INTERCEPT_ACCESS_MASK_READ: u32 = 0x01;
    /// Intercept write accesses.
    pub const HV_INTERCEPT_ACCESS_MASK_WRITE: u32 = 0x02;
    /// Intercept both read and write accesses.
    pub const HV_INTERCEPT_ACCESS_MASK_READ_WRITE: u32 =
        HV_INTERCEPT_ACCESS_MASK_READ | HV_INTERCEPT_ACCESS_MASK_WRITE;
    /// Intercept execute accesses.
    pub const HV_INTERCEPT_ACCESS_MASK_EXECUTE: u32 = 0x04;
1202
    open_enum::open_enum! {
        /// The type of intercept to install; determines how the
        /// accompanying [`HvInterceptParameters`] are interpreted.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterceptType: u32 {
            #![expect(non_upper_case_globals)]
            HvInterceptTypeX64IoPort = 0x00000000,
            HvInterceptTypeX64Msr = 0x00000001,
            HvInterceptTypeX64Cpuid = 0x00000002,
            HvInterceptTypeException = 0x00000003,
            HvInterceptTypeHypercall = 0x00000008,
            HvInterceptTypeUnknownSynicConnection = 0x0000000D,
            HvInterceptTypeX64ApicEoi = 0x0000000E,
            HvInterceptTypeRetargetInterruptWithUnknownDeviceId = 0x0000000F,
            HvInterceptTypeX64IoPortRange = 0x00000011,
        }
    }
1218
    /// Type-specific intercept parameters, interpreted according to the
    /// accompanying [`HvInterceptType`].
    #[repr(transparent)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct HvInterceptParameters(u64);
1222
1223    impl HvInterceptParameters {
1224        pub fn new_io_port(port: u16) -> Self {
1225            Self(port as u64)
1226        }
1227
1228        pub fn new_io_port_range(ports: RangeInclusive<u16>) -> Self {
1229            let base = *ports.start() as u64;
1230            let end = *ports.end() as u64;
1231            Self(base | (end << 16))
1232        }
1233
1234        pub fn new_exception(vector: u16) -> Self {
1235            Self(vector as u64)
1236        }
1237
1238        pub fn io_port(&self) -> u16 {
1239            self.0 as u16
1240        }
1241
1242        pub fn io_port_range(&self) -> RangeInclusive<u16> {
1243            let base = self.0 as u16;
1244            let end = (self.0 >> 16) as u16;
1245            base..=end
1246        }
1247
1248        pub fn cpuid_index(&self) -> u32 {
1249            self.0 as u32
1250        }
1251
1252        pub fn exception(&self) -> u16 {
1253            self.0 as u16
1254        }
1255    }
1256
    /// Hypercall input for installing an intercept.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct InstallIntercept {
        /// The target partition ID.
        pub partition_id: u64,
        /// The accesses to intercept (`HV_INTERCEPT_ACCESS_MASK_*`).
        pub access_type_mask: u32,
        /// The type of intercept to install.
        pub intercept_type: HvInterceptType,
        /// Type-specific intercept parameters.
        pub intercept_parameters: HvInterceptParameters,
    }
1265
    /// Hypercall input for asserting a virtual interrupt.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct AssertVirtualInterrupt {
        /// The target partition ID.
        pub partition_id: u64,
        /// Controls describing the interrupt to assert.
        pub interrupt_control: HvInterruptControl,
        /// The destination address of the interrupt.
        pub destination_address: u64,
        /// The requested interrupt vector.
        pub requested_vector: u32,
        /// The target VTL.
        pub target_vtl: u8,
        /// Reserved.
        pub rsvd0: u8,
        /// Reserved.
        pub rsvd1: u16,
    }
1277
    /// Hypercall input for starting an x64 virtual processor.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorX64 {
        /// The target partition ID.
        pub partition_id: u64,
        /// The index of the VP to start.
        pub vp_index: u32,
        /// The VTL to start.
        pub target_vtl: u8,
        /// Reserved.
        pub rsvd0: u8,
        /// Reserved.
        pub rsvd1: u16,
        /// The initial processor context.
        pub vp_context: InitialVpContextX64,
    }
1288
    /// The initial register state for an x64 virtual processor.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextX64 {
        pub rip: u64,
        pub rsp: u64,
        pub rflags: u64,
        pub cs: HvX64SegmentRegister,
        pub ds: HvX64SegmentRegister,
        pub es: HvX64SegmentRegister,
        pub fs: HvX64SegmentRegister,
        pub gs: HvX64SegmentRegister,
        pub ss: HvX64SegmentRegister,
        pub tr: HvX64SegmentRegister,
        pub ldtr: HvX64SegmentRegister,
        pub idtr: HvX64TableRegister,
        pub gdtr: HvX64TableRegister,
        pub efer: u64,
        pub cr0: u64,
        pub cr3: u64,
        pub cr4: u64,
        /// The PAT MSR value.
        pub msr_cr_pat: u64,
    }
1311
    /// Hypercall input for starting an ARM64 virtual processor.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorArm64 {
        /// The target partition ID.
        pub partition_id: u64,
        /// The index of the VP to start.
        pub vp_index: u32,
        /// The VTL to start.
        pub target_vtl: u8,
        /// Reserved.
        pub rsvd0: u8,
        /// Reserved.
        pub rsvd1: u16,
        /// The initial processor context.
        pub vp_context: InitialVpContextArm64,
    }
1322
    /// The initial register state for an ARM64 virtual processor.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextArm64 {
        pub pc: u64,
        pub sp_elh: u64,
        pub sctlr_el1: u64,
        pub mair_el1: u64,
        pub tcr_el1: u64,
        pub vbar_el1: u64,
        pub ttbr0_el1: u64,
        pub ttbr1_el1: u64,
        pub x18: u64,
    }
1336
1337    impl InitialVpContextX64 {
1338        pub fn as_hv_register_assocs(&self) -> impl Iterator<Item = HvRegisterAssoc> + '_ {
1339            let regs = [
1340                (HvX64RegisterName::Rip, HvRegisterValue::from(self.rip)).into(),
1341                (HvX64RegisterName::Rsp, HvRegisterValue::from(self.rsp)).into(),
1342                (
1343                    HvX64RegisterName::Rflags,
1344                    HvRegisterValue::from(self.rflags),
1345                )
1346                    .into(),
1347                (HvX64RegisterName::Cs, HvRegisterValue::from(self.cs)).into(),
1348                (HvX64RegisterName::Ds, HvRegisterValue::from(self.ds)).into(),
1349                (HvX64RegisterName::Es, HvRegisterValue::from(self.es)).into(),
1350                (HvX64RegisterName::Fs, HvRegisterValue::from(self.fs)).into(),
1351                (HvX64RegisterName::Gs, HvRegisterValue::from(self.gs)).into(),
1352                (HvX64RegisterName::Ss, HvRegisterValue::from(self.ss)).into(),
1353                (HvX64RegisterName::Tr, HvRegisterValue::from(self.tr)).into(),
1354                (HvX64RegisterName::Ldtr, HvRegisterValue::from(self.ldtr)).into(),
1355                (HvX64RegisterName::Idtr, HvRegisterValue::from(self.idtr)).into(),
1356                (HvX64RegisterName::Gdtr, HvRegisterValue::from(self.gdtr)).into(),
1357                (HvX64RegisterName::Efer, HvRegisterValue::from(self.efer)).into(),
1358                (HvX64RegisterName::Cr0, HvRegisterValue::from(self.cr0)).into(),
1359                (HvX64RegisterName::Cr3, HvRegisterValue::from(self.cr3)).into(),
1360                (HvX64RegisterName::Cr4, HvRegisterValue::from(self.cr4)).into(),
1361                (
1362                    HvX64RegisterName::Pat,
1363                    HvRegisterValue::from(self.msr_cr_pat),
1364                )
1365                    .into(),
1366            ];
1367            regs.into_iter()
1368        }
1369    }
1370
    /// Control flags for the x64 translate virtual address hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaControlFlagsX64 {
        /// Request data read access
        pub validate_read: bool,
        /// Request data write access
        pub validate_write: bool,
        /// Request instruction fetch access.
        pub validate_execute: bool,
        /// Don't enforce any checks related to access mode (supervisor vs. user; SMEP and SMAP are treated
        /// as disabled).
        pub privilege_exempt: bool,
        /// Set the appropriate page table bits (i.e. access/dirty bit)
        pub set_page_table_bits: bool,
        /// Lock the TLB
        pub tlb_flush_inhibit: bool,
        /// Treat the access as a supervisor mode access irrespective of current mode.
        pub supervisor_access: bool,
        /// Treat the access as a user mode access irrespective of current mode.
        pub user_access: bool,
        /// Enforce the SMAP restriction on supervisor data access to user mode addresses if CR4.SMAP=1
        /// irrespective of current EFLAGS.AC i.e. the behavior for "implicit supervisor-mode accesses"
        /// (e.g. to the GDT, etc.) and when EFLAGS.AC=0. Does nothing if CR4.SMAP=0.
        pub enforce_smap: bool,
        /// Don't enforce the SMAP restriction on supervisor data access to user mode addresses irrespective
        /// of current EFLAGS.AC i.e. the behavior when EFLAGS.AC=1.
        pub override_smap: bool,
        /// Treat the access as a shadow stack access.
        pub shadow_stack: bool,
        /// Reserved.
        #[bits(45)]
        _unused: u64,
        /// Target vtl
        input_vtl_value: u8,
    }

    impl TranslateGvaControlFlagsX64 {
        /// Returns the target VTL selector.
        pub fn input_vtl(&self) -> HvInputVtl {
            self.input_vtl_value().into()
        }

        /// Returns these flags with the target VTL selector set.
        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
            self.with_input_vtl_value(input_vtl.into())
        }

        /// Sets the target VTL selector.
        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
            self.set_input_vtl_value(input_vtl.into())
        }
    }
1419
    /// Control flags for the ARM64 translate virtual address hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaControlFlagsArm64 {
        /// Request data read access
        pub validate_read: bool,
        /// Request data write access
        pub validate_write: bool,
        /// Request instruction fetch access.
        pub validate_execute: bool,
        /// Reserved.
        _reserved0: bool,
        /// Set the appropriate page table bits (i.e. access/dirty bit)
        pub set_page_table_bits: bool,
        /// Lock the TLB
        pub tlb_flush_inhibit: bool,
        /// Treat the access as a supervisor mode access irrespective of current mode.
        pub supervisor_access: bool,
        /// Treat the access as a user mode access irrespective of current mode.
        pub user_access: bool,
        /// Restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN i.e.
        /// the behavior when PSTATE.PAN=1.
        pub pan_set: bool,
        /// Don't restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN
        /// i.e. the behavior when PSTATE.PAN=0.
        pub pan_clear: bool,
        /// Reserved.
        #[bits(46)]
        _unused: u64,
        /// Target vtl
        #[bits(8)]
        input_vtl_value: u8,
    }

    impl TranslateGvaControlFlagsArm64 {
        /// Returns the target VTL selector.
        pub fn input_vtl(&self) -> HvInputVtl {
            self.input_vtl_value().into()
        }

        /// Returns these flags with the target VTL selector set.
        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
            self.with_input_vtl_value(input_vtl.into())
        }

        /// Sets the target VTL selector.
        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
            self.set_input_vtl_value(input_vtl.into())
        }
    }
1464
    /// Hypercall input for translating an x64 guest virtual address.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressX64 {
        /// The target partition ID.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        /// Flags controlling the translation.
        pub control_flags: TranslateGvaControlFlagsX64,
        /// The GVA page number to translate.
        pub gva_page: u64,
    }

    /// Hypercall input for translating an ARM64 guest virtual address.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressArm64 {
        /// The target partition ID.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        /// Flags controlling the translation.
        pub control_flags: TranslateGvaControlFlagsArm64,
        /// The GVA page number to translate.
        pub gva_page: u64,
    }
1486
    open_enum::open_enum! {
        /// Result codes for the translate virtual address hypercall.
        pub enum TranslateGvaResultCode: u32 {
            SUCCESS = 0,

            // Translation Failures
            PAGE_NOT_PRESENT = 1,
            PRIVILEGE_VIOLATION = 2,
            INVALID_PAGE_TABLE_FLAGS = 3,

            // GPA access failures
            GPA_UNMAPPED = 4,
            GPA_NO_READ_ACCESS = 5,
            GPA_NO_WRITE_ACCESS = 6,
            GPA_ILLEGAL_OVERLAY_ACCESS = 7,

            /// Intercept of the memory access by either
            /// - a higher VTL
            /// - a nested hypervisor (due to a violation of the nested page table)
            INTERCEPT = 8,

            GPA_UNACCEPTED = 9,
        }
    }
1510
    /// The result of a translate virtual address hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResult {
        /// The result code; see [`TranslateGvaResultCode`].
        pub result_code: u32,
        /// The cache type of the translated address.
        pub cache_type: u8,
        /// Whether the address is an overlay page.
        pub overlay_page: bool,
        /// Reserved.
        #[bits(23)]
        pub reserved: u32,
    }

    /// Output of the translate virtual address hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressOutput {
        /// The result of the translation.
        pub translation_result: TranslateGvaResult,
        /// The translated GPA page number.
        pub gpa_page: u64,
    }
1527
    /// Extended x64 result for the translate virtual address hypercall,
    /// including any pending event generated by the translation.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExX64 {
        /// The basic translation result.
        pub result: TranslateGvaResult,
        /// Reserved.
        pub reserved: u64,
        /// The event generated by the translation, if any.
        pub event_info: HvX64PendingEvent,
    }

    const_assert!(size_of::<TranslateGvaResultExX64>() == 0x30);

    /// Extended ARM64 result for the translate virtual address hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExArm64 {
        /// The basic translation result.
        pub result: TranslateGvaResult,
    }

    const_assert!(size_of::<TranslateGvaResultExArm64>() == 0x8);
1545
    /// Output of the extended x64 translate virtual address hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputX64 {
        /// The extended translation result.
        pub translation_result: TranslateGvaResultExX64,
        /// The translated GPA page number.
        pub gpa_page: u64,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u64,
    }

    const_assert!(size_of::<TranslateVirtualAddressExOutputX64>() == 0x40);

    /// Output of the extended ARM64 translate virtual address hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputArm64 {
        /// The extended translation result.
        pub translation_result: TranslateGvaResultExArm64,
        /// The translated GPA page number.
        pub gpa_page: u64,
    }

    const_assert!(size_of::<TranslateVirtualAddressExOutputArm64>() == 0x10);
1565
    /// Input header for the get VP index from APIC ID hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetVpIndexFromApicId {
        /// The target partition ID.
        pub partition_id: u64,
        /// The target VTL.
        pub target_vtl: u8,
        /// Reserved.
        pub reserved: [u8; 7],
    }
1573
    /// Hypercall input for enabling a VTL on an x64 virtual processor.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlX64 {
        /// The target partition ID.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The VTL to enable.
        pub target_vtl: u8,
        /// Reserved.
        pub reserved: [u8; 3],
        /// The initial context for the newly enabled VTL.
        pub vp_vtl_context: InitialVpContextX64,
    }

    /// Hypercall input for enabling a VTL on an ARM64 virtual processor.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlArm64 {
        /// The target partition ID.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The VTL to enable.
        pub target_vtl: u8,
        /// Reserved.
        pub reserved: [u8; 3],
        /// The initial context for the newly enabled VTL.
        pub vp_vtl_context: InitialVpContextArm64,
    }
1593
    /// Input header for the modify VTL protection mask hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyVtlProtectionMask {
        /// The target partition ID.
        pub partition_id: u64,
        /// The protections to apply.
        pub map_flags: HvMapGpaFlags,
        /// The VTL whose protections are modified.
        pub target_vtl: HvInputVtl,
        /// Reserved.
        pub reserved: [u8; 3],
    }
1602
    /// Input header for the check sparse GPA page VTL access hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccess {
        /// The target partition ID.
        pub partition_id: u64,
        /// The VTL whose access is checked.
        pub target_vtl: HvInputVtl,
        /// The access type(s) to check for.
        pub desired_access: u8,
        /// Reserved.
        pub reserved0: u16,
        /// Reserved.
        pub reserved1: u32,
    }
    const_assert!(size_of::<CheckSparseGpaPageVtlAccess>() == 0x10);

    /// Output of the check sparse GPA page VTL access hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccessOutput {
        /// The access check result; see [`CheckGpaPageVtlAccessResultCode`].
        pub result_code: u8,
        /// The access type(s) that were denied.
        pub denied_access: u8,
        /// The VTL that would intercept the access.
        #[bits(4)]
        pub intercepting_vtl: u32,
        /// Reserved.
        #[bits(12)]
        _reserved0: u32,
        /// Reserved.
        _reserved1: u32,
    }
    const_assert!(size_of::<CheckSparseGpaPageVtlAccessOutput>() == 0x8);
1626
    open_enum::open_enum! {
        /// Result codes for the check GPA page VTL access hypercall.
        pub enum CheckGpaPageVtlAccessResultCode: u32 {
            SUCCESS = 0,
            MEMORY_INTERCEPT = 1,
        }
    }
1633
    /// The number of VTLs for which permissions can be specified in a VTL permission set.
    pub const HV_VTL_PERMISSION_SET_SIZE: usize = 2;

    /// A set of per-VTL page permissions.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VtlPermissionSet {
        /// VTL permissions for the GPA page, starting from VTL 1.
        pub vtl_permission_from_1: [u16; HV_VTL_PERMISSION_SET_SIZE],
    }
1643
    open_enum::open_enum! {
        /// The expected memory type when accepting GPA pages.
        pub enum AcceptMemoryType: u32 {
            ANY = 0,
            RAM = 1,
        }
    }
1650
    open_enum! {
        /// Host visibility used in hypercall inputs.
        ///
        /// NOTE: While this is a 2 bit set with the lower bit representing host
        /// read access and upper bit representing host write access, hardware
        /// platforms do not support that form of isolation. Only support
        /// private or full shared in this definition.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HostVisibilityType: u8 {
            PRIVATE = 0,
            SHARED = 3,
        }
    }

    // Used by bitfield-struct implicitly.
    impl HostVisibilityType {
        /// Converts raw bits into this type; used by `#[bits]` fields.
        const fn from_bits(value: u8) -> Self {
            Self(value)
        }

        /// Converts this type into its raw bits; used by `#[bits]` fields.
        const fn into_bits(value: Self) -> u8 {
            value.0
        }
    }
1675
    /// Attributes for accepting pages. See [`AcceptGpaPages`]
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptPagesAttributes {
        #[bits(6)]
        /// Supplies the expected memory type [`AcceptMemoryType`].
        pub memory_type: u32,
        #[bits(2)]
        /// Supplies the initial host visibility (exclusive, shared read-only, shared read-write).
        pub host_visibility: HostVisibilityType,
        #[bits(3)]
        /// Supplies the set of VTLs for which initial VTL permissions will be set.
        pub vtl_set: u32,
        /// Reserved.
        #[bits(21)]
        _reserved: u32,
    }
1692
    /// Input header for the accept GPA pages hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptGpaPages {
        /// Supplies the partition ID of the partition this request is for.
        pub partition_id: u64,
        /// Supplies attributes of the pages being accepted, such as whether
        /// they should be made host visible.
        pub page_attributes: AcceptPagesAttributes,
        /// Supplies the set of initial VTL permissions.
        pub vtl_permission_set: VtlPermissionSet,
        /// Supplies the GPA page number of the first page to modify.
        pub gpa_page_base: u64,
    }
    const_assert!(size_of::<AcceptGpaPages>() == 0x18);
1707
1708    /// Attributes for unaccepting pages. See [`UnacceptGpaPages`]
1709    #[bitfield(u32)]
1710    pub struct UnacceptPagesAttributes {
1711        #[bits(3)]
1712        pub vtl_set: u32,
1713        #[bits(29)]
1714        _reserved: u32,
1715    }
1716
1717    #[repr(C)]
1718    pub struct UnacceptGpaPages {
1719        /// Supplies the partition ID of the partition this request is for.
1720        pub partition_id: u64,
1721        /// Supplies the set of VTLs for which VTL permissions will be checked.
1722        pub page_attributes: UnacceptPagesAttributes,
1723        ///  Supplies the set of VTL permissions to check against.
1724        pub vtl_permission_set: VtlPermissionSet,
1725        /// Supplies the GPA page number of the first page to modify.
1726        pub gpa_page_base: u64,
1727    }
1728    const_assert!(size_of::<UnacceptGpaPages>() == 0x18);
1729
    /// The host visibility to apply when modifying page visibility.
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyHostVisibility {
        /// The new host visibility.
        #[bits(2)]
        pub host_visibility: HostVisibilityType,
        /// Reserved.
        #[bits(30)]
        _reserved: u32,
    }

    /// Input header for the modify sparse page visibility hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifySparsePageVisibility {
        /// The target partition ID.
        pub partition_id: u64,
        /// The host visibility to apply.
        pub host_visibility: ModifyHostVisibility,
        /// Reserved.
        pub reserved: u32,
    }
1746
    /// Input header for the query sparse page visibility hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct QuerySparsePageVisibility {
        /// The target partition ID.
        pub partition_id: u64,
    }
1752
    /// The size in bytes of the caller-supplied report data for a VBS VM report.
    pub const VBS_VM_REPORT_DATA_SIZE: usize = 64;
    /// The maximum size in bytes of a generated VBS VM report.
    pub const VBS_VM_MAX_REPORT_SIZE: usize = 2048;

    /// Input for requesting a VBS VM attestation report.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VbsVmCallReport {
        /// Caller-supplied data to be included in the report.
        pub report_data: [u8; VBS_VM_REPORT_DATA_SIZE],
    }

    /// Output containing a generated VBS VM attestation report.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VbsVmCallReportOutput {
        /// The generated report.
        pub report: [u8; VBS_VM_MAX_REPORT_SIZE],
    }
1767
    /// Flags for the enable partition VTL hypercall.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtlFlags {
        /// Enable mode-based execution control (MBEC) for the VTL.
        pub enable_mbec: bool,
        /// Enable supervisor shadow stack support for the VTL.
        pub enable_supervisor_shadow_stack: bool,
        /// Enable hardware HVPT support for the VTL.
        pub enable_hardware_hvpt: bool,
        /// Reserved.
        #[bits(5)]
        pub reserved: u8,
    }

    /// Hypercall input for enabling a VTL for a partition.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtl {
        /// The target partition ID.
        pub partition_id: u64,
        /// The VTL to enable.
        pub target_vtl: u8,
        /// Flags controlling the features enabled for the VTL.
        pub flags: EnablePartitionVtlFlags,
        /// Reserved; must be zero.
        pub reserved_z0: u16,
        /// Reserved; must be zero.
        pub reserved_z1: u32,
    }
1787
    /// Input header for the flush virtual address space hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpace {
        /// The address space to flush.
        pub address_space: u64,
        /// Flags controlling the flush.
        pub flags: HvFlushFlags,
        /// The mask of processors to flush on.
        pub processor_mask: u64,
    }

    /// Input header for the extended flush virtual address space hypercall,
    /// which takes a VP set rather than a simple processor mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpaceEx {
        /// The address space to flush.
        pub address_space: u64,
        /// Flags controlling the flush.
        pub flags: HvFlushFlags,
        /// The format of the VP set that follows.
        pub vp_set_format: u64,
        /// The valid banks mask of the VP set that follows.
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }
1805
    /// Fixed-size header for the pin/unpin GPA page ranges hypercalls; the
    /// page ranges themselves follow as variable-sized input.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PinUnpinGpaPageRangesHeader {
        pub reserved: u64,
    }
1811
    /// Hypercall input to send a synthetic IPI to a cluster of processors,
    /// targeted via a fixed 64-bit mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpi {
        /// The interrupt vector to deliver.
        pub vector: u32,
        pub target_vtl: HvInputVtl,
        pub flags: u8,
        pub reserved: u16,
        /// Bitmask of target processors.
        pub processor_mask: u64,
    }

    /// Extended variant of [`SendSyntheticClusterIpi`] that targets
    /// processors via a variable-sized VP set rather than a fixed mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpiEx {
        /// The interrupt vector to deliver.
        pub vector: u32,
        pub target_vtl: HvInputVtl,
        pub flags: u8,
        pub reserved: u16,
        pub vp_set_format: u64,
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }
1833
    /// Flags for the TLB flush hypercalls ([`FlushVirtualAddressSpace`] and
    /// [`FlushVirtualAddressSpaceEx`]).
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvFlushFlags {
        pub all_processors: bool,
        pub all_virtual_address_spaces: bool,
        pub non_global_mappings_only: bool,
        pub use_extended_range_format: bool,
        pub use_target_vtl: bool,

        #[bits(3)]
        _reserved: u8,

        pub target_vtl0: bool,
        pub target_vtl1: bool,

        #[bits(54)]
        _reserved2: u64,
    }
1852
    /// A guest virtual address range packed into a `u64`. The encoding
    /// varies; decode with `as_simple`, `as_extended`, or
    /// `as_extended_large_page`.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGvaRange(pub u64);
1856
1857    impl From<u64> for HvGvaRange {
1858        fn from(value: u64) -> Self {
1859            Self(value)
1860        }
1861    }
1862
1863    impl From<HvGvaRange> for u64 {
1864        fn from(value: HvGvaRange) -> Self {
1865            value.0
1866        }
1867    }
1868
1869    impl HvGvaRange {
1870        pub fn as_simple(self) -> HvGvaRangeSimple {
1871            HvGvaRangeSimple(self.0)
1872        }
1873
1874        pub fn as_extended(self) -> HvGvaRangeExtended {
1875            HvGvaRangeExtended(self.0)
1876        }
1877
1878        pub fn as_extended_large_page(self) -> HvGvaRangeExtendedLargePage {
1879            HvGvaRangeExtendedLargePage(self.0)
1880        }
1881    }
1882
    /// Simple encoding of a guest virtual address range.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest virtual address.
        #[bits(52)]
        pub gva_page_number: u64,
    }

    /// Extended encoding of a guest virtual address range, used when
    /// `large_page` is clear (4 KB pages).
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest virtual address when `large_page` is clear.
        #[bits(52)]
        pub gva_page_number: u64,
    }

    /// Extended encoding of a guest virtual address range, used when
    /// `large_page` is set (2 MB or 1 GB pages).
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest virtual address when `large_page` is set.
        #[bits(43)]
        pub gva_large_page_number: u64,
    }
1925
    /// A guest physical address range packed into a `u64`. The encoding
    /// varies; decode with `as_simple`, `as_extended`, or
    /// `as_extended_large_page`.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGpaRange(pub u64);
1929
1930    impl HvGpaRange {
1931        pub fn as_simple(self) -> HvGpaRangeSimple {
1932            HvGpaRangeSimple(self.0)
1933        }
1934
1935        pub fn as_extended(self) -> HvGpaRangeExtended {
1936            HvGpaRangeExtended(self.0)
1937        }
1938
1939        pub fn as_extended_large_page(self) -> HvGpaRangeExtendedLargePage {
1940            HvGpaRangeExtendedLargePage(self.0)
1941        }
1942    }
1943
    /// Simple encoding of a guest physical address range.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest physical address.
        #[bits(52)]
        pub gpa_page_number: u64,
    }

    /// Extended encoding of a guest physical address range, used when
    /// `large_page` is clear (4 KB pages).
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest physical address when `large_page` is clear.
        #[bits(52)]
        pub gpa_page_number: u64,
    }

    /// Extended encoding of a guest physical address range, used when
    /// `large_page` is set (2 MB or 1 GB pages).
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest physical address when `large_page` is set.
        #[bits(43)]
        pub gpa_large_page_number: u64,
    }
1986
    /// Maximum data payload size, in bytes, for the MMIO read/write hypercalls.
    pub const HV_HYPERCALL_MMIO_MAX_DATA_LENGTH: usize = 64;

    /// Hypercall input for an MMIO read.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoRead {
        /// Guest physical address to read from.
        pub gpa: u64,
        /// Access width in bytes.
        pub access_width: u32,
        pub reserved_z0: u32,
    }

    /// Hypercall output for an MMIO read; only the first `access_width` bytes
    /// of `data` are meaningful.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoReadOutput {
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }

    /// Hypercall input for an MMIO write.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoWrite {
        /// Guest physical address to write to.
        pub gpa: u64,
        /// Access width in bytes.
        pub access_width: u32,
        pub reserved_z0: u32,
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }
2011}
2012
/// Defines an `open_enum` of `u32` hypervisor register names.
///
/// The generated enum contains the caller-supplied (architecture-specific)
/// variants plus a common set of variants shared by every architecture, and
/// gains lossless `From` conversions to and from the untyped
/// [`HvRegisterName`].
macro_rules! registers {
    ($name:ident {
        $(
            $(#[$vattr:meta])*
            $variant:ident = $value:expr
        ),*
        $(,)?
    }) => {
        open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
            pub enum $name: u32 {
        #![expect(non_upper_case_globals)]
                $($variant = $value,)*
                // Registers common to all architectures, appended to every
                // generated enum.
                InstructionEmulationHints = 0x00000002,
                InternalActivityState = 0x00000004,

        // Guest Crash Registers
                GuestCrashP0  = 0x00000210,
                GuestCrashP1  = 0x00000211,
                GuestCrashP2  = 0x00000212,
                GuestCrashP3  = 0x00000213,
                GuestCrashP4  = 0x00000214,
                GuestCrashCtl = 0x00000215,

                PendingInterruption = 0x00010002,
                InterruptState = 0x00010003,
                PendingEvent0 = 0x00010004,
                PendingEvent1 = 0x00010005,
                DeliverabilityNotifications = 0x00010006,

                GicrBaseGpa = 0x00063000,

                VpRuntime = 0x00090000,
                GuestOsId = 0x00090002,
                VpIndex = 0x00090003,
                TimeRefCount = 0x00090004,
                CpuManagementVersion = 0x00090007,
                VpAssistPage = 0x00090013,
                VpRootSignalCount = 0x00090014,
                ReferenceTsc = 0x00090017,
                VpConfig = 0x00090018,
                Ghcb = 0x00090019,
                ReferenceTscSequence = 0x0009001A,
                GuestSchedulerEvent = 0x0009001B,

                Sint0 = 0x000A0000,
                Sint1 = 0x000A0001,
                Sint2 = 0x000A0002,
                Sint3 = 0x000A0003,
                Sint4 = 0x000A0004,
                Sint5 = 0x000A0005,
                Sint6 = 0x000A0006,
                Sint7 = 0x000A0007,
                Sint8 = 0x000A0008,
                Sint9 = 0x000A0009,
                Sint10 = 0x000A000A,
                Sint11 = 0x000A000B,
                Sint12 = 0x000A000C,
                Sint13 = 0x000A000D,
                Sint14 = 0x000A000E,
                Sint15 = 0x000A000F,
                Scontrol = 0x000A0010,
                Sversion = 0x000A0011,
                Sifp = 0x000A0012,
                Sipp = 0x000A0013,
                Eom = 0x000A0014,
                Sirbp = 0x000A0015,

                Stimer0Config = 0x000B0000,
                Stimer0Count = 0x000B0001,
                Stimer1Config = 0x000B0002,
                Stimer1Count = 0x000B0003,
                Stimer2Config = 0x000B0004,
                Stimer2Count = 0x000B0005,
                Stimer3Config = 0x000B0006,
                Stimer3Count = 0x000B0007,
                StimeUnhaltedTimerConfig = 0x000B0100,
                StimeUnhaltedTimerCount = 0x000B0101,

                VsmCodePageOffsets = 0x000D0002,
                VsmVpStatus = 0x000D0003,
                VsmPartitionStatus = 0x000D0004,
                VsmVina = 0x000D0005,
                VsmCapabilities = 0x000D0006,
                VsmPartitionConfig = 0x000D0007,
                GuestVsmPartitionConfig = 0x000D0008,
                VsmVpSecureConfigVtl0 = 0x000D0010,
                VsmVpSecureConfigVtl1 = 0x000D0011,
                VsmVpSecureConfigVtl2 = 0x000D0012,
                VsmVpSecureConfigVtl3 = 0x000D0013,
                VsmVpSecureConfigVtl4 = 0x000D0014,
                VsmVpSecureConfigVtl5 = 0x000D0015,
                VsmVpSecureConfigVtl6 = 0x000D0016,
                VsmVpSecureConfigVtl7 = 0x000D0017,
                VsmVpSecureConfigVtl8 = 0x000D0018,
                VsmVpSecureConfigVtl9 = 0x000D0019,
                VsmVpSecureConfigVtl10 = 0x000D001A,
                VsmVpSecureConfigVtl11 = 0x000D001B,
                VsmVpSecureConfigVtl12 = 0x000D001C,
                VsmVpSecureConfigVtl13 = 0x000D001D,
                VsmVpSecureConfigVtl14 = 0x000D001E,
                VsmVpWaitForTlbLock = 0x000D0020,
            }
        }

        // Conversions between the typed per-arch enum and the untyped
        // HvRegisterName; both are transparent u32 wrappers.
        impl From<HvRegisterName> for $name {
            fn from(name: HvRegisterName) -> Self {
                Self(name.0)
            }
        }

        impl From<$name> for HvRegisterName {
            fn from(name: $name) -> Self {
                Self(name.0)
            }
        }
    };
}
2131
/// A hypervisor register for any architecture.
///
/// This exists only to pass registers through layers where the architecture
/// type has been lost. In general, you should use the arch-specific registers.
///
/// The wrapped `u32` is the raw hypervisor register identifier.
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterName(pub u32);
2139
registers! {
    // Typed enum for registers that are shared across architectures. No
    // arch-specific variants are supplied: the enum contains only the common
    // set that the `registers!` macro adds to every generated enum.
    HvAllArchRegisterName {}
}
2144
2145impl From<HvAllArchRegisterName> for HvX64RegisterName {
2146    fn from(name: HvAllArchRegisterName) -> Self {
2147        Self(name.0)
2148    }
2149}
2150
2151impl From<HvAllArchRegisterName> for HvArm64RegisterName {
2152    fn from(name: HvAllArchRegisterName) -> Self {
2153        Self(name.0)
2154    }
2155}
2156
// x86-64 register names. The `registers!` macro appends the
// cross-architecture variants and the HvRegisterName conversions.
registers! {
    HvX64RegisterName {
        // X64 User-Mode Registers
        Rax = 0x00020000,
        Rcx = 0x00020001,
        Rdx = 0x00020002,
        Rbx = 0x00020003,
        Rsp = 0x00020004,
        Rbp = 0x00020005,
        Rsi = 0x00020006,
        Rdi = 0x00020007,
        R8 = 0x00020008,
        R9 = 0x00020009,
        R10 = 0x0002000a,
        R11 = 0x0002000b,
        R12 = 0x0002000c,
        R13 = 0x0002000d,
        R14 = 0x0002000e,
        R15 = 0x0002000f,
        Rip = 0x00020010,
        Rflags = 0x00020011,

        // X64 Floating Point and Vector Registers
        Xmm0 = 0x00030000,
        Xmm1 = 0x00030001,
        Xmm2 = 0x00030002,
        Xmm3 = 0x00030003,
        Xmm4 = 0x00030004,
        Xmm5 = 0x00030005,
        Xmm6 = 0x00030006,
        Xmm7 = 0x00030007,
        Xmm8 = 0x00030008,
        Xmm9 = 0x00030009,
        Xmm10 = 0x0003000A,
        Xmm11 = 0x0003000B,
        Xmm12 = 0x0003000C,
        Xmm13 = 0x0003000D,
        Xmm14 = 0x0003000E,
        Xmm15 = 0x0003000F,
        FpMmx0 = 0x00030010,
        FpMmx1 = 0x00030011,
        FpMmx2 = 0x00030012,
        FpMmx3 = 0x00030013,
        FpMmx4 = 0x00030014,
        FpMmx5 = 0x00030015,
        FpMmx6 = 0x00030016,
        FpMmx7 = 0x00030017,
        FpControlStatus = 0x00030018,
        XmmControlStatus = 0x00030019,

        // X64 Control Registers
        Cr0 = 0x00040000,
        Cr2 = 0x00040001,
        Cr3 = 0x00040002,
        Cr4 = 0x00040003,
        Cr8 = 0x00040004,
        Xfem = 0x00040005,
        // X64 Intermediate Control Registers
        IntermediateCr0 = 0x00041000,
        IntermediateCr3 = 0x00041002,
        IntermediateCr4 = 0x00041003,
        IntermediateCr8 = 0x00041004,
        // X64 Debug Registers
        Dr0 = 0x00050000,
        Dr1 = 0x00050001,
        Dr2 = 0x00050002,
        Dr3 = 0x00050003,
        Dr6 = 0x00050004,
        Dr7 = 0x00050005,
        // X64 Segment Registers
        Es = 0x00060000,
        Cs = 0x00060001,
        Ss = 0x00060002,
        Ds = 0x00060003,
        Fs = 0x00060004,
        Gs = 0x00060005,
        Ldtr = 0x00060006,
        Tr = 0x00060007,
        // X64 Table Registers
        Idtr = 0x00070000,
        Gdtr = 0x00070001,
        // X64 Virtualized MSRs
        Tsc = 0x00080000,
        Efer = 0x00080001,
        KernelGsBase = 0x00080002,
        ApicBase = 0x00080003,
        Pat = 0x00080004,
        SysenterCs = 0x00080005,
        SysenterEip = 0x00080006,
        SysenterEsp = 0x00080007,
        Star = 0x00080008,
        Lstar = 0x00080009,
        Cstar = 0x0008000a,
        Sfmask = 0x0008000b,
        InitialApicId = 0x0008000c,
        // X64 Cache control MSRs
        MsrMtrrCap = 0x0008000d,
        MsrMtrrDefType = 0x0008000e,
        MsrMtrrPhysBase0 = 0x00080010,
        MsrMtrrPhysBase1 = 0x00080011,
        MsrMtrrPhysBase2 = 0x00080012,
        MsrMtrrPhysBase3 = 0x00080013,
        MsrMtrrPhysBase4 = 0x00080014,
        MsrMtrrPhysBase5 = 0x00080015,
        MsrMtrrPhysBase6 = 0x00080016,
        MsrMtrrPhysBase7 = 0x00080017,
        MsrMtrrPhysBase8 = 0x00080018,
        MsrMtrrPhysBase9 = 0x00080019,
        MsrMtrrPhysBaseA = 0x0008001a,
        MsrMtrrPhysBaseB = 0x0008001b,
        MsrMtrrPhysBaseC = 0x0008001c,
        MsrMtrrPhysBaseD = 0x0008001d,
        MsrMtrrPhysBaseE = 0x0008001e,
        MsrMtrrPhysBaseF = 0x0008001f,
        MsrMtrrPhysMask0 = 0x00080040,
        MsrMtrrPhysMask1 = 0x00080041,
        MsrMtrrPhysMask2 = 0x00080042,
        MsrMtrrPhysMask3 = 0x00080043,
        MsrMtrrPhysMask4 = 0x00080044,
        MsrMtrrPhysMask5 = 0x00080045,
        MsrMtrrPhysMask6 = 0x00080046,
        MsrMtrrPhysMask7 = 0x00080047,
        MsrMtrrPhysMask8 = 0x00080048,
        MsrMtrrPhysMask9 = 0x00080049,
        MsrMtrrPhysMaskA = 0x0008004a,
        MsrMtrrPhysMaskB = 0x0008004b,
        MsrMtrrPhysMaskC = 0x0008004c,
        MsrMtrrPhysMaskD = 0x0008004d,
        MsrMtrrPhysMaskE = 0x0008004e,
        MsrMtrrPhysMaskF = 0x0008004f,
        MsrMtrrFix64k00000 = 0x00080070,
        MsrMtrrFix16k80000 = 0x00080071,
        MsrMtrrFix16kA0000 = 0x00080072,
        MsrMtrrFix4kC0000 = 0x00080073,
        MsrMtrrFix4kC8000 = 0x00080074,
        MsrMtrrFix4kD0000 = 0x00080075,
        MsrMtrrFix4kD8000 = 0x00080076,
        MsrMtrrFix4kE0000 = 0x00080077,
        MsrMtrrFix4kE8000 = 0x00080078,
        MsrMtrrFix4kF0000 = 0x00080079,
        MsrMtrrFix4kF8000 = 0x0008007a,

        TscAux = 0x0008007B,
        Bndcfgs = 0x0008007C,
        DebugCtl = 0x0008007D,
        MCount = 0x0008007E,
        ACount = 0x0008007F,

        SgxLaunchControl0 = 0x00080080,
        SgxLaunchControl1 = 0x00080081,
        SgxLaunchControl2 = 0x00080082,
        SgxLaunchControl3 = 0x00080083,
        SpecCtrl = 0x00080084,
        PredCmd = 0x00080085,
        VirtSpecCtrl = 0x00080086,
        TscVirtualOffset = 0x00080087,
        TsxCtrl = 0x00080088,
        MsrMcUpdatePatchLevel = 0x00080089,
        Available1 = 0x0008008A,
        Xss = 0x0008008B,
        UCet = 0x0008008C,
        SCet = 0x0008008D,
        Ssp = 0x0008008E,
        Pl0Ssp = 0x0008008F,
        Pl1Ssp = 0x00080090,
        Pl2Ssp = 0x00080091,
        Pl3Ssp = 0x00080092,
        InterruptSspTableAddr = 0x00080093,
        TscVirtualMultiplier = 0x00080094,
        TscDeadline = 0x00080095,
        TscAdjust = 0x00080096,
        Pasid = 0x00080097,
        UmwaitControl = 0x00080098,
        Xfd = 0x00080099,
        XfdErr = 0x0008009A,

        Hypercall = 0x00090001,
        RegisterPage = 0x0009001C,

        // Partition Timer Assist Registers
        EmulatedTimerPeriod = 0x00090030,
        EmulatedTimerControl = 0x00090031,
        PmTimerAssist = 0x00090032,

        // AMD SEV configuration MSRs
        SevControl = 0x00090040,

        CrInterceptControl = 0x000E0000,
        CrInterceptCr0Mask = 0x000E0001,
        CrInterceptCr4Mask = 0x000E0002,
        CrInterceptIa32MiscEnableMask = 0x000E0003,
    }
}
2350
// AArch64 register names. The `registers!` macro appends the
// cross-architecture variants and the HvRegisterName conversions.
registers! {
    HvArm64RegisterName {
        HypervisorVersion = 0x00000100,
        PrivilegesAndFeaturesInfo = 0x00000200,
        FeaturesInfo = 0x00000201,
        ImplementationLimitsInfo = 0x00000202,
        HardwareFeaturesInfo = 0x00000203,
        CpuManagementFeaturesInfo = 0x00000204,
        PasidFeaturesInfo = 0x00000205,
        SkipLevelFeaturesInfo = 0x00000206,
        NestedVirtFeaturesInfo = 0x00000207,
        IptFeaturesInfo = 0x00000208,
        IsolationConfiguration = 0x00000209,

        X0 = 0x00020000,
        X1 = 0x00020001,
        X2 = 0x00020002,
        X3 = 0x00020003,
        X4 = 0x00020004,
        X5 = 0x00020005,
        X6 = 0x00020006,
        X7 = 0x00020007,
        X8 = 0x00020008,
        X9 = 0x00020009,
        X10 = 0x0002000A,
        X11 = 0x0002000B,
        X12 = 0x0002000C,
        X13 = 0x0002000D,
        X14 = 0x0002000E,
        X15 = 0x0002000F,
        X16 = 0x00020010,
        X17 = 0x00020011,
        X18 = 0x00020012,
        X19 = 0x00020013,
        X20 = 0x00020014,
        X21 = 0x00020015,
        X22 = 0x00020016,
        X23 = 0x00020017,
        X24 = 0x00020018,
        X25 = 0x00020019,
        X26 = 0x0002001A,
        X27 = 0x0002001B,
        X28 = 0x0002001C,
        XFp = 0x0002001D,
        XLr = 0x0002001E,
        XSp = 0x0002001F, // alias for either El0/x depending on Cpsr.SPSel
        XSpEl0 = 0x00020020,
        XSpElx = 0x00020021,
        XPc = 0x00020022,
        Cpsr = 0x00020023,
        SpsrEl2 = 0x00021002,

        SctlrEl1 = 0x00040002,
        Ttbr0El1 = 0x00040005,
        Ttbr1El1 = 0x00040006,
        TcrEl1 = 0x00040007,
        EsrEl1 = 0x00040008,
        FarEl1 = 0x00040009,
        MairEl1 = 0x0004000b,
        VbarEl1 = 0x0004000c,
        ElrEl1 = 0x00040015,
    }
}
2414
/// The value of a hypervisor register, stored as a 128-bit integer
/// (`AlignedU128`; presumably 16-byte aligned — see that type's definition).
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterValue(pub AlignedU128);
2418
2419impl HvRegisterValue {
2420    pub fn as_u128(&self) -> u128 {
2421        self.0.into()
2422    }
2423
2424    pub fn as_u64(&self) -> u64 {
2425        self.as_u128() as u64
2426    }
2427
2428    pub fn as_u32(&self) -> u32 {
2429        self.as_u128() as u32
2430    }
2431
2432    pub fn as_u16(&self) -> u16 {
2433        self.as_u128() as u16
2434    }
2435
2436    pub fn as_u8(&self) -> u8 {
2437        self.as_u128() as u8
2438    }
2439
2440    pub fn as_table(&self) -> HvX64TableRegister {
2441        HvX64TableRegister::read_from_prefix(self.as_bytes())
2442            .unwrap()
2443            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2444    }
2445
2446    pub fn as_segment(&self) -> HvX64SegmentRegister {
2447        HvX64SegmentRegister::read_from_prefix(self.as_bytes())
2448            .unwrap()
2449            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2450    }
2451}
2452
2453impl From<u8> for HvRegisterValue {
2454    fn from(val: u8) -> Self {
2455        (val as u128).into()
2456    }
2457}
2458
2459impl From<u16> for HvRegisterValue {
2460    fn from(val: u16) -> Self {
2461        (val as u128).into()
2462    }
2463}
2464
2465impl From<u32> for HvRegisterValue {
2466    fn from(val: u32) -> Self {
2467        (val as u128).into()
2468    }
2469}
2470
2471impl From<u64> for HvRegisterValue {
2472    fn from(val: u64) -> Self {
2473        (val as u128).into()
2474    }
2475}
2476
2477impl From<u128> for HvRegisterValue {
2478    fn from(val: u128) -> Self {
2479        Self(val.into())
2480    }
2481}
2482
/// An x64 descriptor table register value (e.g. IDTR or GDTR), in the
/// hypervisor's register format.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64TableRegister {
    pub pad: [u16; 3],
    pub limit: u16,
    pub base: u64,
}
2490
2491impl From<HvX64TableRegister> for HvRegisterValue {
2492    fn from(val: HvX64TableRegister) -> Self {
2493        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2494    }
2495}
2496
2497impl From<HvRegisterValue> for HvX64TableRegister {
2498    fn from(val: HvRegisterValue) -> Self {
2499        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2500    }
2501}
2502
/// An x64 segment register value (base, limit, selector, and attributes), in
/// the hypervisor's register format.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SegmentRegister {
    pub base: u64,
    pub limit: u32,
    pub selector: u16,
    pub attributes: u16,
}
2511
2512impl From<HvX64SegmentRegister> for HvRegisterValue {
2513    fn from(val: HvX64SegmentRegister) -> Self {
2514        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2515    }
2516}
2517
2518impl From<HvRegisterValue> for HvX64SegmentRegister {
2519    fn from(val: HvRegisterValue) -> Self {
2520        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2521    }
2522}
2523
/// The value of the `DeliverabilityNotifications` register.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
pub struct HvDeliverabilityNotificationsRegister {
    /// x86_64 only.
    pub nmi_notification: bool,
    /// x86_64 only.
    pub interrupt_notification: bool,
    /// x86_64 only.
    #[bits(4)]
    pub interrupt_priority: u8,
    #[bits(42)]
    pub reserved: u64,
    /// One bit per synthetic interrupt source (SINT0 through SINT15).
    pub sints: u16,
}
2539
open_enum! {
    /// The reason the hypervisor entered a VTL on a virtual processor,
    /// reported in [`HvVpVtlControl::entry_reason`].
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvVtlEntryReason: u32 {
        /// This reason is reserved and is not used.
        RESERVED = 0,

        /// Indicates entry due to a VTL call from a lower VTL.
        VTL_CALL = 1,

        /// Indicates entry due to an interrupt targeted to the VTL.
        INTERRUPT = 2,

        /// Indicates an entry due to an intercept delivered via the intercept page.
        INTERCEPT = 3,
    }
}
2556
/// Per-VP VTL control information, embedded in the VP assist page
/// ([`HvVpAssistPage::vtl_control`]).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpVtlControl {
    /// The hypervisor updates the entry reason with an indication as to why the
    /// VTL was entered on the virtual processor.
    pub entry_reason: HvVtlEntryReason,

    /// This flag determines whether the VINA interrupt line is asserted.
    pub vina_status: u8,
    pub reserved_z0: u8,
    pub reserved_z1: u16,

    /// A guest updates the VtlReturn* fields to provide the register values to
    /// restore on VTL return.  The specific register values that are restored
    /// will vary based on whether the VTL is 32-bit or 64-bit: rax and rcx or
    /// eax, ecx, and edx.
    pub registers: [u64; 2],
}
2577
/// The value of the `VsmVina` register (virtual interrupt notification
/// assist).
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterVsmVina {
    /// The interrupt vector used for VINA notifications.
    pub vector: u8,
    pub enabled: bool,
    pub auto_reset: bool,
    pub auto_eoi: bool,
    #[bits(53)]
    pub reserved: u64,
}
2588
/// The layout of the VP assist page (mapped via the `VpAssistPage` register).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPage {
    /// APIC assist for optimized EOI processing.
    pub apic_assist: u32,
    pub reserved_z0: u32,

    /// VP-VTL control information
    pub vtl_control: HvVpVtlControl,

    pub nested_enlightenments_control: u64,
    pub enlighten_vm_entry: u8,
    pub reserved_z1: [u8; 7],
    pub current_nested_vmcs: u64,
    pub synthetic_time_unhalted_timer_expired: u8,
    pub reserved_z2: [u8; 7],
    pub virtualization_fault_information: [u8; 40],
    pub reserved_z3: u64,
    /// The intercept message, for intercepts delivered via the intercept page.
    pub intercept_message: HvMessage,
    pub vtl_return_actions: [u8; 256],
}
2610
/// A "signal event" action entry; presumably encoded in
/// [`HvVpAssistPage::vtl_return_actions`] — confirm against the consumer.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPageActionSignalEvent {
    pub action_type: u64,
    pub target_vp: u32,
    pub target_vtl: u8,
    pub target_sint: u8,
    pub flag_number: u16,
}
2620
open_enum! {
    /// The type of access that triggered an intercept, reported in the
    /// intercept message headers.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterceptAccessType: u8 {
        READ = 0,
        WRITE = 1,
        EXECUTE = 2,
    }
}
2629
/// Virtual processor execution state reported in
/// [`HvX64InterceptMessageHeader`].
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VpExecutionState {
    #[bits(2)]
    pub cpl: u8,
    pub cr0_pe: bool,
    pub cr0_am: bool,
    pub efer_lma: bool,
    pub debug_active: bool,
    pub interruption_pending: bool,
    #[bits(4)]
    pub vtl: u8,
    pub enclave_mode: bool,
    pub interrupt_shadow: bool,
    pub virtualization_fault_active: bool,
    #[bits(2)]
    pub reserved: u8,
}
2648
/// Virtual processor execution state reported in
/// [`HvArm64InterceptMessageHeader`].
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64VpExecutionState {
    #[bits(2)]
    pub cpl: u8,
    pub debug_active: bool,
    pub interruption_pending: bool,
    #[bits(4)]
    pub vtl: u8,
    pub virtualization_fault_active: bool,
    #[bits(7)]
    pub reserved: u8,
}
2662
/// Common header at the start of x64 intercept messages.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterceptMessageHeader {
    /// Index of the virtual processor that took the intercept.
    pub vp_index: u32,
    /// Packed field: instruction length in the low nibble, CR8 in the high
    /// nibble. Use `instruction_len()` and `cr8()` to decode.
    pub instruction_length_and_cr8: u8,
    pub intercept_access_type: HvInterceptAccessType,
    pub execution_state: HvX64VpExecutionState,
    pub cs_segment: HvX64SegmentRegister,
    pub rip: u64,
    pub rflags: u64,
}

impl MessagePayload for HvX64InterceptMessageHeader {}
2676
2677impl HvX64InterceptMessageHeader {
2678    pub fn instruction_len(&self) -> u8 {
2679        self.instruction_length_and_cr8 & 0xf
2680    }
2681
2682    pub fn cr8(&self) -> u8 {
2683        self.instruction_length_and_cr8 >> 4
2684    }
2685}
2686
/// Common header at the start of AArch64 intercept messages.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64InterceptMessageHeader {
    /// Index of the virtual processor that took the intercept.
    pub vp_index: u32,
    pub instruction_length: u8,
    pub intercept_access_type: HvInterceptAccessType,
    pub execution_state: HvArm64VpExecutionState,
    /// Program counter.
    pub pc: u64,
    // NOTE(review): field name appears to be a typo of `cpsr` (Current
    // Program Status Register); kept as-is since renaming would break callers.
    pub cspr: u64,
}
// Pin the ABI size of the header to 0x18 bytes.
const_assert!(size_of::<HvArm64InterceptMessageHeader>() == 0x18);

impl MessagePayload for HvArm64InterceptMessageHeader {}
2700
/// Packed access information for an x64 I/O port intercept; see the accessor
/// methods for the bit layout (size in bits 0-2, string op in bit 3, rep
/// prefix in bit 4).
#[repr(transparent)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortAccessInfo(pub u8);
2704
2705impl HvX64IoPortAccessInfo {
2706    pub fn new(access_size: u8, string_op: bool, rep_prefix: bool) -> Self {
2707        let mut info = access_size & 0x7;
2708
2709        if string_op {
2710            info |= 0x8;
2711        }
2712
2713        if rep_prefix {
2714            info |= 0x10;
2715        }
2716
2717        Self(info)
2718    }
2719
2720    pub fn access_size(&self) -> u8 {
2721        self.0 & 0x7
2722    }
2723
2724    pub fn string_op(&self) -> bool {
2725        self.0 & 0x8 != 0
2726    }
2727
2728    pub fn rep_prefix(&self) -> bool {
2729        self.0 & 0x10 != 0
2730    }
2731}
2732
/// Message payload for an x64 I/O port intercept.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub port_number: u16,
    pub access_info: HvX64IoPortAccessInfo,
    pub instruction_byte_count: u8,
    pub reserved: u32,
    pub rax: u64,
    pub instruction_bytes: [u8; 16],
    /// Segment registers needed to emulate string I/O instructions.
    pub ds_segment: HvX64SegmentRegister,
    pub es_segment: HvX64SegmentRegister,
    pub rcx: u64,
    pub rsi: u64,
    pub rdi: u64,
}

impl MessagePayload for HvX64IoPortInterceptMessage {}
2751
/// Access flags reported in [`HvX64MemoryInterceptMessage`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MemoryAccessInfo {
    pub gva_valid: bool,
    pub gva_gpa_valid: bool,
    pub hypercall_output_pending: bool,
    pub tlb_locked: bool,
    pub supervisor_shadow_stack: bool,
    #[bits(3)]
    pub reserved1: u8,
}
2763
/// Access flags accompanying an ARM64 memory intercept.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64MemoryAccessInfo {
    /// The `guest_virtual_address` field of the message is valid.
    pub gva_valid: bool,
    pub gva_gpa_valid: bool,
    pub hypercall_output_pending: bool,
    #[bits(5)]
    pub reserved1: u8,
}
2773
/// Memory cache types reported with memory intercepts.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvCacheType: u32 {
        #![expect(non_upper_case_globals)]
        HvCacheTypeUncached = 0,
        HvCacheTypeWriteCombining = 1,
        HvCacheTypeWriteThrough = 4,
        HvCacheTypeWriteProtected = 5,
        HvCacheTypeWriteBack = 6,
    }
}
2785
/// Intercept message delivered for an x64 memory access (0x50 bytes, per the
/// assertion below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MemoryInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub cache_type: HvCacheType,
    pub instruction_byte_count: u8,
    pub memory_access_info: HvX64MemoryAccessInfo,
    pub tpr_priority: u8,
    pub reserved: u8,
    pub guest_virtual_address: u64,
    pub guest_physical_address: u64,
    pub instruction_bytes: [u8; 16],
}

impl MessagePayload for HvX64MemoryInterceptMessage {}
const_assert!(size_of::<HvX64MemoryInterceptMessage>() == 0x50);
2802
/// Intercept message delivered for an ARM64 memory access (0x40 bytes, per the
/// assertion below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64MemoryInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub cache_type: HvCacheType,
    pub instruction_byte_count: u8,
    pub memory_access_info: HvArm64MemoryAccessInfo,
    pub reserved1: u16,
    pub instruction_bytes: [u8; 4],
    pub reserved2: u32,
    pub guest_virtual_address: u64,
    pub guest_physical_address: u64,
    pub syndrome: u64,
}

impl MessagePayload for HvArm64MemoryInterceptMessage {}
const_assert!(size_of::<HvArm64MemoryInterceptMessage>() == 0x40);
2820
/// Intercept message delivered for an ARM64 MMIO access (0x48 bytes, per the
/// assertion below).
#[repr(C)]
#[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)]
pub struct HvArm64MmioInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub guest_physical_address: u64,
    pub access_size: u32,
    pub data: [u8; 32],
    pub padding: u32,
}

impl MessagePayload for HvArm64MmioInterceptMessage {}
const_assert!(size_of::<HvArm64MmioInterceptMessage>() == 0x48);
2833
/// Intercept message delivered for an x64 MSR access.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MsrInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub msr_number: u32,
    pub reserved: u32,
    pub rdx: u64,
    pub rax: u64,
}

impl MessagePayload for HvX64MsrInterceptMessage {}
2845
/// Intercept message delivered for an x64 SIPI.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SipiInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub target_vp_index: u32,
    pub vector: u32,
}

impl MessagePayload for HvX64SipiInterceptMessage {}
2855
/// Message indicating which x64 synthetic interrupt sources are deliverable.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SynicSintDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    /// Bitmask of SINTs that are now deliverable.
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvX64SynicSintDeliverableMessage {}
2866
/// Message indicating which ARM64 synthetic interrupt sources are deliverable.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64SynicSintDeliverableMessage {
    pub header: HvArm64InterceptMessageHeader,
    /// Bitmask of SINTs that are now deliverable.
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvArm64SynicSintDeliverableMessage {}
2877
/// Message indicating a previously blocked interruption type is now deliverable.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptionDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    pub deliverable_type: HvX64PendingInterruptionType,
    pub rsvd: [u8; 3],
    pub rsvd2: u32,
}

impl MessagePayload for HvX64InterruptionDeliverableMessage {}
2888
/// Types of pending x64 interruptions.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvX64PendingInterruptionType: u8 {
        HV_X64_PENDING_INTERRUPT = 0,
        HV_X64_PENDING_NMI = 2,
        HV_X64_PENDING_EXCEPTION = 3,
        HV_X64_PENDING_SOFTWARE_INTERRUPT = 4,
        HV_X64_PENDING_PRIVILEGED_SOFTWARE_EXCEPTION = 5,
        HV_X64_PENDING_SOFTWARE_EXCEPTION = 6,
    }
}
2900
/// Intercept message delivered for an x64 hypercall, carrying the register
/// state used to pass hypercall arguments.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HypercallInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rbx: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub r8: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub xmm_registers: [AlignedU128; 6],
    pub flags: HvHypercallInterceptMessageFlags,
    pub rsvd2: [u32; 3],
}

impl MessagePayload for HvX64HypercallInterceptMessage {}
2918
/// Intercept message delivered for an ARM64 hypercall (HVC), carrying the
/// immediate and the general-purpose registers x0-x17.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64HypercallInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub immediate: u16,
    pub reserved: u16,
    pub flags: HvHypercallInterceptMessageFlags,
    pub x: [u64; 18],
}

impl MessagePayload for HvArm64HypercallInterceptMessage {}
2930
/// Flags shared by the x64 and ARM64 hypercall intercept messages.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvHypercallInterceptMessageFlags {
    pub is_isolated: bool,
    #[bits(31)]
    _reserved: u32,
}
2938
/// Intercept message delivered for an x64 CPUID instruction, including the
/// result the hypervisor would have returned by default.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64CpuidInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub default_result_rax: u64,
    pub default_result_rcx: u64,
    pub default_result_rdx: u64,
    pub default_result_rbx: u64,
}

impl MessagePayload for HvX64CpuidInterceptMessage {}
2954
/// Flags describing an intercepted x64 exception.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInfo {
    /// The `error_code` field of the message is valid.
    pub error_code_valid: bool,
    pub software_exception: bool,
    #[bits(6)]
    reserved: u8,
}
2963
/// Intercept message delivered for an x64 exception, carrying a full snapshot
/// of the general-purpose registers.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub vector: u16,
    pub exception_info: HvX64ExceptionInfo,
    pub instruction_byte_count: u8,
    pub error_code: u32,
    pub exception_parameter: u64,
    pub reserved: u64,
    pub instruction_bytes: [u8; 16],
    pub ds_segment: HvX64SegmentRegister,
    pub ss_segment: HvX64SegmentRegister,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub rsp: u64,
    pub rbp: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,
}

impl MessagePayload for HvX64ExceptionInterceptMessage {}
2996
/// Message indicating a VP's register state is invalid.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInvalidVpRegisterMessage {
    pub vp_index: u32,
    pub reserved: u32,
}

impl MessagePayload for HvInvalidVpRegisterMessage {}
3005
/// Message delivered when a VP performs an APIC EOI.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ApicEoiMessage {
    pub vp_index: u32,
    /// Vector whose in-service interrupt was completed.
    pub interrupt_vector: u32,
}

impl MessagePayload for HvX64ApicEoiMessage {}
3014
/// Message delivered when a VP hits an unrecoverable exception (e.g. triple
/// fault); carries only the standard header.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64UnrecoverableExceptionMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64UnrecoverableExceptionMessage {}
3022
/// Message delivered when a VP executes HLT; carries only the standard header.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HaltMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64HaltMessage {}
3030
/// Intercept message delivered for an ARM64 reset request.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64ResetInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub reset_type: HvArm64ResetType,
    pub reset_code: u32,
}

impl MessagePayload for HvArm64ResetInterceptMessage {}
3040
/// Reset kinds reported by [`HvArm64ResetInterceptMessage`].
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvArm64ResetType: u32 {
        POWER_OFF = 0,
        REBOOT = 1,
        SYSTEM_RESET = 2,
        HIBERNATE = 3,
    }
}
3050
/// Flags for [`HvX64RegisterInterceptMessage`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessageFlags {
    pub is_memory_op: bool,
    #[bits(7)]
    _rsvd: u8,
}
3058
/// Intercept message delivered for an intercepted x64 register access.
#[repr(C)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub flags: HvX64RegisterInterceptMessageFlags,
    pub rsvd: u8,
    pub rsvd2: u16,
    /// The register being accessed.
    pub register_name: HvX64RegisterName,
    pub access_info: HvX64RegisterAccessInfo,
}
3069
/// Raw 128-bit access payload for a register intercept.
#[repr(transparent)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterAccessInfo(u128);

impl HvX64RegisterAccessInfo {
    /// Wraps the register value being written as the access payload.
    pub fn new_source_value(source_value: HvRegisterValue) -> Self {
        Self(source_value.as_u128())
    }
}
3079
/// Interrupt delivery types for both x64 and ARM64. The `Fixed` values for the
/// two architectures intentionally share encoding 0.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterruptType : u32  {
        #![expect(non_upper_case_globals)]
        HvArm64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeLowestPriority = 0x0001,
        HvX64InterruptTypeSmi = 0x0002,
        HvX64InterruptTypeRemoteRead = 0x0003,
        HvX64InterruptTypeNmi = 0x0004,
        HvX64InterruptTypeInit = 0x0005,
        HvX64InterruptTypeSipi = 0x0006,
        HvX64InterruptTypeExtInt = 0x0007,
        HvX64InterruptTypeLocalInt0 = 0x0008,
        HvX64InterruptTypeLocalInt1 = 0x0009,
    }
}
3097
/// Interrupt control word combining the x86 and ARM64 flag layouts.
///
/// The declaration uses the fact that the bits for the different
/// architectures don't intersect. When (if ever) they do, we
/// will need to come up with a more elaborate abstraction.
/// The other possible downside is the lack of the compile-time
/// checks as adding that will require `guest_arch` support and
/// a large refactoring. To sum up, choosing expediency.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInterruptControl {
    // Accessed through the typed wrappers in the `impl` below.
    interrupt_type_value: u32,
    pub x86_level_triggered: bool,
    pub x86_logical_destination_mode: bool,
    pub arm64_asserted: bool,
    #[bits(29)]
    pub unused: u32,
}
3114
impl HvInterruptControl {
    /// Returns the interrupt type as a typed [`HvInterruptType`].
    pub fn interrupt_type(&self) -> HvInterruptType {
        HvInterruptType(self.interrupt_type_value())
    }

    /// Sets the interrupt type in place.
    pub fn set_interrupt_type(&mut self, ty: HvInterruptType) {
        self.set_interrupt_type_value(ty.0)
    }

    /// Builder-style variant of [`Self::set_interrupt_type`].
    pub fn with_interrupt_type(self, ty: HvInterruptType) -> Self {
        self.with_interrupt_type_value(ty.0)
    }
}
3128
/// VSM capabilities register (`HvRegisterVsmCapabilities`).
#[bitfield(u64)]
pub struct HvRegisterVsmCapabilities {
    pub dr6_shared: bool,
    pub mbec_vtl_mask: u16,
    pub deny_lower_vtl_startup: bool,
    pub supervisor_shadow_stack: bool,
    pub hardware_hvpt_available: bool,
    pub software_hvpt_available: bool,
    #[bits(6)]
    pub hardware_hvpt_range_bits: u8,
    pub intercept_page_available: bool,
    pub return_action_available: bool,
    /// If the VTL0 view of memory is mapped to the high address space, which is
    /// the highest legal physical address bit.
    ///
    /// Only available in VTL2.
    pub vtl0_alias_map_available: bool,
    /// If the [`HvRegisterVsmPartitionConfig`] register has support for
    /// `intercept_not_present`.
    ///
    /// Only available in VTL2.
    pub intercept_not_present_available: bool,
    pub install_intercept_ex: bool,
    /// Only available in VTL2.
    pub intercept_system_reset_available: bool,
    #[bits(31)]
    pub reserved: u64,
}
3157
/// VSM partition configuration register (`HvRegisterVsmPartitionConfig`).
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionConfig {
    pub enable_vtl_protection: bool,
    #[bits(4)]
    pub default_vtl_protection_mask: u8,
    pub zero_memory_on_reset: bool,
    pub deny_lower_vtl_startup: bool,
    pub intercept_acceptance: bool,
    pub intercept_enable_vtl_protection: bool,
    pub intercept_vp_startup: bool,
    pub intercept_cpuid_unimplemented: bool,
    pub intercept_unrecoverable_exception: bool,
    pub intercept_page: bool,
    pub intercept_restore_partition_time: bool,
    /// The hypervisor will send all unmapped GPA intercepts to VTL2 rather than
    /// the host.
    pub intercept_not_present: bool,
    pub intercept_system_reset: bool,
    #[bits(48)]
    pub reserved: u64,
}
3179
/// VSM partition status register (`HvRegisterVsmPartitionStatus`).
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionStatus {
    /// Bitmask of VTLs enabled for the partition.
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(4)]
    pub maximum_vtl: u8,
    #[bits(16)]
    pub mbec_enabled_vtl_set: u16,
    #[bits(4)]
    pub supervisor_shadow_stack_enabled_vtl_set: u8,
    #[bits(24)]
    pub reserved: u64,
}
3193
/// Guest-visible VSM partition configuration register.
#[bitfield(u64)]
pub struct HvRegisterGuestVsmPartitionConfig {
    #[bits(4)]
    pub maximum_vtl: u8,
    #[bits(60)]
    pub reserved: u64,
}
3201
/// Per-VP VSM status register (`HvRegisterVsmVpStatus`).
#[bitfield(u64)]
pub struct HvRegisterVsmVpStatus {
    #[bits(4)]
    pub active_vtl: u8,
    pub active_mbec_enabled: bool,
    #[bits(11)]
    pub reserved_mbz0: u16,
    /// Bitmask of VTLs enabled on this VP.
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(32)]
    pub reserved_mbz1: u32,
}
3214
/// Offsets of the hypercall code page entry points for VTL call/return.
#[bitfield(u64)]
pub struct HvRegisterVsmCodePageOffsets {
    #[bits(12)]
    pub call_offset: u16,
    #[bits(12)]
    pub return_offset: u16,
    #[bits(40)]
    pub reserved: u64,
}
3224
/// Saved state for a single synthetic timer.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvStimerState {
    /// Nonzero if an expiration message could not yet be delivered.
    pub undelivered_message_pending: u32,
    pub reserved: u32,
    pub config: u64,
    pub count: u64,
    pub adjustment: u64,
    pub undelivered_expiration_time: u64,
}
3235
/// Saved state for all four synthetic timers of a VP.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvSyntheticTimersState {
    pub timers: [HvStimerState; 4],
    pub reserved: [u64; 5],
}
3242
/// VP internal activity state (suspend reasons).
#[bitfield(u64)]
pub struct HvInternalActivityRegister {
    pub startup_suspend: bool,
    pub halt_suspend: bool,
    pub idle_suspend: bool,
    #[bits(61)]
    pub reserved: u64,
}
3251
3252#[bitfield(u64)]
3253pub struct HvSynicSint {
3254    pub vector: u8,
3255    _reserved: u8,
3256    pub masked: bool,
3257    pub auto_eoi: bool,
3258    pub polling: bool,
3259    _reserved2: bool,
3260    pub proxy: bool,
3261    #[bits(43)]
3262    _reserved2: u64,
3263}
3264
/// SynIC control register (`SCONTROL`).
#[bitfield(u64)]
pub struct HvSynicScontrol {
    pub enabled: bool,
    #[bits(63)]
    _reserved: u64,
}
3271
/// SynIC message page (SIMP) / event flags page (SIEFP) register layout.
#[bitfield(u64)]
pub struct HvSynicSimpSiefp {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    /// Guest page number of the message/event flags page.
    #[bits(52)]
    pub base_gpn: u64,
}
3280
/// Synthetic timer configuration register.
#[bitfield(u64)]
pub struct HvSynicStimerConfig {
    pub enabled: bool,
    pub periodic: bool,
    pub lazy: bool,
    pub auto_enable: bool,
    // Note: On ARM64 the top 3 bits of apic_vector are reserved.
    pub apic_vector: u8,
    /// Deliver directly as an interrupt rather than via a SINT message.
    pub direct_mode: bool,
    #[bits(3)]
    pub _reserved1: u8,
    #[bits(4)]
    pub sint: u8,
    #[bits(44)]
    pub _reserved2: u64,
}
3297
// Event type codes used in the `event_type` field of the pending event
// registers below.
pub const HV_X64_PENDING_EVENT_EXCEPTION: u8 = 0;
pub const HV_X64_PENDING_EVENT_MEMORY_INTERCEPT: u8 = 1;
pub const HV_X64_PENDING_EVENT_NESTED_MEMORY_INTERCEPT: u8 = 2;
pub const HV_X64_PENDING_EVENT_VIRTUALIZATION_FAULT: u8 = 3;
pub const HV_X64_PENDING_EVENT_HYPERCALL_OUTPUT: u8 = 4;
pub const HV_X64_PENDING_EVENT_EXT_INT: u8 = 5;
pub const HV_X64_PENDING_EVENT_SHADOW_IPT: u8 = 6;
3305
/// Provides information about a pending exception event
/// (`event_type == HV_X64_PENDING_EVENT_EXCEPTION`).
#[bitfield(u128)]
pub struct HvX64PendingExceptionEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    pub deliver_error_code: bool,
    #[bits(7)]
    pub reserved1: u8,
    pub vector: u16,
    pub error_code: u32,
    pub exception_parameter: u64,
}
3322
/// Provides information about a virtualization fault
/// (`event_type == HV_X64_PENDING_EVENT_VIRTUALIZATION_FAULT`).
#[bitfield(u128)]
pub struct HvX64PendingVirtualizationFaultEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    pub reserved1: u8,
    pub parameter0: u16,
    pub code: u32,
    pub parameter1: u64,
}
3337
/// Part of [`HvX64PendingEventMemoryIntercept`]: the common pending-event
/// header byte (pending flag + 3-bit event type).
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptPendingEventHeader {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    _reserved0: u8,
}
3348
/// Part of [`HvX64PendingEventMemoryIntercept`]
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptAccessFlags {
    /// Indicates if the guest linear address is valid.
    pub guest_linear_address_valid: bool,
    /// Indicates that the memory intercept was caused by an access to a guest physical address
    /// (instead of a page table as part of a page table walk).
    pub caused_by_gpa_access: bool,
    #[bits(6)]
    _reserved1: u8,
}
3361
/// Provides information about a memory intercept
/// (`event_type == HV_X64_PENDING_EVENT_MEMORY_INTERCEPT`); 0x20 bytes, per
/// the assertion below.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryIntercept {
    pub event_header: HvX64PendingEventMemoryInterceptPendingEventHeader,
    /// VTL at which the memory intercept is targeted.
    /// Note: This field must be in Reg0.
    pub target_vtl: u8,
    /// Type of the memory access.
    pub access_type: HvInterceptAccessType,
    pub access_flags: HvX64PendingEventMemoryInterceptAccessFlags,
    pub _reserved2: u32,
    /// The guest linear address that caused the fault.
    pub guest_linear_address: u64,
    /// The guest physical address that caused the memory intercept.
    pub guest_physical_address: u64,
    pub _reserved3: u64,
}
const_assert!(size_of::<HvX64PendingEventMemoryIntercept>() == 0x20);
3381
3382//
3383// Provides information about pending hypercall output.
3384//
3385#[bitfield(u128)]
3386pub struct HvX64PendingHypercallOutputEvent {
3387    pub event_pending: bool,
3388    #[bits(3)]
3389    pub event_type: u8,
3390    #[bits(4)]
3391    pub reserved0: u8,
3392
3393    // Whether the hypercall has been retired.
3394    pub retired: bool,
3395
3396    #[bits(23)]
3397    pub reserved1: u32,
3398
3399    // Indicates the number of bytes to be written starting from OutputGpa.
3400    pub output_size: u32,
3401
3402    // Indicates the output GPA, which is not required to be page-aligned.
3403    pub output_gpa: u64,
3404}
3405
/// Provides information about a directly asserted ExtInt
/// (`event_type == HV_X64_PENDING_EVENT_EXT_INT`).
#[bitfield(u128)]
pub struct HvX64PendingExtIntEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,
    pub vector: u8,
    #[bits(48)]
    pub reserved1: u64,
    pub reserved2: u64,
}
3419
/// Provides information about pending IPT shadowing
/// (`event_type == HV_X64_PENDING_EVENT_SHADOW_IPT`).
// NOTE(review): `event_type` here is 4 bits, unlike the 1+3+4 header layout
// used by every other pending event above — confirm against the hypervisor
// headers before assuming this is intentional.
#[bitfield(u128)]
pub struct HvX64PendingShadowIptEvent {
    pub event_pending: bool,
    #[bits(4)]
    pub event_type: u8,
    #[bits(59)]
    pub reserved0: u64,

    pub reserved1: u64,
}
3431
/// Generic view of the first pending-event register: the common header plus
/// 120 bits of event-specific data.
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventReg0 {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved: u8,
    #[bits(120)]
    pub data: u128,
}
3443
/// A full pending event: two 128-bit registers (0x20 bytes, per the assertion
/// below).
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEvent {
    pub reg_0: HvX64PendingEventReg0,
    pub reg_1: AlignedU128,
}
const_assert!(size_of::<HvX64PendingEvent>() == 0x20);
3451
3452impl From<HvX64PendingExceptionEvent> for HvX64PendingEvent {
3453    fn from(exception_event: HvX64PendingExceptionEvent) -> Self {
3454        HvX64PendingEvent {
3455            reg_0: HvX64PendingEventReg0::from(u128::from(exception_event)),
3456            reg_1: 0u128.into(),
3457        }
3458    }
3459}
3460
/// Pending interruption register for an x64 VP.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingInterruptionRegister {
    pub interruption_pending: bool,
    #[bits(3)]
    pub interruption_type: u8,
    pub deliver_error_code: bool,
    #[bits(4)]
    pub instruction_length: u8,
    pub nested_event: bool,
    #[bits(6)]
    pub reserved: u8,
    pub interruption_vector: u16,
    pub error_code: u32,
}
3476
/// Interrupt blocking state for an x64 VP.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptStateRegister {
    pub interrupt_shadow: bool,
    pub nmi_masked: bool,
    #[bits(62)]
    pub reserved: u64,
}
3485
/// Hints for instruction emulation.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInstructionEmulatorHintsRegister {
    /// Indicates whether any secure VTL is enabled for the partition.
    pub partition_secure_vtl_enabled: bool,
    /// Indicates whether kernel or user execute control architecturally
    /// applies to execute accesses.
    pub mbec_user_execute_control: bool,
    #[bits(62)]
    pub _padding: u64,
}
3497
/// Event kinds for the ARM64 pending-event registers.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvAarch64PendingEventType: u8 {
        EXCEPTION = 0,
        SYNTHETIC_EXCEPTION = 1,
        HYPERCALL_OUTPUT = 2,
    }
}
3506
3507// Support for bitfield structures.
3508impl HvAarch64PendingEventType {
3509    const fn from_bits(val: u8) -> Self {
3510        HvAarch64PendingEventType(val)
3511    }
3512
3513    const fn into_bits(self) -> u8 {
3514        self.0
3515    }
3516}
3517
3518#[bitfield[u8]]
3519#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
3520pub struct HvAarch64PendingEventHeader {
3521    #[bits(1)]
3522    pub event_pending: bool,
3523    #[bits(3)]
3524    pub event_type: HvAarch64PendingEventType,
3525    #[bits(4)]
3526    pub reserved: u8,
3527}
3528
/// ARM64 pending exception event payload.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingExceptionEvent {
    pub header: HvAarch64PendingEventHeader,
    pub _padding: [u8; 7],
    pub syndrome: u64,
    pub fault_address: u64,
}
3537
3538#[bitfield[u8]]
3539#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
3540pub struct HvAarch64PendingHypercallOutputEventFlags {
3541    #[bits(1)]
3542    pub retired: u8,
3543    #[bits(7)]
3544    pub reserved: u8,
3545}
3546
/// ARM64 pending hypercall-output event payload.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingHypercallOutputEvent {
    pub header: HvAarch64PendingEventHeader,
    pub flags: HvAarch64PendingHypercallOutputEventFlags,
    pub reserved: u16,
    pub output_size: u32,
    pub output_gpa: u64,
}
3556
/// Generic view of an ARM64 pending event: header plus raw event data.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingEvent {
    pub header: HvAarch64PendingEventHeader,
    pub event_data: [u8; 15],
    pub _padding: [u64; 2],
}
3564
/// Permission and control flags for mapping guest physical pages.
#[bitfield(u32)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMapGpaFlags {
    pub readable: bool,
    pub writable: bool,
    pub kernel_executable: bool,
    pub user_executable: bool,
    pub supervisor_shadow_stack: bool,
    pub paging_writability: bool,
    pub verify_paging_writability: bool,
    #[bits(8)]
    _padding0: u32,
    pub adjustable: bool,
    #[bits(16)]
    _padding1: u32,
}
3581
/// [`HvMapGpaFlags`] with no permissions set
pub const HV_MAP_GPA_PERMISSIONS_NONE: HvMapGpaFlags = HvMapGpaFlags::new();
/// [`HvMapGpaFlags`] with read, write, and both execute permissions set.
pub const HV_MAP_GPA_PERMISSIONS_ALL: HvMapGpaFlags = HvMapGpaFlags::new()
    .with_readable(true)
    .with_writable(true)
    .with_kernel_executable(true)
    .with_user_executable(true);
3589
/// Full monitor page layout, used for monitored notifications.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPage {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    /// 4 groups of 32 trigger flags each.
    pub trigger_group: [HvMonitorTriggerGroup; 4],
    pub reserved2: [u64; 3],
    pub next_check_time: [[u32; 32]; 4],
    pub latency: [[u16; 32]; 4],
    pub reserved3: [u64; 32],
    /// Per-flag notification parameters, indexed [group][flag].
    pub parameter: [[HvMonitorParameter; 32]; 4],
    pub reserved4: [u8; 1984],
}
3603
/// Prefix of [`HvMonitorPage`] containing only the trigger state and groups.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPageSmall {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    pub trigger_group: [HvMonitorTriggerGroup; 4],
}
3611
/// One group of 32 monitor trigger flags: pending and armed bitmasks.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerGroup {
    pub pending: u32,
    pub armed: u32,
}
3618
/// Notification parameters for one monitor trigger flag.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorParameter {
    pub connection_id: u32,
    pub flag_number: u16,
    pub reserved: u16,
}
3626
/// Monitor page trigger state: one enable bit per trigger group.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerState {
    #[bits(4)]
    pub group_enable: u32,
    #[bits(28)]
    pub reserved: u32,
}
3635
/// ACPI PM timer description.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvPmTimerInfo {
    /// I/O port the PM timer is accessible at.
    #[bits(16)]
    pub port: u16,
    #[bits(1)]
    pub width_24: bool,
    #[bits(1)]
    pub enabled: bool,
    #[bits(14)]
    pub reserved1: u32,
    #[bits(32)]
    pub reserved2: u32,
}
3650
/// SEV control register: encrypted-state enable plus the VMSA page location.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterSevControl {
    pub enable_encrypted_state: bool,
    #[bits(11)]
    _rsvd1: u64,
    /// Guest page number of the VMSA.
    #[bits(52)]
    pub vmsa_gpa_page_number: u64,
}
3660
/// Reference TSC page register: enable bit plus the page's guest page number.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterReferenceTsc {
    pub enable: bool,
    #[bits(11)]
    pub reserved_p: u64,
    #[bits(52)]
    pub gpn: u64,
}
3670
/// Layout of the reference TSC page shared with the guest.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvReferenceTscPage {
    /// Sequence number; `HV_REFERENCE_TSC_SEQUENCE_INVALID` (0) means the
    /// page contents are not valid.
    pub tsc_sequence: u32,
    pub reserved1: u32,
    pub tsc_scale: u64,
    pub tsc_offset: i64,
    pub timeline_bias: u64,
    pub tsc_multiplier: u64,
    pub reserved2: [u64; 507],
}

/// `tsc_sequence` value indicating the reference TSC page is invalid.
pub const HV_REFERENCE_TSC_SEQUENCE_INVALID: u32 = 0;
3684
/// Flags for [`HvX64VmgexitInterceptMessage`].
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageFlags {
    /// The `ghcb_page` field of the message is valid.
    pub ghcb_page_valid: bool,
    pub ghcb_request_error: bool,
    #[bits(62)]
    _reserved: u64,
}
3693
/// Standard-usage GHCB page contents carried in a VMGEXIT intercept message.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPageStandard {
    pub ghcb_protocol_version: u16,
    _reserved: [u16; 3],
    pub sw_exit_code: u64,
    pub sw_exit_info1: u64,
    pub sw_exit_info2: u64,
    pub sw_scratch: u64,
}
3704
/// GHCB page snapshot carried in a VMGEXIT intercept message.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPage {
    /// GHCB usage discriminator; `standard` applies to the standard usage.
    pub ghcb_usage: u32,
    _reserved: u32,
    pub standard: HvX64VmgexitInterceptMessageGhcbPageStandard,
}
3712
/// Intercept message delivered for a VMGEXIT from an SEV-ES/SNP guest.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub ghcb_msr: u64,
    pub flags: HvX64VmgexitInterceptMessageFlags,
    pub ghcb_page: HvX64VmgexitInterceptMessageGhcbPage,
}

impl MessagePayload for HvX64VmgexitInterceptMessage {}
3723
/// VP assist page register: enable bit plus the page's guest page number.
#[bitfield(u64)]
pub struct HvRegisterVpAssistPage {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    #[bits(52)]
    pub gpa_page_number: u64,
}
3732
/// Dirty bits for the x64 register page, indicating which register groups
/// have been modified.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterPageDirtyFlags {
    pub general_purpose: bool,
    pub instruction_pointer: bool,
    pub xmm: bool,
    pub segments: bool,
    pub flags: bool,
    #[bits(27)]
    reserved: u32,
}
3744
/// Layout of the x64 register page shared between hypervisor and guest
/// (exactly one 4 KiB page, per the assertion below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterPage {
    pub version: u16,
    pub is_valid: u8,
    pub vtl: u8,
    pub dirty: HvX64RegisterPageDirtyFlags,
    pub gp_registers: [u64; 16],
    pub rip: u64,
    pub rflags: u64,
    pub reserved: u64,
    pub xmm: [u128; 6],
    pub segment: [u128; 6],
    // Misc. control registers (cannot be set via this interface).
    pub cr0: u64,
    pub cr3: u64,
    pub cr4: u64,
    pub cr8: u64,
    pub efer: u64,
    pub dr7: u64,
    pub pending_interruption: HvX64PendingInterruptionRegister,
    pub interrupt_state: HvX64InterruptStateRegister,
    pub instruction_emulation_hints: HvInstructionEmulatorHintsRegister,
    // Pads the structure out to exactly HV_PAGE_SIZE.
    pub reserved_end: [u8; 3672],
}

const _: () = assert!(size_of::<HvX64RegisterPage>() == HV_PAGE_SIZE_USIZE);
3772
/// Dirty bits for the ARM64 register page, indicating which register groups
/// have been modified.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64RegisterPageDirtyFlags {
    _unused: bool,
    pub instruction_pointer: bool,
    pub processor_state: bool,
    pub control_registers: bool,
    #[bits(28)]
    reserved: u32,
}
3783
/// Layout of the ARM64 register page shared between hypervisor and guest
/// (exactly one 4 KiB page, per the assertion below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64RegisterPage {
    pub version: u16,
    pub is_valid: u8,
    pub vtl: u8,
    pub dirty: HvAarch64RegisterPageDirtyFlags,
    // Reserved.
    pub _rsvd: [u64; 33],
    // Instruction pointer.
    pub pc: u64,
    // Processor state.
    pub cpsr: u64,
    // Control registers.
    pub sctlr_el1: u64,
    pub tcr_el1: u64,
    // Pads the structure out to exactly HV_PAGE_SIZE.
    pub reserved_end: [u8; 3792],
}

const _: () = assert!(size_of::<HvAarch64RegisterPage>() == HV_PAGE_SIZE_USIZE);
3805
/// VSM register controlling whether to wait for the TLB lock.
#[bitfield(u64)]
pub struct HvRegisterVsmWpWaitForTlbLock {
    pub wait: bool,
    #[bits(63)]
    _reserved: u64,
}
3812
/// Per-VP secure VTL configuration register.
#[bitfield(u64)]
pub struct HvRegisterVsmVpSecureVtlConfig {
    pub mbec_enabled: bool,
    pub tlb_locked: bool,
    pub supervisor_shadow_stack_enabled: bool,
    pub hardware_hvpt_enabled: bool,
    #[bits(60)]
    _reserved: u64,
}
3822
/// Control register / MSR intercept control: each bit enables intercepts for
/// the named register or MSR access.
#[bitfield(u64)]
pub struct HvRegisterCrInterceptControl {
    pub cr0_write: bool,
    pub cr4_write: bool,
    pub xcr0_write: bool,
    pub ia32_misc_enable_read: bool,
    pub ia32_misc_enable_write: bool,
    pub msr_lstar_read: bool,
    pub msr_lstar_write: bool,
    pub msr_star_read: bool,
    pub msr_star_write: bool,
    pub msr_cstar_read: bool,
    pub msr_cstar_write: bool,
    pub apic_base_msr_read: bool,
    pub apic_base_msr_write: bool,
    pub msr_efer_read: bool,
    pub msr_efer_write: bool,
    pub gdtr_write: bool,
    pub idtr_write: bool,
    pub ldtr_write: bool,
    pub tr_write: bool,
    pub msr_sysenter_cs_write: bool,
    pub msr_sysenter_eip_write: bool,
    pub msr_sysenter_esp_write: bool,
    pub msr_sfmask_write: bool,
    pub msr_tsc_aux_write: bool,
    pub msr_sgx_launch_control_write: bool,
    pub msr_xss_write: bool,
    pub msr_scet_write: bool,
    pub msr_pls_ssp_write: bool,
    pub msr_interrupt_ssp_table_addr_write: bool,
    #[bits(35)]
    _rsvd_z: u64,
}