hvdef/
lib.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Microsoft hypervisor definitions.
5
6#![expect(missing_docs)]
7#![no_std]
8
9use bitfield_struct::bitfield;
10use core::fmt::Debug;
11use core::mem::size_of;
12use open_enum::open_enum;
13use static_assertions::const_assert;
14use zerocopy::FromBytes;
15use zerocopy::FromZeros;
16use zerocopy::Immutable;
17use zerocopy::IntoBytes;
18use zerocopy::KnownLayout;
19
/// The size of a hypervisor page, in bytes.
pub const HV_PAGE_SIZE: u64 = 4096;
/// [`HV_PAGE_SIZE`] as a `usize`, for buffer sizing and indexing.
pub const HV_PAGE_SIZE_USIZE: usize = 4096;
/// log2 of [`HV_PAGE_SIZE`].
pub const HV_PAGE_SHIFT: u64 = 12;

/// Partition ID value that refers to the calling partition itself.
pub const HV_PARTITION_ID_SELF: u64 = u64::MAX;
/// VP index value that refers to the calling virtual processor itself.
pub const HV_VP_INDEX_SELF: u32 = 0xfffffffe;
26
// CPUID leaf numbers defined by the hypervisor interface.
pub const HV_CPUID_FUNCTION_VERSION_AND_FEATURES: u32 = 0x00000001;
pub const HV_CPUID_FUNCTION_HV_VENDOR_AND_MAX_FUNCTION: u32 = 0x40000000;
pub const HV_CPUID_FUNCTION_HV_INTERFACE: u32 = 0x40000001;
pub const HV_CPUID_FUNCTION_MS_HV_VERSION: u32 = 0x40000002;
pub const HV_CPUID_FUNCTION_MS_HV_FEATURES: u32 = 0x40000003;
pub const HV_CPUID_FUNCTION_MS_HV_ENLIGHTENMENT_INFORMATION: u32 = 0x40000004;
pub const HV_CPUID_FUNCTION_MS_HV_IMPLEMENTATION_LIMITS: u32 = 0x40000005;
pub const HV_CPUID_FUNCTION_MS_HV_HARDWARE_FEATURES: u32 = 0x40000006;
pub const HV_CPUID_FUNCTION_MS_HV_ISOLATION_CONFIGURATION: u32 = 0x4000000C;

// CPUID leaf numbers used by the virtualization stack.
pub const VIRTUALIZATION_STACK_CPUID_VENDOR: u32 = 0x40000080;
pub const VIRTUALIZATION_STACK_CPUID_INTERFACE: u32 = 0x40000081;
pub const VIRTUALIZATION_STACK_CPUID_PROPERTIES: u32 = 0x40000082;
40
// Bit flags returned (in EAX) when querying the
// VIRTUALIZATION_STACK_CPUID_PROPERTIES leaf.

/// The current partition is considered "portable": the virtualization stack may
/// attempt to bring up the partition on another physical machine.
pub const VS1_PARTITION_PROPERTIES_EAX_IS_PORTABLE: u32 = 0x000000001;
/// The current partition has a synthetic debug device available to it.
pub const VS1_PARTITION_PROPERTIES_EAX_DEBUG_DEVICE_PRESENT: u32 = 0x000000002;
/// Extended I/O APIC RTEs are supported for the current partition.
pub const VS1_PARTITION_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE: u32 = 0x000000004;
50
/// Partition privilege flags: which synthetic MSRs and hypercalls the
/// partition may use. These form the low 64 bits of [`HvFeatures`].
#[bitfield(u64)]
pub struct HvPartitionPrivilege {
    // access to virtual msrs
    pub access_vp_runtime_msr: bool,
    pub access_partition_reference_counter: bool,
    pub access_synic_msrs: bool,
    pub access_synthetic_timer_msrs: bool,
    pub access_apic_msrs: bool,
    pub access_hypercall_msrs: bool,
    pub access_vp_index: bool,
    pub access_reset_msr: bool,
    pub access_stats_msr: bool,
    pub access_partition_reference_tsc: bool,
    pub access_guest_idle_msr: bool,
    pub access_frequency_msrs: bool,
    pub access_debug_msrs: bool,
    pub access_reenlightenment_ctrls: bool,
    pub access_root_scheduler_msr: bool,
    pub access_tsc_invariant_controls: bool,
    _reserved1: u16,

    // Access to hypercalls
    pub create_partitions: bool,
    pub access_partition_id: bool,
    pub access_memory_pool: bool,
    pub adjust_message_buffers: bool,
    pub post_messages: bool,
    pub signal_events: bool,
    pub create_port: bool,
    pub connect_port: bool,
    pub access_stats: bool,
    #[bits(2)]
    _reserved2: u64,
    pub debugging: bool,
    pub cpu_management: bool,
    pub configure_profiler: bool,
    pub access_vp_exit_tracing: bool,
    pub enable_extended_gva_ranges_flush_va_list: bool,
    pub access_vsm: bool,
    pub access_vp_registers: bool,
    _unused_bit: bool,
    pub fast_hypercall_output: bool,
    pub enable_extended_hypercalls: bool,
    pub start_virtual_processor: bool,
    pub isolation: bool,
    #[bits(9)]
    _reserved3: u64,
}
99
open_enum! {
    /// The isolation architecture, if any, in use by a partition.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvPartitionIsolationType: u8 {
        // No isolation.
        NONE = 0,
        // Virtualization-based security (software) isolation.
        VBS = 1,
        // AMD SEV-SNP hardware isolation.
        SNP = 2,
        // Intel TDX hardware isolation.
        TDX = 3,
    }
}
109
/// Hypervisor feature flags, as reported by the
/// HV_CPUID_FUNCTION_MS_HV_FEATURES CPUID leaf. Convert to/from the raw
/// CPUID register values with [`HvFeatures::from_cpuid`] /
/// [`HvFeatures::into_cpuid`].
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvFeatures {
    /// The privileges granted to this partition (the low 64 bits).
    #[bits(64)]
    pub privileges: HvPartitionPrivilege,

    #[bits(4)]
    pub max_supported_cstate: u32,
    pub hpet_needed_for_c3_power_state_deprecated: bool,
    pub invariant_mperf_available: bool,
    pub supervisor_shadow_stack_available: bool,
    pub arch_pmu_available: bool,
    pub exception_trap_intercept_available: bool,
    #[bits(23)]
    reserved: u32,

    pub mwait_available_deprecated: bool,
    pub guest_debugging_available: bool,
    pub performance_monitors_available: bool,
    pub cpu_dynamic_partitioning_available: bool,
    pub xmm_registers_for_fast_hypercall_available: bool,
    pub guest_idle_available: bool,
    pub hypervisor_sleep_state_support_available: bool,
    pub numa_distance_query_available: bool,
    pub frequency_regs_available: bool,
    pub synthetic_machine_check_available: bool,
    pub guest_crash_regs_available: bool,
    pub debug_regs_available: bool,
    pub npiep1_available: bool,
    pub disable_hypervisor_available: bool,
    pub extended_gva_ranges_for_flush_virtual_address_list_available: bool,
    pub fast_hypercall_output_available: bool,
    pub svm_features_available: bool,
    pub sint_polling_mode_available: bool,
    pub hypercall_msr_lock_available: bool,
    pub direct_synthetic_timers: bool,
    pub register_pat_available: bool,
    pub register_bndcfgs_available: bool,
    pub watchdog_timer_available: bool,
    pub synthetic_time_unhalted_timer_available: bool,
    pub device_domains_available: bool,    // HDK only.
    pub s1_device_domains_available: bool, // HDK only.
    pub lbr_available: bool,
    pub ipt_available: bool,
    pub cross_vtl_flush_available: bool,
    pub idle_spec_ctrl_available: bool,
    pub translate_gva_flags_available: bool,
    pub apic_eoi_intercept_available: bool,
}
159
impl HvFeatures {
    /// Reinterprets the four 32-bit CPUID output registers as an
    /// `HvFeatures` value (a byte-for-byte transmute).
    pub fn from_cpuid(cpuid: [u32; 4]) -> Self {
        zerocopy::transmute!(cpuid)
    }

    /// Converts this value back into the four 32-bit CPUID register values.
    pub fn into_cpuid(self) -> [u32; 4] {
        zerocopy::transmute!(self)
    }
}
169
/// Enlightenment recommendations, as reported by the
/// HV_CPUID_FUNCTION_MS_HV_ENLIGHTENMENT_INFORMATION CPUID leaf.
#[bitfield(u128)]
pub struct HvEnlightenmentInformation {
    pub use_hypercall_for_address_space_switch: bool,
    pub use_hypercall_for_local_flush: bool,
    pub use_hypercall_for_remote_flush_and_local_flush_entire: bool,
    pub use_apic_msrs: bool,
    pub use_hv_register_for_reset: bool,
    pub use_relaxed_timing: bool,
    pub use_dma_remapping_deprecated: bool,
    pub use_interrupt_remapping_deprecated: bool,
    pub use_x2_apic_msrs: bool,
    pub deprecate_auto_eoi: bool,
    pub use_synthetic_cluster_ipi: bool,
    pub use_ex_processor_masks: bool,
    pub nested: bool,
    pub use_int_for_mbec_system_calls: bool,
    pub use_vmcs_enlightenments: bool,
    pub use_synced_timeline: bool,
    pub core_scheduler_requested: bool,
    pub use_direct_local_flush_entire: bool,
    pub no_non_architectural_core_sharing: bool,
    pub use_x2_apic: bool,
    pub restore_time_on_resume: bool,
    pub use_hypercall_for_mmio_access: bool,
    pub use_gpa_pinning_hypercall: bool,
    pub wake_vps: bool,
    _reserved: u8,
    /// Recommended number of spin-wait iterations before notifying the
    /// hypervisor of a long spin wait.
    pub long_spin_wait_count: u32,
    #[bits(7)]
    pub implemented_physical_address_bits: u32,
    #[bits(25)]
    _reserved1: u32,
    _reserved2: u32,
}
204
/// Hardware feature usage, as reported by the
/// HV_CPUID_FUNCTION_MS_HV_HARDWARE_FEATURES CPUID leaf.
#[bitfield(u128)]
pub struct HvHardwareFeatures {
    pub apic_overlay_assist_in_use: bool,
    pub msr_bitmaps_in_use: bool,
    pub architectural_performance_counters_in_use: bool,
    pub second_level_address_translation_in_use: bool,
    pub dma_remapping_in_use: bool,
    pub interrupt_remapping_in_use: bool,
    pub memory_patrol_scrubber_present: bool,
    pub dma_protection_in_use: bool,
    pub hpet_requested: bool,
    pub synthetic_timers_volatile: bool,
    #[bits(4)]
    pub hypervisor_level: u32,
    pub physical_destination_mode_required: bool,
    pub use_vmfunc_for_alias_map_switch: bool,
    pub hv_register_for_memory_zeroing_supported: bool,
    pub unrestricted_guest_supported: bool,
    pub rdt_afeatures_supported: bool,
    pub rdt_mfeatures_supported: bool,
    pub child_perfmon_pmu_supported: bool,
    pub child_perfmon_lbr_supported: bool,
    pub child_perfmon_ipt_supported: bool,
    pub apic_emulation_supported: bool,
    pub child_x2_apic_recommended: bool,
    pub hardware_watchdog_reserved: bool,
    pub device_access_tracking_supported: bool,
    pub hardware_gpa_access_tracking_supported: bool,
    #[bits(4)]
    _reserved: u32,

    pub device_domain_input_width: u8,
    #[bits(24)]
    _reserved1: u32,
    _reserved2: u32,
    _reserved3: u32,
}
242
/// Isolation configuration, as reported by the
/// HV_CPUID_FUNCTION_MS_HV_ISOLATION_CONFIGURATION CPUID leaf.
#[bitfield(u128)]
pub struct HvIsolationConfiguration {
    /// Whether a paravisor is present in the partition.
    pub paravisor_present: bool,
    #[bits(31)]
    pub _reserved0: u32,

    /// The isolation architecture in use.
    // NOTE(review): values presumably match `HvPartitionIsolationType` — confirm.
    #[bits(4)]
    pub isolation_type: u8,
    _reserved11: bool,
    /// Whether a shared GPA boundary is in effect.
    pub shared_gpa_boundary_active: bool,
    // NOTE(review): presumably the bit index of the shared GPA boundary — confirm.
    #[bits(6)]
    pub shared_gpa_boundary_bits: u8,
    #[bits(20)]
    _reserved12: u32,
    _reserved2: u32,
    _reserved3: u32,
}
260
open_enum! {
    /// Hypercall call codes, as passed in the `code` field of the hypercall
    /// input value ([`hypercall::Control`]).
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HypercallCode: u16 {
        #![allow(non_upper_case_globals)]

        HvCallSwitchVirtualAddressSpace = 0x0001,
        HvCallFlushVirtualAddressSpace = 0x0002,
        HvCallFlushVirtualAddressList = 0x0003,
        HvCallNotifyLongSpinWait = 0x0008,
        HvCallSendSyntheticClusterIpi = 0x000b,
        HvCallModifyVtlProtectionMask = 0x000c,
        HvCallEnablePartitionVtl = 0x000d,
        HvCallEnableVpVtl = 0x000f,
        HvCallVtlCall = 0x0011,
        HvCallVtlReturn = 0x0012,
        HvCallFlushVirtualAddressSpaceEx = 0x0013,
        HvCallFlushVirtualAddressListEx = 0x0014,
        HvCallSendSyntheticClusterIpiEx = 0x0015,
        HvCallInstallIntercept = 0x004d,
        HvCallGetVpRegisters = 0x0050,
        HvCallSetVpRegisters = 0x0051,
        HvCallTranslateVirtualAddress = 0x0052,
        HvCallPostMessage = 0x005C,
        HvCallSignalEvent = 0x005D,
        HvCallOutputDebugCharacter = 0x0071,
        HvCallRetargetDeviceInterrupt = 0x007e,
        HvCallAssertVirtualInterrupt = 0x0094,
        HvCallStartVirtualProcessor = 0x0099,
        HvCallGetVpIndexFromApicId = 0x009A,
        HvCallTranslateVirtualAddressEx = 0x00AC,
        HvCallCheckForIoIntercept = 0x00ad,
        HvCallFlushGuestPhysicalAddressSpace = 0x00AF,
        HvCallFlushGuestPhysicalAddressList = 0x00B0,
        HvCallSignalEventDirect = 0x00C0,
        HvCallPostMessageDirect = 0x00C1,
        HvCallCheckSparseGpaPageVtlAccess = 0x00D4,
        HvCallAcceptGpaPages = 0x00D9,
        HvCallModifySparseGpaPageHostVisibility = 0x00DB,
        HvCallMemoryMappedIoRead = 0x0106,
        HvCallMemoryMappedIoWrite = 0x0107,
        HvCallPinGpaPageRanges = 0x0112,
        HvCallUnpinGpaPageRanges = 0x0113,
        HvCallQuerySparseGpaPageHostVisibility = 0x011C,

        // Extended hypercalls.
        HvExtCallQueryCapabilities = 0x8001,
    }
}
309
// Synthetic MSR addresses defined by the hypervisor interface for x64 guests.
pub const HV_X64_MSR_GUEST_OS_ID: u32 = 0x40000000;
pub const HV_X64_MSR_HYPERCALL: u32 = 0x40000001;
pub const HV_X64_MSR_VP_INDEX: u32 = 0x40000002;
// Reference time and frequency MSRs.
pub const HV_X64_MSR_TIME_REF_COUNT: u32 = 0x40000020;
pub const HV_X64_MSR_REFERENCE_TSC: u32 = 0x40000021;
pub const HV_X64_MSR_TSC_FREQUENCY: u32 = 0x40000022;
pub const HV_X64_MSR_APIC_FREQUENCY: u32 = 0x40000023;
// APIC access MSRs.
pub const HV_X64_MSR_EOI: u32 = 0x40000070;
pub const HV_X64_MSR_ICR: u32 = 0x40000071;
pub const HV_X64_MSR_TPR: u32 = 0x40000072;
pub const HV_X64_MSR_VP_ASSIST_PAGE: u32 = 0x40000073;
// SynIC (synthetic interrupt controller) control MSRs.
pub const HV_X64_MSR_SCONTROL: u32 = 0x40000080;
pub const HV_X64_MSR_SVERSION: u32 = 0x40000081;
pub const HV_X64_MSR_SIEFP: u32 = 0x40000082;
pub const HV_X64_MSR_SIMP: u32 = 0x40000083;
pub const HV_X64_MSR_EOM: u32 = 0x40000084;
// SynIC synthetic interrupt source MSRs (SINT0-SINT15).
pub const HV_X64_MSR_SINT0: u32 = 0x40000090;
pub const HV_X64_MSR_SINT1: u32 = 0x40000091;
pub const HV_X64_MSR_SINT2: u32 = 0x40000092;
pub const HV_X64_MSR_SINT3: u32 = 0x40000093;
pub const HV_X64_MSR_SINT4: u32 = 0x40000094;
pub const HV_X64_MSR_SINT5: u32 = 0x40000095;
pub const HV_X64_MSR_SINT6: u32 = 0x40000096;
pub const HV_X64_MSR_SINT7: u32 = 0x40000097;
pub const HV_X64_MSR_SINT8: u32 = 0x40000098;
pub const HV_X64_MSR_SINT9: u32 = 0x40000099;
pub const HV_X64_MSR_SINT10: u32 = 0x4000009a;
pub const HV_X64_MSR_SINT11: u32 = 0x4000009b;
pub const HV_X64_MSR_SINT12: u32 = 0x4000009c;
pub const HV_X64_MSR_SINT13: u32 = 0x4000009d;
pub const HV_X64_MSR_SINT14: u32 = 0x4000009e;
pub const HV_X64_MSR_SINT15: u32 = 0x4000009f;
// Synthetic timer MSRs (config/count pairs for timers 0-3).
pub const HV_X64_MSR_STIMER0_CONFIG: u32 = 0x400000b0;
pub const HV_X64_MSR_STIMER0_COUNT: u32 = 0x400000b1;
pub const HV_X64_MSR_STIMER1_CONFIG: u32 = 0x400000b2;
pub const HV_X64_MSR_STIMER1_COUNT: u32 = 0x400000b3;
pub const HV_X64_MSR_STIMER2_CONFIG: u32 = 0x400000b4;
pub const HV_X64_MSR_STIMER2_COUNT: u32 = 0x400000b5;
pub const HV_X64_MSR_STIMER3_CONFIG: u32 = 0x400000b6;
pub const HV_X64_MSR_STIMER3_COUNT: u32 = 0x400000b7;
pub const HV_X64_MSR_GUEST_IDLE: u32 = 0x400000F0;
// Guest crash reporting MSRs: parameters P0-P4 plus the control register
// (see [`GuestCrashCtl`]).
pub const HV_X64_MSR_GUEST_CRASH_P0: u32 = 0x40000100;
pub const HV_X64_MSR_GUEST_CRASH_P1: u32 = 0x40000101;
pub const HV_X64_MSR_GUEST_CRASH_P2: u32 = 0x40000102;
pub const HV_X64_MSR_GUEST_CRASH_P3: u32 = 0x40000103;
pub const HV_X64_MSR_GUEST_CRASH_P4: u32 = 0x40000104;
pub const HV_X64_MSR_GUEST_CRASH_CTL: u32 = 0x40000105;

/// The number of guest crash parameter MSRs (P0-P4).
pub const HV_X64_GUEST_CRASH_PARAMETER_MSRS: usize = 5;
359
/// A hypervisor status code.
///
/// The non-success status codes are defined in [`HvError`]. Zero is the
/// success value ([`HvStatus::SUCCESS`]).
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
#[repr(transparent)]
pub struct HvStatus(pub u16);
366
367impl HvStatus {
368    /// The success status code.
369    pub const SUCCESS: Self = Self(0);
370
371    /// Returns `Ok(())` if this is `HvStatus::SUCCESS`, otherwise returns an
372    /// `Err(err)` where `err` is the corresponding `HvError`.
373    pub fn result(self) -> HvResult<()> {
374        if let Ok(err) = self.0.try_into() {
375            Err(HvError(err))
376        } else {
377            Ok(())
378        }
379    }
380
381    /// Returns true if this is `HvStatus::SUCCESS`.
382    pub fn is_ok(self) -> bool {
383        self == Self::SUCCESS
384    }
385
386    /// Returns true if this is not `HvStatus::SUCCESS`.
387    pub fn is_err(self) -> bool {
388        self != Self::SUCCESS
389    }
390
391    const fn from_bits(bits: u16) -> Self {
392        Self(bits)
393    }
394
395    const fn into_bits(self) -> u16 {
396        self.0
397    }
398}
399
400impl From<Result<(), HvError>> for HvStatus {
401    fn from(err: Result<(), HvError>) -> Self {
402        err.err().map_or(Self::SUCCESS, |err| Self(err.0.get()))
403    }
404}
405
406impl Debug for HvStatus {
407    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
408        match self.result() {
409            Ok(()) => f.write_str("Success"),
410            Err(err) => Debug::fmt(&err, f),
411        }
412    }
413}
414
/// An [`HvStatus`] value representing an error.
///
/// Construct via the associated constants below or from a raw nonzero code
/// via `From<NonZeroU16>`.
//
// DEVNOTE: use `NonZeroU16` to get a niche optimization, since 0 is reserved
// for success.
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout)]
#[repr(transparent)]
pub struct HvError(core::num::NonZeroU16);
422
impl From<core::num::NonZeroU16> for HvError {
    /// Wraps a raw nonzero status code as an [`HvError`].
    fn from(err: core::num::NonZeroU16) -> Self {
        Self(err)
    }
}
428
429impl Debug for HvError {
430    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
431        match self.debug_name() {
432            Some(name) => f.pad(name),
433            None => Debug::fmt(&self.0.get(), f),
434        }
435    }
436}
437
438impl core::fmt::Display for HvError {
439    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
440        match self.doc_str() {
441            Some(s) => f.write_str(s),
442            None => write!(f, "Hypervisor error {:#06x}", self.0),
443        }
444    }
445}
446
447impl core::error::Error for HvError {}
448
/// Defines associated error constants on `$ty`, plus `debug_name` and
/// `doc_str` lookups that map a raw status code back to the constant's
/// identifier and doc string respectively.
macro_rules! hv_error {
    ($ty:ty, $(#[doc = $doc:expr] $ident:ident = $val:expr),* $(,)?) => {

        #[allow(non_upper_case_globals)]
        impl $ty {
            $(
                #[doc = $doc]
                pub const $ident: Self = Self(core::num::NonZeroU16::new($val).unwrap());
            )*

            // Maps a raw code to the constant's identifier (used by `Debug`).
            fn debug_name(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        $val => stringify!($ident),
                    )*
                    _ => return None,
                })
            }

            // Maps a raw code to the constant's doc string (used by `Display`).
            fn doc_str(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        $val => $doc,
                    )*
                    _ => return None,
                })
            }
        }
    };
}
479
// DEVNOTE: the doc comments here are also used as the runtime error strings.
// Changing a doc comment below changes the `Display` output for that error.
hv_error! {
    HvError,
    /// Invalid hypercall code
    InvalidHypercallCode = 0x0002,
    /// Invalid hypercall input
    InvalidHypercallInput = 0x0003,
    /// Invalid alignment
    InvalidAlignment = 0x0004,
    /// Invalid parameter
    InvalidParameter = 0x0005,
    /// Access denied
    AccessDenied = 0x0006,
    /// Invalid partition state
    InvalidPartitionState = 0x0007,
    /// Operation denied
    OperationDenied = 0x0008,
    /// Unknown property
    UnknownProperty = 0x0009,
    /// Property value out of range
    PropertyValueOutOfRange = 0x000A,
    /// Insufficient memory
    InsufficientMemory = 0x000B,
    /// Partition too deep
    PartitionTooDeep = 0x000C,
    /// Invalid partition ID
    InvalidPartitionId = 0x000D,
    /// Invalid VP index
    InvalidVpIndex = 0x000E,
    /// Not found
    NotFound = 0x0010,
    /// Invalid port ID
    InvalidPortId = 0x0011,
    /// Invalid connection ID
    InvalidConnectionId = 0x0012,
    /// Insufficient buffers
    InsufficientBuffers = 0x0013,
    /// Not acknowledged
    NotAcknowledged = 0x0014,
    /// Invalid VP state
    InvalidVpState = 0x0015,
    /// Acknowledged
    Acknowledged = 0x0016,
    /// Invalid save restore state
    InvalidSaveRestoreState = 0x0017,
    /// Invalid SynIC state
    InvalidSynicState = 0x0018,
    /// Object in use
    ObjectInUse = 0x0019,
    /// Invalid proximity domain info
    InvalidProximityDomainInfo = 0x001A,
    /// No data
    NoData = 0x001B,
    /// Inactive
    Inactive = 0x001C,
    /// No resources
    NoResources = 0x001D,
    /// Feature unavailable
    FeatureUnavailable = 0x001E,
    /// Partial packet
    PartialPacket = 0x001F,
    /// Processor feature not supported
    ProcessorFeatureNotSupported = 0x0020,
    /// Processor cache line flush size incompatible
    ProcessorCacheLineFlushSizeIncompatible = 0x0030,
    /// Insufficient buffer
    InsufficientBuffer = 0x0033,
    /// Incompatible processor
    IncompatibleProcessor = 0x0037,
    /// Insufficient device domains
    InsufficientDeviceDomains = 0x0038,
    /// CPUID feature validation error
    CpuidFeatureValidationError = 0x003C,
    /// CPUID XSAVE feature validation error
    CpuidXsaveFeatureValidationError = 0x003D,
    /// Processor startup timeout
    ProcessorStartupTimeout = 0x003E,
    /// SMX enabled
    SmxEnabled = 0x003F,
    /// Invalid LP index
    InvalidLpIndex = 0x0041,
    /// Invalid register value
    InvalidRegisterValue = 0x0050,
    /// Invalid VTL state
    InvalidVtlState = 0x0051,
    /// NX not detected
    NxNotDetected = 0x0055,
    /// Invalid device ID
    InvalidDeviceId = 0x0057,
    /// Invalid device state
    InvalidDeviceState = 0x0058,
    /// Pending page requests
    PendingPageRequests = 0x0059,
    /// Page request invalid
    PageRequestInvalid = 0x0060,
    /// Key already exists
    KeyAlreadyExists = 0x0065,
    /// Device already in domain
    DeviceAlreadyInDomain = 0x0066,
    /// Invalid CPU group ID
    InvalidCpuGroupId = 0x006F,
    /// Invalid CPU group state
    InvalidCpuGroupState = 0x0070,
    /// Operation failed
    OperationFailed = 0x0071,
    /// Not allowed with nested virtualization active
    NotAllowedWithNestedVirtActive = 0x0072,
    /// Insufficient root memory
    InsufficientRootMemory = 0x0073,
    /// Event buffer already freed
    EventBufferAlreadyFreed = 0x0074,
    /// The specified timeout expired before the operation completed.
    Timeout = 0x0078,
    /// The VTL specified for the operation is already in an enabled state.
    VtlAlreadyEnabled = 0x0086,
    /// Unknown register name
    UnknownRegisterName = 0x0087,
}
598
/// A useful result type for hypervisor operations.
///
/// The error type is always [`HvError`].
pub type HvResult<T> = Result<T, HvError>;
601
/// A virtual trust level (VTL).
///
/// Ordered so that higher VTLs compare greater than lower ones. Convert
/// from a raw `u8` with `Vtl::try_from` and back with `u8::from`.
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum Vtl {
    Vtl0 = 0,
    Vtl1 = 1,
    Vtl2 = 2,
}
609
610impl TryFrom<u8> for Vtl {
611    type Error = HvError;
612
613    fn try_from(value: u8) -> Result<Self, Self::Error> {
614        Ok(match value {
615            0 => Self::Vtl0,
616            1 => Self::Vtl1,
617            2 => Self::Vtl2,
618            _ => return Err(HvError::InvalidParameter),
619        })
620    }
621}
622
impl From<Vtl> for u8 {
    /// Returns the numeric value of the VTL.
    fn from(value: Vtl) -> Self {
        value as u8
    }
}
628
/// The contents of `HV_X64_MSR_GUEST_CRASH_CTL`
///
/// The flags occupy the high bits of the MSR: `pre_os_id` is bits 58-60,
/// `no_crash_dump` bit 61, `crash_message` bit 62, and `crash_notify` bit 63.
#[bitfield(u64)]
pub struct GuestCrashCtl {
    #[bits(58)]
    _reserved: u64,
    // ID of the pre-OS environment
    #[bits(3)]
    pub pre_os_id: u8,
    // Crash dump will not be captured
    #[bits(1)]
    pub no_crash_dump: bool,
    // `HV_X64_MSR_GUEST_CRASH_P3` is the GPA of the message,
    // `HV_X64_MSR_GUEST_CRASH_P4` is its length in bytes
    #[bits(1)]
    pub crash_message: bool,
    // Log contents of crash parameter system registers
    #[bits(1)]
    pub crash_notify: bool,
}
648
649#[repr(C, align(16))]
650#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
651pub struct AlignedU128([u8; 16]);
652
653impl AlignedU128 {
654    pub fn as_ne_bytes(&self) -> [u8; 16] {
655        self.0
656    }
657
658    pub fn from_ne_bytes(val: [u8; 16]) -> Self {
659        Self(val)
660    }
661}
662
663impl Debug for AlignedU128 {
664    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
665        Debug::fmt(&u128::from_ne_bytes(self.0), f)
666    }
667}
668
669impl From<u128> for AlignedU128 {
670    fn from(v: u128) -> Self {
671        Self(v.to_ne_bytes())
672    }
673}
674
675impl From<u64> for AlignedU128 {
676    fn from(v: u64) -> Self {
677        (v as u128).into()
678    }
679}
680
681impl From<u32> for AlignedU128 {
682    fn from(v: u32) -> Self {
683        (v as u128).into()
684    }
685}
686
687impl From<u16> for AlignedU128 {
688    fn from(v: u16) -> Self {
689        (v as u128).into()
690    }
691}
692
693impl From<u8> for AlignedU128 {
694    fn from(v: u8) -> Self {
695        (v as u128).into()
696    }
697}
698
699impl From<AlignedU128> for u128 {
700    fn from(v: AlignedU128) -> Self {
701        u128::from_ne_bytes(v.0)
702    }
703}
704
open_enum! {
    /// The type of a hypervisor message, carried in [`HvMessageHeader`].
    /// All message types other than `HvMessageTypeNone` have the high bit
    /// set.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvMessageType: u32 {
        #![allow(non_upper_case_globals)]

        HvMessageTypeNone = 0x00000000,

        HvMessageTypeUnmappedGpa = 0x80000000,
        HvMessageTypeGpaIntercept = 0x80000001,
        HvMessageTypeUnacceptedGpa = 0x80000003,
        HvMessageTypeGpaAttributeIntercept = 0x80000004,
        HvMessageTypeEnablePartitionVtlIntercept = 0x80000005,
        HvMessageTypeTimerExpired = 0x80000010,
        HvMessageTypeInvalidVpRegisterValue = 0x80000020,
        HvMessageTypeUnrecoverableException = 0x80000021,
        HvMessageTypeUnsupportedFeature = 0x80000022,
        HvMessageTypeTlbPageSizeMismatch = 0x80000023,
        HvMessageTypeIommuFault = 0x80000024,
        HvMessageTypeEventLogBufferComplete = 0x80000040,
        HvMessageTypeHypercallIntercept = 0x80000050,
        HvMessageTypeSynicEventIntercept = 0x80000060,
        HvMessageTypeSynicSintIntercept = 0x80000061,
        HvMessageTypeSynicSintDeliverable = 0x80000062,
        HvMessageTypeAsyncCallCompletion = 0x80000070,
        HvMessageTypeX64IoPortIntercept = 0x80010000,
        HvMessageTypeMsrIntercept = 0x80010001,
        HvMessageTypeX64CpuidIntercept = 0x80010002,
        HvMessageTypeExceptionIntercept = 0x80010003,
        HvMessageTypeX64ApicEoi = 0x80010004,
        HvMessageTypeX64IommuPrq = 0x80010005,
        HvMessageTypeRegisterIntercept = 0x80010006,
        HvMessageTypeX64Halt = 0x80010007,
        HvMessageTypeX64InterruptionDeliverable = 0x80010008,
        HvMessageTypeX64SipiIntercept = 0x80010009,
        HvMessageTypeX64RdtscIntercept = 0x8001000a,
        HvMessageTypeX64ApicSmiIntercept = 0x8001000b,
        HvMessageTypeArm64ResetIntercept = 0x8001000c,
        HvMessageTypeX64ApicInitSipiIntercept = 0x8001000d,
        HvMessageTypeX64ApicWriteIntercept = 0x8001000e,
        HvMessageTypeX64ProxyInterruptIntercept = 0x8001000f,
        HvMessageTypeX64IsolationCtrlRegIntercept = 0x80010010,
        HvMessageTypeX64SnpGuestRequestIntercept = 0x80010011,
        HvMessageTypeX64ExceptionTrapIntercept = 0x80010012,
        HvMessageTypeX64SevVmgexitIntercept = 0x80010013,
    }
}
751
752impl Default for HvMessageType {
753    fn default() -> Self {
754        HvMessageType::HvMessageTypeNone
755    }
756}
757
/// The SINT index used for hypervisor intercept messages.
pub const HV_SYNIC_INTERCEPTION_SINT_INDEX: u8 = 0;

/// The number of synthetic interrupt sources (matching MSRs SINT0-SINT15).
pub const NUM_SINTS: usize = 16;
/// The number of synthetic timers (matching MSRs STIMER0-STIMER3).
pub const NUM_TIMERS: usize = 4;
762
/// The header of an [`HvMessage`].
#[repr(C)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageHeader {
    /// The message type.
    pub typ: HvMessageType,
    /// The length of the payload, in bytes.
    pub len: u8,
    /// Message flags.
    pub flags: HvMessageFlags,
    /// Reserved; set to zero when constructing messages.
    pub rsvd: u16,
    /// The message id.
    pub id: u64,
}
772
/// Flags carried in an [`HvMessageHeader`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageFlags {
    /// Whether another message is pending.
    pub message_pending: bool,
    #[bits(7)]
    _reserved: u8,
}
780
/// The total size of an [`HvMessage`], fixed at 256 bytes by the ABI.
pub const HV_MESSAGE_SIZE: usize = size_of::<HvMessage>();
const_assert!(HV_MESSAGE_SIZE == 256);
/// The maximum length of an [`HvMessage`] payload, in bytes.
pub const HV_MESSAGE_PAYLOAD_SIZE: usize = 240;
784
/// A hypervisor message: a 16-byte header plus a 240-byte payload, 256
/// bytes in total.
#[repr(C, align(16))]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessage {
    pub header: HvMessageHeader,
    pub payload_buffer: [u8; HV_MESSAGE_PAYLOAD_SIZE],
}
791
792impl Default for HvMessage {
793    fn default() -> Self {
794        Self {
795            header: FromZeros::new_zeroed(),
796            payload_buffer: [0; 240],
797        }
798    }
799}
800
801impl HvMessage {
802    /// Constructs a new message. `payload` must fit into the payload field (240
803    /// bytes limit).
804    pub fn new(typ: HvMessageType, id: u64, payload: &[u8]) -> Self {
805        let mut msg = HvMessage {
806            header: HvMessageHeader {
807                typ,
808                len: payload.len() as u8,
809                flags: HvMessageFlags::new(),
810                rsvd: 0,
811                id,
812            },
813            payload_buffer: [0; 240],
814        };
815        msg.payload_buffer[..payload.len()].copy_from_slice(payload);
816        msg
817    }
818
819    pub fn payload(&self) -> &[u8] {
820        &self.payload_buffer[..self.header.len as usize]
821    }
822
823    pub fn as_message<T: MessagePayload>(&self) -> &T {
824        // Ensure invariants are met.
825        let () = T::CHECK;
826        T::ref_from_prefix(&self.payload_buffer).unwrap().0
827    }
828
829    pub fn as_message_mut<T: MessagePayload>(&mut self) -> &T {
830        // Ensure invariants are met.
831        let () = T::CHECK;
832        T::mut_from_prefix(&mut self.payload_buffer).unwrap().0
833    }
834}
835
/// Marker trait for types that can be used as the payload of an
/// [`HvMessage`] via [`HvMessage::as_message`].
pub trait MessagePayload: KnownLayout + Immutable + IntoBytes + FromBytes + Sized {
    /// Used to ensure this trait is only implemented on messages of the proper
    /// size and alignment.
    #[doc(hidden)]
    const CHECK: () = {
        assert!(size_of::<Self>() <= HV_MESSAGE_PAYLOAD_SIZE);
        assert!(align_of::<Self>() <= align_of::<HvMessage>());
    };
}
845
/// Message payload for timer expiration messages
/// (`HvMessageType::HvMessageTypeTimerExpired`).
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct TimerMessagePayload {
    /// The index of the timer that expired.
    pub timer_index: u32,
    pub reserved: u32,
    /// The time at which the timer expired.
    pub expiration_time: u64,
    /// The time at which the message was delivered.
    pub delivery_time: u64,
}
854
855pub mod hypercall {
856    use super::*;
857    use core::ops::RangeInclusive;
858    use zerocopy::Unalign;
859
    /// The hypercall input value.
    ///
    /// Reserved (`_rsvd*`) fields must be zero.
    #[bitfield(u64)]
    pub struct Control {
        /// The hypercall code.
        pub code: u16,
        /// If this hypercall is a fast hypercall.
        pub fast: bool,
        /// The variable header size, in qwords.
        #[bits(10)]
        pub variable_header_size: usize,
        #[bits(4)]
        _rsvd0: u8,
        /// Specifies that the hypercall should be handled by the L0 hypervisor in a nested environment.
        pub nested: bool,
        /// The element count for rep hypercalls.
        #[bits(12)]
        pub rep_count: usize,
        #[bits(4)]
        _rsvd1: u8,
        /// The first element to start processing in a rep hypercall.
        #[bits(12)]
        pub rep_start: usize,
        #[bits(4)]
        _rsvd2: u8,
    }
885
    /// The hypercall output value returned to the guest.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[must_use]
    pub struct HypercallOutput {
        /// The status of the call.
        #[bits(16)]
        pub call_status: HvStatus,
        /// Reserved; zero.
        pub rsvd: u16,
        /// The number of elements processed, for rep hypercalls.
        #[bits(12)]
        pub elements_processed: usize,
        /// Reserved; zero.
        #[bits(20)]
        pub rsvd2: u32,
    }
899
900    impl From<HvError> for HypercallOutput {
901        fn from(e: HvError) -> Self {
902            Self::new().with_call_status(Err(e).into())
903        }
904    }
905
    impl HypercallOutput {
        /// A success output with zero elements processed.
        pub const SUCCESS: Self = Self::new();

        /// Converts the call status into a result, ignoring the element
        /// count.
        pub fn result(&self) -> Result<(), HvError> {
            self.call_status().result()
        }
    }
914
    /// A register name/value pair, as used by the get/set VP registers
    /// hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvRegisterAssoc {
        /// The register name.
        pub name: HvRegisterName,
        /// Padding before `value`; set to zero by the `From` impls below.
        pub pad: [u32; 3],
        /// The register value.
        pub value: HvRegisterValue,
    }
922
923    impl<N: Into<HvRegisterName>, T: Into<HvRegisterValue>> From<(N, T)> for HvRegisterAssoc {
924        fn from((name, value): (N, T)) -> Self {
925            Self {
926                name: name.into(),
927                pad: [0; 3],
928                value: value.into(),
929            }
930        }
931    }
932
933    impl<N: Copy + Into<HvRegisterName>, T: Copy + Into<HvRegisterValue>> From<&(N, T)>
934        for HvRegisterAssoc
935    {
936        fn from(&(name, value): &(N, T)) -> Self {
937            Self {
938                name: name.into(),
939                pad: [0; 3],
940                value: value.into(),
941            }
942        }
943    }
944
    /// Contents of the hypercall MSR, which enables and locates the hypercall page.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MsrHypercallContents {
        /// Whether the hypercall page is enabled.
        pub enable: bool,
        /// Whether the MSR is locked against further writes.
        pub locked: bool,
        #[bits(10)]
        pub reserved_p: u64,
        /// The guest page number of the hypercall page.
        #[bits(52)]
        pub gpn: u64,
    }

    /// Input for the post message hypercall.
    #[repr(C, align(8))]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessage {
        /// The target connection ID.
        pub connection_id: u32,
        pub padding: u32,
        /// The message type.
        pub message_type: u32,
        /// The number of valid bytes in `payload`.
        pub payload_size: u32,
        /// The message payload.
        pub payload: [u8; 240],
    }

    /// Input for the signal event hypercall.
    #[repr(C, align(8))]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEvent {
        /// The target connection ID.
        pub connection_id: u32,
        /// The event flag number to signal.
        pub flag_number: u16,
        pub rsvd: u16,
    }
973
    /// Input for the post message direct hypercall, targeting a specific
    /// partition, VP, VTL, and SINT.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessageDirect {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor.
        pub vp_index: u32,
        /// The target VTL.
        pub vtl: u8,
        pub padding0: [u8; 3],
        /// The target synthetic interrupt number.
        pub sint: u8,
        pub padding1: [u8; 3],
        /// The message to post.
        pub message: Unalign<HvMessage>,
        pub padding2: u32,
    }

    /// Input for the signal event direct hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirect {
        /// The target partition.
        pub target_partition: u64,
        /// The target virtual processor.
        pub target_vp: u32,
        /// The target VTL.
        pub target_vtl: u8,
        /// The target synthetic interrupt number.
        pub target_sint: u8,
        /// The event flag number to signal.
        pub flag_number: u16,
    }

    /// Output for the signal event direct hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirectOutput {
        /// Nonzero if the event flag was newly signaled.
        pub newly_signaled: u8,
        pub rsvd: [u8; 7],
    }

    /// A device interrupt description, used by [`RetargetDeviceInterrupt`].
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptEntry {
        /// The interrupt source type.
        pub source: HvInterruptSource,
        pub rsvd: u32,
        /// Source-specific data describing the interrupt.
        pub data: [u32; 2],
    }
1011
    open_enum! {
        /// The source of a device interrupt.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterruptSource: u32 {
            MSI = 1,
            IO_APIC = 2,
        }
    }

    /// The target of a device interrupt.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptTarget {
        /// The interrupt vector.
        pub vector: u32,
        /// Flags describing how the target is specified.
        pub flags: HvInterruptTargetFlags,
        // NOTE(review): presumably a processor mask, or a generic set format
        // value when `flags.processor_set()` is set — confirm against the TLFS.
        pub mask_or_format: u64,
    }

    /// Flags describing a device interrupt target.
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInterruptTargetFlags {
        /// The interrupt is multicast.
        pub multicast: bool,
        /// The target is described by a processor set.
        pub processor_set: bool,
        #[bits(30)]
        pub reserved: u32,
    }

    /// Interrupt target flag: multicast destination.
    pub const HV_DEVICE_INTERRUPT_TARGET_MULTICAST: u32 = 1;
    /// Interrupt target flag: target is a processor set.
    pub const HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET: u32 = 2;

    /// Generic set format: a sparse set with a valid-banks mask.
    pub const HV_GENERIC_SET_SPARSE_4K: u64 = 0;
    /// Generic set format: all processors.
    pub const HV_GENERIC_SET_ALL: u64 = 1;
1042
    /// Input for the retarget device interrupt hypercall.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct RetargetDeviceInterrupt {
        /// The partition whose device interrupt is being retargeted.
        pub partition_id: u64,
        /// The device ID.
        pub device_id: u64,
        /// The interrupt to retarget.
        pub entry: InterruptEntry,
        pub rsvd: u64,
        /// The new target of the interrupt.
        pub target_header: InterruptTarget,
    }

    /// A VTL specifier, used in many hypercall inputs.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInputVtl {
        /// The target VTL, only meaningful when `use_target_vtl` is set.
        #[bits(4)]
        pub target_vtl_value: u8,
        /// If set, target `target_vtl_value`; otherwise, target the current VTL.
        pub use_target_vtl: bool,
        /// Must be zero (checked by [`HvInputVtl::target_vtl`]).
        #[bits(3)]
        pub reserved: u8,
    }
1062
1063    impl From<Vtl> for HvInputVtl {
1064        fn from(value: Vtl) -> Self {
1065            Self::from(Some(value))
1066        }
1067    }
1068
1069    impl From<Option<Vtl>> for HvInputVtl {
1070        fn from(value: Option<Vtl>) -> Self {
1071            Self::new()
1072                .with_use_target_vtl(value.is_some())
1073                .with_target_vtl_value(value.map_or(0, Into::into))
1074        }
1075    }
1076
1077    impl HvInputVtl {
1078        /// None = target current vtl
1079        pub fn target_vtl(&self) -> Result<Option<Vtl>, HvError> {
1080            if self.reserved() != 0 {
1081                return Err(HvError::InvalidParameter);
1082            }
1083            if self.use_target_vtl() {
1084                Ok(Some(self.target_vtl_value().try_into()?))
1085            } else {
1086                Ok(None)
1087            }
1088        }
1089
1090        pub const CURRENT_VTL: Self = Self::new();
1091    }
1092
    /// Input header for the get/set VP registers hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetSetVpRegisters {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor.
        pub vp_index: u32,
        /// The target VTL.
        pub target_vtl: HvInputVtl,
        /// Reserved.
        pub rsvd: [u8; 3],
    }

    open_enum::open_enum! {
        /// Microsoft OS identifiers used in the Microsoft-format guest OS ID.
        #[derive(Default)]
        pub enum HvGuestOsMicrosoftIds: u8 {
            UNDEFINED = 0x00,
            MSDOS = 0x01,
            WINDOWS_3X = 0x02,
            WINDOWS_9X = 0x03,
            WINDOWS_NT = 0x04,
            WINDOWS_CE = 0x05,
        }
    }

    /// The Microsoft-format guest OS ID register value.
    #[bitfield(u64)]
    pub struct HvGuestOsMicrosoft {
        #[bits(40)]
        _rsvd: u64,
        /// The OS ID (see [`HvGuestOsMicrosoftIds`]).
        #[bits(8)]
        pub os_id: u8,
        // The top bit must be zero and the least significant 15 bits hold the value of the vendor id.
        #[bits(16)]
        pub vendor_id: u16,
    }

    open_enum::open_enum! {
        /// Open-source OS types used in the open-source-format guest OS ID.
        #[derive(Default)]
        pub enum HvGuestOsOpenSourceType: u8 {
            UNDEFINED = 0x00,
            LINUX = 0x01,
            FREEBSD = 0x02,
            XEN = 0x03,
            ILLUMOS = 0x04,
        }
    }

    /// The open-source-format guest OS ID register value.
    #[bitfield(u64)]
    pub struct HvGuestOsOpenSource {
        /// The OS build number.
        #[bits(16)]
        pub build_no: u16,
        /// The OS version.
        #[bits(32)]
        pub version: u32,
        /// The OS ID.
        #[bits(8)]
        pub os_id: u8,
        /// The OS type (see [`HvGuestOsOpenSourceType`]).
        #[bits(7)]
        pub os_type: u8,
        /// Set when the value uses the open-source format.
        #[bits(1)]
        pub is_open_source: bool,
    }

    /// A guest OS identity value; the top bit selects between the Microsoft
    /// and open-source formats.
    #[bitfield(u64)]
    pub struct HvGuestOsId {
        #[bits(63)]
        _rsvd: u64,
        /// Set when the value uses the open-source format.
        is_open_source: bool,
    }
1156
1157    impl HvGuestOsId {
1158        pub fn microsoft(&self) -> Option<HvGuestOsMicrosoft> {
1159            (!self.is_open_source()).then(|| HvGuestOsMicrosoft::from(u64::from(*self)))
1160        }
1161
1162        pub fn open_source(&self) -> Option<HvGuestOsOpenSource> {
1163            (self.is_open_source()).then(|| HvGuestOsOpenSource::from(u64::from(*self)))
1164        }
1165
1166        pub fn as_u64(&self) -> u64 {
1167            self.0
1168        }
1169    }
1170
    /// Intercept no access types.
    pub const HV_INTERCEPT_ACCESS_MASK_NONE: u32 = 0x00;
    /// Intercept read accesses.
    pub const HV_INTERCEPT_ACCESS_MASK_READ: u32 = 0x01;
    /// Intercept write accesses.
    pub const HV_INTERCEPT_ACCESS_MASK_WRITE: u32 = 0x02;
    /// Intercept both read and write accesses.
    pub const HV_INTERCEPT_ACCESS_MASK_READ_WRITE: u32 =
        HV_INTERCEPT_ACCESS_MASK_READ | HV_INTERCEPT_ACCESS_MASK_WRITE;
    /// Intercept execute accesses.
    pub const HV_INTERCEPT_ACCESS_MASK_EXECUTE: u32 = 0x04;

    open_enum::open_enum! {
        /// The type of intercept to install.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterceptType: u32 {
            #![allow(non_upper_case_globals)]
            HvInterceptTypeX64IoPort = 0x00000000,
            HvInterceptTypeX64Msr = 0x00000001,
            HvInterceptTypeX64Cpuid = 0x00000002,
            HvInterceptTypeException = 0x00000003,
            HvInterceptTypeHypercall = 0x00000008,
            HvInterceptTypeUnknownSynicConnection = 0x0000000D,
            HvInterceptTypeX64ApicEoi = 0x0000000E,
            HvInterceptTypeRetargetInterruptWithUnknownDeviceId = 0x0000000F,
            HvInterceptTypeX64IoPortRange = 0x00000011,
        }
    }

    /// Type-specific intercept parameters; the interpretation depends on the
    /// associated [`HvInterceptType`].
    #[repr(transparent)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct HvInterceptParameters(u64);
1197
1198    impl HvInterceptParameters {
1199        pub fn new_io_port(port: u16) -> Self {
1200            Self(port as u64)
1201        }
1202
1203        pub fn new_io_port_range(ports: RangeInclusive<u16>) -> Self {
1204            let base = *ports.start() as u64;
1205            let end = *ports.end() as u64;
1206            Self(base | (end << 16))
1207        }
1208
1209        pub fn new_exception(vector: u16) -> Self {
1210            Self(vector as u64)
1211        }
1212
1213        pub fn io_port(&self) -> u16 {
1214            self.0 as u16
1215        }
1216
1217        pub fn io_port_range(&self) -> RangeInclusive<u16> {
1218            let base = self.0 as u16;
1219            let end = (self.0 >> 16) as u16;
1220            base..=end
1221        }
1222
1223        pub fn cpuid_index(&self) -> u32 {
1224            self.0 as u32
1225        }
1226
1227        pub fn exception(&self) -> u16 {
1228            self.0 as u16
1229        }
1230    }
1231
    /// Input for the install intercept hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct InstallIntercept {
        /// The target partition.
        pub partition_id: u64,
        /// The access types to intercept (`HV_INTERCEPT_ACCESS_MASK_*`).
        pub access_type_mask: u32,
        /// The type of intercept to install.
        pub intercept_type: HvInterceptType,
        /// Type-specific parameters.
        pub intercept_parameters: HvInterceptParameters,
    }

    /// Input for the assert virtual interrupt hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct AssertVirtualInterrupt {
        /// The target partition.
        pub partition_id: u64,
        /// Controls describing the interrupt to assert.
        pub interrupt_control: HvInterruptControl,
        /// The interrupt destination.
        pub destination_address: u64,
        /// The requested interrupt vector.
        pub requested_vector: u32,
        /// The target VTL.
        pub target_vtl: u8,
        pub rsvd0: u8,
        pub rsvd1: u16,
    }

    /// Input for the start virtual processor hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorX64 {
        /// The target partition.
        pub partition_id: u64,
        /// The virtual processor to start.
        pub vp_index: u32,
        /// The VTL to start.
        pub target_vtl: u8,
        pub rsvd0: u8,
        pub rsvd1: u16,
        /// The initial register state for the VP.
        pub vp_context: InitialVpContextX64,
    }
1263
    /// The initial x64 register state for a virtual processor, used by
    /// [`StartVirtualProcessorX64`] and [`EnableVpVtlX64`].
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextX64 {
        pub rip: u64,
        pub rsp: u64,
        pub rflags: u64,
        pub cs: HvX64SegmentRegister,
        pub ds: HvX64SegmentRegister,
        pub es: HvX64SegmentRegister,
        pub fs: HvX64SegmentRegister,
        pub gs: HvX64SegmentRegister,
        pub ss: HvX64SegmentRegister,
        pub tr: HvX64SegmentRegister,
        pub ldtr: HvX64SegmentRegister,
        pub idtr: HvX64TableRegister,
        pub gdtr: HvX64TableRegister,
        pub efer: u64,
        pub cr0: u64,
        pub cr3: u64,
        pub cr4: u64,
        /// The PAT MSR value.
        pub msr_cr_pat: u64,
    }

    /// Input for the start virtual processor hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorArm64 {
        /// The target partition.
        pub partition_id: u64,
        /// The virtual processor to start.
        pub vp_index: u32,
        /// The VTL to start.
        pub target_vtl: u8,
        pub rsvd0: u8,
        pub rsvd1: u16,
        /// The initial register state for the VP.
        pub vp_context: InitialVpContextArm64,
    }

    /// The initial ARM64 register state for a virtual processor, used by
    /// [`StartVirtualProcessorArm64`] and [`EnableVpVtlArm64`].
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextArm64 {
        pub pc: u64,
        pub sp_elh: u64,
        pub sctlr_el1: u64,
        pub mair_el1: u64,
        pub tcr_el1: u64,
        pub vbar_el1: u64,
        pub ttbr0_el1: u64,
        pub ttbr1_el1: u64,
        pub x18: u64,
    }
1311
1312    impl InitialVpContextX64 {
1313        pub fn as_hv_register_assocs(&self) -> impl Iterator<Item = HvRegisterAssoc> + '_ {
1314            let regs = [
1315                (HvX64RegisterName::Rip, HvRegisterValue::from(self.rip)).into(),
1316                (HvX64RegisterName::Rsp, HvRegisterValue::from(self.rsp)).into(),
1317                (
1318                    HvX64RegisterName::Rflags,
1319                    HvRegisterValue::from(self.rflags),
1320                )
1321                    .into(),
1322                (HvX64RegisterName::Cs, HvRegisterValue::from(self.cs)).into(),
1323                (HvX64RegisterName::Ds, HvRegisterValue::from(self.ds)).into(),
1324                (HvX64RegisterName::Es, HvRegisterValue::from(self.es)).into(),
1325                (HvX64RegisterName::Fs, HvRegisterValue::from(self.fs)).into(),
1326                (HvX64RegisterName::Gs, HvRegisterValue::from(self.gs)).into(),
1327                (HvX64RegisterName::Ss, HvRegisterValue::from(self.ss)).into(),
1328                (HvX64RegisterName::Tr, HvRegisterValue::from(self.tr)).into(),
1329                (HvX64RegisterName::Ldtr, HvRegisterValue::from(self.ldtr)).into(),
1330                (HvX64RegisterName::Idtr, HvRegisterValue::from(self.idtr)).into(),
1331                (HvX64RegisterName::Gdtr, HvRegisterValue::from(self.gdtr)).into(),
1332                (HvX64RegisterName::Efer, HvRegisterValue::from(self.efer)).into(),
1333                (HvX64RegisterName::Cr0, HvRegisterValue::from(self.cr0)).into(),
1334                (HvX64RegisterName::Cr3, HvRegisterValue::from(self.cr3)).into(),
1335                (HvX64RegisterName::Cr4, HvRegisterValue::from(self.cr4)).into(),
1336                (
1337                    HvX64RegisterName::Pat,
1338                    HvRegisterValue::from(self.msr_cr_pat),
1339                )
1340                    .into(),
1341            ];
1342            regs.into_iter()
1343        }
1344    }
1345
    /// Control flags for the x64 translate virtual address hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaControlFlagsX64 {
        /// Request data read access
        pub validate_read: bool,
        /// Request data write access
        pub validate_write: bool,
        /// Request instruction fetch access.
        pub validate_execute: bool,
        /// Don't enforce any checks related to access mode (supervisor vs. user; SMEP and SMAP are treated
        /// as disabled).
        pub privilege_exempt: bool,
        /// Set the appropriate page table bits (i.e. access/dirty bit)
        pub set_page_table_bits: bool,
        /// Lock the TLB
        pub tlb_flush_inhibit: bool,
        /// Treat the access as a supervisor mode access irrespective of current mode.
        pub supervisor_access: bool,
        /// Treat the access as a user mode access irrespective of current mode.
        pub user_access: bool,
        /// Enforce the SMAP restriction on supervisor data access to user mode addresses if CR4.SMAP=1
        /// irrespective of current EFLAGS.AC i.e. the behavior for "implicit supervisor-mode accesses"
        /// (e.g. to the GDT, etc.) and when EFLAGS.AC=0. Does nothing if CR4.SMAP=0.
        pub enforce_smap: bool,
        /// Don't enforce the SMAP restriction on supervisor data access to user mode addresses irrespective
        /// of current EFLAGS.AC i.e. the behavior when EFLAGS.AC=1.
        pub override_smap: bool,
        /// Treat the access as a shadow stack access.
        pub shadow_stack: bool,
        #[bits(45)]
        _unused: u64,
        /// Target VTL (raw bits; use the `input_vtl`/`with_input_vtl` accessors).
        input_vtl_value: u8,
    }
1380
1381    impl TranslateGvaControlFlagsX64 {
1382        pub fn input_vtl(&self) -> HvInputVtl {
1383            self.input_vtl_value().into()
1384        }
1385
1386        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
1387            self.with_input_vtl_value(input_vtl.into())
1388        }
1389
1390        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
1391            self.set_input_vtl_value(input_vtl.into())
1392        }
1393    }
1394
    /// Control flags for the ARM64 translate virtual address hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaControlFlagsArm64 {
        /// Request data read access
        pub validate_read: bool,
        /// Request data write access
        pub validate_write: bool,
        /// Request instruction fetch access.
        pub validate_execute: bool,
        _reserved0: bool,
        /// Set the appropriate page table bits (i.e. access/dirty bit)
        pub set_page_table_bits: bool,
        /// Lock the TLB
        pub tlb_flush_inhibit: bool,
        /// Treat the access as a supervisor mode access irrespective of current mode.
        pub supervisor_access: bool,
        /// Treat the access as a user mode access irrespective of current mode.
        pub user_access: bool,
        /// Restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN i.e.
        /// the behavior when PSTATE.PAN=1.
        pub pan_set: bool,
        /// Don't restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN
        /// i.e. the behavior when PSTATE.PAN=0.
        pub pan_clear: bool,
        #[bits(46)]
        _unused: u64,
        /// Target VTL (raw bits; use the `input_vtl`/`with_input_vtl` accessors).
        #[bits(8)]
        input_vtl_value: u8,
    }
1425
1426    impl TranslateGvaControlFlagsArm64 {
1427        pub fn input_vtl(&self) -> HvInputVtl {
1428            self.input_vtl_value().into()
1429        }
1430
1431        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
1432            self.with_input_vtl_value(input_vtl.into())
1433        }
1434
1435        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
1436            self.set_input_vtl_value(input_vtl.into())
1437        }
1438    }
1439
    /// Input for the translate virtual address hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressX64 {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor.
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        /// Flags controlling the access checks performed for the translation.
        pub control_flags: TranslateGvaControlFlagsX64,
        /// The guest virtual page number to translate.
        pub gva_page: u64,
    }

    /// Input for the translate virtual address hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressArm64 {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor.
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        /// Flags controlling the access checks performed for the translation.
        pub control_flags: TranslateGvaControlFlagsArm64,
        /// The guest virtual page number to translate.
        pub gva_page: u64,
    }

    open_enum::open_enum! {
        /// Result codes for the translate virtual address hypercall.
        pub enum TranslateGvaResultCode: u32 {
            SUCCESS = 0,

            // Translation Failures
            PAGE_NOT_PRESENT = 1,
            PRIVILEGE_VIOLATION = 2,
            INVALID_PAGE_TABLE_FLAGS = 3,

            // GPA access failures
            GPA_UNMAPPED = 4,
            GPA_NO_READ_ACCESS = 5,
            GPA_NO_WRITE_ACCESS = 6,
            GPA_ILLEGAL_OVERLAY_ACCESS = 7,

            /// Intercept of the memory access by either
            /// - a higher VTL
            /// - a nested hypervisor (due to a violation of the nested page table)
            INTERCEPT = 8,

            GPA_UNACCEPTED = 9,
        }
    }
1485
    /// The result of a virtual address translation.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResult {
        /// The result code (a [`TranslateGvaResultCode`] value).
        pub result_code: u32,
        /// The cache type for the translated address.
        pub cache_type: u8,
        /// Whether the translated page is an overlay page.
        pub overlay_page: bool,
        #[bits(23)]
        pub reserved: u32,
    }

    /// Output for the translate virtual address hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressOutput {
        /// The translation result.
        pub translation_result: TranslateGvaResult,
        /// The translated guest physical page number.
        pub gpa_page: u64,
    }

    /// The extended x64 translation result, including pending event information.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExX64 {
        /// The translation result.
        pub result: TranslateGvaResult,
        pub reserved: u64,
        /// The event that would be pending as a result of the translation.
        pub event_info: HvX64PendingEvent,
    }

    const_assert!(size_of::<TranslateGvaResultExX64>() == 0x30);

    /// The extended ARM64 translation result.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExArm64 {
        /// The translation result.
        pub result: TranslateGvaResult,
    }

    const_assert!(size_of::<TranslateGvaResultExArm64>() == 0x8);

    /// Output for the extended translate virtual address hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputX64 {
        /// The translation result.
        pub translation_result: TranslateGvaResultExX64,
        /// The translated guest physical page number.
        pub gpa_page: u64,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u64,
    }

    const_assert!(size_of::<TranslateVirtualAddressExOutputX64>() == 0x40);

    /// Output for the extended translate virtual address hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputArm64 {
        /// The translation result.
        pub translation_result: TranslateGvaResultExArm64,
        /// The translated guest physical page number.
        pub gpa_page: u64,
    }

    const_assert!(size_of::<TranslateVirtualAddressExOutputArm64>() == 0x10);
1540
    /// Input header for the get VP index from APIC ID hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetVpIndexFromApicId {
        /// The target partition.
        pub partition_id: u64,
        /// The target VTL.
        pub target_vtl: u8,
        pub reserved: [u8; 7],
    }

    /// Input for the enable VP VTL hypercall (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlX64 {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor.
        pub vp_index: u32,
        /// The VTL to enable.
        pub target_vtl: u8,
        pub reserved: [u8; 3],
        /// The initial register state for the newly enabled VTL.
        pub vp_vtl_context: InitialVpContextX64,
    }

    /// Input for the enable VP VTL hypercall (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlArm64 {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor.
        pub vp_index: u32,
        /// The VTL to enable.
        pub target_vtl: u8,
        pub reserved: [u8; 3],
        /// The initial register state for the newly enabled VTL.
        pub vp_vtl_context: InitialVpContextArm64,
    }

    /// Input header for the modify VTL protection mask hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyVtlProtectionMask {
        /// The target partition.
        pub partition_id: u64,
        /// The protections to apply.
        pub map_flags: HvMapGpaFlags,
        /// The VTL whose protections are modified.
        pub target_vtl: HvInputVtl,
        pub reserved: [u8; 3],
    }

    /// Input header for the check sparse GPA page VTL access hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccess {
        /// The target partition.
        pub partition_id: u64,
        /// The VTL whose access is checked.
        pub target_vtl: HvInputVtl,
        /// The access type to check for.
        pub desired_access: u8,
        pub reserved0: u16,
        pub reserved1: u32,
    }
    const_assert!(size_of::<CheckSparseGpaPageVtlAccess>() == 0x10);
1588
    /// Output for the check sparse GPA page VTL access hypercall.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccessOutput {
        /// The result code (a [`CheckGpaPageVtlAccessResultCode`] value).
        pub result_code: u8,
        /// The access types that were denied.
        pub denied_access: u8,
        /// The VTL that would intercept the access.
        #[bits(4)]
        pub intercepting_vtl: u32,
        #[bits(12)]
        _reserved0: u32,
        _reserved1: u32,
    }
    const_assert!(size_of::<CheckSparseGpaPageVtlAccessOutput>() == 0x8);

    open_enum::open_enum! {
        /// Result codes for checking GPA page VTL access.
        pub enum CheckGpaPageVtlAccessResultCode: u32 {
            SUCCESS = 0,
            MEMORY_INTERCEPT = 1,
        }
    }

    /// The number of VTLs for which permissions can be specified in a VTL permission set.
    pub const HV_VTL_PERMISSION_SET_SIZE: usize = 2;

    /// A set of VTL permissions for a GPA page.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VtlPermissionSet {
        /// VTL permissions for the GPA page, starting from VTL 1.
        pub vtl_permission_from_1: [u16; HV_VTL_PERMISSION_SET_SIZE],
    }
1618
    open_enum::open_enum! {
        /// The expected memory type when accepting GPA pages.
        pub enum AcceptMemoryType: u32 {
            ANY = 0,
            RAM = 1,
        }
    }

    open_enum! {
        /// Host visibility used in hypercall inputs.
        ///
        /// NOTE: While this is a 2 bit set with the lower bit representing host
        /// read access and upper bit representing host write access, hardware
        /// platforms do not support that form of isolation. Only support
        /// private or full shared in this definition.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HostVisibilityType: u8 {
            /// No host access.
            PRIVATE = 0,
            /// Full host read/write access.
            SHARED = 3,
        }
    }
1639
1640    // Used by bitfield-struct implicitly.
1641    impl HostVisibilityType {
1642        const fn from_bits(value: u8) -> Self {
1643            Self(value)
1644        }
1645
1646        const fn into_bits(value: Self) -> u8 {
1647            value.0
1648        }
1649    }
1650
    /// Attributes for accepting pages. See [`AcceptGpaPages`]
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptPagesAttributes {
        #[bits(6)]
        /// Supplies the expected memory type [`AcceptMemoryType`].
        pub memory_type: u32,
        #[bits(2)]
        /// Supplies the initial host visibility (exclusive, shared read-only, shared read-write).
        pub host_visibility: HostVisibilityType,
        #[bits(3)]
        /// Supplies the set of VTLs for which initial VTL permissions will be set.
        pub vtl_set: u32,
        #[bits(21)]
        _reserved: u32,
    }

    /// Input for the accept GPA pages hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptGpaPages {
        /// Supplies the partition ID of the partition this request is for.
        pub partition_id: u64,
        /// Supplies attributes of the pages being accepted, such as whether
        /// they should be made host visible.
        pub page_attributes: AcceptPagesAttributes,
        /// Supplies the set of initial VTL permissions.
        pub vtl_permission_set: VtlPermissionSet,
        /// Supplies the GPA page number of the first page to modify.
        pub gpa_page_base: u64,
    }
    const_assert!(size_of::<AcceptGpaPages>() == 0x18);

    /// Attributes for unaccepting pages. See [`UnacceptGpaPages`]
    // NOTE(review): unlike `AcceptPagesAttributes`, this lacks the zerocopy
    // derives — confirm whether that is intentional.
    #[bitfield(u32)]
    pub struct UnacceptPagesAttributes {
        #[bits(3)]
        pub vtl_set: u32,
        #[bits(29)]
        _reserved: u32,
    }

    /// Input for the unaccept GPA pages hypercall.
    #[repr(C)]
    pub struct UnacceptGpaPages {
        /// Supplies the partition ID of the partition this request is for.
        pub partition_id: u64,
        /// Supplies the set of VTLs for which VTL permissions will be checked.
        pub page_attributes: UnacceptPagesAttributes,
        ///  Supplies the set of VTL permissions to check against.
        pub vtl_permission_set: VtlPermissionSet,
        /// Supplies the GPA page number of the first page to modify.
        pub gpa_page_base: u64,
    }
    const_assert!(size_of::<UnacceptGpaPages>() == 0x18);
1704
    /// The host visibility to apply when modifying page visibility.
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyHostVisibility {
        /// The new host visibility.
        #[bits(2)]
        pub host_visibility: HostVisibilityType,
        #[bits(30)]
        _reserved: u32,
    }

    /// Input header for the modify sparse page visibility hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifySparsePageVisibility {
        /// The target partition.
        pub partition_id: u64,
        /// The visibility to apply.
        pub host_visibility: ModifyHostVisibility,
        pub reserved: u32,
    }

    /// Input header for the query sparse page visibility hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct QuerySparsePageVisibility {
        /// The target partition.
        pub partition_id: u64,
    }

    /// Flags for enabling a partition VTL.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtlFlags {
        /// Enable mode-based execution control (MBEC).
        pub enable_mbec: bool,
        /// Enable the supervisor shadow stack.
        pub enable_supervisor_shadow_stack: bool,
        /// Enable hardware HVPT.
        pub enable_hardware_hvpt: bool,
        #[bits(5)]
        pub reserved: u8,
    }

    /// Input for the enable partition VTL hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtl {
        /// The target partition.
        pub partition_id: u64,
        /// The VTL to enable.
        pub target_vtl: u8,
        /// Flags controlling the features of the enabled VTL.
        pub flags: EnablePartitionVtlFlags,
        pub reserved_z0: u16,
        pub reserved_z1: u32,
    }
1747
    /// Input header for the flush virtual address space hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpace {
        /// The target address space.
        pub address_space: u64,
        /// Flags controlling the flush.
        pub flags: HvFlushFlags,
        /// Mask of target processors.
        pub processor_mask: u64,
    }

    /// Input header for the extended flush virtual address space hypercall,
    /// which uses a variable-sized processor set instead of a fixed mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpaceEx {
        /// The target address space.
        pub address_space: u64,
        /// Flags controlling the flush.
        pub flags: HvFlushFlags,
        /// The format of the processor set (`HV_GENERIC_SET_*`).
        pub vp_set_format: u64,
        /// The valid-banks mask of the processor set.
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }

    /// Input header for the pin/unpin GPA page ranges hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PinUnpinGpaPageRangesHeader {
        pub reserved: u64,
    }

    /// Input for the send synthetic cluster IPI hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpi {
        /// The interrupt vector to send.
        pub vector: u32,
        /// The target VTL.
        pub target_vtl: HvInputVtl,
        pub flags: u8,
        pub reserved: u16,
        /// Mask of target processors.
        pub processor_mask: u64,
    }

    /// Input for the extended send synthetic cluster IPI hypercall, which uses
    /// a variable-sized processor set instead of a fixed mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpiEx {
        /// The interrupt vector to send.
        pub vector: u32,
        /// The target VTL.
        pub target_vtl: HvInputVtl,
        pub flags: u8,
        pub reserved: u16,
        /// The format of the processor set (`HV_GENERIC_SET_*`).
        pub vp_set_format: u64,
        /// The valid-banks mask of the processor set.
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }
1793
    /// Flags controlling the TLB flush hypercalls.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvFlushFlags {
        /// Flush all processors, ignoring the processor mask / VP set.
        pub all_processors: bool,
        /// Flush all address spaces, ignoring `address_space`.
        pub all_virtual_address_spaces: bool,
        /// Flush only non-global mappings.
        pub non_global_mappings_only: bool,
        /// GVA ranges use the extended (large-page-capable) format.
        pub use_extended_range_format: bool,
        /// Honor the `target_vtl*` bits below.
        pub use_target_vtl: bool,

        #[bits(3)]
        _reserved: u8,

        pub target_vtl0: bool,
        pub target_vtl1: bool,

        #[bits(54)]
        _reserved2: u64,
    }
1812
    /// A guest virtual address range as passed to the flush hypercalls.
    /// The raw value is interpreted via one of the `as_*` views below,
    /// depending on the flush flags.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGvaRange(pub u64);
1816
1817    impl HvGvaRange {
1818        pub fn as_simple(self) -> HvGvaRangeSimple {
1819            HvGvaRangeSimple(self.0)
1820        }
1821
1822        pub fn as_extended(self) -> HvGvaRangeExtended {
1823            HvGvaRangeExtended(self.0)
1824        }
1825
1826        pub fn as_extended_large_page(self) -> HvGvaRangeExtendedLargePage {
1827            HvGvaRangeExtendedLargePage(self.0)
1828        }
1829    }
1830
    /// An [`HvGvaRange`] in the simple format (4 KB pages only).
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest virtual address.
        #[bits(52)]
        pub gva_page_number: u64,
    }
1841
    /// An [`HvGvaRange`] in the extended format, with a 4 KB page size.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest virtual address when `large_page` is clear.
        #[bits(52)]
        pub gva_page_number: u64,
    }
1854
    /// An [`HvGvaRange`] in the extended format, with a large (2 MB or 1 GB)
    /// page size.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest virtual address when `large_page` is set.
        #[bits(43)]
        pub gva_large_page_number: u64,
    }
1873
    /// A guest physical address range as passed to hypercalls.
    /// The raw value is interpreted via one of the `as_*` views below.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGpaRange(pub u64);
1877
1878    impl HvGpaRange {
1879        pub fn as_simple(self) -> HvGpaRangeSimple {
1880            HvGpaRangeSimple(self.0)
1881        }
1882
1883        pub fn as_extended(self) -> HvGpaRangeExtended {
1884            HvGpaRangeExtended(self.0)
1885        }
1886
1887        pub fn as_extended_large_page(self) -> HvGpaRangeExtendedLargePage {
1888            HvGpaRangeExtendedLargePage(self.0)
1889        }
1890    }
1891
    /// An [`HvGpaRange`] in the simple format (4 KB pages only).
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest physical address.
        #[bits(52)]
        pub gpa_page_number: u64,
    }
1902
    /// An [`HvGpaRange`] in the extended format, with a 4 KB page size.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest physical address when `large_page` is clear.
        #[bits(52)]
        pub gpa_page_number: u64,
    }
1915
    /// An [`HvGpaRange`] in the extended format, with a large (2 MB or 1 GB)
    /// page size.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest physical address when `large_page` is set.
        #[bits(43)]
        pub gpa_large_page_number: u64,
    }
1934
1935    pub const HV_HYPERCALL_MMIO_MAX_DATA_LENGTH: usize = 64;
1936
    /// Input for the memory-mapped I/O read hypercall.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoRead {
        /// The guest physical address to read.
        pub gpa: u64,
        /// The width of the access, in bytes.
        pub access_width: u32,
        /// Reserved; must be zero.
        pub reserved_z0: u32,
    }
1944
    /// Output of the memory-mapped I/O read hypercall. Only the first
    /// `access_width` bytes of `data` are meaningful.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoReadOutput {
        /// The bytes that were read.
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }
1950
    /// Input for the memory-mapped I/O write hypercall. Only the first
    /// `access_width` bytes of `data` are written.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoWrite {
        /// The guest physical address to write.
        pub gpa: u64,
        /// The width of the access, in bytes.
        pub access_width: u32,
        /// Reserved; must be zero.
        pub reserved_z0: u32,
        /// The bytes to write.
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }
1959}
1960
/// Defines an architecture-specific register-name enum (via [`open_enum`]).
///
/// The generated `u32` enum contains the registers listed in the invocation
/// plus the register IDs common to all architectures, along with lossless
/// conversions to and from the untyped [`HvRegisterName`].
macro_rules! registers {
    ($name:ident {
        $(
            $(#[$vattr:meta])*
            $variant:ident = $value:expr
        ),*
        $(,)?
    }) => {
        open_enum! {
            #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
            pub enum $name: u32 {
                #![allow(non_upper_case_globals)]
                // Architecture-specific registers, with any attributes
                // (e.g. doc comments) forwarded from the invocation.
                $($(#[$vattr])* $variant = $value,)*

                // Registers common to all architectures follow.
                InstructionEmulationHints = 0x00000002,
                InternalActivityState = 0x00000004,

                // Guest Crash Registers
                GuestCrashP0  = 0x00000210,
                GuestCrashP1  = 0x00000211,
                GuestCrashP2  = 0x00000212,
                GuestCrashP3  = 0x00000213,
                GuestCrashP4  = 0x00000214,
                GuestCrashCtl = 0x00000215,

                PendingInterruption = 0x00010002,
                InterruptState = 0x00010003,
                PendingEvent0 = 0x00010004,
                PendingEvent1 = 0x00010005,
                DeliverabilityNotifications = 0x00010006,

                GicrBaseGpa = 0x00063000,

                VpRuntime = 0x00090000,
                GuestOsId = 0x00090002,
                VpIndex = 0x00090003,
                TimeRefCount = 0x00090004,
                CpuManagementVersion = 0x00090007,
                VpAssistPage = 0x00090013,
                VpRootSignalCount = 0x00090014,
                ReferenceTsc = 0x00090017,
                VpConfig = 0x00090018,
                Ghcb = 0x00090019,
                ReferenceTscSequence = 0x0009001A,
                GuestSchedulerEvent = 0x0009001B,

                // Synthetic interrupt controller registers.
                Sint0 = 0x000A0000,
                Sint1 = 0x000A0001,
                Sint2 = 0x000A0002,
                Sint3 = 0x000A0003,
                Sint4 = 0x000A0004,
                Sint5 = 0x000A0005,
                Sint6 = 0x000A0006,
                Sint7 = 0x000A0007,
                Sint8 = 0x000A0008,
                Sint9 = 0x000A0009,
                Sint10 = 0x000A000A,
                Sint11 = 0x000A000B,
                Sint12 = 0x000A000C,
                Sint13 = 0x000A000D,
                Sint14 = 0x000A000E,
                Sint15 = 0x000A000F,
                Scontrol = 0x000A0010,
                Sversion = 0x000A0011,
                Sifp = 0x000A0012,
                Sipp = 0x000A0013,
                Eom = 0x000A0014,
                Sirbp = 0x000A0015,

                // Synthetic timer registers.
                Stimer0Config = 0x000B0000,
                Stimer0Count = 0x000B0001,
                Stimer1Config = 0x000B0002,
                Stimer1Count = 0x000B0003,
                Stimer2Config = 0x000B0004,
                Stimer2Count = 0x000B0005,
                Stimer3Config = 0x000B0006,
                Stimer3Count = 0x000B0007,
                StimeUnhaltedTimerConfig = 0x000B0100,
                StimeUnhaltedTimerCount = 0x000B0101,

                // Virtual Secure Mode (VSM) registers.
                VsmCodePageOffsets = 0x000D0002,
                VsmVpStatus = 0x000D0003,
                VsmPartitionStatus = 0x000D0004,
                VsmVina = 0x000D0005,
                VsmCapabilities = 0x000D0006,
                VsmPartitionConfig = 0x000D0007,
                GuestVsmPartitionConfig = 0x000D0008,
                VsmVpSecureConfigVtl0 = 0x000D0010,
                VsmVpSecureConfigVtl1 = 0x000D0011,
                VsmVpSecureConfigVtl2 = 0x000D0012,
                VsmVpSecureConfigVtl3 = 0x000D0013,
                VsmVpSecureConfigVtl4 = 0x000D0014,
                VsmVpSecureConfigVtl5 = 0x000D0015,
                VsmVpSecureConfigVtl6 = 0x000D0016,
                VsmVpSecureConfigVtl7 = 0x000D0017,
                VsmVpSecureConfigVtl8 = 0x000D0018,
                VsmVpSecureConfigVtl9 = 0x000D0019,
                VsmVpSecureConfigVtl10 = 0x000D001A,
                VsmVpSecureConfigVtl11 = 0x000D001B,
                VsmVpSecureConfigVtl12 = 0x000D001C,
                VsmVpSecureConfigVtl13 = 0x000D001D,
                VsmVpSecureConfigVtl14 = 0x000D001E,
                VsmVpWaitForTlbLock = 0x000D0020,
            }
        }

        impl From<HvRegisterName> for $name {
            fn from(name: HvRegisterName) -> Self {
                Self(name.0)
            }
        }

        impl From<$name> for HvRegisterName {
            fn from(name: $name) -> Self {
                Self(name.0)
            }
        }
    };
}
2079
/// A hypervisor register for any architecture.
///
/// This exists only to pass registers through layers where the architecture
/// type has been lost. In general, you should use the arch-specific registers.
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterName(pub u32);
2087
// Register IDs that are identical on every architecture; convertible into
// both `HvX64RegisterName` and `HvArm64RegisterName` (impls below).
registers! {
    // Typed enum for registers that are shared across architectures.
    HvAllArchRegisterName {}
}
2092
2093impl From<HvAllArchRegisterName> for HvX64RegisterName {
2094    fn from(name: HvAllArchRegisterName) -> Self {
2095        Self(name.0)
2096    }
2097}
2098
2099impl From<HvAllArchRegisterName> for HvArm64RegisterName {
2100    fn from(name: HvAllArchRegisterName) -> Self {
2101        Self(name.0)
2102    }
2103}
2104
// Register IDs specific to x64, plus (via the macro) the all-arch IDs.
registers! {
    HvX64RegisterName {
        // X64 User-Mode Registers
        Rax = 0x00020000,
        Rcx = 0x00020001,
        Rdx = 0x00020002,
        Rbx = 0x00020003,
        Rsp = 0x00020004,
        Rbp = 0x00020005,
        Rsi = 0x00020006,
        Rdi = 0x00020007,
        R8 = 0x00020008,
        R9 = 0x00020009,
        R10 = 0x0002000a,
        R11 = 0x0002000b,
        R12 = 0x0002000c,
        R13 = 0x0002000d,
        R14 = 0x0002000e,
        R15 = 0x0002000f,
        Rip = 0x00020010,
        Rflags = 0x00020011,

        // X64 Floating Point and Vector Registers
        Xmm0 = 0x00030000,
        Xmm1 = 0x00030001,
        Xmm2 = 0x00030002,
        Xmm3 = 0x00030003,
        Xmm4 = 0x00030004,
        Xmm5 = 0x00030005,
        Xmm6 = 0x00030006,
        Xmm7 = 0x00030007,
        Xmm8 = 0x00030008,
        Xmm9 = 0x00030009,
        Xmm10 = 0x0003000A,
        Xmm11 = 0x0003000B,
        Xmm12 = 0x0003000C,
        Xmm13 = 0x0003000D,
        Xmm14 = 0x0003000E,
        Xmm15 = 0x0003000F,
        FpMmx0 = 0x00030010,
        FpMmx1 = 0x00030011,
        FpMmx2 = 0x00030012,
        FpMmx3 = 0x00030013,
        FpMmx4 = 0x00030014,
        FpMmx5 = 0x00030015,
        FpMmx6 = 0x00030016,
        FpMmx7 = 0x00030017,
        FpControlStatus = 0x00030018,
        XmmControlStatus = 0x00030019,

        // X64 Control Registers
        Cr0 = 0x00040000,
        Cr2 = 0x00040001,
        Cr3 = 0x00040002,
        Cr4 = 0x00040003,
        Cr8 = 0x00040004,
        Xfem = 0x00040005,
        // X64 Intermediate Control Registers
        IntermediateCr0 = 0x00041000,
        IntermediateCr3 = 0x00041002,
        IntermediateCr4 = 0x00041003,
        IntermediateCr8 = 0x00041004,
        // X64 Debug Registers
        Dr0 = 0x00050000,
        Dr1 = 0x00050001,
        Dr2 = 0x00050002,
        Dr3 = 0x00050003,
        Dr6 = 0x00050004,
        Dr7 = 0x00050005,
        // X64 Segment Registers
        Es = 0x00060000,
        Cs = 0x00060001,
        Ss = 0x00060002,
        Ds = 0x00060003,
        Fs = 0x00060004,
        Gs = 0x00060005,
        Ldtr = 0x00060006,
        Tr = 0x00060007,
        // X64 Table Registers
        Idtr = 0x00070000,
        Gdtr = 0x00070001,
        // X64 Virtualized MSRs
        Tsc = 0x00080000,
        Efer = 0x00080001,
        KernelGsBase = 0x00080002,
        ApicBase = 0x00080003,
        Pat = 0x00080004,
        SysenterCs = 0x00080005,
        SysenterEip = 0x00080006,
        SysenterEsp = 0x00080007,
        Star = 0x00080008,
        Lstar = 0x00080009,
        Cstar = 0x0008000a,
        Sfmask = 0x0008000b,
        InitialApicId = 0x0008000c,
        // X64 Cache control MSRs
        MsrMtrrCap = 0x0008000d,
        MsrMtrrDefType = 0x0008000e,
        MsrMtrrPhysBase0 = 0x00080010,
        MsrMtrrPhysBase1 = 0x00080011,
        MsrMtrrPhysBase2 = 0x00080012,
        MsrMtrrPhysBase3 = 0x00080013,
        MsrMtrrPhysBase4 = 0x00080014,
        MsrMtrrPhysBase5 = 0x00080015,
        MsrMtrrPhysBase6 = 0x00080016,
        MsrMtrrPhysBase7 = 0x00080017,
        MsrMtrrPhysBase8 = 0x00080018,
        MsrMtrrPhysBase9 = 0x00080019,
        MsrMtrrPhysBaseA = 0x0008001a,
        MsrMtrrPhysBaseB = 0x0008001b,
        MsrMtrrPhysBaseC = 0x0008001c,
        MsrMtrrPhysBaseD = 0x0008001d,
        MsrMtrrPhysBaseE = 0x0008001e,
        MsrMtrrPhysBaseF = 0x0008001f,
        MsrMtrrPhysMask0 = 0x00080040,
        MsrMtrrPhysMask1 = 0x00080041,
        MsrMtrrPhysMask2 = 0x00080042,
        MsrMtrrPhysMask3 = 0x00080043,
        MsrMtrrPhysMask4 = 0x00080044,
        MsrMtrrPhysMask5 = 0x00080045,
        MsrMtrrPhysMask6 = 0x00080046,
        MsrMtrrPhysMask7 = 0x00080047,
        MsrMtrrPhysMask8 = 0x00080048,
        MsrMtrrPhysMask9 = 0x00080049,
        MsrMtrrPhysMaskA = 0x0008004a,
        MsrMtrrPhysMaskB = 0x0008004b,
        MsrMtrrPhysMaskC = 0x0008004c,
        MsrMtrrPhysMaskD = 0x0008004d,
        MsrMtrrPhysMaskE = 0x0008004e,
        MsrMtrrPhysMaskF = 0x0008004f,
        MsrMtrrFix64k00000 = 0x00080070,
        MsrMtrrFix16k80000 = 0x00080071,
        MsrMtrrFix16kA0000 = 0x00080072,
        MsrMtrrFix4kC0000 = 0x00080073,
        MsrMtrrFix4kC8000 = 0x00080074,
        MsrMtrrFix4kD0000 = 0x00080075,
        MsrMtrrFix4kD8000 = 0x00080076,
        MsrMtrrFix4kE0000 = 0x00080077,
        MsrMtrrFix4kE8000 = 0x00080078,
        MsrMtrrFix4kF0000 = 0x00080079,
        MsrMtrrFix4kF8000 = 0x0008007a,

        TscAux = 0x0008007B,
        Bndcfgs = 0x0008007C,
        DebugCtl = 0x0008007D,
        MCount = 0x0008007E,
        ACount = 0x0008007F,

        SgxLaunchControl0 = 0x00080080,
        SgxLaunchControl1 = 0x00080081,
        SgxLaunchControl2 = 0x00080082,
        SgxLaunchControl3 = 0x00080083,
        SpecCtrl = 0x00080084,
        PredCmd = 0x00080085,
        VirtSpecCtrl = 0x00080086,
        TscVirtualOffset = 0x00080087,
        TsxCtrl = 0x00080088,
        MsrMcUpdatePatchLevel = 0x00080089,
        Available1 = 0x0008008A,
        Xss = 0x0008008B,
        UCet = 0x0008008C,
        SCet = 0x0008008D,
        Ssp = 0x0008008E,
        Pl0Ssp = 0x0008008F,
        Pl1Ssp = 0x00080090,
        Pl2Ssp = 0x00080091,
        Pl3Ssp = 0x00080092,
        InterruptSspTableAddr = 0x00080093,
        TscVirtualMultiplier = 0x00080094,
        TscDeadline = 0x00080095,
        TscAdjust = 0x00080096,
        Pasid = 0x00080097,
        UmwaitControl = 0x00080098,
        Xfd = 0x00080099,
        XfdErr = 0x0008009A,

        Hypercall = 0x00090001,
        RegisterPage = 0x0009001C,

        // Partition Timer Assist Registers
        EmulatedTimerPeriod = 0x00090030,
        EmulatedTimerControl = 0x00090031,
        PmTimerAssist = 0x00090032,

        // AMD SEV configuration MSRs
        SevControl = 0x00090040,

        CrInterceptControl = 0x000E0000,
        CrInterceptCr0Mask = 0x000E0001,
        CrInterceptCr4Mask = 0x000E0002,
        CrInterceptIa32MiscEnableMask = 0x000E0003,
    }
}
2298
// Register IDs specific to ARM64, plus (via the macro) the all-arch IDs.
registers! {
    HvArm64RegisterName {
        HypervisorVersion = 0x00000100,
        PrivilegesAndFeaturesInfo = 0x00000200,
        FeaturesInfo = 0x00000201,
        ImplementationLimitsInfo = 0x00000202,
        HardwareFeaturesInfo = 0x00000203,
        CpuManagementFeaturesInfo = 0x00000204,
        PasidFeaturesInfo = 0x00000205,
        SkipLevelFeaturesInfo = 0x00000206,
        NestedVirtFeaturesInfo = 0x00000207,
        IptFeaturesInfo = 0x00000208,
        IsolationConfiguration = 0x00000209,

        // General-purpose registers.
        X0 = 0x00020000,
        X1 = 0x00020001,
        X2 = 0x00020002,
        X3 = 0x00020003,
        X4 = 0x00020004,
        X5 = 0x00020005,
        X6 = 0x00020006,
        X7 = 0x00020007,
        X8 = 0x00020008,
        X9 = 0x00020009,
        X10 = 0x0002000A,
        X11 = 0x0002000B,
        X12 = 0x0002000C,
        X13 = 0x0002000D,
        X14 = 0x0002000E,
        X15 = 0x0002000F,
        X16 = 0x00020010,
        X17 = 0x00020011,
        X18 = 0x00020012,
        X19 = 0x00020013,
        X20 = 0x00020014,
        X21 = 0x00020015,
        X22 = 0x00020016,
        X23 = 0x00020017,
        X24 = 0x00020018,
        X25 = 0x00020019,
        X26 = 0x0002001A,
        X27 = 0x0002001B,
        X28 = 0x0002001C,
        XFp = 0x0002001D,
        XLr = 0x0002001E,
        XSp = 0x0002001F, // alias for either El0/x depending on Cpsr.SPSel
        XSpEl0 = 0x00020020,
        XSpElx = 0x00020021,
        XPc = 0x00020022,
        Cpsr = 0x00020023,
        SpsrEl2 = 0x00021002,

        // System registers.
        SctlrEl1 = 0x00040002,
        Ttbr0El1 = 0x00040005,
        Ttbr1El1 = 0x00040006,
        TcrEl1 = 0x00040007,
        EsrEl1 = 0x00040008,
        FarEl1 = 0x00040009,
        MairEl1 = 0x0004000b,
        VbarEl1 = 0x0004000c,
        ElrEl1 = 0x00040015,
    }
}
2362
/// A 128-bit register value, as used by the get/set register hypercalls.
/// Narrower registers occupy the low bits; use the `as_*` accessors.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterValue(pub AlignedU128);
2366
impl HvRegisterValue {
    /// The full 128-bit value.
    pub fn as_u128(&self) -> u128 {
        self.0.into()
    }

    /// The low 64 bits (truncating).
    pub fn as_u64(&self) -> u64 {
        self.as_u128() as u64
    }

    /// The low 32 bits (truncating).
    pub fn as_u32(&self) -> u32 {
        self.as_u128() as u32
    }

    /// The low 16 bits (truncating).
    pub fn as_u16(&self) -> u16 {
        self.as_u128() as u16
    }

    /// The low 8 bits (truncating).
    pub fn as_u8(&self) -> u8 {
        self.as_u128() as u8
    }

    /// Reinterprets the low bytes as a table register (GDTR/IDTR format).
    pub fn as_table(&self) -> HvX64TableRegister {
        HvX64TableRegister::read_from_prefix(self.as_bytes())
            .unwrap()
            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }

    /// Reinterprets the low bytes as a segment register.
    pub fn as_segment(&self) -> HvX64SegmentRegister {
        HvX64SegmentRegister::read_from_prefix(self.as_bytes())
            .unwrap()
            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}
2400
2401impl From<u8> for HvRegisterValue {
2402    fn from(val: u8) -> Self {
2403        (val as u128).into()
2404    }
2405}
2406
2407impl From<u16> for HvRegisterValue {
2408    fn from(val: u16) -> Self {
2409        (val as u128).into()
2410    }
2411}
2412
2413impl From<u32> for HvRegisterValue {
2414    fn from(val: u32) -> Self {
2415        (val as u128).into()
2416    }
2417}
2418
2419impl From<u64> for HvRegisterValue {
2420    fn from(val: u64) -> Self {
2421        (val as u128).into()
2422    }
2423}
2424
2425impl From<u128> for HvRegisterValue {
2426    fn from(val: u128) -> Self {
2427        Self(val.into())
2428    }
2429}
2430
/// The in-register layout of an x64 table register (GDTR/IDTR) within an
/// [`HvRegisterValue`].
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64TableRegister {
    /// Padding; not part of the architectural value.
    pub pad: [u16; 3],
    /// The table limit (size minus one).
    pub limit: u16,
    /// The table base linear address.
    pub base: u64,
}
2438
2439impl From<HvX64TableRegister> for HvRegisterValue {
2440    fn from(val: HvX64TableRegister) -> Self {
2441        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2442    }
2443}
2444
2445impl From<HvRegisterValue> for HvX64TableRegister {
2446    fn from(val: HvRegisterValue) -> Self {
2447        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2448    }
2449}
2450
/// The in-register layout of an x64 segment register within an
/// [`HvRegisterValue`].
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SegmentRegister {
    /// The segment base address.
    pub base: u64,
    /// The segment limit.
    pub limit: u32,
    /// The segment selector.
    pub selector: u16,
    /// The segment attribute bits.
    pub attributes: u16,
}
2459
2460impl From<HvX64SegmentRegister> for HvRegisterValue {
2461    fn from(val: HvX64SegmentRegister) -> Self {
2462        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2463    }
2464}
2465
2466impl From<HvRegisterValue> for HvX64SegmentRegister {
2467    fn from(val: HvRegisterValue) -> Self {
2468        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2469    }
2470}
2471
/// The value format of the `DeliverabilityNotifications` register.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
pub struct HvDeliverabilityNotificationsRegister {
    /// x86_64 only.
    pub nmi_notification: bool,
    /// x86_64 only.
    pub interrupt_notification: bool,
    /// x86_64 only.
    #[bits(4)]
    pub interrupt_priority: u8,
    /// Reserved.
    #[bits(42)]
    pub reserved: u64,
    /// Bitmask with one bit per synthetic interrupt source (SINT).
    pub sints: u16,
}
2487
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvVtlEntryReason: u32 {
        /// This reason is reserved and is not used.
        RESERVED = 0,

        /// Indicates entry due to a VTL call from a lower VTL.
        VTL_CALL = 1,

        /// Indicates entry due to an interrupt targeted to the VTL.
        INTERRUPT = 2,

        /// Indicates an entry due to an intercept delivered via the intercept page.
        INTERCEPT = 3,
    }
}
2504
/// Per-VP VTL control data, embedded in the [`HvVpAssistPage`].
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpVtlControl {
    /// The hypervisor updates the entry reason with an indication as to why
    /// the VTL was entered on the virtual processor.
    pub entry_reason: HvVtlEntryReason,

    /// This flag determines whether the VINA interrupt line is asserted.
    pub vina_status: u8,
    /// Reserved; must be zero.
    pub reserved_z0: u8,
    /// Reserved; must be zero.
    pub reserved_z1: u16,

    /// A guest updates the VtlReturn* fields to provide the register values to
    /// restore on VTL return.  The specific register values that are restored
    /// will vary based on whether the VTL is 32-bit or 64-bit: rax and rcx or
    /// eax, ecx, and edx.
    pub registers: [u64; 2],
}
2525
/// The value format of the `VsmVina` register (virtual interrupt
/// notification assist).
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterVsmVina {
    /// The vector to deliver for VINA.
    pub vector: u8,
    /// Whether VINA is enabled.
    pub enabled: bool,
    pub auto_reset: bool,
    pub auto_eoi: bool,
    /// Reserved.
    #[bits(53)]
    pub reserved: u64,
}
2536
/// The layout of the per-VP assist page, mapped via the `VpAssistPage`
/// register.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPage {
    /// APIC assist for optimized EOI processing.
    pub apic_assist: u32,
    /// Reserved; must be zero.
    pub reserved_z0: u32,

    /// VP-VTL control information
    pub vtl_control: HvVpVtlControl,

    pub nested_enlightenments_control: u64,
    pub enlighten_vm_entry: u8,
    /// Reserved; must be zero.
    pub reserved_z1: [u8; 7],
    pub current_nested_vmcs: u64,
    pub synthetic_time_unhalted_timer_expired: u8,
    /// Reserved; must be zero.
    pub reserved_z2: [u8; 7],
    pub virtualization_fault_information: [u8; 40],
    /// Reserved; must be zero.
    pub reserved_z3: u64,
    /// The intercept message, when intercepts are delivered via the assist
    /// page rather than a SynIC message.
    pub intercept_message: HvMessage,
    /// Actions to take on VTL return.
    pub vtl_return_actions: [u8; 256],
}
2558
/// A signal-event action in the assist page's VTL return actions list.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPageActionSignalEvent {
    /// The action type discriminant.
    pub action_type: u64,
    /// The VP to signal.
    pub target_vp: u32,
    /// The VTL to signal.
    pub target_vtl: u8,
    /// The synthetic interrupt source to signal.
    pub target_sint: u8,
    /// The event flag number to set.
    pub flag_number: u16,
}
2568
open_enum! {
    /// The kind of access that triggered an intercept.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterceptAccessType: u8 {
        READ = 0,
        WRITE = 1,
        EXECUTE = 2,
    }
}
2577
/// A snapshot of an x64 VP's execution state, carried in intercept message
/// headers.
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VpExecutionState {
    /// The current privilege level.
    #[bits(2)]
    pub cpl: u8,
    /// CR0.PE (protected mode enabled).
    pub cr0_pe: bool,
    /// CR0.AM (alignment mask).
    pub cr0_am: bool,
    /// EFER.LMA (long mode active).
    pub efer_lma: bool,
    pub debug_active: bool,
    pub interruption_pending: bool,
    /// The active VTL.
    #[bits(4)]
    pub vtl: u8,
    pub enclave_mode: bool,
    pub interrupt_shadow: bool,
    pub virtualization_fault_active: bool,
    /// Reserved.
    #[bits(2)]
    pub reserved: u8,
}
2596
/// A snapshot of an ARM64 VP's execution state, carried in intercept message
/// headers.
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64VpExecutionState {
    /// The current privilege level.
    #[bits(2)]
    pub cpl: u8,
    pub debug_active: bool,
    pub interruption_pending: bool,
    /// The active VTL.
    #[bits(4)]
    pub vtl: u8,
    pub virtualization_fault_active: bool,
    /// Reserved.
    #[bits(7)]
    pub reserved: u8,
}
2610
/// The common header at the start of every x64 intercept message.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterceptMessageHeader {
    /// The index of the VP that took the intercept.
    pub vp_index: u32,
    /// Packed field: low nibble is the instruction length, high nibble is
    /// CR8 (see the accessors below).
    pub instruction_length_and_cr8: u8,
    /// Read, write, or execute.
    pub intercept_access_type: HvInterceptAccessType,
    /// The VP's execution state at the time of the intercept.
    pub execution_state: HvX64VpExecutionState,
    /// The CS segment at the time of the intercept.
    pub cs_segment: HvX64SegmentRegister,
    /// The instruction pointer at the time of the intercept.
    pub rip: u64,
    /// The flags register at the time of the intercept.
    pub rflags: u64,
}

impl MessagePayload for HvX64InterceptMessageHeader {}
2624
2625impl HvX64InterceptMessageHeader {
2626    pub fn instruction_len(&self) -> u8 {
2627        self.instruction_length_and_cr8 & 0xf
2628    }
2629
2630    pub fn cr8(&self) -> u8 {
2631        self.instruction_length_and_cr8 >> 4
2632    }
2633}
2634
/// The common header at the start of every ARM64 intercept message.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64InterceptMessageHeader {
    /// The index of the VP that took the intercept.
    pub vp_index: u32,
    /// The intercepted instruction's length in bytes.
    pub instruction_length: u8,
    /// Read, write, or execute.
    pub intercept_access_type: HvInterceptAccessType,
    /// The VP's execution state at the time of the intercept.
    pub execution_state: HvArm64VpExecutionState,
    /// The program counter at the time of the intercept.
    pub pc: u64,
    // NOTE(review): presumably the CPSR; the field name `cspr` looks
    // transposed, but is kept as-is for interface stability.
    pub cspr: u64,
}
const_assert!(size_of::<HvArm64InterceptMessageHeader>() == 0x18);

impl MessagePayload for HvArm64InterceptMessageHeader {}
2648
/// Packed access information for an x64 I/O port intercept: access size in
/// bits 0-2, string-op flag in bit 3, rep-prefix flag in bit 4.
#[repr(transparent)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortAccessInfo(pub u8);
2652
2653impl HvX64IoPortAccessInfo {
2654    pub fn new(access_size: u8, string_op: bool, rep_prefix: bool) -> Self {
2655        let mut info = access_size & 0x7;
2656
2657        if string_op {
2658            info |= 0x8;
2659        }
2660
2661        if rep_prefix {
2662            info |= 0x10;
2663        }
2664
2665        Self(info)
2666    }
2667
2668    pub fn access_size(&self) -> u8 {
2669        self.0 & 0x7
2670    }
2671
2672    pub fn string_op(&self) -> bool {
2673        self.0 & 0x8 != 0
2674    }
2675
2676    pub fn rep_prefix(&self) -> bool {
2677        self.0 & 0x10 != 0
2678    }
2679}
2680
/// The intercept message for an x64 I/O port access.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortInterceptMessage {
    /// The common intercept header.
    pub header: HvX64InterceptMessageHeader,
    /// The accessed port number.
    pub port_number: u16,
    /// Access size and string/rep flags.
    pub access_info: HvX64IoPortAccessInfo,
    /// The number of valid bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    /// Reserved.
    pub reserved: u32,
    /// The RAX value at the time of the intercept.
    pub rax: u64,
    /// The bytes of the intercepted instruction.
    pub instruction_bytes: [u8; 16],
    /// The DS segment (used by string operations).
    pub ds_segment: HvX64SegmentRegister,
    /// The ES segment (used by string operations).
    pub es_segment: HvX64SegmentRegister,
    /// The RCX value at the time of the intercept.
    pub rcx: u64,
    /// The RSI value at the time of the intercept.
    pub rsi: u64,
    /// The RDI value at the time of the intercept.
    pub rdi: u64,
}

impl MessagePayload for HvX64IoPortInterceptMessage {}
2699
/// Access flags for an x64 memory intercept.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MemoryAccessInfo {
    /// `guest_virtual_address` is valid.
    pub gva_valid: bool,
    /// The GVA-to-GPA translation is valid.
    pub gva_gpa_valid: bool,
    pub hypercall_output_pending: bool,
    pub tlb_locked: bool,
    pub supervisor_shadow_stack: bool,
    /// Reserved.
    #[bits(3)]
    pub reserved1: u8,
}
2711
/// Access flags for an ARM64 memory intercept.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64MemoryAccessInfo {
    /// `guest_virtual_address` is valid.
    pub gva_valid: bool,
    /// The GVA-to-GPA translation is valid.
    pub gva_gpa_valid: bool,
    pub hypercall_output_pending: bool,
    /// Reserved.
    #[bits(5)]
    pub reserved1: u8,
}
2721
open_enum! {
    /// Memory cache type, as reported in memory intercept messages.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvCacheType: u32 {
        #![allow(non_upper_case_globals)]
        HvCacheTypeUncached = 0,
        HvCacheTypeWriteCombining = 1,
        HvCacheTypeWriteThrough = 4,
        HvCacheTypeWriteProtected = 5,
        HvCacheTypeWriteBack = 6,
    }
}
2733
/// The intercept message for an x64 memory access.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MemoryInterceptMessage {
    /// The common intercept header.
    pub header: HvX64InterceptMessageHeader,
    /// The cache type of the access.
    pub cache_type: HvCacheType,
    /// The number of valid bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    /// Validity flags for the address fields.
    pub memory_access_info: HvX64MemoryAccessInfo,
    pub tpr_priority: u8,
    /// Reserved.
    pub reserved: u8,
    /// The accessed guest virtual address (when `gva_valid` is set).
    pub guest_virtual_address: u64,
    /// The accessed guest physical address.
    pub guest_physical_address: u64,
    /// The bytes of the intercepted instruction.
    pub instruction_bytes: [u8; 16],
}

impl MessagePayload for HvX64MemoryInterceptMessage {}
const_assert!(size_of::<HvX64MemoryInterceptMessage>() == 0x50);
2750
/// The intercept message for an ARM64 memory access.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64MemoryInterceptMessage {
    /// The common intercept header.
    pub header: HvArm64InterceptMessageHeader,
    /// The cache type of the access.
    pub cache_type: HvCacheType,
    /// The number of valid bytes in `instruction_bytes`.
    pub instruction_byte_count: u8,
    /// Validity flags for the address fields.
    pub memory_access_info: HvArm64MemoryAccessInfo,
    /// Reserved.
    pub reserved1: u16,
    /// The bytes of the intercepted instruction.
    pub instruction_bytes: [u8; 4],
    /// Reserved.
    pub reserved2: u32,
    /// The accessed guest virtual address (when `gva_valid` is set).
    pub guest_virtual_address: u64,
    /// The accessed guest physical address.
    pub guest_physical_address: u64,
    /// The fault syndrome for the access.
    pub syndrome: u64,
}

impl MessagePayload for HvArm64MemoryInterceptMessage {}
const_assert!(size_of::<HvArm64MemoryInterceptMessage>() == 0x40);
2768
/// ARM64 MMIO intercept message payload (0x48 bytes, per the assertion below).
#[repr(C)]
#[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)]
pub struct HvArm64MmioInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub guest_physical_address: u64,
    pub access_size: u32,
    // Presumably only the first `access_size` bytes are meaningful — confirm
    // against the hypervisor specification.
    pub data: [u8; 32],
    pub padding: u32,
}

impl MessagePayload for HvArm64MmioInterceptMessage {}
const_assert!(size_of::<HvArm64MmioInterceptMessage>() == 0x48);
2781
/// x64 MSR intercept message payload.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MsrInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub msr_number: u32,
    pub reserved: u32,
    pub rdx: u64,
    pub rax: u64,
}

impl MessagePayload for HvX64MsrInterceptMessage {}
2793
/// x64 SIPI intercept message payload.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SipiInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub target_vp_index: u32,
    pub vector: u32,
}

impl MessagePayload for HvX64SipiInterceptMessage {}
2803
/// x64 message indicating which SINTs are deliverable (`deliverable_sints` is a
/// bitmask, one bit per SINT).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SynicSintDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvX64SynicSintDeliverableMessage {}
2814
/// ARM64 counterpart of [`HvX64SynicSintDeliverableMessage`].
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64SynicSintDeliverableMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvArm64SynicSintDeliverableMessage {}
2825
/// x64 message indicating that an interruption of `deliverable_type` can now be
/// delivered.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptionDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    pub deliverable_type: HvX64PendingInterruptionType,
    pub rsvd: [u8; 3],
    pub rsvd2: u32,
}

impl MessagePayload for HvX64InterruptionDeliverableMessage {}
2836
// Interruption types used by HvX64InterruptionDeliverableMessage and
// HvX64PendingInterruptionRegister.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvX64PendingInterruptionType: u8 {
        HV_X64_PENDING_INTERRUPT = 0,
        HV_X64_PENDING_NMI = 2,
        HV_X64_PENDING_EXCEPTION = 3,
        HV_X64_PENDING_SOFTWARE_INTERRUPT = 4,
        HV_X64_PENDING_PRIVILEGED_SOFTWARE_EXCEPTION = 5,
        HV_X64_PENDING_SOFTWARE_EXCEPTION = 6,
    }
}
2848
/// x64 hypercall intercept message payload, carrying the register state used by
/// the hypercall calling convention.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HypercallInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rbx: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub r8: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub xmm_registers: [AlignedU128; 6],
    pub flags: HvHypercallInterceptMessageFlags,
    pub rsvd2: [u32; 3],
}

impl MessagePayload for HvX64HypercallInterceptMessage {}
2866
/// ARM64 hypercall intercept message payload; `x` holds general registers
/// X0-X17.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64HypercallInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub immediate: u16,
    pub reserved: u16,
    pub flags: HvHypercallInterceptMessageFlags,
    pub x: [u64; 18],
}

impl MessagePayload for HvArm64HypercallInterceptMessage {}
2878
/// Flags shared by the x64 and ARM64 hypercall intercept messages.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvHypercallInterceptMessageFlags {
    pub is_isolated: bool,
    #[bits(31)]
    _reserved: u32,
}
2886
/// x64 CPUID intercept message payload. The `default_result_*` fields carry the
/// values the hypervisor would return if the intercept handler does not
/// override them.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64CpuidInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub default_result_rax: u64,
    pub default_result_rcx: u64,
    pub default_result_rdx: u64,
    pub default_result_rbx: u64,
}

impl MessagePayload for HvX64CpuidInterceptMessage {}
2902
/// Flags carried in [`HvX64ExceptionInterceptMessage::exception_info`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInfo {
    /// Whether `error_code` in the enclosing message is valid.
    pub error_code_valid: bool,
    pub software_exception: bool,
    #[bits(6)]
    reserved: u8,
}
2911
/// x64 exception intercept message payload, including a snapshot of the
/// general-purpose registers and the DS/SS segments.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub vector: u16,
    pub exception_info: HvX64ExceptionInfo,
    pub instruction_byte_count: u8,
    /// Valid only when `exception_info.error_code_valid()` is set.
    pub error_code: u32,
    pub exception_parameter: u64,
    pub reserved: u64,
    pub instruction_bytes: [u8; 16],
    pub ds_segment: HvX64SegmentRegister,
    pub ss_segment: HvX64SegmentRegister,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub rsp: u64,
    pub rbp: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,
}

impl MessagePayload for HvX64ExceptionInterceptMessage {}
2944
/// Message identifying the VP whose register state is invalid.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInvalidVpRegisterMessage {
    pub vp_index: u32,
    pub reserved: u32,
}

impl MessagePayload for HvInvalidVpRegisterMessage {}
2953
/// Message reporting an APIC EOI for `interrupt_vector` on `vp_index`.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ApicEoiMessage {
    pub vp_index: u32,
    pub interrupt_vector: u32,
}

impl MessagePayload for HvX64ApicEoiMessage {}
2962
/// x64 unrecoverable exception message payload; carries only the common header.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64UnrecoverableExceptionMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64UnrecoverableExceptionMessage {}
2970
/// x64 halt message payload; carries only the common header.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HaltMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64HaltMessage {}
2978
/// ARM64 reset intercept message payload; see [`HvArm64ResetType`].
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64ResetInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub reset_type: HvArm64ResetType,
    pub padding: u32,
}

impl MessagePayload for HvArm64ResetInterceptMessage {}
2988
// Reset kinds reported in HvArm64ResetInterceptMessage.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvArm64ResetType: u32 {
        POWER_OFF = 0,
        REBOOT = 1,
    }
}
2996
/// Flags carried in [`HvX64RegisterInterceptMessage::flags`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessageFlags {
    pub is_memory_op: bool,
    #[bits(7)]
    _rsvd: u8,
}
3004
/// x64 register access intercept message payload.
#[repr(C)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub flags: HvX64RegisterInterceptMessageFlags,
    pub rsvd: u8,
    pub rsvd2: u16,
    pub register_name: HvX64RegisterName,
    pub access_info: HvX64RegisterAccessInfo,
}
3015
/// Raw 128-bit access payload for [`HvX64RegisterInterceptMessage`].
#[repr(transparent)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterAccessInfo(u128);
3019
3020impl HvX64RegisterAccessInfo {
3021    pub fn new_source_value(source_value: HvRegisterValue) -> Self {
3022        Self(source_value.as_u128())
3023    }
3024}
3025
// Interrupt delivery types; stored in the low 32 bits of HvInterruptControl.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterruptType : u32  {
        #![allow(non_upper_case_globals)]
        HvArm64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeLowestPriority = 0x0001,
        HvX64InterruptTypeSmi = 0x0002,
        HvX64InterruptTypeRemoteRead = 0x0003,
        HvX64InterruptTypeNmi = 0x0004,
        HvX64InterruptTypeInit = 0x0005,
        HvX64InterruptTypeSipi = 0x0006,
        HvX64InterruptTypeExtInt = 0x0007,
        HvX64InterruptTypeLocalInt0 = 0x0008,
        HvX64InterruptTypeLocalInt1 = 0x0009,
    }
}
3043
/// Interrupt control word, shared across architectures.
///
/// The declaration uses the fact the bits for the different
/// architectures don't intersect. When (if ever) they do,
/// will need to come up with a more elaborate abstraction.
/// The other possible downside is the lack of the compile-time
/// checks as adding that will require `guest_arch` support and
/// a large refactoring. To sum up, choosing expediency.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInterruptControl {
    // Raw form of HvInterruptType; use the typed accessors in the impl below.
    interrupt_type_value: u32,
    pub x86_level_triggered: bool,
    pub x86_logical_destination_mode: bool,
    pub arm64_asserted: bool,
    #[bits(29)]
    pub unused: u32,
}
3060
3061impl HvInterruptControl {
3062    pub fn interrupt_type(&self) -> HvInterruptType {
3063        HvInterruptType(self.interrupt_type_value())
3064    }
3065
3066    pub fn set_interrupt_type(&mut self, ty: HvInterruptType) {
3067        self.set_interrupt_type_value(ty.0)
3068    }
3069
3070    pub fn with_interrupt_type(self, ty: HvInterruptType) -> Self {
3071        self.with_interrupt_type_value(ty.0)
3072    }
3073}
3074
/// VSM capabilities register layout.
#[bitfield(u64)]
pub struct HvRegisterVsmCapabilities {
    pub dr6_shared: bool,
    pub mbec_vtl_mask: u16,
    pub deny_lower_vtl_startup: bool,
    pub supervisor_shadow_stack: bool,
    pub hardware_hvpt_available: bool,
    pub software_hvpt_available: bool,
    #[bits(6)]
    pub hardware_hvpt_range_bits: u8,
    pub intercept_page_available: bool,
    pub return_action_available: bool,
    /// If the VTL0 view of memory is mapped to the high address space, which is
    /// the highest legal physical address bit.
    ///
    /// Only available in VTL2.
    pub vtl0_alias_map_available: bool,
    /// If the [`HvRegisterVsmPartitionConfig`] register has support for
    /// `intercept_not_present`.
    ///
    /// Only available in VTL2.
    pub intercept_not_present_available: bool,
    pub install_intercept_ex: bool,
    /// Only available in VTL2.
    pub intercept_system_reset_available: bool,
    #[bits(31)]
    pub reserved: u64,
}
3103
/// VSM partition configuration register layout.
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionConfig {
    pub enable_vtl_protection: bool,
    #[bits(4)]
    pub default_vtl_protection_mask: u8,
    pub zero_memory_on_reset: bool,
    pub deny_lower_vtl_startup: bool,
    pub intercept_acceptance: bool,
    pub intercept_enable_vtl_protection: bool,
    pub intercept_vp_startup: bool,
    pub intercept_cpuid_unimplemented: bool,
    pub intercept_unrecoverable_exception: bool,
    pub intercept_page: bool,
    pub intercept_restore_partition_time: bool,
    /// The hypervisor will send all unmapped GPA intercepts to VTL2 rather than
    /// the host.
    pub intercept_not_present: bool,
    pub intercept_system_reset: bool,
    #[bits(48)]
    pub reserved: u64,
}
3125
/// VSM partition status register layout; the `*_vtl_set` fields are bitmasks
/// with one bit per VTL.
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionStatus {
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(4)]
    pub maximum_vtl: u8,
    #[bits(16)]
    pub mbec_enabled_vtl_set: u16,
    #[bits(4)]
    pub supervisor_shadow_stack_enabled_vtl_set: u8,
    #[bits(24)]
    pub reserved: u64,
}
3139
/// Guest VSM partition configuration register layout.
#[bitfield(u64)]
pub struct HvRegisterGuestVsmPartitionConfig {
    #[bits(4)]
    pub maximum_vtl: u8,
    #[bits(60)]
    pub reserved: u64,
}
3147
/// Per-VP VSM status register layout; `enabled_vtl_set` is a bitmask with one
/// bit per VTL.
#[bitfield(u64)]
pub struct HvRegisterVsmVpStatus {
    #[bits(4)]
    pub active_vtl: u8,
    pub active_mbec_enabled: bool,
    #[bits(11)]
    pub reserved_mbz0: u16,
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(32)]
    pub reserved_mbz1: u32,
}
3160
/// Offsets (12-bit, within the hypercall code page) of the VTL call and return
/// entry points.
#[bitfield(u64)]
pub struct HvRegisterVsmCodePageOffsets {
    #[bits(12)]
    pub call_offset: u16,
    #[bits(12)]
    pub return_offset: u16,
    #[bits(40)]
    pub reserved: u64,
}
3170
/// Saved state for a single synthetic timer; see [`HvSyntheticTimersState`].
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvStimerState {
    pub undelivered_message_pending: u32,
    pub reserved: u32,
    pub config: u64,
    pub count: u64,
    pub adjustment: u64,
    pub undelivered_expiration_time: u64,
}
3181
/// Saved state for all four synthetic timers of a VP.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvSyntheticTimersState {
    pub timers: [HvStimerState; 4],
    pub reserved: [u64; 5],
}
3188
/// VP internal activity register layout (suspend states).
#[bitfield(u64)]
pub struct HvInternalActivityRegister {
    pub startup_suspend: bool,
    pub halt_suspend: bool,
    pub idle_suspend: bool,
    #[bits(61)]
    pub reserved: u64,
}
3197
3198#[bitfield(u64)]
3199pub struct HvSynicSint {
3200    pub vector: u8,
3201    _reserved: u8,
3202    pub masked: bool,
3203    pub auto_eoi: bool,
3204    pub polling: bool,
3205    _reserved2: bool,
3206    pub proxy: bool,
3207    #[bits(43)]
3208    _reserved2: u64,
3209}
3210
/// SynIC SCONTROL register layout.
#[bitfield(u64)]
pub struct HvSynicScontrol {
    pub enabled: bool,
    #[bits(63)]
    _reserved: u64,
}
3217
/// Layout shared by the SynIC SIMP and SIEFP registers: an enable bit plus the
/// guest page number of the message/event flags page.
#[bitfield(u64)]
pub struct HvSynicSimpSiefp {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    #[bits(52)]
    pub base_gpn: u64,
}
3226
/// Synthetic timer configuration register layout.
#[bitfield(u64)]
pub struct HvSynicStimerConfig {
    pub enabled: bool,
    pub periodic: bool,
    pub lazy: bool,
    pub auto_enable: bool,
    // Note: On ARM64 the top 3 bits of apic_vector are reserved.
    pub apic_vector: u8,
    pub direct_mode: bool,
    #[bits(3)]
    pub _reserved1: u8,
    #[bits(4)]
    pub sint: u8,
    #[bits(44)]
    pub _reserved2: u64,
}
3243
// Values for the `event_type` field of the HvX64Pending*Event structures below.
pub const HV_X64_PENDING_EVENT_EXCEPTION: u8 = 0;
pub const HV_X64_PENDING_EVENT_MEMORY_INTERCEPT: u8 = 1;
pub const HV_X64_PENDING_EVENT_NESTED_MEMORY_INTERCEPT: u8 = 2;
pub const HV_X64_PENDING_EVENT_VIRTUALIZATION_FAULT: u8 = 3;
pub const HV_X64_PENDING_EVENT_HYPERCALL_OUTPUT: u8 = 4;
pub const HV_X64_PENDING_EVENT_EXT_INT: u8 = 5;
pub const HV_X64_PENDING_EVENT_SHADOW_IPT: u8 = 6;
3251
/// Provides information about an exception.
///
/// `event_type` is [`HV_X64_PENDING_EVENT_EXCEPTION`].
#[bitfield(u128)]
pub struct HvX64PendingExceptionEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    pub deliver_error_code: bool,
    #[bits(7)]
    pub reserved1: u8,
    pub vector: u16,
    pub error_code: u32,
    pub exception_parameter: u64,
}
3268
/// Provides information about a virtualization fault.
///
/// `event_type` is [`HV_X64_PENDING_EVENT_VIRTUALIZATION_FAULT`].
#[bitfield(u128)]
pub struct HvX64PendingVirtualizationFaultEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    pub reserved1: u8,
    pub parameter0: u16,
    pub code: u32,
    pub parameter1: u64,
}
3283
/// Part of [`HvX64PendingEventMemoryIntercept`]: the common pending-event
/// header byte (pending bit plus 3-bit event type).
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptPendingEventHeader {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    _reserved0: u8,
}
3294
/// Part of [`HvX64PendingEventMemoryIntercept`]
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptAccessFlags {
    /// Indicates if the guest linear address is valid.
    pub guest_linear_address_valid: bool,
    /// Indicates that the memory intercept was caused by an access to a guest physical address
    /// (instead of a page table as part of a page table walk).
    pub caused_by_gpa_access: bool,
    #[bits(6)]
    _reserved1: u8,
}
3307
/// Provides information about a memory intercept.
///
/// 0x20 bytes, per the assertion below.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryIntercept {
    pub event_header: HvX64PendingEventMemoryInterceptPendingEventHeader,
    /// VTL at which the memory intercept is targeted.
    /// Note: This field must be in Reg0.
    pub target_vtl: u8,
    /// Type of the memory access.
    pub access_type: HvInterceptAccessType,
    pub access_flags: HvX64PendingEventMemoryInterceptAccessFlags,
    pub _reserved2: u32,
    /// The guest linear address that caused the fault.
    pub guest_linear_address: u64,
    /// The guest physical address that caused the memory intercept.
    pub guest_physical_address: u64,
    pub _reserved3: u64,
}
const_assert!(size_of::<HvX64PendingEventMemoryIntercept>() == 0x20);
3327
3328//
3329// Provides information about pending hypercall output.
3330//
3331#[bitfield(u128)]
3332pub struct HvX64PendingHypercallOutputEvent {
3333    pub event_pending: bool,
3334    #[bits(3)]
3335    pub event_type: u8,
3336    #[bits(4)]
3337    pub reserved0: u8,
3338
3339    // Whether the hypercall has been retired.
3340    pub retired: bool,
3341
3342    #[bits(23)]
3343    pub reserved1: u32,
3344
3345    // Indicates the number of bytes to be written starting from OutputGpa.
3346    pub output_size: u32,
3347
3348    // Indicates the output GPA, which is not required to be page-aligned.
3349    pub output_gpa: u64,
3350}
3351
/// Provides information about a directly asserted ExtInt.
///
/// `event_type` is [`HV_X64_PENDING_EVENT_EXT_INT`].
#[bitfield(u128)]
pub struct HvX64PendingExtIntEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,
    pub vector: u8,
    #[bits(48)]
    pub reserved1: u64,
    pub reserved2: u64,
}
3365
/// Provides information about pending IPT shadowing.
///
/// `event_type` is [`HV_X64_PENDING_EVENT_SHADOW_IPT`].
#[bitfield(u128)]
pub struct HvX64PendingShadowIptEvent {
    pub event_pending: bool,
    // NOTE(review): the sibling pending-event structures use a 3-bit
    // event_type followed by 4 reserved bits; here the type field is 4 bits
    // wide with 59 reserved. Confirm against the hypervisor specification
    // before changing.
    #[bits(4)]
    pub event_type: u8,
    #[bits(59)]
    pub reserved0: u64,

    pub reserved1: u64,
}
3377
/// Generic view of the first pending-event register: the common header plus
/// 120 bits of event-type-specific data.
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventReg0 {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved: u8,
    #[bits(120)]
    pub data: u128,
}
3389
/// A full pending event: two 128-bit registers (0x20 bytes, per the assertion
/// below).
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEvent {
    pub reg_0: HvX64PendingEventReg0,
    pub reg_1: AlignedU128,
}
const_assert!(size_of::<HvX64PendingEvent>() == 0x20);
3397
3398impl From<HvX64PendingExceptionEvent> for HvX64PendingEvent {
3399    fn from(exception_event: HvX64PendingExceptionEvent) -> Self {
3400        HvX64PendingEvent {
3401            reg_0: HvX64PendingEventReg0::from(u128::from(exception_event)),
3402            reg_1: 0u128.into(),
3403        }
3404    }
3405}
3406
/// Pending interruption register layout; see [`HvX64PendingInterruptionType`]
/// for `interruption_type` values.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingInterruptionRegister {
    pub interruption_pending: bool,
    #[bits(3)]
    pub interruption_type: u8,
    pub deliver_error_code: bool,
    #[bits(4)]
    pub instruction_length: u8,
    pub nested_event: bool,
    #[bits(6)]
    pub reserved: u8,
    pub interruption_vector: u16,
    pub error_code: u32,
}
3422
/// Interrupt state register layout (interrupt shadow and NMI masking).
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptStateRegister {
    pub interrupt_shadow: bool,
    pub nmi_masked: bool,
    #[bits(62)]
    pub reserved: u64,
}
3431
/// Hints for instruction emulation.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInstructionEmulatorHintsRegister {
    /// Indicates whether any secure VTL is enabled for the partition.
    pub partition_secure_vtl_enabled: bool,
    /// Indicates whether kernel or user execute control architecturally
    /// applies to execute accesses.
    pub mbec_user_execute_control: bool,
    #[bits(62)]
    pub _padding: u64,
}
3443
// Event types stored in HvAarch64PendingEventHeader::event_type.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvAarch64PendingEventType: u8 {
        EXCEPTION = 0,
        SYNTHETIC_EXCEPTION = 1,
        HYPERCALL_OUTPUT = 2,
    }
}
3452
3453// Support for bitfield structures.
3454impl HvAarch64PendingEventType {
3455    const fn from_bits(val: u8) -> Self {
3456        HvAarch64PendingEventType(val)
3457    }
3458
3459    const fn into_bits(self) -> u8 {
3460        self.0
3461    }
3462}
3463
3464#[bitfield[u8]]
3465#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
3466pub struct HvAarch64PendingEventHeader {
3467    #[bits(1)]
3468    pub event_pending: bool,
3469    #[bits(3)]
3470    pub event_type: HvAarch64PendingEventType,
3471    #[bits(4)]
3472    pub reserved: u8,
3473}
3474
/// AArch64 pending exception event payload.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingExceptionEvent {
    pub header: HvAarch64PendingEventHeader,
    pub _padding: [u8; 7],
    pub syndrome: u64,
    pub fault_address: u64,
}
3483
3484#[bitfield[u8]]
3485#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
3486pub struct HvAarch64PendingHypercallOutputEventFlags {
3487    #[bits(1)]
3488    pub retired: u8,
3489    #[bits(7)]
3490    pub reserved: u8,
3491}
3492
/// AArch64 pending hypercall output event payload.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingHypercallOutputEvent {
    pub header: HvAarch64PendingEventHeader,
    pub flags: HvAarch64PendingHypercallOutputEventFlags,
    pub reserved: u16,
    pub output_size: u32,
    pub output_gpa: u64,
}
3502
/// Generic AArch64 pending event: the common header plus raw
/// event-type-specific bytes.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingEvent {
    pub header: HvAarch64PendingEventHeader,
    pub event_data: [u8; 15],
    pub _padding: [u64; 2],
}
3510
/// GPA mapping/protection flags; see [`HV_MAP_GPA_PERMISSIONS_NONE`] and
/// [`HV_MAP_GPA_PERMISSIONS_ALL`] for common combinations.
#[bitfield(u32)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMapGpaFlags {
    pub readable: bool,
    pub writable: bool,
    pub kernel_executable: bool,
    pub user_executable: bool,
    pub supervisor_shadow_stack: bool,
    pub paging_writability: bool,
    pub verify_paging_writability: bool,
    #[bits(8)]
    _padding0: u32,
    pub adjustable: bool,
    #[bits(16)]
    _padding1: u32,
}
3527
/// [`HvMapGpaFlags`] with no permissions set
pub const HV_MAP_GPA_PERMISSIONS_NONE: HvMapGpaFlags = HvMapGpaFlags::new();
/// [`HvMapGpaFlags`] with read, write, and kernel/user execute permissions set
pub const HV_MAP_GPA_PERMISSIONS_ALL: HvMapGpaFlags = HvMapGpaFlags::new()
    .with_readable(true)
    .with_writable(true)
    .with_kernel_executable(true)
    .with_user_executable(true);
3535
/// Full monitor page layout: trigger state, 4 trigger groups of 32 flags each,
/// and per-flag timing/parameter tables.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPage {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    pub trigger_group: [HvMonitorTriggerGroup; 4],
    pub reserved2: [u64; 3],
    pub next_check_time: [[u32; 32]; 4],
    pub latency: [[u16; 32]; 4],
    pub reserved3: [u64; 32],
    pub parameter: [[HvMonitorParameter; 32]; 4],
    pub reserved4: [u8; 1984],
}
3549
/// Prefix of [`HvMonitorPage`] containing only the trigger state and groups.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPageSmall {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    pub trigger_group: [HvMonitorTriggerGroup; 4],
}
3557
/// One monitor trigger group: `pending` and `armed` bitmasks (one bit per
/// monitored flag).
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerGroup {
    pub pending: u32,
    pub armed: u32,
}
3564
/// Connection/flag pair associated with a monitored flag in [`HvMonitorPage`].
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorParameter {
    pub connection_id: u32,
    pub flag_number: u16,
    pub reserved: u16,
}
3572
/// Monitor page trigger state: a 4-bit enable mask, one bit per trigger group.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerState {
    #[bits(4)]
    pub group_enable: u32,
    #[bits(28)]
    pub reserved: u32,
}
3581
/// ACPI PM timer description (I/O port, width, enablement).
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvPmTimerInfo {
    #[bits(16)]
    pub port: u16,
    #[bits(1)]
    pub width_24: bool,
    #[bits(1)]
    pub enabled: bool,
    #[bits(14)]
    pub reserved1: u32,
    #[bits(32)]
    pub reserved2: u32,
}
3596
/// SEV control register layout: enable bit plus the guest page number of the
/// VMSA.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterSevControl {
    pub enable_encrypted_state: bool,
    #[bits(11)]
    _rsvd1: u64,
    #[bits(52)]
    pub vmsa_gpa_page_number: u64,
}
3606
/// Reference TSC register layout: enable bit plus the guest page number of the
/// [`HvReferenceTscPage`].
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterReferenceTsc {
    pub enable: bool,
    #[bits(11)]
    pub reserved_p: u64,
    #[bits(52)]
    pub gpn: u64,
}
3616
/// Reference TSC page layout. `tsc_sequence` is a sequence counter used to
/// detect concurrent updates; see [`HV_REFERENCE_TSC_SEQUENCE_INVALID`].
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvReferenceTscPage {
    pub tsc_sequence: u32,
    pub reserved1: u32,
    pub tsc_scale: u64,
    pub tsc_offset: i64,
    pub timeline_bias: u64,
    pub tsc_multiplier: u64,
    pub reserved2: [u64; 507],
}
3628
/// `tsc_sequence` value indicating the reference TSC page contents are invalid.
pub const HV_REFERENCE_TSC_SEQUENCE_INVALID: u32 = 0;
3630
/// Flags carried in [`HvX64VmgexitInterceptMessage::flags`].
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageFlags {
    /// Whether `ghcb_page` in the enclosing message is valid.
    pub ghcb_page_valid: bool,
    pub ghcb_request_error: bool,
    #[bits(62)]
    _reserved: u64,
}
3639
/// Standard-usage portion of the GHCB page carried in a VMGEXIT intercept.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPageStandard {
    pub ghcb_protocol_version: u16,
    _reserved: [u16; 3],
    pub sw_exit_code: u64,
    pub sw_exit_info1: u64,
    pub sw_exit_info2: u64,
    pub sw_scratch: u64,
}
3650
/// GHCB page contents carried in a VMGEXIT intercept; `standard` applies for
/// the standard `ghcb_usage` encoding.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPage {
    pub ghcb_usage: u32,
    _reserved: u32,
    pub standard: HvX64VmgexitInterceptMessageGhcbPageStandard,
}
3658
/// x64 VMGEXIT intercept message payload (SEV-ES/SNP GHCB protocol).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub ghcb_msr: u64,
    pub flags: HvX64VmgexitInterceptMessageFlags,
    /// Valid only when `flags.ghcb_page_valid()` is set.
    pub ghcb_page: HvX64VmgexitInterceptMessageGhcbPage,
}

impl MessagePayload for HvX64VmgexitInterceptMessage {}
3669
/// VP assist page register layout: enable bit plus the guest page number of
/// the assist page.
#[bitfield(u64)]
pub struct HvRegisterVpAssistPage {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    #[bits(52)]
    pub gpa_page_number: u64,
}
3678
/// Dirty flags for [`HvX64RegisterPage`], indicating which register groups have
/// been modified.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct X64RegisterPageDirtyFlags {
    pub general_purpose: bool,
    pub instruction_pointer: bool,
    pub xmm: bool,
    pub segments: bool,
    pub flags: bool,
    #[bits(27)]
    reserved: u32,
}
3690
/// x64 register page layout: a shared page exposing VP register state.
/// Exactly one page in size, per the assertion below.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterPage {
    pub version: u16,
    pub is_valid: u8,
    pub vtl: u8,
    pub dirty: X64RegisterPageDirtyFlags,
    pub gp_registers: [u64; 16],
    pub rip: u64,
    pub rflags: u64,
    pub reserved: u64,
    pub xmm: [u128; 6],
    pub segment: [u128; 6],
    // Misc. control registers (cannot be set via this interface).
    pub cr0: u64,
    pub cr3: u64,
    pub cr4: u64,
    pub cr8: u64,
    pub efer: u64,
    pub dr7: u64,
    pub pending_interruption: HvX64PendingInterruptionRegister,
    pub interrupt_state: HvX64InterruptStateRegister,
    pub instruction_emulation_hints: HvInstructionEmulatorHintsRegister,
    pub reserved_end: [u8; 3672],
}

const _: () = assert!(size_of::<HvX64RegisterPage>() == HV_PAGE_SIZE_USIZE);
3718
/// VSM "wait for TLB lock" register layout.
#[bitfield(u64)]
pub struct HvRegisterVsmWpWaitForTlbLock {
    pub wait: bool,
    #[bits(63)]
    _reserved: u64,
}
3725
/// Per-VP secure VTL configuration register layout.
#[bitfield(u64)]
pub struct HvRegisterVsmVpSecureVtlConfig {
    pub mbec_enabled: bool,
    pub tlb_locked: bool,
    pub supervisor_shadow_stack_enabled: bool,
    pub hardware_hvpt_enabled: bool,
    #[bits(60)]
    _reserved: u64,
}
3735
/// Control-register/MSR intercept control: one enable bit per intercepted
/// register or MSR access.
#[bitfield(u64)]
pub struct HvRegisterCrInterceptControl {
    pub cr0_write: bool,
    pub cr4_write: bool,
    pub xcr0_write: bool,
    pub ia32_misc_enable_read: bool,
    pub ia32_misc_enable_write: bool,
    pub msr_lstar_read: bool,
    pub msr_lstar_write: bool,
    pub msr_star_read: bool,
    pub msr_star_write: bool,
    pub msr_cstar_read: bool,
    pub msr_cstar_write: bool,
    pub apic_base_msr_read: bool,
    pub apic_base_msr_write: bool,
    pub msr_efer_read: bool,
    pub msr_efer_write: bool,
    pub gdtr_write: bool,
    pub idtr_write: bool,
    pub ldtr_write: bool,
    pub tr_write: bool,
    pub msr_sysenter_cs_write: bool,
    pub msr_sysenter_eip_write: bool,
    pub msr_sysenter_esp_write: bool,
    pub msr_sfmask_write: bool,
    pub msr_tsc_aux_write: bool,
    pub msr_sgx_launch_control_write: bool,
    pub msr_xss_write: bool,
    pub msr_scet_write: bool,
    pub msr_pls_ssp_write: bool,
    pub msr_interrupt_ssp_table_addr_write: bool,
    #[bits(35)]
    _rsvd_z: u64,
}