// hvdef/lib.rs
1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Microsoft hypervisor definitions.
5
6#![expect(missing_docs)]
7#![forbid(unsafe_code)]
8#![no_std]
9
10pub mod vbs;
11
12use bitfield_struct::bitfield;
13use core::fmt::Debug;
14use core::mem::size_of;
15use open_enum::open_enum;
16use static_assertions::const_assert;
17use zerocopy::FromBytes;
18use zerocopy::FromZeros;
19use zerocopy::Immutable;
20use zerocopy::IntoBytes;
21use zerocopy::KnownLayout;
22
/// Size of a hypervisor page, in bytes.
pub const HV_PAGE_SIZE: u64 = 4096;
/// Size of a hypervisor page, in bytes, as a `usize`.
pub const HV_PAGE_SIZE_USIZE: usize = 4096;
/// log2 of [`HV_PAGE_SIZE`].
pub const HV_PAGE_SHIFT: u64 = 12;

/// Partition ID referring to the calling partition itself.
pub const HV_PARTITION_ID_SELF: u64 = u64::MAX;
/// VP index referring to the calling virtual processor itself.
pub const HV_VP_INDEX_SELF: u32 = 0xfffffffe;
/// VP index wildcard matching any virtual processor.
pub const HV_ANY_VP: u32 = 0xffffffff;
30
// CPUID leaf numbers for the hypervisor interface.
pub const HV_CPUID_FUNCTION_VERSION_AND_FEATURES: u32 = 0x00000001;
pub const HV_CPUID_FUNCTION_HV_VENDOR_AND_MAX_FUNCTION: u32 = 0x40000000;
pub const HV_CPUID_FUNCTION_HV_INTERFACE: u32 = 0x40000001;
pub const HV_CPUID_FUNCTION_MS_HV_VERSION: u32 = 0x40000002;
pub const HV_CPUID_FUNCTION_MS_HV_FEATURES: u32 = 0x40000003;
pub const HV_CPUID_FUNCTION_MS_HV_ENLIGHTENMENT_INFORMATION: u32 = 0x40000004;
pub const HV_CPUID_FUNCTION_MS_HV_IMPLEMENTATION_LIMITS: u32 = 0x40000005;
pub const HV_CPUID_FUNCTION_MS_HV_HARDWARE_FEATURES: u32 = 0x40000006;
pub const HV_CPUID_FUNCTION_MS_HV_ISOLATION_CONFIGURATION: u32 = 0x4000000C;

// CPUID leaf numbers for the virtualization stack's own interface.
pub const VIRTUALIZATION_STACK_CPUID_VENDOR: u32 = 0x40000080;
pub const VIRTUALIZATION_STACK_CPUID_INTERFACE: u32 = 0x40000081;
pub const VIRTUALIZATION_STACK_CPUID_PROPERTIES: u32 = 0x40000082;
44
/// The result of querying the VIRTUALIZATION_STACK_CPUID_PROPERTIES leaf.
///
/// The current partition is considered "portable": the virtualization stack may
/// attempt to bring up the partition on another physical machine.
pub const VS1_PARTITION_PROPERTIES_EAX_IS_PORTABLE: u32 = 0x00000001;
/// The current partition has a synthetic debug device available to it.
pub const VS1_PARTITION_PROPERTIES_EAX_DEBUG_DEVICE_PRESENT: u32 = 0x00000002;
/// Extended I/O APIC RTEs are supported for the current partition.
pub const VS1_PARTITION_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE: u32 = 0x00000004;
/// Confidential VMBus is available.
pub const VS1_PARTITION_PROPERTIES_EAX_CONFIDENTIAL_VMBUS_AVAILABLE: u32 = 0x00000008;

/// SMCCC UID for the Microsoft Hypervisor.
pub const VENDOR_HYP_UID_MS_HYPERVISOR: [u32; 4] = [0x4d32ba58, 0xcd244764, 0x8eef6c75, 0x16597024];
59
60#[bitfield(u64)]
61pub struct HvPartitionPrivilege {
62    // access to virtual msrs
63    pub access_vp_runtime_msr: bool,
64    pub access_partition_reference_counter: bool,
65    pub access_synic_msrs: bool,
66    pub access_synthetic_timer_msrs: bool,
67    pub access_apic_msrs: bool,
68    pub access_hypercall_msrs: bool,
69    pub access_vp_index: bool,
70    pub access_reset_msr: bool,
71    pub access_stats_msr: bool,
72    pub access_partition_reference_tsc: bool,
73    pub access_guest_idle_msr: bool,
74    pub access_frequency_msrs: bool,
75    pub access_debug_msrs: bool,
76    pub access_reenlightenment_ctrls: bool,
77    pub access_root_scheduler_msr: bool,
78    pub access_tsc_invariant_controls: bool,
79    _reserved1: u16,
80
81    // Access to hypercalls
82    pub create_partitions: bool,
83    pub access_partition_id: bool,
84    pub access_memory_pool: bool,
85    pub adjust_message_buffers: bool,
86    pub post_messages: bool,
87    pub signal_events: bool,
88    pub create_port: bool,
89    pub connect_port: bool,
90    pub access_stats: bool,
91    #[bits(2)]
92    _reserved2: u64,
93    pub debugging: bool,
94    pub cpu_management: bool,
95    pub configure_profiler: bool,
96    pub access_vp_exit_tracing: bool,
97    pub enable_extended_gva_ranges_flush_va_list: bool,
98    pub access_vsm: bool,
99    pub access_vp_registers: bool,
100    _unused_bit: bool,
101    pub fast_hypercall_output: bool,
102    pub enable_extended_hypercalls: bool,
103    pub start_virtual_processor: bool,
104    pub isolation: bool,
105    #[bits(9)]
106    _reserved3: u64,
107}
108
109/// Partition processor features (bank 0).
110///
111/// Each bit indicates whether the corresponding processor feature is enabled
112/// for a partition. When used in `mshv_create_partition_v2.pt_cpu_fbanks`,
113/// the sense is *inverted*: a set bit means the feature is **disabled**.
114#[bitfield(u64)]
115pub struct HvX64PartitionProcessorFeatures {
116    pub sse3_support: bool,
117    pub lahf_sahf_support: bool,
118    pub ssse3_support: bool,
119    pub sse4_1_support: bool,
120    pub sse4_2_support: bool,
121    pub sse4a_support: bool,
122    pub xop_support: bool,
123    pub pop_cnt_support: bool,
124    pub cmpxchg16b_support: bool,
125    pub altmovcr8_support: bool,
126    pub lzcnt_support: bool,
127    pub mis_align_sse_support: bool,
128    pub mmx_ext_support: bool,
129    pub amd3d_now_support: bool,
130    pub extended_amd3d_now_support: bool,
131    pub page_1gb_support: bool,
132    pub aes_support: bool,
133    pub pclmulqdq_support: bool,
134    pub pcid_support: bool,
135    pub fma4_support: bool,
136    pub f16c_support: bool,
137    pub rd_rand_support: bool,
138    pub rd_wr_fs_gs_support: bool,
139    pub smep_support: bool,
140    pub enhanced_fast_string_support: bool,
141    pub bmi1_support: bool,
142    pub bmi2_support: bool,
143    pub hle_support_deprecated: bool,
144    pub rtm_support_deprecated: bool,
145    pub movbe_support: bool,
146    pub npiep1_support: bool,
147    pub dep_x87_fpu_save_support: bool,
148    pub rd_seed_support: bool,
149    pub adx_support: bool,
150    pub intel_prefetch_support: bool,
151    pub smap_support: bool,
152    pub hle_support: bool,
153    pub rtm_support: bool,
154    pub rdtscp_support: bool,
155    pub clflushopt_support: bool,
156    pub clwb_support: bool,
157    pub sha_support: bool,
158    pub x87_pointers_saved_support: bool,
159    pub invpcid_support: bool,
160    pub ibrs_support: bool,
161    pub stibp_support: bool,
162    pub ibpb_support: bool,
163    pub unrestricted_guest_support: bool,
164    pub mdd_support: bool,
165    pub fast_short_rep_mov_support: bool,
166    pub l1d_cache_flush_support: bool,
167    pub rdcl_no_support: bool,
168    pub ibrs_all_support: bool,
169    pub skip_l1df_support: bool,
170    pub ssb_no_support: bool,
171    pub rsb_a_no_support: bool,
172    pub virt_spec_ctrl_support: bool,
173    pub rd_pid_support: bool,
174    pub umip_support: bool,
175    pub mbs_no_support: bool,
176    pub mb_clear_support: bool,
177    pub taa_no_support: bool,
178    pub tsx_ctrl_support: bool,
179    _reserved_bank0: bool,
180}
181
182/// Partition processor features (bank 1).
183#[bitfield(u64)]
184pub struct HvX64PartitionProcessorFeatures1 {
185    pub a_count_m_count_support: bool,
186    pub tsc_invariant_support: bool,
187    pub cl_zero_support: bool,
188    pub rdpru_support: bool,
189    pub la57_support: bool,
190    pub mbec_support: bool,
191    pub nested_virt_support: bool,
192    pub psfd_support: bool,
193    pub cet_ss_support: bool,
194    pub cet_ibt_support: bool,
195    pub vmx_exception_inject_support: bool,
196    pub enqcmd_support: bool,
197    pub umwait_tpause_support: bool,
198    pub movdiri_support: bool,
199    pub movdir64b_support: bool,
200    pub cldemote_support: bool,
201    pub serialize_support: bool,
202    pub tsc_deadline_tmr_support: bool,
203    pub tsc_adjust_support: bool,
204    pub fz_l_rep_movsb: bool,
205    pub fs_rep_stosb: bool,
206    pub fs_rep_cmpsb: bool,
207    pub tsx_ld_trk_support: bool,
208    pub vmx_ins_outs_exit_info_support: bool,
209    pub hlat_support: bool,
210    pub sbdr_ssdp_no_support: bool,
211    pub fbsdp_no_support: bool,
212    pub psdp_no_support: bool,
213    pub fb_clear_support: bool,
214    pub btc_no_support: bool,
215    pub ibpb_rsb_flush_support: bool,
216    pub stibp_always_on_support: bool,
217    pub perf_global_ctrl_support: bool,
218    pub npt_execute_only_support: bool,
219    pub npt_ad_flags_support: bool,
220    pub npt_1gb_page_support: bool,
221    pub amd_processor_topology_node_id_support: bool,
222    pub local_machine_check_support: bool,
223    pub extended_topology_leaf_fp256_amd_support: bool,
224    pub gds_no_support: bool,
225    pub cmpccxadd_support: bool,
226    pub tsc_aux_virtualization_support: bool,
227    pub rmp_query_support: bool,
228    pub bhi_no_support: bool,
229    pub bhi_dis_support: bool,
230    pub prefetch_i_support: bool,
231    pub sha512_support: bool,
232    pub mitigation_ctrl_support: bool,
233    pub rfds_no_support: bool,
234    pub rfds_clear_support: bool,
235    pub sm3_support: bool,
236    pub sm4_support: bool,
237    pub secure_avic_support: bool,
238    pub guest_intercept_ctrl_support: bool,
239    pub sbpb_support: bool,
240    pub ibpb_br_type_support: bool,
241    pub srso_no_support: bool,
242    pub srso_user_kernel_no_support: bool,
243    pub vrew_clear_support: bool,
244    pub tsa_l1_no_support: bool,
245    pub tsa_sq_no_support: bool,
246    pub lass_support: bool,
247    #[bits(2)]
248    _reserved_bank1: u8,
249}
250
251/// Partition processor XSAVE features.
252#[bitfield(u64)]
253pub struct HvX64PartitionProcessorXsaveFeatures {
254    pub xsave_support: bool,
255    pub xsaveopt_support: bool,
256    pub avx_support: bool,
257    pub avx2_support: bool,
258    pub fma_support: bool,
259    pub mpx_support: bool,
260    pub avx512_support: bool,
261    pub avx512_dq_support: bool,
262    pub avx512_cd_support: bool,
263    pub avx512_bw_support: bool,
264    pub avx512_vl_support: bool,
265    pub xsave_comp_support: bool,
266    pub xsave_supervisor_support: bool,
267    pub xcr1_support: bool,
268    pub avx512_bitalg_support: bool,
269    pub avx512_ifma_support: bool,
270    pub avx512_vbmi_support: bool,
271    pub avx512_vbmi2_support: bool,
272    pub avx512_vnni_support: bool,
273    pub gfni_support: bool,
274    pub vaes_support: bool,
275    pub avx512_vpopcntdq_support: bool,
276    pub vpclmulqdq_support: bool,
277    pub avx512_bf16_support: bool,
278    pub avx512_vp2_intersect_support: bool,
279    pub avx512_fp16_support: bool,
280    pub xfd_support: bool,
281    pub amx_tile_support: bool,
282    pub amx_bf16_support: bool,
283    pub amx_int8_support: bool,
284    pub avx_vnni_support: bool,
285    pub avx_ifma_support: bool,
286    pub avx_ne_convert_support: bool,
287    pub avx_vnni_int8_support: bool,
288    pub avx_vnni_int16_support: bool,
289    pub avx10_1_256_support: bool,
290    pub avx10_1_512_support: bool,
291    pub amx_fp16_support: bool,
292    #[bits(26)]
293    _reserved: u64,
294}
295
296/// Synthetic processor features that control which Hyper-V enlightenments
297/// are exposed to a guest partition.
298#[bitfield(u64)]
299pub struct HvPartitionSyntheticProcessorFeatures {
300    pub hypervisor_present: bool,
301    pub hv1: bool,
302    pub access_vp_run_time_reg: bool,
303    pub access_partition_reference_counter: bool,
304    pub access_synic_regs: bool,
305    pub access_synthetic_timer_regs: bool,
306    pub access_intr_ctrl_regs: bool,
307    pub access_hypercall_regs: bool,
308    pub access_vp_index: bool,
309    pub access_partition_reference_tsc: bool,
310    pub access_guest_idle_reg: bool,
311    pub access_frequency_regs: bool,
312    _reserved_z12: bool,
313    _reserved_z13: bool,
314    _reserved_z14: bool,
315    pub enable_extended_gva_ranges_for_flush_virtual_address_list: bool,
316    _reserved_z16: bool,
317    _reserved_z17: bool,
318    pub fast_hypercall_output: bool,
319    _reserved_z19: bool,
320    pub start_virtual_processor: bool,
321    _reserved_z21: bool,
322    pub direct_synthetic_timers: bool,
323    _reserved_z23: bool,
324    pub extended_processor_masks: bool,
325    pub tb_flush_hypercalls: bool,
326    pub synthetic_cluster_ipi: bool,
327    pub notify_long_spin_wait: bool,
328    pub query_numa_distance: bool,
329    pub signal_events: bool,
330    pub retarget_device_interrupt: bool,
331    pub restore_time: bool,
332    pub enlightened_vmcs: bool,
333    pub nested_debug_ctl: bool,
334    pub synthetic_time_unhalted_timer: bool,
335    pub idle_spec_ctrl: bool,
336    _reserved_z36: bool,
337    pub wake_vps: bool,
338    pub access_vp_regs: bool,
339    _reserved_z39: bool,
340    pub management_vtl_synic_support: bool,
341    pub proxy_interrupt_doorbell_support: bool,
342    _reserved_z42: bool,
343    pub mmio_hypercalls: bool,
344    #[bits(20)]
345    _reserved: u64,
346}
347
348open_enum! {
349    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
350    pub enum HvPartitionIsolationType: u8 {
351        NONE = 0,
352        VBS = 1,
353        SNP = 2,
354        TDX = 3,
355    }
356}
357
358open_enum! {
359    /// Partition property codes.
360    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
361    pub enum HvPartitionPropertyCode: u32 {
362        #![expect(non_upper_case_globals)]
363
364        // Privilege properties
365        PrivilegeFlags                       = 0x00010000,
366        SyntheticProcFeatures                = 0x00010001,
367        AllowedParentUserModeHypercalls      = 0x00010002,
368
369        // Scheduling properties
370        Suspend                                = 0x00020000,
371        CpuReserve                             = 0x00020001,
372        CpuCap                                 = 0x00020002,
373        CpuWeight                              = 0x00020003,
374        CpuGroupId                             = 0x00020004,
375        HierarchicalIntegratedSchedulerEnabled = 0x00020005,
376
377        // Time properties
378        TimeFreeze                           = 0x00030003,
379        ApicFrequency                        = 0x00030004,
380        ReferenceTime                        = 0x00030005,
381
382        // Debugging properties
383        DebugChannelId                       = 0x00040000,
384        DebugChannelId0                      = 0x00040001,
385        DebugChannelId1                      = 0x00040002,
386        DebugChannelId2                      = 0x00040003,
387
388        // Resource properties
389        VirtualTlbPageCount                  = 0x00050000,
390        VsmConfig                            = 0x00050001,
391        ZeroMemoryOnReset                    = 0x00050002,
392        ProcessorsPerSocket                  = 0x00050003,
393        NestedTlbSize                        = 0x00050004,
394        GpaPageAccessTracking                = 0x00050005,
395        VsmPermissionsDirtySinceLastQuery    = 0x00050006,
396        SgxLaunchControlConfig               = 0x00050007,
397        DefaultSgxLaunchControl0             = 0x00050008,
398        DefaultSgxLaunchControl1             = 0x00050009,
399        DefaultSgxLaunchControl2             = 0x0005000A,
400        DefaultSgxLaunchControl3             = 0x0005000B,
401        IsolationState                       = 0x0005000C,
402        IsolationControl                     = 0x0005000D,
403        AllocationId                         = 0x0005000E,
404        MonitoringId                         = 0x0005000F,
405        ImplementedPhysicalAddressBits       = 0x00050010,
406        NonArchitecturalCoreSharing          = 0x00050011,
407        HypercallDoorbellPage                = 0x00050012,
408        CppcRequestValue                     = 0x00050013,
409        IsolationPolicy                      = 0x00050014,
410        DmaCapableDevices                    = 0x00050015,
411        ProcessorsPerL3                      = 0x00050016,
412        UnimplementedMsrAction               = 0x00050017,
413        AmdNodesPerSocket                    = 0x00050018,
414        ReferenceTscPageActive               = 0x00050019,
415        AutoEoiEnabled                       = 0x0005001A,
416        L3CacheWays                          = 0x0005001B,
417        IsolationType                        = 0x0005001C,
418        PerfmonMode                          = 0x0005001D,
419        DepositStatus                        = 0x0005001E,
420        Mirroring                            = 0x0005001F,
421        MirrorState                          = 0x00050020,
422        MgmtVtlMaxMemorySections             = 0x00050021,
423        SevVmgexitOffloads                   = 0x00050022,
424        PenalizeBusLock                      = 0x00050023,
425        TopologyApicIdOptIn                  = 0x00050024,
426        CppcResourcePrioritiesValue          = 0x00050025,
427        PartitionDiagBufferConfig            = 0x00050026,
428        GicdBaseAddress                      = 0x00050028,
429        GitsTranslaterBaseAddress            = 0x00050029,
430        GicLpiIntIdBits                      = 0x0005002A,
431        GicPpiOverflowInterruptFromCntv      = 0x0005002B,
432        GicPpiOverflowInterruptFromCntp      = 0x0005002C,
433        GicPpiPerformanceMonitorsInterrupt   = 0x0005002D,
434        GicPpiPmbirq                         = 0x0005002E,
435        TdMigrationStreamCount               = 0x0005002F,
436        AutoSuspend                          = 0x00050030,
437        SintReservedInterruptId              = 0x00050031,
438        GpaPinningEnabled                    = 0x00050032,
439        TdMigrationMaxStreamCount            = 0x00050033,
440        TdMigrationNumMemScanContext         = 0x00050034,
441        TdMigrationMaxMemScanRanges          = 0x00050035,
442
443        // Compatibility properties
444        ProcessorVendor                      = 0x00060000,
445        ProcessorFeaturesDeprecated          = 0x00060001,
446        ProcessorXsaveFeatures               = 0x00060002,
447        ProcessorCLFlushSize                 = 0x00060003,
448        EnlightenmentModifications           = 0x00060004,
449        CompatibilityVersion                 = 0x00060005,
450        PhysicalAddressWidth                 = 0x00060006,
451        XsaveStates                          = 0x00060007,
452        MaxXsaveDataSize                     = 0x00060008,
453        ProcessorClockFrequency              = 0x00060009,
454        ProcessorFeatures0                   = 0x0006000A,
455        ProcessorFeatures1                   = 0x0006000B,
456        ProcessorCtrEl0                      = 0x0006000C,
457        ProcessorDczidEl0                    = 0x0006000D,
458        ProcessorIchVtrEl2                   = 0x0006000E,
459        ProcessorIdAa64Dfr0El1               = 0x0006000F,
460        RootProcessorFeatures0               = 0x00060010,
461        RootProcessorFeatures1               = 0x00060011,
462        RootProcessorXsaveFeatures           = 0x00060012,
463        RootSyntheticProcFeatures            = 0x00060013,
464        PhysicalAddressSize                  = 0x00060014,
465        FeatureBankCount                     = 0x00060015,
466        ProcessorIdAa64Dfr1El1               = 0x00060016,
467        ProcessorCntfrqEl0                   = 0x00060017,
468        MaxSveVectorLength                   = 0x00060018,
469        MaxSmeStreamingVectorLength          = 0x00060019,
470
471        // Guest software properties
472        GuestOsId                            = 0x00070000,
473
474        // Nested virtualization properties
475        ProcessorVirtualizationFeatures      = 0x00080000,
476        MaxHardwareIsolatedGuests            = 0x00080001,
477        SnpEnabled                           = 0x00080002,
478        NestedVmxBasic                       = 0x00080003,
479        NestedVmxPinbasedCtls                = 0x00080004,
480        NestedVmxProcbasedCtls               = 0x00080005,
481        NestedVmxExitCtls                    = 0x00080006,
482        NestedVmxEntryCtls                   = 0x00080007,
483        NestedVmxMisc                        = 0x00080008,
484        NestedVmxCr0Fixed0                   = 0x00080009,
485        NestedVmxCr0Fixed1                   = 0x0008000A,
486        NestedVmxCr4Fixed0                   = 0x0008000B,
487        NestedVmxCr4Fixed1                   = 0x0008000C,
488        NestedVmxVmcsEnum                    = 0x0008000D,
489        NestedVmxProcbasedCtls2              = 0x0008000E,
490        NestedVmxEptVpidCap                  = 0x0008000F,
491        NestedVmxTruePinbasedCtls            = 0x00080010,
492        NestedVmxTrueProcbasedCtls           = 0x00080011,
493        NestedVmxTrueExitCtls                = 0x00080012,
494        NestedVmxTrueEntryCtls               = 0x00080013,
495        NestedVmxProcbasedCtls3              = 0x00080014,
496        NestedVmxExitCtls2                   = 0x00080015,
497        VhState                              = 0x00080100,
498        MaxHierarchicalPartitionCount        = 0x00080101,
499        MaxHierarchicalVpCount               = 0x00080102,
500        StateTransferMode                    = 0x00080103,
501        MigrationAbortCleanupCount           = 0x00080104,
502        TdComprehensiveReset                 = 0x00080105,
503
504        // Extended properties with larger property values
505        InheritedDeviceDomainReservedRegions = 0x00090000,
506        TdMrConfigId                         = 0x00090001,
507        TdMrOwner                            = 0x00090002,
508        TdMrOwnerConfig                      = 0x00090003,
509        VNUMATopologyConfig                  = 0x00090004,
510        RootVpSharedPages                    = 0x00090005,
511        VmmCapabilities                      = 0x00090007,
512        CompletePartitionIntercept           = 0x00090008,
513        AssignableSyntheticProcFeatures      = 0x00090009,
514        HwIsolationTdxSupported              = 0x0009000A,
515        HwIsolationSevSupported              = 0x0009000B,
516        MigrationTdInfoHash                  = 0x0009000C,
517        MigrationTdBindingSlot               = 0x0009000D,
518        DisabledProcessorFeaturesEx          = 0x0009000E,
519        RootProcessorFeaturesEx              = 0x0009000F,
520        EnabledProcessorFeaturesEx           = 0x00090010,
521        PmuEventTypes                        = 0x00090011,
522        TdComprehensiveConfigure             = 0x00090012,
523    }
524}
525
526open_enum! {
527    /// Processor vendor as returned by [`HvPartitionPropertyCode::ProcessorVendor`].
528    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
529    pub enum HvProcessorVendor: u32 {
530        AMD    = 0x0000,
531        INTEL  = 0x0001,
532        HYGON  = 0x0002,
533        ARM    = 0x0010,
534    }
535}
536
537#[bitfield(u128)]
538#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
539pub struct HvFeatures {
540    #[bits(64)]
541    pub privileges: HvPartitionPrivilege,
542
543    #[bits(4)]
544    pub max_supported_cstate: u32,
545    pub hpet_needed_for_c3_power_state_deprecated: bool,
546    pub invariant_mperf_available: bool,
547    pub supervisor_shadow_stack_available: bool,
548    pub arch_pmu_available: bool,
549    pub exception_trap_intercept_available: bool,
550    #[bits(23)]
551    reserved: u32,
552
553    pub mwait_available_deprecated: bool,
554    pub guest_debugging_available: bool,
555    pub performance_monitors_available: bool,
556    pub cpu_dynamic_partitioning_available: bool,
557    pub xmm_registers_for_fast_hypercall_available: bool,
558    pub guest_idle_available: bool,
559    pub hypervisor_sleep_state_support_available: bool,
560    pub numa_distance_query_available: bool,
561    pub frequency_regs_available: bool,
562    pub synthetic_machine_check_available: bool,
563    pub guest_crash_regs_available: bool,
564    pub debug_regs_available: bool,
565    pub npiep1_available: bool,
566    pub disable_hypervisor_available: bool,
567    pub extended_gva_ranges_for_flush_virtual_address_list_available: bool,
568    pub fast_hypercall_output_available: bool,
569    pub svm_features_available: bool,
570    pub sint_polling_mode_available: bool,
571    pub hypercall_msr_lock_available: bool,
572    pub direct_synthetic_timers: bool,
573    pub register_pat_available: bool,
574    pub register_bndcfgs_available: bool,
575    pub watchdog_timer_available: bool,
576    pub synthetic_time_unhalted_timer_available: bool,
577    pub device_domains_available: bool,    // HDK only.
578    pub s1_device_domains_available: bool, // HDK only.
579    pub lbr_available: bool,
580    pub ipt_available: bool,
581    pub cross_vtl_flush_available: bool,
582    pub idle_spec_ctrl_available: bool,
583    pub translate_gva_flags_available: bool,
584    pub apic_eoi_intercept_available: bool,
585}
586
587impl HvFeatures {
588    pub fn from_cpuid(cpuid: [u32; 4]) -> Self {
589        zerocopy::transmute!(cpuid)
590    }
591
592    pub fn into_cpuid(self) -> [u32; 4] {
593        zerocopy::transmute!(self)
594    }
595}
596
597#[bitfield(u128)]
598#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
599pub struct HvEnlightenmentInformation {
600    pub use_hypercall_for_address_space_switch: bool,
601    pub use_hypercall_for_local_flush: bool,
602    pub use_hypercall_for_remote_flush_and_local_flush_entire: bool,
603    pub use_apic_msrs: bool,
604    pub use_hv_register_for_reset: bool,
605    pub use_relaxed_timing: bool,
606    pub use_dma_remapping_deprecated: bool,
607    pub use_interrupt_remapping_deprecated: bool,
608    pub use_x2_apic_msrs: bool,
609    pub deprecate_auto_eoi: bool,
610    pub use_synthetic_cluster_ipi: bool,
611    pub use_ex_processor_masks: bool,
612    pub nested: bool,
613    pub use_int_for_mbec_system_calls: bool,
614    pub use_vmcs_enlightenments: bool,
615    pub use_synced_timeline: bool,
616    pub core_scheduler_requested: bool,
617    pub use_direct_local_flush_entire: bool,
618    pub no_non_architectural_core_sharing: bool,
619    pub use_x2_apic: bool,
620    pub restore_time_on_resume: bool,
621    pub use_hypercall_for_mmio_access: bool,
622    pub use_gpa_pinning_hypercall: bool,
623    pub wake_vps: bool,
624    _reserved: u8,
625    pub long_spin_wait_count: u32,
626    #[bits(7)]
627    pub implemented_physical_address_bits: u32,
628    #[bits(25)]
629    _reserved1: u32,
630    _reserved2: u32,
631}
632
633impl HvEnlightenmentInformation {
634    pub fn from_cpuid(cpuid: [u32; 4]) -> Self {
635        zerocopy::transmute!(cpuid)
636    }
637
638    pub fn into_cpuid(self) -> [u32; 4] {
639        zerocopy::transmute!(self)
640    }
641}
642
643#[bitfield(u128)]
644pub struct HvHardwareFeatures {
645    pub apic_overlay_assist_in_use: bool,
646    pub msr_bitmaps_in_use: bool,
647    pub architectural_performance_counters_in_use: bool,
648    pub second_level_address_translation_in_use: bool,
649    pub dma_remapping_in_use: bool,
650    pub interrupt_remapping_in_use: bool,
651    pub memory_patrol_scrubber_present: bool,
652    pub dma_protection_in_use: bool,
653    pub hpet_requested: bool,
654    pub synthetic_timers_volatile: bool,
655    #[bits(4)]
656    pub hypervisor_level: u32,
657    pub physical_destination_mode_required: bool,
658    pub use_vmfunc_for_alias_map_switch: bool,
659    pub hv_register_for_memory_zeroing_supported: bool,
660    pub unrestricted_guest_supported: bool,
661    pub rdt_afeatures_supported: bool,
662    pub rdt_mfeatures_supported: bool,
663    pub child_perfmon_pmu_supported: bool,
664    pub child_perfmon_lbr_supported: bool,
665    pub child_perfmon_ipt_supported: bool,
666    pub apic_emulation_supported: bool,
667    pub child_x2_apic_recommended: bool,
668    pub hardware_watchdog_reserved: bool,
669    pub device_access_tracking_supported: bool,
670    pub hardware_gpa_access_tracking_supported: bool,
671    #[bits(4)]
672    _reserved: u32,
673
674    pub device_domain_input_width: u8,
675    #[bits(24)]
676    _reserved1: u32,
677    _reserved2: u32,
678    _reserved3: u32,
679}
680
681#[bitfield(u128)]
682pub struct HvIsolationConfiguration {
683    pub paravisor_present: bool,
684    #[bits(31)]
685    pub _reserved0: u32,
686
687    #[bits(4)]
688    pub isolation_type: u8,
689    _reserved11: bool,
690    pub shared_gpa_boundary_active: bool,
691    #[bits(6)]
692    pub shared_gpa_boundary_bits: u8,
693    #[bits(20)]
694    _reserved12: u32,
695    _reserved2: u32,
696    _reserved3: u32,
697}
698
699open_enum! {
700    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
701    pub enum HypercallCode: u16 {
702        #![expect(non_upper_case_globals)]
703
704        HvCallSwitchVirtualAddressSpace = 0x0001,
705        HvCallFlushVirtualAddressSpace = 0x0002,
706        HvCallFlushVirtualAddressList = 0x0003,
707        HvCallNotifyLongSpinWait = 0x0008,
708        HvCallInvokeHypervisorDebugger = 0x000a,
709        HvCallSendSyntheticClusterIpi = 0x000b,
710        HvCallModifyVtlProtectionMask = 0x000c,
711        HvCallEnablePartitionVtl = 0x000d,
712        HvCallEnableVpVtl = 0x000f,
713        HvCallVtlCall = 0x0011,
714        HvCallVtlReturn = 0x0012,
715        HvCallFlushVirtualAddressSpaceEx = 0x0013,
716        HvCallFlushVirtualAddressListEx = 0x0014,
717        HvCallSendSyntheticClusterIpiEx = 0x0015,
718        HvCallInstallIntercept = 0x004d,
719        HvCallGetVpRegisters = 0x0050,
720        HvCallSetVpRegisters = 0x0051,
721        HvCallTranslateVirtualAddress = 0x0052,
722        HvCallPostMessage = 0x005C,
723        HvCallSignalEvent = 0x005D,
724        HvCallOutputDebugCharacter = 0x0071,
725        HvCallGetSystemProperty = 0x007b,
726        HvCallRetargetDeviceInterrupt = 0x007e,
727        HvCallNotifyPartitionEvent = 0x0087,
728        HvCallRegisterInterceptResult = 0x0091,
729        HvCallAssertVirtualInterrupt = 0x0094,
730        HvCallStartVirtualProcessor = 0x0099,
731        HvCallGetVpIndexFromApicId = 0x009A,
732        HvCallTranslateVirtualAddressEx = 0x00AC,
733        HvCallCheckForIoIntercept = 0x00ad,
734        HvCallFlushGuestPhysicalAddressSpace = 0x00AF,
735        HvCallFlushGuestPhysicalAddressList = 0x00B0,
736        HvCallSignalEventDirect = 0x00C0,
737        HvCallPostMessageDirect = 0x00C1,
738        HvCallCheckSparseGpaPageVtlAccess = 0x00D4,
739        HvCallAcceptGpaPages = 0x00D9,
740        HvCallModifySparseGpaPageHostVisibility = 0x00DB,
741        HvCallGetVpCpuidValues = 0x00F4,
742        HvCallRestorePartitionTime = 0x0103,
743        HvCallMemoryMappedIoRead = 0x0106,
744        HvCallMemoryMappedIoWrite = 0x0107,
745        HvCallPinGpaPageRanges = 0x0112,
746        HvCallUnpinGpaPageRanges = 0x0113,
747        HvCallQuerySparseGpaPageHostVisibility = 0x011C,
748
749        // Extended hypercalls.
750        HvExtCallQueryCapabilities = 0x8001,
751
752        // VBS guest calls.
753        HvCallVbsVmCallReport = 0xC001,
754    }
755}
756
// Synthetic MSR numbers in the hypervisor's 0x40000000 MSR range.
pub const HV_X64_MSR_GUEST_OS_ID: u32 = 0x40000000;
pub const HV_X64_MSR_HYPERCALL: u32 = 0x40000001;
pub const HV_X64_MSR_VP_INDEX: u32 = 0x40000002;
pub const HV_X64_MSR_TIME_REF_COUNT: u32 = 0x40000020;
pub const HV_X64_MSR_REFERENCE_TSC: u32 = 0x40000021;
pub const HV_X64_MSR_TSC_FREQUENCY: u32 = 0x40000022;
pub const HV_X64_MSR_APIC_FREQUENCY: u32 = 0x40000023;
pub const HV_X64_MSR_EOI: u32 = 0x40000070;
pub const HV_X64_MSR_ICR: u32 = 0x40000071;
pub const HV_X64_MSR_TPR: u32 = 0x40000072;
pub const HV_X64_MSR_VP_ASSIST_PAGE: u32 = 0x40000073;
// SynIC control MSRs.
pub const HV_X64_MSR_SCONTROL: u32 = 0x40000080;
pub const HV_X64_MSR_SVERSION: u32 = 0x40000081;
pub const HV_X64_MSR_SIEFP: u32 = 0x40000082;
pub const HV_X64_MSR_SIMP: u32 = 0x40000083;
pub const HV_X64_MSR_EOM: u32 = 0x40000084;
// SINT0..SINT15 are contiguous.
pub const HV_X64_MSR_SINT0: u32 = 0x40000090;
pub const HV_X64_MSR_SINT1: u32 = 0x40000091;
pub const HV_X64_MSR_SINT2: u32 = 0x40000092;
pub const HV_X64_MSR_SINT3: u32 = 0x40000093;
pub const HV_X64_MSR_SINT4: u32 = 0x40000094;
pub const HV_X64_MSR_SINT5: u32 = 0x40000095;
pub const HV_X64_MSR_SINT6: u32 = 0x40000096;
pub const HV_X64_MSR_SINT7: u32 = 0x40000097;
pub const HV_X64_MSR_SINT8: u32 = 0x40000098;
pub const HV_X64_MSR_SINT9: u32 = 0x40000099;
pub const HV_X64_MSR_SINT10: u32 = 0x4000009a;
pub const HV_X64_MSR_SINT11: u32 = 0x4000009b;
pub const HV_X64_MSR_SINT12: u32 = 0x4000009c;
pub const HV_X64_MSR_SINT13: u32 = 0x4000009d;
pub const HV_X64_MSR_SINT14: u32 = 0x4000009e;
pub const HV_X64_MSR_SINT15: u32 = 0x4000009f;
// Synthetic timers: one (config, count) MSR pair per timer.
pub const HV_X64_MSR_STIMER0_CONFIG: u32 = 0x400000b0;
pub const HV_X64_MSR_STIMER0_COUNT: u32 = 0x400000b1;
pub const HV_X64_MSR_STIMER1_CONFIG: u32 = 0x400000b2;
pub const HV_X64_MSR_STIMER1_COUNT: u32 = 0x400000b3;
pub const HV_X64_MSR_STIMER2_CONFIG: u32 = 0x400000b4;
pub const HV_X64_MSR_STIMER2_COUNT: u32 = 0x400000b5;
pub const HV_X64_MSR_STIMER3_CONFIG: u32 = 0x400000b6;
pub const HV_X64_MSR_STIMER3_COUNT: u32 = 0x400000b7;
pub const HV_X64_MSR_GUEST_IDLE: u32 = 0x400000F0;
// Guest crash reporting MSRs: P0..P4 carry parameters, CTL triggers.
pub const HV_X64_MSR_GUEST_CRASH_P0: u32 = 0x40000100;
pub const HV_X64_MSR_GUEST_CRASH_P1: u32 = 0x40000101;
pub const HV_X64_MSR_GUEST_CRASH_P2: u32 = 0x40000102;
pub const HV_X64_MSR_GUEST_CRASH_P3: u32 = 0x40000103;
pub const HV_X64_MSR_GUEST_CRASH_P4: u32 = 0x40000104;
pub const HV_X64_MSR_GUEST_CRASH_CTL: u32 = 0x40000105;

/// Number of guest crash parameter MSRs (P0..P4).
pub const HV_X64_GUEST_CRASH_PARAMETER_MSRS: usize = 5;
806
/// A hypervisor status code.
///
/// Zero is success; the non-success status codes are defined in [`HvError`].
/// Use [`HvStatus::result`] to convert to a [`Result`].
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
#[repr(transparent)]
pub struct HvStatus(pub u16);
813
814impl HvStatus {
815    /// The success status code.
816    pub const SUCCESS: Self = Self(0);
817
818    /// Returns `Ok(())` if this is `HvStatus::SUCCESS`, otherwise returns an
819    /// `Err(err)` where `err` is the corresponding `HvError`.
820    pub fn result(self) -> HvResult<()> {
821        if let Ok(err) = self.0.try_into() {
822            Err(HvError(err))
823        } else {
824            Ok(())
825        }
826    }
827
828    /// Returns true if this is `HvStatus::SUCCESS`.
829    pub fn is_ok(self) -> bool {
830        self == Self::SUCCESS
831    }
832
833    /// Returns true if this is not `HvStatus::SUCCESS`.
834    pub fn is_err(self) -> bool {
835        self != Self::SUCCESS
836    }
837
838    const fn from_bits(bits: u16) -> Self {
839        Self(bits)
840    }
841
842    const fn into_bits(self) -> u16 {
843        self.0
844    }
845}
846
847impl From<Result<(), HvError>> for HvStatus {
848    fn from(err: Result<(), HvError>) -> Self {
849        err.err().map_or(Self::SUCCESS, |err| Self(err.0.get()))
850    }
851}
852
853impl Debug for HvStatus {
854    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
855        match self.result() {
856            Ok(()) => f.write_str("Success"),
857            Err(err) => Debug::fmt(&err, f),
858        }
859    }
860}
861
/// An [`HvStatus`] value representing an error.
//
// DEVNOTE: use `NonZeroU16` to get a niche optimization, since 0 is reserved
// for success. This makes `Option<HvError>` and `HvResult<()>` the same size
// as a bare `u16`.
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout)]
#[repr(transparent)]
pub struct HvError(core::num::NonZeroU16);

impl From<core::num::NonZeroU16> for HvError {
    // Wraps a raw non-zero status code without validating that it is a
    // known error value; unknown codes still Display/Debug as raw numbers.
    fn from(err: core::num::NonZeroU16) -> Self {
        Self(err)
    }
}
875
876impl Debug for HvError {
877    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
878        match self.debug_name() {
879            Some(name) => f.pad(name),
880            None => Debug::fmt(&self.0.get(), f),
881        }
882    }
883}
884
885impl core::fmt::Display for HvError {
886    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
887        match self.doc_str() {
888            Some(s) => f.write_str(s),
889            None => write!(f, "Hypervisor error {:#06x}", self.0),
890        }
891    }
892}
893
894impl core::error::Error for HvError {}
895
// Generates, on the given error type, one named constant per error code plus
// two lookup helpers: `debug_name` (code -> constant identifier, used by
// `Debug`) and `doc_str` (code -> trimmed doc-comment text, used by
// `Display`). The doc comment of each constant doubles as its runtime
// message, captured via the `#[doc = $doc]` attribute pattern.
macro_rules! hv_error {
    ($ty:ty, $(#[doc = $doc:expr] $ident:ident = $val:expr),* $(,)?) => {

        #[expect(non_upper_case_globals)]
        impl $ty {
            $(
                #[doc = $doc]
                pub const $ident: Self = Self(core::num::NonZeroU16::new($val).unwrap());
            )*

            // Maps a status code back to its constant's identifier.
            fn debug_name(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        $val => stringify!($ident),
                    )*
                    _ => return None,
                })
            }

            // Maps a status code to its doc string, trimmed at compile time.
            fn doc_str(&self) -> Option<&'static str> {
                Some(match self.0.get() {
                    $(
                        $val => const { $doc.trim_ascii() },
                    )*
                    _ => return None,
                })
            }
        }
    };
}
926
// DEVNOTE: the doc comments here are also used as the runtime error strings
// (via `doc_str`/`Display`), so editing a doc comment below changes program
// output, not just documentation.
hv_error! {
    HvError,
    /// Invalid hypercall code
    InvalidHypercallCode = 0x0002,
    /// Invalid hypercall input
    InvalidHypercallInput = 0x0003,
    /// Invalid alignment
    InvalidAlignment = 0x0004,
    /// Invalid parameter
    InvalidParameter = 0x0005,
    /// Access denied
    AccessDenied = 0x0006,
    /// Invalid partition state
    InvalidPartitionState = 0x0007,
    /// Operation denied
    OperationDenied = 0x0008,
    /// Unknown property
    UnknownProperty = 0x0009,
    /// Property value out of range
    PropertyValueOutOfRange = 0x000A,
    /// Insufficient memory
    InsufficientMemory = 0x000B,
    /// Partition too deep
    PartitionTooDeep = 0x000C,
    /// Invalid partition ID
    InvalidPartitionId = 0x000D,
    /// Invalid VP index
    InvalidVpIndex = 0x000E,
    /// Not found
    NotFound = 0x0010,
    /// Invalid port ID
    InvalidPortId = 0x0011,
    /// Invalid connection ID
    InvalidConnectionId = 0x0012,
    /// Insufficient buffers
    InsufficientBuffers = 0x0013,
    /// Not acknowledged
    NotAcknowledged = 0x0014,
    /// Invalid VP state
    InvalidVpState = 0x0015,
    /// Acknowledged
    Acknowledged = 0x0016,
    /// Invalid save restore state
    InvalidSaveRestoreState = 0x0017,
    /// Invalid SynIC state
    InvalidSynicState = 0x0018,
    /// Object in use
    ObjectInUse = 0x0019,
    /// Invalid proximity domain info
    InvalidProximityDomainInfo = 0x001A,
    /// No data
    NoData = 0x001B,
    /// Inactive
    Inactive = 0x001C,
    /// No resources
    NoResources = 0x001D,
    /// Feature unavailable
    FeatureUnavailable = 0x001E,
    /// Partial packet
    PartialPacket = 0x001F,
    /// Processor feature not supported
    ProcessorFeatureNotSupported = 0x0020,
    /// Processor cache line flush size incompatible
    ProcessorCacheLineFlushSizeIncompatible = 0x0030,
    /// Insufficient buffer
    InsufficientBuffer = 0x0033,
    /// Incompatible processor
    IncompatibleProcessor = 0x0037,
    /// Insufficient device domains
    InsufficientDeviceDomains = 0x0038,
    /// CPUID feature validation error
    CpuidFeatureValidationError = 0x003C,
    /// CPUID XSAVE feature validation error
    CpuidXsaveFeatureValidationError = 0x003D,
    /// Processor startup timeout
    ProcessorStartupTimeout = 0x003E,
    /// SMX enabled
    SmxEnabled = 0x003F,
    /// Invalid LP index
    InvalidLpIndex = 0x0041,
    /// Invalid register value
    InvalidRegisterValue = 0x0050,
    /// Invalid VTL state
    InvalidVtlState = 0x0051,
    /// NX not detected
    NxNotDetected = 0x0055,
    /// Invalid device ID
    InvalidDeviceId = 0x0057,
    /// Invalid device state
    InvalidDeviceState = 0x0058,
    /// Pending page requests
    PendingPageRequests = 0x0059,
    /// Page request invalid
    PageRequestInvalid = 0x0060,
    /// Key already exists
    KeyAlreadyExists = 0x0065,
    /// Device already in domain
    DeviceAlreadyInDomain = 0x0066,
    /// Invalid CPU group ID
    InvalidCpuGroupId = 0x006F,
    /// Invalid CPU group state
    InvalidCpuGroupState = 0x0070,
    /// Operation failed
    OperationFailed = 0x0071,
    /// Not allowed with nested virtualization active
    NotAllowedWithNestedVirtActive = 0x0072,
    /// Insufficient root memory
    InsufficientRootMemory = 0x0073,
    /// Event buffer already freed
    EventBufferAlreadyFreed = 0x0074,
    /// The specified timeout expired before the operation completed.
    Timeout = 0x0078,
    /// The VTL specified for the operation is already in an enabled state.
    VtlAlreadyEnabled = 0x0086,
    /// Unknown register name
    UnknownRegisterName = 0x0087,
}
1045
/// A useful result type for hypervisor operations.
pub type HvResult<T> = Result<T, HvError>;

/// A virtual trust level (VTL).
///
/// Ordered by numeric value via the derived `Ord`/`PartialOrd`.
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum Vtl {
    /// VTL 0.
    Vtl0 = 0,
    /// VTL 1.
    Vtl1 = 1,
    /// VTL 2.
    Vtl2 = 2,
}
1056
1057impl TryFrom<u8> for Vtl {
1058    type Error = HvError;
1059
1060    fn try_from(value: u8) -> Result<Self, Self::Error> {
1061        Ok(match value {
1062            0 => Self::Vtl0,
1063            1 => Self::Vtl1,
1064            2 => Self::Vtl2,
1065            _ => return Err(HvError::InvalidParameter),
1066        })
1067    }
1068}
1069
1070impl From<Vtl> for u8 {
1071    fn from(value: Vtl) -> Self {
1072        value as u8
1073    }
1074}
1075
/// The contents of `HV_X64_MSR_GUEST_CRASH_CTL`
#[bitfield(u64)]
pub struct GuestCrashCtl {
    // Low 58 bits are reserved.
    #[bits(58)]
    _reserved: u64,
    // ID of the pre-OS environment
    #[bits(3)]
    pub pre_os_id: u8,
    // Crash dump will not be captured
    #[bits(1)]
    pub no_crash_dump: bool,
    // When set, `HV_X64_MSR_GUEST_CRASH_P3` is the GPA of the message,
    // `HV_X64_MSR_GUEST_CRASH_P4` is its length in bytes
    #[bits(1)]
    pub crash_message: bool,
    // Log contents of crash parameter system registers (top bit of the MSR)
    #[bits(1)]
    pub crash_notify: bool,
}
1095
/// A 128-bit value stored as native-endian bytes with explicit 16-byte
/// alignment, suitable for use in wire/ABI structures.
#[repr(C, align(16))]
#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct AlignedU128([u8; 16]);

impl AlignedU128 {
    /// Returns the value as native-endian bytes.
    pub fn as_ne_bytes(&self) -> [u8; 16] {
        self.0
    }

    /// Constructs the value from native-endian bytes.
    pub fn from_ne_bytes(val: [u8; 16]) -> Self {
        Self(val)
    }
}
1109
1110impl Debug for AlignedU128 {
1111    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
1112        Debug::fmt(&u128::from_ne_bytes(self.0), f)
1113    }
1114}
1115
1116impl From<u128> for AlignedU128 {
1117    fn from(v: u128) -> Self {
1118        Self(v.to_ne_bytes())
1119    }
1120}
1121
1122impl From<u64> for AlignedU128 {
1123    fn from(v: u64) -> Self {
1124        (v as u128).into()
1125    }
1126}
1127
1128impl From<u32> for AlignedU128 {
1129    fn from(v: u32) -> Self {
1130        (v as u128).into()
1131    }
1132}
1133
1134impl From<u16> for AlignedU128 {
1135    fn from(v: u16) -> Self {
1136        (v as u128).into()
1137    }
1138}
1139
1140impl From<u8> for AlignedU128 {
1141    fn from(v: u8) -> Self {
1142        (v as u128).into()
1143    }
1144}
1145
1146impl From<AlignedU128> for u128 {
1147    fn from(v: AlignedU128) -> Self {
1148        u128::from_ne_bytes(v.0)
1149    }
1150}
1151
open_enum! {
    // Message types delivered through the SynIC message page. The 0x8000xxxx
    // range is architecture-independent; 0x8001xxxx is architecture-specific.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvMessageType: u32 {
        #![expect(non_upper_case_globals)]

        HvMessageTypeNone = 0x00000000,

        // Architecture-independent intercept and notification messages.
        HvMessageTypeUnmappedGpa = 0x80000000,
        HvMessageTypeGpaIntercept = 0x80000001,
        HvMessageTypeUnacceptedGpa = 0x80000003,
        HvMessageTypeGpaAttributeIntercept = 0x80000004,
        HvMessageTypeEnablePartitionVtlIntercept = 0x80000005,
        HvMessageTypeTimerExpired = 0x80000010,
        HvMessageTypeInvalidVpRegisterValue = 0x80000020,
        HvMessageTypeUnrecoverableException = 0x80000021,
        HvMessageTypeUnsupportedFeature = 0x80000022,
        HvMessageTypeTlbPageSizeMismatch = 0x80000023,
        HvMessageTypeIommuFault = 0x80000024,
        HvMessageTypeEventLogBufferComplete = 0x80000040,
        HvMessageTypeHypercallIntercept = 0x80000050,
        HvMessageTypeSynicEventIntercept = 0x80000060,
        HvMessageTypeSynicSintIntercept = 0x80000061,
        HvMessageTypeSynicSintDeliverable = 0x80000062,
        HvMessageTypeAsyncCallCompletion = 0x80000070,

        // Architecture-specific messages (mostly x64; one ARM64 entry).
        HvMessageTypeX64IoPortIntercept = 0x80010000,
        HvMessageTypeMsrIntercept = 0x80010001,
        HvMessageTypeX64CpuidIntercept = 0x80010002,
        HvMessageTypeExceptionIntercept = 0x80010003,
        HvMessageTypeX64ApicEoi = 0x80010004,
        HvMessageTypeX64IommuPrq = 0x80010005,
        HvMessageTypeRegisterIntercept = 0x80010006,
        HvMessageTypeX64Halt = 0x80010007,
        HvMessageTypeX64InterruptionDeliverable = 0x80010008,
        HvMessageTypeX64SipiIntercept = 0x80010009,
        HvMessageTypeX64RdtscIntercept = 0x8001000a,
        HvMessageTypeX64ApicSmiIntercept = 0x8001000b,
        HvMessageTypeArm64ResetIntercept = 0x8001000c,
        HvMessageTypeX64ApicInitSipiIntercept = 0x8001000d,
        HvMessageTypeX64ApicWriteIntercept = 0x8001000e,
        HvMessageTypeX64ProxyInterruptIntercept = 0x8001000f,
        HvMessageTypeX64IsolationCtrlRegIntercept = 0x80010010,
        HvMessageTypeX64SnpGuestRequestIntercept = 0x80010011,
        HvMessageTypeX64ExceptionTrapIntercept = 0x80010012,
        HvMessageTypeX64SevVmgexitIntercept = 0x80010013,
    }
}
1198
1199impl Default for HvMessageType {
1200    fn default() -> Self {
1201        HvMessageType::HvMessageTypeNone
1202    }
1203}
1204
/// The SINT index reserved for hypervisor intercept messages.
pub const HV_SYNIC_INTERCEPTION_SINT_INDEX: u8 = 0;

/// The number of synthetic interrupt sources (SINT0..SINT15).
pub const NUM_SINTS: usize = 16;
/// The number of synthetic timers (STIMER0..STIMER3).
pub const NUM_TIMERS: usize = 4;
1209
/// The header at the start of every [`HvMessage`].
#[repr(C)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageHeader {
    /// The message type.
    pub typ: HvMessageType,
    /// The used payload length, in bytes.
    pub len: u8,
    /// Message flags.
    pub flags: HvMessageFlags,
    /// Reserved; must be zero.
    pub rsvd: u16,
    /// Message-type-specific identifier (e.g. port or partition ID;
    /// interpretation depends on `typ`).
    pub id: u64,
}

/// Flags in an [`HvMessageHeader`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessageFlags {
    /// Set when another message is pending behind this one.
    pub message_pending: bool,
    #[bits(7)]
    _reserved: u8,
}
1227
/// Total size in bytes of an [`HvMessage`] (header plus payload).
pub const HV_MESSAGE_SIZE: usize = size_of::<HvMessage>();
const_assert!(HV_MESSAGE_SIZE == 256);
/// Maximum payload size in bytes of an [`HvMessage`].
pub const HV_MESSAGE_PAYLOAD_SIZE: usize = 240;

/// A hypervisor message, consisting of a header and a fixed-size payload
/// buffer of which `header.len` bytes are meaningful.
#[repr(C, align(16))]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMessage {
    pub header: HvMessageHeader,
    pub payload_buffer: [u8; HV_MESSAGE_PAYLOAD_SIZE],
}
1238
1239impl Default for HvMessage {
1240    fn default() -> Self {
1241        Self {
1242            header: FromZeros::new_zeroed(),
1243            payload_buffer: [0; 240],
1244        }
1245    }
1246}
1247
1248impl HvMessage {
1249    /// Constructs a new message. `payload` must fit into the payload field (240
1250    /// bytes limit).
1251    pub fn new(typ: HvMessageType, id: u64, payload: &[u8]) -> Self {
1252        let mut msg = HvMessage {
1253            header: HvMessageHeader {
1254                typ,
1255                len: payload.len() as u8,
1256                flags: HvMessageFlags::new(),
1257                rsvd: 0,
1258                id,
1259            },
1260            payload_buffer: [0; 240],
1261        };
1262        msg.payload_buffer[..payload.len()].copy_from_slice(payload);
1263        msg
1264    }
1265
1266    pub fn payload(&self) -> &[u8] {
1267        &self.payload_buffer[..self.header.len as usize]
1268    }
1269
1270    pub fn as_message<T: MessagePayload>(&self) -> &T {
1271        // Ensure invariants are met.
1272        let () = T::CHECK;
1273        T::ref_from_prefix(&self.payload_buffer).unwrap().0
1274    }
1275
1276    pub fn as_message_mut<T: MessagePayload>(&mut self) -> &T {
1277        // Ensure invariants are met.
1278        let () = T::CHECK;
1279        T::mut_from_prefix(&mut self.payload_buffer).unwrap().0
1280    }
1281}
1282
/// Marker trait for types that can be read out of an [`HvMessage`] payload
/// via [`HvMessage::as_message`] / [`HvMessage::as_message_mut`].
pub trait MessagePayload: KnownLayout + Immutable + IntoBytes + FromBytes + Sized {
    /// Used to ensure this trait is only implemented on messages of the proper
    /// size and alignment.
    ///
    /// Evaluated at compile time by the callers above; a violating impl fails
    /// to build rather than misbehaving at runtime.
    #[doc(hidden)]
    const CHECK: () = {
        assert!(size_of::<Self>() <= HV_MESSAGE_PAYLOAD_SIZE);
        assert!(align_of::<Self>() <= align_of::<HvMessage>());
    };
}
1292
/// Payload for timer expiration messages
/// ([`HvMessageType::HvMessageTypeTimerExpired`]).
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct TimerMessagePayload {
    /// Index of the synthetic timer that expired.
    pub timer_index: u32,
    /// Reserved; must be zero.
    pub reserved: u32,
    /// The time the timer was due to expire.
    pub expiration_time: u64,
    /// The time the message was delivered.
    pub delivery_time: u64,
}
1301
1302pub mod hypercall {
1303    use super::*;
1304    use core::ops::RangeInclusive;
1305    use zerocopy::Unalign;
1306
    /// The hypercall input value.
    #[bitfield(u64)]
    pub struct Control {
        /// The hypercall code.
        pub code: u16,
        /// If this hypercall is a fast hypercall.
        pub fast: bool,
        /// The variable header size, in qwords.
        #[bits(10)]
        pub variable_header_size: usize,
        /// Reserved; must be zero.
        #[bits(4)]
        _rsvd0: u8,
        /// Specifies that the hypercall should be handled by the L0 hypervisor in a nested environment.
        pub nested: bool,
        /// The element count for rep hypercalls.
        #[bits(12)]
        pub rep_count: usize,
        /// Reserved; must be zero.
        #[bits(4)]
        _rsvd1: u8,
        /// The first element to start processing in a rep hypercall.
        #[bits(12)]
        pub rep_start: usize,
        /// Reserved; must be zero.
        #[bits(4)]
        _rsvd2: u8,
    }
1332
    /// The hypercall output value returned to the guest.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[must_use]
    pub struct HypercallOutput {
        /// The completion status of the call.
        #[bits(16)]
        pub call_status: HvStatus,
        /// Reserved; must be zero.
        pub rsvd: u16,
        /// For rep hypercalls, the number of elements completed.
        #[bits(12)]
        pub elements_processed: usize,
        /// Reserved; must be zero.
        #[bits(20)]
        pub rsvd2: u32,
    }
1346
1347    impl From<HvError> for HypercallOutput {
1348        fn from(e: HvError) -> Self {
1349            Self::new().with_call_status(Err(e).into())
1350        }
1351    }
1352
1353    impl HypercallOutput {
1354        /// A success output with zero elements processed.
1355        pub const SUCCESS: Self = Self::new();
1356
1357        pub fn result(&self) -> Result<(), HvError> {
1358            self.call_status().result()
1359        }
1360    }
1361
    /// A register name/value pair, as used by the get/set VP registers
    /// hypercalls.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvRegisterAssoc {
        /// The register being accessed.
        pub name: HvRegisterName,
        /// Padding; must be zero.
        pub pad: [u32; 3],
        /// The register value.
        pub value: HvRegisterValue,
    }
1369
1370    impl<N: Into<HvRegisterName>, T: Into<HvRegisterValue>> From<(N, T)> for HvRegisterAssoc {
1371        fn from((name, value): (N, T)) -> Self {
1372            Self {
1373                name: name.into(),
1374                pad: [0; 3],
1375                value: value.into(),
1376            }
1377        }
1378    }
1379
1380    impl<N: Copy + Into<HvRegisterName>, T: Copy + Into<HvRegisterValue>> From<&(N, T)>
1381        for HvRegisterAssoc
1382    {
1383        fn from(&(name, value): &(N, T)) -> Self {
1384            Self {
1385                name: name.into(),
1386                pad: [0; 3],
1387                value: value.into(),
1388            }
1389        }
1390    }
1391
    /// The contents of `HV_X64_MSR_HYPERCALL`.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MsrHypercallContents {
        /// Whether the hypercall page is enabled.
        pub enable: bool,
        /// Whether the MSR is locked against further writes.
        pub locked: bool,
        /// Reserved; must be zero.
        #[bits(10)]
        pub reserved_p: u64,
        /// The guest page number of the hypercall page.
        #[bits(52)]
        pub gpn: u64,
    }

    /// Input for the post-message hypercall.
    #[repr(C, align(8))]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessage {
        /// The target connection ID.
        pub connection_id: u32,
        /// Padding; must be zero.
        pub padding: u32,
        /// The message type.
        pub message_type: u32,
        /// The number of meaningful bytes in `payload`.
        pub payload_size: u32,
        /// The message payload.
        pub payload: [u8; 240],
    }

    /// Input for the signal-event hypercall.
    #[repr(C, align(8))]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEvent {
        /// The target connection ID.
        pub connection_id: u32,
        /// The event flag number to signal.
        pub flag_number: u16,
        /// Reserved; must be zero.
        pub rsvd: u16,
    }
1420
    /// Input for [`HypercallCode::HvCallPostMessageDirect`].
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PostMessageDirect {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The target VTL.
        pub vtl: u8,
        /// Padding; must be zero.
        pub padding0: [u8; 3],
        /// The target SINT.
        pub sint: u8,
        /// Padding; must be zero.
        pub padding1: [u8; 3],
        // `Unalign` because `HvMessage` requires 16-byte alignment but sits at
        // offset 20 in this struct.
        pub message: Unalign<HvMessage>,
        /// Padding; must be zero.
        pub padding2: u32,
    }

    /// Input for [`HypercallCode::HvCallSignalEventDirect`].
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirect {
        pub target_partition: u64,
        pub target_vp: u32,
        pub target_vtl: u8,
        pub target_sint: u8,
        pub flag_number: u16,
    }

    /// Output for [`HypercallCode::HvCallSignalEventDirect`].
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SignalEventDirectOutput {
        /// Non-zero if the event flag transitioned from clear to set.
        pub newly_signaled: u8,
        /// Reserved; must be zero.
        pub rsvd: [u8; 7],
    }
1450
    /// A device interrupt descriptor.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptEntry {
        /// The interrupt source type.
        pub source: HvInterruptSource,
        /// Reserved; must be zero.
        pub rsvd: u32,
        /// Source-specific data (interpretation depends on `source`).
        pub data: [u32; 2],
    }

    open_enum! {
        // The origin of a device interrupt.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterruptSource: u32 {
            MSI = 1,
            IO_APIC = 2,
        }
    }

    /// The destination of a retargeted device interrupt.
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InterruptTarget {
        /// The interrupt vector.
        pub vector: u32,
        /// Targeting flags.
        pub flags: HvInterruptTargetFlags,
        /// Processor mask or set format, depending on `flags.processor_set`.
        pub mask_or_format: u64,
    }
1474
    /// Flags for [`InterruptTarget`].
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInterruptTargetFlags {
        /// Deliver to multiple processors.
        pub multicast: bool,
        /// `mask_or_format` is a generic processor set rather than a mask.
        pub processor_set: bool,
        /// Redirect through the proxy.
        pub proxy_redirect: bool,
        /// Reserved; must be zero.
        #[bits(29)]
        pub reserved: u32,
    }

    // Raw bit values corresponding to `HvInterruptTargetFlags` fields.
    pub const HV_DEVICE_INTERRUPT_TARGET_MULTICAST: u32 = 1;
    pub const HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET: u32 = 2;
    pub const HV_DEVICE_INTERRUPT_TARGET_PROXY_REDIRECT: u32 = 4;

    // Generic set formats for processor set banks.
    pub const HV_GENERIC_SET_SPARSE_4K: u64 = 0;
    pub const HV_GENERIC_SET_ALL: u64 = 1;
1491
    /// Input for [`HypercallCode::HvCallRetargetDeviceInterrupt`].
    #[repr(C)]
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct RetargetDeviceInterrupt {
        /// The partition owning the device.
        pub partition_id: u64,
        /// The device being retargeted.
        pub device_id: u64,
        /// The interrupt to retarget.
        pub entry: InterruptEntry,
        /// Reserved; must be zero.
        pub rsvd: u64,
        /// The new interrupt destination.
        pub target_header: InterruptTarget,
    }
1501
    /// A VTL specifier passed as hypercall input: either an explicit target
    /// VTL or "the caller's current VTL" when `use_target_vtl` is clear.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvInputVtl {
        /// The target VTL number; only meaningful when `use_target_vtl` is set.
        #[bits(4)]
        pub target_vtl_value: u8,
        /// Whether `target_vtl_value` is used, as opposed to the current VTL.
        pub use_target_vtl: bool,
        /// Reserved; must be zero.
        #[bits(3)]
        pub reserved: u8,
    }
1511
1512    impl From<Vtl> for HvInputVtl {
1513        fn from(value: Vtl) -> Self {
1514            Self::from(Some(value))
1515        }
1516    }
1517
1518    impl From<Option<Vtl>> for HvInputVtl {
1519        fn from(value: Option<Vtl>) -> Self {
1520            Self::new()
1521                .with_use_target_vtl(value.is_some())
1522                .with_target_vtl_value(value.map_or(0, Into::into))
1523        }
1524    }
1525
1526    impl HvInputVtl {
1527        /// None = target current vtl
1528        pub fn target_vtl(&self) -> Result<Option<Vtl>, HvError> {
1529            if self.reserved() != 0 {
1530                return Err(HvError::InvalidParameter);
1531            }
1532            if self.use_target_vtl() {
1533                Ok(Some(self.target_vtl_value().try_into()?))
1534            } else {
1535                Ok(None)
1536            }
1537        }
1538
1539        pub const CURRENT_VTL: Self = Self::new();
1540    }
1541
    /// Fixed header for the get/set VP registers hypercalls; the register
    /// names/values follow as rep elements.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetSetVpRegisters {
        /// The target partition.
        pub partition_id: u64,
        /// The target virtual processor index.
        pub vp_index: u32,
        /// The target VTL.
        pub target_vtl: HvInputVtl,
        /// Reserved; must be zero.
        pub rsvd: [u8; 3],
    }
1550
    open_enum::open_enum! {
        // OS identifiers for Microsoft operating systems, used in the
        // `os_id` field of `HvGuestOsMicrosoft`.
        #[derive(Default)]
        pub enum HvGuestOsMicrosoftIds: u8 {
            UNDEFINED = 0x00,
            MSDOS = 0x01,
            WINDOWS_3X = 0x02,
            WINDOWS_9X = 0x03,
            WINDOWS_NT = 0x04,
            WINDOWS_CE = 0x05,
        }
    }

    /// The Microsoft-vendor layout of `HV_X64_MSR_GUEST_OS_ID` (top bit clear).
    #[bitfield(u64)]
    pub struct HvGuestOsMicrosoft {
        #[bits(40)]
        _rsvd: u64,
        /// The OS identifier; see [`HvGuestOsMicrosoftIds`].
        #[bits(8)]
        pub os_id: u8,
        // The top bit must be zero and the least significant 15 bits holds the value of the vendor id.
        #[bits(16)]
        pub vendor_id: u16,
    }
1573
    open_enum::open_enum! {
        // OS identifiers for open-source operating systems, used in the
        // `os_id` field of `HvGuestOsOpenSource`.
        #[derive(Default)]
        pub enum HvGuestOsOpenSourceType: u8 {
            UNDEFINED = 0x00,
            LINUX = 0x01,
            FREEBSD = 0x02,
            XEN = 0x03,
            ILLUMOS = 0x04,
        }
    }

    /// The open-source layout of `HV_X64_MSR_GUEST_OS_ID` (top bit set).
    #[bitfield(u64)]
    pub struct HvGuestOsOpenSource {
        /// The OS build number.
        #[bits(16)]
        pub build_no: u16,
        /// The OS version.
        #[bits(32)]
        pub version: u32,
        /// The OS identifier; see [`HvGuestOsOpenSourceType`].
        #[bits(8)]
        pub os_id: u8,
        /// The OS type.
        #[bits(7)]
        pub os_type: u8,
        /// Set for open-source operating systems (selects this layout).
        #[bits(1)]
        pub is_open_source: bool,
    }
1598
    /// The raw contents of `HV_X64_MSR_GUEST_OS_ID`. The top bit selects
    /// between the [`HvGuestOsMicrosoft`] and [`HvGuestOsOpenSource`] layouts.
    #[bitfield(u64)]
    pub struct HvGuestOsId {
        #[bits(63)]
        _rsvd: u64,
        is_open_source: bool,
    }
1605
1606    impl HvGuestOsId {
1607        pub fn microsoft(&self) -> Option<HvGuestOsMicrosoft> {
1608            (!self.is_open_source()).then(|| HvGuestOsMicrosoft::from(u64::from(*self)))
1609        }
1610
1611        pub fn open_source(&self) -> Option<HvGuestOsOpenSource> {
1612            (self.is_open_source()).then(|| HvGuestOsOpenSource::from(u64::from(*self)))
1613        }
1614
1615        pub fn as_u64(&self) -> u64 {
1616            self.0
1617        }
1618    }
1619
    // Access-type masks for `InstallIntercept::access_type_mask`.
    pub const HV_INTERCEPT_ACCESS_MASK_NONE: u32 = 0x00;
    pub const HV_INTERCEPT_ACCESS_MASK_READ: u32 = 0x01;
    pub const HV_INTERCEPT_ACCESS_MASK_WRITE: u32 = 0x02;
    pub const HV_INTERCEPT_ACCESS_MASK_READ_WRITE: u32 =
        HV_INTERCEPT_ACCESS_MASK_READ | HV_INTERCEPT_ACCESS_MASK_WRITE;
    pub const HV_INTERCEPT_ACCESS_MASK_EXECUTE: u32 = 0x04;
1626
    open_enum::open_enum! {
        // The kind of event being intercepted; selects the interpretation of
        // `HvInterceptParameters`.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HvInterceptType: u32 {
            #![expect(non_upper_case_globals)]
            HvInterceptTypeX64IoPort = 0x00000000,
            HvInterceptTypeX64Msr = 0x00000001,
            HvInterceptTypeX64Cpuid = 0x00000002,
            HvInterceptTypeException = 0x00000003,
            HvInterceptTypeHypercall = 0x00000008,
            HvInterceptTypeUnknownSynicConnection = 0x0000000D,
            HvInterceptTypeX64ApicEoi = 0x0000000E,
            HvInterceptTypeRetargetInterruptWithUnknownDeviceId = 0x0000000F,
            HvInterceptTypeX64IoPortRange = 0x00000011,
        }
    }
1642
    /// Type-specific intercept parameters, interpreted according to the
    /// accompanying [`HvInterceptType`]; see the accessors below.
    #[repr(transparent)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct HvInterceptParameters(u64);
1646
1647    impl HvInterceptParameters {
1648        pub fn new_io_port(port: u16) -> Self {
1649            Self(port as u64)
1650        }
1651
1652        pub fn new_io_port_range(ports: RangeInclusive<u16>) -> Self {
1653            let base = *ports.start() as u64;
1654            let end = *ports.end() as u64;
1655            Self(base | (end << 16))
1656        }
1657
1658        pub fn new_exception(vector: u16) -> Self {
1659            Self(vector as u64)
1660        }
1661
1662        pub fn io_port(&self) -> u16 {
1663            self.0 as u16
1664        }
1665
1666        pub fn io_port_range(&self) -> RangeInclusive<u16> {
1667            let base = self.0 as u16;
1668            let end = (self.0 >> 16) as u16;
1669            base..=end
1670        }
1671
1672        pub fn cpuid_index(&self) -> u32 {
1673            self.0 as u32
1674        }
1675
1676        pub fn exception(&self) -> u16 {
1677            self.0 as u16
1678        }
1679    }
1680
    /// Input for [`HypercallCode::HvCallInstallIntercept`].
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct InstallIntercept {
        /// The target partition.
        pub partition_id: u64,
        /// Which accesses to intercept; see `HV_INTERCEPT_ACCESS_MASK_*`.
        pub access_type_mask: u32,
        /// The kind of event to intercept.
        pub intercept_type: HvInterceptType,
        /// Type-specific parameters.
        pub intercept_parameters: HvInterceptParameters,
    }

    /// Input for [`HypercallCode::HvCallRegisterInterceptResult`] with CPUID intercept type.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct RegisterInterceptResultCpuid {
        pub partition_id: u64,
        pub vp_index: u32,
        pub intercept_type: HvInterceptType,
        pub parameters: HvRegisterX64CpuidResultParameters,
        /// Explicit tail padding (struct alignment is 8 due to partition_id).
        pub _reserved: u32,
    }
1701
    /// CPUID intercept result parameters.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct HvRegisterX64CpuidResultParameters {
        /// Which CPUID leaf/subleaf the result applies to.
        pub input: HvRegisterX64CpuidResultParametersInput,
        /// The value/mask pairs to apply to the result.
        pub result: HvRegisterX64CpuidResultParametersOutput,
    }

    /// Input portion of CPUID intercept result parameters.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct HvRegisterX64CpuidResultParametersInput {
        /// The CPUID leaf.
        pub eax: u32,
        /// The CPUID subleaf.
        pub ecx: u32,
        /// Non-zero if the result applies only to the specified subleaf.
        pub subleaf_specific: u8,
        /// Non-zero to always apply the override.
        pub always_override: u8,
        /// Padding; must be zero.
        pub padding: u16,
    }

    /// Output portion of CPUID intercept result parameters.
    ///
    /// Each register has a value and a mask selecting which bits of the value
    /// to apply.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct HvRegisterX64CpuidResultParametersOutput {
        pub eax: u32,
        pub eax_mask: u32,
        pub ebx: u32,
        pub ebx_mask: u32,
        pub ecx: u32,
        pub ecx_mask: u32,
        pub edx: u32,
        pub edx_mask: u32,
    }
1734
    /// Input layout for asserting a virtual interrupt in a partition.
    #[repr(C)]
    #[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, Debug)]
    pub struct AssertVirtualInterrupt {
        /// Target partition.
        pub partition_id: u64,
        /// Delivery mode/trigger control for the interrupt.
        pub interrupt_control: HvInterruptControl,
        pub destination_address: u64,
        pub requested_vector: u32,
        pub target_vtl: u8,
        /// Reserved.
        pub rsvd0: u8,
        /// Reserved.
        pub rsvd1: u16,
    }
1746
    /// Input layout for starting a virtual processor (x64) with an initial
    /// register context.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorX64 {
        /// Partition containing the VP to start.
        pub partition_id: u64,
        /// Index of the VP to start.
        pub vp_index: u32,
        /// VTL at which the VP starts executing.
        pub target_vtl: u8,
        /// Reserved.
        pub rsvd0: u8,
        /// Reserved.
        pub rsvd1: u16,
        /// Initial architectural register state.
        pub vp_context: InitialVpContextX64,
    }
1757
    /// Initial architectural register state for a virtual processor (x64).
    ///
    /// See [`InitialVpContextX64::as_hv_register_assocs`] for the mapping of
    /// each field to an [`HvX64RegisterName`].
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextX64 {
        pub rip: u64,
        pub rsp: u64,
        pub rflags: u64,
        pub cs: HvX64SegmentRegister,
        pub ds: HvX64SegmentRegister,
        pub es: HvX64SegmentRegister,
        pub fs: HvX64SegmentRegister,
        pub gs: HvX64SegmentRegister,
        pub ss: HvX64SegmentRegister,
        pub tr: HvX64SegmentRegister,
        pub ldtr: HvX64SegmentRegister,
        pub idtr: HvX64TableRegister,
        pub gdtr: HvX64TableRegister,
        pub efer: u64,
        pub cr0: u64,
        pub cr3: u64,
        pub cr4: u64,
        /// PAT MSR value (mapped to [`HvX64RegisterName::Pat`]).
        pub msr_cr_pat: u64,
    }
1780
    /// Input layout for starting a virtual processor (ARM64) with an initial
    /// register context.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct StartVirtualProcessorArm64 {
        /// Partition containing the VP to start.
        pub partition_id: u64,
        /// Index of the VP to start.
        pub vp_index: u32,
        /// VTL at which the VP starts executing.
        pub target_vtl: u8,
        /// Reserved.
        pub rsvd0: u8,
        /// Reserved.
        pub rsvd1: u16,
        /// Initial architectural register state.
        pub vp_context: InitialVpContextArm64,
    }

    /// Initial architectural register state for a virtual processor (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct InitialVpContextArm64 {
        pub pc: u64,
        pub sp_elh: u64,
        pub sctlr_el1: u64,
        pub mair_el1: u64,
        pub tcr_el1: u64,
        pub vbar_el1: u64,
        pub ttbr0_el1: u64,
        pub ttbr1_el1: u64,
        /// x18 is included (platform register on some ABIs); the remaining
        /// general-purpose registers are not part of the initial context.
        pub x18: u64,
    }
1805
1806    impl InitialVpContextX64 {
1807        pub fn as_hv_register_assocs(&self) -> impl Iterator<Item = HvRegisterAssoc> + '_ {
1808            let regs = [
1809                (HvX64RegisterName::Rip, HvRegisterValue::from(self.rip)).into(),
1810                (HvX64RegisterName::Rsp, HvRegisterValue::from(self.rsp)).into(),
1811                (
1812                    HvX64RegisterName::Rflags,
1813                    HvRegisterValue::from(self.rflags),
1814                )
1815                    .into(),
1816                (HvX64RegisterName::Cs, HvRegisterValue::from(self.cs)).into(),
1817                (HvX64RegisterName::Ds, HvRegisterValue::from(self.ds)).into(),
1818                (HvX64RegisterName::Es, HvRegisterValue::from(self.es)).into(),
1819                (HvX64RegisterName::Fs, HvRegisterValue::from(self.fs)).into(),
1820                (HvX64RegisterName::Gs, HvRegisterValue::from(self.gs)).into(),
1821                (HvX64RegisterName::Ss, HvRegisterValue::from(self.ss)).into(),
1822                (HvX64RegisterName::Tr, HvRegisterValue::from(self.tr)).into(),
1823                (HvX64RegisterName::Ldtr, HvRegisterValue::from(self.ldtr)).into(),
1824                (HvX64RegisterName::Idtr, HvRegisterValue::from(self.idtr)).into(),
1825                (HvX64RegisterName::Gdtr, HvRegisterValue::from(self.gdtr)).into(),
1826                (HvX64RegisterName::Efer, HvRegisterValue::from(self.efer)).into(),
1827                (HvX64RegisterName::Cr0, HvRegisterValue::from(self.cr0)).into(),
1828                (HvX64RegisterName::Cr3, HvRegisterValue::from(self.cr3)).into(),
1829                (HvX64RegisterName::Cr4, HvRegisterValue::from(self.cr4)).into(),
1830                (
1831                    HvX64RegisterName::Pat,
1832                    HvRegisterValue::from(self.msr_cr_pat),
1833                )
1834                    .into(),
1835            ];
1836            regs.into_iter()
1837        }
1838    }
1839
    /// Control flags for a GVA-to-GPA translation request (x64).
    ///
    /// The raw VTL byte is exposed through the typed `input_vtl` /
    /// `with_input_vtl` / `set_input_vtl` accessors.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaControlFlagsX64 {
        /// Request data read access
        pub validate_read: bool,
        /// Request data write access
        pub validate_write: bool,
        /// Request instruction fetch access.
        pub validate_execute: bool,
        /// Don't enforce any checks related to access mode (supervisor vs. user; SMEP and SMAP are treated
        /// as disabled).
        pub privilege_exempt: bool,
        /// Set the appropriate page table bits (i.e. access/dirty bit)
        pub set_page_table_bits: bool,
        /// Lock the TLB
        pub tlb_flush_inhibit: bool,
        /// Treat the access as a supervisor mode access irrespective of current mode.
        pub supervisor_access: bool,
        /// Treat the access as a user mode access irrespective of current mode.
        pub user_access: bool,
        /// Enforce the SMAP restriction on supervisor data access to user mode addresses if CR4.SMAP=1
        /// irrespective of current EFLAGS.AC i.e. the behavior for "implicit supervisor-mode accesses"
        /// (e.g. to the GDT, etc.) and when EFLAGS.AC=0. Does nothing if CR4.SMAP=0.
        pub enforce_smap: bool,
        /// Don't enforce the SMAP restriction on supervisor data access to user mode addresses irrespective
        /// of current EFLAGS.AC i.e. the behavior when EFLAGS.AC=1.
        pub override_smap: bool,
        /// Treat the access as a shadow stack access.
        pub shadow_stack: bool,
        #[bits(45)]
        _unused: u64,
        /// Target vtl
        input_vtl_value: u8,
    }
1874
1875    impl TranslateGvaControlFlagsX64 {
1876        pub fn input_vtl(&self) -> HvInputVtl {
1877            self.input_vtl_value().into()
1878        }
1879
1880        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
1881            self.with_input_vtl_value(input_vtl.into())
1882        }
1883
1884        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
1885            self.set_input_vtl_value(input_vtl.into())
1886        }
1887    }
1888
    /// Control flags for a GVA-to-GPA translation request (ARM64).
    ///
    /// The raw VTL byte is exposed through the typed `input_vtl` /
    /// `with_input_vtl` / `set_input_vtl` accessors.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaControlFlagsArm64 {
        /// Request data read access
        pub validate_read: bool,
        /// Request data write access
        pub validate_write: bool,
        /// Request instruction fetch access.
        pub validate_execute: bool,
        /// Reserved (occupies the bit used by `privilege_exempt` on x64).
        _reserved0: bool,
        /// Set the appropriate page table bits (i.e. access/dirty bit)
        pub set_page_table_bits: bool,
        /// Lock the TLB
        pub tlb_flush_inhibit: bool,
        /// Treat the access as a supervisor mode access irrespective of current mode.
        pub supervisor_access: bool,
        /// Treat the access as a user mode access irrespective of current mode.
        pub user_access: bool,
        /// Restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN i.e.
        /// the behavior when PSTATE.PAN=1.
        pub pan_set: bool,
        /// Don't restrict supervisor data access to user mode addresses irrespective of current PSTATE.PAN
        /// i.e. the behavior when PSTATE.PAN=0.
        pub pan_clear: bool,
        #[bits(46)]
        _unused: u64,
        /// Target vtl
        #[bits(8)]
        input_vtl_value: u8,
    }
1919
1920    impl TranslateGvaControlFlagsArm64 {
1921        pub fn input_vtl(&self) -> HvInputVtl {
1922            self.input_vtl_value().into()
1923        }
1924
1925        pub fn with_input_vtl(self, input_vtl: HvInputVtl) -> Self {
1926            self.with_input_vtl_value(input_vtl.into())
1927        }
1928
1929        pub fn set_input_vtl(&mut self, input_vtl: HvInputVtl) {
1930            self.set_input_vtl_value(input_vtl.into())
1931        }
1932    }
1933
    /// Input layout for translating a guest virtual address (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressX64 {
        /// Partition whose address space is used for the translation.
        pub partition_id: u64,
        /// VP whose context (paging state) is used for the translation.
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        /// Access checks and behaviors to apply during translation.
        pub control_flags: TranslateGvaControlFlagsX64,
        /// Page number (GVA >> page shift) of the address to translate.
        pub gva_page: u64,
    }

    /// Input layout for translating a guest virtual address (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressArm64 {
        /// Partition whose address space is used for the translation.
        pub partition_id: u64,
        /// VP whose context (paging state) is used for the translation.
        pub vp_index: u32,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u32,
        /// Access checks and behaviors to apply during translation.
        pub control_flags: TranslateGvaControlFlagsArm64,
        /// Page number (GVA >> page shift) of the address to translate.
        pub gva_page: u64,
    }
1955
    open_enum::open_enum! {
        /// Result codes for a GVA-to-GPA translation request.
        pub enum TranslateGvaResultCode: u32 {
            SUCCESS = 0,

            // Translation Failures
            PAGE_NOT_PRESENT = 1,
            PRIVILEGE_VIOLATION = 2,
            INVALID_PAGE_TABLE_FLAGS = 3,

            // GPA access failures
            GPA_UNMAPPED = 4,
            GPA_NO_READ_ACCESS = 5,
            GPA_NO_WRITE_ACCESS = 6,
            GPA_ILLEGAL_OVERLAY_ACCESS = 7,

            /// Intercept of the memory access by either
            /// - a higher VTL
            /// - a nested hypervisor (due to a violation of the nested page table)
            INTERCEPT = 8,

            GPA_UNACCEPTED = 9,
        }
    }
1979
    /// Packed result of a GVA-to-GPA translation.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResult {
        /// A [`TranslateGvaResultCode`] value.
        pub result_code: u32,
        /// Cache type of the translated page.
        pub cache_type: u8,
        /// Whether the page is an overlay page.
        pub overlay_page: bool,
        #[bits(23)]
        pub reserved: u32,
    }

    /// Output layout for the non-extended translate-virtual-address request.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressOutput {
        pub translation_result: TranslateGvaResult,
        /// Page number of the resulting GPA (valid on success).
        pub gpa_page: u64,
    }
1996
    /// Extended translation result (x64), carrying pending-event information
    /// alongside the basic [`TranslateGvaResult`].
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExX64 {
        pub result: TranslateGvaResult,
        pub reserved: u64,
        pub event_info: HvX64PendingEvent,
    }

    // Size pinned to the ABI layout.
    const_assert!(size_of::<TranslateGvaResultExX64>() == 0x30);

    /// Extended translation result (ARM64); no extra payload beyond the basic
    /// result.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateGvaResultExArm64 {
        pub result: TranslateGvaResult,
    }

    // Size pinned to the ABI layout.
    const_assert!(size_of::<TranslateGvaResultExArm64>() == 0x8);

    /// Output layout for the extended translate-virtual-address request (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputX64 {
        pub translation_result: TranslateGvaResultExX64,
        pub gpa_page: u64,
        // NOTE: This reserved field is not in the OS headers, but is required due to alignment. Confirmed via debugger.
        pub reserved: u64,
    }

    // Size pinned to the ABI layout.
    const_assert!(size_of::<TranslateVirtualAddressExOutputX64>() == 0x40);

    /// Output layout for the extended translate-virtual-address request (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct TranslateVirtualAddressExOutputArm64 {
        pub translation_result: TranslateGvaResultExArm64,
        pub gpa_page: u64,
    }

    // Size pinned to the ABI layout.
    const_assert!(size_of::<TranslateVirtualAddressExOutputArm64>() == 0x10);
2034
    /// Input header for looking up VP indices by APIC ID.
    ///
    /// NOTE(review): the APIC IDs themselves are presumably passed as a
    /// variable-length list following this header — confirm against the TLFS.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct GetVpIndexFromApicId {
        /// Partition to query.
        pub partition_id: u64,
        /// VTL to query.
        pub target_vtl: u8,
        /// Reserved; pads the header to 16 bytes.
        pub reserved: [u8; 7],
    }
2042
    /// Input layout for enabling a VTL on a virtual processor (x64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlX64 {
        /// Partition containing the VP.
        pub partition_id: u64,
        /// VP on which to enable the VTL.
        pub vp_index: u32,
        /// VTL being enabled.
        pub target_vtl: u8,
        /// Reserved.
        pub reserved: [u8; 3],
        /// Initial register state for the VP at the newly enabled VTL.
        pub vp_vtl_context: InitialVpContextX64,
    }

    /// Input layout for enabling a VTL on a virtual processor (ARM64).
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnableVpVtlArm64 {
        /// Partition containing the VP.
        pub partition_id: u64,
        /// VP on which to enable the VTL.
        pub vp_index: u32,
        /// VTL being enabled.
        pub target_vtl: u8,
        /// Reserved.
        pub reserved: [u8; 3],
        /// Initial register state for the VP at the newly enabled VTL.
        pub vp_vtl_context: InitialVpContextArm64,
    }
2062
    /// Input header for modifying VTL protections on GPA pages.
    ///
    /// NOTE(review): the affected page numbers are presumably passed as a
    /// variable-length list following this header — confirm against the TLFS.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyVtlProtectionMask {
        /// Target partition.
        pub partition_id: u64,
        /// Permissions to apply.
        pub map_flags: HvMapGpaFlags,
        /// VTL whose protections are modified.
        pub target_vtl: HvInputVtl,
        /// Reserved.
        pub reserved: [u8; 3],
    }
2071
    /// Input header for checking VTL access to a sparse set of GPA pages.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccess {
        /// Target partition.
        pub partition_id: u64,
        /// VTL whose access rights are checked.
        pub target_vtl: HvInputVtl,
        /// Access rights being checked.
        pub desired_access: u8,
        /// Reserved.
        pub reserved0: u16,
        /// Reserved.
        pub reserved1: u32,
    }
    // Size pinned to the ABI layout.
    const_assert!(size_of::<CheckSparseGpaPageVtlAccess>() == 0x10);

    /// Per-page output of a sparse GPA page VTL access check.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct CheckSparseGpaPageVtlAccessOutput {
        /// A [`CheckGpaPageVtlAccessResultCode`] value.
        pub result_code: u8,
        /// The access bits that were denied, if any.
        pub denied_access: u8,
        /// VTL that would intercept the access.
        #[bits(4)]
        pub intercepting_vtl: u32,
        #[bits(12)]
        _reserved0: u32,
        _reserved1: u32,
    }
    // Size pinned to the ABI layout.
    const_assert!(size_of::<CheckSparseGpaPageVtlAccessOutput>() == 0x8);
2095
    open_enum::open_enum! {
        /// Result codes for [`CheckSparseGpaPageVtlAccessOutput::result_code`].
        pub enum CheckGpaPageVtlAccessResultCode: u32 {
            SUCCESS = 0,
            MEMORY_INTERCEPT = 1,
        }
    }
2102
    /// The number of VTLs for which permissions can be specified in a VTL permission set.
    pub const HV_VTL_PERMISSION_SET_SIZE: usize = 2;

    /// Per-VTL permission values for a GPA page.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VtlPermissionSet {
        /// VTL permissions for the GPA page, starting from VTL 1.
        pub vtl_permission_from_1: [u16; HV_VTL_PERMISSION_SET_SIZE],
    }

    open_enum::open_enum! {
        /// Expected memory type when accepting pages; see
        /// [`AcceptPagesAttributes::memory_type`].
        pub enum AcceptMemoryType: u32 {
            ANY = 0,
            RAM = 1,
        }
    }
2119
    open_enum! {
        /// Host visibility used in hypercall inputs.
        ///
        /// NOTE: While this is a 2 bit set with the lower bit representing host
        /// read access and upper bit representing host write access, hardware
        /// platforms do not support that form of isolation. Only support
        /// private or full shared in this definition.
        #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
        pub enum HostVisibilityType: u8 {
            PRIVATE = 0,
            // Both the read and write bits set: fully shared with the host.
            SHARED = 3,
        }
    }
2133
2134    // Used by bitfield-struct implicitly.
2135    impl HostVisibilityType {
2136        const fn from_bits(value: u8) -> Self {
2137            Self(value)
2138        }
2139
2140        const fn into_bits(value: Self) -> u8 {
2141            value.0
2142        }
2143    }
2144
    /// Attributes for accepting pages. See [`AcceptGpaPages`]
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptPagesAttributes {
        #[bits(6)]
        /// Supplies the expected memory type [`AcceptMemoryType`].
        pub memory_type: u32,
        #[bits(2)]
        /// Supplies the initial host visibility (exclusive, shared read-only, shared read-write).
        pub host_visibility: HostVisibilityType,
        #[bits(3)]
        /// Supplies the set of VTLs for which initial VTL permissions will be set.
        pub vtl_set: u32,
        #[bits(21)]
        _reserved: u32,
    }

    /// Input header for an accept-GPA-pages request.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct AcceptGpaPages {
        /// Supplies the partition ID of the partition this request is for.
        pub partition_id: u64,
        /// Supplies attributes of the pages being accepted, such as whether
        /// they should be made host visible.
        pub page_attributes: AcceptPagesAttributes,
        /// Supplies the set of initial VTL permissions.
        pub vtl_permission_set: VtlPermissionSet,
        /// Supplies the GPA page number of the first page to modify.
        pub gpa_page_base: u64,
    }
    // Size pinned to the ABI layout.
    const_assert!(size_of::<AcceptGpaPages>() == 0x18);
2176
2177    /// Attributes for unaccepting pages. See [`UnacceptGpaPages`]
2178    #[bitfield(u32)]
2179    pub struct UnacceptPagesAttributes {
2180        #[bits(3)]
2181        pub vtl_set: u32,
2182        #[bits(29)]
2183        _reserved: u32,
2184    }
2185
2186    #[repr(C)]
2187    pub struct UnacceptGpaPages {
2188        /// Supplies the partition ID of the partition this request is for.
2189        pub partition_id: u64,
2190        /// Supplies the set of VTLs for which VTL permissions will be checked.
2191        pub page_attributes: UnacceptPagesAttributes,
2192        ///  Supplies the set of VTL permissions to check against.
2193        pub vtl_permission_set: VtlPermissionSet,
2194        /// Supplies the GPA page number of the first page to modify.
2195        pub gpa_page_base: u64,
2196    }
2197    const_assert!(size_of::<UnacceptGpaPages>() == 0x18);
2198
    /// Host visibility to apply in a modify-visibility request.
    #[bitfield(u32)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifyHostVisibility {
        #[bits(2)]
        pub host_visibility: HostVisibilityType,
        #[bits(30)]
        _reserved: u32,
    }

    /// Input header for modifying host visibility of a sparse set of GPA
    /// pages.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct ModifySparsePageVisibility {
        /// Target partition.
        pub partition_id: u64,
        /// Visibility to apply.
        pub host_visibility: ModifyHostVisibility,
        /// Reserved.
        pub reserved: u32,
    }

    /// Input header for querying host visibility of a sparse set of GPA
    /// pages.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct QuerySparsePageVisibility {
        /// Target partition.
        pub partition_id: u64,
    }
2221
    /// Size in bytes of the caller-supplied report data for a VBS VM report.
    pub const VBS_VM_REPORT_DATA_SIZE: usize = 64;
    /// Maximum size in bytes of a generated VBS VM report.
    pub const VBS_VM_MAX_REPORT_SIZE: usize = 2048;

    /// Input for requesting a VBS VM attestation report.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VbsVmCallReport {
        /// Caller-chosen data to be bound into the report.
        pub report_data: [u8; VBS_VM_REPORT_DATA_SIZE],
    }

    /// Output buffer for a VBS VM attestation report.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct VbsVmCallReportOutput {
        /// The generated report, up to [`VBS_VM_MAX_REPORT_SIZE`] bytes.
        pub report: [u8; VBS_VM_MAX_REPORT_SIZE],
    }
2236
    /// Feature flags for enabling a partition VTL.
    #[bitfield(u8)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtlFlags {
        /// Enable Mode-Based Execution Control.
        pub enable_mbec: bool,
        pub enable_supervisor_shadow_stack: bool,
        pub enable_hardware_hvpt: bool,
        #[bits(5)]
        pub reserved: u8,
    }

    /// Input layout for enabling a VTL for a partition.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct EnablePartitionVtl {
        /// Target partition.
        pub partition_id: u64,
        /// VTL being enabled.
        pub target_vtl: u8,
        /// Optional features for the new VTL.
        pub flags: EnablePartitionVtlFlags,
        /// Reserved, zero.
        pub reserved_z0: u16,
        /// Reserved, zero.
        pub reserved_z1: u32,
    }
2256
    /// Input layout for flushing a virtual address space, targeting VPs via a
    /// simple 64-bit processor mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpace {
        /// Address space identifier to flush.
        pub address_space: u64,
        /// Flush behavior; see [`HvFlushFlags`].
        pub flags: HvFlushFlags,
        /// Bitmask of target processors.
        pub processor_mask: u64,
    }

    /// Input header for the extended flush, targeting VPs via a variable-sized
    /// VP set instead of a fixed 64-bit mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct FlushVirtualAddressSpaceEx {
        /// Address space identifier to flush.
        pub address_space: u64,
        /// Flush behavior; see [`HvFlushFlags`].
        pub flags: HvFlushFlags,
        pub vp_set_format: u64,
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }
2274
    /// Fixed header for pin/unpin GPA page range requests.
    ///
    /// NOTE(review): the page ranges presumably follow this header as a
    /// variable-length list — confirm against the TLFS.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct PinUnpinGpaPageRangesHeader {
        /// Reserved.
        pub reserved: u64,
    }
2280
    /// Input layout for sending a synthetic cluster IPI, targeting VPs via a
    /// simple 64-bit processor mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpi {
        /// Interrupt vector to deliver.
        pub vector: u32,
        /// VTL to deliver the interrupt to.
        pub target_vtl: HvInputVtl,
        pub flags: u8,
        /// Reserved.
        pub reserved: u16,
        /// Bitmask of target processors.
        pub processor_mask: u64,
    }

    /// Input header for the extended synthetic cluster IPI, targeting VPs via
    /// a variable-sized VP set instead of a fixed 64-bit mask.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct SendSyntheticClusterIpiEx {
        /// Interrupt vector to deliver.
        pub vector: u32,
        /// VTL to deliver the interrupt to.
        pub target_vtl: HvInputVtl,
        pub flags: u8,
        /// Reserved.
        pub reserved: u16,
        pub vp_set_format: u64,
        pub vp_set_valid_banks_mask: u64,
        // Followed by the variable-sized part of an HvVpSet
    }
2302
    /// Flags controlling TLB flush requests; see
    /// [`FlushVirtualAddressSpace`] and [`FlushVirtualAddressSpaceEx`].
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvFlushFlags {
        /// Flush on all processors, ignoring the target set.
        pub all_processors: bool,
        /// Flush all address spaces, ignoring `address_space`.
        pub all_virtual_address_spaces: bool,
        /// Flush only non-global mappings.
        pub non_global_mappings_only: bool,
        /// GVA ranges use the extended (large-page-capable) format.
        pub use_extended_range_format: bool,
        /// Honor the `target_vtl0`/`target_vtl1` bits below.
        pub use_target_vtl: bool,

        #[bits(3)]
        _reserved: u8,

        pub target_vtl0: bool,
        pub target_vtl1: bool,

        #[bits(54)]
        _reserved2: u64,
    }
2321
    /// A raw guest-virtual-address range whose interpretation depends on the
    /// flush request's format flags; decode with `as_simple`, `as_extended`,
    /// or `as_extended_large_page`.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGvaRange(pub u64);
2325
2326    impl From<u64> for HvGvaRange {
2327        fn from(value: u64) -> Self {
2328            Self(value)
2329        }
2330    }
2331
2332    impl From<HvGvaRange> for u64 {
2333        fn from(value: HvGvaRange) -> Self {
2334            value.0
2335        }
2336    }
2337
2338    impl HvGvaRange {
2339        pub fn as_simple(self) -> HvGvaRangeSimple {
2340            HvGvaRangeSimple(self.0)
2341        }
2342
2343        pub fn as_extended(self) -> HvGvaRangeExtended {
2344            HvGvaRangeExtended(self.0)
2345        }
2346
2347        pub fn as_extended_large_page(self) -> HvGvaRangeExtendedLargePage {
2348            HvGvaRangeExtendedLargePage(self.0)
2349        }
2350    }
2351
    /// GVA range in the simple (non-extended) encoding.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest virtual address.
        #[bits(52)]
        pub gva_page_number: u64,
    }

    /// GVA range in the extended encoding, for 4 KB pages.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest virtual address when `large_page` is clear.
        #[bits(52)]
        pub gva_page_number: u64,
    }

    /// GVA range in the extended encoding, for large pages.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGvaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest virtual address when `large_page` is set.
        #[bits(43)]
        pub gva_large_page_number: u64,
    }
2394
    /// A raw guest-physical-address range; decode with `as_simple`,
    /// `as_extended`, or `as_extended_large_page`.
    #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
    #[repr(transparent)]
    pub struct HvGpaRange(pub u64);
2398
2399    impl HvGpaRange {
2400        pub fn as_simple(self) -> HvGpaRangeSimple {
2401            HvGpaRangeSimple(self.0)
2402        }
2403
2404        pub fn as_extended(self) -> HvGpaRangeExtended {
2405            HvGpaRangeExtended(self.0)
2406        }
2407
2408        pub fn as_extended_large_page(self) -> HvGpaRangeExtendedLargePage {
2409            HvGpaRangeExtendedLargePage(self.0)
2410        }
2411    }
2412
    /// GPA range in the simple (non-extended) encoding.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeSimple {
        /// The number of pages beyond one.
        #[bits(12)]
        pub additional_pages: u64,
        /// The top 52 most significant bits of the guest physical address.
        #[bits(52)]
        pub gpa_page_number: u64,
    }

    /// GPA range in the extended encoding, for 4 KB pages.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtended {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The top 52 most significant bits of the guest physical address when `large_page` is clear.
        #[bits(52)]
        pub gpa_page_number: u64,
    }

    /// GPA range in the extended encoding, for large pages.
    #[bitfield(u64)]
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct HvGpaRangeExtendedLargePage {
        /// The number of pages beyond one.
        #[bits(11)]
        pub additional_pages: u64,
        /// Is page size greater than 4 KB.
        pub large_page: bool,
        /// The page size when `large_page` is set.
        /// false: 2 MB
        /// true: 1 GB
        pub page_size: bool,
        #[bits(8)]
        _reserved: u64,
        /// The top 43 most significant bits of the guest physical address when `large_page` is set.
        #[bits(43)]
        pub gpa_large_page_number: u64,
    }
2455
    /// Maximum number of data bytes in an MMIO read/write hypercall.
    pub const HV_HYPERCALL_MMIO_MAX_DATA_LENGTH: usize = 64;

    /// Input layout for an MMIO read request.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoRead {
        /// Guest physical address to read.
        pub gpa: u64,
        /// Number of bytes to read (at most
        /// [`HV_HYPERCALL_MMIO_MAX_DATA_LENGTH`]).
        pub access_width: u32,
        /// Reserved, zero.
        pub reserved_z0: u32,
    }
2465
    /// Output layout for an MMIO read request; only the first `access_width`
    /// bytes of `data` are meaningful.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoReadOutput {
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }

    /// Input layout for an MMIO write request; only the first `access_width`
    /// bytes of `data` are written.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct MemoryMappedIoWrite {
        /// Guest physical address to write.
        pub gpa: u64,
        /// Number of bytes to write (at most
        /// [`HV_HYPERCALL_MMIO_MAX_DATA_LENGTH`]).
        pub access_width: u32,
        /// Reserved, zero.
        pub reserved_z0: u32,
        pub data: [u8; HV_HYPERCALL_MMIO_MAX_DATA_LENGTH],
    }
2480
    /// Input layout for restoring a partition's reference time and TSC state.
    #[repr(C)]
    #[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub struct RestorePartitionTime {
        /// Target partition.
        pub partition_id: u64,
        /// Reference TSC sequence number to restore.
        pub tsc_sequence: u32,
        /// Reserved.
        pub reserved: u32,
        /// Reference time to restore, in 100 ns units.
        pub reference_time_in_100_ns: u64,
        /// TSC value to restore.
        pub tsc: u64,
    }
2490}
2491
/// Declares a register-name enum for one architecture.
///
/// Expands to an `open_enum` wrapper around `u32` containing the
/// caller-supplied variants plus the register names common to every
/// architecture, along with lossless conversions to and from the untyped
/// [`HvRegisterName`].
macro_rules! registers {
    ($name:ident {
        $(
            $(#[$vattr:meta])*
            $variant:ident = $value:expr
        ),*
        $(,)?
    }) => {
        open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
            pub enum $name: u32 {
        #![expect(non_upper_case_globals)]
                // Forward attributes (e.g. doc comments) on caller-supplied
                // variants; previously `$vattr` was matched but silently
                // dropped from the expansion.
                $($(#[$vattr])* $variant = $value,)*
                InstructionEmulationHints = 0x00000002,
                InternalActivityState = 0x00000004,

        // Guest Crash Registers
                GuestCrashP0  = 0x00000210,
                GuestCrashP1  = 0x00000211,
                GuestCrashP2  = 0x00000212,
                GuestCrashP3  = 0x00000213,
                GuestCrashP4  = 0x00000214,
                GuestCrashCtl = 0x00000215,

                PendingInterruption = 0x00010002,
                InterruptState = 0x00010003,
                PendingEvent0 = 0x00010004,
                PendingEvent1 = 0x00010005,
                DeliverabilityNotifications = 0x00010006,

                GicrBaseGpa = 0x00063000,

                VpRuntime = 0x00090000,
                GuestOsId = 0x00090002,
                VpIndex = 0x00090003,
                TimeRefCount = 0x00090004,
                CpuManagementVersion = 0x00090007,
                VpAssistPage = 0x00090013,
                VpRootSignalCount = 0x00090014,
                ReferenceTsc = 0x00090017,
                VpConfig = 0x00090018,
                Ghcb = 0x00090019,
                ReferenceTscSequence = 0x0009001A,
                GuestSchedulerEvent = 0x0009001B,

                Sint0 = 0x000A0000,
                Sint1 = 0x000A0001,
                Sint2 = 0x000A0002,
                Sint3 = 0x000A0003,
                Sint4 = 0x000A0004,
                Sint5 = 0x000A0005,
                Sint6 = 0x000A0006,
                Sint7 = 0x000A0007,
                Sint8 = 0x000A0008,
                Sint9 = 0x000A0009,
                Sint10 = 0x000A000A,
                Sint11 = 0x000A000B,
                Sint12 = 0x000A000C,
                Sint13 = 0x000A000D,
                Sint14 = 0x000A000E,
                Sint15 = 0x000A000F,
                Scontrol = 0x000A0010,
                Sversion = 0x000A0011,
                Sifp = 0x000A0012,
                Sipp = 0x000A0013,
                Eom = 0x000A0014,
                Sirbp = 0x000A0015,

                Stimer0Config = 0x000B0000,
                Stimer0Count = 0x000B0001,
                Stimer1Config = 0x000B0002,
                Stimer1Count = 0x000B0003,
                Stimer2Config = 0x000B0004,
                Stimer2Count = 0x000B0005,
                Stimer3Config = 0x000B0006,
                Stimer3Count = 0x000B0007,
                StimeUnhaltedTimerConfig = 0x000B0100,
                StimeUnhaltedTimerCount = 0x000B0101,

                VsmCodePageOffsets = 0x000D0002,
                VsmVpStatus = 0x000D0003,
                VsmPartitionStatus = 0x000D0004,
                VsmVina = 0x000D0005,
                VsmCapabilities = 0x000D0006,
                VsmPartitionConfig = 0x000D0007,
                GuestVsmPartitionConfig = 0x000D0008,
                VsmVpSecureConfigVtl0 = 0x000D0010,
                VsmVpSecureConfigVtl1 = 0x000D0011,
                VsmVpSecureConfigVtl2 = 0x000D0012,
                VsmVpSecureConfigVtl3 = 0x000D0013,
                VsmVpSecureConfigVtl4 = 0x000D0014,
                VsmVpSecureConfigVtl5 = 0x000D0015,
                VsmVpSecureConfigVtl6 = 0x000D0016,
                VsmVpSecureConfigVtl7 = 0x000D0017,
                VsmVpSecureConfigVtl8 = 0x000D0018,
                VsmVpSecureConfigVtl9 = 0x000D0019,
                VsmVpSecureConfigVtl10 = 0x000D001A,
                VsmVpSecureConfigVtl11 = 0x000D001B,
                VsmVpSecureConfigVtl12 = 0x000D001C,
                VsmVpSecureConfigVtl13 = 0x000D001D,
                VsmVpSecureConfigVtl14 = 0x000D001E,
                VsmVpWaitForTlbLock = 0x000D0020,
            }
        }

        // Conversions to/from the untyped register name are lossless: both
        // sides wrap the same raw u32 identifier.
        impl From<HvRegisterName> for $name {
            fn from(name: HvRegisterName) -> Self {
                Self(name.0)
            }
        }

        impl From<$name> for HvRegisterName {
            fn from(name: $name) -> Self {
                Self(name.0)
            }
        }
    };
}
2610
/// A hypervisor register for any architecture.
///
/// This exists only to pass registers through layers where the architecture
/// type has been lost. In general, you should use the arch-specific registers.
///
/// The wrapped value is the raw 32-bit register identifier, shared with the
/// arch-specific register name types (see the `From` conversions below).
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterName(pub u32);
2618
registers! {
    // Typed enum for registers that are shared across architectures.
    // The cross-architecture values (SINTs, STIMERs, VSM registers, ...) are
    // injected by the `registers!` macro itself, so this invocation adds no
    // extra arch-specific names.
    HvAllArchRegisterName {}
}
2623
2624impl From<HvAllArchRegisterName> for HvX64RegisterName {
2625    fn from(name: HvAllArchRegisterName) -> Self {
2626        Self(name.0)
2627    }
2628}
2629
2630impl From<HvAllArchRegisterName> for HvArm64RegisterName {
2631    fn from(name: HvAllArchRegisterName) -> Self {
2632        Self(name.0)
2633    }
2634}
2635
registers! {
    // Typed enum of x86-64 register names, as used by the hypervisor's
    // get/set-register interfaces. The cross-architecture values are injected
    // by the `registers!` macro itself.
    HvX64RegisterName {
        // X64 User-Mode Registers
        Rax = 0x00020000,
        Rcx = 0x00020001,
        Rdx = 0x00020002,
        Rbx = 0x00020003,
        Rsp = 0x00020004,
        Rbp = 0x00020005,
        Rsi = 0x00020006,
        Rdi = 0x00020007,
        R8 = 0x00020008,
        R9 = 0x00020009,
        R10 = 0x0002000a,
        R11 = 0x0002000b,
        R12 = 0x0002000c,
        R13 = 0x0002000d,
        R14 = 0x0002000e,
        R15 = 0x0002000f,
        Rip = 0x00020010,
        Rflags = 0x00020011,

        // X64 Floating Point and Vector Registers
        Xmm0 = 0x00030000,
        Xmm1 = 0x00030001,
        Xmm2 = 0x00030002,
        Xmm3 = 0x00030003,
        Xmm4 = 0x00030004,
        Xmm5 = 0x00030005,
        Xmm6 = 0x00030006,
        Xmm7 = 0x00030007,
        Xmm8 = 0x00030008,
        Xmm9 = 0x00030009,
        Xmm10 = 0x0003000A,
        Xmm11 = 0x0003000B,
        Xmm12 = 0x0003000C,
        Xmm13 = 0x0003000D,
        Xmm14 = 0x0003000E,
        Xmm15 = 0x0003000F,
        FpMmx0 = 0x00030010,
        FpMmx1 = 0x00030011,
        FpMmx2 = 0x00030012,
        FpMmx3 = 0x00030013,
        FpMmx4 = 0x00030014,
        FpMmx5 = 0x00030015,
        FpMmx6 = 0x00030016,
        FpMmx7 = 0x00030017,
        FpControlStatus = 0x00030018,
        XmmControlStatus = 0x00030019,

        // X64 Control Registers
        Cr0 = 0x00040000,
        Cr2 = 0x00040001,
        Cr3 = 0x00040002,
        Cr4 = 0x00040003,
        Cr8 = 0x00040004,
        Xfem = 0x00040005,
        // X64 Intermediate Control Registers
        IntermediateCr0 = 0x00041000,
        IntermediateCr3 = 0x00041002,
        IntermediateCr4 = 0x00041003,
        IntermediateCr8 = 0x00041004,
        // X64 Debug Registers
        Dr0 = 0x00050000,
        Dr1 = 0x00050001,
        Dr2 = 0x00050002,
        Dr3 = 0x00050003,
        Dr6 = 0x00050004,
        Dr7 = 0x00050005,
        // X64 Segment Registers
        Es = 0x00060000,
        Cs = 0x00060001,
        Ss = 0x00060002,
        Ds = 0x00060003,
        Fs = 0x00060004,
        Gs = 0x00060005,
        Ldtr = 0x00060006,
        Tr = 0x00060007,
        // X64 Table Registers
        Idtr = 0x00070000,
        Gdtr = 0x00070001,
        // X64 Virtualized MSRs
        Tsc = 0x00080000,
        Efer = 0x00080001,
        KernelGsBase = 0x00080002,
        ApicBase = 0x00080003,
        Pat = 0x00080004,
        SysenterCs = 0x00080005,
        SysenterEip = 0x00080006,
        SysenterEsp = 0x00080007,
        Star = 0x00080008,
        Lstar = 0x00080009,
        Cstar = 0x0008000a,
        Sfmask = 0x0008000b,
        InitialApicId = 0x0008000c,
        // X64 Cache control MSRs
        MsrMtrrCap = 0x0008000d,
        MsrMtrrDefType = 0x0008000e,
        MsrMtrrPhysBase0 = 0x00080010,
        MsrMtrrPhysBase1 = 0x00080011,
        MsrMtrrPhysBase2 = 0x00080012,
        MsrMtrrPhysBase3 = 0x00080013,
        MsrMtrrPhysBase4 = 0x00080014,
        MsrMtrrPhysBase5 = 0x00080015,
        MsrMtrrPhysBase6 = 0x00080016,
        MsrMtrrPhysBase7 = 0x00080017,
        MsrMtrrPhysBase8 = 0x00080018,
        MsrMtrrPhysBase9 = 0x00080019,
        MsrMtrrPhysBaseA = 0x0008001a,
        MsrMtrrPhysBaseB = 0x0008001b,
        MsrMtrrPhysBaseC = 0x0008001c,
        MsrMtrrPhysBaseD = 0x0008001d,
        MsrMtrrPhysBaseE = 0x0008001e,
        MsrMtrrPhysBaseF = 0x0008001f,
        MsrMtrrPhysMask0 = 0x00080040,
        MsrMtrrPhysMask1 = 0x00080041,
        MsrMtrrPhysMask2 = 0x00080042,
        MsrMtrrPhysMask3 = 0x00080043,
        MsrMtrrPhysMask4 = 0x00080044,
        MsrMtrrPhysMask5 = 0x00080045,
        MsrMtrrPhysMask6 = 0x00080046,
        MsrMtrrPhysMask7 = 0x00080047,
        MsrMtrrPhysMask8 = 0x00080048,
        MsrMtrrPhysMask9 = 0x00080049,
        MsrMtrrPhysMaskA = 0x0008004a,
        MsrMtrrPhysMaskB = 0x0008004b,
        MsrMtrrPhysMaskC = 0x0008004c,
        MsrMtrrPhysMaskD = 0x0008004d,
        MsrMtrrPhysMaskE = 0x0008004e,
        MsrMtrrPhysMaskF = 0x0008004f,
        MsrMtrrFix64k00000 = 0x00080070,
        MsrMtrrFix16k80000 = 0x00080071,
        MsrMtrrFix16kA0000 = 0x00080072,
        MsrMtrrFix4kC0000 = 0x00080073,
        MsrMtrrFix4kC8000 = 0x00080074,
        MsrMtrrFix4kD0000 = 0x00080075,
        MsrMtrrFix4kD8000 = 0x00080076,
        MsrMtrrFix4kE0000 = 0x00080077,
        MsrMtrrFix4kE8000 = 0x00080078,
        MsrMtrrFix4kF0000 = 0x00080079,
        MsrMtrrFix4kF8000 = 0x0008007a,

        TscAux = 0x0008007B,
        Bndcfgs = 0x0008007C,
        DebugCtl = 0x0008007D,
        MCount = 0x0008007E,
        ACount = 0x0008007F,

        SgxLaunchControl0 = 0x00080080,
        SgxLaunchControl1 = 0x00080081,
        SgxLaunchControl2 = 0x00080082,
        SgxLaunchControl3 = 0x00080083,
        SpecCtrl = 0x00080084,
        PredCmd = 0x00080085,
        VirtSpecCtrl = 0x00080086,
        TscVirtualOffset = 0x00080087,
        TsxCtrl = 0x00080088,
        MsrMcUpdatePatchLevel = 0x00080089,
        Available1 = 0x0008008A,
        Xss = 0x0008008B,
        UCet = 0x0008008C,
        SCet = 0x0008008D,
        Ssp = 0x0008008E,
        Pl0Ssp = 0x0008008F,
        Pl1Ssp = 0x00080090,
        Pl2Ssp = 0x00080091,
        Pl3Ssp = 0x00080092,
        InterruptSspTableAddr = 0x00080093,
        TscVirtualMultiplier = 0x00080094,
        TscDeadline = 0x00080095,
        TscAdjust = 0x00080096,
        Pasid = 0x00080097,
        UmwaitControl = 0x00080098,
        Xfd = 0x00080099,
        XfdErr = 0x0008009A,

        // X64 Apic registers. These match the equivalent x2APIC MSR offsets.
        ApicId = 0x00084802,
        ApicVersion = 0x00084803,
        ApicTpr = 0x00084808,
        ApicPpr = 0x0008480a,
        ApicEoi = 0x0008480b,
        ApicLdr = 0x0008480d,
        ApicSpurious = 0x0008480f,
        ApicIsr0 = 0x00084810,
        ApicIsr1 = 0x00084811,
        ApicIsr2 = 0x00084812,
        ApicIsr3 = 0x00084813,
        ApicIsr4 = 0x00084814,
        ApicIsr5 = 0x00084815,
        ApicIsr6 = 0x00084816,
        ApicIsr7 = 0x00084817,
        ApicTmr0 = 0x00084818,
        ApicTmr1 = 0x00084819,
        ApicTmr2 = 0x0008481a,
        ApicTmr3 = 0x0008481b,
        ApicTmr4 = 0x0008481c,
        ApicTmr5 = 0x0008481d,
        ApicTmr6 = 0x0008481e,
        ApicTmr7 = 0x0008481f,
        ApicIrr0 = 0x00084820,
        ApicIrr1 = 0x00084821,
        ApicIrr2 = 0x00084822,
        ApicIrr3 = 0x00084823,
        ApicIrr4 = 0x00084824,
        ApicIrr5 = 0x00084825,
        ApicIrr6 = 0x00084826,
        ApicIrr7 = 0x00084827,
        ApicEse = 0x00084828,
        ApicIcr = 0x00084830,
        ApicLvtTimer = 0x00084832,
        ApicLvtThermal = 0x00084833,
        ApicLvtPerfmon = 0x00084834,
        ApicLvtLint0 = 0x00084835,
        ApicLvtLint1 = 0x00084836,
        ApicLvtError = 0x00084837,
        ApicInitCount = 0x00084838,
        ApicCurrentCount = 0x00084839,
        ApicDivide = 0x0008483e,
        ApicSelfIpi = 0x0008483f,

        Hypercall = 0x00090001,
        RegisterPage = 0x0009001C,

        // Partition Timer Assist Registers
        EmulatedTimerPeriod = 0x00090030,
        EmulatedTimerControl = 0x00090031,
        PmTimerAssist = 0x00090032,

        // AMD SEV configuration MSRs
        SevControl = 0x00090040,
        SevGhcbGpa = 0x00090041,
        SevAvicGpa = 0x00090043,

        CrInterceptControl = 0x000E0000,
        CrInterceptCr0Mask = 0x000E0001,
        CrInterceptCr4Mask = 0x000E0002,
        CrInterceptIa32MiscEnableMask = 0x000E0003,
    }
}
2876
registers! {
    // Typed enum of ARM64 register names, as used by the hypervisor's
    // get/set-register interfaces. The cross-architecture values are injected
    // by the `registers!` macro itself.
    HvArm64RegisterName {
        HypervisorVersion = 0x00000100,
        PrivilegesAndFeaturesInfo = 0x00000200,
        FeaturesInfo = 0x00000201,
        ImplementationLimitsInfo = 0x00000202,
        HardwareFeaturesInfo = 0x00000203,
        CpuManagementFeaturesInfo = 0x00000204,
        PasidFeaturesInfo = 0x00000205,
        SkipLevelFeaturesInfo = 0x00000206,
        NestedVirtFeaturesInfo = 0x00000207,
        IptFeaturesInfo = 0x00000208,
        IsolationConfiguration = 0x00000209,

        // AArch64 general-purpose registers.
        X0 = 0x00020000,
        X1 = 0x00020001,
        X2 = 0x00020002,
        X3 = 0x00020003,
        X4 = 0x00020004,
        X5 = 0x00020005,
        X6 = 0x00020006,
        X7 = 0x00020007,
        X8 = 0x00020008,
        X9 = 0x00020009,
        X10 = 0x0002000A,
        X11 = 0x0002000B,
        X12 = 0x0002000C,
        X13 = 0x0002000D,
        X14 = 0x0002000E,
        X15 = 0x0002000F,
        X16 = 0x00020010,
        X17 = 0x00020011,
        X18 = 0x00020012,
        X19 = 0x00020013,
        X20 = 0x00020014,
        X21 = 0x00020015,
        X22 = 0x00020016,
        X23 = 0x00020017,
        X24 = 0x00020018,
        X25 = 0x00020019,
        X26 = 0x0002001A,
        X27 = 0x0002001B,
        X28 = 0x0002001C,
        XFp = 0x0002001D,
        XLr = 0x0002001E,
        XSp = 0x0002001F, // alias for either El0/x depending on Cpsr.SPSel
        XSpEl0 = 0x00020020,
        XSpElx = 0x00020021,
        XPc = 0x00020022,
        Cpsr = 0x00020023,
        SpsrEl2 = 0x00021002,

        // AArch64 system registers.
        SctlrEl1 = 0x00040002,
        Ttbr0El1 = 0x00040005,
        Ttbr1El1 = 0x00040006,
        TcrEl1 = 0x00040007,
        EsrEl1 = 0x00040008,
        FarEl1 = 0x00040009,
        MairEl1 = 0x0004000b,
        VbarEl1 = 0x0004000c,
        ElrEl1 = 0x00040015,
    }
}
2940
/// The value of a hypervisor register, held as an aligned 128-bit integer.
///
/// Narrower values are stored zero-extended (see the `From` impls below); the
/// `as_*` accessors truncate back down.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterValue(pub AlignedU128);
2944
2945impl HvRegisterValue {
2946    pub fn as_u128(&self) -> u128 {
2947        self.0.into()
2948    }
2949
2950    pub fn as_u64(&self) -> u64 {
2951        self.as_u128() as u64
2952    }
2953
2954    pub fn as_u32(&self) -> u32 {
2955        self.as_u128() as u32
2956    }
2957
2958    pub fn as_u16(&self) -> u16 {
2959        self.as_u128() as u16
2960    }
2961
2962    pub fn as_u8(&self) -> u8 {
2963        self.as_u128() as u8
2964    }
2965
2966    pub fn as_table(&self) -> HvX64TableRegister {
2967        HvX64TableRegister::read_from_prefix(self.as_bytes())
2968            .unwrap()
2969            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2970    }
2971
2972    pub fn as_segment(&self) -> HvX64SegmentRegister {
2973        HvX64SegmentRegister::read_from_prefix(self.as_bytes())
2974            .unwrap()
2975            .0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
2976    }
2977}
2978
2979impl From<u8> for HvRegisterValue {
2980    fn from(val: u8) -> Self {
2981        (val as u128).into()
2982    }
2983}
2984
2985impl From<u16> for HvRegisterValue {
2986    fn from(val: u16) -> Self {
2987        (val as u128).into()
2988    }
2989}
2990
2991impl From<u32> for HvRegisterValue {
2992    fn from(val: u32) -> Self {
2993        (val as u128).into()
2994    }
2995}
2996
2997impl From<u64> for HvRegisterValue {
2998    fn from(val: u64) -> Self {
2999        (val as u128).into()
3000    }
3001}
3002
3003impl From<u128> for HvRegisterValue {
3004    fn from(val: u128) -> Self {
3005        Self(val.into())
3006    }
3007}
3008
/// An x86-64 table register (e.g. IDTR/GDTR): a base/limit pair, padded out
/// so it round-trips through a 128-bit `HvRegisterValue`.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64TableRegister {
    // Padding so that `limit`/`base` land in the upper bytes of the value.
    pub pad: [u16; 3],
    pub limit: u16,
    pub base: u64,
}
3016
3017impl From<HvX64TableRegister> for HvRegisterValue {
3018    fn from(val: HvX64TableRegister) -> Self {
3019        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
3020    }
3021}
3022
3023impl From<HvRegisterValue> for HvX64TableRegister {
3024    fn from(val: HvRegisterValue) -> Self {
3025        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
3026    }
3027}
3028
/// An x86-64 segment register: base, limit, selector, and attribute flags,
/// in the 128-bit layout used by `HvRegisterValue`.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SegmentRegister {
    pub base: u64,
    pub limit: u32,
    pub selector: u16,
    pub attributes: u16,
}
3037
3038impl From<HvX64SegmentRegister> for HvRegisterValue {
3039    fn from(val: HvX64SegmentRegister) -> Self {
3040        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
3041    }
3042}
3043
3044impl From<HvRegisterValue> for HvX64SegmentRegister {
3045    fn from(val: HvRegisterValue) -> Self {
3046        Self::read_from_prefix(val.as_bytes()).unwrap().0 // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
3047    }
3048}
3049
/// Contents of the deliverability-notifications register.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq, Eq)]
pub struct HvDeliverabilityNotificationsRegister {
    /// x86_64 only.
    pub nmi_notification: bool,
    /// x86_64 only.
    pub interrupt_notification: bool,
    /// x86_64 only.
    #[bits(4)]
    pub interrupt_priority: u8,
    #[bits(42)]
    pub reserved: u64,
    /// One bit per SINT.
    pub sints: u16,
}
3065
open_enum! {
    /// The reason a VTL was entered, reported in [`HvVpVtlControl`].
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvVtlEntryReason: u32 {
        /// This reason is reserved and is not used.
        RESERVED = 0,

        /// Indicates entry due to a VTL call from a lower VTL.
        VTL_CALL = 1,

        /// Indicates entry due to an interrupt targeted to the VTL.
        INTERRUPT = 2,

        /// Indicates an entry due to an intercept delivered via the intercept page.
        INTERCEPT = 3,
    }
}
3082
/// Per-VP VTL control information, embedded in the VP assist page.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpVtlControl {
    /// The hypervisor updates the entry reason with an indication as to why the
    /// VTL was entered on the virtual processor.
    pub entry_reason: HvVtlEntryReason,

    /// This flag determines whether the VINA interrupt line is asserted.
    pub vina_status: u8,
    pub reserved_z0: u8,
    pub reserved_z1: u16,

    /// A guest updates the VtlReturn* fields to provide the register values to
    /// restore on VTL return.  The specific register values that are restored
    /// will vary based on whether the VTL is 32-bit or 64-bit: rax and rcx or
    /// eax, ecx, and edx.
    pub registers: [u64; 2],
}
3103
/// Value of the `VsmVina` register (see the `VsmVina` register name),
/// configuring the virtual interrupt notification assist.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterVsmVina {
    /// The interrupt vector to deliver.
    pub vector: u8,
    pub enabled: bool,
    pub auto_reset: bool,
    pub auto_eoi: bool,
    #[bits(53)]
    pub reserved: u64,
}
3114
/// The layout of the per-VP assist page.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPage {
    /// APIC assist for optimized EOI processing.
    pub apic_assist: u32,
    pub reserved_z0: u32,

    /// VP-VTL control information
    pub vtl_control: HvVpVtlControl,

    pub nested_enlightenments_control: u64,
    pub enlighten_vm_entry: u8,
    pub reserved_z1: [u8; 7],
    pub current_nested_vmcs: u64,
    pub synthetic_time_unhalted_timer_expired: u8,
    pub reserved_z2: [u8; 7],
    pub virtualization_fault_information: [u8; 40],
    pub reserved_z3: u64,
    /// The message describing the intercept delivered via the assist page.
    pub intercept_message: HvMessage,
    pub vtl_return_actions: [u8; 256],
}
3136
/// A VTL-return action that signals an event on a target VP/VTL/SINT flag.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvVpAssistPageActionSignalEvent {
    pub action_type: u64,
    pub target_vp: u32,
    pub target_vtl: u8,
    pub target_sint: u8,
    pub flag_number: u16,
}
3146
open_enum! {
    /// The type of access that caused an intercept.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterceptAccessType: u8 {
        READ = 0,
        WRITE = 1,
        EXECUTE = 2,
    }
}
3155
/// Virtual-processor execution state, as reported in x86-64 intercept message
/// headers.
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VpExecutionState {
    #[bits(2)]
    pub cpl: u8,
    pub cr0_pe: bool,
    pub cr0_am: bool,
    pub efer_lma: bool,
    pub debug_active: bool,
    pub interruption_pending: bool,
    /// The VTL that was executing when the intercept occurred.
    #[bits(4)]
    pub vtl: u8,
    pub enclave_mode: bool,
    pub interrupt_shadow: bool,
    pub virtualization_fault_active: bool,
    #[bits(2)]
    pub reserved: u8,
}
3174
/// Virtual-processor execution state, as reported in ARM64 intercept message
/// headers.
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64VpExecutionState {
    #[bits(2)]
    pub cpl: u8,
    pub debug_active: bool,
    pub interruption_pending: bool,
    /// The VTL that was executing when the intercept occurred.
    #[bits(4)]
    pub vtl: u8,
    pub virtualization_fault_active: bool,
    #[bits(7)]
    pub reserved: u8,
}
3188
/// Header common to the x86-64 intercept messages defined below.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterceptMessageHeader {
    pub vp_index: u32,
    /// Low four bits: instruction length; high four bits: CR8. Use the
    /// `instruction_len`/`cr8` accessors.
    pub instruction_length_and_cr8: u8,
    pub intercept_access_type: HvInterceptAccessType,
    pub execution_state: HvX64VpExecutionState,
    pub cs_segment: HvX64SegmentRegister,
    pub rip: u64,
    pub rflags: u64,
}

impl MessagePayload for HvX64InterceptMessageHeader {}
3202
3203impl HvX64InterceptMessageHeader {
3204    pub fn instruction_len(&self) -> u8 {
3205        self.instruction_length_and_cr8 & 0xf
3206    }
3207
3208    pub fn cr8(&self) -> u8 {
3209        self.instruction_length_and_cr8 >> 4
3210    }
3211}
3212
/// Header common to the ARM64 intercept messages defined below.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64InterceptMessageHeader {
    pub vp_index: u32,
    pub instruction_length: u8,
    pub intercept_access_type: HvInterceptAccessType,
    pub execution_state: HvArm64VpExecutionState,
    pub pc: u64,
    // NOTE(review): `cspr` looks like a transposition of `cpsr` (the register
    // list spells it `Cpsr`), but renaming this public field would break
    // callers, so it is left as-is.
    pub cspr: u64,
}
const_assert!(size_of::<HvArm64InterceptMessageHeader>() == 0x18);

impl MessagePayload for HvArm64InterceptMessageHeader {}
3226
/// Packed access information for an I/O port intercept: access size in
/// bits 0-2, string-op flag in bit 3, rep-prefix flag in bit 4 (see the
/// accessors on the `impl`).
#[repr(transparent)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortAccessInfo(pub u8);
3230
3231impl HvX64IoPortAccessInfo {
3232    pub fn new(access_size: u8, string_op: bool, rep_prefix: bool) -> Self {
3233        let mut info = access_size & 0x7;
3234
3235        if string_op {
3236            info |= 0x8;
3237        }
3238
3239        if rep_prefix {
3240            info |= 0x10;
3241        }
3242
3243        Self(info)
3244    }
3245
3246    pub fn access_size(&self) -> u8 {
3247        self.0 & 0x7
3248    }
3249
3250    pub fn string_op(&self) -> bool {
3251        self.0 & 0x8 != 0
3252    }
3253
3254    pub fn rep_prefix(&self) -> bool {
3255        self.0 & 0x10 != 0
3256    }
3257}
3258
/// Intercept message for an x86-64 I/O port access.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64IoPortInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub port_number: u16,
    pub access_info: HvX64IoPortAccessInfo,
    pub instruction_byte_count: u8,
    pub reserved: u32,
    pub rax: u64,
    pub instruction_bytes: [u8; 16],
    // Segment and register state needed to emulate string I/O.
    pub ds_segment: HvX64SegmentRegister,
    pub es_segment: HvX64SegmentRegister,
    pub rcx: u64,
    pub rsi: u64,
    pub rdi: u64,
}

impl MessagePayload for HvX64IoPortInterceptMessage {}
3277
/// Access flags accompanying an x86-64 memory intercept.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MemoryAccessInfo {
    /// Whether `guest_virtual_address` in the message is valid.
    pub gva_valid: bool,
    pub gva_gpa_valid: bool,
    pub hypercall_output_pending: bool,
    pub tlb_locked: bool,
    pub supervisor_shadow_stack: bool,
    #[bits(3)]
    pub reserved1: u8,
}
3289
/// Access flags accompanying an ARM64 memory intercept.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64MemoryAccessInfo {
    /// Whether `guest_virtual_address` in the message is valid.
    pub gva_valid: bool,
    pub gva_gpa_valid: bool,
    pub hypercall_output_pending: bool,
    #[bits(5)]
    pub reserved1: u8,
}
3299
open_enum! {
    /// Cache type of an access, as reported in memory intercept messages.
    /// The values mirror x86 memory-type encodings (note the gaps at 2-3).
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvCacheType: u32 {
        #![expect(non_upper_case_globals)]
        HvCacheTypeUncached = 0,
        HvCacheTypeWriteCombining = 1,
        HvCacheTypeWriteThrough = 4,
        HvCacheTypeWriteProtected = 5,
        HvCacheTypeWriteBack = 6,
    }
}
3311
/// Intercept message for an x86-64 memory access (e.g. MMIO).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MemoryInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub cache_type: HvCacheType,
    pub instruction_byte_count: u8,
    pub memory_access_info: HvX64MemoryAccessInfo,
    pub tpr_priority: u8,
    pub reserved: u8,
    /// Valid only when `memory_access_info.gva_valid` is set.
    pub guest_virtual_address: u64,
    pub guest_physical_address: u64,
    pub instruction_bytes: [u8; 16],
}

impl MessagePayload for HvX64MemoryInterceptMessage {}
// Guard the ABI size of the message.
const_assert!(size_of::<HvX64MemoryInterceptMessage>() == 0x50);
3328
/// Intercept message for an ARM64 memory access.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64MemoryInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub cache_type: HvCacheType,
    pub instruction_byte_count: u8,
    pub memory_access_info: HvArm64MemoryAccessInfo,
    pub reserved1: u16,
    pub instruction_bytes: [u8; 4],
    pub reserved2: u32,
    /// Valid only when `memory_access_info.gva_valid` is set.
    pub guest_virtual_address: u64,
    pub guest_physical_address: u64,
    pub syndrome: u64,
}

impl MessagePayload for HvArm64MemoryInterceptMessage {}
// Guard the ABI size of the message.
const_assert!(size_of::<HvArm64MemoryInterceptMessage>() == 0x40);
3346
/// Intercept message for an ARM64 MMIO access, carrying the access data
/// inline.
#[repr(C)]
#[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)]
pub struct HvArm64MmioInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub guest_physical_address: u64,
    pub access_size: u32,
    pub data: [u8; 32],
    pub padding: u32,
}

impl MessagePayload for HvArm64MmioInterceptMessage {}
// Guard the ABI size of the message.
const_assert!(size_of::<HvArm64MmioInterceptMessage>() == 0x48);
3359
/// Intercept message for an x86-64 MSR access (RDMSR/WRMSR).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64MsrInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub msr_number: u32,
    pub reserved: u32,
    pub rdx: u64,
    pub rax: u64,
}

impl MessagePayload for HvX64MsrInterceptMessage {}
3371
/// Intercept message for an x86-64 SIPI (startup IPI).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SipiInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub target_vp_index: u32,
    pub vector: u32,
}

impl MessagePayload for HvX64SipiInterceptMessage {}
3381
/// Message indicating which synthetic interrupt sources (SINTs) have become
/// deliverable (x86-64).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64SynicSintDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    /// One bit per deliverable SINT.
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvX64SynicSintDeliverableMessage {}
3392
/// Message indicating which synthetic interrupt sources (SINTs) have become
/// deliverable (ARM64).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64SynicSintDeliverableMessage {
    pub header: HvArm64InterceptMessageHeader,
    /// One bit per deliverable SINT.
    pub deliverable_sints: u16,
    pub rsvd1: u16,
    pub rsvd2: u32,
}

impl MessagePayload for HvArm64SynicSintDeliverableMessage {}
3403
/// Message indicating that an interruption of the given type has become
/// deliverable (x86-64).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptionDeliverableMessage {
    pub header: HvX64InterceptMessageHeader,
    pub deliverable_type: HvX64PendingInterruptionType,
    pub rsvd: [u8; 3],
    pub rsvd2: u32,
}

impl MessagePayload for HvX64InterruptionDeliverableMessage {}
3414
open_enum! {
    /// The type of a pending x86-64 interruption.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvX64PendingInterruptionType: u8 {
        HV_X64_PENDING_INTERRUPT = 0,
        HV_X64_PENDING_NMI = 2,
        HV_X64_PENDING_EXCEPTION = 3,
        HV_X64_PENDING_SOFTWARE_INTERRUPT = 4,
        HV_X64_PENDING_PRIVILEGED_SOFTWARE_EXCEPTION = 5,
        HV_X64_PENDING_SOFTWARE_EXCEPTION = 6,
    }
}
3426
/// Intercept message for an x86-64 hypercall, carrying the register state the
/// hypercall ABI uses.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HypercallInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rbx: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub r8: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub xmm_registers: [AlignedU128; 6],
    pub flags: HvHypercallInterceptMessageFlags,
    pub rsvd2: [u32; 3],
}

impl MessagePayload for HvX64HypercallInterceptMessage {}
3444
/// Intercept message for an ARM64 hypercall, carrying the HVC immediate and
/// the general-purpose registers X0-X17.
#[repr(C)]
#[derive(Debug, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64HypercallInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub immediate: u16,
    pub reserved: u16,
    pub flags: HvHypercallInterceptMessageFlags,
    pub x: [u64; 18],
}

impl MessagePayload for HvArm64HypercallInterceptMessage {}
3456
/// Flags shared by the x86-64 and ARM64 hypercall intercept messages.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvHypercallInterceptMessageFlags {
    pub is_isolated: bool,
    #[bits(31)]
    _reserved: u32,
}
3464
/// Intercept message for an x86-64 CPUID instruction, including the result
/// the hypervisor would return by default.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64CpuidInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub default_result_rax: u64,
    pub default_result_rcx: u64,
    pub default_result_rdx: u64,
    pub default_result_rbx: u64,
}

impl MessagePayload for HvX64CpuidInterceptMessage {}
3480
/// Flags qualifying an x86-64 exception intercept.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInfo {
    /// Whether `error_code` in the message is valid.
    pub error_code_valid: bool,
    pub software_exception: bool,
    #[bits(6)]
    reserved: u8,
}
3489
/// Intercept message for an x86-64 exception, including the full
/// general-purpose register state.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ExceptionInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub vector: u16,
    pub exception_info: HvX64ExceptionInfo,
    pub instruction_byte_count: u8,
    /// Valid only when `exception_info.error_code_valid` is set.
    pub error_code: u32,
    pub exception_parameter: u64,
    pub reserved: u64,
    pub instruction_bytes: [u8; 16],
    pub ds_segment: HvX64SegmentRegister,
    pub ss_segment: HvX64SegmentRegister,
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub rsp: u64,
    pub rbp: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,
}

impl MessagePayload for HvX64ExceptionInterceptMessage {}
3522
/// Message identifying the VP whose register state is invalid.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInvalidVpRegisterMessage {
    pub vp_index: u32,
    pub reserved: u32,
}

impl MessagePayload for HvInvalidVpRegisterMessage {}
3531
/// Message reporting an APIC EOI for the given vector (x86-64).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64ApicEoiMessage {
    pub vp_index: u32,
    pub interrupt_vector: u32,
}

impl MessagePayload for HvX64ApicEoiMessage {}
3540
/// Message reporting an unrecoverable exception on the VP; it carries only
/// the standard intercept header.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64UnrecoverableExceptionMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64UnrecoverableExceptionMessage {}
3548
/// Message reporting that the VP executed a halt; it carries only the
/// standard intercept header.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64HaltMessage {
    pub header: HvX64InterceptMessageHeader,
}

impl MessagePayload for HvX64HaltMessage {}
3556
/// Intercept message for an ARM64 reset request.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvArm64ResetInterceptMessage {
    pub header: HvArm64InterceptMessageHeader,
    pub reset_type: HvArm64ResetType,
    pub reset_code: u32,
}

impl MessagePayload for HvArm64ResetInterceptMessage {}
3566
open_enum! {
    /// The kind of reset requested in an ARM64 reset intercept.
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvArm64ResetType: u32 {
        POWER_OFF = 0,
        REBOOT = 1,
        SYSTEM_RESET = 2,
        HIBERNATE = 3,
    }
}
3576
/// Flags for [`HvX64RegisterInterceptMessage`].
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessageFlags {
    /// Set when the register access was performed via a memory operation.
    pub is_memory_op: bool,
    #[bits(7)]
    _rsvd: u8,
}
3584
/// Intercept message for an intercepted register access.
#[repr(C)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    pub flags: HvX64RegisterInterceptMessageFlags,
    pub rsvd: u8,
    pub rsvd2: u16,
    /// The register that was accessed.
    pub register_name: HvX64RegisterName,
    /// For writes, carries the source value (see
    /// [`HvX64RegisterAccessInfo::new_source_value`]).
    pub access_info: HvX64RegisterAccessInfo,
}
3595
/// Raw 128-bit access information for a register intercept.
#[repr(transparent)]
#[derive(IntoBytes, Immutable, FromBytes)]
pub struct HvX64RegisterAccessInfo(u128);

impl HvX64RegisterAccessInfo {
    /// Wraps a register value (the source of a write) as access info.
    pub fn new_source_value(source_value: HvRegisterValue) -> Self {
        Self(source_value.as_u128())
    }
}
3605
// Interrupt delivery types, used by HvInterruptControl. The x64 and ARM64
// value spaces intentionally overlap at 0 (fixed delivery).
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvInterruptType : u32  {
        #![expect(non_upper_case_globals)]
        HvArm64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeFixed = 0x0000,
        HvX64InterruptTypeLowestPriority = 0x0001,
        HvX64InterruptTypeSmi = 0x0002,
        HvX64InterruptTypeRemoteRead = 0x0003,
        HvX64InterruptTypeNmi = 0x0004,
        HvX64InterruptTypeInit = 0x0005,
        HvX64InterruptTypeSipi = 0x0006,
        HvX64InterruptTypeExtInt = 0x0007,
        HvX64InterruptTypeLocalInt0 = 0x0008,
        HvX64InterruptTypeLocalInt1 = 0x0009,
    }
}
3623
/// Interrupt control register, shared between architectures.
///
/// The declaration uses the fact the bits for the different
/// architectures don't intersect. When (if ever) they do,
/// will need to come up with a more elaborate abstraction.
/// The other possible downside is the lack of the compile-time
/// checks as adding that will require `guest_arch` support and
/// a large refactoring. To sum up, choosing expediency.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInterruptControl {
    // Raw interrupt type; access through the typed
    // `interrupt_type`/`set_interrupt_type`/`with_interrupt_type` helpers.
    interrupt_type_value: u32,
    pub x86_level_triggered: bool,
    pub x86_logical_destination_mode: bool,
    pub arm64_asserted: bool,
    #[bits(29)]
    pub unused: u32,
}
3640
3641impl HvInterruptControl {
3642    pub fn interrupt_type(&self) -> HvInterruptType {
3643        HvInterruptType(self.interrupt_type_value())
3644    }
3645
3646    pub fn set_interrupt_type(&mut self, ty: HvInterruptType) {
3647        self.set_interrupt_type_value(ty.0)
3648    }
3649
3650    pub fn with_interrupt_type(self, ty: HvInterruptType) -> Self {
3651        self.with_interrupt_type_value(ty.0)
3652    }
3653}
3654
/// The `HvRegisterVsmCapabilities` VSM capabilities register layout.
#[bitfield(u64)]
pub struct HvRegisterVsmCapabilities {
    pub dr6_shared: bool,
    /// Bitmask of VTLs for which MBEC is available.
    pub mbec_vtl_mask: u16,
    pub deny_lower_vtl_startup: bool,
    pub supervisor_shadow_stack: bool,
    pub hardware_hvpt_available: bool,
    pub software_hvpt_available: bool,
    #[bits(6)]
    pub hardware_hvpt_range_bits: u8,
    pub intercept_page_available: bool,
    pub return_action_available: bool,
    /// If the VTL0 view of memory is mapped to the high address space, which is
    /// the highest legal physical address bit.
    ///
    /// Only available in VTL2.
    pub vtl0_alias_map_available: bool,
    /// If the [`HvRegisterVsmPartitionConfig`] register has support for
    /// `intercept_not_present`.
    ///
    /// Only available in VTL2.
    pub intercept_not_present_available: bool,
    pub install_intercept_ex: bool,
    /// Only available in VTL2.
    pub intercept_system_reset_available: bool,
    #[bits(1)]
    pub reserved1: u8,
    pub proxy_interrupt_redirect_available: bool,
    #[bits(29)]
    pub reserved2: u64,
}
3686
/// The `HvRegisterVsmPartitionConfig` register layout, used by a higher VTL to
/// configure protections and intercepts for lower VTLs.
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionConfig {
    pub enable_vtl_protection: bool,
    #[bits(4)]
    pub default_vtl_protection_mask: u8,
    pub zero_memory_on_reset: bool,
    pub deny_lower_vtl_startup: bool,
    pub intercept_acceptance: bool,
    pub intercept_enable_vtl_protection: bool,
    pub intercept_vp_startup: bool,
    pub intercept_cpuid_unimplemented: bool,
    pub intercept_unrecoverable_exception: bool,
    pub intercept_page: bool,
    pub intercept_restore_partition_time: bool,
    /// The hypervisor will send all unmapped GPA intercepts to VTL2 rather than
    /// the host.
    pub intercept_not_present: bool,
    pub intercept_system_reset: bool,
    #[bits(48)]
    pub reserved: u64,
}
3708
/// The `HvRegisterVsmPartitionStatus` register layout: per-partition VSM state.
#[bitfield(u64)]
pub struct HvRegisterVsmPartitionStatus {
    /// Bitmask of the VTLs enabled for the partition.
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(4)]
    pub maximum_vtl: u8,
    /// Bitmask of the VTLs for which MBEC is enabled.
    #[bits(16)]
    pub mbec_enabled_vtl_set: u16,
    #[bits(4)]
    pub supervisor_shadow_stack_enabled_vtl_set: u8,
    #[bits(24)]
    pub reserved: u64,
}
3722
3723open_enum! {
3724    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
3725    pub enum HvSnpInterruptInjection : u8  {
3726        #![allow(non_upper_case_globals)]
3727        HvSnpRestricted = 0x0,
3728        HvSnpNormal = 0x1,
3729        HvSnpAlternate = 0x2,
3730        HvSnpSecureAvic = 0x3,
3731    }
3732}
3733
3734// Support for bitfield structures.
3735impl HvSnpInterruptInjection {
3736    const fn from_bits(val: u8) -> Self {
3737        HvSnpInterruptInjection(val)
3738    }
3739
3740    const fn into_bits(self) -> u8 {
3741        self.0
3742    }
3743}
3744
/// The `HvRegisterGuestVsmPartitionConfig` register layout.
#[bitfield(u64)]
pub struct HvRegisterGuestVsmPartitionConfig {
    #[bits(4)]
    pub maximum_vtl: u8,
    /// SNP interrupt injection mode for VTL0.
    #[bits(2)]
    pub vtl0_interrupt_injection: HvSnpInterruptInjection,
    /// SNP interrupt injection mode for VTL1.
    #[bits(2)]
    pub vtl1_interrupt_injection: HvSnpInterruptInjection,
    #[bits(56)]
    pub reserved: u64,
}
3756
/// The `HvRegisterVsmVpStatus` register layout: per-VP VSM state.
#[bitfield(u64)]
pub struct HvRegisterVsmVpStatus {
    /// The VTL the VP is currently running at.
    #[bits(4)]
    pub active_vtl: u8,
    pub active_mbec_enabled: bool,
    #[bits(11)]
    pub reserved_mbz0: u16,
    /// Bitmask of the VTLs enabled on this VP.
    #[bits(16)]
    pub enabled_vtl_set: u16,
    #[bits(32)]
    pub reserved_mbz1: u32,
}
3769
/// The `HvRegisterVsmCodePageOffsets` register layout: offsets of the VTL
/// call/return sequences within the hypercall code page.
#[bitfield(u64)]
pub struct HvRegisterVsmCodePageOffsets {
    #[bits(12)]
    pub call_offset: u16,
    #[bits(12)]
    pub return_offset: u16,
    #[bits(40)]
    pub reserved: u64,
}
3779
/// Saved state for a single synthetic timer.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvStimerState {
    /// Nonzero when an expiration message could not yet be delivered.
    pub undelivered_message_pending: u32,
    pub reserved: u32,
    /// Raw value of the timer's config register.
    pub config: u64,
    /// Raw value of the timer's count register.
    pub count: u64,
    pub adjustment: u64,
    pub undelivered_expiration_time: u64,
}
3790
/// Saved state for all four synthetic timers of a VP.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvSyntheticTimersState {
    pub timers: [HvStimerState; 4],
    pub reserved: [u64; 5],
}
3797
/// The internal activity register layout: suspend states of a VP.
#[bitfield(u64)]
pub struct HvInternalActivityRegister {
    /// VP is suspended awaiting startup (e.g. INIT/SIPI on x64).
    pub startup_suspend: bool,
    pub halt_suspend: bool,
    pub idle_suspend: bool,
    #[bits(61)]
    pub reserved: u64,
}
3806
3807#[bitfield(u64)]
3808pub struct HvSynicSint {
3809    pub vector: u8,
3810    _reserved: u8,
3811    pub masked: bool,
3812    pub auto_eoi: bool,
3813    pub polling: bool,
3814    _reserved2: bool,
3815    pub proxy: bool,
3816    #[bits(43)]
3817    _reserved2: u64,
3818}
3819
/// SynIC control (SCONTROL) register layout.
#[bitfield(u64)]
pub struct HvSynicScontrol {
    /// Global enable for the SynIC.
    pub enabled: bool,
    #[bits(63)]
    _reserved: u64,
}
3826
/// Common layout of the SynIC SIMP (message page) and SIEFP (event flags page)
/// registers: an enable bit plus the page's GPN.
#[bitfield(u64)]
pub struct HvSynicSimpSiefp {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    /// Guest page number of the backing page.
    #[bits(52)]
    pub base_gpn: u64,
}
3835
/// Synthetic timer configuration register layout.
#[bitfield(u64)]
pub struct HvSynicStimerConfig {
    pub enabled: bool,
    /// Periodic rather than one-shot.
    pub periodic: bool,
    pub lazy: bool,
    pub auto_enable: bool,
    // Note: On ARM64 the top 3 bits of apic_vector are reserved.
    pub apic_vector: u8,
    /// Deliver directly via the given vector instead of a SINT message.
    pub direct_mode: bool,
    #[bits(3)]
    pub _reserved1: u8,
    /// The SINT used for expiration messages when not in direct mode.
    #[bits(4)]
    pub sint: u8,
    #[bits(44)]
    pub _reserved2: u64,
}
3852
// Values for the 3-bit `event_type` field of the pending event registers
// (see `HvX64PendingEventReg0`).
pub const HV_X64_PENDING_EVENT_EXCEPTION: u8 = 0;
pub const HV_X64_PENDING_EVENT_MEMORY_INTERCEPT: u8 = 1;
pub const HV_X64_PENDING_EVENT_NESTED_MEMORY_INTERCEPT: u8 = 2;
pub const HV_X64_PENDING_EVENT_VIRTUALIZATION_FAULT: u8 = 3;
pub const HV_X64_PENDING_EVENT_HYPERCALL_OUTPUT: u8 = 4;
pub const HV_X64_PENDING_EVENT_EXT_INT: u8 = 5;
pub const HV_X64_PENDING_EVENT_SHADOW_IPT: u8 = 6;
3860
/// Provides information about an exception
/// (`HV_X64_PENDING_EVENT_EXCEPTION`).
#[bitfield(u128)]
pub struct HvX64PendingExceptionEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    pub deliver_error_code: bool,
    #[bits(7)]
    pub reserved1: u8,
    /// The exception vector.
    pub vector: u16,
    pub error_code: u32,
    pub exception_parameter: u64,
}
3877
/// Provides information about a virtualization fault
/// (`HV_X64_PENDING_EVENT_VIRTUALIZATION_FAULT`).
#[bitfield(u128)]
pub struct HvX64PendingVirtualizationFaultEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,

    pub reserved1: u8,
    pub parameter0: u16,
    pub code: u32,
    pub parameter1: u64,
}
3892
/// Part of [`HvX64PendingEventMemoryIntercept`]: the common first byte shared
/// by all pending-event layouts.
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptPendingEventHeader {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    _reserved0: u8,
}
3903
/// Part of [`HvX64PendingEventMemoryIntercept`]
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryInterceptAccessFlags {
    /// Indicates if the guest linear address is valid.
    pub guest_linear_address_valid: bool,
    /// Indicates that the memory intercept was caused by an access to a guest physical address
    /// (instead of a page table as part of a page table walk).
    pub caused_by_gpa_access: bool,
    #[bits(6)]
    _reserved1: u8,
}
3916
/// Provides information about a memory intercept
/// (`HV_X64_PENDING_EVENT_MEMORY_INTERCEPT`); 32 bytes, asserted below.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventMemoryIntercept {
    pub event_header: HvX64PendingEventMemoryInterceptPendingEventHeader,
    /// VTL at which the memory intercept is targeted.
    /// Note: This field must be in Reg0.
    pub target_vtl: u8,
    /// Type of the memory access.
    pub access_type: HvInterceptAccessType,
    pub access_flags: HvX64PendingEventMemoryInterceptAccessFlags,
    pub _reserved2: u32,
    /// The guest linear address that caused the fault.
    pub guest_linear_address: u64,
    /// The guest physical address that caused the memory intercept.
    pub guest_physical_address: u64,
    pub _reserved3: u64,
}
const_assert!(size_of::<HvX64PendingEventMemoryIntercept>() == 0x20);
3936
3937//
3938// Provides information about pending hypercall output.
3939//
3940#[bitfield(u128)]
3941pub struct HvX64PendingHypercallOutputEvent {
3942    pub event_pending: bool,
3943    #[bits(3)]
3944    pub event_type: u8,
3945    #[bits(4)]
3946    pub reserved0: u8,
3947
3948    // Whether the hypercall has been retired.
3949    pub retired: bool,
3950
3951    #[bits(23)]
3952    pub reserved1: u32,
3953
3954    // Indicates the number of bytes to be written starting from OutputGpa.
3955    pub output_size: u32,
3956
3957    // Indicates the output GPA, which is not required to be page-aligned.
3958    pub output_gpa: u64,
3959}
3960
// Provides information about a directly asserted ExtInt
// (HV_X64_PENDING_EVENT_EXT_INT).
#[bitfield(u128)]
pub struct HvX64PendingExtIntEvent {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved0: u8,
    /// The vector to deliver.
    pub vector: u8,
    #[bits(48)]
    pub reserved1: u64,
    pub reserved2: u64,
}
3974
// Provides information about pending IPT shadowing
// (HV_X64_PENDING_EVENT_SHADOW_IPT).
// NOTE(review): `event_type` is 4 bits here while every sibling pending-event
// layout uses 3 bits — the widths still sum to 64, but confirm against the
// TLFS that this is intentional.
#[bitfield(u128)]
pub struct HvX64PendingShadowIptEvent {
    pub event_pending: bool,
    #[bits(4)]
    pub event_type: u8,
    #[bits(59)]
    pub reserved0: u64,

    pub reserved1: u64,
}
3986
/// Generic view of the first pending-event register: the common header bits
/// plus the type-specific payload. Reinterpret according to `event_type`.
#[bitfield(u128)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEventReg0 {
    pub event_pending: bool,
    #[bits(3)]
    pub event_type: u8,
    #[bits(4)]
    pub reserved: u8,
    #[bits(120)]
    pub data: u128,
}
3998
/// A full pending event: two 128-bit registers (32 bytes, asserted below).
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingEvent {
    pub reg_0: HvX64PendingEventReg0,
    pub reg_1: AlignedU128,
}
const_assert!(size_of::<HvX64PendingEvent>() == 0x20);
4006
4007impl From<HvX64PendingExceptionEvent> for HvX64PendingEvent {
4008    fn from(exception_event: HvX64PendingExceptionEvent) -> Self {
4009        HvX64PendingEvent {
4010            reg_0: HvX64PendingEventReg0::from(u128::from(exception_event)),
4011            reg_1: 0u128.into(),
4012        }
4013    }
4014}
4015
/// The pending interruption register layout.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64PendingInterruptionRegister {
    pub interruption_pending: bool,
    #[bits(3)]
    pub interruption_type: u8,
    pub deliver_error_code: bool,
    #[bits(4)]
    pub instruction_length: u8,
    pub nested_event: bool,
    #[bits(6)]
    pub reserved: u8,
    pub interruption_vector: u16,
    pub error_code: u32,
}
4031
/// The interrupt state register layout.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64InterruptStateRegister {
    /// In an interrupt shadow (e.g. after MOV SS/STI on x64).
    pub interrupt_shadow: bool,
    pub nmi_masked: bool,
    #[bits(62)]
    pub reserved: u64,
}
4040
/// Hints for the instruction emulator.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvInstructionEmulatorHintsRegister {
    /// Indicates whether any secure VTL is enabled for the partition.
    pub partition_secure_vtl_enabled: bool,
    /// Indicates whether kernel or user execute control architecturally
    /// applies to execute accesses.
    pub mbec_user_execute_control: bool,
    #[bits(62)]
    pub _padding: u64,
}
4052
// Event kinds for HvAarch64PendingEventHeader::event_type.
open_enum! {
    #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
    pub enum HvAarch64PendingEventType: u8 {
        EXCEPTION = 0,
        SYNTHETIC_EXCEPTION = 1,
        HYPERCALL_OUTPUT = 2,
    }
}
4061
4062// Support for bitfield structures.
4063impl HvAarch64PendingEventType {
4064    const fn from_bits(val: u8) -> Self {
4065        HvAarch64PendingEventType(val)
4066    }
4067
4068    const fn into_bits(self) -> u8 {
4069        self.0
4070    }
4071}
4072
4073#[bitfield[u8]]
4074#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
4075pub struct HvAarch64PendingEventHeader {
4076    #[bits(1)]
4077    pub event_pending: bool,
4078    #[bits(3)]
4079    pub event_type: HvAarch64PendingEventType,
4080    #[bits(4)]
4081    pub reserved: u8,
4082}
4083
/// AArch64 pending exception event payload.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingExceptionEvent {
    pub header: HvAarch64PendingEventHeader,
    pub _padding: [u8; 7],
    /// Exception syndrome (ESR-style value — confirm encoding against the TLFS).
    pub syndrome: u64,
    pub fault_address: u64,
}
4092
4093#[bitfield[u8]]
4094#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
4095pub struct HvAarch64PendingHypercallOutputEventFlags {
4096    #[bits(1)]
4097    pub retired: u8,
4098    #[bits(7)]
4099    pub reserved: u8,
4100}
4101
/// AArch64 pending hypercall output event payload.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingHypercallOutputEvent {
    pub header: HvAarch64PendingEventHeader,
    pub flags: HvAarch64PendingHypercallOutputEventFlags,
    pub reserved: u16,
    /// Number of bytes to be written starting at `output_gpa`.
    pub output_size: u32,
    /// Output GPA; not required to be page-aligned.
    pub output_gpa: u64,
}
4111
/// Generic AArch64 pending event: header plus raw payload bytes, reinterpreted
/// per `header.event_type`.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64PendingEvent {
    pub header: HvAarch64PendingEventHeader,
    pub event_data: [u8; 15],
    pub _padding: [u64; 2],
}
4119
/// GPA mapping/protection flags used by the map-GPA hypercalls and VTL
/// protection masks.
#[bitfield(u32)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMapGpaFlags {
    pub readable: bool,
    pub writable: bool,
    pub kernel_executable: bool,
    pub user_executable: bool,
    pub supervisor_shadow_stack: bool,
    pub paging_writability: bool,
    pub verify_paging_writability: bool,
    #[bits(8)]
    _padding0: u32,
    pub adjustable: bool,
    #[bits(16)]
    _padding1: u32,
}
4136
/// [`HvMapGpaFlags`] with no permissions set
pub const HV_MAP_GPA_PERMISSIONS_NONE: HvMapGpaFlags = HvMapGpaFlags::new();
/// [`HvMapGpaFlags`] with read, write, and both execute permissions set.
pub const HV_MAP_GPA_PERMISSIONS_ALL: HvMapGpaFlags = HvMapGpaFlags::new()
    .with_readable(true)
    .with_writable(true)
    .with_kernel_executable(true)
    .with_user_executable(true);
4144
/// Full monitor page layout (monitored notification mechanism).
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPage {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    /// 4 groups of 32 trigger flags each (pending/armed bitmaps).
    pub trigger_group: [HvMonitorTriggerGroup; 4],
    pub reserved2: [u64; 3],
    pub next_check_time: [[u32; 32]; 4],
    pub latency: [[u16; 32]; 4],
    pub reserved3: [u64; 32],
    /// Per-flag connection parameters, indexed [group][flag].
    pub parameter: [[HvMonitorParameter; 32]; 4],
    // Pads the structure out to a full 4KiB page.
    pub reserved4: [u8; 1984],
}
4158
/// Prefix of [`HvMonitorPage`] containing only the trigger state and groups.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorPageSmall {
    pub trigger_state: HvMonitorTriggerState,
    pub reserved1: u32,
    pub trigger_group: [HvMonitorTriggerGroup; 4],
}
4166
/// One monitor trigger group: 32 pending bits and 32 armed bits.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerGroup {
    pub pending: u32,
    pub armed: u32,
}
4173
/// Connection/flag pair associated with one monitor trigger bit.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorParameter {
    pub connection_id: u32,
    pub flag_number: u16,
    pub reserved: u16,
}
4181
/// Monitor page trigger state: which of the 4 groups are enabled.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvMonitorTriggerState {
    #[bits(4)]
    pub group_enable: u32,
    #[bits(28)]
    pub reserved: u32,
}
4190
/// ACPI PM timer assist information register layout.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvPmTimerInfo {
    /// I/O port of the PM timer.
    #[bits(16)]
    pub port: u16,
    /// Set for a 24-bit timer (as opposed to 32-bit).
    #[bits(1)]
    pub width_24: bool,
    #[bits(1)]
    pub enabled: bool,
    #[bits(14)]
    pub reserved1: u32,
    #[bits(32)]
    pub reserved2: u32,
}
4205
/// SEV control register layout: enables encrypted state and locates the VMSA.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterSevControl {
    pub enable_encrypted_state: bool,
    #[bits(11)]
    _rsvd1: u64,
    /// Guest page number of the VMSA page.
    #[bits(52)]
    pub vmsa_gpa_page_number: u64,
}
4215
/// SEV secure AVIC register layout.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterSevAvic {
    pub enable_secure_apic: bool,
    #[bits(11)]
    _rsvd1: u64,
    /// Guest page number of the AVIC backing page.
    #[bits(52)]
    pub avic_gpa_page_number: u64,
}
4225
/// Reference TSC MSR layout: enable bit plus the GPN of the reference TSC page.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvRegisterReferenceTsc {
    pub enable: bool,
    #[bits(11)]
    pub reserved_p: u64,
    /// Guest page number of the [`HvReferenceTscPage`].
    #[bits(52)]
    pub gpn: u64,
}
4235
/// Layout of the reference TSC page shared with the guest.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvReferenceTscPage {
    /// Sequence number; readers retry while it changes, and
    /// [`HV_REFERENCE_TSC_SEQUENCE_INVALID`] marks the page invalid.
    pub tsc_sequence: u32,
    pub reserved1: u32,
    pub tsc_scale: u64,
    pub tsc_offset: i64,
    pub timeline_bias: u64,
    pub tsc_multiplier: u64,
    // Pads the structure out to a full 4KiB page.
    pub reserved2: [u64; 507],
}

/// `tsc_sequence` value indicating the reference TSC page is not valid.
pub const HV_REFERENCE_TSC_SEQUENCE_INVALID: u32 = 0;
4249
/// Flags for [`HvX64VmgexitInterceptMessage`].
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageFlags {
    /// Set when the GHCB page contents in the message are valid.
    pub ghcb_page_valid: bool,
    pub ghcb_request_error: bool,
    #[bits(62)]
    _reserved: u64,
}
4258
/// Standard-usage GHCB page fields captured at VMGEXIT.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPageStandard {
    pub ghcb_protocol_version: u16,
    _reserved: [u16; 3],
    pub sw_exit_code: u64,
    pub sw_exit_info1: u64,
    pub sw_exit_info2: u64,
    pub sw_scratch: u64,
}
4269
/// GHCB page contents captured at VMGEXIT: usage discriminator plus the
/// standard-usage fields.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessageGhcbPage {
    pub ghcb_usage: u32,
    _reserved: u32,
    pub standard: HvX64VmgexitInterceptMessageGhcbPageStandard,
}
4277
/// Intercept message for a VMGEXIT from an SEV-ES/SNP guest.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64VmgexitInterceptMessage {
    pub header: HvX64InterceptMessageHeader,
    /// Raw value of the GHCB MSR at the time of the exit.
    pub ghcb_msr: u64,
    pub flags: HvX64VmgexitInterceptMessageFlags,
    /// Valid only when `flags.ghcb_page_valid` is set.
    pub ghcb_page: HvX64VmgexitInterceptMessageGhcbPage,
}

impl MessagePayload for HvX64VmgexitInterceptMessage {}
4288
/// VP assist page register layout: enable bit plus the page's GPN.
#[bitfield(u64)]
pub struct HvRegisterVpAssistPage {
    pub enabled: bool,
    #[bits(11)]
    _reserved: u64,
    #[bits(52)]
    pub gpa_page_number: u64,
}
4297
/// Dirty flags for [`HvX64RegisterPage`], indicating which register groups
/// have been modified.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterPageDirtyFlags {
    pub general_purpose: bool,
    pub instruction_pointer: bool,
    pub xmm: bool,
    pub segments: bool,
    pub flags: bool,
    #[bits(27)]
    reserved: u32,
}
4309
/// x64 register page layout, a page shared with the virtualization stack for
/// fast register access (exactly one 4KiB page, asserted below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvX64RegisterPage {
    pub version: u16,
    // Boolean-style flag stored as a byte — nonzero means valid.
    pub is_valid: u8,
    /// VTL whose state this page reflects.
    pub vtl: u8,
    pub dirty: HvX64RegisterPageDirtyFlags,
    /// General-purpose registers. These are in the order defined by the x86-64
    /// architecture.
    pub gp_registers: [u64; 16],
    pub rip: u64,
    pub rflags: u64,
    pub reserved: u64,
    pub xmm: [u128; 6],
    pub segment: [HvX64SegmentRegister; 6],
    // Misc. control registers (cannot be set via this interface).
    pub cr0: u64,
    pub cr3: u64,
    pub cr4: u64,
    pub cr8: u64,
    pub efer: u64,
    pub dr7: u64,
    pub pending_interruption: HvX64PendingInterruptionRegister,
    pub interrupt_state: HvX64InterruptStateRegister,
    pub instruction_emulation_hints: HvInstructionEmulatorHintsRegister,
    // Pads the structure out to a full 4KiB page.
    pub reserved_end: [u8; 3672],
}

const _: () = assert!(size_of::<HvX64RegisterPage>() == HV_PAGE_SIZE_USIZE);
4339
/// Dirty flags for [`HvAarch64RegisterPage`], indicating which register groups
/// have been modified.
#[bitfield(u32)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64RegisterPageDirtyFlags {
    // Bit 0 is unused so the remaining bits line up with the x64 layout.
    _unused: bool,
    pub instruction_pointer: bool,
    pub processor_state: bool,
    pub control_registers: bool,
    #[bits(28)]
    reserved: u32,
}
4350
/// AArch64 register page layout, a page shared with the virtualization stack
/// for fast register access (exactly one 4KiB page, asserted below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct HvAarch64RegisterPage {
    pub version: u16,
    // Boolean-style flag stored as a byte — nonzero means valid.
    pub is_valid: u8,
    /// VTL whose state this page reflects.
    pub vtl: u8,
    pub dirty: HvAarch64RegisterPageDirtyFlags,
    // Reserved.
    pub _rsvd: [u64; 33],
    // Instruction pointer.
    pub pc: u64,
    // Processor state.
    pub cpsr: u64,
    // Control registers.
    pub sctlr_el1: u64,
    pub tcr_el1: u64,
    // Reserved; pads the structure out to a full 4KiB page.
    pub reserved_end: [u8; 3792],
}

const _: () = assert!(size_of::<HvAarch64RegisterPage>() == HV_PAGE_SIZE_USIZE);
4372
/// The `HvRegisterVsmWpWaitForTlbLock` register layout.
#[bitfield(u64)]
pub struct HvRegisterVsmWpWaitForTlbLock {
    pub wait: bool,
    #[bits(63)]
    _reserved: u64,
}
4379
/// The `HvRegisterVsmVpSecureVtlConfig` register layout: per-VP configuration
/// applied by a secure VTL to a lower VTL.
#[bitfield(u64)]
pub struct HvRegisterVsmVpSecureVtlConfig {
    pub mbec_enabled: bool,
    pub tlb_locked: bool,
    pub supervisor_shadow_stack_enabled: bool,
    pub hardware_hvpt_enabled: bool,
    #[bits(60)]
    _reserved: u64,
}
4389
/// Control-register/MSR intercept control register layout: each bit enables
/// intercepts for the corresponding register or MSR access.
#[bitfield(u64)]
pub struct HvRegisterCrInterceptControl {
    pub cr0_write: bool,
    pub cr4_write: bool,
    pub xcr0_write: bool,
    pub ia32_misc_enable_read: bool,
    pub ia32_misc_enable_write: bool,
    pub msr_lstar_read: bool,
    pub msr_lstar_write: bool,
    pub msr_star_read: bool,
    pub msr_star_write: bool,
    pub msr_cstar_read: bool,
    pub msr_cstar_write: bool,
    pub apic_base_msr_read: bool,
    pub apic_base_msr_write: bool,
    pub msr_efer_read: bool,
    pub msr_efer_write: bool,
    pub gdtr_write: bool,
    pub idtr_write: bool,
    pub ldtr_write: bool,
    pub tr_write: bool,
    pub msr_sysenter_cs_write: bool,
    pub msr_sysenter_eip_write: bool,
    pub msr_sysenter_esp_write: bool,
    pub msr_sfmask_write: bool,
    pub msr_tsc_aux_write: bool,
    pub msr_sgx_launch_control_write: bool,
    pub msr_xss_write: bool,
    pub msr_scet_write: bool,
    pub msr_pls_ssp_write: bool,
    pub msr_interrupt_ssp_table_addr_write: bool,
    #[bits(35)]
    _rsvd_z: u64,
}
4424
4425#[repr(C)]
4426#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
4427pub struct HvX64InterruptControllerState {
4428    pub apic_id: u32,
4429    pub apic_version: u32,
4430    pub apic_ldr: u32,
4431    pub apic_dfr: u32,
4432    pub apic_spurious: u32,
4433    pub apic_isr: [u32; 8],
4434    pub apic_tmr: [u32; 8],
4435    pub apic_irr: [u32; 8],
4436    pub apic_esr: u32,
4437    pub apic_icr_high: u32,
4438    pub apic_icr_low: u32,
4439    pub apic_lvt_timer: u32,
4440    pub apic_lvt_thermal: u32,
4441    pub apic_lvt_perfmon: u32,
4442    pub apic_lvt_lint0: u32,
4443    pub apic_lvt_lint1: u32,
4444    pub apic_lvt_error: u32,
4445    pub apic_lvt_cmci: u32,
4446    pub apic_error_status: u32,
4447    pub apic_initial_count: u32,
4448    pub apic_counter_value: u32,
4449    pub apic_divide_configuration: u32,
4450    pub apic_remote_read: u32,
4451}