hcl/
protocol.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Structures and definitions used between the underhill kernel and HvLite.
5
6#![expect(non_camel_case_types, missing_docs)]
7
8use bitfield_struct::bitfield;
9use hvdef::HV_MESSAGE_SIZE;
10use hvdef::hypercall::HvInputVtl;
11use libc::c_void;
12use zerocopy::FromBytes;
13use zerocopy::Immutable;
14use zerocopy::IntoBytes;
15use zerocopy::KnownLayout;
16
/// GVA-to-GPA translation pair, expressed as page frame numbers.
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
pub struct hcl_translate_address_info {
    /// Guest virtual address PFN to translate.
    pub gva_pfn: u64,
    /// Resulting guest physical address PFN.
    pub gpa_pfn: u64,
}
23
/// Parameters for signaling an event directly to a VTL on a given VP.
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
pub struct hcl_signal_event_direct_t {
    /// Target virtual processor index.
    pub vp: u32,
    /// Event flag number to signal.
    pub flag: u16,
    /// Target SynIC interrupt source (SINT).
    pub sint: u8,
    /// Target VTL.
    pub vtl: u8,
    pub pad: u32,
    pub pad1: u16,
    pub pad2: u8,
    /// Output: nonzero if the event was newly signaled (was not already
    /// pending) — NOTE(review): assumed from naming; confirm against driver.
    pub newly_signaled: u8,
}
36
/// Number of [`hv_vp_assist_page_signal_event`] slots in the VP assist page.
pub const HV_VP_ASSIST_PAGE_SIGNAL_EVENT_COUNT: usize = 16;
/// `action_type` value identifying a signal-event action.
pub const HV_VP_ASSIST_PAGE_ACTION_TYPE_SIGNAL_EVENT: u64 = 1;
39
/// A signal-event action record stored in the VP assist page.
#[repr(C)]
#[derive(Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct hv_vp_assist_page_signal_event {
    /// Action discriminant; [`HV_VP_ASSIST_PAGE_ACTION_TYPE_SIGNAL_EVENT`]
    /// for this record type.
    pub action_type: u64,
    /// Target virtual processor index.
    pub vp: u32,
    /// Target VTL.
    pub vtl: u8,
    /// Target SynIC interrupt source (SINT).
    pub sint: u8,
    /// Event flag number to signal.
    pub flag: u16,
}
49
/// Parameters for posting a message directly to a VTL's SINT on a given VP.
///
/// Not `Default`/zerocopy: contains a raw pointer to the caller-owned
/// message payload.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct hcl_post_message_direct_t {
    /// Target virtual processor index.
    pub vp: u32,
    /// Target SynIC interrupt source (SINT).
    pub sint: u32,
    pub pad: u32,
    pub pad2: u8,
    /// Target VTL.
    pub vtl: u8,
    /// Length in bytes of the buffer at `message`.
    pub size: u16,
    /// Pointer to the message payload (`size` bytes, caller-owned).
    pub message: *const u8,
}
61
/// An inclusive range of page frame numbers, `start_pfn..=last_pfn`.
#[repr(C, packed)]
#[derive(Copy, Clone, Debug, Default)]
pub struct hcl_pfn_range_t {
    /// First PFN in the range.
    pub start_pfn: u64,
    /// Last PFN in the range (inclusive).
    pub last_pfn: u64,
}
68
/// x86-64 CPU context stored in the run page's `context` area.
#[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C)]
pub struct hcl_cpu_context_x64 {
    /// General-purpose registers, indexed by the `RAX`..`R15` constants
    /// defined below (slot 4 holds CR2 here; see the `CR2` constant).
    pub gps: [u64; 16],
    /// Legacy FXSAVE area (x87/SSE state).
    pub fx_state: x86defs::xsave::Fxsave,
    pub reserved: [u8; 384],
}

// The context area in the run page is exactly 1024 bytes.
const _: () = assert!(size_of::<hcl_cpu_context_x64>() == 1024);
78
/// AArch64 CPU context stored in the run page's `context` area.
#[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
#[repr(C)]
// NOTE: x18 is managed by the hypervisor. It is assumed here to be available
// for easier offset arithmetic.
pub struct hcl_cpu_context_aarch64 {
    /// General-purpose registers x0..=x30 (x18 slot present but
    /// hypervisor-managed; see note above).
    pub x: [u64; 31],
    pub _rsvd: u64,
    /// SIMD/FP registers q0..=q31.
    pub q: [u128; 32],
    pub reserved: [u8; 256],
}

// The context area in the run page is exactly 1024 bytes.
const _: () = assert!(size_of::<hcl_cpu_context_aarch64>() == 1024);
91
// Indices into `hcl_cpu_context_x64::gps` (and the corresponding TDX
// guest-state GPR list).
pub const RAX: usize = 0;
pub const RCX: usize = 1;
pub const RDX: usize = 2;
pub const RBX: usize = 3;
pub const CR2: usize = 4; // RSP on TdxL2EnterGuestState, CR2 on hcl_cpu_context_x64
pub const RBP: usize = 5;
pub const RSI: usize = 6;
pub const RDI: usize = 7;
pub const R8: usize = 8;
pub const R9: usize = 9;
pub const R10: usize = 10;
pub const R11: usize = 11;
pub const R12: usize = 12;
pub const R13: usize = 13;
pub const R14: usize = 14;
pub const R15: usize = 15;

/// Size in bytes of the VTL return action buffer in [`hcl_run`].
pub const VTL_RETURN_ACTION_SIZE: usize = 256;
110
/// Kernel IPI offloading flags
#[bitfield(u8)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct hcl_intr_offload_flags {
    /// Enable the base level of kernel offloading support. Requires vAPIC to be enabled.
    /// HLT and Idle are accelerated by the kernel. When halted, an interrupt may be injected
    /// entirely in kernel, bypassing user-space.
    pub offload_intr_inject: bool,
    /// Handle the X2 APIC ICR register in kernel
    pub offload_x2apic: bool,
    #[bits(3)]
    reserved: u8,
    /// Halt, due to other reason. Kernel cannot clear this state.
    pub halted_other: bool,
    /// Halt, due to HLT instruction. Kernel can clear this state.
    pub halted_hlt: bool,
    /// Halt, due to guest idle. Kernel can clear this state.
    pub halted_idle: bool,
}
130
/// Per-VP run state shared between user mode and the mshv_vtl kernel driver.
#[repr(C)]
pub struct hcl_run {
    /// Run cancellation flag — NOTE(review): presumably set (see
    /// `hcl_kick_cpus_flags::cancel_run`) to abort a pending run; confirm
    /// against driver.
    pub cancel: u32,
    /// Number of valid bytes in `vtl_ret_actions`.
    pub vtl_ret_action_size: u32,
    /// Run flags; see [`MSHV_VTL_RUN_FLAG_HALTED`].
    pub flags: u32,
    /// Nonzero when `proxy_irr` should be scanned — assumed from naming;
    /// verify against the kernel side.
    pub scan_proxy_irr: u8,
    /// Kernel interrupt-offload control and halt state bits.
    pub offload_flags: hcl_intr_offload_flags,
    pub pad: [u8; 1],
    /// VTL0 enter modes (first and interrupted); see [`EnterModes`].
    pub mode: EnterModes,
    /// Buffer holding the exit message, one HV message in size.
    pub exit_message: [u8; HV_MESSAGE_SIZE],
    /// CPU context area; 1024 bytes, matching the `hcl_cpu_context_*` layouts.
    pub context: [u8; 1024],
    /// VTL return action buffer; the first `vtl_ret_action_size` bytes are valid.
    pub vtl_ret_actions: [u8; VTL_RETURN_ACTION_SIZE],
    /// 256-bit (8 x u32) proxied interrupt request bitmap.
    pub proxy_irr: [u32; 8],
    /// VTL to enter.
    pub target_vtl: HvInputVtl,
    /// 256-bit bitmap of blocked proxied interrupts.
    pub proxy_irr_blocked: [u32; 8],
    /// 256-bit proxied interrupt bitmap reported on exit.
    pub proxy_irr_exit: [u32; 8],
}

// The size of hcl_run must be less than or equal to a single 4K page.
const _: () = assert!(size_of::<hcl_run>() <= 4096);

/// `hcl_run::flags` bit: the VP is halted.
pub const MSHV_VTL_RUN_FLAG_HALTED: u32 = 1 << 0;
153
/// Associates a file descriptor to poll with a CPU.
#[repr(C)]
pub struct hcl_set_poll_file {
    /// CPU index the file is polled for.
    pub cpu: i32,
    /// File descriptor to poll.
    pub fd: i32,
}
159
/// Configures the set of hypercalls user mode is allowed to issue
/// through [`hcl_hvcall`].
#[repr(C)]
pub struct hcl_hvcall_setup {
    /// Number of entries in the bitmap at `allow_bitmap_ptr`.
    pub allow_bitmap_size: u64,
    /// Pointer to a bitmap of allowed hypercall codes (caller-owned).
    pub allow_bitmap_ptr: *const u64,
}
165
/// A hypercall request/response passed through the driver.
#[repr(C)]
pub struct hcl_hvcall {
    /// Hypercall input control word (call code, counts, flags).
    pub control: hvdef::hypercall::Control,
    /// Length in bytes of the buffer at `input_data`.
    pub input_size: usize,
    /// Pointer to the hypercall input buffer (caller-owned).
    pub input_data: *const c_void,
    /// Output: hypercall completion status.
    pub status: hvdef::hypercall::HypercallOutput,
    /// Length in bytes of the buffer at `output_data`.
    pub output_size: usize,
    /// Pointer to the hypercall output buffer (caller-owned).
    pub output_data: *const c_void,
}
175
// mmap offsets for mapping special pages from the mshv_vtl driver. Each
// page type lives at a distinct 64KiB-aligned file offset.
pub const HCL_REG_PAGE_OFFSET: i64 = 1 << 16;
pub const HCL_VMSA_PAGE_OFFSET: i64 = 2 << 16;
pub const MSHV_APIC_PAGE_OFFSET: i64 = 3 << 16;
pub const HCL_VMSA_GUEST_VSM_PAGE_OFFSET: i64 = 4 << 16;
180
open_enum::open_enum! {
    /// 4 bits represent VTL0 enter mode.
    pub enum EnterMode: u8 {
        /// "Fast" mode: Enters VTL0 with scheduler ticks on, no extra cost on turning off the scheduler
        /// timers, therefore it's fast.
        FAST = 0,
        /// "Play idle" mode: Enters VTL0 with scheduler ticks off (setting the current kernel thread to
        /// idle).
        PLAY_IDLE = 1,
        /// "Idle to VTL0 idle" mode: Switches to the idle thread, and the idle thread enters VTL0 with
        /// scheduler ticks off.
        IDLE_TO_VTL0 = 2,
    }
}
195
196impl EnterMode {
197    const fn into_bits(self) -> u8 {
198        self.0
199    }
200
201    const fn from_bits(bits: u8) -> Self {
202        Self(bits)
203    }
204}
205
/// Controls how to enter VTL0.
#[bitfield(u8)]
pub struct EnterModes {
    /// [`EnterMode`] used when entering VTL0 the first time.
    #[bits(4)]
    pub first: EnterMode,
    /// [`EnterMode`] used when interrupted from the previous enter to VTL0.
    #[bits(4)]
    pub second: EnterMode,
}
216
/// The register values returned from a TDG.VP.ENTER call. These are readable
/// via mmaping the mshv_vtl driver inside `hcl_run`, and returned on a run_vp
/// ioctl exit. See the TDX ABI specification for output operands for
/// TDG.VP.ENTER.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct tdx_tdg_vp_enter_exit_info {
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
}
236
/// Flags controlling cache maintenance around TDG.VP.ENTER.
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct tdx_vp_state_flags {
    /// Issue a cache flush for a WBINVD before calling VP.ENTER.
    pub wbinvd: bool,
    /// Issue a cache flush for a WBNOINVD before calling VP.ENTER.
    pub wbnoinvd: bool,
    #[bits(62)]
    reserved: u64,
}
247
/// Additional VP state that is saved/restored across TDG.VP.ENTER.
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct tdx_vp_state {
    pub msr_kernel_gs_base: u64,
    pub msr_star: u64,
    pub msr_lstar: u64,
    pub msr_sfmask: u64,
    pub msr_xss: u64,
    pub cr2: u64,
    pub msr_tsc_aux: u64,
    /// Cache-flush control bits; see [`tdx_vp_state_flags`].
    pub flags: tdx_vp_state_flags,
}
261
/// Full TDX VP context area within the run page: VP.ENTER exit info, extra
/// saved VP state, the L2 guest GPR list, and FP state. The padding pins
/// each section to the offsets expected by the kernel (see asserts below).
#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct tdx_vp_context {
    /// Output registers from the last TDG.VP.ENTER.
    pub exit_info: tdx_tdg_vp_enter_exit_info,
    pub pad1: [u8; 48],
    /// Extra VP state saved/restored across TDG.VP.ENTER.
    pub vp_state: tdx_vp_state,
    pub pad2: [u8; 32],
    /// RCX input flags for TDG.VP.ENTER.
    pub entry_rcx: x86defs::tdx::TdxVmFlags,
    /// L2 guest GPR list passed to TDG.VP.ENTER.
    pub gpr_list: x86defs::tdx::TdxL2EnterGuestState,
    pub pad3: [u8; 96],
    /// Legacy FXSAVE area (x87/SSE state).
    pub fx_state: x86defs::xsave::Fxsave,
    pub pad4: [u8; 16],
}

// gpr_list must sit at offset 240 (240 + 272-byte GPR list == 512), and the
// whole context must be exactly 1024 bytes, matching the kernel layout.
const _: () = assert!(core::mem::offset_of!(tdx_vp_context, gpr_list) + 272 == 512);
const _: () = assert!(size_of::<tdx_vp_context>() == 1024);
278
/// Flags for the kick-CPUs request ([`hcl_kick_cpus`]).
#[bitfield(u64)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct hcl_kick_cpus_flags {
    /// Wait for the other CPUs to acknowledge the kick — assumed from
    /// naming; confirm against driver.
    #[bits(1)]
    pub wait_for_other_cpus: bool,
    /// Also cancel the targeted CPUs' current runs (see `hcl_run::cancel`).
    #[bits(1)]
    pub cancel_run: bool,
    #[bits(62)]
    reserved: u64,
}
289
/// Request to kick a set of CPUs out of their current run.
#[repr(C)]
pub struct hcl_kick_cpus {
    /// Length in bytes of the mask at `cpu_mask`.
    pub len: u64,
    /// Pointer to a bitmask of target CPUs (caller-owned, `len` bytes).
    pub cpu_mask: *const u8,
    /// Behavior flags; see [`hcl_kick_cpus_flags`].
    pub flags: hcl_kick_cpus_flags,
}