hcl/ioctl/
tdx.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Backing for TDX partitions.
5
6use super::Hcl;
7use super::HclVp;
8use super::MshvVtl;
9use super::NoRunner;
10use super::ProcessorRunner;
11use super::hcl_tdcall;
12use super::mshv_tdcall;
13use crate::GuestVtl;
14use crate::protocol::tdx_l2_tsc_deadline_state;
15use crate::protocol::tdx_tdg_vp_enter_exit_info;
16use crate::protocol::tdx_vp_context;
17use crate::protocol::tdx_vp_state;
18use crate::protocol::tdx_vp_state_flags;
19use hv1_structs::VtlArray;
20use hvdef::HvRegisterName;
21use hvdef::HvRegisterValue;
22use memory_range::MemoryRange;
23use sidecar_client::SidecarVp;
24use std::cell::UnsafeCell;
25use std::os::fd::AsRawFd;
26use tdcall::Tdcall;
27use tdcall::tdcall_vp_invgla;
28use tdcall::tdcall_vp_rd;
29use tdcall::tdcall_vp_wr;
30use x86defs::tdx::TdCallResult;
31use x86defs::tdx::TdCallResultCode;
32use x86defs::tdx::TdGlaVmAndFlags;
33use x86defs::tdx::TdVpsClassCode;
34use x86defs::tdx::TdgMemPageAttrWriteR8;
35use x86defs::tdx::TdgMemPageGpaAttr;
36use x86defs::tdx::TdxContextCode;
37use x86defs::tdx::TdxExtendedFieldCode;
38use x86defs::tdx::TdxGlaListInfo;
39use x86defs::tdx::TdxGp;
40use x86defs::tdx::TdxL2Ctls;
41use x86defs::tdx::TdxL2EnterGuestState;
42use x86defs::tdx::TdxVmFlags;
43use x86defs::vmx::ApicPage;
44use x86defs::vmx::VmcsField;
45
46/// Runner backing for TDX partitions.
/// Runner backing for TDX partitions.
pub struct Tdx<'a> {
    // Mapped APIC pages for VTL0 and VTL1, wrapped in `UnsafeCell` because
    // they may be written outside of Rust's knowledge; per the accessors
    // below, they are only dereferenced while this VP is in VTL2, when the
    // processor does not access them concurrently.
    apic_pages: VtlArray<&'a UnsafeCell<ApicPage>, 2>,
}
50
51impl MshvVtl {
52    /// Issues a tdcall to set page attributes.
53    pub fn tdx_set_page_attributes(
54        &self,
55        range: MemoryRange,
56        attributes: TdgMemPageGpaAttr,
57        mask: TdgMemPageAttrWriteR8,
58    ) -> Result<(), TdCallResultCode> {
59        tdcall::set_page_attributes(&mut MshvVtlTdcall(self), range, attributes, mask)
60    }
61
62    /// Issues a tdcall to accept pages, optionally also setting attributes.
63    ///
64    /// These operations are combined because this code tries accepting at 2MB
65    /// granularity first and then falls back to 4KB. A separate call to
66    /// [`Self::tdx_set_page_attributes`] has to re-derive the appropriate
67    /// granularity.
68    pub fn tdx_accept_pages(
69        &self,
70        range: MemoryRange,
71        attributes: Option<(TdgMemPageGpaAttr, TdgMemPageAttrWriteR8)>,
72    ) -> Result<(), tdcall::AcceptPagesError> {
73        let attributes = attributes
74            .map_or(tdcall::AcceptPagesAttributes::None, |(attributes, mask)| {
75                tdcall::AcceptPagesAttributes::Set { attributes, mask }
76            });
77
78        tdcall::accept_pages(&mut MshvVtlTdcall(self), range, attributes)
79    }
80}
81
impl<'a> ProcessorRunner<'a, Tdx<'a>> {
    /// Gets a reference to the TDX VP context that is unioned inside the run
    /// page.
    fn tdx_vp_context(&self) -> &tdx_vp_context {
        // SAFETY: the VP context will not be concurrently accessed by the
        // processor while this VP is in VTL2. This is a TDX partition so the
        // context union should be interpreted as a `tdx_vp_context`.
        unsafe { &*(&raw mut (*self.run.get()).context).cast() }
    }

    /// Gets a mutable reference to the TDX VP context that is unioned inside
    /// the run page.
    fn tdx_vp_context_mut(&mut self) -> &mut tdx_vp_context {
        // SAFETY: the VP context will not be concurrently accessed by the
        // processor while this VP is in VTL2. This is a TDX partition so the
        // context union should be interpreted as a `tdx_vp_context`.
        unsafe { &mut *(&raw mut (*self.run.get()).context).cast() }
    }

    /// Gets a reference to the TDX enter guest state.
    fn tdx_enter_guest_state(&self) -> &TdxL2EnterGuestState {
        &self.tdx_vp_context().gpr_list
    }

    /// Gets a mutable reference to the TDX enter guest state.
    fn tdx_enter_guest_state_mut(&mut self) -> &mut TdxL2EnterGuestState {
        &mut self.tdx_vp_context_mut().gpr_list
    }

    /// Gets a reference to the TDX enter guest state's GP list.
    pub fn tdx_enter_guest_gps(&self) -> &[u64; 16] {
        &self.tdx_enter_guest_state().gps
    }

    /// Gets a mutable reference to the TDX enter guest state's GP list.
    pub fn tdx_enter_guest_gps_mut(&mut self) -> &mut [u64; 16] {
        &mut self.tdx_enter_guest_state_mut().gps
    }

    /// Gets a reference to the tdx exit info from a VP.ENTER call.
    pub fn tdx_vp_enter_exit_info(&self) -> &tdx_tdg_vp_enter_exit_info {
        &self.tdx_vp_context().exit_info
    }

    /// Gets a reference to the tdx APIC page for the given VTL.
    pub fn tdx_apic_page(&self, vtl: GuestVtl) -> &ApicPage {
        // SAFETY: the APIC pages will not be concurrently accessed by the processor
        // while this VP is in VTL2.
        unsafe { &*self.state.apic_pages[vtl].get() }
    }

    /// Gets a mutable reference to the tdx APIC page for the given VTL.
    pub fn tdx_apic_page_mut(&mut self, vtl: GuestVtl) -> &mut ApicPage {
        // SAFETY: the APIC pages will not be concurrently accessed by the processor
        // while this VP is in VTL2.
        unsafe { &mut *self.state.apic_pages[vtl].get() }
    }

    /// Gets a reference to TDX VP specific state.
    fn tdx_vp_state(&self) -> &tdx_vp_state {
        &self.tdx_vp_context().vp_state
    }

    /// Gets a mutable reference to TDX VP specific state.
    fn tdx_vp_state_mut(&mut self) -> &mut tdx_vp_state {
        &mut self.tdx_vp_context_mut().vp_state
    }

    /// Gets the value of CR2 from the shared kernel state.
    pub fn cr2(&self) -> u64 {
        self.tdx_vp_state().cr2
    }

    /// Sets the value of CR2 in the shared kernel state.
    pub fn set_cr2(&mut self, value: u64) {
        self.tdx_vp_state_mut().cr2 = value;
    }

    /// Gets a mutable reference to TDX specific VP flags.
    pub fn tdx_vp_state_flags_mut(&mut self) -> &mut tdx_vp_state_flags {
        &mut self.tdx_vp_state_mut().flags
    }

    /// Gets a reference to the TDX VP entry flags (the RCX value passed to
    /// VP.ENTER, stored in the VP context).
    fn tdx_vp_entry_flags(&self) -> &TdxVmFlags {
        &self.tdx_vp_context().entry_rcx
    }

    /// Gets a mutable reference to the TDX VP entry flags.
    fn tdx_vp_entry_flags_mut(&mut self) -> &mut TdxVmFlags {
        &mut self.tdx_vp_context_mut().entry_rcx
    }

    /// Gets a reference to the TDX L2 TSC deadline state.
    pub fn tdx_l2_tsc_deadline_state(&self) -> &tdx_l2_tsc_deadline_state {
        &self.tdx_vp_context().l2_tsc_deadline
    }

    /// Gets a mutable reference to the TDX L2 TSC deadline state.
    pub fn tdx_l2_tsc_deadline_state_mut(&mut self) -> &mut tdx_l2_tsc_deadline_state {
        &mut self.tdx_vp_context_mut().l2_tsc_deadline
    }

    /// Reads the private registers from the kernel's shared run page into
    /// the given [`TdxPrivateRegs`].
    pub fn read_private_regs(&self, regs: &mut TdxPrivateRegs) {
        // Destructure exhaustively so that adding a field to
        // `TdxL2EnterGuestState` forces this code to be revisited.
        let TdxL2EnterGuestState {
            gps, // Shared between VTLs except for RSP
            rflags,
            rip,
            ssp,
            rvi,
            svi,
            reserved: _reserved,
        } = self.tdx_enter_guest_state();
        regs.rflags = *rflags;
        regs.rip = *rip;
        regs.rsp = gps[TdxGp::RSP];
        regs.ssp = *ssp;
        regs.rvi = *rvi;
        regs.svi = *svi;

        let tdx_vp_state {
            msr_kernel_gs_base,
            msr_star,
            msr_lstar,
            msr_sfmask,
            msr_xss,
            cr2: _cr2, // Shared between VTLs
            msr_tsc_aux,
            flags: _flags, // Global flags
        } = self.tdx_vp_state();
        regs.msr_kernel_gs_base = *msr_kernel_gs_base;
        regs.msr_star = *msr_star;
        regs.msr_lstar = *msr_lstar;
        regs.msr_sfmask = *msr_sfmask;
        regs.msr_xss = *msr_xss;
        regs.msr_tsc_aux = *msr_tsc_aux;

        regs.vp_entry_flags = *self.tdx_vp_entry_flags();
    }

    /// Writes the private registers from the given [`TdxPrivateRegs`] to the
    /// kernel's shared run page.
    pub fn write_private_regs(&mut self, regs: &TdxPrivateRegs) {
        // Destructure exhaustively so that adding a field to
        // `TdxPrivateRegs` forces this code to be revisited.
        let TdxPrivateRegs {
            rflags,
            rip,
            rsp,
            ssp,
            rvi,
            svi,
            msr_kernel_gs_base,
            msr_star,
            msr_lstar,
            msr_sfmask,
            msr_xss,
            msr_tsc_aux,
            vp_entry_flags,
        } = regs;

        let enter_guest_state = self.tdx_enter_guest_state_mut();
        enter_guest_state.rflags = *rflags;
        enter_guest_state.rip = *rip;
        enter_guest_state.ssp = *ssp;
        enter_guest_state.rvi = *rvi;
        enter_guest_state.svi = *svi;
        // RSP is stored in the otherwise-shared GP list.
        enter_guest_state.gps[TdxGp::RSP] = *rsp;

        let vp_state = self.tdx_vp_state_mut();
        vp_state.msr_kernel_gs_base = *msr_kernel_gs_base;
        vp_state.msr_star = *msr_star;
        vp_state.msr_lstar = *msr_lstar;
        vp_state.msr_sfmask = *msr_sfmask;
        vp_state.msr_xss = *msr_xss;
        vp_state.msr_tsc_aux = *msr_tsc_aux;

        *self.tdx_vp_entry_flags_mut() = *vp_entry_flags;
    }

    /// Writes a VMCS field for `vtl` via TDG.VP.WR, updating only the bits in
    /// `mask`. Returns the old value. Panics on any TDX module error.
    fn write_vmcs(&mut self, vtl: GuestVtl, field: VmcsField, mask: u64, value: u64) -> u64 {
        tdcall_vp_wr(
            &mut MshvVtlTdcall(&self.hcl.mshv_vtl),
            vmcs_field_code(field, vtl),
            value,
            mask,
        )
        .expect("fatal vmcs access failure")
    }

    /// Reads a VMCS field for `vtl` via TDG.VP.RD. Panics on any TDX module
    /// error.
    fn read_vmcs(&self, vtl: GuestVtl, field: VmcsField) -> u64 {
        tdcall_vp_rd(
            &mut MshvVtlTdcall(&self.hcl.mshv_vtl),
            vmcs_field_code(field, vtl),
        )
        .expect("fatal vmcs access failure")
    }

    /// Write a 64-bit VMCS field.
    ///
    /// Only updates the bits that are set in `mask`. Returns the old value of
    /// the field.
    ///
    /// Panics if the field is not a 64-bit field, or if there is an error in
    /// the TDX module when writing the field.
    pub fn write_vmcs64(&mut self, vtl: GuestVtl, field: VmcsField, mask: u64, value: u64) -> u64 {
        // Natural-width fields are 64-bit on x64, so accept both widths here.
        assert!(matches!(
            field.field_width(),
            x86defs::vmx::FieldWidth::WidthNatural | x86defs::vmx::FieldWidth::Width64
        ));
        self.write_vmcs(vtl, field, mask, value)
    }

    /// Reads a 64-bit VMCS field.
    ///
    /// Panics if the field is not a 64-bit field, or if there is an error in
    /// the TDX module when reading the field.
    pub fn read_vmcs64(&self, vtl: GuestVtl, field: VmcsField) -> u64 {
        // Natural-width fields are 64-bit on x64, so accept both widths here.
        assert!(matches!(
            field.field_width(),
            x86defs::vmx::FieldWidth::WidthNatural | x86defs::vmx::FieldWidth::Width64
        ));
        self.read_vmcs(vtl, field)
    }

    /// Write a 32-bit VMCS field.
    ///
    /// Only updates the bits that are set in `mask`. Returns the old value of
    /// the field.
    ///
    /// Panics if the field is not a 32-bit field, or if there is an error in
    /// the TDX module when writing the field.
    pub fn write_vmcs32(&mut self, vtl: GuestVtl, field: VmcsField, mask: u32, value: u32) -> u32 {
        assert_eq!(field.field_width(), x86defs::vmx::FieldWidth::Width32);
        self.write_vmcs(vtl, field, mask.into(), value.into()) as u32
    }

    /// Reads a 32-bit VMCS field.
    ///
    /// Panics if the field is not a 32-bit field, or if there is an error in
    /// the TDX module when reading the field.
    pub fn read_vmcs32(&self, vtl: GuestVtl, field: VmcsField) -> u32 {
        assert_eq!(field.field_width(), x86defs::vmx::FieldWidth::Width32);
        self.read_vmcs(vtl, field) as u32
    }

    /// Write a 16-bit VMCS field.
    ///
    /// Only updates the bits that are set in `mask`. Returns the old value of
    /// the field.
    ///
    /// Panics if the field is not a 16-bit field, or if there is an error in
    /// the TDX module when writing the field.
    pub fn write_vmcs16(&mut self, vtl: GuestVtl, field: VmcsField, mask: u16, value: u16) -> u16 {
        assert_eq!(field.field_width(), x86defs::vmx::FieldWidth::Width16);
        self.write_vmcs(vtl, field, mask.into(), value.into()) as u16
    }

    /// Reads a 16-bit VMCS field.
    ///
    /// Panics if the field is not a 16-bit field, or if there is an error in
    /// the TDX module when reading the field.
    pub fn read_vmcs16(&self, vtl: GuestVtl, field: VmcsField) -> u16 {
        assert_eq!(field.field_width(), x86defs::vmx::FieldWidth::Width16);
        self.read_vmcs(vtl, field) as u16
    }

    /// Sets the MSR bitmap intercept bit for the given MSR index.
    ///
    /// Panics if there is an error in the TDX module when writing the bit.
    pub fn set_msr_bit(&self, vtl: GuestVtl, msr_index: u32, write: bool, intercept: bool) {
        // Each 64-bit word of the bitmap covers 64 MSRs; the low 16 bits of
        // the MSR index select the bit within the low or high MSR region.
        let mut word_index = (msr_index & 0xFFFF) / 64;

        if msr_index & 0x80000000 == 0x80000000 {
            // High MSRs (0xC0000000..=0xC0001FFF) occupy the second region of
            // the bitmap, starting at word offset 0x80.
            assert!((0xC0000000..=0xC0001FFF).contains(&msr_index));
            word_index += 0x80;
        } else {
            // Low MSRs must be in 0..=0x1FFF.
            assert!(msr_index <= 0x00001FFF);
        }

        if write {
            // Write bitmaps follow the read bitmaps at word offset 0x100.
            word_index += 0x100;
        }

        self.write_msr_bitmap(
            vtl,
            word_index,
            1 << (msr_index as u64 & 0x3F),
            if intercept { !0 } else { 0 },
        );
    }

    /// Writes 64-bit word with index `i` of the MSR bitmap.
    ///
    /// Only updates the bits that are set in `mask`. Returns the old value of
    /// the word.
    ///
    /// Panics if there is an error in the TDX module when writing the word.
    pub fn write_msr_bitmap(&self, vtl: GuestVtl, i: u32, mask: u64, word: u64) -> u64 {
        // VTL0 and VTL1 map to the TD module's L2 VM1 and VM2 bitmaps.
        let class_code = match vtl {
            GuestVtl::Vtl0 => TdVpsClassCode::MSR_BITMAPS_1,
            GuestVtl::Vtl1 => TdVpsClassCode::MSR_BITMAPS_2,
        };
        let field_code = TdxExtendedFieldCode::new()
            .with_context_code(TdxContextCode::TD_VCPU)
            .with_field_size(x86defs::tdx::FieldSize::Size64Bit)
            .with_field_code(i)
            .with_class_code(class_code.0);

        tdcall_vp_wr(
            &mut MshvVtlTdcall(&self.hcl.mshv_vtl),
            field_code,
            word,
            mask,
        )
        .unwrap()
    }

    /// Sets the L2_CTLS field of the VP.
    ///
    /// Returns the old value of the field.
    pub fn set_l2_ctls(&self, vtl: GuestVtl, value: TdxL2Ctls) -> Result<TdxL2Ctls, TdCallResult> {
        let field_code = match vtl {
            GuestVtl::Vtl0 => x86defs::tdx::TDX_FIELD_CODE_L2_CTLS_VM1,
            GuestVtl::Vtl1 => x86defs::tdx::TDX_FIELD_CODE_L2_CTLS_VM2,
        };
        tdcall_vp_wr(
            &mut MshvVtlTdcall(&self.hcl.mshv_vtl),
            field_code,
            value.into(),
            !0,
        )
        .map(Into::into)
    }

    /// Issues an INVGLA instruction for the VP.
    pub fn invgla(
        &self,
        gla_flags: TdGlaVmAndFlags,
        gla_info: TdxGlaListInfo,
    ) -> Result<(), TdCallResult> {
        tdcall_vp_invgla(&mut MshvVtlTdcall(&self.hcl.mshv_vtl), gla_flags, gla_info)
    }

    /// Gets the FPU state for the VP.
    pub fn fx_state(&self) -> &x86defs::xsave::Fxsave {
        &self.tdx_vp_context().fx_state
    }

    /// Sets the FPU state for the VP.
    pub fn fx_state_mut(&mut self) -> &mut x86defs::xsave::Fxsave {
        &mut self.tdx_vp_context_mut().fx_state
    }
}
436
437fn vmcs_field_code(field: VmcsField, vtl: GuestVtl) -> TdxExtendedFieldCode {
438    let class_code = match vtl {
439        GuestVtl::Vtl0 => TdVpsClassCode::VMCS_1,
440        GuestVtl::Vtl1 => TdVpsClassCode::VMCS_2,
441    };
442    let field_size = match field.field_width() {
443        x86defs::vmx::FieldWidth::Width16 => x86defs::tdx::FieldSize::Size16Bit,
444        x86defs::vmx::FieldWidth::Width32 => x86defs::tdx::FieldSize::Size32Bit,
445        x86defs::vmx::FieldWidth::Width64 => x86defs::tdx::FieldSize::Size64Bit,
446        x86defs::vmx::FieldWidth::WidthNatural => x86defs::tdx::FieldSize::Size64Bit,
447    };
448    TdxExtendedFieldCode::new()
449        .with_context_code(TdxContextCode::TD_VCPU)
450        .with_class_code(class_code.0)
451        .with_field_code(field.into())
452        .with_field_size(field_size)
453}
454
impl<'a> super::private::BackingPrivate<'a> for Tdx<'a> {
    fn new(vp: &'a HclVp, sidecar: Option<&SidecarVp<'_>>, hcl: &Hcl) -> Result<Self, NoRunner> {
        // TDX VPs are never backed by a sidecar VP.
        assert!(sidecar.is_none());
        let super::BackingState::Tdx {
            vtl0_apic_page,
            vtl1_apic_page,
        } = &vp.backing
        else {
            return Err(NoRunner::MismatchedIsolation);
        };

        // Register the VTL 1 APIC page with the TD module.
        // The VTL 0 APIC page is registered by the kernel.
        let vtl1_apic_page_addr = vtl1_apic_page.pfns()[0] * user_driver::memory::PAGE_SIZE64;
        tdcall_vp_wr(
            &mut MshvVtlTdcall(&hcl.mshv_vtl),
            vmcs_field_code(VmcsField::VMX_VMCS_VIRTUAL_APIC_PAGE, GuestVtl::Vtl1),
            vtl1_apic_page_addr,
            !0, // update all bits of the field
        )
        .expect("failed registering VTL1 APIC page");

        // SAFETY: The mapping is held for the appropriate lifetime, and the
        // APIC page is never accessed as any other type, or by any other location.
        let vtl1_apic_page = unsafe { &*vtl1_apic_page.base().cast() };

        Ok(Self {
            apic_pages: [vtl0_apic_page.as_ref(), vtl1_apic_page].into(),
        })
    }

    // TDX does not route any registers through the run page's register
    // mechanism; all accesses go through tdcalls or the VP context.
    fn try_set_reg(
        _runner: &mut ProcessorRunner<'a, Self>,
        _vtl: GuestVtl,
        _name: HvRegisterName,
        _value: HvRegisterValue,
    ) -> bool {
        false
    }

    fn must_flush_regs_on(_runner: &ProcessorRunner<'a, Self>, _name: HvRegisterName) -> bool {
        false
    }

    fn try_get_reg(
        _runner: &ProcessorRunner<'a, Self>,
        _vtl: GuestVtl,
        _name: HvRegisterName,
    ) -> Option<HvRegisterValue> {
        None
    }

    fn flush_register_page(_runner: &mut ProcessorRunner<'a, Self>) {}
}
509
/// Private registers that are copied to/from the kernel's shared run page.
// NOTE: fields intentionally use `//` comments rather than doc comments;
// per-field docs are waived by the `expect` below.
#[derive(inspect::InspectMut)]
#[expect(missing_docs, reason = "Self-describing field names")]
pub struct TdxPrivateRegs {
    // Registers on [`TdxL2EnterGuestState`].
    pub rflags: u64,
    pub rip: u64,
    // RSP lives in the otherwise VTL-shared GP list on the run page
    // (`gps[TdxGp::RSP]`), but is private per VTL, so it is saved here.
    pub rsp: u64,
    pub ssp: u64,
    pub rvi: u8,
    pub svi: u8,
    // Registers on [`tdx_vp_state`].
    pub msr_kernel_gs_base: u64,
    pub msr_star: u64,
    pub msr_lstar: u64,
    pub msr_sfmask: u64,
    pub msr_xss: u64,
    pub msr_tsc_aux: u64,
    // VP Entry flags (the RCX input to VP.ENTER).
    #[inspect(hex, with = "|x| x.into_bits()")]
    pub vp_entry_flags: TdxVmFlags,
}
532
533impl TdxPrivateRegs {
534    /// Creates a new register set with the given values.
535    /// Other values are initialized to zero.
536    pub fn new(vtl: GuestVtl) -> Self {
537        Self {
538            rflags: x86defs::RFlags::at_reset().into(),
539            rip: 0,
540            rsp: 0,
541            ssp: 0,
542            rvi: 0,
543            svi: 0,
544            msr_kernel_gs_base: 0,
545            msr_star: 0,
546            msr_lstar: 0,
547            msr_sfmask: 0,
548            msr_xss: 0,
549            msr_tsc_aux: 0,
550            // We initialize with a TLB flush pending so that save/restore/reset
551            // operations (not supported yet, but maybe someday) will start with
552            // a clear TLB. During regular boots this won't matter, as the TLB
553            // will already be empty.
554            vp_entry_flags: TdxVmFlags::new()
555                .with_vm_index(vtl as u8 + 1)
556                .with_invd_translations(x86defs::tdx::TDX_VP_ENTER_INVD_INVEPT),
557        }
558    }
559}
560
561struct MshvVtlTdcall<'a>(&'a MshvVtl);
562
impl Tdcall for MshvVtlTdcall<'_> {
    /// Issues a tdcall by marshalling `input` into the kernel's
    /// `mshv_tdcall` structure and invoking the tdcall ioctl.
    fn tdcall(&mut self, input: tdcall::TdcallInput) -> tdcall::TdcallOutput {
        let mut mshv_tdcall_args = {
            // Destructure exhaustively so new input registers cannot be
            // silently dropped.
            let tdcall::TdcallInput {
                leaf,
                rcx,
                rdx,
                r8,
                r9,
                r10,
                r11,
                r12,
                r13,
                r14,
                r15,
            } = input;

            // NOTE: Only TD module calls are supported by the kernel, so assert
            // that here before dispatching. Additionally, the kernel only
            // supports a limited set of input registers.
            assert_ne!(leaf, x86defs::tdx::TdCallLeaf::VP_VMCALL);
            assert_eq!(r10, 0);
            assert_eq!(r11, 0);
            assert_eq!(r12, 0);
            assert_eq!(r13, 0);
            assert_eq!(r14, 0);
            assert_eq!(r15, 0);

            mshv_tdcall {
                rax: leaf.0,
                rcx,
                rdx,
                r8,
                r9,
                // Output-only slots; the kernel fills these in.
                r10_out: 0,
                r11_out: 0,
            }
        };

        // SAFETY: Calling tdcall ioctl with the correct arguments.
        unsafe {
            // NOTE: This ioctl should never fail, as the tdcall itself failing
            // is returned as output in the structure given by the kernel.
            hcl_tdcall(self.0.file.as_raw_fd(), &mut mshv_tdcall_args)
                .expect("todo handle tdcall ioctl error");
        }

        // Translate the kernel's in/out structure back into the tdcall
        // crate's output type; rax carries the TDX completion status.
        tdcall::TdcallOutput {
            rax: TdCallResult::from(mshv_tdcall_args.rax),
            rcx: mshv_tdcall_args.rcx,
            rdx: mshv_tdcall_args.rdx,
            r8: mshv_tdcall_args.r8,
            r10: mshv_tdcall_args.r10_out,
            r11: mshv_tdcall_args.r11_out,
        }
    }
}
619}