1use super::Hcl;
7use super::HclVp;
8use super::MshvVtl;
9use super::NoRunner;
10use super::ProcessorRunner;
11use super::hcl_tdcall;
12use super::mshv_tdcall;
13use crate::GuestVtl;
14use crate::protocol::tdx_l2_tsc_deadline_state;
15use crate::protocol::tdx_tdg_vp_enter_exit_info;
16use crate::protocol::tdx_vp_context;
17use crate::protocol::tdx_vp_state;
18use crate::protocol::tdx_vp_state_flags;
19use hv1_structs::VtlArray;
20use hvdef::HvRegisterName;
21use hvdef::HvRegisterValue;
22use memory_range::MemoryRange;
23use sidecar_client::SidecarVp;
24use std::cell::UnsafeCell;
25use std::os::fd::AsRawFd;
26use tdcall::Tdcall;
27use tdcall::tdcall_vp_invgla;
28use tdcall::tdcall_vp_rd;
29use tdcall::tdcall_vp_wr;
30use x86defs::tdx::TdCallResult;
31use x86defs::tdx::TdCallResultCode;
32use x86defs::tdx::TdGlaVmAndFlags;
33use x86defs::tdx::TdVpsClassCode;
34use x86defs::tdx::TdgMemPageAttrWriteR8;
35use x86defs::tdx::TdgMemPageGpaAttr;
36use x86defs::tdx::TdxContextCode;
37use x86defs::tdx::TdxExtendedFieldCode;
38use x86defs::tdx::TdxGlaListInfo;
39use x86defs::tdx::TdxGp;
40use x86defs::tdx::TdxL2Ctls;
41use x86defs::tdx::TdxL2EnterGuestState;
42use x86defs::tdx::TdxVmFlags;
43use x86defs::vmx::ApicPage;
44use x86defs::vmx::VmcsField;
45
/// Backing state for a processor runner on a TDX-isolated partition.
pub struct Tdx<'a> {
    // The mapped APIC pages for each guest VTL (VTL0 and VTL1). Wrapped in
    // `UnsafeCell` because the pages are shared with the hardware/kernel and
    // are accessed through raw pointers in `tdx_apic_page{_mut}`.
    apic_pages: VtlArray<&'a UnsafeCell<ApicPage>, 2>,
}
50
impl MshvVtl {
    /// Sets the attributes (and the mask of attributes to change) for the
    /// pages in `range` by issuing the corresponding tdcall through this
    /// device.
    pub fn tdx_set_page_attributes(
        &self,
        range: MemoryRange,
        attributes: TdgMemPageGpaAttr,
        mask: TdgMemPageAttrWriteR8,
    ) -> Result<(), TdCallResultCode> {
        tdcall::set_page_attributes(&mut MshvVtlTdcall(self), range, attributes, mask)
    }

    /// Accepts the pages in `range`, optionally setting their attributes at
    /// the same time.
    ///
    /// When `attributes` is `None`, the pages are accepted without an
    /// attribute change.
    pub fn tdx_accept_pages(
        &self,
        range: MemoryRange,
        attributes: Option<(TdgMemPageGpaAttr, TdgMemPageAttrWriteR8)>,
    ) -> Result<(), tdcall::AcceptPagesError> {
        let attributes = attributes
            .map_or(tdcall::AcceptPagesAttributes::None, |(attributes, mask)| {
                tdcall::AcceptPagesAttributes::Set { attributes, mask }
            });

        tdcall::accept_pages(&mut MshvVtlTdcall(self), range, attributes)
    }
}
81
impl<'a> ProcessorRunner<'a, Tdx<'a>> {
    /// Returns a shared reference to the TDX VP context stored in the
    /// `context` area of the run page shared with the kernel.
    fn tdx_vp_context(&self) -> &tdx_vp_context {
        // SAFETY: on a TDX-isolated partition the run page's `context` area
        // holds a `tdx_vp_context`, and `&self` prevents concurrent mutable
        // access through this runner. Presumably the kernel only writes this
        // area while the VP is actually running (i.e. not while user mode
        // holds a reference) — TODO confirm.
        unsafe { &*(&raw mut (*self.run.get()).context).cast() }
    }

    /// Returns an exclusive reference to the TDX VP context in the run page.
    fn tdx_vp_context_mut(&mut self) -> &mut tdx_vp_context {
        // SAFETY: same layout argument as `tdx_vp_context`; `&mut self`
        // guarantees exclusive access through this runner.
        unsafe { &mut *(&raw mut (*self.run.get()).context).cast() }
    }

    /// Returns the TDG.VP.ENTER guest GPR list within the VP context.
    fn tdx_enter_guest_state(&self) -> &TdxL2EnterGuestState {
        &self.tdx_vp_context().gpr_list
    }

    /// Returns the TDG.VP.ENTER guest GPR list mutably.
    fn tdx_enter_guest_state_mut(&mut self) -> &mut TdxL2EnterGuestState {
        &mut self.tdx_vp_context_mut().gpr_list
    }

    /// Returns the 16 general-purpose registers passed to/from TDG.VP.ENTER.
    pub fn tdx_enter_guest_gps(&self) -> &[u64; 16] {
        &self.tdx_enter_guest_state().gps
    }

    /// Returns the TDG.VP.ENTER general-purpose registers mutably.
    pub fn tdx_enter_guest_gps_mut(&mut self) -> &mut [u64; 16] {
        &mut self.tdx_enter_guest_state_mut().gps
    }

    /// Returns the exit information reported by the last TDG.VP.ENTER.
    pub fn tdx_vp_enter_exit_info(&self) -> &tdx_tdg_vp_enter_exit_info {
        &self.tdx_vp_context().exit_info
    }

    /// Returns the virtual APIC page for the given VTL.
    pub fn tdx_apic_page(&self, vtl: GuestVtl) -> &ApicPage {
        // SAFETY: the APIC page mapping lives for `'a`, and `&self` prevents
        // concurrent mutable access through this runner. NOTE(review): the
        // page is hardware/kernel-shared via `UnsafeCell`; callers presumably
        // tolerate concurrent updates — confirm.
        unsafe { &*self.state.apic_pages[vtl].get() }
    }

    /// Returns the virtual APIC page for the given VTL mutably.
    pub fn tdx_apic_page_mut(&mut self, vtl: GuestVtl) -> &mut ApicPage {
        // SAFETY: as for `tdx_apic_page`, with `&mut self` providing
        // exclusive access through this runner.
        unsafe { &mut *self.state.apic_pages[vtl].get() }
    }

    /// Returns the additional VP state (MSRs, cr2, flags) in the VP context.
    fn tdx_vp_state(&self) -> &tdx_vp_state {
        &self.tdx_vp_context().vp_state
    }

    /// Returns the additional VP state mutably.
    fn tdx_vp_state_mut(&mut self) -> &mut tdx_vp_state {
        &mut self.tdx_vp_context_mut().vp_state
    }

    /// Returns the guest's CR2 value stored in the VP state.
    pub fn cr2(&self) -> u64 {
        self.tdx_vp_state().cr2
    }

    /// Sets the guest's CR2 value in the VP state.
    pub fn set_cr2(&mut self, value: u64) {
        self.tdx_vp_state_mut().cr2 = value;
    }

    /// Returns the VP state flags mutably, e.g. to request state save/restore
    /// actions from the kernel on the next run.
    pub fn tdx_vp_state_flags_mut(&mut self) -> &mut tdx_vp_state_flags {
        &mut self.tdx_vp_state_mut().flags
    }

    /// Returns the TDG.VP.ENTER flags (passed in rcx) for the next entry.
    fn tdx_vp_entry_flags(&self) -> &TdxVmFlags {
        &self.tdx_vp_context().entry_rcx
    }

    /// Returns the TDG.VP.ENTER flags mutably.
    fn tdx_vp_entry_flags_mut(&mut self) -> &mut TdxVmFlags {
        &mut self.tdx_vp_context_mut().entry_rcx
    }

    /// Returns the L2 TSC deadline state in the VP context.
    pub fn tdx_l2_tsc_deadline_state(&self) -> &tdx_l2_tsc_deadline_state {
        &self.tdx_vp_context().l2_tsc_deadline
    }

    /// Returns the L2 TSC deadline state mutably.
    pub fn tdx_l2_tsc_deadline_state_mut(&mut self) -> &mut tdx_l2_tsc_deadline_state {
        &mut self.tdx_vp_context_mut().l2_tsc_deadline
    }

    /// Copies the VP's private register state out of the shared VP context
    /// into `regs`.
    ///
    /// The destructuring patterns are exhaustive so that adding a field to
    /// the context types forces this function to be updated.
    pub fn read_private_regs(&self, regs: &mut TdxPrivateRegs) {
        let TdxL2EnterGuestState {
            gps,
            rflags,
            rip,
            ssp,
            rvi,
            svi,
            reserved: _reserved,
        } = self.tdx_enter_guest_state();
        regs.rflags = *rflags;
        regs.rip = *rip;
        // rsp lives in the GPR array rather than as a named field.
        regs.rsp = gps[TdxGp::RSP];
        regs.ssp = *ssp;
        regs.rvi = *rvi;
        regs.svi = *svi;

        let tdx_vp_state {
            msr_kernel_gs_base,
            msr_star,
            msr_lstar,
            msr_sfmask,
            msr_xss,
            // cr2 and flags are intentionally not part of the private regs;
            // they are accessed via `cr2`/`set_cr2`/`tdx_vp_state_flags_mut`.
            cr2: _cr2,
            msr_tsc_aux,
            flags: _flags,
        } = self.tdx_vp_state();
        regs.msr_kernel_gs_base = *msr_kernel_gs_base;
        regs.msr_star = *msr_star;
        regs.msr_lstar = *msr_lstar;
        regs.msr_sfmask = *msr_sfmask;
        regs.msr_xss = *msr_xss;
        regs.msr_tsc_aux = *msr_tsc_aux;

        regs.vp_entry_flags = *self.tdx_vp_entry_flags();
    }

    /// Copies `regs` into the shared VP context, the inverse of
    /// [`Self::read_private_regs`].
    pub fn write_private_regs(&mut self, regs: &TdxPrivateRegs) {
        // Exhaustive destructure: adding a field to `TdxPrivateRegs` forces
        // this function to be updated.
        let TdxPrivateRegs {
            rflags,
            rip,
            rsp,
            ssp,
            rvi,
            svi,
            msr_kernel_gs_base,
            msr_star,
            msr_lstar,
            msr_sfmask,
            msr_xss,
            msr_tsc_aux,
            vp_entry_flags,
        } = regs;

        let enter_guest_state = self.tdx_enter_guest_state_mut();
        enter_guest_state.rflags = *rflags;
        enter_guest_state.rip = *rip;
        enter_guest_state.ssp = *ssp;
        enter_guest_state.rvi = *rvi;
        enter_guest_state.svi = *svi;
        enter_guest_state.gps[TdxGp::RSP] = *rsp;

        let vp_state = self.tdx_vp_state_mut();
        vp_state.msr_kernel_gs_base = *msr_kernel_gs_base;
        vp_state.msr_star = *msr_star;
        vp_state.msr_lstar = *msr_lstar;
        vp_state.msr_sfmask = *msr_sfmask;
        vp_state.msr_xss = *msr_xss;
        vp_state.msr_tsc_aux = *msr_tsc_aux;

        *self.tdx_vp_entry_flags_mut() = *vp_entry_flags;
    }

    /// Writes the bits of `value` selected by `mask` to the VMCS field for
    /// the given VTL via TDG.VP.WR, returning the tdcall's result value.
    ///
    /// Panics on tdcall failure, since a failed VMCS access indicates a
    /// broken invariant rather than a recoverable condition.
    fn write_vmcs(&mut self, vtl: GuestVtl, field: VmcsField, mask: u64, value: u64) -> u64 {
        tdcall_vp_wr(
            &mut MshvVtlTdcall(&self.hcl.mshv_vtl),
            vmcs_field_code(field, vtl),
            value,
            mask,
        )
        .expect("fatal vmcs access failure")
    }

    /// Reads the VMCS field for the given VTL via TDG.VP.RD.
    ///
    /// Panics on tdcall failure (see [`Self::write_vmcs`]).
    fn read_vmcs(&self, vtl: GuestVtl, field: VmcsField) -> u64 {
        tdcall_vp_rd(
            &mut MshvVtlTdcall(&self.hcl.mshv_vtl),
            vmcs_field_code(field, vtl),
        )
        .expect("fatal vmcs access failure")
    }

    /// Writes a 64-bit (or natural-width) VMCS field, asserting the field's
    /// declared width matches.
    pub fn write_vmcs64(&mut self, vtl: GuestVtl, field: VmcsField, mask: u64, value: u64) -> u64 {
        assert!(matches!(
            field.field_width(),
            x86defs::vmx::FieldWidth::WidthNatural | x86defs::vmx::FieldWidth::Width64
        ));
        self.write_vmcs(vtl, field, mask, value)
    }

    /// Reads a 64-bit (or natural-width) VMCS field.
    pub fn read_vmcs64(&self, vtl: GuestVtl, field: VmcsField) -> u64 {
        assert!(matches!(
            field.field_width(),
            x86defs::vmx::FieldWidth::WidthNatural | x86defs::vmx::FieldWidth::Width64
        ));
        self.read_vmcs(vtl, field)
    }

    /// Writes a 32-bit VMCS field, asserting the field's declared width.
    pub fn write_vmcs32(&mut self, vtl: GuestVtl, field: VmcsField, mask: u32, value: u32) -> u32 {
        assert_eq!(field.field_width(), x86defs::vmx::FieldWidth::Width32);
        // Truncation is fine: a 32-bit field only occupies the low bits.
        self.write_vmcs(vtl, field, mask.into(), value.into()) as u32
    }

    /// Reads a 32-bit VMCS field.
    pub fn read_vmcs32(&self, vtl: GuestVtl, field: VmcsField) -> u32 {
        assert_eq!(field.field_width(), x86defs::vmx::FieldWidth::Width32);
        self.read_vmcs(vtl, field) as u32
    }

    /// Writes a 16-bit VMCS field, asserting the field's declared width.
    pub fn write_vmcs16(&mut self, vtl: GuestVtl, field: VmcsField, mask: u16, value: u16) -> u16 {
        assert_eq!(field.field_width(), x86defs::vmx::FieldWidth::Width16);
        self.write_vmcs(vtl, field, mask.into(), value.into()) as u16
    }

    /// Reads a 16-bit VMCS field.
    pub fn read_vmcs16(&self, vtl: GuestVtl, field: VmcsField) -> u16 {
        assert_eq!(field.field_width(), x86defs::vmx::FieldWidth::Width16);
        self.read_vmcs(vtl, field) as u16
    }

    /// Sets or clears the MSR bitmap bit for `msr_index` in `vtl`'s bitmap.
    ///
    /// `write` selects the write-intercept half of the bitmap (otherwise the
    /// read half); `intercept` is the new bit value.
    ///
    /// Panics if `msr_index` is outside the ranges covered by the bitmap
    /// (0..=0x1FFF and 0xC0000000..=0xC0001FFF).
    pub fn set_msr_bit(&self, vtl: GuestVtl, msr_index: u32, write: bool, intercept: bool) {
        // Each 64-bit bitmap word covers 64 MSRs. Low MSRs occupy word
        // indexes 0..0x80, high MSRs (0xC000_0000-based) occupy 0x80..0x100,
        // and the write bitmaps follow the read bitmaps at offset 0x100.
        let mut word_index = (msr_index & 0xFFFF) / 64;

        if msr_index & 0x80000000 == 0x80000000 {
            assert!((0xC0000000..=0xC0001FFF).contains(&msr_index));
            word_index += 0x80;
        } else {
            assert!(msr_index <= 0x00001FFF);
        }

        if write {
            word_index += 0x100;
        }

        self.write_msr_bitmap(
            vtl,
            word_index,
            1 << (msr_index as u64 & 0x3F),
            if intercept { !0 } else { 0 },
        );
    }

    /// Writes the 64-bit MSR bitmap word at word index `i` for `vtl`,
    /// changing only the bits selected by `mask`, via TDG.VP.WR.
    ///
    /// Returns the value reported by the tdcall — presumably the resulting
    /// word; confirm against the TDX module ABI spec.
    ///
    /// Panics if the tdcall fails.
    pub fn write_msr_bitmap(&self, vtl: GuestVtl, i: u32, mask: u64, word: u64) -> u64 {
        // VTL0 maps to the first L2 VM's bitmap class, VTL1 to the second.
        let class_code = match vtl {
            GuestVtl::Vtl0 => TdVpsClassCode::MSR_BITMAPS_1,
            GuestVtl::Vtl1 => TdVpsClassCode::MSR_BITMAPS_2,
        };
        let field_code = TdxExtendedFieldCode::new()
            .with_context_code(TdxContextCode::TD_VCPU)
            .with_field_size(x86defs::tdx::FieldSize::Size64Bit)
            .with_field_code(i)
            .with_class_code(class_code.0);

        tdcall_vp_wr(
            &mut MshvVtlTdcall(&self.hcl.mshv_vtl),
            field_code,
            word,
            mask,
        )
        .unwrap()
    }

    /// Sets the L2 controls for `vtl`'s VM via TDG.VP.WR, returning the
    /// controls value reported by the tdcall.
    pub fn set_l2_ctls(&self, vtl: GuestVtl, value: TdxL2Ctls) -> Result<TdxL2Ctls, TdCallResult> {
        let field_code = match vtl {
            GuestVtl::Vtl0 => x86defs::tdx::TDX_FIELD_CODE_L2_CTLS_VM1,
            GuestVtl::Vtl1 => x86defs::tdx::TDX_FIELD_CODE_L2_CTLS_VM2,
        };
        tdcall_vp_wr(
            &mut MshvVtlTdcall(&self.hcl.mshv_vtl),
            field_code,
            value.into(),
            !0,
        )
        .map(Into::into)
    }

    /// Invalidates guest linear address translations via TDG.VP.INVGLA.
    pub fn invgla(
        &self,
        gla_flags: TdGlaVmAndFlags,
        gla_info: TdxGlaListInfo,
    ) -> Result<(), TdCallResult> {
        tdcall_vp_invgla(&mut MshvVtlTdcall(&self.hcl.mshv_vtl), gla_flags, gla_info)
    }

    /// Returns the fxsave area of the VP context.
    pub fn fx_state(&self) -> &x86defs::xsave::Fxsave {
        &self.tdx_vp_context().fx_state
    }

    /// Returns the fxsave area of the VP context mutably.
    pub fn fx_state_mut(&mut self) -> &mut x86defs::xsave::Fxsave {
        &mut self.tdx_vp_context_mut().fx_state
    }
}
436
437fn vmcs_field_code(field: VmcsField, vtl: GuestVtl) -> TdxExtendedFieldCode {
438 let class_code = match vtl {
439 GuestVtl::Vtl0 => TdVpsClassCode::VMCS_1,
440 GuestVtl::Vtl1 => TdVpsClassCode::VMCS_2,
441 };
442 let field_size = match field.field_width() {
443 x86defs::vmx::FieldWidth::Width16 => x86defs::tdx::FieldSize::Size16Bit,
444 x86defs::vmx::FieldWidth::Width32 => x86defs::tdx::FieldSize::Size32Bit,
445 x86defs::vmx::FieldWidth::Width64 => x86defs::tdx::FieldSize::Size64Bit,
446 x86defs::vmx::FieldWidth::WidthNatural => x86defs::tdx::FieldSize::Size64Bit,
447 };
448 TdxExtendedFieldCode::new()
449 .with_context_code(TdxContextCode::TD_VCPU)
450 .with_class_code(class_code.0)
451 .with_field_code(field.into())
452 .with_field_size(field_size)
453}
454
impl<'a> super::private::BackingPrivate<'a> for Tdx<'a> {
    fn new(vp: &'a HclVp, sidecar: Option<&SidecarVp<'_>>, hcl: &Hcl) -> Result<Self, NoRunner> {
        // Sidecar VPs are not used on TDX-isolated partitions.
        assert!(sidecar.is_none());
        let super::BackingState::Tdx {
            vtl0_apic_page,
            vtl1_apic_page,
        } = &vp.backing
        else {
            return Err(NoRunner::MismatchedIsolation);
        };

        // Register the VTL1 virtual APIC page with the TDX module by writing
        // its physical address into the VTL1 (second L2 VM) VMCS.
        let vtl1_apic_page_addr = vtl1_apic_page.pfns()[0] * user_driver::memory::PAGE_SIZE64;
        tdcall_vp_wr(
            &mut MshvVtlTdcall(&hcl.mshv_vtl),
            vmcs_field_code(VmcsField::VMX_VMCS_VIRTUAL_APIC_PAGE, GuestVtl::Vtl1),
            vtl1_apic_page_addr,
            !0,
        )
        .expect("failed registering VTL1 APIC page");

        // SAFETY: the mapping is kept alive by `vp` for `'a` and is at least
        // one page, which is large enough for an `ApicPage` behind an
        // `UnsafeCell`. NOTE(review): assumes nothing else holds a
        // conflicting Rust reference to this page — confirm.
        let vtl1_apic_page = unsafe { &*vtl1_apic_page.base().cast() };

        Ok(Self {
            apic_pages: [vtl0_apic_page.as_ref(), vtl1_apic_page].into(),
        })
    }

    // No registers are set via the register page / run structure for TDX;
    // callers fall back to the generic path.
    fn try_set_reg(
        _runner: &mut ProcessorRunner<'a, Self>,
        _vtl: GuestVtl,
        _name: HvRegisterName,
        _value: HvRegisterValue,
    ) -> bool {
        false
    }

    // No pending register state is kept locally, so no flush is ever needed.
    fn must_flush_regs_on(_runner: &ProcessorRunner<'a, Self>, _name: HvRegisterName) -> bool {
        false
    }

    // No registers are read via the register page / run structure for TDX.
    fn try_get_reg(
        _runner: &ProcessorRunner<'a, Self>,
        _vtl: GuestVtl,
        _name: HvRegisterName,
    ) -> Option<HvRegisterValue> {
        None
    }

    // Nothing to do: TDX does not use the register page.
    fn flush_register_page(_runner: &mut ProcessorRunner<'a, Self>) {}
}
509
/// Per-VP register state that is kept privately in user mode (rather than in
/// the kernel-shared VP context) between runs.
//
// NOTE: field comments use `//` instead of `///` so the
// `#[expect(missing_docs)]` below stays fulfilled.
#[derive(inspect::InspectMut)]
#[expect(missing_docs, reason = "Self-describing field names")]
pub struct TdxPrivateRegs {
    pub rflags: u64,
    pub rip: u64,
    // rsp is stored here by name; in the enter guest state it lives in the
    // GPR array at `TdxGp::RSP`.
    pub rsp: u64,
    pub ssp: u64,
    // Virtual-interrupt state: requesting and servicing vector.
    pub rvi: u8,
    pub svi: u8,
    pub msr_kernel_gs_base: u64,
    pub msr_star: u64,
    pub msr_lstar: u64,
    pub msr_sfmask: u64,
    pub msr_xss: u64,
    pub msr_tsc_aux: u64,
    // Flags passed in rcx to TDG.VP.ENTER on the next entry.
    #[inspect(hex, with = "|x| x.into_bits()")]
    pub vp_entry_flags: TdxVmFlags,
}
532
533impl TdxPrivateRegs {
534 pub fn new(vtl: GuestVtl) -> Self {
537 Self {
538 rflags: x86defs::RFlags::at_reset().into(),
539 rip: 0,
540 rsp: 0,
541 ssp: 0,
542 rvi: 0,
543 svi: 0,
544 msr_kernel_gs_base: 0,
545 msr_star: 0,
546 msr_lstar: 0,
547 msr_sfmask: 0,
548 msr_xss: 0,
549 msr_tsc_aux: 0,
550 vp_entry_flags: TdxVmFlags::new()
555 .with_vm_index(vtl as u8 + 1)
556 .with_invd_translations(x86defs::tdx::TDX_VP_ENTER_INVD_INVEPT),
557 }
558 }
559}
560
/// Adapter that issues tdcalls through the mshv_vtl driver's ioctl interface.
struct MshvVtlTdcall<'a>(&'a MshvVtl);
562
impl Tdcall for MshvVtlTdcall<'_> {
    /// Issues a tdcall by forwarding `input` to the kernel via the mshv
    /// tdcall ioctl and translating the result back.
    fn tdcall(&mut self, input: tdcall::TdcallInput) -> tdcall::TdcallOutput {
        let mut mshv_tdcall_args = {
            let tdcall::TdcallInput {
                leaf,
                rcx,
                rdx,
                r8,
                r9,
                r10,
                r11,
                r12,
                r13,
                r14,
                r15,
            } = input;

            // The mshv tdcall ioctl ABI only carries rcx/rdx/r8/r9 as
            // inputs, so reject leaves that would need the extended
            // registers. VP_VMCALL is not supported through this path.
            assert_ne!(leaf, x86defs::tdx::TdCallLeaf::VP_VMCALL);
            assert_eq!(r10, 0);
            assert_eq!(r11, 0);
            assert_eq!(r12, 0);
            assert_eq!(r13, 0);
            assert_eq!(r14, 0);
            assert_eq!(r15, 0);

            mshv_tdcall {
                rax: leaf.0,
                rcx,
                rdx,
                r8,
                r9,
                r10_out: 0,
                r11_out: 0,
            }
        };

        // SAFETY: the ioctl only reads and writes `mshv_tdcall_args`, which
        // is valid for the duration of the call.
        unsafe {
            hcl_tdcall(self.0.file.as_raw_fd(), &mut mshv_tdcall_args)
                .expect("todo handle tdcall ioctl error");
        }

        // The kernel returns the tdcall result registers in place; r10/r11
        // outputs come back through the dedicated `_out` fields.
        tdcall::TdcallOutput {
            rax: TdCallResult::from(mshv_tdcall_args.rax),
            rcx: mshv_tdcall_args.rcx,
            rdx: mshv_tdcall_args.rdx,
            r8: mshv_tdcall_args.r8,
            r10: mshv_tdcall_args.r10_out,
            r11: mshv_tdcall_args.r11_out,
        }
    }
}