igvmfilegen/vp_context_builder/tdx.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! TDX VP context builder.

use super::VpContextBuilder;
use super::VpContextState;
use crate::vp_context_builder::VpContextPageState;
use igvm_defs::PAGE_SIZE_4K;
use loader::importer::SegmentRegister;
use loader::importer::X86Register;
use loader_defs::shim::TdxTrampolineContext;
use std::mem::offset_of;
use x86defs::X64_EFER_LME;
use x86defs::X86X_MSR_DEFAULT_PAT;
use zerocopy::IntoBytes;

/// Represents a hardware context for TDX. This contains both the set of
/// initial registers and the registers set by the trampoline code.
#[derive(Debug)]
pub struct TdxHardwareContext {
    trampoline_context: TdxTrampolineContext,
    accept_lower_1mb: bool,
}

impl TdxHardwareContext {
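    /// Creates a new TDX hardware context.
    ///
    /// `accept_lower_1mb` controls whether the generated trampoline issues
    /// TDG.MEM.PAGE.ACCEPT calls on the BSP to accept the low 1 MB of the
    /// address space before transferring to the image.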
    pub fn new(accept_lower_1mb: bool) -> Self {
        Self {
            trampoline_context: TdxTrampolineContext::default(),
            accept_lower_1mb,
        }
    }
}

impl VpContextBuilder for TdxHardwareContext {
    type Register = X86Register;

    /// Import a register into the hardware context. Only a subset of registers
    /// is allowed.
    fn import_vp_register(&mut self, register: X86Register) {
        let mut set_data_selector = |reg: SegmentRegister| {
            if self.trampoline_context.data_selector == 0 {
                self.trampoline_context.data_selector = reg.selector;
            } else if self.trampoline_context.data_selector != reg.selector {
                panic!("data selectors must be the same");
            }
        };

        match register {
            X86Register::Gdtr(reg) => {
                self.trampoline_context.gdtr_base = reg.base;
                self.trampoline_context.gdtr_limit = reg.limit;
            }
            X86Register::Idtr(reg) => {
                self.trampoline_context.idtr_base = reg.base;
                self.trampoline_context.idtr_limit = reg.limit;
            }
            X86Register::Ds(reg)
            | X86Register::Es(reg)
            | X86Register::Fs(reg)
            | X86Register::Gs(reg)
            | X86Register::Ss(reg) => set_data_selector(reg),
            X86Register::Cs(reg) => self.trampoline_context.code_selector = reg.selector,
            X86Register::Tr(reg) => {
                self.trampoline_context.task_selector = reg.selector;
            }
            X86Register::Cr0(cr0) => self.trampoline_context.cr0 = cr0,
            X86Register::Cr3(cr3) => {
                let cr3_u32: u32 = cr3.try_into().expect("cr3 must fit in u32");
                self.trampoline_context.transition_cr3 = cr3_u32;
                self.trampoline_context.cr3 = cr3;
            }
            X86Register::Cr4(cr4) => self.trampoline_context.cr4 = cr4,
            X86Register::Efer(efer) => {
                // TDX guests are not permitted to set EFER explicitly.  Verify
                // that the requested EFER value is compatible with the
                // architecturally imposed value.
                if efer & X64_EFER_LME == 0 {
                    panic!("EFER LME must be set for tdx")
                }
            }
            X86Register::Pat(pat) => {
                if pat != X86X_MSR_DEFAULT_PAT {
                    panic!("PAT must be default for tdx")
                }
            }
            X86Register::Rbp(rbp) => self.trampoline_context.rbp = rbp,
            X86Register::Rip(rip) => self.trampoline_context.initial_rip = rip,
            X86Register::Rsi(rsi) => self.trampoline_context.rsi = rsi,
            X86Register::Rsp(rsp) => self.trampoline_context.rsp = rsp,
            X86Register::R8(r8) => self.trampoline_context.r8 = r8,
            X86Register::R9(r9) => self.trampoline_context.r9 = r9,
            X86Register::R10(r10) => self.trampoline_context.r10 = r10,
            X86Register::R11(r11) => self.trampoline_context.r11 = r11,
            X86Register::R12(_) => panic!("r12 not allowed for tdx"),
            X86Register::Rflags(_) => panic!("rflags not allowed for tdx"),

            X86Register::MtrrDefType(_)
            | X86Register::MtrrPhysBase0(_)
            | X86Register::MtrrPhysMask0(_)
            | X86Register::MtrrPhysBase1(_)
            | X86Register::MtrrPhysMask1(_)
            | X86Register::MtrrPhysBase2(_)
            | X86Register::MtrrPhysMask2(_)
            | X86Register::MtrrPhysBase3(_)
            | X86Register::MtrrPhysMask3(_)
            | X86Register::MtrrPhysBase4(_)
            | X86Register::MtrrPhysMask4(_)
            | X86Register::MtrrFix64k00000(_)
            | X86Register::MtrrFix16k80000(_)
            | X86Register::MtrrFix4kE0000(_)
            | X86Register::MtrrFix4kE8000(_)
            | X86Register::MtrrFix4kF0000(_)
            | X86Register::MtrrFix4kF8000(_) => {
                tracing::warn!(?register, "Ignoring MTRR register for TDX.")
            }
        }
    }

    fn set_vp_context_memory(&mut self, _page_base: u64) {
        unimplemented!("not supported for TDX");
    }

    fn finalize(&mut self, state: &mut Vec<VpContextState>) {
        // Construct and load an initial temporary GDT to use for the transition
        // to long mode.  A single selector (0008:) is defined as a 64-bit code
        // segment.
        self.trampoline_context.static_gdt[0x08] = 0xFF;
        self.trampoline_context.static_gdt[0x09] = 0xFF;
        self.trampoline_context.static_gdt[0x0D] = 0x9B;
        self.trampoline_context.static_gdt[0x0E] = 0xA0;

        self.trampoline_context.static_gdt_limit = 0xF;
        self.trampoline_context.static_gdt_base =
            0xFFFFF000 + offset_of!(TdxTrampolineContext, static_gdt) as u32;
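
        // The four bytes written above define the descriptor at selector
        // 0x08: limit[15:0] = 0xFFFF, base = 0, access = 0x9B (present,
        // DPL 0, execute/read, accessed), and flags = 0xA0 (G = 1, L = 1),
        // i.e. a 64-bit code segment.  static_gdt_limit followed by
        // static_gdt_base forms the 6-byte pseudo-descriptor consumed by the
        // lgdt emitted below.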

        // Generate a 32-bit assembly trampoline to enable long mode and transfer
        // to the specified context.
        let mut byte_offset = 0xFF0;
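
        // The reset page is placed at GPA 0xFFFF_F000 (see the state.push at
        // the end of this function), so offset 0xFF0 within it is the
        // architectural reset vector at 0xFFFF_FFF0, where TDX VPs begin
        // executing in 32-bit protected mode.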

        // Fill the reset page with INT 3 as a standard code fill value.
        let mut reset_page = vec![0xCCu8; PAGE_SIZE_4K as usize];

        // Copy trampoline_context to the start of the reset page.
        let trampoline_context = self.trampoline_context.as_bytes();
        reset_page[0..trampoline_context.len()].copy_from_slice(trampoline_context);

        let copy_instr =
            |trampoline_page: &mut Vec<u8>, byte_offset, instruction: &[u8]| -> usize {
                trampoline_page[byte_offset..byte_offset + instruction.len()]
                    .copy_from_slice(instruction);
                byte_offset + instruction.len()
            };
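
        // Short branches to forward labels below are emitted with their rel8
        // displacement byte left unwritten; once the label's offset is
        // known, the byte is patched as `target - jump_offset`, where
        // jump_offset is the offset immediately after the displacement byte.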

        // jmp InitialCode
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0xE9]);
        let mut relative_offset =
            (trampoline_context.len() as u32).wrapping_sub((byte_offset + 4) as u32);
        copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        byte_offset = trampoline_context.len();

        // lgdt [staticGdt]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x01, 0x15]);
        relative_offset = 0xFFFFF000 + offset_of!(TdxTrampolineContext, static_gdt_limit) as u32;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // Load the control registers.  CR0 must be last so long mode is properly
        // enabled (the architecture sets LME prior to initial entry), and the CR0
        // load must be followed by a far jump to complete long mode
        // configuration.

        // mov eax, [initialCr4]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0x05]);
        relative_offset = 0xFFFFF000 + offset_of!(TdxTrampolineContext, cr4) as u32;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov cr4, eax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x22, 0xE0]);

        // mov eax, [transitionCr3]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0x05]);
        relative_offset = 0xFFFFF000 + offset_of!(TdxTrampolineContext, transition_cr3) as u32;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov cr3, eax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x22, 0xD8]);

        // mov eax, [initialCr0]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0x05]);
        relative_offset = 0xFFFFF000 + offset_of!(TdxTrampolineContext, cr0) as u32;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov cr0, eax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x22, 0xC0]);

        // jmp far L2
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0xEA]);
        relative_offset = 0xFFFFF000 + byte_offset as u32 + 6;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x08, 0x00]);
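
        // The 0xEA far jump takes a ptr16:32 operand: the 32-bit target
        // offset emitted above followed by selector 0x0008, the 64-bit code
        // segment in the static GDT.  Reloading CS with a long-mode code
        // segment completes the switch into 64-bit mode.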

        // L2:

        // Load the 64-bit CR3 now that long mode is active.

        // mov rax, [initialCr3]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x8B, 0x05]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, cr3) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());
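
        // From here the code runs in 64-bit mode, where a mod/rm byte of
        // 0x05 selects RIP-relative addressing: displacements are computed
        // as the field's offset within the page minus the offset of the next
        // instruction (byte_offset + 4), rather than as absolute addresses.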

        // mov cr3, rax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x22, 0xD8]);

        // Load descriptor tables and selectors, except CS which will be loaded in
        // the final jump.  If no GDT is specified, then skip loading all
        // selectors.

        // mov ax, [initialGdtrLimit]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, gdtr_limit) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // test ax, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x85, 0xC0]);

        // jz L4
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let l4_offset = byte_offset;

        // lgdt [initialGdtr]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x01, 0x15]);
        relative_offset = (offset_of!(TdxTrampolineContext, gdtr_limit) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov ax, [initialIdtrLimit]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, idtr_limit) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // test ax, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x85, 0xC0]);

        // jz @f
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let jump_offset = byte_offset;

        // lidt [initialIdtr]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x01, 0x1D]);
        relative_offset = (offset_of!(TdxTrampolineContext, idtr_limit) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // @@:
        reset_page[jump_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(jump_offset)) as u8;

        // mov ax, [dataSelector]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, data_selector) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov ss, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xD0]);

        // mov ds, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xD8]);

        // mov es, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xC0]);

        // mov fs, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xE0]);

        // mov gs, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xE8]);

        // mov ax, [taskSelector]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, task_selector) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // test ax, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x85, 0xC0]);

        // jz @f
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let jump_offset = byte_offset;

        // ltr ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x00, 0xD8]);

        // @@:
        reset_page[jump_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(jump_offset)) as u8;

        // L4:
        reset_page[l4_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(l4_offset)) as u8;

        // test esi, esi
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x85, 0xF6]);

        // Skip the mailbox spinloop if we are on the BSP

        // jz skip_mailbox_for_bsp
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let skip_mailbox_for_bsp = byte_offset;

        // Read the APIC_ID of this AP with a TDG.VP.VMCALL hypercall

        // xor eax, eax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x31, 0xC0]);
        // mov ecx, 1c00h
        byte_offset = copy_instr(
            &mut reset_page,
            byte_offset,
            &[0xB9, 0x00, 0x1C, 0x00, 0x00],
        );

        // xor r10, r10
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x4D, 0x31, 0xD2]);

        // mov r11d, 01fh
        byte_offset = copy_instr(
            &mut reset_page,
            byte_offset,
            &[0x41, 0xBB, 0x1F, 0x00, 0x00, 0x00],
        );

        // mov r12d, 802h
        byte_offset = copy_instr(
            &mut reset_page,
            byte_offset,
            &[0x41, 0xBC, 0x02, 0x08, 0x00, 0x00],
        );

        // tdcall
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x0F, 0x01, 0xCC]);
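
        // Register protocol for the TDG.VP.VMCALL above, per the TDX GHCI
        // specification: rax = 0 selects TDG.VP.VMCALL, ecx = 1c00h exposes
        // r10-r12 to the host, r10 = 0 selects the standard VMCALL
        // namespace, r11 = 1fh is Instruction.RDMSR, and r12 = 802h is the
        // IA32_X2APIC_APICID MSR.  The MSR value (this AP's APIC ID) is
        // returned in r11.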

        // Spin until the kernel requests this AP to continue in the mailbox

        let mailbox_spinloop = byte_offset;
        // mov eax, [mailbox_apic_id]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, mailbox_apic_id) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());
        // cmp r11d, eax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x41, 0x39, 0xC3]);

        // jne mailbox_spinloop
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x75]);
        byte_offset += 1;
        reset_page[byte_offset.wrapping_sub(1)] =
            (mailbox_spinloop.wrapping_sub(byte_offset)) as u8;

        // xor ebx, ebx
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x31, 0xDB]);

        // mov ebx, [mailbox_command]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0x1D]);
        relative_offset = (offset_of!(TdxTrampolineContext, mailbox_command) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov dx, 01h
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0xBA, 0x01, 0x00]);

        // cmp ebx, edx
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x39, 0xD3]);

        // jne mailbox_spinloop
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x75]);
        byte_offset += 1;
        reset_page[byte_offset.wrapping_sub(1)] =
            (mailbox_spinloop.wrapping_sub(byte_offset)) as u8;

        // skip_mailbox_for_bsp:
        reset_page[skip_mailbox_for_bsp.wrapping_sub(1)] =
            (byte_offset.wrapping_sub(skip_mailbox_for_bsp)) as u8;
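
        // At this point an AP has observed mailbox_apic_id matching the APIC
        // ID returned in r11 and mailbox_command equal to 1 (wakeup); the
        // BSP (esi == 0) branches directly here without touching the
        // mailbox.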

        // Execute TDG.MEM.PAGE.ACCEPT to accept the low 1 MB of the address
        // space.  This is only required if the start context is in VTL 0, and
        // only on the BSP.
        if self.accept_lower_1mb {
            // test esi, esi
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x85, 0xF6]);

            // jnz L3
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x75]);
            byte_offset += 1;
            let l3_offset = byte_offset;

            // L2:
            // xor ecx, ecx
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x33, 0xC9]);

            // xor edx, edx
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x33, 0xD2]);

            // mov edi, 0100000h
            byte_offset = copy_instr(
                &mut reset_page,
                byte_offset,
                &[0xBF, 0x00, 0x00, 0x10, 0x00],
            );

            // L1:
            let jump_offset = byte_offset;

            // mov eax, 06h
            byte_offset = copy_instr(
                &mut reset_page,
                byte_offset,
                &[0xB8, 0x06, 0x00, 0x00, 0x00],
            );

            // tdcall
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x0F, 0x01, 0xCC]);
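
            // For the TDG.MEM.PAGE.ACCEPT above: rax = 6 selects the leaf,
            // and rcx carries the page's GPA with the low level bits clear,
            // denoting a 4K mapping.  A nonzero rax on return means the
            // accept failed.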

            // test rax, rax
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x85, 0xC0]);

            // jne BreakPoint
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x85]);
            byte_offset += 4;
            let relative_offset = 0xFEFu32.wrapping_sub(byte_offset as u32);
            copy_instr(
                &mut reset_page,
                byte_offset.wrapping_sub(4),
                relative_offset.as_bytes(),
            );
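
            // Offset 0xFEF lies in the INT 3 fill just below the reset
            // vector, so a failed accept lands on a breakpoint instead of
            // continuing.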

            // add ecx, 01000h
            byte_offset = copy_instr(
                &mut reset_page,
                byte_offset,
                &[0x81, 0xC1, 0x00, 0x10, 0x00, 0x00],
            );

            // cmp ecx, edi
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x3B, 0xCF]);

            // jb L1
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x72]);
            byte_offset += 1;
            reset_page[byte_offset.wrapping_sub(1)] = (jump_offset.wrapping_sub(byte_offset)) as u8;

            // L3:
            reset_page[l3_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(l3_offset)) as u8;
        }

        // Load entry register state and transfer to the image.

        // test esi, esi
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x85, 0xF6]);

        // jz L7
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let l7_offset = byte_offset;

        // xor rax, rax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x31, 0xC0]);

        // mov [mailbox_command], ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x89, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, mailbox_command) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov rax, [mailbox_wakeup_vector]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, mailbox_wakeup_vector) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov [initialRip], rax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x89, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, initial_rip) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // L7:
        reset_page[l7_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(l7_offset)) as u8;
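
        // APs have now acknowledged the wakeup by clearing mailbox_command
        // and redirected initial_rip to the OS-supplied wakeup vector; the
        // BSP (which took the jz above) keeps the initial_rip imported
        // earlier.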

        // mov rsp, [initialRsp]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x8B, 0x25]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, rsp) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov rbp, [initialRbp]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x8B, 0x2D]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, rbp) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov ecx, esi
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0xCE]);

        // mov rsi, [initialRsi]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x8B, 0x35]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, rsi) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov r8, [initialR8]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x4C, 0x8B, 0x05]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, r8) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov r9, [initialR9]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x4C, 0x8B, 0x0D]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, r9) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov r10, [initialR10]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x4C, 0x8B, 0x15]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, r10) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov r11, [initialR11]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x4C, 0x8B, 0x1D]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, r11) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov ax, [initialCs]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, code_selector) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // test ax, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x85, 0xC0]);

        // jz @f
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let jump_offset = byte_offset;

        // jmp far [initialRip]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0xFF, 0x2D]);
        relative_offset = (offset_of!(TdxTrampolineContext, initial_rip) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // @@:
        reset_page[jump_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(jump_offset)) as u8;

        // jmp [initialRip]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0xFF, 0x25]);
        relative_offset = (offset_of!(TdxTrampolineContext, initial_rip) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());
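
        // If a code selector was imported, the far form above reloads CS
        // from an m16:64 operand read at initial_rip (the selector comes
        // from the two bytes after the 8-byte target, which the context
        // layout must place immediately after initial_rip); otherwise the
        // near form jumps through initial_rip and keeps CS from the static
        // GDT.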

        // Add this data to the architectural reset page.
        state.push(VpContextState::Page(VpContextPageState {
            page_base: 0xFFFFF,
            page_count: 1,
            acceptance: loader::importer::BootPageAcceptance::Exclusive,
            data: reset_page,
        }));
    }
}