igvmfilegen/vp_context_builder/tdx.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! TDX VP context builder.

use super::VpContextBuilder;
use super::VpContextState;
use crate::vp_context_builder::VpContextPageState;
use igvm_defs::PAGE_SIZE_4K;
use loader::importer::SegmentRegister;
use loader::importer::X86Register;
use std::mem::offset_of;
use x86defs::X64_EFER_LME;
use x86defs::X86X_MSR_DEFAULT_PAT;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

/// Fields in the trampoline context must be loaded from memory by the
/// trampoline code.
///
/// Note that this trampoline context must also be used for bringing up APs, as
/// the code placed in the reset vector will use this format to figure out what
/// register state to load.
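///
/// The context is copied to offset 0 of the architectural reset page, which
/// `finalize` places at guest physical address 0xFFFFF000. As a sketch, the
/// flat address of a field as seen by the trampoline code is derived like
/// this (the base constant mirrors what `finalize` uses):
///
/// ```ignore
/// use std::mem::offset_of;
///
/// // GPA of the reset page, per `finalize` below.
/// const RESET_PAGE_BASE: u32 = 0xFFFF_F000;
///
/// // Flat address of `start_gate`, as referenced by the generated code.
/// let start_gate_address =
///     RESET_PAGE_BASE + offset_of!(TdxTrampolineContext, start_gate) as u32;
/// ```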
#[repr(C)]
#[derive(Debug, Default, Clone, Copy, IntoBytes, Immutable, KnownLayout)]
pub struct TdxTrampolineContext {
    start_gate: u32,

    data_selector: u16,
    static_gdt_limit: u16,
    static_gdt_base: u32,

    task_selector: u16,
    idtr_limit: u16,
    idtr_base: u64,

    initial_rip: u64,
    code_selector: u16,
    padding_2: [u16; 2],
    gdtr_limit: u16,
    gdtr_base: u64,

    rsp: u64,
    rbp: u64,
    rsi: u64,
    r8: u64,
    r9: u64,
    r10: u64,
    r11: u64,
    cr0: u64,
    cr3: u64,
    cr4: u64,
    transition_cr3: u32,
    padding_3: u32,

    static_gdt: [u8; 16],
}

/// Represents a hardware context for TDX. This contains both the initial
/// register state and the registers set by the trampoline code.
#[derive(Debug)]
pub struct TdxHardwareContext {
    trampoline_context: TdxTrampolineContext,
    accept_lower_1mb: bool,
}

impl TdxHardwareContext {
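    /// Create a new TDX hardware context builder. `accept_lower_1mb` controls
    /// whether the generated trampoline accepts the low 1 MB of the address
    /// space on the BSP (see `finalize`).
    ///
    /// A minimal usage sketch (the register value is hypothetical):
    ///
    /// ```ignore
    /// let mut context = TdxHardwareContext::new(true);
    /// context.import_vp_register(X86Register::Cr3(0x4000));
    /// let mut state = Vec::new();
    /// context.finalize(&mut state);
    /// ```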
    pub fn new(accept_lower_1mb: bool) -> Self {
        Self {
            trampoline_context: TdxTrampolineContext::default(),
            accept_lower_1mb,
        }
    }
}

impl VpContextBuilder for TdxHardwareContext {
    type Register = X86Register;

    /// Import a register into the hardware context. Only a subset of registers
    /// is allowed.
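    ///
    /// Registers that TDX does not permit to be set directly (e.g. R12 or
    /// RFLAGS) cause a panic; MTRR registers are ignored with a warning.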
    fn import_vp_register(&mut self, register: X86Register) {
        let mut set_data_selector = |reg: SegmentRegister| {
            if self.trampoline_context.data_selector == 0 {
                self.trampoline_context.data_selector = reg.selector;
            } else if self.trampoline_context.data_selector != reg.selector {
                panic!("data selectors must be the same");
            }
        };

        match register {
            X86Register::Gdtr(reg) => {
                self.trampoline_context.gdtr_base = reg.base;
                self.trampoline_context.gdtr_limit = reg.limit;
            }
            X86Register::Idtr(reg) => {
                self.trampoline_context.idtr_base = reg.base;
                self.trampoline_context.idtr_limit = reg.limit;
            }
            X86Register::Ds(reg)
            | X86Register::Es(reg)
            | X86Register::Fs(reg)
            | X86Register::Gs(reg)
            | X86Register::Ss(reg) => set_data_selector(reg),
            X86Register::Cs(reg) => self.trampoline_context.code_selector = reg.selector,
            X86Register::Tr(reg) => {
                self.trampoline_context.task_selector = reg.selector;
            }
            X86Register::Cr0(cr0) => self.trampoline_context.cr0 = cr0,
            X86Register::Cr3(cr3) => {
                let cr3_u32: u32 = cr3.try_into().expect("cr3 must fit in u32");
                self.trampoline_context.transition_cr3 = cr3_u32;
                self.trampoline_context.cr3 = cr3;
            }
            X86Register::Cr4(cr4) => self.trampoline_context.cr4 = cr4,
            X86Register::Efer(efer) => {
                // TDX guests are not permitted to set EFER explicitly.  Verify
                // that the requested EFER value is compatible with the
                // architecturally imposed value.
                if efer & X64_EFER_LME == 0 {
                    panic!("EFER LME must be set for tdx")
                }
            }
            X86Register::Pat(pat) => {
                if pat != X86X_MSR_DEFAULT_PAT {
                    panic!("PAT must be default for tdx")
                }
            }
            X86Register::Rbp(rbp) => self.trampoline_context.rbp = rbp,
            X86Register::Rip(rip) => self.trampoline_context.initial_rip = rip,
            X86Register::Rsi(rsi) => self.trampoline_context.rsi = rsi,
            X86Register::Rsp(rsp) => self.trampoline_context.rsp = rsp,
            X86Register::R8(r8) => self.trampoline_context.r8 = r8,
            X86Register::R9(r9) => self.trampoline_context.r9 = r9,
            X86Register::R10(r10) => self.trampoline_context.r10 = r10,
            X86Register::R11(r11) => self.trampoline_context.r11 = r11,
            X86Register::R12(_) => panic!("r12 not allowed for tdx"),
            X86Register::Rflags(_) => panic!("rflags not allowed for tdx"),

            X86Register::MtrrDefType(_)
            | X86Register::MtrrPhysBase0(_)
            | X86Register::MtrrPhysMask0(_)
            | X86Register::MtrrPhysBase1(_)
            | X86Register::MtrrPhysMask1(_)
            | X86Register::MtrrPhysBase2(_)
            | X86Register::MtrrPhysMask2(_)
            | X86Register::MtrrPhysBase3(_)
            | X86Register::MtrrPhysMask3(_)
            | X86Register::MtrrPhysBase4(_)
            | X86Register::MtrrPhysMask4(_)
            | X86Register::MtrrFix64k00000(_)
            | X86Register::MtrrFix16k80000(_)
            | X86Register::MtrrFix4kE0000(_)
            | X86Register::MtrrFix4kE8000(_)
            | X86Register::MtrrFix4kF0000(_)
            | X86Register::MtrrFix4kF8000(_) => {
                tracing::warn!(?register, "Ignoring MTRR register for TDX.")
            }
        }
    }

    fn set_vp_context_memory(&mut self, _page_base: u64) {
        unimplemented!("not supported for TDX");
    }

    fn finalize(&mut self, state: &mut Vec<VpContextState>) {
        // Construct and load an initial temporary GDT to use for the transition
        // to long mode.  A single selector (0008:) is defined as a 64-bit code
        // segment.
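        // Limit 0xFFFF in bytes 8..10, access byte 0x9B (present, code,
        // execute/read, accessed) at byte 13, flags 0xA0 (granularity + long
        // mode) at byte 14; all other descriptor bytes remain zero.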
        self.trampoline_context.static_gdt[0x08] = 0xFF;
        self.trampoline_context.static_gdt[0x09] = 0xFF;
        self.trampoline_context.static_gdt[0x0D] = 0x9B;
        self.trampoline_context.static_gdt[0x0E] = 0xA0;

        self.trampoline_context.static_gdt_limit = 0xF;
        self.trampoline_context.static_gdt_base =
            0xFFFFF000 + offset_of!(TdxTrampolineContext, static_gdt) as u32;

        // Generate a 32-bit assembly trampoline to enable long mode and transfer
        // to the specified context.
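        // The x86 reset vector sits 16 bytes below the top of the 4 GB address
        // space (0xFFFFFFF0), which is offset 0xFF0 within this page.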
        let mut byte_offset = 0xFF0;

        // Fill the reset page with INT 3 as a standard code fill value.
        let mut reset_page = vec![0xCCu8; PAGE_SIZE_4K as usize];

        // Copy trampoline_context to the start of the reset page.
        let trampoline_context = self.trampoline_context.as_bytes();
        reset_page[0..trampoline_context.len()].copy_from_slice(trampoline_context);

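        // Copy an encoded instruction (or operand bytes) into the page at
        // `byte_offset` and return the offset just past it.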
        let copy_instr =
            |trampoline_page: &mut Vec<u8>, byte_offset, instruction: &[u8]| -> usize {
                trampoline_page[byte_offset..byte_offset + instruction.len()]
                    .copy_from_slice(instruction);
                byte_offset + instruction.len()
            };

        // jmp InitialCode
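        // (E9 takes a rel32 displacement measured from the end of the 5-byte
        // instruction; the target is the first byte after the trampoline
        // context.)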
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0xE9]);
        let mut relative_offset =
            (trampoline_context.len() as u32).wrapping_sub((byte_offset + 4) as u32);
        copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        byte_offset = trampoline_context.len();

        // Spin forever until this processor is selected to start.
        // L0:
        let l0_offset = byte_offset;

        // cmp esi, [startGate]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x3B, 0x35]);
        relative_offset = 0xFFFFF000 + offset_of!(TdxTrampolineContext, start_gate) as u32;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // jne L0
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x75]);
        let jne_l0_offset = (l0_offset.wrapping_sub(byte_offset + 1)) as u8;
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[jne_l0_offset]);

        // lgdt [staticGdt]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x01, 0x15]);
        relative_offset = 0xFFFFF000 + offset_of!(TdxTrampolineContext, static_gdt_limit) as u32;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // Load the control registers.  CR0 must be last so long mode is properly
        // enabled (the architecture sets LME prior to initial entry), and the CR0
        // load must be followed by a far jump to complete long mode
        // configuration.

        // mov eax, [initialCr4]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0x05]);
        relative_offset = 0xFFFFF000 + offset_of!(TdxTrampolineContext, cr4) as u32;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov cr4, eax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x22, 0xE0]);

        // mov eax, [transitionCr3]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0x05]);
        relative_offset = 0xFFFFF000 + offset_of!(TdxTrampolineContext, transition_cr3) as u32;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov cr3, eax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x22, 0xD8]);

        // mov eax, [initialCr0]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0x05]);
        relative_offset = 0xFFFFF000 + offset_of!(TdxTrampolineContext, cr0) as u32;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov cr0, eax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x22, 0xC0]);

        // jmp far L2
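        // (EA takes a 6-byte ptr16:32 operand: the 32-bit flat address of L2
        // followed by the 0x0008 code selector from the static GDT.)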
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0xEA]);
        relative_offset = 0xFFFFF000 + byte_offset as u32 + 6;
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x08, 0x00]);

        // L2:

        // Load the 64-bit CR3 now that long mode is active.

        // mov rax, [initialCr3]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x8B, 0x05]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, cr3) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov cr3, rax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x22, 0xD8]);

        // Load descriptor tables and selectors, except CS which will be loaded in
        // the final jump.  If no GDT is specified, then skip loading all
        // selectors.

        // mov ax, [initialGdtrLimit]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, gdtr_limit) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // test ax, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x85, 0xC0]);

        // jz L4
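        // (0x74 is jz rel8; the displacement byte is skipped here and patched
        // at the L4 label below, once its offset is known.)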
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let l4_offset = byte_offset as u32;

        // lgdt [initialGdtr]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x01, 0x15]);
        relative_offset = (offset_of!(TdxTrampolineContext, gdtr_limit) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov ax, [initialIdtrLimit]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, idtr_limit) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // test ax, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x85, 0xC0]);

        // jz @f
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let jump_offset = byte_offset;

        // lidt [initialIdtr]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x01, 0x1D]);
        relative_offset = (offset_of!(TdxTrampolineContext, idtr_limit) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // @@:
        reset_page[jump_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(jump_offset)) as u8;

        // mov ax, [dataSelector]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, data_selector) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov ss, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xD0]);

        // mov ds, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xD8]);

        // mov es, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xC0]);

        // mov fs, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xE0]);

        // mov gs, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8E, 0xE8]);

        // mov ax, [taskSelector]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, task_selector) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // test ax, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x85, 0xC0]);

        // jz @f
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let jump_offset = byte_offset;

        // ltr ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x00, 0xD8]);

        // @@:
        reset_page[jump_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(jump_offset)) as u8;

        // L4:
        reset_page[(l4_offset as usize).wrapping_sub(1)] =
            (byte_offset.wrapping_sub(l4_offset as usize)) as u8;

        // Execute TDG.MEM.PAGE.ACCEPT to accept the low 1 MB of the address
        // space.  This is only required if the start context is in VTL 0, and
        // only on the BSP.
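        // At reset esi is assumed to hold the VP index (zero selects the
        // BSP). The loop below issues TDCALL leaf 6 (TDG.MEM.PAGE.ACCEPT)
        // with the page GPA in rcx, advancing in 4 KB steps up to 1 MB.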
        if self.accept_lower_1mb {
            // test esi, esi
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x85, 0xF6]);

            // jnz L3
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x75]);
            byte_offset += 1;
            let l3_offset = byte_offset;

            // xor ecx, ecx
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x33, 0xC9]);

            // xor edx, edx
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x33, 0xD2]);

            // mov edi, 0100000h
            byte_offset = copy_instr(
                &mut reset_page,
                byte_offset,
                &[0xBF, 0x00, 0x00, 0x10, 0x00],
            );

            // L1:
            let jump_offset = byte_offset;

            // mov eax, 06h
            byte_offset = copy_instr(
                &mut reset_page,
                byte_offset,
                &[0xB8, 0x06, 0x00, 0x00, 0x00],
            );

            // tdcall
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x0F, 0x01, 0xCC]);

            // test rax, rax
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x85, 0xC0]);

            // jne BreakPoint
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x0F, 0x85]);
            byte_offset += 4;
            // The target 0xFEF is the INT 3 fill byte just below the reset
            // vector; the displacement is a four-byte rel32.
            let relative_offset = (0xFEF - byte_offset) as u32;
            copy_instr(
                &mut reset_page,
                byte_offset.wrapping_sub(4),
                relative_offset.as_bytes(),
            );

            // add ecx, 01000h
            byte_offset = copy_instr(
                &mut reset_page,
                byte_offset,
                &[0x81, 0xC1, 0x00, 0x10, 0x00, 0x00],
            );

            // cmp ecx, edi
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x3B, 0xCF]);

            // jb L1
            byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x72]);
            byte_offset += 1;
            reset_page[byte_offset.wrapping_sub(1)] = (jump_offset.wrapping_sub(byte_offset)) as u8;

            // L3:
            reset_page[l3_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(l3_offset)) as u8;
        }

        // Load entry register state and transfer to the image.

        // mov rsp, [initialRsp]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x8B, 0x25]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, rsp) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov rbp, [initialRbp]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x8B, 0x2D]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, rbp) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov ecx, esi
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x8B, 0xCE]);

        // mov rsi, [initialRsi]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0x8B, 0x35]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, rsi) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov r8, [initialR8]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x4C, 0x8B, 0x05]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, r8) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov r9, [initialR9]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x4C, 0x8B, 0x0D]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, r9) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov r10, [initialR10]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x4C, 0x8B, 0x15]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, r10) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov r11, [initialR11]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x4C, 0x8B, 0x1D]);
        relative_offset =
            (offset_of!(TdxTrampolineContext, r11) as u32).wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // mov ax, [initialCs]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x8B, 0x05]);
        relative_offset = (offset_of!(TdxTrampolineContext, code_selector) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // test ax, ax
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x66, 0x85, 0xC0]);

        // jz @f
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x74]);
        byte_offset += 1;
        let jump_offset = byte_offset;

        // jmp far [initialRip]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0xFF, 0x2D]);
        relative_offset = (offset_of!(TdxTrampolineContext, initial_rip) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        byte_offset = copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // @@:
        reset_page[jump_offset.wrapping_sub(1)] = (byte_offset.wrapping_sub(jump_offset)) as u8;

        // jmp [initialRip]
        byte_offset = copy_instr(&mut reset_page, byte_offset, &[0x48, 0xFF, 0x25]);
        relative_offset = (offset_of!(TdxTrampolineContext, initial_rip) as u32)
            .wrapping_sub((byte_offset + 4) as u32);
        copy_instr(&mut reset_page, byte_offset, relative_offset.as_bytes());

        // Add this data to the architectural reset page.
        state.push(VpContextState::Page(VpContextPageState {
            page_base: 0xFFFFF,
            page_count: 1,
            acceptance: loader::importer::BootPageAcceptance::Exclusive,
            data: reset_page,
        }));
    }
}
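
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal smoke-test sketch: the register values below are hypothetical
    // and chosen only to satisfy the checks in `import_vp_register`.
    #[test]
    fn finalize_emits_one_reset_page() {
        let mut context = TdxHardwareContext::new(false);
        context.import_vp_register(X86Register::Efer(X64_EFER_LME));
        context.import_vp_register(X86Register::Pat(X86X_MSR_DEFAULT_PAT));
        context.import_vp_register(X86Register::Cr3(0x4000));

        let mut state = Vec::new();
        context.finalize(&mut state);

        // The builder should emit exactly one exclusive 4K page at the
        // architectural reset page base.
        assert_eq!(state.len(), 1);
        match &state[0] {
            VpContextState::Page(page) => {
                assert_eq!(page.page_base, 0xFFFFF);
                assert_eq!(page.page_count, 1);
                assert_eq!(page.data.len(), PAGE_SIZE_4K as usize);
            }
            _ => panic!("expected a reset page"),
        }
    }
}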