igvmfilegen/vp_context_builder/snp.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! SNP VP context builder.

use crate::vp_context_builder::VpContextBuilder;
use crate::vp_context_builder::VpContextPageState;
use crate::vp_context_builder::VpContextState;
use hvdef::Vtl;
use igvm_defs::PAGE_SIZE_4K;
use loader::importer::BootPageAcceptance;
use loader::importer::SegmentRegister;
use loader::importer::TableRegister;
use loader::importer::X86Register;
use loader::paravisor::HCL_SECURE_VTL;
use std::fmt::Debug;
use x86defs::X64_EFER_SVME;
use x86defs::snp::SevSelector;
use x86defs::snp::SevVmsa;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

/// The interrupt injection type to use for the highest VMPL's VMSA.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum InjectionType {
    /// Normal injection.
    Normal,
    /// Restricted injection, where #HV is the only event the hypervisor may
    /// inject.
    Restricted,
}

/// A hardware SNP VP context that is imported as a VMSA.
#[derive(Debug)]
pub struct SnpHardwareContext {
    /// Whether an assembly stub that accepts the lower 1MB of memory should be
    /// imported as page data.
    accept_lower_1mb: bool,
    /// The page number at which to import this VP context.
    page_number: Option<u64>,
    /// The VMSA for this VP.
    vmsa: SevVmsa,
}

impl SnpHardwareContext {
    /// Create a new SNP VP context builder.
    ///
    /// `enlightened_uefi` specifies if UEFI is enlightened. This will result in
    /// [`VpContextBuilder::finalize`] generating additional trampoline code for
    /// UEFI running without a paravisor, along with setting different fields in
    /// the `SEV_FEATURES` register.
    ///
    /// `shared_gpa_boundary` specifies the virtual top of memory (vTOM)
    /// boundary to program into the VMSA.
    ///
    /// `injection_type` specifies the injection type for the highest enabled
    /// VMPL.
    ///
    /// Only the highest VTL will have a VMSA generated, with lower VTLs being
    /// imported with the VBS format as page data.
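    ///
    /// ```ignore
    /// // Sketch only (hypothetical values): build a VTL2 context using
    /// // restricted injection and a 64 GiB shared GPA boundary.
    /// let ctx = SnpHardwareContext::new(
    ///     Vtl::Vtl2,
    ///     false,
    ///     0x10_0000_0000,
    ///     InjectionType::Restricted,
    /// );
    /// ```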
    pub fn new(
        vtl: Vtl,
        enlightened_uefi: bool,
        shared_gpa_boundary: u64,
        injection_type: InjectionType,
    ) -> Self {
        let mut vmsa: SevVmsa = FromZeros::new_zeroed();

        // Fill in reset values that are needed for consistency.
        vmsa.efer = X64_EFER_SVME;

        // Fill in boilerplate fields of the VMSA.
        vmsa.sev_features.set_snp(true);
        vmsa.sev_features.set_vtom(true);
        vmsa.virtual_tom = shared_gpa_boundary;
        vmsa.sev_features.set_debug_swap(true);

        if enlightened_uefi {
            // Enlightened UEFI requires SevFeatureRestrictInjection to be set
            // in order to receive #HV interrupts.
            assert_eq!(injection_type, InjectionType::Restricted);
            vmsa.sev_features.set_restrict_injection(true);
        } else {
            // Lower VTL images such as VTL0 (UEFI) use
            // SevFeatureAlternateInjection, while VTL2 (HCL) uses
            // SevFeatureRestrictInjection. Additionally, set the BTB isolation
            // and Prevent Host IBS properties for VTL2. VTL2 is responsible
            // for setting these properties on any additional VMSAs.
            if vtl < HCL_SECURE_VTL {
                vmsa.sev_features
                    .set_alternate_injection(injection_type == InjectionType::Restricted);
            } else {
                vmsa.sev_features
                    .set_restrict_injection(injection_type == InjectionType::Restricted);
                vmsa.sev_features.set_snp_btb_isolation(true);
                vmsa.sev_features.set_prevent_host_ibs(true);
                vmsa.sev_features.set_vmsa_reg_prot(true);
                vmsa.sev_features.set_vtom(false);
                vmsa.virtual_tom = 0;
            }
        }

        // Configure the hardware reset value for XFEM. The HCL will execute
        // XSETBV if it needs additional XSAVE support.
        vmsa.xcr0 = 0x1; // Bit 0 (LegacyX87), the required minimum XSAVE state.

        SnpHardwareContext {
            accept_lower_1mb: enlightened_uefi,
            page_number: None,
            vmsa,
        }
    }
}

impl VpContextBuilder for SnpHardwareContext {
    type Register = X86Register;

    fn import_vp_register(&mut self, register: X86Register) {
        let create_vmsa_table_register = |reg: TableRegister| -> SevSelector {
            SevSelector {
                limit: reg.limit as u32,
                base: reg.base,
                ..FromZeros::new_zeroed()
            }
        };

        let create_vmsa_segment_register = |reg: SegmentRegister| -> SevSelector {
            SevSelector {
                limit: reg.limit,
                base: reg.base,
                selector: reg.selector,
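                // Pack the 16-bit x86 attribute word (access byte in bits 0-7,
                // limit 19:16 in bits 8-11, flags in bits 12-15) into the SEV
                // attrib format, which drops the limit nibble and stores the
                // flags in bits 8-11.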
                attrib: (reg.attributes & 0xFF) | ((reg.attributes >> 4) & 0xF00),
            }
        };

        match register {
            X86Register::Gdtr(reg) => self.vmsa.gdtr = create_vmsa_table_register(reg),
            X86Register::Idtr(_) => panic!("Idtr not allowed for SNP"),
            X86Register::Ds(reg) => self.vmsa.ds = create_vmsa_segment_register(reg),
            X86Register::Es(reg) => self.vmsa.es = create_vmsa_segment_register(reg),
            X86Register::Fs(reg) => self.vmsa.fs = create_vmsa_segment_register(reg),
            X86Register::Gs(reg) => self.vmsa.gs = create_vmsa_segment_register(reg),
            X86Register::Ss(reg) => self.vmsa.ss = create_vmsa_segment_register(reg),
            X86Register::Cs(reg) => self.vmsa.cs = create_vmsa_segment_register(reg),
            X86Register::Tr(reg) => self.vmsa.tr = create_vmsa_segment_register(reg),
            X86Register::Cr0(reg) => self.vmsa.cr0 = reg,
            X86Register::Cr3(reg) => self.vmsa.cr3 = reg,
            X86Register::Cr4(reg) => self.vmsa.cr4 = reg,
            X86Register::Efer(reg) => {
                // All SEV guests require EFER.SVME for the VMSA to be valid.
                self.vmsa.efer = reg | X64_EFER_SVME;
            }
            X86Register::Pat(reg) => self.vmsa.pat = reg,
            X86Register::Rbp(reg) => self.vmsa.rbp = reg,
            X86Register::Rip(reg) => self.vmsa.rip = reg,
            X86Register::Rsi(reg) => self.vmsa.rsi = reg,
            X86Register::Rsp(_) => panic!("rsp not allowed for SNP"),
            X86Register::R8(reg) => self.vmsa.r8 = reg,
            X86Register::R9(reg) => self.vmsa.r9 = reg,
            X86Register::R10(reg) => self.vmsa.r10 = reg,
            X86Register::R11(reg) => self.vmsa.r11 = reg,
            X86Register::R12(reg) => self.vmsa.r12 = reg,
            X86Register::Rflags(_) => panic!("rflags not allowed for SNP"),

            X86Register::MtrrDefType(_)
            | X86Register::MtrrPhysBase0(_)
            | X86Register::MtrrPhysMask0(_)
            | X86Register::MtrrPhysBase1(_)
            | X86Register::MtrrPhysMask1(_)
            | X86Register::MtrrPhysBase2(_)
            | X86Register::MtrrPhysMask2(_)
            | X86Register::MtrrPhysBase3(_)
            | X86Register::MtrrPhysMask3(_)
            | X86Register::MtrrPhysBase4(_)
            | X86Register::MtrrPhysMask4(_)
            | X86Register::MtrrFix64k00000(_)
            | X86Register::MtrrFix16k80000(_)
            | X86Register::MtrrFix4kE0000(_)
            | X86Register::MtrrFix4kE8000(_)
            | X86Register::MtrrFix4kF0000(_)
            | X86Register::MtrrFix4kF8000(_) => {
                tracing::warn!(?register, "Ignoring MTRR register for SNP.")
            }
        }
    }

    fn set_vp_context_memory(&mut self, page_base: u64) {
        assert!(self.page_number.is_none(), "only allowed to set vmsa once");
        self.page_number = Some(page_base);
    }

    fn finalize(&mut self, state: &mut Vec<VpContextState>) {
        let Some(page_number) = self.page_number else {
            return;
        };

        // If no paravisor is present, generate a trampoline page that performs
        // validation of the low 1 MB of memory. This validation is expected by
        // UEFI and is normally performed by the HCL, but it must be done by a
        // trampoline when no HCL is present.
        if self.accept_lower_1mb {
            let mut trampoline_page = vec![0u8; PAGE_SIZE_4K as usize];

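            // Layout of the trampoline page:
            //   bytes 0..8   the original entry point (VMSA RIP), used by the
            //                final indirect jump
            //   byte  8      an int3 breakpoint that failure paths jump to
            //   bytes 9..    the pvalidate loop over GPAs 0x1000..0x100000
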
            // Since this page is discarded immediately after it executes, it
            // can be placed anywhere in memory. GPA page zero is a convenient
            // unused location.
            trampoline_page[..8].copy_from_slice(self.vmsa.rip.as_bytes());

            // Place a breakpoint just after the saved entry address; failure
            // paths jump here. With no IDT installed, the #BP escalates to a
            // triple fault.
            let break_offset = size_of::<u64>();
            trampoline_page[break_offset] = 0xCC;

            // Start execution at the first instruction past the breakpoint.
            let mut byte_offset = break_offset + 1;
            self.vmsa.rip = byte_offset as u64;

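            // Copies an encoded instruction into the page and returns the
            // offset of the next free byte.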
            let copy_instr =
                |trampoline_page: &mut Vec<u8>, byte_offset, instruction: &[u8]| -> usize {
                    trampoline_page[byte_offset..byte_offset + instruction.len()]
                        .copy_from_slice(instruction);
                    byte_offset + instruction.len()
                };

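            // The loop below walks esi from 0x1000 (skipping this page) up to
            // ebx = 0x100000 (1 MB), validating one 4KB page per iteration:
            // rax holds the address to validate, ecx = 0 selects a 4KB page
            // size, and edx = 1 requests that the page be marked validated.
            // pvalidate reports failure by setting CF or by returning a
            // nonzero code in eax.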
            // mov esi, 01000h
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0xBE, 0x00, 0x10, 0x00, 0x00],
            );

            // mov ebx, 0100000h
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0xBB, 0x00, 0x00, 0x10, 0x00],
            );

            // xor ecx, ecx
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x33, 0xC9]);

            // mov edx, 1
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0xBA, 0x01, 0x00, 0x00, 0x00],
            );

            // L1:
            let jump_offset = byte_offset;

            // mov eax, esi
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x8B, 0xC6]);

            // pvalidate
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0xF2, 0x0F, 0x01, 0xFF]);

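            // Each conditional branch below is a two-byte short jump: the
            // opcode is emitted first, then a rel8 displacement is patched in,
            // measured from the end of the instruction.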
            // jc Break
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x72]);
            byte_offset += 1;
            trampoline_page[byte_offset - 1] = (break_offset as u8).wrapping_sub(byte_offset as u8);

            // test rax, rax
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x48, 0x85, 0xC0]);

            // jnz Break
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x75]);
            byte_offset += 1;
            trampoline_page[byte_offset - 1] = (break_offset as u8).wrapping_sub(byte_offset as u8);

            // add esi, 01000h
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0x81, 0xC6, 0x00, 0x10, 0x00, 0x00],
            );

            // cmp esi, ebx
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x3B, 0xF3]);

            // jb L1
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x72]);
            byte_offset += 1;
            trampoline_page[byte_offset - 1] = (jump_offset as u8).wrapping_sub(byte_offset as u8);

            // jmp [0] - a rip-relative indirect jmp whose displacement is
            // chosen so that the target address is 0, jumping through the
            // original entry point saved at the start of this page.
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0xFF, 0x25]);
            let relative_offset: u32 = 0u32.wrapping_sub(byte_offset as u32 + 4);
            trampoline_page[byte_offset..byte_offset + 4]
                .copy_from_slice(relative_offset.as_bytes());

            state.push(VpContextState::Page(VpContextPageState {
                page_base: 0,
                page_count: 1,
                acceptance: BootPageAcceptance::Exclusive,
                data: trampoline_page,
            }));
        }

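        // Import the VMSA itself at the requested page number.
        // BootPageAcceptance::VpContext marks this page as the VP's initial
        // context rather than ordinary page data.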
        state.push(VpContextState::Page(VpContextPageState {
            page_base: page_number,
            page_count: 1,
            acceptance: BootPageAcceptance::VpContext,
            data: self.vmsa.as_bytes().to_vec(),
        }));
    }
}