
igvmfilegen/vp_context_builder/snp.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! SNP VP context builder.

use crate::vp_context_builder::VpContextBuilder;
use crate::vp_context_builder::VpContextPageState;
use crate::vp_context_builder::VpContextState;
use hvdef::Vtl;
use igvm_defs::PAGE_SIZE_4K;
use loader::importer::BootPageAcceptance;
use loader::importer::SegmentRegister;
use loader::importer::TableRegister;
use loader::importer::X86Register;
use loader::paravisor::HCL_SECURE_VTL;
use std::fmt::Debug;
use x86defs::X64_EFER_SVME;
use x86defs::snp::SevSelector;
use x86defs::snp::SevVmsa;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

/// The interrupt injection type to use for the highest VMPL's VMSA.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum InjectionType {
    /// Normal injection.
    Normal,
    /// Restricted injection.
    Restricted,
}

/// Whether the secure AVIC is used.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SecureAvic {
    /// Offload the AVIC to the hardware.
    Enabled,
    /// The paravisor emulates the APIC.
    Disabled,
}

/// A hardware SNP VP context that is imported as a VMSA.
#[derive(Debug)]
pub struct SnpHardwareContext {
    /// Whether an assembly stub that accepts the lower 1 MB should be imported
    /// as page data.
    accept_lower_1mb: bool,
    /// The page number to import this VP context at.
    page_number: Option<u64>,
    /// The VMSA for this VP.
    vmsa: SevVmsa,
}

impl SnpHardwareContext {
    /// Create a new SNP VP context builder.
    ///
    /// `enlightened_uefi` specifies whether UEFI is enlightened. If so,
    /// [`VpContextBuilder::finalize`] generates additional trampoline code for
    /// UEFI running without a paravisor, and different fields are set in the
    /// `SEV_FEATURES` register.
    ///
    /// `injection_type` specifies the injection type for the highest enabled
    /// VMPL.
    ///
    /// Only the highest VTL will have a VMSA generated, with lower VTLs being
    /// imported with the VBS format as page data.
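    ///
    /// # Example
    ///
    /// A minimal sketch of constructing a context (not compiled here; the
    /// shared GPA boundary value is illustrative only):
    ///
    /// ```ignore
    /// let context = SnpHardwareContext::new(
    ///     Vtl::Vtl2,                 // the highest VTL gets the VMSA
    ///     false,                     // not an enlightened-UEFI image
    ///     0x80_0000_0000,            // example shared GPA boundary (vTOM)
    ///     InjectionType::Restricted, // VTL2 (HCL) uses restricted injection
    ///     SecureAvic::Disabled,
    /// );
    /// ```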
    pub fn new(
        vtl: Vtl,
        enlightened_uefi: bool,
        shared_gpa_boundary: u64,
        injection_type: InjectionType,
        secure_avic: SecureAvic,
    ) -> Self {
        let mut vmsa: SevVmsa = FromZeros::new_zeroed();

        // Fill in reset values that are needed for consistency.
        vmsa.efer = X64_EFER_SVME;

        // Fill in boilerplate fields of the VMSA.
        vmsa.sev_features.set_snp(true);
        vmsa.sev_features.set_vtom(true);
        vmsa.virtual_tom = shared_gpa_boundary;
        vmsa.sev_features.set_debug_swap(true);

        if enlightened_uefi {
            // Enlightened UEFI requires SevFeatureRestrictInjection to be set
            // in order to receive #HV interrupts.
            assert_eq!(injection_type, InjectionType::Restricted);
            vmsa.sev_features.set_restrict_injection(true);
        } else {
            // Lower VTLs, such as VTL0 images (UEFI), use
            // SevFeatureAlternateInjection, while VTL2 (the HCL) uses
            // SevFeatureRestrictInjection. Additionally, set the BTB isolation
            // and prevent-host-IBS properties for VTL2. VTL2 is responsible
            // for setting these properties on any additional VMSAs.
            if vtl < HCL_SECURE_VTL {
                vmsa.sev_features
                    .set_alternate_injection(injection_type == InjectionType::Restricted);
                if injection_type == InjectionType::Normal {
                    vmsa.sev_features
                        .set_secure_avic(secure_avic == SecureAvic::Enabled);
                    vmsa.sev_features
                        .set_guest_intercept_control(secure_avic == SecureAvic::Enabled);
                }
            } else {
                vmsa.sev_features
                    .set_restrict_injection(injection_type == InjectionType::Restricted);
                vmsa.sev_features.set_snp_btb_isolation(true);
                vmsa.sev_features.set_ibpb_on_entry(true);
                vmsa.sev_features.set_prevent_host_ibs(true);
                vmsa.sev_features.set_vmsa_reg_prot(true);
                vmsa.sev_features.set_vtom(false);
                vmsa.sev_features
                    .set_secure_avic(secure_avic == SecureAvic::Enabled);
                vmsa.virtual_tom = 0;
            }
        }

        // Configure the hardware reset value for XFEM. The HCL will execute
        // XSETBV if it needs additional XSAVE support.
        vmsa.xcr0 = 0x1; // Maps to the legacy x87 bit.

        SnpHardwareContext {
            accept_lower_1mb: enlightened_uefi,
            page_number: None,
            vmsa,
        }
    }
}

impl VpContextBuilder for SnpHardwareContext {
    type Register = X86Register;

    fn import_vp_register(&mut self, register: X86Register) {
        let create_vmsa_table_register = |reg: TableRegister| -> SevSelector {
            SevSelector {
                limit: reg.limit as u32,
                base: reg.base,
                ..FromZeros::new_zeroed()
            }
        };

        let create_vmsa_segment_register = |reg: SegmentRegister| -> SevSelector {
            SevSelector {
                limit: reg.limit,
                base: reg.base,
                selector: reg.selector,
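                // The VMSA stores segment attributes in the packed AMD
                // format (access byte in bits 0..=7, flags nibble in bits
                // 8..=11), so shift the loader's descriptor-style flags
                // (bits 12..=15) down into place.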
                attrib: (reg.attributes & 0xFF) | ((reg.attributes >> 4) & 0xF00),
            }
        };

        match register {
            X86Register::Gdtr(reg) => self.vmsa.gdtr = create_vmsa_table_register(reg),
            X86Register::Idtr(_) => panic!("Idtr not allowed for SNP"),
            X86Register::Ds(reg) => self.vmsa.ds = create_vmsa_segment_register(reg),
            X86Register::Es(reg) => self.vmsa.es = create_vmsa_segment_register(reg),
            X86Register::Fs(reg) => self.vmsa.fs = create_vmsa_segment_register(reg),
            X86Register::Gs(reg) => self.vmsa.gs = create_vmsa_segment_register(reg),
            X86Register::Ss(reg) => self.vmsa.ss = create_vmsa_segment_register(reg),
            X86Register::Cs(reg) => self.vmsa.cs = create_vmsa_segment_register(reg),
            X86Register::Tr(reg) => self.vmsa.tr = create_vmsa_segment_register(reg),
            X86Register::Cr0(reg) => self.vmsa.cr0 = reg,
            X86Register::Cr3(reg) => self.vmsa.cr3 = reg,
            X86Register::Cr4(reg) => self.vmsa.cr4 = reg,
            X86Register::Efer(reg) => {
                // All SEV guests require EFER.SVME for the VMSA to be valid.
                self.vmsa.efer = reg | X64_EFER_SVME;
            }
            X86Register::Pat(reg) => self.vmsa.pat = reg,
            X86Register::Rbp(reg) => self.vmsa.rbp = reg,
            X86Register::Rip(reg) => self.vmsa.rip = reg,
            X86Register::Rsi(reg) => self.vmsa.rsi = reg,
            X86Register::Rsp(_) => panic!("rsp not allowed for SNP"),
            X86Register::R8(reg) => self.vmsa.r8 = reg,
            X86Register::R9(reg) => self.vmsa.r9 = reg,
            X86Register::R10(reg) => self.vmsa.r10 = reg,
            X86Register::R11(reg) => self.vmsa.r11 = reg,
            X86Register::R12(reg) => self.vmsa.r12 = reg,
            X86Register::Rflags(_) => panic!("rflags not allowed for SNP"),

            X86Register::MtrrDefType(_)
            | X86Register::MtrrPhysBase0(_)
            | X86Register::MtrrPhysMask0(_)
            | X86Register::MtrrPhysBase1(_)
            | X86Register::MtrrPhysMask1(_)
            | X86Register::MtrrPhysBase2(_)
            | X86Register::MtrrPhysMask2(_)
            | X86Register::MtrrPhysBase3(_)
            | X86Register::MtrrPhysMask3(_)
            | X86Register::MtrrPhysBase4(_)
            | X86Register::MtrrPhysMask4(_)
            | X86Register::MtrrFix64k00000(_)
            | X86Register::MtrrFix16k80000(_)
            | X86Register::MtrrFix4kE0000(_)
            | X86Register::MtrrFix4kE8000(_)
            | X86Register::MtrrFix4kF0000(_)
            | X86Register::MtrrFix4kF8000(_) => {
                tracing::warn!(?register, "Ignoring MTRR register for SNP.")
            }
        }
    }

    fn set_vp_context_memory(&mut self, page_base: u64) {
        assert!(self.page_number.is_none(), "only allowed to set vmsa once");
        self.page_number = Some(page_base);
    }

    fn finalize(&mut self, state: &mut Vec<VpContextState>) {
        let Some(page_number) = self.page_number else {
            return;
        };

        // If no paravisor is present, then generate a trampoline page to
        // perform validation of the low 1 MB of memory. This is expected by
        // UEFI and normally performed by the HCL, but must be done in a
        // trampoline if no HCL is present.
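        //
        // The bytes emitted below correspond to this assembly sketch
        // (reconstructed from the per-instruction comments; offsets within
        // the page follow from the emission order):
        //
        //         dq  original_rip    ; 64-bit entry point, stored at offset 0
        // Break:  int 3               ; forces a triple fault on failure
        //         mov esi, 01000h     ; start at GPA 0x1000 (page 0 holds this trampoline)
        //         mov ebx, 0100000h   ; end of the low 1 MB
        //         xor ecx, ecx        ; page size = 4K
        //         mov edx, 1          ; validated = 1
        // L1:     mov eax, esi
        //         pvalidate
        //         jc  Break
        //         test rax, rax
        //         jnz Break
        //         add esi, 01000h
        //         cmp esi, ebx
        //         jb  L1
        //         jmp [0]             ; tail-jump to the original entry point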
        if self.accept_lower_1mb {
            let mut trampoline_page = vec![0u8; PAGE_SIZE_4K as usize];

            // Since this page is discarded immediately after it executes, it
            // can be placed anywhere in memory. GPA page zero is a convenient
            // unused location.
            trampoline_page[..8].copy_from_slice(self.vmsa.rip.as_bytes());

            // Place a breakpoint at the front of the page to force a triple
            // fault in case of early failure.
            let break_offset = size_of::<u64>();
            trampoline_page[break_offset] = 0xCC;

            // Set RIP to the trampoline page.
            let mut byte_offset = break_offset + 1;
            self.vmsa.rip = byte_offset as u64;

            let copy_instr =
                |trampoline_page: &mut Vec<u8>, byte_offset, instruction: &[u8]| -> usize {
                    trampoline_page[byte_offset..byte_offset + instruction.len()]
                        .copy_from_slice(instruction);
                    byte_offset + instruction.len()
                };

            // mov esi, 01000h
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0xBE, 0x00, 0x10, 0x00, 0x00],
            );

            // mov ebx, 0100000h
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0xBB, 0x00, 0x00, 0x10, 0x00],
            );

            // xor ecx, ecx
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x33, 0xC9]);

            // mov edx, 1
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0xBA, 0x01, 0x00, 0x00, 0x00],
            );

            // L1:
            let jump_offset = byte_offset;

            // mov eax, esi
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x8B, 0xC6]);

            // pvalidate
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0xF2, 0x0F, 0x01, 0xFF]);

            // jc Break
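            // The short-jump displacement is relative to the end of the
            // two-byte jcc, so after reserving the displacement byte,
            // wrapping_sub yields the (negative) rel8 distance back to the
            // int3 at break_offset. The other short jumps below use the same
            // pattern.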
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x72]);
            byte_offset += 1;
            trampoline_page[byte_offset - 1] = (break_offset as u8).wrapping_sub(byte_offset as u8);

            // test rax, rax
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x48, 0x85, 0xC0]);

            // jnz Break
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x75]);
            byte_offset += 1;
            trampoline_page[byte_offset - 1] = (break_offset as u8).wrapping_sub(byte_offset as u8);

            // add esi, 01000h
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0x81, 0xC6, 0x00, 0x10, 0x00, 0x00],
            );

            // cmp esi, ebx
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x3B, 0xF3]);

            // jb L1
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x72]);
            byte_offset += 1;
            trampoline_page[byte_offset - 1] = (jump_offset as u8).wrapping_sub(byte_offset as u8);

            // jmp [0]
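            // This is a RIP-relative indirect jmp (FF /4): the 32-bit
            // displacement is chosen so that RIP + 4 + disp32 wraps to
            // absolute address 0, loading the 64-bit entry point stored at
            // the start of this page.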
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0xFF, 0x25]);
            let relative_offset: u32 = 0u32.wrapping_sub(byte_offset as u32 + 4);
            trampoline_page[byte_offset..byte_offset + 4]
                .copy_from_slice(relative_offset.as_bytes());

            state.push(VpContextState::Page(VpContextPageState {
                page_base: 0,
                page_count: 1,
                acceptance: BootPageAcceptance::Exclusive,
                data: trampoline_page,
            }));
        }

        state.push(VpContextState::Page(VpContextPageState {
            page_base: page_number,
            page_count: 1,
            acceptance: BootPageAcceptance::VpContext,
            data: self.vmsa.as_bytes().to_vec(),
        }));
    }
}
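
// Usage sketch (hypothetical driver code; the real callers live in the
// igvmfilegen loader and may differ):
//
//     let mut context = SnpHardwareContext::new(
//         Vtl::Vtl2,
//         false,
//         shared_gpa_boundary,
//         InjectionType::Restricted,
//         SecureAvic::Disabled,
//     );
//     context.import_vp_register(X86Register::Cr0(cr0));
//     context.import_vp_register(X86Register::Rip(entry_point));
//     context.set_vp_context_memory(vmsa_page_number);
//     let mut pages = Vec::new();
//     context.finalize(&mut pages);
//     // `pages` now holds the VMSA page (and, for enlightened UEFI, the
//     // low-1MB acceptance trampoline at GPA 0) ready for IGVM import.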