// igvmfilegen/vp_context_builder/snp.rs

use crate::vp_context_builder::VpContextBuilder;
use crate::vp_context_builder::VpContextPageState;
use crate::vp_context_builder::VpContextState;
use hvdef::Vtl;
use igvm_defs::PAGE_SIZE_4K;
use loader::importer::BootPageAcceptance;
use loader::importer::SegmentRegister;
use loader::importer::TableRegister;
use loader::importer::X86Register;
use loader::paravisor::HCL_SECURE_VTL;
use std::fmt::Debug;
use x86defs::X64_EFER_SVME;
use x86defs::snp::SevSelector;
use x86defs::snp::SevVmsa;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

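/// The interrupt injection type to configure in the VMSA SEV features.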
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum InjectionType {
    /// Normal interrupt injection.
    Normal,
    /// Restricted interrupt injection.
    Restricted,
}

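/// A hardware SEV-SNP VP context, imported into the IGVM file as a VMSA page.
///
/// A minimal usage sketch (hypothetical caller, not part of this file; the
/// register values and page numbers are placeholders, and the
/// `VpContextBuilder` trait must be in scope):
///
/// ```ignore
/// let mut ctx = SnpHardwareContext::new(
///     Vtl::Vtl0,
///     false,
///     shared_gpa_boundary,
///     InjectionType::Normal,
/// );
/// ctx.import_vp_register(X86Register::Cr3(page_table_base));
/// ctx.set_vp_context_memory(vmsa_page_number);
/// let mut pages = Vec::new();
/// ctx.finalize(&mut pages);
/// ```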
#[derive(Debug)]
pub struct SnpHardwareContext {
    /// Whether to import an assembly trampoline that accepts the lower 1MB of
    /// memory as page data.
    accept_lower_1mb: bool,
    /// The page number at which to import this VP context.
    page_number: Option<u64>,
    /// The VMSA representing this VP.
    vmsa: SevVmsa,
}

impl SnpHardwareContext {
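    /// Create a new SNP VP context builder.
    ///
    /// `shared_gpa_boundary` is programmed as the virtual top of memory (vTOM)
    /// when vTOM is enabled, and `injection_type` selects how interrupts are
    /// delivered to this VTL.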
    pub fn new(
        vtl: Vtl,
        enlightened_uefi: bool,
        shared_gpa_boundary: u64,
        injection_type: InjectionType,
    ) -> Self {
        let mut vmsa: SevVmsa = FromZeros::new_zeroed();

        // EFER.SVME must always be set for an SEV guest.
        vmsa.efer = X64_EFER_SVME;

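        // Enable the SEV features used by the loader: SNP itself, vTOM at the
        // shared GPA boundary, and debug register swapping.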
        vmsa.sev_features.set_snp(true);
        vmsa.sev_features.set_vtom(true);
        vmsa.virtual_tom = shared_gpa_boundary;
        vmsa.sev_features.set_debug_swap(true);

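        // The injection policy depends on whether this is an enlightened UEFI
        // guest and, otherwise, on which VTL this VMSA belongs to.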
        if enlightened_uefi {
            assert_eq!(injection_type, InjectionType::Restricted);
            vmsa.sev_features.set_restrict_injection(true);
        } else {
            if vtl < HCL_SECURE_VTL {
                vmsa.sev_features
                    .set_alternate_injection(injection_type == InjectionType::Restricted);
            } else {
                vmsa.sev_features
                    .set_restrict_injection(injection_type == InjectionType::Restricted);
                vmsa.sev_features.set_snp_btb_isolation(true);
                vmsa.sev_features.set_prevent_host_ibs(true);
                vmsa.sev_features.set_vmsa_reg_prot(true);
                // vTOM is not used for the secure VTL.
                vmsa.sev_features.set_vtom(false);
                vmsa.virtual_tom = 0;
            }
        }

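        // XCR0 must have the x87 bit set; start with the architectural minimum.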
        vmsa.xcr0 = 0x1;

        SnpHardwareContext {
            accept_lower_1mb: enlightened_uefi,
            page_number: None,
            vmsa,
        }
    }
}

impl VpContextBuilder for SnpHardwareContext {
    type Register = X86Register;

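    /// Translate an imported register into the corresponding VMSA field.
    /// Registers not supported by this builder (IDTR, RSP, RFLAGS) panic, and
    /// MTRR registers are ignored with a warning.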
    fn import_vp_register(&mut self, register: X86Register) {
        // Build a VMSA descriptor-table selector (GDTR) from a table register.
        let create_vmsa_table_register = |reg: TableRegister| -> SevSelector {
            SevSelector {
                limit: reg.limit as u32,
                base: reg.base,
                ..FromZeros::new_zeroed()
            }
        };

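        // Build a VMSA segment selector from an importer segment register,
        // repacking the attribute bits into the VMSA's packed attrib format.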
        let create_vmsa_segment_register = |reg: SegmentRegister| -> SevSelector {
            SevSelector {
                limit: reg.limit,
                base: reg.base,
                selector: reg.selector,
                attrib: (reg.attributes & 0xFF) | ((reg.attributes >> 4) & 0xF00),
            }
        };

        match register {
            X86Register::Gdtr(reg) => self.vmsa.gdtr = create_vmsa_table_register(reg),
            X86Register::Idtr(_) => panic!("Idtr not allowed for SNP"),
            X86Register::Ds(reg) => self.vmsa.ds = create_vmsa_segment_register(reg),
            X86Register::Es(reg) => self.vmsa.es = create_vmsa_segment_register(reg),
            X86Register::Fs(reg) => self.vmsa.fs = create_vmsa_segment_register(reg),
            X86Register::Gs(reg) => self.vmsa.gs = create_vmsa_segment_register(reg),
            X86Register::Ss(reg) => self.vmsa.ss = create_vmsa_segment_register(reg),
            X86Register::Cs(reg) => self.vmsa.cs = create_vmsa_segment_register(reg),
            X86Register::Tr(reg) => self.vmsa.tr = create_vmsa_segment_register(reg),
            X86Register::Cr0(reg) => self.vmsa.cr0 = reg,
            X86Register::Cr3(reg) => self.vmsa.cr3 = reg,
            X86Register::Cr4(reg) => self.vmsa.cr4 = reg,
            X86Register::Efer(reg) => {
                // EFER.SVME must remain set for SEV guests.
                self.vmsa.efer = reg | X64_EFER_SVME;
            }
            X86Register::Pat(reg) => self.vmsa.pat = reg,
            X86Register::Rbp(reg) => self.vmsa.rbp = reg,
            X86Register::Rip(reg) => self.vmsa.rip = reg,
            X86Register::Rsi(reg) => self.vmsa.rsi = reg,
            X86Register::Rsp(_) => panic!("rsp not allowed for SNP"),
            X86Register::R8(reg) => self.vmsa.r8 = reg,
            X86Register::R9(reg) => self.vmsa.r9 = reg,
            X86Register::R10(reg) => self.vmsa.r10 = reg,
            X86Register::R11(reg) => self.vmsa.r11 = reg,
            X86Register::R12(reg) => self.vmsa.r12 = reg,
            X86Register::Rflags(_) => panic!("rflags not allowed for SNP"),

            X86Register::MtrrDefType(_)
            | X86Register::MtrrPhysBase0(_)
            | X86Register::MtrrPhysMask0(_)
            | X86Register::MtrrPhysBase1(_)
            | X86Register::MtrrPhysMask1(_)
            | X86Register::MtrrPhysBase2(_)
            | X86Register::MtrrPhysMask2(_)
            | X86Register::MtrrPhysBase3(_)
            | X86Register::MtrrPhysMask3(_)
            | X86Register::MtrrPhysBase4(_)
            | X86Register::MtrrPhysMask4(_)
            | X86Register::MtrrFix64k00000(_)
            | X86Register::MtrrFix16k80000(_)
            | X86Register::MtrrFix4kE0000(_)
            | X86Register::MtrrFix4kE8000(_)
            | X86Register::MtrrFix4kF0000(_)
            | X86Register::MtrrFix4kF8000(_) => {
                tracing::warn!(?register, "Ignoring MTRR register for SNP.")
            }
        }
    }

    fn set_vp_context_memory(&mut self, page_base: u64) {
        assert!(self.page_number.is_none(), "only allowed to set vmsa once");
        self.page_number = Some(page_base);
    }

    fn finalize(&mut self, state: &mut Vec<VpContextState>) {
        // Nothing to emit if no VMSA location was set.
        let Some(page_number) = self.page_number else {
            return;
        };

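        // When requested, emit a trampoline page at GPA 0 that accepts
        // (PVALIDATEs) the pages from 0x1000 up to 1MB before jumping to the
        // original entry point. Page layout: the original RIP is stored as a
        // u64 at offset 0, a breakpoint (int3) used as the failure target sits
        // at offset 8, and the trampoline code starts immediately after.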
        if self.accept_lower_1mb {
            let mut trampoline_page = vec![0u8; PAGE_SIZE_4K as usize];

            // Stash the original entry point at the start of the page.
            trampoline_page[..8].copy_from_slice(self.vmsa.rip.as_bytes());

            // int3 at offset 8 is the jump target for any acceptance failure.
            let break_offset = size_of::<u64>();
            trampoline_page[break_offset] = 0xCC;

            // The trampoline code starts immediately after and becomes the new
            // entry point for this VP.
            let mut byte_offset = break_offset + 1;
            self.vmsa.rip = byte_offset as u64;

            // Helper: copy an encoded instruction into the page and advance
            // the write offset.
            let copy_instr =
                |trampoline_page: &mut Vec<u8>, byte_offset, instruction: &[u8]| -> usize {
                    trampoline_page[byte_offset..byte_offset + instruction.len()]
                        .copy_from_slice(instruction);
                    byte_offset + instruction.len()
                };

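            // Register use in the loop below: esi = current GPA (starting at
            // 0x1000), ebx = end of the range (1MB), ecx = page size (0 = 4K),
            // edx = 1 (validate), as expected by PVALIDATE.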
            // mov esi, 0x1000 - start accepting at GPA 0x1000.
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0xBE, 0x00, 0x10, 0x00, 0x00],
            );

            // mov ebx, 0x100000 - accept up to 1MB.
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0xBB, 0x00, 0x00, 0x10, 0x00],
            );

            // xor ecx, ecx - 4K page size.
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x33, 0xC9]);

            // mov edx, 1 - validate.
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0xBA, 0x01, 0x00, 0x00, 0x00],
            );

            // Top of the acceptance loop.
            let jump_offset = byte_offset;

            // mov eax, esi - page to validate.
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x8B, 0xC6]);

            // pvalidate
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0xF2, 0x0F, 0x01, 0xFF]);

            // jc break - a set carry flag from PVALIDATE is treated as a failure.
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x72]);
            byte_offset += 1;
            trampoline_page[byte_offset - 1] = (break_offset as u8).wrapping_sub(byte_offset as u8);

            // test rax, rax - check the PVALIDATE return code.
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x48, 0x85, 0xC0]);

            // jnz break - a nonzero return code is a failure.
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x75]);
            byte_offset += 1;
            trampoline_page[byte_offset - 1] = (break_offset as u8).wrapping_sub(byte_offset as u8);

            // add esi, 0x1000 - advance to the next page.
            byte_offset = copy_instr(
                &mut trampoline_page,
                byte_offset,
                &[0x81, 0xC6, 0x00, 0x10, 0x00, 0x00],
            );

            // cmp esi, ebx
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x3B, 0xF3]);

            // jb loop - continue until the whole range has been accepted.
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0x72]);
            byte_offset += 1;
            trampoline_page[byte_offset - 1] = (jump_offset as u8).wrapping_sub(byte_offset as u8);

            // jmp [rip + rel32] - indirect jump through the original RIP
            // stored at the start of the page.
            byte_offset = copy_instr(&mut trampoline_page, byte_offset, &[0xFF, 0x25]);
            let relative_offset: u32 = 0u32.wrapping_sub(byte_offset as u32 + 4);
            trampoline_page[byte_offset..byte_offset + 4]
                .copy_from_slice(relative_offset.as_bytes());

            // Import the trampoline as page 0, accepted exclusively by the guest.
            state.push(VpContextState::Page(VpContextPageState {
                page_base: 0,
                page_count: 1,
                acceptance: BootPageAcceptance::Exclusive,
                data: trampoline_page,
            }));
        }

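        // Import the VMSA itself at the configured page number.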
        state.push(VpContextState::Page(VpContextPageState {
            page_base: page_number,
            page_count: 1,
            acceptance: BootPageAcceptance::VpContext,
            data: self.vmsa.as_bytes().to_vec(),
        }));
    }
}