1use super::BackingParams;
7use super::BackingPrivate;
8use super::BackingSharedParams;
9use super::HardwareIsolatedBacking;
10use super::InterceptMessageOptionalState;
11use super::InterceptMessageState;
12use super::UhEmulationState;
13use super::hardware_cvm;
14use super::hardware_cvm::HardwareIsolatedGuestTimer;
15use super::vp_state;
16use super::vp_state::UhVpStateAccess;
17use crate::BackingShared;
18use crate::Error;
19use crate::GuestVtl;
20use crate::TlbFlushLockAccess;
21use crate::UhCvmPartitionState;
22use crate::UhCvmVpState;
23use crate::UhPartitionInner;
24use crate::UhPartitionNewParams;
25use crate::WakeReason;
26use crate::processor::UhHypercallHandler;
27use crate::processor::UhProcessor;
28use crate::processor::hardware_cvm::apic::ApicBacking;
29use cvm_tracing::CVM_ALLOWED;
30use cvm_tracing::CVM_CONFIDENTIAL;
31use hcl::vmsa::VmsaWrapper;
32use hv1_emulator::hv::ProcessorVtlHv;
33use hv1_emulator::synic::ProcessorSynic;
34use hv1_hypercall::HvRepResult;
35use hv1_hypercall::HypercallIo;
36use hv1_structs::ProcessorSet;
37use hv1_structs::VtlArray;
38use hvdef::HV_PAGE_SIZE;
39use hvdef::HvDeliverabilityNotificationsRegister;
40use hvdef::HvError;
41use hvdef::HvMessageType;
42use hvdef::HvX64PendingExceptionEvent;
43use hvdef::HvX64RegisterName;
44use hvdef::Vtl;
45use hvdef::hypercall::Control;
46use hvdef::hypercall::HvFlushFlags;
47use hvdef::hypercall::HvGvaRange;
48use hvdef::hypercall::HypercallOutput;
49use inspect::Inspect;
50use inspect::InspectMut;
51use inspect_counters::Counter;
52use virt::EmulatorMonitorSupport;
53use virt::Processor;
54use virt::VpHaltReason;
55use virt::VpIndex;
56use virt::io::CpuIo;
57use virt::state::StateElement;
58use virt::vp;
59use virt::vp::AccessVpState;
60use virt::vp::MpState;
61use virt::x86::MsrError;
62use virt::x86::MsrErrorExt;
63use virt::x86::SegmentRegister;
64use virt::x86::TableRegister;
65use virt_support_apic::ApicClient;
66use virt_support_x86emu::emulate::EmulatorSupport as X86EmulatorSupport;
67use virt_support_x86emu::emulate::emulate_io;
68use virt_support_x86emu::emulate::emulate_translate_gva;
69use virt_support_x86emu::translate::TranslationRegisters;
70use vmcore::vmtime::VmTimeAccess;
71use x86defs::RFlags;
72use x86defs::cpuid::CpuidFunction;
73use x86defs::snp::SevEventInjectInfo;
74use x86defs::snp::SevExitCode;
75use x86defs::snp::SevInvlpgbEcx;
76use x86defs::snp::SevInvlpgbEdx;
77use x86defs::snp::SevInvlpgbRax;
78use x86defs::snp::SevIoAccessInfo;
79use x86defs::snp::SevNpfInfo;
80use x86defs::snp::SevSelector;
81use x86defs::snp::SevStatusMsr;
82use x86defs::snp::SevVmsa;
83use x86defs::snp::Vmpl;
84use zerocopy::FromZeros;
85use zerocopy::IntoBytes;
86
87#[derive(Debug, Error)]
88#[error("invalid vmcb")]
89struct InvalidVmcb;
90
91#[derive(Debug, Error)]
92enum SnpGhcbError {
93 #[error("failed to access GHCB page")]
94 GhcbPageAccess(#[source] guestmem::GuestMemoryError),
95 #[error("ghcb page used for vmgexit does not match overlay page")]
96 GhcbMisconfiguration,
97}
98
99#[derive(Debug, Error)]
100#[error("failed to run")]
101struct SnpRunVpError(#[source] hcl::ioctl::Error);
102
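/// Backing state for a virtual processor on an SNP (AMD SEV-SNP) isolated
/// partition.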
103#[derive(InspectMut)]
105pub struct SnpBacked {
106 #[inspect(hex)]
107 hv_sint_notifications: u16,
108 general_stats: VtlArray<GeneralStats, 2>,
109 exit_stats: VtlArray<ExitStats, 2>,
110 #[inspect(flatten)]
111 cvm: UhCvmVpState,
112}
113
114#[derive(Inspect, Default)]
115struct GeneralStats {
116 guest_busy: Counter,
117 int_ack: Counter,
118 synth_int: Counter,
119}
120
121#[derive(Inspect, Default)]
122struct ExitStats {
123 automatic_exit: Counter,
124 cpuid: Counter,
125 hlt: Counter,
126 intr: Counter,
127 invd: Counter,
128 invlpgb: Counter,
129 ioio: Counter,
130 msr_read: Counter,
131 msr_write: Counter,
132 npf: Counter,
133 npf_no_intercept: Counter,
134 npf_spurious: Counter,
135 rdpmc: Counter,
136 vmgexit: Counter,
137 vmmcall: Counter,
138 xsetbv: Counter,
139 excp_db: Counter,
140 secure_reg_write: Counter,
141}
142
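/// Indexes of the direct overlay pages allocated for each VP; `Count` gives the
/// total number of pages.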
143enum UhDirectOverlay {
144 Sipp,
145 Sifp,
146 Ghcb,
147 Count,
148}
149
150impl SnpBacked {
151 fn calculate_efer(efer: u64, cr0: u64) -> u64 {
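        // Derive EFER.LMA from EFER.LME and CR0.PG, and always force EFER.SVME on,
        // as required for an SEV-SNP guest.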
153 let new_efer = if efer & x86defs::X64_EFER_LME != 0 && cr0 & x86defs::X64_CR0_PG != 0 {
154 efer | x86defs::X64_EFER_LMA
155 } else {
156 efer & !x86defs::X64_EFER_LMA
157 };
158 new_efer | x86defs::X64_EFER_SVME
159 }
160
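    /// Returns the number of shared pages required per CPU: one for each direct
    /// overlay page.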
161 pub fn shared_pages_required_per_cpu() -> u64 {
164 UhDirectOverlay::Count as u64
165 }
166}
167
168impl HardwareIsolatedBacking for SnpBacked {
169 fn cvm_state(&self) -> &UhCvmVpState {
170 &self.cvm
171 }
172
173 fn cvm_state_mut(&mut self) -> &mut UhCvmVpState {
174 &mut self.cvm
175 }
176
177 fn cvm_partition_state(shared: &Self::Shared) -> &UhCvmPartitionState {
178 &shared.cvm
179 }
180
181 fn switch_vtl(this: &mut UhProcessor<'_, Self>, source_vtl: GuestVtl, target_vtl: GuestVtl) {
182 let [vmsa0, vmsa1] = this.runner.vmsas_mut();
183 let (current_vmsa, mut target_vmsa) = match (source_vtl, target_vtl) {
184 (GuestVtl::Vtl0, GuestVtl::Vtl1) => (vmsa0, vmsa1),
185 (GuestVtl::Vtl1, GuestVtl::Vtl0) => (vmsa1, vmsa0),
186 _ => unreachable!(),
187 };
188
189 target_vmsa.set_rax(current_vmsa.rax());
190 target_vmsa.set_rbx(current_vmsa.rbx());
191 target_vmsa.set_rcx(current_vmsa.rcx());
192 target_vmsa.set_rdx(current_vmsa.rdx());
193 target_vmsa.set_rbp(current_vmsa.rbp());
194 target_vmsa.set_rsi(current_vmsa.rsi());
195 target_vmsa.set_rdi(current_vmsa.rdi());
196 target_vmsa.set_r8(current_vmsa.r8());
197 target_vmsa.set_r9(current_vmsa.r9());
198 target_vmsa.set_r10(current_vmsa.r10());
199 target_vmsa.set_r11(current_vmsa.r11());
200 target_vmsa.set_r12(current_vmsa.r12());
201 target_vmsa.set_r13(current_vmsa.r13());
202 target_vmsa.set_r14(current_vmsa.r14());
203 target_vmsa.set_r15(current_vmsa.r15());
204 target_vmsa.set_xcr0(current_vmsa.xcr0());
205
206 target_vmsa.set_cr2(current_vmsa.cr2());
207
208 target_vmsa.set_dr0(current_vmsa.dr0());
210 target_vmsa.set_dr1(current_vmsa.dr1());
211 target_vmsa.set_dr2(current_vmsa.dr2());
212 target_vmsa.set_dr3(current_vmsa.dr3());
213
214 target_vmsa.set_pl0_ssp(current_vmsa.pl0_ssp());
215 target_vmsa.set_pl1_ssp(current_vmsa.pl1_ssp());
216 target_vmsa.set_pl2_ssp(current_vmsa.pl2_ssp());
217 target_vmsa.set_pl3_ssp(current_vmsa.pl3_ssp());
218 target_vmsa.set_u_cet(current_vmsa.u_cet());
219
        target_vmsa.set_x87_registers(&current_vmsa.x87_registers());
221
222 let vec_reg_count = 16;
223 for i in 0..vec_reg_count {
224 target_vmsa.set_xmm_registers(i, current_vmsa.xmm_registers(i));
225 target_vmsa.set_ymm_registers(i, current_vmsa.ymm_registers(i));
226 }
227
228 this.backing.cvm_state_mut().exit_vtl = target_vtl;
229 }
230
231 fn translation_registers(
232 &self,
233 this: &UhProcessor<'_, Self>,
234 vtl: GuestVtl,
235 ) -> TranslationRegisters {
236 let vmsa = this.runner.vmsa(vtl);
237 TranslationRegisters {
238 cr0: vmsa.cr0(),
239 cr4: vmsa.cr4(),
240 efer: vmsa.efer(),
241 cr3: vmsa.cr3(),
242 rflags: vmsa.rflags(),
243 ss: virt_seg_from_snp(vmsa.ss()).into(),
244 encryption_mode: virt_support_x86emu::translate::EncryptionMode::Vtom(
245 this.partition.caps.vtom.unwrap(),
246 ),
247 }
248 }
249
250 fn tlb_flush_lock_access<'a>(
251 vp_index: Option<VpIndex>,
252 partition: &'a UhPartitionInner,
253 shared: &'a Self::Shared,
254 ) -> impl TlbFlushLockAccess + 'a {
255 SnpTlbLockFlushAccess {
256 vp_index,
257 partition,
258 shared,
259 }
260 }
261
262 fn pending_event_vector(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> Option<u8> {
263 let event_inject = this.runner.vmsa(vtl).event_inject();
264 if event_inject.valid() {
265 Some(event_inject.vector())
266 } else {
267 None
268 }
269 }
270
271 fn set_pending_exception(
272 this: &mut UhProcessor<'_, Self>,
273 vtl: GuestVtl,
274 event: HvX64PendingExceptionEvent,
275 ) {
276 let inject_info = SevEventInjectInfo::new()
277 .with_valid(true)
278 .with_deliver_error_code(event.deliver_error_code())
279 .with_error_code(event.error_code())
280 .with_vector(event.vector().try_into().unwrap())
281 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT);
282
283 this.runner.vmsa_mut(vtl).set_event_inject(inject_info);
284 }
285
286 fn cr0(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
287 this.runner.vmsa(vtl).cr0()
288 }
289
290 fn cr4(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
291 this.runner.vmsa(vtl).cr4()
292 }
293
294 fn intercept_message_state(
295 this: &UhProcessor<'_, Self>,
296 vtl: GuestVtl,
297 include_optional_state: bool,
298 ) -> InterceptMessageState {
299 let vmsa = this.runner.vmsa(vtl);
300
301 let instr_len = if SevExitCode(vmsa.guest_error_code()) == SevExitCode::NPF {
303 0
304 } else {
305 (vmsa.next_rip() - vmsa.rip()) as u8
306 };
307
308 InterceptMessageState {
309 instruction_length_and_cr8: instr_len,
310 cpl: vmsa.cpl(),
311 efer_lma: vmsa.efer() & x86defs::X64_EFER_LMA != 0,
312 cs: virt_seg_from_snp(vmsa.cs()).into(),
313 rip: vmsa.rip(),
314 rflags: vmsa.rflags(),
315 rax: vmsa.rax(),
316 rdx: vmsa.rdx(),
317 optional: if include_optional_state {
318 Some(InterceptMessageOptionalState {
319 ds: virt_seg_from_snp(vmsa.ds()).into(),
320 es: virt_seg_from_snp(vmsa.es()).into(),
321 })
322 } else {
323 None
324 },
325 rcx: vmsa.rcx(),
326 rsi: vmsa.rsi(),
327 rdi: vmsa.rdi(),
328 }
329 }
330
331 fn cr_intercept_registration(
332 this: &mut UhProcessor<'_, Self>,
333 intercept_control: hvdef::HvRegisterCrInterceptControl,
334 ) {
335 this.runner
340 .set_vp_registers_hvcall(
341 Vtl::Vtl1,
342 [(
343 HvX64RegisterName::CrInterceptControl,
344 u64::from(intercept_control),
345 )],
346 )
347 .expect("setting intercept control succeeds");
348 }
349
350 fn is_interrupt_pending(
351 this: &mut UhProcessor<'_, Self>,
352 vtl: GuestVtl,
353 check_rflags: bool,
354 dev: &impl CpuIo,
355 ) -> bool {
356 let vmsa = this.runner.vmsa_mut(vtl);
357 if vmsa.event_inject().valid()
358 && vmsa.event_inject().interruption_type() == x86defs::snp::SEV_INTR_TYPE_NMI
359 {
360 return true;
361 }
362
363 let vmsa_priority = vmsa.v_intr_cntrl().priority() as u32;
364 let lapic = &mut this.backing.cvm.lapics[vtl].lapic;
365 let ppr = lapic
366 .access(&mut SnpApicClient {
367 partition: this.partition,
368 vmsa,
369 dev,
370 vmtime: &this.vmtime,
371 vtl,
372 })
373 .get_ppr();
374 let ppr_priority = ppr >> 4;
375 if vmsa_priority <= ppr_priority {
376 return false;
377 }
378
379 let vmsa = this.runner.vmsa_mut(vtl);
380 if (check_rflags && !RFlags::from_bits(vmsa.rflags()).interrupt_enable())
381 || vmsa.v_intr_cntrl().intr_shadow()
382 || !vmsa.v_intr_cntrl().irq()
383 {
384 return false;
385 }
386
387 true
388 }
389
390 fn untrusted_synic_mut(&mut self) -> Option<&mut ProcessorSynic> {
391 None
392 }
393
394 fn update_deadline(this: &mut UhProcessor<'_, Self>, ref_time_now: u64, next_ref_time: u64) {
395 this.shared
396 .guest_timer
397 .update_deadline(this, ref_time_now, next_ref_time);
398 }
399
400 fn clear_deadline(this: &mut UhProcessor<'_, Self>) {
401 this.shared.guest_timer.clear_deadline(this);
402 }
403}
404
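/// Partition-wide state shared by all SNP-backed processors.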
405#[derive(Inspect)]
407pub struct SnpBackedShared {
408 #[inspect(flatten)]
409 pub(crate) cvm: UhCvmPartitionState,
410 invlpgb_count_max: u16,
411 tsc_aux_virtualized: bool,
412 #[inspect(debug)]
413 sev_status: SevStatusMsr,
414 #[inspect(skip)]
416 guest_timer: hardware_cvm::VmTimeGuestTimer,
417}
418
419impl SnpBackedShared {
420 pub(crate) fn new(
421 _partition_params: &UhPartitionNewParams<'_>,
422 params: BackingSharedParams<'_>,
423 ) -> Result<Self, Error> {
424 let cvm = params.cvm_state.unwrap();
425 let invlpgb_count_max = x86defs::cpuid::ExtendedAddressSpaceSizesEdx::from(
426 params
427 .cpuid
428 .result(CpuidFunction::ExtendedAddressSpaceSizes.0, 0, &[0; 4])[3],
429 )
430 .invlpgb_count_max();
431 let tsc_aux_virtualized = x86defs::cpuid::ExtendedSevFeaturesEax::from(
432 params
433 .cpuid
434 .result(CpuidFunction::ExtendedSevFeatures.0, 0, &[0; 4])[0],
435 )
436 .tsc_aux_virtualization();
437
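        // Read the SEV_STATUS MSR to discover which SEV features are enabled for
        // this guest.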
438 let msr = crate::MsrDevice::new(0).expect("open msr");
441 let sev_status =
442 SevStatusMsr::from(msr.read_msr(x86defs::X86X_AMD_MSR_SEV).expect("read msr"));
443 tracing::info!(CVM_ALLOWED, ?sev_status, "SEV status");
444
445 let guest_timer = hardware_cvm::VmTimeGuestTimer;
447
448 Ok(Self {
449 sev_status,
450 invlpgb_count_max,
451 tsc_aux_virtualized,
452 cvm,
453 guest_timer,
454 })
455 }
456}
457
458#[expect(private_interfaces)]
459impl BackingPrivate for SnpBacked {
460 type HclBacking<'snp> = hcl::ioctl::snp::Snp<'snp>;
461 type Shared = SnpBackedShared;
462 type EmulationCache = ();
463
464 fn shared(shared: &BackingShared) -> &Self::Shared {
465 let BackingShared::Snp(shared) = shared else {
466 unreachable!()
467 };
468 shared
469 }
470
471 fn new(params: BackingParams<'_, '_, Self>, shared: &SnpBackedShared) -> Result<Self, Error> {
472 Ok(Self {
473 hv_sint_notifications: 0,
474 general_stats: VtlArray::from_fn(|_| Default::default()),
475 exit_stats: VtlArray::from_fn(|_| Default::default()),
476 cvm: UhCvmVpState::new(
477 &shared.cvm,
478 params.partition,
479 params.vp_info,
480 UhDirectOverlay::Count as usize,
481 )?,
482 })
483 }
484
485 fn init(this: &mut UhProcessor<'_, Self>) {
486 let sev_status = this.vp().shared.sev_status;
487 for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
488 init_vmsa(
489 &mut this.runner.vmsa_mut(vtl),
490 vtl,
491 this.partition.caps.vtom,
492 sev_status,
493 );
494
495 let registers = vp::Registers::at_reset(&this.partition.caps, &this.inner.vp_info);
497 this.access_state(vtl.into())
            .set_registers(&registers)
499 .expect("Resetting to architectural state should succeed");
500
501 let debug_registers =
502 vp::DebugRegisters::at_reset(&this.partition.caps, &this.inner.vp_info);
503
504 this.access_state(vtl.into())
505 .set_debug_regs(&debug_registers)
506 .expect("Resetting to architectural state should succeed");
507
508 let xcr0 = vp::Xcr0::at_reset(&this.partition.caps, &this.inner.vp_info);
509 this.access_state(vtl.into())
510 .set_xcr(&xcr0)
511 .expect("Resetting to architectural state should succeed");
512
513 let cache_control = vp::Mtrrs::at_reset(&this.partition.caps, &this.inner.vp_info);
514 this.access_state(vtl.into())
515 .set_mtrrs(&cache_control)
516 .expect("Resetting to architectural state should succeed");
517 }
518
519 let pfns = &this.backing.cvm.direct_overlay_handle.pfns();
522 let values: &[(HvX64RegisterName, u64); 3] = &[
523 (
524 HvX64RegisterName::Sipp,
525 hvdef::HvSynicSimpSiefp::new()
526 .with_enabled(true)
527 .with_base_gpn(pfns[UhDirectOverlay::Sipp as usize])
528 .into(),
529 ),
530 (
531 HvX64RegisterName::Sifp,
532 hvdef::HvSynicSimpSiefp::new()
533 .with_enabled(true)
534 .with_base_gpn(pfns[UhDirectOverlay::Sifp as usize])
535 .into(),
536 ),
537 (
538 HvX64RegisterName::Ghcb,
539 x86defs::snp::GhcbMsr::new()
540 .with_info(x86defs::snp::GhcbInfo::REGISTER_REQUEST.0)
541 .with_pfn(pfns[UhDirectOverlay::Ghcb as usize])
542 .into(),
543 ),
544 ];
545
546 this.runner
547 .set_vp_registers_hvcall(Vtl::Vtl0, values)
548 .expect("set_vp_registers hypercall for direct overlays should succeed");
549 }
550
551 type StateAccess<'p, 'a>
552 = UhVpStateAccess<'a, 'p, Self>
553 where
554 Self: 'a + 'p,
555 'p: 'a;
556
557 fn access_vp_state<'a, 'p>(
558 this: &'a mut UhProcessor<'p, Self>,
559 vtl: GuestVtl,
560 ) -> Self::StateAccess<'p, 'a> {
561 UhVpStateAccess::new(this, vtl)
562 }
563
564 async fn run_vp(
565 this: &mut UhProcessor<'_, Self>,
566 dev: &impl CpuIo,
567 _stop: &mut virt::StopVp<'_>,
568 ) -> Result<(), VpHaltReason> {
569 this.run_vp_snp(dev).await
570 }
571
572 fn poll_apic(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl, scan_irr: bool) {
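        // Drop any previously requested virtual interrupt before the APIC state is
        // re-evaluated below.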
573 this.runner.vmsa_mut(vtl).v_intr_cntrl_mut().set_irq(false);
575
576 hardware_cvm::apic::poll_apic_core(this, vtl, scan_irr)
577 }
578
579 fn request_extint_readiness(_this: &mut UhProcessor<'_, Self>) {
580 unreachable!("extint managed through software apic")
581 }
582
583 fn request_untrusted_sint_readiness(this: &mut UhProcessor<'_, Self>, sints: u16) {
584 let sints = this.backing.hv_sint_notifications | sints;
585 if this.backing.hv_sint_notifications == sints {
586 return;
587 }
588 let notifications = HvDeliverabilityNotificationsRegister::new().with_sints(sints);
589 tracing::trace!(?notifications, "setting notifications");
590 this.runner
591 .set_vp_register(
592 GuestVtl::Vtl0,
593 HvX64RegisterName::DeliverabilityNotifications,
594 u64::from(notifications).into(),
595 )
            .expect("requesting deliverability is not a fallible operation");
597
598 this.backing.hv_sint_notifications = sints;
599 }
600
601 fn inspect_extra(this: &mut UhProcessor<'_, Self>, resp: &mut inspect::Response<'_>) {
602 let vtl0_vmsa = this.runner.vmsa(GuestVtl::Vtl0);
603 let vtl1_vmsa = if this.backing.cvm_state().vtl1.is_some() {
604 Some(this.runner.vmsa(GuestVtl::Vtl1))
605 } else {
606 None
607 };
608
609 let add_vmsa_inspect = |req: inspect::Request<'_>, vmsa: VmsaWrapper<'_, &SevVmsa>| {
610 req.respond()
611 .hex("guest_error_code", vmsa.guest_error_code())
612 .hex("exit_info1", vmsa.exit_info1())
613 .hex("exit_info2", vmsa.exit_info2())
614 .hex("v_intr_cntrl", u64::from(vmsa.v_intr_cntrl()));
615 };
616
617 resp.child("vmsa_additional", |req| {
618 req.respond()
619 .child("vtl0", |inner_req| add_vmsa_inspect(inner_req, vtl0_vmsa))
620 .child("vtl1", |inner_req| {
621 if let Some(vtl1_vmsa) = vtl1_vmsa {
622 add_vmsa_inspect(inner_req, vtl1_vmsa);
623 }
624 });
625 });
626 }
627
628 fn hv(&self, vtl: GuestVtl) -> Option<&ProcessorVtlHv> {
629 Some(&self.cvm.hv[vtl])
630 }
631
632 fn hv_mut(&mut self, vtl: GuestVtl) -> Option<&mut ProcessorVtlHv> {
633 Some(&mut self.cvm.hv[vtl])
634 }
635
636 fn handle_vp_start_enable_vtl_wake(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl) {
637 this.hcvm_handle_vp_start_enable_vtl(vtl)
638 }
639
640 fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
641 this.hcvm_vtl1_inspectable()
642 }
643
644 fn process_interrupts(
645 this: &mut UhProcessor<'_, Self>,
646 scan_irr: VtlArray<bool, 2>,
647 first_scan_irr: &mut bool,
648 dev: &impl CpuIo,
649 ) -> bool {
650 this.cvm_process_interrupts(scan_irr, first_scan_irr, dev)
651 }
652}
653
654fn virt_seg_to_snp(val: SegmentRegister) -> SevSelector {
655 SevSelector {
656 selector: val.selector,
657 attrib: (val.attributes & 0xFF) | ((val.attributes >> 4) & 0xF00),
658 limit: val.limit,
659 base: val.base,
660 }
661}
662
663fn virt_table_to_snp(val: TableRegister) -> SevSelector {
664 SevSelector {
665 limit: val.limit as u32,
666 base: val.base,
667 ..FromZeros::new_zeroed()
668 }
669}
670
671fn virt_seg_from_snp(selector: SevSelector) -> SegmentRegister {
672 SegmentRegister {
673 base: selector.base,
674 limit: selector.limit,
675 selector: selector.selector,
676 attributes: (selector.attrib & 0xFF) | ((selector.attrib & 0xF00) << 4),
677 }
678}
679
680fn virt_table_from_snp(selector: SevSelector) -> TableRegister {
681 TableRegister {
682 limit: selector.limit as u16,
683 base: selector.base,
684 }
685}
686
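/// Initializes a VMSA for the given VTL, propagating the relevant SEV feature
/// bits from the SEV status MSR and setting the initial VMPL, vTOM, and EFER
/// state.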
687fn init_vmsa(
688 vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>,
689 vtl: GuestVtl,
690 vtom: Option<u64>,
691 sev_status: SevStatusMsr,
692) {
693 vmsa.reset(sev_status.vmsa_reg_prot());
697 vmsa.sev_features_mut()
698 .set_snp_btb_isolation(sev_status.snp_btb_isolation());
699 vmsa.sev_features_mut()
700 .set_ibpb_on_entry(sev_status.ibpb_on_entry());
701 vmsa.sev_features_mut()
702 .set_prevent_host_ibs(sev_status.prevent_host_ibs());
703 vmsa.sev_features_mut()
704 .set_vmsa_reg_prot(sev_status.vmsa_reg_prot());
705 vmsa.sev_features_mut().set_snp(true);
706 vmsa.sev_features_mut().set_vtom(vtom.is_some());
707 vmsa.set_virtual_tom(vtom.unwrap_or(0));
708
709 vmsa.sev_features_mut().set_alternate_injection(true);
712 vmsa.sev_features_mut().set_reflect_vc(true);
713 vmsa.v_intr_cntrl_mut().set_guest_busy(true);
714 vmsa.sev_features_mut().set_debug_swap(true);
715
716 let vmpl = match vtl {
717 GuestVtl::Vtl0 => Vmpl::Vmpl2,
718 GuestVtl::Vtl1 => Vmpl::Vmpl1,
719 };
720 vmsa.set_vmpl(vmpl.into());
721
722 vmsa.set_guest_error_code(SevExitCode::INTR.0);
725
726 vmsa.set_efer(x86defs::X64_EFER_SVME);
729}
730
731struct SnpApicClient<'a, T> {
732 partition: &'a UhPartitionInner,
733 vmsa: VmsaWrapper<'a, &'a mut SevVmsa>,
734 dev: &'a T,
735 vmtime: &'a VmTimeAccess,
736 vtl: GuestVtl,
737}
738
739impl<T: CpuIo> ApicClient for SnpApicClient<'_, T> {
740 fn cr8(&mut self) -> u32 {
741 self.vmsa.v_intr_cntrl().tpr().into()
742 }
743
744 fn set_cr8(&mut self, value: u32) {
745 self.vmsa.v_intr_cntrl_mut().set_tpr(value as u8);
746 }
747
748 fn set_apic_base(&mut self, _value: u64) {
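        // Intentionally a no-op: the emulated local APIC tracks its own base address.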
749 }
751
752 fn wake(&mut self, vp_index: VpIndex) {
753 self.partition.vps[vp_index.index() as usize].wake(self.vtl, WakeReason::INTCON);
754 }
755
756 fn eoi(&mut self, vector: u8) {
757 debug_assert_eq!(self.vtl, GuestVtl::Vtl0);
758 self.dev.handle_eoi(vector.into())
759 }
760
761 fn now(&mut self) -> vmcore::vmtime::VmTime {
762 self.vmtime.now()
763 }
764
765 fn pull_offload(&mut self) -> ([u32; 8], [u32; 8]) {
766 unreachable!()
767 }
768}
769
770impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
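    /// Dispatcher for hypercalls issued directly by the guest via `vmmcall`.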
771 const TRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
773 Self,
774 [
775 hv1_hypercall::HvModifySparseGpaPageHostVisibility,
776 hv1_hypercall::HvQuerySparseGpaPageHostVisibility,
777 hv1_hypercall::HvX64StartVirtualProcessor,
778 hv1_hypercall::HvGetVpIndexFromApicId,
779 hv1_hypercall::HvGetVpRegisters,
780 hv1_hypercall::HvEnablePartitionVtl,
781 hv1_hypercall::HvRetargetDeviceInterrupt,
782 hv1_hypercall::HvPostMessage,
783 hv1_hypercall::HvSignalEvent,
784 hv1_hypercall::HvX64EnableVpVtl,
785 hv1_hypercall::HvExtQueryCapabilities,
786 hv1_hypercall::HvVtlCall,
787 hv1_hypercall::HvVtlReturn,
788 hv1_hypercall::HvFlushVirtualAddressList,
789 hv1_hypercall::HvFlushVirtualAddressListEx,
790 hv1_hypercall::HvFlushVirtualAddressSpace,
791 hv1_hypercall::HvFlushVirtualAddressSpaceEx,
792 hv1_hypercall::HvSetVpRegisters,
793 hv1_hypercall::HvModifyVtlProtectionMask,
794 hv1_hypercall::HvX64TranslateVirtualAddress,
795 hv1_hypercall::HvSendSyntheticClusterIpi,
796 hv1_hypercall::HvSendSyntheticClusterIpiEx,
797 hv1_hypercall::HvInstallIntercept,
798 hv1_hypercall::HvAssertVirtualInterrupt,
799 ],
800 );
801
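    /// Dispatcher for hypercalls arriving over the untrusted GHCB path, limited
    /// to `HvPostMessage` and `HvSignalEvent`.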
802 const UNTRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
805 Self,
806 [hv1_hypercall::HvPostMessage, hv1_hypercall::HvSignalEvent],
807 );
808}
809
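/// `HypercallIo` implementation for hypercall requests made through the GHCB
/// enlightened ABI: the control value and output GPA come from the GHCB
/// hypercall parameters, and the result is written back to the GHCB page.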
810struct GhcbEnlightenedHypercall<'a, 'b, T> {
811 handler: UhHypercallHandler<'a, 'b, T, SnpBacked>,
812 control: u64,
813 output_gpa: u64,
814 input_gpa: u64,
815 result: u64,
816}
817
818impl<'a, 'b, T> hv1_hypercall::AsHandler<UhHypercallHandler<'a, 'b, T, SnpBacked>>
819 for &mut GhcbEnlightenedHypercall<'a, 'b, T>
820{
821 fn as_handler(&mut self) -> &mut UhHypercallHandler<'a, 'b, T, SnpBacked> {
822 &mut self.handler
823 }
824}
825
826impl<T> HypercallIo for GhcbEnlightenedHypercall<'_, '_, T> {
827 fn advance_ip(&mut self) {
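        // No-op: for the GHCB enlightened hypercall ABI the instruction pointer is
        // not adjusted here; the completion status is written back to the GHCB page.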
828 }
830
831 fn retry(&mut self, control: u64) {
832 let control = Control::from(control);
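        // Report a timeout, echoing the rep start index back as the number of
        // elements processed so the guest can retry the remaining elements.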
841 self.set_result(
842 HypercallOutput::from(HvError::Timeout)
843 .with_elements_processed(control.rep_start())
844 .into(),
845 );
846 }
847
848 fn control(&mut self) -> u64 {
849 self.control
850 }
851
852 fn input_gpa(&mut self) -> u64 {
853 self.input_gpa
854 }
855
856 fn output_gpa(&mut self) -> u64 {
857 self.output_gpa
858 }
859
860 fn fast_register_pair_count(&mut self) -> usize {
861 0
862 }
863
864 fn extended_fast_hypercalls_ok(&mut self) -> bool {
865 false
866 }
867
868 fn fast_input(&mut self, _buf: &mut [[u64; 2]], _output_register_pairs: usize) -> usize {
869 unimplemented!("not supported for secure enlightened abi")
870 }
871
872 fn fast_output(&mut self, _starting_pair_index: usize, _buf: &[[u64; 2]]) {
873 unimplemented!("not supported for secure enlightened abi")
874 }
875
876 fn vtl_input(&mut self) -> u64 {
877 unimplemented!("not supported for secure enlightened abi")
878 }
879
880 fn set_result(&mut self, n: u64) {
881 self.result = n;
882 }
883
884 fn fast_regs(&mut self, _starting_pair_index: usize, _buf: &mut [[u64; 2]]) {
885 unimplemented!("not supported for secure enlightened abi")
886 }
887}
888
889impl<'b> ApicBacking<'b, SnpBacked> for UhProcessor<'b, SnpBacked> {
890 fn vp(&mut self) -> &mut UhProcessor<'b, SnpBacked> {
891 self
892 }
893
894 fn handle_interrupt(&mut self, vtl: GuestVtl, vector: u8) {
895 let mut vmsa = self.runner.vmsa_mut(vtl);
896 vmsa.v_intr_cntrl_mut().set_vector(vector);
897 vmsa.v_intr_cntrl_mut().set_priority((vector >> 4).into());
898 vmsa.v_intr_cntrl_mut().set_ignore_tpr(false);
899 vmsa.v_intr_cntrl_mut().set_irq(true);
900 self.backing.cvm.lapics[vtl].activity = MpState::Running;
901 }
902
903 fn handle_nmi(&mut self, vtl: GuestVtl) {
904 let mut vmsa = self.runner.vmsa_mut(vtl);
908
909 vmsa.set_event_inject(
913 SevEventInjectInfo::new()
914 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_NMI)
915 .with_vector(2)
916 .with_valid(true),
917 );
918 self.backing.cvm.lapics[vtl].nmi_pending = false;
919 self.backing.cvm.lapics[vtl].activity = MpState::Running;
920 }
921
922 fn handle_sipi(&mut self, vtl: GuestVtl, cs: SegmentRegister) {
923 let mut vmsa = self.runner.vmsa_mut(vtl);
924 vmsa.set_cs(virt_seg_to_snp(cs));
925 vmsa.set_rip(0);
926 self.backing.cvm.lapics[vtl].activity = MpState::Running;
927 }
928}
929
930impl UhProcessor<'_, SnpBacked> {
931 fn handle_synic_deliverable_exit(&mut self) {
932 let message = self
933 .runner
934 .exit_message()
935 .as_message::<hvdef::HvX64SynicSintDeliverableMessage>();
936
937 tracing::trace!(
938 deliverable_sints = message.deliverable_sints,
939 "sint deliverable"
940 );
941
942 self.backing.hv_sint_notifications &= !message.deliverable_sints;
943
944 self.deliver_synic_messages(GuestVtl::Vtl0, message.deliverable_sints);
946 }
947
948 fn handle_vmgexit(
949 &mut self,
950 dev: &impl CpuIo,
951 intercepted_vtl: GuestVtl,
952 ) -> Result<(), SnpGhcbError> {
953 let message = self
954 .runner
955 .exit_message()
956 .as_message::<hvdef::HvX64VmgexitInterceptMessage>();
957
958 let ghcb_msr = x86defs::snp::GhcbMsr::from(message.ghcb_msr);
959 tracing::trace!(?ghcb_msr, "vmgexit intercept");
960
961 match x86defs::snp::GhcbInfo(ghcb_msr.info()) {
962 x86defs::snp::GhcbInfo::NORMAL => {
963 assert!(message.flags.ghcb_page_valid());
964 let ghcb_pfn = ghcb_msr.pfn();
965
966 let ghcb_overlay =
967 self.backing.cvm.direct_overlay_handle.pfns()[UhDirectOverlay::Ghcb as usize];
968
969 if ghcb_pfn != ghcb_overlay {
971 tracelimit::warn_ratelimited!(
972 CVM_ALLOWED,
973 vmgexit_pfn = ghcb_pfn,
974 overlay_pfn = ghcb_overlay,
975 "ghcb page used for vmgexit does not match overlay page"
976 );
977
978 return Err(SnpGhcbError::GhcbMisconfiguration);
979 }
980
981 match x86defs::snp::GhcbUsage(message.ghcb_page.ghcb_usage) {
982 x86defs::snp::GhcbUsage::HYPERCALL => {
983 let guest_memory = &self.shared.cvm.shared_memory;
984 let overlay_base = ghcb_overlay * HV_PAGE_SIZE;
987 let x86defs::snp::GhcbHypercallParameters {
988 output_gpa,
989 input_control,
990 } = guest_memory
991 .read_plain(
992 overlay_base
993 + x86defs::snp::GHCB_PAGE_HYPERCALL_PARAMETERS_OFFSET as u64,
994 )
995 .map_err(SnpGhcbError::GhcbPageAccess)?;
996
997 let mut handler = GhcbEnlightenedHypercall {
998 handler: UhHypercallHandler {
999 vp: self,
1000 bus: dev,
1001 trusted: false,
1002 intercepted_vtl,
1003 },
1004 control: input_control,
1005 output_gpa,
1006 input_gpa: overlay_base,
1007 result: 0,
1008 };
1009
1010 UhHypercallHandler::UNTRUSTED_DISPATCHER
1011 .dispatch(guest_memory, &mut handler);
1012
1013 guest_memory
1021 .write_at(
1022 overlay_base
1023 + x86defs::snp::GHCB_PAGE_HYPERCALL_OUTPUT_OFFSET as u64,
1024 handler.result.as_bytes(),
1025 )
1026 .map_err(SnpGhcbError::GhcbPageAccess)?;
1027 }
1028 usage => unimplemented!("ghcb usage {usage:?}"),
1029 }
1030 }
1031 info => unimplemented!("ghcb info {info:?}"),
1032 }
1033
1034 Ok(())
1035 }
1036
1037 fn handle_msr_access(
1038 &mut self,
1039 dev: &impl CpuIo,
1040 entered_from_vtl: GuestVtl,
1041 msr: u32,
1042 is_write: bool,
1043 ) {
1044 if is_write && self.cvm_try_protect_msr_write(entered_from_vtl, msr) {
1045 return;
1046 }
1047
1048 let vmsa = self.runner.vmsa_mut(entered_from_vtl);
1049 let gp = if is_write {
1050 let value = (vmsa.rax() as u32 as u64) | ((vmsa.rdx() as u32 as u64) << 32);
1051
1052 let r = self.backing.cvm.lapics[entered_from_vtl]
1053 .lapic
1054 .access(&mut SnpApicClient {
1055 partition: self.partition,
1056 vmsa,
1057 dev,
1058 vmtime: &self.vmtime,
1059 vtl: entered_from_vtl,
1060 })
1061 .msr_write(msr, value)
1062 .or_else_if_unknown(|| self.write_msr_cvm(msr, value, entered_from_vtl))
1063 .or_else_if_unknown(|| self.write_msr_snp(dev, msr, value, entered_from_vtl));
1064
1065 match r {
1066 Ok(()) => false,
1067 Err(MsrError::Unknown) => {
1068 tracing::debug!(msr, value, "unknown cvm msr write");
1069 false
1070 }
1071 Err(MsrError::InvalidAccess) => true,
1072 }
1073 } else {
1074 let r = self.backing.cvm.lapics[entered_from_vtl]
1075 .lapic
1076 .access(&mut SnpApicClient {
1077 partition: self.partition,
1078 vmsa,
1079 dev,
1080 vmtime: &self.vmtime,
1081 vtl: entered_from_vtl,
1082 })
1083 .msr_read(msr)
1084 .or_else_if_unknown(|| self.read_msr_cvm(msr, entered_from_vtl))
1085 .or_else_if_unknown(|| self.read_msr_snp(dev, msr, entered_from_vtl));
1086
1087 let value = match r {
1088 Ok(v) => Some(v),
1089 Err(MsrError::Unknown) => {
1090 tracing::debug!(msr, "unknown cvm msr read");
1091 Some(0)
1092 }
1093 Err(MsrError::InvalidAccess) => None,
1094 };
1095
1096 if let Some(value) = value {
1097 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1098 vmsa.set_rax((value as u32).into());
1099 vmsa.set_rdx(((value >> 32) as u32).into());
1100 false
1101 } else {
1102 true
1103 }
1104 };
1105
1106 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1107 if gp {
1108 vmsa.set_event_inject(
1109 SevEventInjectInfo::new()
1110 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1111 .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
1112 .with_deliver_error_code(true)
1113 .with_valid(true),
1114 );
1115 } else {
1116 advance_to_next_instruction(&mut vmsa);
1117 }
1118 }
1119
1120 fn handle_xsetbv(&mut self, entered_from_vtl: GuestVtl) {
1121 let vmsa = self.runner.vmsa(entered_from_vtl);
1122 if let Some(value) = hardware_cvm::validate_xsetbv_exit(hardware_cvm::XsetbvExitInput {
1123 rax: vmsa.rax(),
1124 rcx: vmsa.rcx(),
1125 rdx: vmsa.rdx(),
1126 cr4: vmsa.cr4(),
1127 cpl: vmsa.cpl(),
1128 }) {
1129 if !self.cvm_try_protect_secure_register_write(
1130 entered_from_vtl,
1131 HvX64RegisterName::Xfem,
1132 value,
1133 ) {
1134 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1135 vmsa.set_xcr0(value);
1136 advance_to_next_instruction(&mut vmsa);
1137 }
1138 } else {
1139 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1140 vmsa.set_event_inject(
1141 SevEventInjectInfo::new()
1142 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1143 .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
1144 .with_deliver_error_code(true)
1145 .with_valid(true),
1146 );
1147 }
1148 }
1149
1150 fn handle_crx_intercept(&mut self, entered_from_vtl: GuestVtl, reg: HvX64RegisterName) {
1151 let vmsa = self.runner.vmsa(entered_from_vtl);
1152 let mov_crx_drx = x86defs::snp::MovCrxDrxInfo::from(vmsa.exit_info1());
1153 let reg_value = {
1154 let gpr_name =
1155 HvX64RegisterName(HvX64RegisterName::Rax.0 + mov_crx_drx.gpr_number() as u32);
1156
1157 match gpr_name {
1158 HvX64RegisterName::Rax => vmsa.rax(),
1159 HvX64RegisterName::Rbx => vmsa.rbx(),
1160 HvX64RegisterName::Rcx => vmsa.rcx(),
1161 HvX64RegisterName::Rdx => vmsa.rdx(),
1162 HvX64RegisterName::Rsp => vmsa.rsp(),
1163 HvX64RegisterName::Rbp => vmsa.rbp(),
1164 HvX64RegisterName::Rsi => vmsa.rsi(),
1165 HvX64RegisterName::Rdi => vmsa.rdi(),
1166 HvX64RegisterName::R8 => vmsa.r8(),
1167 HvX64RegisterName::R9 => vmsa.r9(),
1168 HvX64RegisterName::R10 => vmsa.r10(),
1169 HvX64RegisterName::R11 => vmsa.r11(),
1170 HvX64RegisterName::R12 => vmsa.r12(),
1171 HvX64RegisterName::R13 => vmsa.r13(),
1172 HvX64RegisterName::R14 => vmsa.r14(),
1173 HvX64RegisterName::R15 => vmsa.r15(),
1174 _ => unreachable!("unexpected register"),
1175 }
1176 };
1177
1178 if !mov_crx_drx.mov_crx() {
1185 tracelimit::warn_ratelimited!(
1186 CVM_ALLOWED,
1187 "Intercepted crx access, instruction is not mov crx"
1188 );
1189 return;
1190 }
1191
1192 if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, reg_value) {
1193 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1194 match reg {
1195 HvX64RegisterName::Cr0 => vmsa.set_cr0(reg_value),
1196 HvX64RegisterName::Cr4 => vmsa.set_cr4(reg_value),
1197 _ => unreachable!(),
1198 }
1199 advance_to_next_instruction(&mut vmsa);
1200 }
1201 }
1202
1203 #[must_use]
1204 fn sync_lazy_eoi(&mut self, vtl: GuestVtl) -> bool {
1205 if self.backing.cvm.lapics[vtl].lapic.is_lazy_eoi_pending() {
1206 return self.backing.cvm.hv[vtl].set_lazy_eoi();
1207 }
1208
1209 false
1210 }
1211
1212 async fn run_vp_snp(&mut self, dev: &impl CpuIo) -> Result<(), VpHaltReason> {
1213 let next_vtl = self.backing.cvm.exit_vtl;
1214
1215 let mut vmsa = self.runner.vmsa_mut(next_vtl);
1216 let last_interrupt_ctrl = vmsa.v_intr_cntrl();
1217
1218 if vmsa.sev_features().alternate_injection() {
1219 vmsa.v_intr_cntrl_mut().set_guest_busy(false);
1220 }
1221
1222 self.unlock_tlb_lock(Vtl::Vtl2);
1223 let tlb_halt = self.should_halt_for_tlb_unlock(next_vtl);
1224
1225 let halt = self.backing.cvm.lapics[next_vtl].activity != MpState::Running || tlb_halt;
1226
1227 if halt && next_vtl == GuestVtl::Vtl1 && !tlb_halt {
1228 tracelimit::warn_ratelimited!(CVM_ALLOWED, "halting VTL 1, which might halt the guest");
1229 }
1230
1231 self.runner.set_halted(halt);
1232
1233 self.runner.set_exit_vtl(next_vtl);
1234
1235 let lazy_eoi = self.sync_lazy_eoi(next_vtl);
1237
1238 let mut has_intercept = self
1239 .runner
1240 .run()
1241 .map_err(|e| dev.fatal_error(SnpRunVpError(e).into()))?;
1242
1243 let entered_from_vtl = next_vtl;
1244 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1245
1246 let inject = if vmsa.sev_features().alternate_injection() {
1248 if vmsa.v_intr_cntrl().guest_busy() {
1249 self.backing.general_stats[entered_from_vtl]
1250 .guest_busy
1251 .increment();
1252 let exit_int_info = SevEventInjectInfo::from(vmsa.exit_int_info());
1260 assert!(
1261 exit_int_info.valid(),
1262 "event inject info should be valid {exit_int_info:x?}"
1263 );
1264
1265 match exit_int_info.interruption_type() {
1266 x86defs::snp::SEV_INTR_TYPE_EXCEPT => {
1267 if exit_int_info.vector() != 3 && exit_int_info.vector() != 4 {
1268 Some(exit_int_info)
1270 } else {
1271 None
1272 }
1273 }
1274 x86defs::snp::SEV_INTR_TYPE_SW => None,
1275 _ => Some(exit_int_info),
1276 }
1277 } else {
1278 None
1279 }
1280 } else {
1281 unimplemented!("Only alternate injection is supported for SNP")
1282 };
1283
1284 if let Some(inject) = inject {
1285 vmsa.set_event_inject(inject);
1286 }
1287 if vmsa.sev_features().alternate_injection() {
1288 vmsa.v_intr_cntrl_mut().set_guest_busy(true);
1289 }
1290
1291 if last_interrupt_ctrl.irq() && !vmsa.v_intr_cntrl().irq() {
1292 self.backing.general_stats[entered_from_vtl]
1293 .int_ack
1294 .increment();
1295 self.backing.cvm.lapics[entered_from_vtl]
1297 .lapic
1298 .acknowledge_interrupt(last_interrupt_ctrl.vector());
1299 }
1300
1301 vmsa.v_intr_cntrl_mut().set_irq(false);
1302
1303 if lazy_eoi && self.backing.cvm.hv[entered_from_vtl].clear_lazy_eoi() {
1305 self.backing.cvm.lapics[entered_from_vtl]
1306 .lapic
1307 .access(&mut SnpApicClient {
1308 partition: self.partition,
1309 vmsa,
1310 dev,
1311 vmtime: &self.vmtime,
1312 vtl: entered_from_vtl,
1313 })
1314 .lazy_eoi();
1315 }
1316
1317 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1318 let sev_error_code = SevExitCode(vmsa.guest_error_code());
1319
1320 let stat = match sev_error_code {
1321 SevExitCode::CPUID => {
1322 self.handle_cpuid(entered_from_vtl);
1323 &mut self.backing.exit_stats[entered_from_vtl].cpuid
1324 }
1325
1326 SevExitCode::MSR => {
1327 let is_write = vmsa.exit_info1() & 1 != 0;
1328 let msr = vmsa.rcx() as u32;
1329
1330 self.handle_msr_access(dev, entered_from_vtl, msr, is_write);
1331
1332 if is_write {
1333 &mut self.backing.exit_stats[entered_from_vtl].msr_write
1334 } else {
1335 &mut self.backing.exit_stats[entered_from_vtl].msr_read
1336 }
1337 }
1338
1339 SevExitCode::IOIO => {
1340 let io_info =
1341 SevIoAccessInfo::from(self.runner.vmsa(entered_from_vtl).exit_info1() as u32);
1342
1343 let access_size = if io_info.access_size32() {
1344 4
1345 } else if io_info.access_size16() {
1346 2
1347 } else {
1348 1
1349 };
1350
1351 let port_access_protected = self.cvm_try_protect_io_port_access(
1352 entered_from_vtl,
1353 io_info.port(),
1354 io_info.read_access(),
1355 access_size,
1356 io_info.string_access(),
1357 io_info.rep_access(),
1358 );
1359
1360 let vmsa = self.runner.vmsa(entered_from_vtl);
1361 if !port_access_protected {
1362 if io_info.string_access() || io_info.rep_access() {
1363 let interruption_pending = vmsa.event_inject().valid()
1364 || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();
1365
1366 self.emulate(dev, interruption_pending, entered_from_vtl, ())
1371 .await?;
1372 } else {
1373 let mut rax = vmsa.rax();
1374 emulate_io(
1375 self.inner.vp_info.base.vp_index,
1376 !io_info.read_access(),
1377 io_info.port(),
1378 &mut rax,
1379 access_size,
1380 dev,
1381 )
1382 .await;
1383
1384 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1385 vmsa.set_rax(rax);
1386 advance_to_next_instruction(&mut vmsa);
1387 }
1388 }
1389 &mut self.backing.exit_stats[entered_from_vtl].ioio
1390 }
1391
1392 SevExitCode::VMMCALL => {
1393 let is_64bit = self.long_mode(entered_from_vtl);
1394 let guest_memory = &self.partition.gm[entered_from_vtl];
1395 let handler = UhHypercallHandler {
1396 trusted: !self.cvm_partition().hide_isolation,
1397 vp: &mut *self,
1398 bus: dev,
1399 intercepted_vtl: entered_from_vtl,
1400 };
1401
1402 UhHypercallHandler::TRUSTED_DISPATCHER.dispatch(
1405 guest_memory,
1406 hv1_hypercall::X64RegisterIo::new(handler, is_64bit),
1407 );
1408 &mut self.backing.exit_stats[entered_from_vtl].vmmcall
1409 }
1410
1411 SevExitCode::SHUTDOWN => {
1412 return Err(VpHaltReason::TripleFault {
1413 vtl: entered_from_vtl.into(),
1414 });
1415 }
1416
1417 SevExitCode::WBINVD | SevExitCode::INVD => {
1418 advance_to_next_instruction(&mut vmsa);
1422 &mut self.backing.exit_stats[entered_from_vtl].invd
1423 }
1424
1425 SevExitCode::NPF if has_intercept => {
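                // An NPF exit may not match the intercept message the hypervisor
                // delivered. Treat it as a real memory intercept only when the message
                // is a #VC exception or a GPA intercept for the same page; otherwise
                // count it as spurious.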
1426 let gpa = vmsa.exit_info2();
1445 let interruption_pending = vmsa.event_inject().valid()
1446 || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();
1447 let exit_info = SevNpfInfo::from(vmsa.exit_info1());
1448 let exit_message = self.runner.exit_message();
1449 let real = match exit_message.header.typ {
1450 HvMessageType::HvMessageTypeExceptionIntercept => {
1451 let exception_message =
1452 exit_message.as_message::<hvdef::HvX64ExceptionInterceptMessage>();
1453
1454 exception_message.vector
1455 == x86defs::Exception::SEV_VMM_COMMUNICATION.0 as u16
1456 }
1457 HvMessageType::HvMessageTypeUnmappedGpa
1458 | HvMessageType::HvMessageTypeGpaIntercept
1459 | HvMessageType::HvMessageTypeUnacceptedGpa => {
1460 let gpa_message =
1461 exit_message.as_message::<hvdef::HvX64MemoryInterceptMessage>();
1462
1463 (gpa_message.guest_physical_address >> hvdef::HV_PAGE_SHIFT)
1465 == (gpa >> hvdef::HV_PAGE_SHIFT)
1466 }
1467 _ => false,
1468 };
1469
1470 if real {
1471 has_intercept = false;
1472 if self.check_mem_fault(entered_from_vtl, gpa, exit_info.is_write(), exit_info)
1473 {
1474 self.emulate(dev, interruption_pending, entered_from_vtl, ())
1475 .await?;
1476 }
1477 &mut self.backing.exit_stats[entered_from_vtl].npf
1478 } else {
1479 &mut self.backing.exit_stats[entered_from_vtl].npf_spurious
1480 }
1481 }
1482
1483 SevExitCode::NPF => &mut self.backing.exit_stats[entered_from_vtl].npf_no_intercept,
1484
1485 SevExitCode::HLT => {
1486 self.backing.cvm.lapics[entered_from_vtl].activity = MpState::Halted;
1487 vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
1489 &mut self.backing.exit_stats[entered_from_vtl].hlt
1490 }
1491
1492 SevExitCode::INVALID_VMCB => {
1493 return Err(dev.fatal_error(InvalidVmcb.into()));
1494 }
1495
1496 SevExitCode::INVLPGB | SevExitCode::ILLEGAL_INVLPGB => {
1497 vmsa.set_event_inject(
1498 SevEventInjectInfo::new()
1499 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1500 .with_vector(x86defs::Exception::INVALID_OPCODE.0)
1501 .with_valid(true),
1502 );
1503 &mut self.backing.exit_stats[entered_from_vtl].invlpgb
1504 }
1505
1506 SevExitCode::RDPMC => {
1507 let cr4 = vmsa.cr4();
1510 if ((vmsa.cpl() > 0) && (cr4 & x86defs::X64_CR4_PCE == 0))
1511 || (vmsa.rcx() as u32 >= 4)
1512 {
1513 vmsa.set_event_inject(
1514 SevEventInjectInfo::new()
1515 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1516 .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
1517 .with_deliver_error_code(true)
1518 .with_valid(true),
1519 );
1520 } else {
1521 vmsa.set_rax(0);
1522 vmsa.set_rdx(0);
1523 advance_to_next_instruction(&mut vmsa);
1524 }
1525 &mut self.backing.exit_stats[entered_from_vtl].rdpmc
1526 }
1527
1528 SevExitCode::VMGEXIT if has_intercept => {
1529 has_intercept = false;
1530 match self.runner.exit_message().header.typ {
1531 HvMessageType::HvMessageTypeX64SevVmgexitIntercept => {
1532 self.handle_vmgexit(dev, entered_from_vtl)
1533 .map_err(|e| dev.fatal_error(e.into()))?;
1534 }
1535 _ => has_intercept = true,
1536 }
1537 &mut self.backing.exit_stats[entered_from_vtl].vmgexit
1538 }
1539
1540 SevExitCode::NMI
1541 | SevExitCode::PAUSE
1542 | SevExitCode::SMI
1543 | SevExitCode::VMGEXIT
1544 | SevExitCode::BUSLOCK
1545 | SevExitCode::IDLE_HLT => {
1546 &mut self.backing.exit_stats[entered_from_vtl].automatic_exit
1548 }
1549
1550 SevExitCode::VINTR => {
1551 unimplemented!("SevExitCode::VINTR");
1557 }
1558
1559 SevExitCode::INTR => {
1560 &mut self.backing.exit_stats[entered_from_vtl].intr
1563 }
1564
1565 SevExitCode::XSETBV => {
1566 self.handle_xsetbv(entered_from_vtl);
1567 &mut self.backing.exit_stats[entered_from_vtl].xsetbv
1568 }
1569
1570 SevExitCode::EXCP_DB => &mut self.backing.exit_stats[entered_from_vtl].excp_db,
1571
1572 SevExitCode::CR0_WRITE => {
1573 self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr0);
1574 &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
1575 }
1576 SevExitCode::CR4_WRITE => {
1577 self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr4);
1578 &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
1579 }
1580
1581 tr_exit_code @ (SevExitCode::GDTR_WRITE
1582 | SevExitCode::IDTR_WRITE
1583 | SevExitCode::LDTR_WRITE
1584 | SevExitCode::TR_WRITE) => {
1585 let reg = match tr_exit_code {
1586 SevExitCode::GDTR_WRITE => HvX64RegisterName::Gdtr,
1587 SevExitCode::IDTR_WRITE => HvX64RegisterName::Idtr,
1588 SevExitCode::LDTR_WRITE => HvX64RegisterName::Ldtr,
1589 SevExitCode::TR_WRITE => HvX64RegisterName::Tr,
1590 _ => unreachable!(),
1591 };
1592
1593 if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, 0) {
1594 panic!("unexpected secure register");
1601 }
1602
1603 &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
1604 }
1605
1606 _ => {
1607 tracing::error!(
1608 CVM_CONFIDENTIAL,
1609 "SEV exit code {sev_error_code:x?} sev features {:x?} v_intr_control {:x?} event inject {:x?} \
1610 vmpl {:x?} cpl {:x?} exit_info1 {:x?} exit_info2 {:x?} exit_int_info {:x?} virtual_tom {:x?} \
1611 efer {:x?} cr4 {:x?} cr3 {:x?} cr0 {:x?} rflag {:x?} rip {:x?} next rip {:x?}",
1612 vmsa.sev_features(),
1613 vmsa.v_intr_cntrl(),
1614 vmsa.event_inject(),
1615 vmsa.vmpl(),
1616 vmsa.cpl(),
1617 vmsa.exit_info1(),
1618 vmsa.exit_info2(),
1619 vmsa.exit_int_info(),
1620 vmsa.virtual_tom(),
1621 vmsa.efer(),
1622 vmsa.cr4(),
1623 vmsa.cr3(),
1624 vmsa.cr0(),
1625 vmsa.rflags(),
1626 vmsa.rip(),
1627 vmsa.next_rip(),
1628 );
1629 panic!("Received unexpected SEV exit code {sev_error_code:x?}");
1630 }
1631 };
1632 stat.increment();
1633
1634 if cfg!(feature = "gdb") && sev_error_code == SevExitCode::EXCP_DB {
1636 return self.handle_debug_exception(dev, entered_from_vtl);
1637 }
1638
1639 if has_intercept {
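            // The hypervisor delivered a synthetic intercept message that the exit
            // handling above did not consume; dispatch it here.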
1643 self.backing.general_stats[entered_from_vtl]
1644 .synth_int
1645 .increment();
1646 match self.runner.exit_message().header.typ {
1647 HvMessageType::HvMessageTypeSynicSintDeliverable => {
1648 self.handle_synic_deliverable_exit();
1649 }
1650 HvMessageType::HvMessageTypeX64Halt
1651 | HvMessageType::HvMessageTypeExceptionIntercept => {
1652 }
1656 message_type => {
1657 tracelimit::error_ratelimited!(
1658 CVM_ALLOWED,
1659 ?message_type,
1660 "unknown synthetic exit"
1661 );
1662 }
1663 }
1664 }
1665
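        // Reset the guest error code to a benign value so that a stale exit code is
        // not misinterpreted on the next iteration of the run loop.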
1666 self.runner
1675 .vmsa_mut(entered_from_vtl)
1676 .set_guest_error_code(SevExitCode::INTR.0);
1677 Ok(())
1678 }
1679
1680 fn long_mode(&self, vtl: GuestVtl) -> bool {
1681 let vmsa = self.runner.vmsa(vtl);
1682 vmsa.cr0() & x86defs::X64_CR0_PE != 0 && vmsa.efer() & x86defs::X64_EFER_LMA != 0
1683 }
1684
1685 fn handle_cpuid(&mut self, vtl: GuestVtl) {
1686 let vmsa = self.runner.vmsa(vtl);
1687 let leaf = vmsa.rax() as u32;
1688 let subleaf = vmsa.rcx() as u32;
1689 let [mut eax, mut ebx, mut ecx, mut edx] = self.cvm_cpuid_result(vtl, leaf, subleaf);
1690
1691 match CpuidFunction(leaf) {
1699 CpuidFunction::ProcessorTopologyDefinition => {
1700 let apic_id = self.inner.vp_info.apic_id;
1701 let vps_per_socket = self.cvm_partition().vps_per_socket;
1702 eax = x86defs::cpuid::ProcessorTopologyDefinitionEax::from(eax)
1703 .with_extended_apic_id(apic_id)
1704 .into();
1705
1706 let topology_ebx = x86defs::cpuid::ProcessorTopologyDefinitionEbx::from(ebx);
1707 let mut new_unit_id = apic_id & (vps_per_socket - 1);
1708
1709 if topology_ebx.threads_per_compute_unit() > 0 {
1710 new_unit_id /= 2;
1711 }
1712
1713 ebx = topology_ebx.with_compute_unit_id(new_unit_id as u8).into();
1714
1715 let amd_nodes_per_socket = 1u32;
1718
1719 let node_id = apic_id
1720 >> (vps_per_socket
1721 .trailing_zeros()
1722 .saturating_sub(amd_nodes_per_socket.trailing_zeros()));
1723 let nodes_per_processor = amd_nodes_per_socket - 1;
1725
1726 ecx = x86defs::cpuid::ProcessorTopologyDefinitionEcx::from(ecx)
1727 .with_node_id(node_id as u8)
1728 .with_nodes_per_processor(nodes_per_processor as u8)
1729 .into();
1730 }
1731 CpuidFunction::ExtendedSevFeatures => {
1732 eax = 0;
1736 ebx = 0;
1737 ecx = 0;
1738 edx = 0;
1739 }
1740 _ => {}
1741 }
1742
1743 let mut vmsa = self.runner.vmsa_mut(vtl);
1744 vmsa.set_rax(eax.into());
1745 vmsa.set_rbx(ebx.into());
1746 vmsa.set_rcx(ecx.into());
1747 vmsa.set_rdx(edx.into());
1748 advance_to_next_instruction(&mut vmsa);
1749 }
1750}
1751
1752impl<T: CpuIo> X86EmulatorSupport for UhEmulationState<'_, '_, T, SnpBacked> {
1753 fn flush(&mut self) {
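        // Nothing to flush: register state is read from and written to the VMSA
        // directly on each access.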
1754 }
1756
1757 fn vp_index(&self) -> VpIndex {
1758 self.vp.vp_index()
1759 }
1760
1761 fn vendor(&self) -> x86defs::cpuid::Vendor {
1762 self.vp.partition.caps.vendor
1763 }
1764
1765 fn gp(&mut self, reg: x86emu::Gp) -> u64 {
1766 let vmsa = self.vp.runner.vmsa(self.vtl);
1767 match reg {
1768 x86emu::Gp::RAX => vmsa.rax(),
1769 x86emu::Gp::RCX => vmsa.rcx(),
1770 x86emu::Gp::RDX => vmsa.rdx(),
1771 x86emu::Gp::RBX => vmsa.rbx(),
1772 x86emu::Gp::RSP => vmsa.rsp(),
1773 x86emu::Gp::RBP => vmsa.rbp(),
1774 x86emu::Gp::RSI => vmsa.rsi(),
1775 x86emu::Gp::RDI => vmsa.rdi(),
1776 x86emu::Gp::R8 => vmsa.r8(),
1777 x86emu::Gp::R9 => vmsa.r9(),
1778 x86emu::Gp::R10 => vmsa.r10(),
1779 x86emu::Gp::R11 => vmsa.r11(),
1780 x86emu::Gp::R12 => vmsa.r12(),
1781 x86emu::Gp::R13 => vmsa.r13(),
1782 x86emu::Gp::R14 => vmsa.r14(),
1783 x86emu::Gp::R15 => vmsa.r15(),
1784 }
1785 }
1786
1787 fn set_gp(&mut self, reg: x86emu::Gp, v: u64) {
1788 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1789 match reg {
1790 x86emu::Gp::RAX => vmsa.set_rax(v),
1791 x86emu::Gp::RCX => vmsa.set_rcx(v),
1792 x86emu::Gp::RDX => vmsa.set_rdx(v),
1793 x86emu::Gp::RBX => vmsa.set_rbx(v),
1794 x86emu::Gp::RSP => vmsa.set_rsp(v),
1795 x86emu::Gp::RBP => vmsa.set_rbp(v),
1796 x86emu::Gp::RSI => vmsa.set_rsi(v),
1797 x86emu::Gp::RDI => vmsa.set_rdi(v),
1798 x86emu::Gp::R8 => vmsa.set_r8(v),
1799 x86emu::Gp::R9 => vmsa.set_r9(v),
1800 x86emu::Gp::R10 => vmsa.set_r10(v),
1801 x86emu::Gp::R11 => vmsa.set_r11(v),
1802 x86emu::Gp::R12 => vmsa.set_r12(v),
1803 x86emu::Gp::R13 => vmsa.set_r13(v),
1804 x86emu::Gp::R14 => vmsa.set_r14(v),
1805 x86emu::Gp::R15 => vmsa.set_r15(v),
1806 };
1807 }
1808
1809 fn xmm(&mut self, index: usize) -> u128 {
1810 self.vp.runner.vmsa_mut(self.vtl).xmm_registers(index)
1811 }
1812
1813 fn set_xmm(&mut self, index: usize, v: u128) {
1814 self.vp
1815 .runner
1816 .vmsa_mut(self.vtl)
1817 .set_xmm_registers(index, v);
1818 }
1819
1820 fn rip(&mut self) -> u64 {
1821 let vmsa = self.vp.runner.vmsa(self.vtl);
1822 vmsa.rip()
1823 }
1824
1825 fn set_rip(&mut self, v: u64) {
1826 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1827 vmsa.set_rip(v);
1828 }
1829
1830 fn segment(&mut self, index: x86emu::Segment) -> x86defs::SegmentRegister {
1831 let vmsa = self.vp.runner.vmsa(self.vtl);
1832 match index {
1833 x86emu::Segment::ES => virt_seg_from_snp(vmsa.es()),
1834 x86emu::Segment::CS => virt_seg_from_snp(vmsa.cs()),
1835 x86emu::Segment::SS => virt_seg_from_snp(vmsa.ss()),
1836 x86emu::Segment::DS => virt_seg_from_snp(vmsa.ds()),
1837 x86emu::Segment::FS => virt_seg_from_snp(vmsa.fs()),
1838 x86emu::Segment::GS => virt_seg_from_snp(vmsa.gs()),
1839 }
1840 .into()
1841 }
1842
1843 fn efer(&mut self) -> u64 {
1844 let vmsa = self.vp.runner.vmsa(self.vtl);
1845 vmsa.efer()
1846 }
1847
1848 fn cr0(&mut self) -> u64 {
1849 let vmsa = self.vp.runner.vmsa(self.vtl);
1850 vmsa.cr0()
1851 }
1852
1853 fn rflags(&mut self) -> RFlags {
1854 let vmsa = self.vp.runner.vmsa(self.vtl);
1855 vmsa.rflags().into()
1856 }
1857
1858 fn set_rflags(&mut self, v: RFlags) {
1859 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1860 vmsa.set_rflags(v.into());
1861 }
1862
1863 fn instruction_bytes(&self) -> &[u8] {
1864 &[]
1865 }
1866
1867 fn physical_address(&self) -> Option<u64> {
1868 Some(self.vp.runner.vmsa(self.vtl).exit_info2())
1869 }
1870
1871 fn initial_gva_translation(
1872 &mut self,
1873 ) -> Option<virt_support_x86emu::emulate::InitialTranslation> {
1874 None
1875 }
1876
1877 fn interruption_pending(&self) -> bool {
1878 self.interruption_pending
1879 }
1880
1881 fn check_vtl_access(
1882 &mut self,
1883 _gpa: u64,
1884 _mode: virt_support_x86emu::emulate::TranslateMode,
1885 ) -> Result<(), virt_support_x86emu::emulate::EmuCheckVtlAccessError> {
1886 Ok(())
1888 }
1889
1890 fn translate_gva(
1891 &mut self,
1892 gva: u64,
1893 mode: virt_support_x86emu::emulate::TranslateMode,
1894 ) -> Result<
1895 virt_support_x86emu::emulate::EmuTranslateResult,
1896 virt_support_x86emu::emulate::EmuTranslateError,
1897 > {
1898 emulate_translate_gva(self, gva, mode)
1899 }
1900
1901 fn inject_pending_event(&mut self, event_info: hvdef::HvX64PendingEvent) {
1902 assert!(event_info.reg_0.event_pending());
1903 assert_eq!(
1904 event_info.reg_0.event_type(),
1905 hvdef::HV_X64_PENDING_EVENT_EXCEPTION
1906 );
1907
1908 let exception = HvX64PendingExceptionEvent::from(event_info.reg_0.into_bits());
1909 assert!(!self.interruption_pending);
1910
1911 SnpBacked::set_pending_exception(self.vp, self.vtl, exception);
1914 }
1915
1916 fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool {
1917 let vtom = self.vp.partition.caps.vtom.unwrap();
1920 debug_assert!(vtom == 0 || vtom.is_power_of_two());
1921 self.vp.partition.is_gpa_mapped(gpa & !vtom, write)
1922 }
1923
1924 fn lapic_base_address(&self) -> Option<u64> {
1925 self.vp.backing.cvm.lapics[self.vtl].lapic.base_address()
1926 }
1927
1928 fn lapic_read(&mut self, address: u64, data: &mut [u8]) {
1929 let vtl = self.vtl;
1930 self.vp.backing.cvm.lapics[vtl]
1931 .lapic
1932 .access(&mut SnpApicClient {
1933 partition: self.vp.partition,
1934 vmsa: self.vp.runner.vmsa_mut(vtl),
1935 dev: self.devices,
1936 vmtime: &self.vp.vmtime,
1937 vtl,
1938 })
1939 .mmio_read(address, data);
1940 }
1941
1942 fn lapic_write(&mut self, address: u64, data: &[u8]) {
1943 let vtl = self.vtl;
1944 self.vp.backing.cvm.lapics[vtl]
1945 .lapic
1946 .access(&mut SnpApicClient {
1947 partition: self.vp.partition,
1948 vmsa: self.vp.runner.vmsa_mut(vtl),
1949 dev: self.devices,
1950 vmtime: &self.vp.vmtime,
1951 vtl,
1952 })
1953 .mmio_write(address, data);
1954 }
1955
1956 fn monitor_support(&self) -> Option<&dyn EmulatorMonitorSupport> {
1957 Some(self)
1958 }
1959}
1960
1961impl<T> hv1_hypercall::X64RegisterState for UhHypercallHandler<'_, '_, T, SnpBacked> {
1962 fn rip(&mut self) -> u64 {
1963 self.vp.runner.vmsa(self.intercepted_vtl).rip()
1964 }
1965
1966 fn set_rip(&mut self, rip: u64) {
1967 self.vp.runner.vmsa_mut(self.intercepted_vtl).set_rip(rip);
1968 }
1969
1970 fn gp(&mut self, n: hv1_hypercall::X64HypercallRegister) -> u64 {
1971 let vmsa = self.vp.runner.vmsa(self.intercepted_vtl);
1972 match n {
1973 hv1_hypercall::X64HypercallRegister::Rax => vmsa.rax(),
1974 hv1_hypercall::X64HypercallRegister::Rcx => vmsa.rcx(),
1975 hv1_hypercall::X64HypercallRegister::Rdx => vmsa.rdx(),
1976 hv1_hypercall::X64HypercallRegister::Rbx => vmsa.rbx(),
1977 hv1_hypercall::X64HypercallRegister::Rsi => vmsa.rsi(),
1978 hv1_hypercall::X64HypercallRegister::Rdi => vmsa.rdi(),
1979 hv1_hypercall::X64HypercallRegister::R8 => vmsa.r8(),
1980 }
1981 }
1982
1983 fn set_gp(&mut self, n: hv1_hypercall::X64HypercallRegister, value: u64) {
1984 let mut vmsa = self.vp.runner.vmsa_mut(self.intercepted_vtl);
1985 match n {
1986 hv1_hypercall::X64HypercallRegister::Rax => vmsa.set_rax(value),
1987 hv1_hypercall::X64HypercallRegister::Rcx => vmsa.set_rcx(value),
1988 hv1_hypercall::X64HypercallRegister::Rdx => vmsa.set_rdx(value),
1989 hv1_hypercall::X64HypercallRegister::Rbx => vmsa.set_rbx(value),
1990 hv1_hypercall::X64HypercallRegister::Rsi => vmsa.set_rsi(value),
1991 hv1_hypercall::X64HypercallRegister::Rdi => vmsa.set_rdi(value),
1992 hv1_hypercall::X64HypercallRegister::R8 => vmsa.set_r8(value),
1993 }
1994 }
1995
1996 fn xmm(&mut self, n: usize) -> u128 {
1997 self.vp.runner.vmsa(self.intercepted_vtl).xmm_registers(n)
1998 }
1999
2000 fn set_xmm(&mut self, n: usize, value: u128) {
2001 self.vp
2002 .runner
2003 .vmsa_mut(self.intercepted_vtl)
2004 .set_xmm_registers(n, value);
2005 }
2006}
2007
impl AccessVpState for UhVpStateAccess<'_, '_, SnpBacked> {
    type Error = vp_state::Error;

    fn caps(&self) -> &virt::x86::X86PartitionCapabilities {
        &self.vp.partition.caps
    }

    fn commit(&mut self) -> Result<(), Self::Error> {
        Ok(())
    }

    fn registers(&mut self) -> Result<vp::Registers, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);

        Ok(vp::Registers {
            rax: vmsa.rax(),
            rcx: vmsa.rcx(),
            rdx: vmsa.rdx(),
            rbx: vmsa.rbx(),
            rsp: vmsa.rsp(),
            rbp: vmsa.rbp(),
            rsi: vmsa.rsi(),
            rdi: vmsa.rdi(),
            r8: vmsa.r8(),
            r9: vmsa.r9(),
            r10: vmsa.r10(),
            r11: vmsa.r11(),
            r12: vmsa.r12(),
            r13: vmsa.r13(),
            r14: vmsa.r14(),
            r15: vmsa.r15(),
            rip: vmsa.rip(),
            rflags: vmsa.rflags(),
            cs: virt_seg_from_snp(vmsa.cs()),
            ds: virt_seg_from_snp(vmsa.ds()),
            es: virt_seg_from_snp(vmsa.es()),
            fs: virt_seg_from_snp(vmsa.fs()),
            gs: virt_seg_from_snp(vmsa.gs()),
            ss: virt_seg_from_snp(vmsa.ss()),
            tr: virt_seg_from_snp(vmsa.tr()),
            ldtr: virt_seg_from_snp(vmsa.ldtr()),
            gdtr: virt_table_from_snp(vmsa.gdtr()),
            idtr: virt_table_from_snp(vmsa.idtr()),
            cr0: vmsa.cr0(),
            cr2: vmsa.cr2(),
            cr3: vmsa.cr3(),
            cr4: vmsa.cr4(),
            cr8: vmsa.v_intr_cntrl().tpr().into(),
            efer: vmsa.efer(),
        })
    }

    fn set_registers(&mut self, value: &vp::Registers) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);

        let vp::Registers {
            rax,
            rcx,
            rdx,
            rbx,
            rsp,
            rbp,
            rsi,
            rdi,
            r8,
            r9,
            r10,
            r11,
            r12,
            r13,
            r14,
            r15,
            rip,
            rflags,
            cs,
            ds,
            es,
            fs,
            gs,
            ss,
            tr,
            ldtr,
            gdtr,
            idtr,
            cr0,
            cr2,
            cr3,
            cr4,
            cr8,
            efer,
        } = *value;
        vmsa.set_rax(rax);
        vmsa.set_rcx(rcx);
        vmsa.set_rdx(rdx);
        vmsa.set_rbx(rbx);
        vmsa.set_rsp(rsp);
        vmsa.set_rbp(rbp);
        vmsa.set_rsi(rsi);
        vmsa.set_rdi(rdi);
        vmsa.set_r8(r8);
        vmsa.set_r9(r9);
        vmsa.set_r10(r10);
        vmsa.set_r11(r11);
        vmsa.set_r12(r12);
        vmsa.set_r13(r13);
        vmsa.set_r14(r14);
        vmsa.set_r15(r15);
        vmsa.set_rip(rip);
        vmsa.set_rflags(rflags);
        vmsa.set_cs(virt_seg_to_snp(cs));
        vmsa.set_ds(virt_seg_to_snp(ds));
        vmsa.set_es(virt_seg_to_snp(es));
        vmsa.set_fs(virt_seg_to_snp(fs));
        vmsa.set_gs(virt_seg_to_snp(gs));
        vmsa.set_ss(virt_seg_to_snp(ss));
        vmsa.set_tr(virt_seg_to_snp(tr));
        vmsa.set_ldtr(virt_seg_to_snp(ldtr));
        vmsa.set_gdtr(virt_table_to_snp(gdtr));
        vmsa.set_idtr(virt_table_to_snp(idtr));
        vmsa.set_cr0(cr0);
        vmsa.set_cr2(cr2);
        vmsa.set_cr3(cr3);
        vmsa.set_cr4(cr4);
        vmsa.v_intr_cntrl_mut().set_tpr(cr8 as u8);
        vmsa.set_efer(SnpBacked::calculate_efer(efer, cr0));
        Ok(())
    }

    fn activity(&mut self) -> Result<vp::Activity, Self::Error> {
        let lapic = &self.vp.backing.cvm.lapics[self.vtl];

        Ok(vp::Activity {
            mp_state: lapic.activity,
            nmi_pending: lapic.nmi_pending,
            nmi_masked: false,
            interrupt_shadow: false,
            pending_event: None,
            pending_interruption: None,
        })
    }

    fn set_activity(&mut self, value: &vp::Activity) -> Result<(), Self::Error> {
        let &vp::Activity {
            mp_state,
            nmi_pending,
            nmi_masked: _,
            interrupt_shadow: _,
            pending_event: _,
            pending_interruption: _,
        } = value;
        let lapic = &mut self.vp.backing.cvm.lapics[self.vtl];
        lapic.activity = mp_state;
        lapic.nmi_pending = nmi_pending;

        Ok(())
    }

    fn xsave(&mut self) -> Result<vp::Xsave, Self::Error> {
        Err(vp_state::Error::Unimplemented("xsave"))
    }

    fn set_xsave(&mut self, _value: &vp::Xsave) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("xsave"))
    }

    fn apic(&mut self) -> Result<vp::Apic, Self::Error> {
        Ok(self.vp.backing.cvm.lapics[self.vtl].lapic.save())
    }

    fn set_apic(&mut self, value: &vp::Apic) -> Result<(), Self::Error> {
        self.vp.backing.cvm.lapics[self.vtl]
            .lapic
            .restore(value)
            .map_err(vp_state::Error::InvalidApicBase)?;
        Ok(())
    }

    fn xcr(&mut self) -> Result<vp::Xcr0, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Xcr0 { value: vmsa.xcr0() })
    }

    fn set_xcr(&mut self, value: &vp::Xcr0) -> Result<(), Self::Error> {
        let vp::Xcr0 { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_xcr0(value);
        Ok(())
    }

    fn xss(&mut self) -> Result<vp::Xss, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Xss { value: vmsa.xss() })
    }

    fn set_xss(&mut self, value: &vp::Xss) -> Result<(), Self::Error> {
        let vp::Xss { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_xss(value);
        Ok(())
    }

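    // MTRR state is not tracked in the VMSA; report a zeroed (disabled)
    // configuration and accept writes without storing them.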
    fn mtrrs(&mut self) -> Result<vp::Mtrrs, Self::Error> {
        Ok(vp::Mtrrs {
            msr_mtrr_def_type: 0,
            fixed: [0; 11],
            variable: [0; 16],
        })
    }

    fn set_mtrrs(&mut self, _value: &vp::Mtrrs) -> Result<(), Self::Error> {
        Ok(())
    }

    fn pat(&mut self) -> Result<vp::Pat, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Pat { value: vmsa.pat() })
    }

    fn set_pat(&mut self, value: &vp::Pat) -> Result<(), Self::Error> {
        let vp::Pat { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_pat(value);
        Ok(())
    }

    fn virtual_msrs(&mut self) -> Result<vp::VirtualMsrs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);

        Ok(vp::VirtualMsrs {
            kernel_gs_base: vmsa.kernel_gs_base(),
            sysenter_cs: vmsa.sysenter_cs(),
            sysenter_eip: vmsa.sysenter_eip(),
            sysenter_esp: vmsa.sysenter_esp(),
            star: vmsa.star(),
            lstar: vmsa.lstar(),
            cstar: vmsa.cstar(),
            sfmask: vmsa.sfmask(),
        })
    }

    fn set_virtual_msrs(&mut self, value: &vp::VirtualMsrs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::VirtualMsrs {
            kernel_gs_base,
            sysenter_cs,
            sysenter_eip,
            sysenter_esp,
            star,
            lstar,
            cstar,
            sfmask,
        } = *value;
        vmsa.set_kernel_gs_base(kernel_gs_base);
        vmsa.set_sysenter_cs(sysenter_cs);
        vmsa.set_sysenter_eip(sysenter_eip);
        vmsa.set_sysenter_esp(sysenter_esp);
        vmsa.set_star(star);
        vmsa.set_lstar(lstar);
        vmsa.set_cstar(cstar);
        vmsa.set_sfmask(sfmask);

        Ok(())
    }

    fn debug_regs(&mut self) -> Result<vp::DebugRegisters, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::DebugRegisters {
            dr0: vmsa.dr0(),
            dr1: vmsa.dr1(),
            dr2: vmsa.dr2(),
            dr3: vmsa.dr3(),
            dr6: vmsa.dr6(),
            dr7: vmsa.dr7(),
        })
    }

    fn set_debug_regs(&mut self, value: &vp::DebugRegisters) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::DebugRegisters {
            dr0,
            dr1,
            dr2,
            dr3,
            dr6,
            dr7,
        } = *value;
        vmsa.set_dr0(dr0);
        vmsa.set_dr1(dr1);
        vmsa.set_dr2(dr2);
        vmsa.set_dr3(dr3);
        vmsa.set_dr6(dr6);
        vmsa.set_dr7(dr7);
        Ok(())
    }

    fn tsc(&mut self) -> Result<vp::Tsc, Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn set_tsc(&mut self, _value: &vp::Tsc) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn tsc_aux(&mut self) -> Result<vp::TscAux, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::TscAux {
            value: vmsa.tsc_aux() as u64,
        })
    }

    fn set_tsc_aux(&mut self, value: &vp::TscAux) -> Result<(), Self::Error> {
        let vp::TscAux { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_tsc_aux(value as u32);
        Ok(())
    }

    fn cet(&mut self) -> Result<vp::Cet, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Cet { scet: vmsa.s_cet() })
    }

    fn set_cet(&mut self, value: &vp::Cet) -> Result<(), Self::Error> {
        let vp::Cet { scet } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_s_cet(scet);
        Ok(())
    }

    fn cet_ss(&mut self) -> Result<vp::CetSs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::CetSs {
            ssp: vmsa.ssp(),
            interrupt_ssp_table_addr: vmsa.interrupt_ssp_table_addr(),
        })
    }

    fn set_cet_ss(&mut self, value: &vp::CetSs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::CetSs {
            ssp,
            interrupt_ssp_table_addr,
        } = *value;
        vmsa.set_ssp(ssp);
        vmsa.set_interrupt_ssp_table_addr(interrupt_ssp_table_addr);
        Ok(())
    }

    fn synic_msrs(&mut self) -> Result<vp::SyntheticMsrs, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn set_synic_msrs(&mut self, _value: &vp::SyntheticMsrs) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn synic_message_page(&mut self) -> Result<vp::SynicMessagePage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn set_synic_message_page(&mut self, _value: &vp::SynicMessagePage) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn synic_event_flags_page(&mut self) -> Result<vp::SynicEventFlagsPage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn set_synic_event_flags_page(
        &mut self,
        _value: &vp::SynicEventFlagsPage,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn synic_message_queues(&mut self) -> Result<vp::SynicMessageQueues, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn set_synic_message_queues(
        &mut self,
        _value: &vp::SynicMessageQueues,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn synic_timers(&mut self) -> Result<vp::SynicTimers, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }

    fn set_synic_timers(&mut self, _value: &vp::SynicTimers) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }
}

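/// Advances RIP to the hardware-reported next instruction and clears the
/// interrupt shadow.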
fn advance_to_next_instruction(vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>) {
    vmsa.set_rip(vmsa.next_rip());
    vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
}

impl UhProcessor<'_, SnpBacked> {
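    /// Reads an MSR that is backed by the VMSA or emulated locally. Returns
    /// `MsrError::Unknown` for unrecognized MSRs.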
    fn read_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        vtl: GuestVtl,
    ) -> Result<u64, MsrError> {
        let vmsa = self.runner.vmsa(vtl);
        let value = match msr {
            x86defs::X64_MSR_FS_BASE => vmsa.fs().base,
            x86defs::X64_MSR_GS_BASE => vmsa.gs().base,
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.kernel_gs_base(),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.tsc_aux() as u64
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.spec_ctrl(),
            x86defs::X86X_MSR_U_CET => vmsa.u_cet(),
            x86defs::X86X_MSR_S_CET => vmsa.s_cet(),
            x86defs::X86X_MSR_PL0_SSP => vmsa.pl0_ssp(),
            x86defs::X86X_MSR_PL1_SSP => vmsa.pl1_ssp(),
            x86defs::X86X_MSR_PL2_SSP => vmsa.pl2_ssp(),
            x86defs::X86X_MSR_PL3_SSP => vmsa.pl3_ssp(),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.interrupt_ssp_table_addr(),
            x86defs::X86X_MSR_CR_PAT => vmsa.pat(),
            x86defs::X86X_MSR_EFER => vmsa.efer(),
            x86defs::X86X_MSR_STAR => vmsa.star(),
            x86defs::X86X_MSR_LSTAR => vmsa.lstar(),
            x86defs::X86X_MSR_CSTAR => vmsa.cstar(),
            x86defs::X86X_MSR_SFMASK => vmsa.sfmask(),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.sysenter_cs(),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.sysenter_esp(),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.sysenter_eip(),
            x86defs::X86X_MSR_XSS => vmsa.xss(),
            x86defs::X86X_AMD_MSR_VM_CR => 0,
            x86defs::X86X_MSR_TSC => safe_intrinsics::rdtsc(),
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => 0xffff_ffff,
            x86defs::X86X_MSR_MTRR_CAP => {
                // Advertise write-combining support only (no fixed or variable
                // MTRR ranges).
                0x400
            }
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {
                // MTRRs are reported as disabled.
                0
            }
            x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP
            | x86defs::X86X_MSR_MCG_STATUS => 0,

            hvdef::HV_X64_MSR_GUEST_IDLE => {
                self.backing.cvm.lapics[vtl].activity = MpState::Idle;
                let mut vmsa = self.runner.vmsa_mut(vtl);
                vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
                0
            }
            _ => return Err(MsrError::Unknown),
        };
        Ok(value)
    }

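    /// Writes an MSR that is backed by the VMSA or emulated locally. Writes to
    /// unknown MSRs are logged and ignored.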
    fn write_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        value: u64,
        vtl: GuestVtl,
    ) -> Result<(), MsrError> {
        let mut vmsa = self.runner.vmsa_mut(vtl);
        match msr {
            x86defs::X64_MSR_FS_BASE => {
                let fs = vmsa.fs();
                vmsa.set_fs(SevSelector {
                    attrib: fs.attrib,
                    selector: fs.selector,
                    limit: fs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_GS_BASE => {
                let gs = vmsa.gs();
                vmsa.set_gs(SevSelector {
                    attrib: gs.attrib,
                    selector: gs.selector,
                    limit: gs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.set_kernel_gs_base(value),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.set_tsc_aux(value as u32);
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.set_spec_ctrl(value),
            x86defs::X86X_MSR_U_CET => vmsa.set_u_cet(value),
            x86defs::X86X_MSR_S_CET => vmsa.set_s_cet(value),
            x86defs::X86X_MSR_PL0_SSP => vmsa.set_pl0_ssp(value),
            x86defs::X86X_MSR_PL1_SSP => vmsa.set_pl1_ssp(value),
            x86defs::X86X_MSR_PL2_SSP => vmsa.set_pl2_ssp(value),
            x86defs::X86X_MSR_PL3_SSP => vmsa.set_pl3_ssp(value),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.set_interrupt_ssp_table_addr(value),

            x86defs::X86X_MSR_CR_PAT => vmsa.set_pat(value),
            x86defs::X86X_MSR_EFER => vmsa.set_efer(SnpBacked::calculate_efer(value, vmsa.cr0())),

            x86defs::X86X_MSR_STAR => vmsa.set_star(value),
            x86defs::X86X_MSR_LSTAR => vmsa.set_lstar(value),
            x86defs::X86X_MSR_CSTAR => vmsa.set_cstar(value),
            x86defs::X86X_MSR_SFMASK => vmsa.set_sfmask(value),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.set_sysenter_cs(value),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.set_sysenter_esp(value),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.set_sysenter_eip(value),
            x86defs::X86X_MSR_XSS => vmsa.set_xss(value),

            // Writes to these MSRs are silently dropped.
            x86defs::X86X_MSR_TSC => {}
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => {}
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {}

            x86defs::X86X_AMD_MSR_VM_CR
            | x86defs::X86X_MSR_MTRR_CAP
            | x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP => return Err(MsrError::InvalidAccess),

            x86defs::X86X_MSR_MCG_STATUS => {
                // Writes with reserved bits set are rejected; otherwise the
                // write is ignored.
                if x86defs::X86xMcgStatusRegister::from(value).reserved0() != 0 {
                    return Err(MsrError::InvalidAccess);
                }
            }
            _ => {
                tracing::debug!(msr, value, "unknown cvm msr write");
            }
        }
        Ok(())
    }
}

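// Operations used by the common VTL switch hypercall handling: advancing RIP
// and injecting an invalid-opcode (#UD) fault into the intercepted VTL.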
impl<T: CpuIo> hv1_hypercall::VtlSwitchOps for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn advance_ip(&mut self) {
        let is_64bit = self.vp.long_mode(self.intercepted_vtl);
        let mut io = hv1_hypercall::X64RegisterIo::new(self, is_64bit);
        io.advance_ip();
    }

    fn inject_invalid_opcode_fault(&mut self) {
        self.vp
            .runner
            .vmsa_mut(self.intercepted_vtl)
            .set_event_inject(
                SevEventInjectInfo::new()
                    .with_valid(true)
                    .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                    .with_vector(x86defs::Exception::INVALID_OPCODE.0),
            );
    }
}

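// HvFlushVirtualAddressList is forwarded to the extended variant, which
// handles both the simple and extended GVA range formats.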
impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressList for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn flush_virtual_address_list(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        hv1_hypercall::FlushVirtualAddressListEx::flush_virtual_address_list_ex(
            self,
            processor_set,
            flags,
            gva_ranges,
        )
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressListEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_list_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        self.hcvm_validate_flush_inputs(processor_set, flags, true)
            .map_err(|e| (e, 0))?;

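        // Requests covering many ranges, or ranges spanning many pages, are
        // handled as a full address-space flush instead.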
        if gva_ranges.len() > 16
            || gva_ranges.iter().any(|range| {
                let additional_pages = if flags.use_extended_range_format() {
                    range.as_extended().additional_pages()
                } else {
                    range.as_simple().additional_pages()
                };
                additional_pages > 16
            })
        {
            self.do_flush_virtual_address_space(processor_set, flags);
        } else {
            self.do_flush_virtual_address_list(flags, gva_ranges);
        }

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);
        Ok(())
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpace
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        hv1_hypercall::FlushVirtualAddressSpaceEx::flush_virtual_address_space_ex(
            self,
            processor_set,
            flags,
        )
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpaceEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        self.hcvm_validate_flush_inputs(processor_set, flags, false)?;

        self.do_flush_virtual_address_space(processor_set, flags);

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);
        Ok(())
    }
}

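// Flush helpers implemented with the SEV INVLPGB and TLBSYNC instructions
// issued through the HCL.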
impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn do_flush_virtual_address_list(&mut self, flags: HvFlushFlags, gva_ranges: &[HvGvaRange]) {
        for range in gva_ranges {
            let mut rax = SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_va_valid(true)
                .with_global(!flags.non_global_mappings_only());
            let mut ecx = SevInvlpgbEcx::new();
            let mut count;
            let mut gpn;

            if flags.use_extended_range_format() && range.as_extended().large_page() {
                ecx.set_large_page(true);
                if range.as_extended_large_page().page_size() {
                    let range = range.as_extended_large_page();
                    count = range.additional_pages();
                    gpn = range.gva_large_page_number();
                } else {
                    let range = range.as_extended();
                    count = range.additional_pages();
                    gpn = range.gva_page_number();
                }
            } else {
                let range = range.as_simple();
                count = range.additional_pages();
                gpn = range.gva_page_number();
            }
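            // `additional_pages` counts pages beyond the first; convert it to
            // a total and issue INVLPGB repeatedly, bounding each invocation
            // by the processor's invlpgb_count_max.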
            count += 1;

            while count > 0 {
                rax.set_virtual_page_number(gpn);
                ecx.set_additional_count(std::cmp::min(
                    count - 1,
                    self.vp.shared.invlpgb_count_max.into(),
                ));

                let edx = SevInvlpgbEdx::new();
                self.vp
                    .partition
                    .hcl
                    .invlpgb(rax.into(), edx.into(), ecx.into());

                count -= ecx.additional_count() + 1;
                gpn += ecx.additional_count() + 1;
            }
        }

        self.vp.partition.hcl.tlbsync();
    }

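    // Flushes the entire virtual address space. When only the current VP is
    // targeted and only non-global mappings need flushing, clearing the VMSA's
    // pcpu_id forces a TLB flush for this vCPU on the next VMRUN; otherwise a
    // broadcast INVLPGB plus TLBSYNC is issued.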
    fn do_flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) {
        let only_self = [self.vp.vp_index().index()].into_iter().eq(processor_set);
        if only_self && flags.non_global_mappings_only() {
            self.vp.runner.vmsa_mut(self.intercepted_vtl).set_pcpu_id(0);
        } else {
            self.vp.partition.hcl.invlpgb(
                SevInvlpgbRax::new()
                    .with_asid_valid(true)
                    .with_global(!flags.non_global_mappings_only())
                    .into(),
                SevInvlpgbEdx::new().into(),
                SevInvlpgbEcx::new().into(),
            );
            self.vp.partition.hcl.tlbsync();
        }
    }
}

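/// TLB flush and lock access for SNP partitions, usable both from VP context
/// (`vp_index` is `Some`) and from partition-wide paths with no associated VP.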
struct SnpTlbLockFlushAccess<'a> {
    vp_index: Option<VpIndex>,
    partition: &'a UhPartitionInner,
    shared: &'a SnpBackedShared,
}

impl TlbFlushLockAccess for SnpTlbLockFlushAccess<'_> {
    fn flush(&mut self, vtl: GuestVtl) {
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        self.set_wait_for_tlb_locks(vtl);
    }

    fn flush_entire(&mut self) {
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
            self.set_wait_for_tlb_locks(vtl);
        }
    }

    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl) {
        if let Some(vp_index) = self.vp_index {
            hardware_cvm::tlb_lock::TlbLockAccess {
                vp_index,
                cvm_partition: &self.shared.cvm,
            }
            .set_wait_for_tlb_locks(vtl);
        }
    }
}

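// Saved-state support. SNP-backed processors do not currently support
// save/restore, so saving fails and there is no state to restore.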
mod save_restore {
    use super::SnpBacked;
    use super::UhProcessor;
    use vmcore::save_restore::RestoreError;
    use vmcore::save_restore::SaveError;
    use vmcore::save_restore::SaveRestore;
    use vmcore::save_restore::SavedStateNotSupported;

    impl SaveRestore for UhProcessor<'_, SnpBacked> {
        type SavedState = SavedStateNotSupported;

        fn save(&mut self) -> Result<Self::SavedState, SaveError> {
            Err(SaveError::NotSupported)
        }

        fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
            match state {}
        }
    }
}