use super::BackingParams;
use super::BackingPrivate;
use super::BackingSharedParams;
use super::HardwareIsolatedBacking;
use super::InterceptMessageOptionalState;
use super::InterceptMessageState;
use super::UhEmulationState;
use super::hardware_cvm;
use super::hardware_cvm::HardwareIsolatedGuestTimer;
use super::vp_state;
use super::vp_state::UhVpStateAccess;
use crate::BackingShared;
use crate::Error;
use crate::GuestVtl;
use crate::TlbFlushLockAccess;
use crate::UhCvmPartitionState;
use crate::UhCvmVpState;
use crate::UhPartitionInner;
use crate::UhPartitionNewParams;
use crate::WakeReason;
use crate::processor::UhHypercallHandler;
use crate::processor::UhProcessor;
use crate::processor::hardware_cvm::apic::ApicBacking;
use cvm_tracing::CVM_ALLOWED;
use cvm_tracing::CVM_CONFIDENTIAL;
use hcl::vmsa::VmsaWrapper;
use hv1_emulator::hv::ProcessorVtlHv;
use hv1_emulator::synic::ProcessorSynic;
use hv1_hypercall::HvRepResult;
use hv1_hypercall::HypercallIo;
use hv1_structs::ProcessorSet;
use hv1_structs::VtlArray;
use hvdef::HV_PAGE_SIZE;
use hvdef::HvDeliverabilityNotificationsRegister;
use hvdef::HvError;
use hvdef::HvMessageType;
use hvdef::HvX64PendingExceptionEvent;
use hvdef::HvX64RegisterName;
use hvdef::Vtl;
use hvdef::hypercall::Control;
use hvdef::hypercall::HvFlushFlags;
use hvdef::hypercall::HvGvaRange;
use hvdef::hypercall::HypercallOutput;
use inspect::Inspect;
use inspect::InspectMut;
use inspect_counters::Counter;
use virt::EmulatorMonitorSupport;
use virt::Processor;
use virt::VpHaltReason;
use virt::VpIndex;
use virt::io::CpuIo;
use virt::state::StateElement;
use virt::vp;
use virt::vp::AccessVpState;
use virt::vp::MpState;
use virt::x86::MsrError;
use virt::x86::MsrErrorExt;
use virt::x86::SegmentRegister;
use virt::x86::TableRegister;
use virt_support_apic::ApicClient;
use virt_support_x86emu::emulate::EmulatorSupport as X86EmulatorSupport;
use virt_support_x86emu::emulate::emulate_io;
use virt_support_x86emu::emulate::emulate_translate_gva;
use virt_support_x86emu::translate::TranslationRegisters;
use vmcore::vmtime::VmTimeAccess;
use x86defs::RFlags;
use x86defs::cpuid::CpuidFunction;
use x86defs::snp::SevEventInjectInfo;
use x86defs::snp::SevExitCode;
use x86defs::snp::SevInvlpgbEcx;
use x86defs::snp::SevInvlpgbEdx;
use x86defs::snp::SevInvlpgbRax;
use x86defs::snp::SevIoAccessInfo;
use x86defs::snp::SevNpfInfo;
use x86defs::snp::SevSelector;
use x86defs::snp::SevStatusMsr;
use x86defs::snp::SevVmsa;
use x86defs::snp::Vmpl;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

#[derive(Debug, Error)]
#[error("invalid vmcb")]
struct InvalidVmcb;

#[derive(Debug, Error)]
enum SnpGhcbError {
    #[error("failed to access GHCB page")]
    GhcbPageAccess(#[source] guestmem::GuestMemoryError),
    #[error("ghcb page used for vmgexit does not match overlay page")]
    GhcbMisconfiguration,
}

#[derive(Debug, Error)]
#[error("failed to run")]
struct SnpRunVpError(#[source] hcl::ioctl::Error);

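/// Backing state for an SNP-isolated virtual processor.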
#[derive(InspectMut)]
pub struct SnpBacked {
    #[inspect(hex)]
    hv_sint_notifications: u16,
    general_stats: VtlArray<GeneralStats, 2>,
    exit_stats: VtlArray<ExitStats, 2>,
    #[inspect(flatten)]
    cvm: UhCvmVpState,
}

#[derive(Inspect, Default)]
struct GeneralStats {
    guest_busy: Counter,
    int_ack: Counter,
    synth_int: Counter,
}

#[derive(Inspect, Default)]
struct ExitStats {
    automatic_exit: Counter,
    cpuid: Counter,
    hlt: Counter,
    intr: Counter,
    invd: Counter,
    invlpgb: Counter,
    ioio: Counter,
    msr_read: Counter,
    msr_write: Counter,
    npf: Counter,
    npf_no_intercept: Counter,
    npf_spurious: Counter,
    rdpmc: Counter,
    vmgexit: Counter,
    vmmcall: Counter,
    xsetbv: Counter,
    excp_db: Counter,
    secure_reg_write: Counter,
}

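/// Indexes of the per-VP direct overlay pages: the synic message page (SIMP),
/// the synic event flags page (SIEFP), and the GHCB page.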
enum UhDirectOverlay {
    Sipp,
    Sifp,
    Ghcb,
    Count,
}

impl SnpBacked {
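    /// Computes the effective EFER value: LMA is set exactly when both LME and
    /// CR0.PG are set, and SVME is always forced on, as SEV-SNP requires.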
    fn calculate_efer(efer: u64, cr0: u64) -> u64 {
        let new_efer = if efer & x86defs::X64_EFER_LME != 0 && cr0 & x86defs::X64_CR0_PG != 0 {
            efer | x86defs::X64_EFER_LMA
        } else {
            efer & !x86defs::X64_EFER_LMA
        };
        new_efer | x86defs::X64_EFER_SVME
    }

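    /// Returns the number of shared pages required per cpu.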
    pub fn shared_pages_required_per_cpu() -> u64 {
        UhDirectOverlay::Count as u64
    }
}

impl HardwareIsolatedBacking for SnpBacked {
    fn cvm_state(&self) -> &UhCvmVpState {
        &self.cvm
    }

    fn cvm_state_mut(&mut self) -> &mut UhCvmVpState {
        &mut self.cvm
    }

    fn cvm_partition_state(shared: &Self::Shared) -> &UhCvmPartitionState {
        &shared.cvm
    }

    fn switch_vtl(this: &mut UhProcessor<'_, Self>, source_vtl: GuestVtl, target_vtl: GuestVtl) {
        let [vmsa0, vmsa1] = this.runner.vmsas_mut();
        let (current_vmsa, mut target_vmsa) = match (source_vtl, target_vtl) {
            (GuestVtl::Vtl0, GuestVtl::Vtl1) => (vmsa0, vmsa1),
            (GuestVtl::Vtl1, GuestVtl::Vtl0) => (vmsa1, vmsa0),
            _ => unreachable!(),
        };

        target_vmsa.set_rax(current_vmsa.rax());
        target_vmsa.set_rbx(current_vmsa.rbx());
        target_vmsa.set_rcx(current_vmsa.rcx());
        target_vmsa.set_rdx(current_vmsa.rdx());
        target_vmsa.set_rbp(current_vmsa.rbp());
        target_vmsa.set_rsi(current_vmsa.rsi());
        target_vmsa.set_rdi(current_vmsa.rdi());
        target_vmsa.set_r8(current_vmsa.r8());
        target_vmsa.set_r9(current_vmsa.r9());
        target_vmsa.set_r10(current_vmsa.r10());
        target_vmsa.set_r11(current_vmsa.r11());
        target_vmsa.set_r12(current_vmsa.r12());
        target_vmsa.set_r13(current_vmsa.r13());
        target_vmsa.set_r14(current_vmsa.r14());
        target_vmsa.set_r15(current_vmsa.r15());
        target_vmsa.set_xcr0(current_vmsa.xcr0());

        target_vmsa.set_cr2(current_vmsa.cr2());

        target_vmsa.set_dr0(current_vmsa.dr0());
        target_vmsa.set_dr1(current_vmsa.dr1());
        target_vmsa.set_dr2(current_vmsa.dr2());
        target_vmsa.set_dr3(current_vmsa.dr3());

        target_vmsa.set_pl0_ssp(current_vmsa.pl0_ssp());
        target_vmsa.set_pl1_ssp(current_vmsa.pl1_ssp());
        target_vmsa.set_pl2_ssp(current_vmsa.pl2_ssp());
        target_vmsa.set_pl3_ssp(current_vmsa.pl3_ssp());
        target_vmsa.set_u_cet(current_vmsa.u_cet());

        target_vmsa.set_x87_registers(&current_vmsa.x87_registers());

        let vec_reg_count = 16;
        for i in 0..vec_reg_count {
            target_vmsa.set_xmm_registers(i, current_vmsa.xmm_registers(i));
            target_vmsa.set_ymm_registers(i, current_vmsa.ymm_registers(i));
        }

        this.backing.cvm_state_mut().exit_vtl = target_vtl;
    }

    fn translation_registers(
        &self,
        this: &UhProcessor<'_, Self>,
        vtl: GuestVtl,
    ) -> TranslationRegisters {
        let vmsa = this.runner.vmsa(vtl);
        TranslationRegisters {
            cr0: vmsa.cr0(),
            cr4: vmsa.cr4(),
            efer: vmsa.efer(),
            cr3: vmsa.cr3(),
            rflags: vmsa.rflags(),
            ss: virt_seg_from_snp(vmsa.ss()).into(),
            encryption_mode: virt_support_x86emu::translate::EncryptionMode::Vtom(
                this.partition.caps.vtom.unwrap(),
            ),
        }
    }

    fn tlb_flush_lock_access<'a>(
        vp_index: Option<VpIndex>,
        partition: &'a UhPartitionInner,
        shared: &'a Self::Shared,
    ) -> impl TlbFlushLockAccess + 'a {
        SnpTlbLockFlushAccess {
            vp_index,
            partition,
            shared,
        }
    }

    fn pending_event_vector(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> Option<u8> {
        let event_inject = this.runner.vmsa(vtl).event_inject();
        if event_inject.valid() {
            Some(event_inject.vector())
        } else {
            None
        }
    }

    fn set_pending_exception(
        this: &mut UhProcessor<'_, Self>,
        vtl: GuestVtl,
        event: HvX64PendingExceptionEvent,
    ) {
        let inject_info = SevEventInjectInfo::new()
            .with_valid(true)
            .with_deliver_error_code(event.deliver_error_code())
            .with_error_code(event.error_code())
            .with_vector(event.vector().try_into().unwrap())
            .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT);

        this.runner.vmsa_mut(vtl).set_event_inject(inject_info);
    }

    fn cr0(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
        this.runner.vmsa(vtl).cr0()
    }

    fn cr4(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
        this.runner.vmsa(vtl).cr4()
    }

    fn intercept_message_state(
        this: &UhProcessor<'_, Self>,
        vtl: GuestVtl,
        include_optional_state: bool,
    ) -> InterceptMessageState {
        let vmsa = this.runner.vmsa(vtl);

        let instr_len = if SevExitCode(vmsa.guest_error_code()) == SevExitCode::NPF {
            0
        } else {
            (vmsa.next_rip() - vmsa.rip()) as u8
        };

        InterceptMessageState {
            instruction_length_and_cr8: instr_len,
            cpl: vmsa.cpl(),
            efer_lma: vmsa.efer() & x86defs::X64_EFER_LMA != 0,
            cs: virt_seg_from_snp(vmsa.cs()).into(),
            rip: vmsa.rip(),
            rflags: vmsa.rflags(),
            rax: vmsa.rax(),
            rdx: vmsa.rdx(),
            optional: if include_optional_state {
                Some(InterceptMessageOptionalState {
                    ds: virt_seg_from_snp(vmsa.ds()).into(),
                    es: virt_seg_from_snp(vmsa.es()).into(),
                })
            } else {
                None
            },
            rcx: vmsa.rcx(),
            rsi: vmsa.rsi(),
            rdi: vmsa.rdi(),
        }
    }

    fn cr_intercept_registration(
        this: &mut UhProcessor<'_, Self>,
        intercept_control: hvdef::HvRegisterCrInterceptControl,
    ) {
        this.runner
            .set_vp_registers_hvcall(
                Vtl::Vtl1,
                [(
                    HvX64RegisterName::CrInterceptControl,
                    u64::from(intercept_control),
                )],
            )
            .expect("setting intercept control succeeds");
    }

    fn is_interrupt_pending(
        this: &mut UhProcessor<'_, Self>,
        vtl: GuestVtl,
        check_rflags: bool,
        dev: &impl CpuIo,
    ) -> bool {
        let vmsa = this.runner.vmsa_mut(vtl);
        if vmsa.event_inject().valid()
            && vmsa.event_inject().interruption_type() == x86defs::snp::SEV_INTR_TYPE_NMI
        {
            return true;
        }

        let vmsa_priority = vmsa.v_intr_cntrl().priority() as u32;
        let lapic = &mut this.backing.cvm.lapics[vtl].lapic;
        let ppr = lapic
            .access(&mut SnpApicClient {
                partition: this.partition,
                vmsa,
                dev,
                vmtime: &this.vmtime,
                vtl,
            })
            .get_ppr();
        let ppr_priority = ppr >> 4;
        if vmsa_priority <= ppr_priority {
            return false;
        }

        let vmsa = this.runner.vmsa_mut(vtl);
        if (check_rflags && !RFlags::from_bits(vmsa.rflags()).interrupt_enable())
            || vmsa.v_intr_cntrl().intr_shadow()
            || !vmsa.v_intr_cntrl().irq()
        {
            return false;
        }

        true
    }

    fn untrusted_synic_mut(&mut self) -> Option<&mut ProcessorSynic> {
        None
    }

    fn update_deadline(this: &mut UhProcessor<'_, Self>, ref_time_now: u64, next_ref_time: u64) {
        this.shared
            .guest_timer
            .update_deadline(this, ref_time_now, next_ref_time);
    }

    fn clear_deadline(this: &mut UhProcessor<'_, Self>) {
        this.shared.guest_timer.clear_deadline(this);
    }
}

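/// Partition-wide shared state for SNP-backed processors.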
#[derive(Inspect)]
pub struct SnpBackedShared {
    #[inspect(flatten)]
    pub(crate) cvm: UhCvmPartitionState,
    invlpgb_count_max: u16,
    tsc_aux_virtualized: bool,
    #[inspect(debug)]
    sev_status: SevStatusMsr,
    #[inspect(skip)]
    guest_timer: hardware_cvm::VmTimeGuestTimer,
}

impl SnpBackedShared {
    pub(crate) fn new(
        _partition_params: &UhPartitionNewParams<'_>,
        params: BackingSharedParams<'_>,
    ) -> Result<Self, Error> {
        let cvm = params.cvm_state.unwrap();
        let invlpgb_count_max = x86defs::cpuid::ExtendedAddressSpaceSizesEdx::from(
            params
                .cpuid
                .result(CpuidFunction::ExtendedAddressSpaceSizes.0, 0, &[0; 4])[3],
        )
        .invlpgb_count_max();
        let tsc_aux_virtualized = x86defs::cpuid::ExtendedSevFeaturesEax::from(
            params
                .cpuid
                .result(CpuidFunction::ExtendedSevFeatures.0, 0, &[0; 4])[0],
        )
        .tsc_aux_virtualization();

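        // Read the SEV status MSR to discover which SEV-SNP features the
        // platform has enabled for this guest.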
        let msr = crate::MsrDevice::new(0).expect("open msr");
        let sev_status =
            SevStatusMsr::from(msr.read_msr(x86defs::X86X_AMD_MSR_SEV).expect("read msr"));
        tracing::info!(CVM_ALLOWED, ?sev_status, "SEV status");

        let guest_timer = hardware_cvm::VmTimeGuestTimer;

        Ok(Self {
            sev_status,
            invlpgb_count_max,
            tsc_aux_virtualized,
            cvm,
            guest_timer,
        })
    }
}

#[expect(private_interfaces)]
impl BackingPrivate for SnpBacked {
    type HclBacking<'snp> = hcl::ioctl::snp::Snp<'snp>;
    type Shared = SnpBackedShared;
    type EmulationCache = ();

    fn shared(shared: &BackingShared) -> &Self::Shared {
        let BackingShared::Snp(shared) = shared else {
            unreachable!()
        };
        shared
    }

    fn new(params: BackingParams<'_, '_, Self>, shared: &SnpBackedShared) -> Result<Self, Error> {
        Ok(Self {
            hv_sint_notifications: 0,
            general_stats: VtlArray::from_fn(|_| Default::default()),
            exit_stats: VtlArray::from_fn(|_| Default::default()),
            cvm: UhCvmVpState::new(
                &shared.cvm,
                params.partition,
                params.vp_info,
                UhDirectOverlay::Count as usize,
            )?,
        })
    }

    fn init(this: &mut UhProcessor<'_, Self>) {
        let sev_status = this.vp().shared.sev_status;
        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
            init_vmsa(
                &mut this.runner.vmsa_mut(vtl),
                vtl,
                this.partition.caps.vtom,
                sev_status,
            );

            let registers = vp::Registers::at_reset(&this.partition.caps, &this.inner.vp_info);
            this.access_state(vtl.into())
                .set_registers(&registers)
                .expect("Resetting to architectural state should succeed");

            let debug_registers =
                vp::DebugRegisters::at_reset(&this.partition.caps, &this.inner.vp_info);

            this.access_state(vtl.into())
                .set_debug_regs(&debug_registers)
                .expect("Resetting to architectural state should succeed");

            let xcr0 = vp::Xcr0::at_reset(&this.partition.caps, &this.inner.vp_info);
            this.access_state(vtl.into())
                .set_xcr(&xcr0)
                .expect("Resetting to architectural state should succeed");

            let cache_control = vp::Mtrrs::at_reset(&this.partition.caps, &this.inner.vp_info);
            this.access_state(vtl.into())
                .set_mtrrs(&cache_control)
                .expect("Resetting to architectural state should succeed");
        }

        let pfns = &this.backing.cvm.direct_overlay_handle.pfns();
        let values: &[(HvX64RegisterName, u64); 3] = &[
            (
                HvX64RegisterName::Sipp,
                hvdef::HvSynicSimpSiefp::new()
                    .with_enabled(true)
                    .with_base_gpn(pfns[UhDirectOverlay::Sipp as usize])
                    .into(),
            ),
            (
                HvX64RegisterName::Sifp,
                hvdef::HvSynicSimpSiefp::new()
                    .with_enabled(true)
                    .with_base_gpn(pfns[UhDirectOverlay::Sifp as usize])
                    .into(),
            ),
            (
                HvX64RegisterName::Ghcb,
                x86defs::snp::GhcbMsr::new()
                    .with_info(x86defs::snp::GhcbInfo::REGISTER_REQUEST.0)
                    .with_pfn(pfns[UhDirectOverlay::Ghcb as usize])
                    .into(),
            ),
        ];

        this.runner
            .set_vp_registers_hvcall(Vtl::Vtl0, values)
            .expect("set_vp_registers hypercall for direct overlays should succeed");
    }

    type StateAccess<'p, 'a>
        = UhVpStateAccess<'a, 'p, Self>
    where
        Self: 'a + 'p,
        'p: 'a;

    fn access_vp_state<'a, 'p>(
        this: &'a mut UhProcessor<'p, Self>,
        vtl: GuestVtl,
    ) -> Self::StateAccess<'p, 'a> {
        UhVpStateAccess::new(this, vtl)
    }

    async fn run_vp(
        this: &mut UhProcessor<'_, Self>,
        dev: &impl CpuIo,
        _stop: &mut virt::StopVp<'_>,
    ) -> Result<(), VpHaltReason> {
        this.run_vp_snp(dev).await
    }

    fn poll_apic(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl, scan_irr: bool) {
        this.runner.vmsa_mut(vtl).v_intr_cntrl_mut().set_irq(false);

        hardware_cvm::apic::poll_apic_core(this, vtl, scan_irr)
    }

    fn request_extint_readiness(_this: &mut UhProcessor<'_, Self>) {
        unreachable!("extint managed through software apic")
    }

    fn request_untrusted_sint_readiness(this: &mut UhProcessor<'_, Self>, sints: u16) {
        let sints = this.backing.hv_sint_notifications | sints;
        if this.backing.hv_sint_notifications == sints {
            return;
        }
        let notifications = HvDeliverabilityNotificationsRegister::new().with_sints(sints);
        tracing::trace!(?notifications, "setting notifications");
        this.runner
            .set_vp_register(
                GuestVtl::Vtl0,
                HvX64RegisterName::DeliverabilityNotifications,
                u64::from(notifications).into(),
            )
            .expect("requesting deliverability is not a fallible operation");

        this.backing.hv_sint_notifications = sints;
    }

    fn inspect_extra(this: &mut UhProcessor<'_, Self>, resp: &mut inspect::Response<'_>) {
        let vtl0_vmsa = this.runner.vmsa(GuestVtl::Vtl0);
        let vtl1_vmsa = if this.backing.cvm_state().vtl1.is_some() {
            Some(this.runner.vmsa(GuestVtl::Vtl1))
        } else {
            None
        };

        let add_vmsa_inspect = |req: inspect::Request<'_>, vmsa: VmsaWrapper<'_, &SevVmsa>| {
            req.respond()
                .hex("guest_error_code", vmsa.guest_error_code())
                .hex("exit_info1", vmsa.exit_info1())
                .hex("exit_info2", vmsa.exit_info2())
                .hex("v_intr_cntrl", u64::from(vmsa.v_intr_cntrl()));
        };

        resp.child("vmsa_additional", |req| {
            req.respond()
                .child("vtl0", |inner_req| add_vmsa_inspect(inner_req, vtl0_vmsa))
                .child("vtl1", |inner_req| {
                    if let Some(vtl1_vmsa) = vtl1_vmsa {
                        add_vmsa_inspect(inner_req, vtl1_vmsa);
                    }
                });
        });
    }

    fn hv(&self, vtl: GuestVtl) -> Option<&ProcessorVtlHv> {
        Some(&self.cvm.hv[vtl])
    }

    fn hv_mut(&mut self, vtl: GuestVtl) -> Option<&mut ProcessorVtlHv> {
        Some(&mut self.cvm.hv[vtl])
    }

    fn handle_vp_start_enable_vtl_wake(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl) {
        this.hcvm_handle_vp_start_enable_vtl(vtl)
    }

    fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
        this.hcvm_vtl1_inspectable()
    }

    fn process_interrupts(
        this: &mut UhProcessor<'_, Self>,
        scan_irr: VtlArray<bool, 2>,
        first_scan_irr: &mut bool,
        dev: &impl CpuIo,
    ) -> bool {
        this.cvm_process_interrupts(scan_irr, first_scan_irr, dev)
    }
}

fn virt_seg_to_snp(val: SegmentRegister) -> SevSelector {
    SevSelector {
        selector: val.selector,
        attrib: (val.attributes & 0xFF) | ((val.attributes >> 4) & 0xF00),
        limit: val.limit,
        base: val.base,
    }
}

fn virt_table_to_snp(val: TableRegister) -> SevSelector {
    SevSelector {
        limit: val.limit as u32,
        base: val.base,
        ..FromZeros::new_zeroed()
    }
}

fn virt_seg_from_snp(selector: SevSelector) -> SegmentRegister {
    SegmentRegister {
        base: selector.base,
        limit: selector.limit,
        selector: selector.selector,
        attributes: (selector.attrib & 0xFF) | ((selector.attrib & 0xF00) << 4),
    }
}

fn virt_table_from_snp(selector: SevSelector) -> TableRegister {
    TableRegister {
        limit: selector.limit as u16,
        base: selector.base,
    }
}

fn init_vmsa(
    vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>,
    vtl: GuestVtl,
    vtom: Option<u64>,
    sev_status: SevStatusMsr,
) {
    vmsa.reset(sev_status.vmsa_reg_prot());
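    // Propagate the platform's speculation-control settings (BTB isolation,
    // IBPB on entry, host-IBS prevention, VMSA register protection) from
    // SEV_STATUS into the guest's SEV_FEATURES.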
    vmsa.sev_features_mut()
        .set_snp_btb_isolation(sev_status.snp_btb_isolation());
    vmsa.sev_features_mut()
        .set_ibpb_on_entry(sev_status.ibpb_on_entry());
    vmsa.sev_features_mut()
        .set_prevent_host_ibs(sev_status.prevent_host_ibs());
    vmsa.sev_features_mut()
        .set_vmsa_reg_prot(sev_status.vmsa_reg_prot());
    vmsa.sev_features_mut().set_snp(true);
    vmsa.sev_features_mut().set_vtom(vtom.is_some());
    vmsa.set_virtual_tom(vtom.unwrap_or(0));

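    // Use alternate injection (the paravisor, rather than the hypervisor,
    // injects events into the guest) and reflect #VC exceptions directly into
    // the guest.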
    vmsa.sev_features_mut().set_alternate_injection(true);
    vmsa.sev_features_mut().set_reflect_vc(true);
    vmsa.v_intr_cntrl_mut().set_guest_busy(true);
    vmsa.sev_features_mut().set_debug_swap(true);

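    // Map VTLs onto VMPLs: VTL0 runs at VMPL2 and VTL1 at VMPL1 (lower VMPL
    // numbers are more privileged).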
    let vmpl = match vtl {
        GuestVtl::Vtl0 => Vmpl::Vmpl2,
        GuestVtl::Vtl1 => Vmpl::Vmpl1,
    };
    vmsa.set_vmpl(vmpl.into());

    vmsa.set_guest_error_code(SevExitCode::INTR.0);

    vmsa.set_efer(x86defs::X64_EFER_SVME);
}

struct SnpApicClient<'a, T> {
    partition: &'a UhPartitionInner,
    vmsa: VmsaWrapper<'a, &'a mut SevVmsa>,
    dev: &'a T,
    vmtime: &'a VmTimeAccess,
    vtl: GuestVtl,
}

impl<T: CpuIo> ApicClient for SnpApicClient<'_, T> {
    fn cr8(&mut self) -> u32 {
        self.vmsa.v_intr_cntrl().tpr().into()
    }

    fn set_cr8(&mut self, value: u32) {
        self.vmsa.v_intr_cntrl_mut().set_tpr(value as u8);
    }

    fn set_apic_base(&mut self, _value: u64) {
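        // Nothing to do; writes to the APIC base are ignored here.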
    }

    fn wake(&mut self, vp_index: VpIndex) {
        self.partition.vps[vp_index.index() as usize].wake(self.vtl, WakeReason::INTCON);
    }

    fn eoi(&mut self, vector: u8) {
        debug_assert_eq!(self.vtl, GuestVtl::Vtl0);
        self.dev.handle_eoi(vector.into())
    }

    fn now(&mut self) -> vmcore::vmtime::VmTime {
        self.vmtime.now()
    }

    fn pull_offload(&mut self) -> ([u32; 8], [u32; 8]) {
        unreachable!()
    }
}

impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
    const TRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
        Self,
        [
            hv1_hypercall::HvModifySparseGpaPageHostVisibility,
            hv1_hypercall::HvQuerySparseGpaPageHostVisibility,
            hv1_hypercall::HvX64StartVirtualProcessor,
            hv1_hypercall::HvGetVpIndexFromApicId,
            hv1_hypercall::HvGetVpRegisters,
            hv1_hypercall::HvEnablePartitionVtl,
            hv1_hypercall::HvRetargetDeviceInterrupt,
            hv1_hypercall::HvPostMessage,
            hv1_hypercall::HvSignalEvent,
            hv1_hypercall::HvX64EnableVpVtl,
            hv1_hypercall::HvExtQueryCapabilities,
            hv1_hypercall::HvVtlCall,
            hv1_hypercall::HvVtlReturn,
            hv1_hypercall::HvFlushVirtualAddressList,
            hv1_hypercall::HvFlushVirtualAddressListEx,
            hv1_hypercall::HvFlushVirtualAddressSpace,
            hv1_hypercall::HvFlushVirtualAddressSpaceEx,
            hv1_hypercall::HvSetVpRegisters,
            hv1_hypercall::HvModifyVtlProtectionMask,
            hv1_hypercall::HvX64TranslateVirtualAddress,
            hv1_hypercall::HvSendSyntheticClusterIpi,
            hv1_hypercall::HvSendSyntheticClusterIpiEx,
            hv1_hypercall::HvInstallIntercept,
            hv1_hypercall::HvAssertVirtualInterrupt,
        ],
    );

    const UNTRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
        Self,
        [hv1_hypercall::HvPostMessage, hv1_hypercall::HvSignalEvent],
    );
}

struct GhcbEnlightenedHypercall<'a, 'b, T> {
    handler: UhHypercallHandler<'a, 'b, T, SnpBacked>,
    control: u64,
    output_gpa: u64,
    input_gpa: u64,
    result: u64,
}

impl<'a, 'b, T> hv1_hypercall::AsHandler<UhHypercallHandler<'a, 'b, T, SnpBacked>>
    for &mut GhcbEnlightenedHypercall<'a, 'b, T>
{
    fn as_handler(&mut self) -> &mut UhHypercallHandler<'a, 'b, T, SnpBacked> {
        &mut self.handler
    }
}

impl<T> HypercallIo for GhcbEnlightenedHypercall<'_, '_, T> {
    fn advance_ip(&mut self) {}

    fn retry(&mut self, control: u64) {
        let control = Control::from(control);
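        // There is no direct retry mechanism in the GHCB protocol, so report a
        // timeout along with the rep progress made so far; the guest is
        // expected to reissue the hypercall for the remaining elements.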
        self.set_result(
            HypercallOutput::from(HvError::Timeout)
                .with_elements_processed(control.rep_start())
                .into(),
        );
    }

    fn control(&mut self) -> u64 {
        self.control
    }

    fn input_gpa(&mut self) -> u64 {
        self.input_gpa
    }

    fn output_gpa(&mut self) -> u64 {
        self.output_gpa
    }

    fn fast_register_pair_count(&mut self) -> usize {
        0
    }

    fn extended_fast_hypercalls_ok(&mut self) -> bool {
        false
    }

    fn fast_input(&mut self, _buf: &mut [[u64; 2]], _output_register_pairs: usize) -> usize {
        unimplemented!("not supported for secure enlightened abi")
    }

    fn fast_output(&mut self, _starting_pair_index: usize, _buf: &[[u64; 2]]) {
        unimplemented!("not supported for secure enlightened abi")
    }

    fn vtl_input(&mut self) -> u64 {
        unimplemented!("not supported for secure enlightened abi")
    }

    fn set_result(&mut self, n: u64) {
        self.result = n;
    }

    fn fast_regs(&mut self, _starting_pair_index: usize, _buf: &mut [[u64; 2]]) {
        unimplemented!("not supported for secure enlightened abi")
    }
}

impl<'b> ApicBacking<'b, SnpBacked> for UhProcessor<'b, SnpBacked> {
    fn vp(&mut self) -> &mut UhProcessor<'b, SnpBacked> {
        self
    }

    fn handle_interrupt(&mut self, vtl: GuestVtl, vector: u8) {
        let mut vmsa = self.runner.vmsa_mut(vtl);
        vmsa.v_intr_cntrl_mut().set_vector(vector);
        vmsa.v_intr_cntrl_mut().set_priority((vector >> 4).into());
        vmsa.v_intr_cntrl_mut().set_ignore_tpr(false);
        vmsa.v_intr_cntrl_mut().set_irq(true);
        self.backing.cvm.lapics[vtl].activity = MpState::Running;
    }

    fn handle_nmi(&mut self, vtl: GuestVtl) {
        let mut vmsa = self.runner.vmsa_mut(vtl);

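        // Inject the NMI (vector 2) directly through the VMSA event injection
        // field and mark the processor as running again.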
        vmsa.set_event_inject(
            SevEventInjectInfo::new()
                .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_NMI)
                .with_vector(2)
                .with_valid(true),
        );
        self.backing.cvm.lapics[vtl].nmi_pending = false;
        self.backing.cvm.lapics[vtl].activity = MpState::Running;
    }

    fn handle_sipi(&mut self, vtl: GuestVtl, cs: SegmentRegister) {
        let mut vmsa = self.runner.vmsa_mut(vtl);
        vmsa.set_cs(virt_seg_to_snp(cs));
        vmsa.set_rip(0);
        self.backing.cvm.lapics[vtl].activity = MpState::Running;
    }
}

impl UhProcessor<'_, SnpBacked> {
    fn handle_synic_deliverable_exit(&mut self) {
        let message = self
            .runner
            .exit_message()
            .as_message::<hvdef::HvX64SynicSintDeliverableMessage>();

        tracing::trace!(
            deliverable_sints = message.deliverable_sints,
            "sint deliverable"
        );

        self.backing.hv_sint_notifications &= !message.deliverable_sints;

        self.deliver_synic_messages(GuestVtl::Vtl0, message.deliverable_sints);
    }

    fn handle_vmgexit(
        &mut self,
        dev: &impl CpuIo,
        intercepted_vtl: GuestVtl,
    ) -> Result<(), SnpGhcbError> {
        let message = self
            .runner
            .exit_message()
            .as_message::<hvdef::HvX64VmgexitInterceptMessage>();

        let ghcb_msr = x86defs::snp::GhcbMsr::from(message.ghcb_msr);
        let flags = message.flags;
        let sw_exit_code = message.ghcb_page.standard.sw_exit_code;
        let sw_exit_info1 = message.ghcb_page.standard.sw_exit_info1;
        let sw_exit_info2 = message.ghcb_page.standard.sw_exit_info2;
        tracing::trace!(?ghcb_msr, "vmgexit intercept");

        match x86defs::snp::GhcbInfo(ghcb_msr.info()) {
            x86defs::snp::GhcbInfo::NORMAL => {
                assert!(message.flags.ghcb_page_valid());
                let ghcb_pfn = ghcb_msr.pfn();

                let ghcb_overlay =
                    self.backing.cvm.direct_overlay_handle.pfns()[UhDirectOverlay::Ghcb as usize];

                if ghcb_pfn != ghcb_overlay {
                    tracelimit::warn_ratelimited!(
                        CVM_ALLOWED,
                        vmgexit_pfn = ghcb_pfn,
                        overlay_pfn = ghcb_overlay,
                        "ghcb page used for vmgexit does not match overlay page"
                    );

                    return Err(SnpGhcbError::GhcbMisconfiguration);
                }

                match x86defs::snp::GhcbUsage(message.ghcb_page.ghcb_usage) {
                    x86defs::snp::GhcbUsage::HYPERCALL => {
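                        // The GHCB page holds the hypercall parameters at a
                        // fixed offset; read them from shared memory, dispatch
                        // through the untrusted hypercall path, then write the
                        // result back to the page below.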
                        let guest_memory = &self.shared.cvm.shared_memory;
                        let overlay_base = ghcb_overlay * HV_PAGE_SIZE;
                        let x86defs::snp::GhcbHypercallParameters {
                            output_gpa,
                            input_control,
                        } = guest_memory
                            .read_plain(
                                overlay_base
                                    + x86defs::snp::GHCB_PAGE_HYPERCALL_PARAMETERS_OFFSET as u64,
                            )
                            .map_err(SnpGhcbError::GhcbPageAccess)?;

                        let mut handler = GhcbEnlightenedHypercall {
                            handler: UhHypercallHandler {
                                vp: self,
                                bus: dev,
                                trusted: false,
                                intercepted_vtl,
                            },
                            control: input_control,
                            output_gpa,
                            input_gpa: overlay_base,
                            result: 0,
                        };

                        UhHypercallHandler::UNTRUSTED_DISPATCHER
                            .dispatch(guest_memory, &mut handler);

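                        // Write the 64-bit hypercall result back to the GHCB
                        // page so the guest can observe it.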
                        guest_memory
                            .write_at(
                                overlay_base
                                    + x86defs::snp::GHCB_PAGE_HYPERCALL_OUTPUT_OFFSET as u64,
                                handler.result.as_bytes(),
                            )
                            .map_err(SnpGhcbError::GhcbPageAccess)?;
                    }
                    usage => unimplemented!(
                        r#"
                        Invalid ghcb message.
                        usage {usage:?}
                        flags {flags:?}
                        ghcb_msr {ghcb_msr:?}
                        sw_exit_code {sw_exit_code:?}
                        sw_exit_info1 {sw_exit_info1:?}
                        sw_exit_info2 {sw_exit_info2:?}
                        "#
                    ),
                }
            }
            info => unimplemented!("ghcb info {info:?}"),
        }

        Ok(())
    }

    fn handle_msr_access(
        &mut self,
        dev: &impl CpuIo,
        entered_from_vtl: GuestVtl,
        msr: u32,
        is_write: bool,
    ) {
        if is_write && self.cvm_try_protect_msr_write(entered_from_vtl, msr) {
            return;
        }

        let vmsa = self.runner.vmsa_mut(entered_from_vtl);
        let gp = if is_write {
            let value = (vmsa.rax() as u32 as u64) | ((vmsa.rdx() as u32 as u64) << 32);

            let r = self.backing.cvm.lapics[entered_from_vtl]
                .lapic
                .access(&mut SnpApicClient {
                    partition: self.partition,
                    vmsa,
                    dev,
                    vmtime: &self.vmtime,
                    vtl: entered_from_vtl,
                })
                .msr_write(msr, value)
                .or_else_if_unknown(|| self.write_msr_cvm(msr, value, entered_from_vtl))
                .or_else_if_unknown(|| self.write_msr_snp(dev, msr, value, entered_from_vtl));

            match r {
                Ok(()) => false,
                Err(MsrError::Unknown) => {
                    tracing::debug!(msr, value, "unknown cvm msr write");
                    false
                }
                Err(MsrError::InvalidAccess) => true,
            }
        } else {
            let r = self.backing.cvm.lapics[entered_from_vtl]
                .lapic
                .access(&mut SnpApicClient {
                    partition: self.partition,
                    vmsa,
                    dev,
                    vmtime: &self.vmtime,
                    vtl: entered_from_vtl,
                })
                .msr_read(msr)
                .or_else_if_unknown(|| self.read_msr_cvm(msr, entered_from_vtl))
                .or_else_if_unknown(|| self.read_msr_snp(dev, msr, entered_from_vtl));

            let value = match r {
                Ok(v) => Some(v),
                Err(MsrError::Unknown) => {
                    tracing::debug!(msr, "unknown cvm msr read");
                    Some(0)
                }
                Err(MsrError::InvalidAccess) => None,
            };

            if let Some(value) = value {
                let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
                vmsa.set_rax((value as u32).into());
                vmsa.set_rdx(((value >> 32) as u32).into());
                false
            } else {
                true
            }
        };

        let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
        if gp {
            vmsa.set_event_inject(
                SevEventInjectInfo::new()
                    .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                    .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
                    .with_deliver_error_code(true)
                    .with_valid(true),
            );
        } else {
            advance_to_next_instruction(&mut vmsa);
        }
    }

    fn handle_xsetbv(&mut self, entered_from_vtl: GuestVtl) {
        let vmsa = self.runner.vmsa(entered_from_vtl);
        if let Some(value) = hardware_cvm::validate_xsetbv_exit(hardware_cvm::XsetbvExitInput {
            rax: vmsa.rax(),
            rcx: vmsa.rcx(),
            rdx: vmsa.rdx(),
            cr4: vmsa.cr4(),
            cpl: vmsa.cpl(),
        }) {
            if !self.cvm_try_protect_secure_register_write(
                entered_from_vtl,
                HvX64RegisterName::Xfem,
                value,
            ) {
                let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
                vmsa.set_xcr0(value);
                advance_to_next_instruction(&mut vmsa);
            }
        } else {
            let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
            vmsa.set_event_inject(
                SevEventInjectInfo::new()
                    .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                    .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
                    .with_deliver_error_code(true)
                    .with_valid(true),
            );
        }
    }

    fn handle_crx_intercept(&mut self, entered_from_vtl: GuestVtl, reg: HvX64RegisterName) {
        let vmsa = self.runner.vmsa(entered_from_vtl);
        let mov_crx_drx = x86defs::snp::MovCrxDrxInfo::from(vmsa.exit_info1());
        let reg_value = {
            let gpr_name =
                HvX64RegisterName(HvX64RegisterName::Rax.0 + mov_crx_drx.gpr_number() as u32);

            match gpr_name {
                HvX64RegisterName::Rax => vmsa.rax(),
                HvX64RegisterName::Rbx => vmsa.rbx(),
                HvX64RegisterName::Rcx => vmsa.rcx(),
                HvX64RegisterName::Rdx => vmsa.rdx(),
                HvX64RegisterName::Rsp => vmsa.rsp(),
                HvX64RegisterName::Rbp => vmsa.rbp(),
                HvX64RegisterName::Rsi => vmsa.rsi(),
                HvX64RegisterName::Rdi => vmsa.rdi(),
                HvX64RegisterName::R8 => vmsa.r8(),
                HvX64RegisterName::R9 => vmsa.r9(),
                HvX64RegisterName::R10 => vmsa.r10(),
                HvX64RegisterName::R11 => vmsa.r11(),
                HvX64RegisterName::R12 => vmsa.r12(),
                HvX64RegisterName::R13 => vmsa.r13(),
                HvX64RegisterName::R14 => vmsa.r14(),
                HvX64RegisterName::R15 => vmsa.r15(),
                _ => unreachable!("unexpected register"),
            }
        };

        if !mov_crx_drx.mov_crx() {
            tracelimit::warn_ratelimited!(
                CVM_ALLOWED,
                "Intercepted crx access, instruction is not mov crx"
            );
            return;
        }

        if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, reg_value) {
            let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
            match reg {
                HvX64RegisterName::Cr0 => vmsa.set_cr0(reg_value),
                HvX64RegisterName::Cr4 => vmsa.set_cr4(reg_value),
                _ => unreachable!(),
            }
            advance_to_next_instruction(&mut vmsa);
        }
    }

    #[must_use]
    fn sync_lazy_eoi(&mut self, vtl: GuestVtl) -> bool {
        if self.backing.cvm.lapics[vtl].lapic.is_lazy_eoi_pending() {
            return self.backing.cvm.hv[vtl].set_lazy_eoi();
        }

        false
    }

    async fn run_vp_snp(&mut self, dev: &impl CpuIo) -> Result<(), VpHaltReason> {
        let next_vtl = self.backing.cvm.exit_vtl;

        let mut vmsa = self.runner.vmsa_mut(next_vtl);
        let last_interrupt_ctrl = vmsa.v_intr_cntrl();

        if vmsa.sev_features().alternate_injection() {
            vmsa.v_intr_cntrl_mut().set_guest_busy(false);
        }

        self.unlock_tlb_lock(Vtl::Vtl2);
        let tlb_halt = self.should_halt_for_tlb_unlock(next_vtl);

        let halt = self.backing.cvm.lapics[next_vtl].activity != MpState::Running || tlb_halt;

        if halt && next_vtl == GuestVtl::Vtl1 && !tlb_halt {
            tracelimit::warn_ratelimited!(CVM_ALLOWED, "halting VTL 1, which might halt the guest");
        }

        self.runner.set_halted(halt);

        self.runner.set_exit_vtl(next_vtl);

        let lazy_eoi = self.sync_lazy_eoi(next_vtl);

        let mut has_intercept = self
            .runner
            .run()
            .map_err(|e| dev.fatal_error(SnpRunVpError(e).into()))?;

        let entered_from_vtl = next_vtl;
        let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);

        let inject = if vmsa.sev_features().alternate_injection() {
            if vmsa.v_intr_cntrl().guest_busy() {
                self.backing.general_stats[entered_from_vtl]
                    .guest_busy
                    .increment();
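                // The guest-busy bit was still set at exit time, meaning the
                // previously injected event was not delivered. Re-inject it,
                // except for software exceptions (#BP, #OF) and software
                // interrupts, which the guest regenerates by re-executing the
                // instruction.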
                let exit_int_info = SevEventInjectInfo::from(vmsa.exit_int_info());
                assert!(
                    exit_int_info.valid(),
                    "event inject info should be valid {exit_int_info:x?}"
                );

                match exit_int_info.interruption_type() {
                    x86defs::snp::SEV_INTR_TYPE_EXCEPT => {
                        if exit_int_info.vector() != 3 && exit_int_info.vector() != 4 {
                            Some(exit_int_info)
                        } else {
                            None
                        }
                    }
                    x86defs::snp::SEV_INTR_TYPE_SW => None,
                    _ => Some(exit_int_info),
                }
            } else {
                None
            }
        } else {
            unimplemented!("Only alternate injection is supported for SNP")
        };

        if let Some(inject) = inject {
            vmsa.set_event_inject(inject);
        }
        if vmsa.sev_features().alternate_injection() {
            vmsa.v_intr_cntrl_mut().set_guest_busy(true);
        }

        if last_interrupt_ctrl.irq() && !vmsa.v_intr_cntrl().irq() {
            self.backing.general_stats[entered_from_vtl]
                .int_ack
                .increment();
            self.backing.cvm.lapics[entered_from_vtl]
                .lapic
                .acknowledge_interrupt(last_interrupt_ctrl.vector());
        }

        vmsa.v_intr_cntrl_mut().set_irq(false);

        if lazy_eoi && self.backing.cvm.hv[entered_from_vtl].clear_lazy_eoi() {
            self.backing.cvm.lapics[entered_from_vtl]
                .lapic
                .access(&mut SnpApicClient {
                    partition: self.partition,
                    vmsa,
                    dev,
                    vmtime: &self.vmtime,
                    vtl: entered_from_vtl,
                })
                .lazy_eoi();
        }

        let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
        let sev_error_code = SevExitCode(vmsa.guest_error_code());

        let stat = match sev_error_code {
            SevExitCode::CPUID => {
                self.handle_cpuid(entered_from_vtl);
                &mut self.backing.exit_stats[entered_from_vtl].cpuid
            }

            SevExitCode::MSR => {
                let is_write = vmsa.exit_info1() & 1 != 0;
                let msr = vmsa.rcx() as u32;

                self.handle_msr_access(dev, entered_from_vtl, msr, is_write);

                if is_write {
                    &mut self.backing.exit_stats[entered_from_vtl].msr_write
                } else {
                    &mut self.backing.exit_stats[entered_from_vtl].msr_read
                }
            }

            SevExitCode::IOIO => {
                let io_info =
                    SevIoAccessInfo::from(self.runner.vmsa(entered_from_vtl).exit_info1() as u32);

                let access_size = if io_info.access_size32() {
                    4
                } else if io_info.access_size16() {
                    2
                } else {
                    1
                };

                let port_access_protected = self.cvm_try_protect_io_port_access(
                    entered_from_vtl,
                    io_info.port(),
                    io_info.read_access(),
                    access_size,
                    io_info.string_access(),
                    io_info.rep_access(),
                );

                let vmsa = self.runner.vmsa(entered_from_vtl);
                if !port_access_protected {
                    if io_info.string_access() || io_info.rep_access() {
                        let interruption_pending = vmsa.event_inject().valid()
                            || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();

                        self.emulate(dev, interruption_pending, entered_from_vtl, ())
                            .await?;
                    } else {
                        let mut rax = vmsa.rax();
                        emulate_io(
                            self.inner.vp_info.base.vp_index,
                            !io_info.read_access(),
                            io_info.port(),
                            &mut rax,
                            access_size,
                            dev,
                        )
                        .await;

                        let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
                        vmsa.set_rax(rax);
                        advance_to_next_instruction(&mut vmsa);
                    }
                }
                &mut self.backing.exit_stats[entered_from_vtl].ioio
            }

            SevExitCode::VMMCALL => {
                let is_64bit = self.long_mode(entered_from_vtl);
                let guest_memory = &self.partition.gm[entered_from_vtl];
                let handler = UhHypercallHandler {
                    trusted: !self.cvm_partition().hide_isolation,
                    vp: &mut *self,
                    bus: dev,
                    intercepted_vtl: entered_from_vtl,
                };

                UhHypercallHandler::TRUSTED_DISPATCHER.dispatch(
                    guest_memory,
                    hv1_hypercall::X64RegisterIo::new(handler, is_64bit),
                );
                &mut self.backing.exit_stats[entered_from_vtl].vmmcall
            }

            SevExitCode::SHUTDOWN => {
                return Err(VpHaltReason::TripleFault {
                    vtl: entered_from_vtl.into(),
                });
            }

            SevExitCode::WBINVD | SevExitCode::INVD => {
                advance_to_next_instruction(&mut vmsa);
                &mut self.backing.exit_stats[entered_from_vtl].invd
            }

            SevExitCode::NPF if has_intercept => {
                let gpa = vmsa.exit_info2();
                let interruption_pending = vmsa.event_inject().valid()
                    || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();
                let exit_info = SevNpfInfo::from(vmsa.exit_info1());
                let exit_message = self.runner.exit_message();
                let real = match exit_message.header.typ {
                    HvMessageType::HvMessageTypeExceptionIntercept => {
                        let exception_message =
                            exit_message.as_message::<hvdef::HvX64ExceptionInterceptMessage>();

                        exception_message.vector
                            == x86defs::Exception::SEV_VMM_COMMUNICATION.0 as u16
                    }
                    HvMessageType::HvMessageTypeUnmappedGpa
                    | HvMessageType::HvMessageTypeGpaIntercept
                    | HvMessageType::HvMessageTypeUnacceptedGpa => {
                        let gpa_message =
                            exit_message.as_message::<hvdef::HvX64MemoryInterceptMessage>();

                        (gpa_message.guest_physical_address >> hvdef::HV_PAGE_SHIFT)
                            == (gpa >> hvdef::HV_PAGE_SHIFT)
                    }
                    _ => false,
                };

                if real {
                    has_intercept = false;
                    if self.check_mem_fault(entered_from_vtl, gpa, exit_info.is_write(), exit_info)
                    {
                        self.emulate(dev, interruption_pending, entered_from_vtl, ())
                            .await?;
                    }
                    &mut self.backing.exit_stats[entered_from_vtl].npf
                } else {
                    &mut self.backing.exit_stats[entered_from_vtl].npf_spurious
                }
            }

            SevExitCode::NPF => &mut self.backing.exit_stats[entered_from_vtl].npf_no_intercept,

            SevExitCode::HLT | SevExitCode::IDLE_HLT => {
                self.backing.cvm.lapics[entered_from_vtl].activity = MpState::Halted;
                vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
                &mut self.backing.exit_stats[entered_from_vtl].hlt
            }

            SevExitCode::INVALID_VMCB => {
                return Err(dev.fatal_error(InvalidVmcb.into()));
            }

            SevExitCode::INVLPGB | SevExitCode::ILLEGAL_INVLPGB => {
                vmsa.set_event_inject(
                    SevEventInjectInfo::new()
                        .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                        .with_vector(x86defs::Exception::INVALID_OPCODE.0)
                        .with_valid(true),
                );
                &mut self.backing.exit_stats[entered_from_vtl].invlpgb
            }

            SevExitCode::RDPMC => {
                let cr4 = vmsa.cr4();
                if ((vmsa.cpl() > 0) && (cr4 & x86defs::X64_CR4_PCE == 0))
                    || (vmsa.rcx() as u32 >= 4)
                {
                    vmsa.set_event_inject(
                        SevEventInjectInfo::new()
                            .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                            .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
                            .with_deliver_error_code(true)
                            .with_valid(true),
                    );
                } else {
                    vmsa.set_rax(0);
                    vmsa.set_rdx(0);
                    advance_to_next_instruction(&mut vmsa);
                }
                &mut self.backing.exit_stats[entered_from_vtl].rdpmc
            }

            SevExitCode::VMGEXIT if has_intercept => {
                has_intercept = false;
                match self.runner.exit_message().header.typ {
                    HvMessageType::HvMessageTypeX64SevVmgexitIntercept => {
                        self.handle_vmgexit(dev, entered_from_vtl)
                            .map_err(|e| dev.fatal_error(e.into()))?;
                    }
                    _ => has_intercept = true,
                }
                &mut self.backing.exit_stats[entered_from_vtl].vmgexit
            }

            SevExitCode::NMI
            | SevExitCode::PAUSE
            | SevExitCode::SMI
            | SevExitCode::VMGEXIT
            | SevExitCode::BUSLOCK => {
                &mut self.backing.exit_stats[entered_from_vtl].automatic_exit
            }

            SevExitCode::VINTR => {
                unimplemented!("SevExitCode::VINTR");
            }

            SevExitCode::INTR => {
                &mut self.backing.exit_stats[entered_from_vtl].intr
            }

            SevExitCode::XSETBV => {
                self.handle_xsetbv(entered_from_vtl);
                &mut self.backing.exit_stats[entered_from_vtl].xsetbv
            }

            SevExitCode::EXCP_DB => &mut self.backing.exit_stats[entered_from_vtl].excp_db,

            SevExitCode::CR0_WRITE => {
                self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr0);
                &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
            }
            SevExitCode::CR4_WRITE => {
                self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr4);
                &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
            }

            tr_exit_code @ (SevExitCode::GDTR_WRITE
            | SevExitCode::IDTR_WRITE
            | SevExitCode::LDTR_WRITE
            | SevExitCode::TR_WRITE) => {
                let reg = match tr_exit_code {
                    SevExitCode::GDTR_WRITE => HvX64RegisterName::Gdtr,
                    SevExitCode::IDTR_WRITE => HvX64RegisterName::Idtr,
                    SevExitCode::LDTR_WRITE => HvX64RegisterName::Ldtr,
                    SevExitCode::TR_WRITE => HvX64RegisterName::Tr,
                    _ => unreachable!(),
                };

                if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, 0) {
                    panic!("unexpected secure register");
                }

                &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
            }

            _ => {
                tracing::error!(
                    CVM_CONFIDENTIAL,
                    "SEV exit code {sev_error_code:x?} sev features {:x?} v_intr_control {:x?} event inject {:x?} \
                    vmpl {:x?} cpl {:x?} exit_info1 {:x?} exit_info2 {:x?} exit_int_info {:x?} virtual_tom {:x?} \
                    efer {:x?} cr4 {:x?} cr3 {:x?} cr0 {:x?} rflags {:x?} rip {:x?} next rip {:x?}",
                    vmsa.sev_features(),
                    vmsa.v_intr_cntrl(),
                    vmsa.event_inject(),
                    vmsa.vmpl(),
                    vmsa.cpl(),
                    vmsa.exit_info1(),
                    vmsa.exit_info2(),
                    vmsa.exit_int_info(),
                    vmsa.virtual_tom(),
                    vmsa.efer(),
                    vmsa.cr4(),
                    vmsa.cr3(),
                    vmsa.cr0(),
                    vmsa.rflags(),
                    vmsa.rip(),
                    vmsa.next_rip(),
                );
                panic!("Received unexpected SEV exit code {sev_error_code:x?}");
            }
        };
        stat.increment();

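        // With the gdb feature enabled, give the debugger a chance to handle
        // debug exceptions before they are treated as guest-visible.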
        if cfg!(feature = "gdb") && sev_error_code == SevExitCode::EXCP_DB {
            return self.handle_debug_exception(dev, entered_from_vtl);
        }

        if has_intercept {
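            // Any intercept still unhandled at this point is a synthetic
            // message from the hypervisor rather than a hardware exit.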
            self.backing.general_stats[entered_from_vtl]
                .synth_int
                .increment();
            match self.runner.exit_message().header.typ {
                HvMessageType::HvMessageTypeSynicSintDeliverable => {
                    self.handle_synic_deliverable_exit();
                }
                HvMessageType::HvMessageTypeX64Halt
                | HvMessageType::HvMessageTypeExceptionIntercept => {}
                message_type => {
                    tracelimit::error_ratelimited!(
                        CVM_ALLOWED,
                        ?message_type,
                        "unknown synthetic exit"
                    );
                }
            }
        }

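        // Leave a benign exit code (#INTR) behind so a stale value is never
        // mistaken for a real exit on the next run.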
        self.runner
            .vmsa_mut(entered_from_vtl)
            .set_guest_error_code(SevExitCode::INTR.0);
        Ok(())
    }

    fn long_mode(&self, vtl: GuestVtl) -> bool {
        let vmsa = self.runner.vmsa(vtl);
        vmsa.cr0() & x86defs::X64_CR0_PE != 0 && vmsa.efer() & x86defs::X64_EFER_LMA != 0
    }

    fn handle_cpuid(&mut self, vtl: GuestVtl) {
        let vmsa = self.runner.vmsa(vtl);
        let leaf = vmsa.rax() as u32;
        let subleaf = vmsa.rcx() as u32;
        let [mut eax, mut ebx, mut ecx, mut edx] = self.cvm_cpuid_result(vtl, leaf, subleaf);

        match CpuidFunction(leaf) {
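            // cvm_cpuid_result has already applied the common CVM filtering;
            // the leaves below also depend on per-VP state (the APIC id) and
            // are adjusted here.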
            CpuidFunction::ProcessorTopologyDefinition => {
                let apic_id = self.inner.vp_info.apic_id;
                let vps_per_socket = self.cvm_partition().vps_per_socket;
                eax = x86defs::cpuid::ProcessorTopologyDefinitionEax::from(eax)
                    .with_extended_apic_id(apic_id)
                    .into();

                let topology_ebx = x86defs::cpuid::ProcessorTopologyDefinitionEbx::from(ebx);
                let mut new_unit_id = apic_id & (vps_per_socket - 1);

                if topology_ebx.threads_per_compute_unit() > 0 {
                    new_unit_id /= 2;
                }

                ebx = topology_ebx.with_compute_unit_id(new_unit_id as u8).into();

                let amd_nodes_per_socket = 1u32;

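                // Assume one NUMA node per socket when deriving the node id
                // from the APIC id; the NodesPerProcessor field is encoded as
                // a count minus one.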
                let node_id = apic_id
                    >> (vps_per_socket
                        .trailing_zeros()
                        .saturating_sub(amd_nodes_per_socket.trailing_zeros()));
                let nodes_per_processor = amd_nodes_per_socket - 1;

                ecx = x86defs::cpuid::ProcessorTopologyDefinitionEcx::from(ecx)
                    .with_node_id(node_id as u8)
                    .with_nodes_per_processor(nodes_per_processor as u8)
                    .into();
            }
            CpuidFunction::ExtendedSevFeatures => {
                eax = 0;
                ebx = 0;
                ecx = 0;
                edx = 0;
            }
            _ => {}
        }

        let mut vmsa = self.runner.vmsa_mut(vtl);
        vmsa.set_rax(eax.into());
        vmsa.set_rbx(ebx.into());
        vmsa.set_rcx(ecx.into());
        vmsa.set_rdx(edx.into());
        advance_to_next_instruction(&mut vmsa);
    }
}

impl<T: CpuIo> X86EmulatorSupport for UhEmulationState<'_, '_, T, SnpBacked> {
    fn flush(&mut self) {}

    fn vp_index(&self) -> VpIndex {
        self.vp.vp_index()
    }

    fn vendor(&self) -> x86defs::cpuid::Vendor {
        self.vp.partition.caps.vendor
    }

    fn gp(&mut self, reg: x86emu::Gp) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        match reg {
            x86emu::Gp::RAX => vmsa.rax(),
            x86emu::Gp::RCX => vmsa.rcx(),
            x86emu::Gp::RDX => vmsa.rdx(),
            x86emu::Gp::RBX => vmsa.rbx(),
            x86emu::Gp::RSP => vmsa.rsp(),
            x86emu::Gp::RBP => vmsa.rbp(),
            x86emu::Gp::RSI => vmsa.rsi(),
            x86emu::Gp::RDI => vmsa.rdi(),
            x86emu::Gp::R8 => vmsa.r8(),
            x86emu::Gp::R9 => vmsa.r9(),
            x86emu::Gp::R10 => vmsa.r10(),
            x86emu::Gp::R11 => vmsa.r11(),
            x86emu::Gp::R12 => vmsa.r12(),
            x86emu::Gp::R13 => vmsa.r13(),
            x86emu::Gp::R14 => vmsa.r14(),
            x86emu::Gp::R15 => vmsa.r15(),
        }
    }

    fn set_gp(&mut self, reg: x86emu::Gp, v: u64) {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        match reg {
            x86emu::Gp::RAX => vmsa.set_rax(v),
            x86emu::Gp::RCX => vmsa.set_rcx(v),
            x86emu::Gp::RDX => vmsa.set_rdx(v),
            x86emu::Gp::RBX => vmsa.set_rbx(v),
            x86emu::Gp::RSP => vmsa.set_rsp(v),
            x86emu::Gp::RBP => vmsa.set_rbp(v),
            x86emu::Gp::RSI => vmsa.set_rsi(v),
            x86emu::Gp::RDI => vmsa.set_rdi(v),
            x86emu::Gp::R8 => vmsa.set_r8(v),
            x86emu::Gp::R9 => vmsa.set_r9(v),
            x86emu::Gp::R10 => vmsa.set_r10(v),
            x86emu::Gp::R11 => vmsa.set_r11(v),
            x86emu::Gp::R12 => vmsa.set_r12(v),
            x86emu::Gp::R13 => vmsa.set_r13(v),
            x86emu::Gp::R14 => vmsa.set_r14(v),
            x86emu::Gp::R15 => vmsa.set_r15(v),
        };
    }

    fn xmm(&mut self, index: usize) -> u128 {
        self.vp.runner.vmsa_mut(self.vtl).xmm_registers(index)
    }

    fn set_xmm(&mut self, index: usize, v: u128) {
        self.vp
            .runner
            .vmsa_mut(self.vtl)
            .set_xmm_registers(index, v);
    }

    fn rip(&mut self) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        vmsa.rip()
    }

    fn set_rip(&mut self, v: u64) {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        vmsa.set_rip(v);
    }

    fn segment(&mut self, index: x86emu::Segment) -> x86defs::SegmentRegister {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        match index {
            x86emu::Segment::ES => virt_seg_from_snp(vmsa.es()),
            x86emu::Segment::CS => virt_seg_from_snp(vmsa.cs()),
            x86emu::Segment::SS => virt_seg_from_snp(vmsa.ss()),
            x86emu::Segment::DS => virt_seg_from_snp(vmsa.ds()),
            x86emu::Segment::FS => virt_seg_from_snp(vmsa.fs()),
            x86emu::Segment::GS => virt_seg_from_snp(vmsa.gs()),
        }
        .into()
    }

    fn efer(&mut self) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        vmsa.efer()
    }

    fn cr0(&mut self) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        vmsa.cr0()
    }

    fn rflags(&mut self) -> RFlags {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        vmsa.rflags().into()
    }

    fn set_rflags(&mut self, v: RFlags) {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        vmsa.set_rflags(v.into());
    }

    fn instruction_bytes(&self) -> &[u8] {
        &[]
    }

    fn physical_address(&self) -> Option<u64> {
        Some(self.vp.runner.vmsa(self.vtl).exit_info2())
    }

    fn initial_gva_translation(
        &mut self,
    ) -> Option<virt_support_x86emu::emulate::InitialTranslation> {
        None
    }

    fn interruption_pending(&self) -> bool {
        self.interruption_pending
    }

    fn check_vtl_access(
        &mut self,
        _gpa: u64,
        _mode: virt_support_x86emu::emulate::TranslateMode,
    ) -> Result<(), virt_support_x86emu::emulate::EmuCheckVtlAccessError> {
        Ok(())
    }

    fn translate_gva(
        &mut self,
        gva: u64,
        mode: virt_support_x86emu::emulate::TranslateMode,
    ) -> Result<
        virt_support_x86emu::emulate::EmuTranslateResult,
        virt_support_x86emu::emulate::EmuTranslateError,
    > {
        emulate_translate_gva(self, gva, mode)
    }

    fn inject_pending_event(&mut self, event_info: hvdef::HvX64PendingEvent) {
        assert!(event_info.reg_0.event_pending());
        assert_eq!(
            event_info.reg_0.event_type(),
            hvdef::HV_X64_PENDING_EVENT_EXCEPTION
        );

        let exception = HvX64PendingExceptionEvent::from(event_info.reg_0.into_bits());
        assert!(!self.interruption_pending);

        SnpBacked::set_pending_exception(self.vp, self.vtl, exception);
    }

    fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool {
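        // Addresses above and below VTOM alias the same physical memory, so
        // strip the VTOM bit before consulting the partition's mapping.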
1933 let vtom = self.vp.partition.caps.vtom.unwrap();
1936 debug_assert!(vtom == 0 || vtom.is_power_of_two());
1937 self.vp.partition.is_gpa_mapped(gpa & !vtom, write)
1938 }
1939
1940 fn lapic_base_address(&self) -> Option<u64> {
1941 self.vp.backing.cvm.lapics[self.vtl].lapic.base_address()
1942 }
1943
1944 fn lapic_read(&mut self, address: u64, data: &mut [u8]) {
1945 let vtl = self.vtl;
1946 self.vp.backing.cvm.lapics[vtl]
1947 .lapic
1948 .access(&mut SnpApicClient {
1949 partition: self.vp.partition,
1950 vmsa: self.vp.runner.vmsa_mut(vtl),
1951 dev: self.devices,
1952 vmtime: &self.vp.vmtime,
1953 vtl,
1954 })
1955 .mmio_read(address, data);
1956 }
1957
1958 fn lapic_write(&mut self, address: u64, data: &[u8]) {
1959 let vtl = self.vtl;
1960 self.vp.backing.cvm.lapics[vtl]
1961 .lapic
1962 .access(&mut SnpApicClient {
1963 partition: self.vp.partition,
1964 vmsa: self.vp.runner.vmsa_mut(vtl),
1965 dev: self.devices,
1966 vmtime: &self.vp.vmtime,
1967 vtl,
1968 })
1969 .mmio_write(address, data);
1970 }
1971
    fn monitor_support(&self) -> Option<&dyn EmulatorMonitorSupport> {
        Some(self)
    }
}

impl<T> hv1_hypercall::X64RegisterState for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn rip(&mut self) -> u64 {
        self.vp.runner.vmsa(self.intercepted_vtl).rip()
    }

    fn set_rip(&mut self, rip: u64) {
        self.vp.runner.vmsa_mut(self.intercepted_vtl).set_rip(rip);
    }

    fn gp(&mut self, n: hv1_hypercall::X64HypercallRegister) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.intercepted_vtl);
        match n {
            hv1_hypercall::X64HypercallRegister::Rax => vmsa.rax(),
            hv1_hypercall::X64HypercallRegister::Rcx => vmsa.rcx(),
            hv1_hypercall::X64HypercallRegister::Rdx => vmsa.rdx(),
            hv1_hypercall::X64HypercallRegister::Rbx => vmsa.rbx(),
            hv1_hypercall::X64HypercallRegister::Rsi => vmsa.rsi(),
            hv1_hypercall::X64HypercallRegister::Rdi => vmsa.rdi(),
            hv1_hypercall::X64HypercallRegister::R8 => vmsa.r8(),
        }
    }

    fn set_gp(&mut self, n: hv1_hypercall::X64HypercallRegister, value: u64) {
        let mut vmsa = self.vp.runner.vmsa_mut(self.intercepted_vtl);
        match n {
            hv1_hypercall::X64HypercallRegister::Rax => vmsa.set_rax(value),
            hv1_hypercall::X64HypercallRegister::Rcx => vmsa.set_rcx(value),
            hv1_hypercall::X64HypercallRegister::Rdx => vmsa.set_rdx(value),
            hv1_hypercall::X64HypercallRegister::Rbx => vmsa.set_rbx(value),
            hv1_hypercall::X64HypercallRegister::Rsi => vmsa.set_rsi(value),
            hv1_hypercall::X64HypercallRegister::Rdi => vmsa.set_rdi(value),
            hv1_hypercall::X64HypercallRegister::R8 => vmsa.set_r8(value),
        }
    }

    fn xmm(&mut self, n: usize) -> u128 {
        self.vp.runner.vmsa(self.intercepted_vtl).xmm_registers(n)
    }

    fn set_xmm(&mut self, n: usize, value: u128) {
        self.vp
            .runner
            .vmsa_mut(self.intercepted_vtl)
            .set_xmm_registers(n, value);
    }
}

impl AccessVpState for UhVpStateAccess<'_, '_, SnpBacked> {
    type Error = vp_state::Error;

    fn caps(&self) -> &virt::x86::X86PartitionCapabilities {
        &self.vp.partition.caps
    }

    fn commit(&mut self) -> Result<(), Self::Error> {
        Ok(())
    }

    fn registers(&mut self) -> Result<vp::Registers, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);

        Ok(vp::Registers {
            rax: vmsa.rax(),
            rcx: vmsa.rcx(),
            rdx: vmsa.rdx(),
            rbx: vmsa.rbx(),
            rsp: vmsa.rsp(),
            rbp: vmsa.rbp(),
            rsi: vmsa.rsi(),
            rdi: vmsa.rdi(),
            r8: vmsa.r8(),
            r9: vmsa.r9(),
            r10: vmsa.r10(),
            r11: vmsa.r11(),
            r12: vmsa.r12(),
            r13: vmsa.r13(),
            r14: vmsa.r14(),
            r15: vmsa.r15(),
            rip: vmsa.rip(),
            rflags: vmsa.rflags(),
            cs: virt_seg_from_snp(vmsa.cs()),
            ds: virt_seg_from_snp(vmsa.ds()),
            es: virt_seg_from_snp(vmsa.es()),
            fs: virt_seg_from_snp(vmsa.fs()),
            gs: virt_seg_from_snp(vmsa.gs()),
            ss: virt_seg_from_snp(vmsa.ss()),
            tr: virt_seg_from_snp(vmsa.tr()),
            ldtr: virt_seg_from_snp(vmsa.ldtr()),
            gdtr: virt_table_from_snp(vmsa.gdtr()),
            idtr: virt_table_from_snp(vmsa.idtr()),
            cr0: vmsa.cr0(),
            cr2: vmsa.cr2(),
            cr3: vmsa.cr3(),
            cr4: vmsa.cr4(),
            cr8: vmsa.v_intr_cntrl().tpr().into(),
            efer: vmsa.efer(),
        })
    }

    fn set_registers(&mut self, value: &vp::Registers) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);

        let vp::Registers {
            rax,
            rcx,
            rdx,
            rbx,
            rsp,
            rbp,
            rsi,
            rdi,
            r8,
            r9,
            r10,
            r11,
            r12,
            r13,
            r14,
            r15,
            rip,
            rflags,
            cs,
            ds,
            es,
            fs,
            gs,
            ss,
            tr,
            ldtr,
            gdtr,
            idtr,
            cr0,
            cr2,
            cr3,
            cr4,
            cr8,
            efer,
        } = *value;
        vmsa.set_rax(rax);
        vmsa.set_rcx(rcx);
        vmsa.set_rdx(rdx);
        vmsa.set_rbx(rbx);
        vmsa.set_rsp(rsp);
        vmsa.set_rbp(rbp);
        vmsa.set_rsi(rsi);
        vmsa.set_rdi(rdi);
        vmsa.set_r8(r8);
        vmsa.set_r9(r9);
        vmsa.set_r10(r10);
        vmsa.set_r11(r11);
        vmsa.set_r12(r12);
        vmsa.set_r13(r13);
        vmsa.set_r14(r14);
        vmsa.set_r15(r15);
        vmsa.set_rip(rip);
        vmsa.set_rflags(rflags);
        vmsa.set_cs(virt_seg_to_snp(cs));
        vmsa.set_ds(virt_seg_to_snp(ds));
        vmsa.set_es(virt_seg_to_snp(es));
        vmsa.set_fs(virt_seg_to_snp(fs));
        vmsa.set_gs(virt_seg_to_snp(gs));
        vmsa.set_ss(virt_seg_to_snp(ss));
        vmsa.set_tr(virt_seg_to_snp(tr));
        vmsa.set_ldtr(virt_seg_to_snp(ldtr));
        vmsa.set_gdtr(virt_table_to_snp(gdtr));
        vmsa.set_idtr(virt_table_to_snp(idtr));
        vmsa.set_cr0(cr0);
        vmsa.set_cr2(cr2);
        vmsa.set_cr3(cr3);
        vmsa.set_cr4(cr4);
        vmsa.v_intr_cntrl_mut().set_tpr(cr8 as u8);
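        // EFER is not taken verbatim: calculate_efer derives the final value
        // from the requested EFER and CR0, presumably to keep SEV-mandated
        // bits such as EFER.SVME consistent.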
        vmsa.set_efer(SnpBacked::calculate_efer(efer, cr0));
        Ok(())
    }

    fn activity(&mut self) -> Result<vp::Activity, Self::Error> {
        let lapic = &self.vp.backing.cvm.lapics[self.vtl];

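        // NMI masking, the interrupt shadow, and pending event/interruption
        // state are not read out of the VMSA here; they are reported as
        // clear/absent.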
        Ok(vp::Activity {
            mp_state: lapic.activity,
            nmi_pending: lapic.nmi_pending,
            nmi_masked: false,
            interrupt_shadow: false,
            pending_event: None,
            pending_interruption: None,
        })
    }

    fn set_activity(&mut self, value: &vp::Activity) -> Result<(), Self::Error> {
        let &vp::Activity {
            mp_state,
            nmi_pending,
            nmi_masked: _,
            interrupt_shadow: _,
            pending_event: _,
            pending_interruption: _,
        } = value;
        let lapic = &mut self.vp.backing.cvm.lapics[self.vtl];
        lapic.activity = mp_state;
        lapic.nmi_pending = nmi_pending;

        Ok(())
    }

    fn xsave(&mut self) -> Result<vp::Xsave, Self::Error> {
        Err(vp_state::Error::Unimplemented("xsave"))
    }

    fn set_xsave(&mut self, _value: &vp::Xsave) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("xsave"))
    }

    fn apic(&mut self) -> Result<vp::Apic, Self::Error> {
        Ok(self.vp.backing.cvm.lapics[self.vtl].lapic.save())
    }

    fn set_apic(&mut self, value: &vp::Apic) -> Result<(), Self::Error> {
        self.vp.backing.cvm.lapics[self.vtl]
            .lapic
            .restore(value)
            .map_err(vp_state::Error::InvalidApicBase)?;
        Ok(())
    }

    fn xcr(&mut self) -> Result<vp::Xcr0, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Xcr0 { value: vmsa.xcr0() })
    }

    fn set_xcr(&mut self, value: &vp::Xcr0) -> Result<(), Self::Error> {
        let vp::Xcr0 { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_xcr0(value);
        Ok(())
    }

    fn xss(&mut self) -> Result<vp::Xss, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Xss { value: vmsa.xss() })
    }

    fn set_xss(&mut self, value: &vp::Xss) -> Result<(), Self::Error> {
        let vp::Xss { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_xss(value);
        Ok(())
    }

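    // MTRRs are not tracked in the VMSA; report a fixed, all-zero (disabled)
    // configuration and accept writes without storing them.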
    fn mtrrs(&mut self) -> Result<vp::Mtrrs, Self::Error> {
        Ok(vp::Mtrrs {
            msr_mtrr_def_type: 0,
            fixed: [0; 11],
            variable: [0; 16],
        })
    }

    fn set_mtrrs(&mut self, _value: &vp::Mtrrs) -> Result<(), Self::Error> {
        Ok(())
    }

    fn pat(&mut self) -> Result<vp::Pat, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Pat { value: vmsa.pat() })
    }

    fn set_pat(&mut self, value: &vp::Pat) -> Result<(), Self::Error> {
        let vp::Pat { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_pat(value);
        Ok(())
    }

    fn virtual_msrs(&mut self) -> Result<vp::VirtualMsrs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);

        Ok(vp::VirtualMsrs {
            kernel_gs_base: vmsa.kernel_gs_base(),
            sysenter_cs: vmsa.sysenter_cs(),
            sysenter_eip: vmsa.sysenter_eip(),
            sysenter_esp: vmsa.sysenter_esp(),
            star: vmsa.star(),
            lstar: vmsa.lstar(),
            cstar: vmsa.cstar(),
            sfmask: vmsa.sfmask(),
        })
    }

    fn set_virtual_msrs(&mut self, value: &vp::VirtualMsrs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::VirtualMsrs {
            kernel_gs_base,
            sysenter_cs,
            sysenter_eip,
            sysenter_esp,
            star,
            lstar,
            cstar,
            sfmask,
        } = *value;
        vmsa.set_kernel_gs_base(kernel_gs_base);
        vmsa.set_sysenter_cs(sysenter_cs);
        vmsa.set_sysenter_eip(sysenter_eip);
        vmsa.set_sysenter_esp(sysenter_esp);
        vmsa.set_star(star);
        vmsa.set_lstar(lstar);
        vmsa.set_cstar(cstar);
        vmsa.set_sfmask(sfmask);

        Ok(())
    }

    fn debug_regs(&mut self) -> Result<vp::DebugRegisters, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::DebugRegisters {
            dr0: vmsa.dr0(),
            dr1: vmsa.dr1(),
            dr2: vmsa.dr2(),
            dr3: vmsa.dr3(),
            dr6: vmsa.dr6(),
            dr7: vmsa.dr7(),
        })
    }

    fn set_debug_regs(&mut self, value: &vp::DebugRegisters) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::DebugRegisters {
            dr0,
            dr1,
            dr2,
            dr3,
            dr6,
            dr7,
        } = *value;
        vmsa.set_dr0(dr0);
        vmsa.set_dr1(dr1);
        vmsa.set_dr2(dr2);
        vmsa.set_dr3(dr3);
        vmsa.set_dr6(dr6);
        vmsa.set_dr7(dr7);
        Ok(())
    }

    fn tsc(&mut self) -> Result<vp::Tsc, Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn set_tsc(&mut self, _value: &vp::Tsc) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn tsc_aux(&mut self) -> Result<vp::TscAux, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::TscAux {
            value: vmsa.tsc_aux() as u64,
        })
    }

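    // TSC_AUX is architecturally 32 bits; the 64-bit saved value is truncated
    // on restore.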
    fn set_tsc_aux(&mut self, value: &vp::TscAux) -> Result<(), Self::Error> {
        let vp::TscAux { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_tsc_aux(value as u32);
        Ok(())
    }

    fn cet(&mut self) -> Result<vp::Cet, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Cet { scet: vmsa.s_cet() })
    }

    fn set_cet(&mut self, value: &vp::Cet) -> Result<(), Self::Error> {
        let vp::Cet { scet } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_s_cet(scet);
        Ok(())
    }

    fn cet_ss(&mut self) -> Result<vp::CetSs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::CetSs {
            ssp: vmsa.ssp(),
            interrupt_ssp_table_addr: vmsa.interrupt_ssp_table_addr(),
        })
    }

    fn set_cet_ss(&mut self, value: &vp::CetSs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::CetSs {
            ssp,
            interrupt_ssp_table_addr,
        } = *value;
        vmsa.set_ssp(ssp);
        vmsa.set_interrupt_ssp_table_addr(interrupt_ssp_table_addr);
        Ok(())
    }

    fn synic_msrs(&mut self) -> Result<vp::SyntheticMsrs, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn set_synic_msrs(&mut self, _value: &vp::SyntheticMsrs) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn synic_message_page(&mut self) -> Result<vp::SynicMessagePage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn set_synic_message_page(&mut self, _value: &vp::SynicMessagePage) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn synic_event_flags_page(&mut self) -> Result<vp::SynicEventFlagsPage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn set_synic_event_flags_page(
        &mut self,
        _value: &vp::SynicEventFlagsPage,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn synic_message_queues(&mut self) -> Result<vp::SynicMessageQueues, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn set_synic_message_queues(
        &mut self,
        _value: &vp::SynicMessageQueues,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn synic_timers(&mut self) -> Result<vp::SynicTimers, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }

    fn set_synic_timers(&mut self, _value: &vp::SynicTimers) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }
}

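/// Advances the VP past the intercepted instruction by loading the
/// hardware-reported next RIP, and clears the interrupt shadow.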
fn advance_to_next_instruction(vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>) {
    vmsa.set_rip(vmsa.next_rip());
    vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
}

impl UhProcessor<'_, SnpBacked> {
    fn read_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        vtl: GuestVtl,
    ) -> Result<u64, MsrError> {
        let vmsa = self.runner.vmsa(vtl);
        let value = match msr {
            x86defs::X64_MSR_FS_BASE => vmsa.fs().base,
            x86defs::X64_MSR_GS_BASE => vmsa.gs().base,
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.kernel_gs_base(),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.tsc_aux() as u64
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.spec_ctrl(),
            x86defs::X86X_MSR_U_CET => vmsa.u_cet(),
            x86defs::X86X_MSR_S_CET => vmsa.s_cet(),
            x86defs::X86X_MSR_PL0_SSP => vmsa.pl0_ssp(),
            x86defs::X86X_MSR_PL1_SSP => vmsa.pl1_ssp(),
            x86defs::X86X_MSR_PL2_SSP => vmsa.pl2_ssp(),
            x86defs::X86X_MSR_PL3_SSP => vmsa.pl3_ssp(),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.interrupt_ssp_table_addr(),
            x86defs::X86X_MSR_CR_PAT => vmsa.pat(),
            x86defs::X86X_MSR_EFER => vmsa.efer(),
            x86defs::X86X_MSR_STAR => vmsa.star(),
            x86defs::X86X_MSR_LSTAR => vmsa.lstar(),
            x86defs::X86X_MSR_CSTAR => vmsa.cstar(),
            x86defs::X86X_MSR_SFMASK => vmsa.sfmask(),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.sysenter_cs(),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.sysenter_esp(),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.sysenter_eip(),
            x86defs::X86X_MSR_XSS => vmsa.xss(),
            x86defs::X86X_AMD_MSR_VM_CR => 0,
            x86defs::X86X_MSR_TSC => safe_intrinsics::rdtsc(),
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => 0xffff_ffff,
            x86defs::X86X_MSR_MTRR_CAP => {
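                // Advertise write-combining support (bit 10 of MTRRcap) with
                // no fixed or variable MTRR ranges.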
                0x400
            }
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {
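                // MTRRs read as disabled; caching behavior is presumably
                // governed by PAT instead.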
                0
            }
            x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP
            | x86defs::X86X_MSR_MCG_STATUS => 0,

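            // A read of HV_X64_MSR_GUEST_IDLE idles the VP: record the idle
            // state in the local APIC and clear the interrupt shadow so a
            // pending interrupt can wake it.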
            hvdef::HV_X64_MSR_GUEST_IDLE => {
                self.backing.cvm.lapics[vtl].activity = MpState::Idle;
                let mut vmsa = self.runner.vmsa_mut(vtl);
                vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
                0
            }
            _ => return Err(MsrError::Unknown),
        };
        Ok(value)
    }

    fn write_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        value: u64,
        vtl: GuestVtl,
    ) -> Result<(), MsrError> {
        let mut vmsa = self.runner.vmsa_mut(vtl);
        match msr {
            x86defs::X64_MSR_FS_BASE => {
                let fs = vmsa.fs();
                vmsa.set_fs(SevSelector {
                    attrib: fs.attrib,
                    selector: fs.selector,
                    limit: fs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_GS_BASE => {
                let gs = vmsa.gs();
                vmsa.set_gs(SevSelector {
                    attrib: gs.attrib,
                    selector: gs.selector,
                    limit: gs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.set_kernel_gs_base(value),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.set_tsc_aux(value as u32);
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.set_spec_ctrl(value),
            x86defs::X86X_MSR_U_CET => vmsa.set_u_cet(value),
            x86defs::X86X_MSR_S_CET => vmsa.set_s_cet(value),
            x86defs::X86X_MSR_PL0_SSP => vmsa.set_pl0_ssp(value),
            x86defs::X86X_MSR_PL1_SSP => vmsa.set_pl1_ssp(value),
            x86defs::X86X_MSR_PL2_SSP => vmsa.set_pl2_ssp(value),
            x86defs::X86X_MSR_PL3_SSP => vmsa.set_pl3_ssp(value),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.set_interrupt_ssp_table_addr(value),

            x86defs::X86X_MSR_CR_PAT => vmsa.set_pat(value),
            x86defs::X86X_MSR_EFER => vmsa.set_efer(SnpBacked::calculate_efer(value, vmsa.cr0())),

            x86defs::X86X_MSR_STAR => vmsa.set_star(value),
            x86defs::X86X_MSR_LSTAR => vmsa.set_lstar(value),
            x86defs::X86X_MSR_CSTAR => vmsa.set_cstar(value),
            x86defs::X86X_MSR_SFMASK => vmsa.set_sfmask(value),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.set_sysenter_cs(value),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.set_sysenter_esp(value),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.set_sysenter_eip(value),
            x86defs::X86X_MSR_XSS => vmsa.set_xss(value),

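            // Writes to these MSRs are silently accepted and discarded.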
            x86defs::X86X_MSR_TSC => {}
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => {}
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {}

            x86defs::X86X_AMD_MSR_VM_CR
            | x86defs::X86X_MSR_MTRR_CAP
            | x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP => return Err(MsrError::InvalidAccess),

            x86defs::X86X_MSR_MCG_STATUS => {
                if x86defs::X86xMcgStatusRegister::from(value).reserved0() != 0 {
                    return Err(MsrError::InvalidAccess);
                }
            }
            _ => {
                tracing::debug!(msr, value, "unknown cvm msr write");
            }
        }
        Ok(())
    }
}

impl<T: CpuIo> hv1_hypercall::VtlSwitchOps for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn advance_ip(&mut self) {
        let is_64bit = self.vp.long_mode(self.intercepted_vtl);
        let mut io = hv1_hypercall::X64RegisterIo::new(self, is_64bit);
        io.advance_ip();
    }

    fn inject_invalid_opcode_fault(&mut self) {
        self.vp
            .runner
            .vmsa_mut(self.intercepted_vtl)
            .set_event_inject(
                SevEventInjectInfo::new()
                    .with_valid(true)
                    .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                    .with_vector(x86defs::Exception::INVALID_OPCODE.0),
            );
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressList for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn flush_virtual_address_list(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        hv1_hypercall::FlushVirtualAddressListEx::flush_virtual_address_list_ex(
            self,
            processor_set,
            flags,
            gva_ranges,
        )
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressListEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_list_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        self.hcvm_validate_flush_inputs(processor_set, flags, true)
            .map_err(|e| (e, 0))?;

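        // If there are too many ranges, or any single range covers too many
        // pages, fall back to flushing the entire address space.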
        if gva_ranges.len() > 16
            || gva_ranges.iter().any(|range| {
                let additional_pages = if flags.use_extended_range_format() {
                    range.as_extended().additional_pages()
                } else {
                    range.as_simple().additional_pages()
                };
                additional_pages > 16
            })
        {
            self.do_flush_virtual_address_space(processor_set, flags);
        } else {
            self.do_flush_virtual_address_list(flags, gva_ranges);
        }

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);

        Ok(())
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpace
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        hv1_hypercall::FlushVirtualAddressSpaceEx::flush_virtual_address_space_ex(
            self,
            processor_set,
            flags,
        )
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpaceEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        self.hcvm_validate_flush_inputs(processor_set, flags, false)?;

        self.do_flush_virtual_address_space(processor_set, flags);

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);

        Ok(())
    }
}

impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn do_flush_virtual_address_list(&mut self, flags: HvFlushFlags, gva_ranges: &[HvGvaRange]) {
        for range in gva_ranges {
            let mut rax = SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_va_valid(true)
                .with_global(!flags.non_global_mappings_only());
            let mut ecx = SevInvlpgbEcx::new();
            let mut count;
            let mut gpn;

            if flags.use_extended_range_format() && range.as_extended().large_page() {
                ecx.set_large_page(true);
                if range.as_extended_large_page().page_size() {
                    let range = range.as_extended_large_page();
                    count = range.additional_pages();
                    gpn = range.gva_large_page_number();
                } else {
                    let range = range.as_extended();
                    count = range.additional_pages();
                    gpn = range.gva_page_number();
                }
            } else {
                let range = range.as_simple();
                count = range.additional_pages();
                gpn = range.gva_page_number();
            }
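            // additional_pages excludes the first page, so convert to a total
            // count, then issue INVLPGBs in batches no larger than the
            // processor's reported maximum.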
            count += 1;

            while count > 0 {
                rax.set_virtual_page_number(gpn);
                ecx.set_additional_count(std::cmp::min(
                    count - 1,
                    self.vp.shared.invlpgb_count_max.into(),
                ));

                let edx = SevInvlpgbEdx::new();
                self.vp
                    .partition
                    .hcl
                    .invlpgb(rax.into(), edx.into(), ecx.into());

                count -= ecx.additional_count() + 1;
                gpn += ecx.additional_count() + 1;
            }
        }

        self.vp.partition.hcl.tlbsync();
    }

    fn do_flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) {
        let only_self = [self.vp.vp_index().index()].into_iter().eq(processor_set);
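        // For a self-only, non-global flush, skip the broadcast INVLPGB:
        // clearing the VMSA's pcpu_id appears to force this VP's TLB to be
        // flushed the next time it runs.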
        if only_self && flags.non_global_mappings_only() {
            self.vp.runner.vmsa_mut(self.intercepted_vtl).set_pcpu_id(0);
        } else {
            self.vp.partition.hcl.invlpgb(
                SevInvlpgbRax::new()
                    .with_asid_valid(true)
                    .with_global(!flags.non_global_mappings_only())
                    .into(),
                SevInvlpgbEdx::new().into(),
                SevInvlpgbEcx::new().into(),
            );
            self.vp.partition.hcl.tlbsync();
        }
    }
}

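/// TLB flush/lock access for SNP partitions. `vp_index` is `None` when the
/// flush is issued from a partition-wide context rather than a specific VP,
/// in which case there is no VP to mark as waiting for TLB locks.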
struct SnpTlbLockFlushAccess<'a> {
    vp_index: Option<VpIndex>,
    partition: &'a UhPartitionInner,
    shared: &'a SnpBackedShared,
}

impl TlbFlushLockAccess for SnpTlbLockFlushAccess<'_> {
    fn flush(&mut self, vtl: GuestVtl) {
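        // INVLPGB operates on the whole ASID, which covers both VTLs; only
        // the requested VTL is marked as waiting for TLB locks.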
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        self.set_wait_for_tlb_locks(vtl);
    }

    fn flush_entire(&mut self) {
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
            self.set_wait_for_tlb_locks(vtl);
        }
    }

    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl) {
        if let Some(vp_index) = self.vp_index {
            hardware_cvm::tlb_lock::TlbLockAccess {
                vp_index,
                cvm_partition: &self.shared.cvm,
            }
            .set_wait_for_tlb_locks(vtl);
        }
    }
}

mod save_restore {
    use super::SnpBacked;
    use super::UhProcessor;
    use vmcore::save_restore::RestoreError;
    use vmcore::save_restore::SaveError;
    use vmcore::save_restore::SaveRestore;
    use vmcore::save_restore::SavedStateNotSupported;

    impl SaveRestore for UhProcessor<'_, SnpBacked> {
        type SavedState = SavedStateNotSupported;

        fn save(&mut self) -> Result<Self::SavedState, SaveError> {
            Err(SaveError::NotSupported)
        }

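        // `SavedStateNotSupported` is uninhabited, so the empty match proves
        // restore can never be called with a real state value.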
        fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
            match state {}
        }
    }
}