use super::BackingParams;
use super::BackingPrivate;
use super::BackingSharedParams;
use super::HardwareIsolatedBacking;
use super::InterceptMessageOptionalState;
use super::InterceptMessageState;
use super::UhEmulationState;
use super::hardware_cvm;
use super::vp_state;
use super::vp_state::UhVpStateAccess;
use crate::BackingShared;
use crate::Error;
use crate::GuestVtl;
use crate::TlbFlushLockAccess;
use crate::UhCvmPartitionState;
use crate::UhCvmVpState;
use crate::UhPartitionInner;
use crate::UhPartitionNewParams;
use crate::WakeReason;
use crate::processor::UhHypercallHandler;
use crate::processor::UhProcessor;
use crate::processor::hardware_cvm::apic::ApicBacking;
use cvm_tracing::CVM_ALLOWED;
use cvm_tracing::CVM_CONFIDENTIAL;
use hcl::vmsa::VmsaWrapper;
use hv1_emulator::hv::ProcessorVtlHv;
use hv1_emulator::synic::ProcessorSynic;
use hv1_hypercall::HvRepResult;
use hv1_hypercall::HypercallIo;
use hv1_structs::ProcessorSet;
use hv1_structs::VtlArray;
use hvdef::HV_PAGE_SIZE;
use hvdef::HvDeliverabilityNotificationsRegister;
use hvdef::HvError;
use hvdef::HvMessageType;
use hvdef::HvX64PendingExceptionEvent;
use hvdef::HvX64RegisterName;
use hvdef::Vtl;
use hvdef::hypercall::Control;
use hvdef::hypercall::HvFlushFlags;
use hvdef::hypercall::HvGvaRange;
use hvdef::hypercall::HypercallOutput;
use inspect::Inspect;
use inspect::InspectMut;
use inspect_counters::Counter;
use thiserror::Error;
use virt::EmulatorMonitorSupport;
use virt::Processor;
use virt::VpHaltReason;
use virt::VpIndex;
use virt::io::CpuIo;
use virt::state::StateElement;
use virt::vp;
use virt::vp::AccessVpState;
use virt::vp::MpState;
use virt::x86::MsrError;
use virt::x86::MsrErrorExt;
use virt::x86::SegmentRegister;
use virt::x86::TableRegister;
use virt_support_apic::ApicClient;
use virt_support_x86emu::emulate::EmulatorSupport as X86EmulatorSupport;
use virt_support_x86emu::emulate::emulate_io;
use virt_support_x86emu::emulate::emulate_translate_gva;
use virt_support_x86emu::translate::TranslationRegisters;
use vmcore::vmtime::VmTimeAccess;
use x86defs::RFlags;
use x86defs::cpuid::CpuidFunction;
use x86defs::snp::SevEventInjectInfo;
use x86defs::snp::SevExitCode;
use x86defs::snp::SevInvlpgbEcx;
use x86defs::snp::SevInvlpgbEdx;
use x86defs::snp::SevInvlpgbRax;
use x86defs::snp::SevIoAccessInfo;
use x86defs::snp::SevNpfInfo;
use x86defs::snp::SevSelector;
use x86defs::snp::SevStatusMsr;
use x86defs::snp::SevVmsa;
use x86defs::snp::Vmpl;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;

#[derive(Debug, Error)]
#[error("invalid vmcb")]
struct InvalidVmcb;

#[derive(Debug, Error)]
enum SnpGhcbError {
    #[error("failed to access GHCB page")]
    GhcbPageAccess(#[source] guestmem::GuestMemoryError),
    #[error("ghcb page used for vmgexit does not match overlay page")]
    GhcbMisconfiguration,
}

#[derive(Debug, Error)]
#[error("failed to run")]
struct SnpRunVpError(#[source] hcl::ioctl::Error);

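/// Backing state for a virtual processor on an SNP-isolated (hardware CVM)
/// partition.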
#[derive(InspectMut)]
pub struct SnpBacked {
    #[inspect(hex)]
    hv_sint_notifications: u16,
    general_stats: VtlArray<GeneralStats, 2>,
    exit_stats: VtlArray<ExitStats, 2>,
    #[inspect(flatten)]
    cvm: UhCvmVpState,
}

#[derive(Inspect, Default)]
struct GeneralStats {
    guest_busy: Counter,
    int_ack: Counter,
    synth_int: Counter,
}

#[derive(Inspect, Default)]
struct ExitStats {
    automatic_exit: Counter,
    cpuid: Counter,
    hlt: Counter,
    intr: Counter,
    invd: Counter,
    invlpgb: Counter,
    ioio: Counter,
    msr_read: Counter,
    msr_write: Counter,
    npf: Counter,
    npf_no_intercept: Counter,
    npf_spurious: Counter,
    rdpmc: Counter,
    vmgexit: Counter,
    vmmcall: Counter,
    xsetbv: Counter,
    excp_db: Counter,
    secure_reg_write: Counter,
}

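/// Index of each per-VP direct overlay page registered with the hypervisor:
/// the synic message page, the synic event flags page, and the GHCB page.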
enum UhDirectOverlay {
    Sipp,
    Sifp,
    Ghcb,
    Count,
}

impl SnpBacked {
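    /// Computes the effective EFER for the guest: LMA is derived from LME and
    /// CR0.PG, and SVME is always set.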
    fn calculate_efer(efer: u64, cr0: u64) -> u64 {
        let new_efer = if efer & x86defs::X64_EFER_LME != 0 && cr0 & x86defs::X64_CR0_PG != 0 {
            efer | x86defs::X64_EFER_LMA
        } else {
            efer & !x86defs::X64_EFER_LMA
        };
        new_efer | x86defs::X64_EFER_SVME
    }

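    /// Returns the number of shared pool pages required per CPU for the direct
    /// overlay pages.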
    pub fn shared_pages_required_per_cpu() -> u64 {
        UhDirectOverlay::Count as u64
    }
}

impl HardwareIsolatedBacking for SnpBacked {
    fn cvm_state(&self) -> &UhCvmVpState {
        &self.cvm
    }

    fn cvm_state_mut(&mut self) -> &mut UhCvmVpState {
        &mut self.cvm
    }

    fn cvm_partition_state(shared: &Self::Shared) -> &UhCvmPartitionState {
        &shared.cvm
    }

    fn switch_vtl(this: &mut UhProcessor<'_, Self>, source_vtl: GuestVtl, target_vtl: GuestVtl) {
        let [vmsa0, vmsa1] = this.runner.vmsas_mut();
        let (current_vmsa, mut target_vmsa) = match (source_vtl, target_vtl) {
            (GuestVtl::Vtl0, GuestVtl::Vtl1) => (vmsa0, vmsa1),
            (GuestVtl::Vtl1, GuestVtl::Vtl0) => (vmsa1, vmsa0),
            _ => unreachable!(),
        };

        target_vmsa.set_rax(current_vmsa.rax());
        target_vmsa.set_rbx(current_vmsa.rbx());
        target_vmsa.set_rcx(current_vmsa.rcx());
        target_vmsa.set_rdx(current_vmsa.rdx());
        target_vmsa.set_rbp(current_vmsa.rbp());
        target_vmsa.set_rsi(current_vmsa.rsi());
        target_vmsa.set_rdi(current_vmsa.rdi());
        target_vmsa.set_r8(current_vmsa.r8());
        target_vmsa.set_r9(current_vmsa.r9());
        target_vmsa.set_r10(current_vmsa.r10());
        target_vmsa.set_r11(current_vmsa.r11());
        target_vmsa.set_r12(current_vmsa.r12());
        target_vmsa.set_r13(current_vmsa.r13());
        target_vmsa.set_r14(current_vmsa.r14());
        target_vmsa.set_r15(current_vmsa.r15());
        target_vmsa.set_xcr0(current_vmsa.xcr0());

        target_vmsa.set_cr2(current_vmsa.cr2());

        target_vmsa.set_dr0(current_vmsa.dr0());
        target_vmsa.set_dr1(current_vmsa.dr1());
        target_vmsa.set_dr2(current_vmsa.dr2());
        target_vmsa.set_dr3(current_vmsa.dr3());

        target_vmsa.set_pl0_ssp(current_vmsa.pl0_ssp());
        target_vmsa.set_pl1_ssp(current_vmsa.pl1_ssp());
        target_vmsa.set_pl2_ssp(current_vmsa.pl2_ssp());
        target_vmsa.set_pl3_ssp(current_vmsa.pl3_ssp());
        target_vmsa.set_u_cet(current_vmsa.u_cet());

        target_vmsa.set_x87_registers(&current_vmsa.x87_registers());

        let vec_reg_count = 16;
        for i in 0..vec_reg_count {
            target_vmsa.set_xmm_registers(i, current_vmsa.xmm_registers(i));
            target_vmsa.set_ymm_registers(i, current_vmsa.ymm_registers(i));
        }

        this.backing.cvm_state_mut().exit_vtl = target_vtl;
    }

    fn translation_registers(
        &self,
        this: &UhProcessor<'_, Self>,
        vtl: GuestVtl,
    ) -> TranslationRegisters {
        let vmsa = this.runner.vmsa(vtl);
        TranslationRegisters {
            cr0: vmsa.cr0(),
            cr4: vmsa.cr4(),
            efer: vmsa.efer(),
            cr3: vmsa.cr3(),
            rflags: vmsa.rflags(),
            ss: virt_seg_from_snp(vmsa.ss()).into(),
            encryption_mode: virt_support_x86emu::translate::EncryptionMode::Vtom(
                this.partition.caps.vtom.unwrap(),
            ),
        }
    }

    fn tlb_flush_lock_access<'a>(
        vp_index: Option<VpIndex>,
        partition: &'a UhPartitionInner,
        shared: &'a Self::Shared,
    ) -> impl TlbFlushLockAccess + 'a {
        SnpTlbLockFlushAccess {
            vp_index,
            partition,
            shared,
        }
    }

    fn pending_event_vector(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> Option<u8> {
        let event_inject = this.runner.vmsa(vtl).event_inject();
        if event_inject.valid() {
            Some(event_inject.vector())
        } else {
            None
        }
    }

    fn set_pending_exception(
        this: &mut UhProcessor<'_, Self>,
        vtl: GuestVtl,
        event: HvX64PendingExceptionEvent,
    ) {
        let inject_info = SevEventInjectInfo::new()
            .with_valid(true)
            .with_deliver_error_code(event.deliver_error_code())
            .with_error_code(event.error_code())
            .with_vector(event.vector().try_into().unwrap())
            .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT);

        this.runner.vmsa_mut(vtl).set_event_inject(inject_info);
    }

    fn cr0(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
        this.runner.vmsa(vtl).cr0()
    }

    fn cr4(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
        this.runner.vmsa(vtl).cr4()
    }

    fn intercept_message_state(
        this: &UhProcessor<'_, Self>,
        vtl: GuestVtl,
        include_optional_state: bool,
    ) -> InterceptMessageState {
        let vmsa = this.runner.vmsa(vtl);

        let instr_len = if SevExitCode(vmsa.guest_error_code()) == SevExitCode::NPF {
            0
        } else {
            (vmsa.next_rip() - vmsa.rip()) as u8
        };

        InterceptMessageState {
            instruction_length_and_cr8: instr_len,
            cpl: vmsa.cpl(),
            efer_lma: vmsa.efer() & x86defs::X64_EFER_LMA != 0,
            cs: virt_seg_from_snp(vmsa.cs()).into(),
            rip: vmsa.rip(),
            rflags: vmsa.rflags(),
            rax: vmsa.rax(),
            rdx: vmsa.rdx(),
            optional: if include_optional_state {
                Some(InterceptMessageOptionalState {
                    ds: virt_seg_from_snp(vmsa.ds()).into(),
                    es: virt_seg_from_snp(vmsa.es()).into(),
                })
            } else {
                None
            },
            rcx: vmsa.rcx(),
            rsi: vmsa.rsi(),
            rdi: vmsa.rdi(),
        }
    }

    fn cr_intercept_registration(
        this: &mut UhProcessor<'_, Self>,
        intercept_control: hvdef::HvRegisterCrInterceptControl,
    ) {
        this.runner
            .set_vp_registers_hvcall(
                Vtl::Vtl1,
                [(
                    HvX64RegisterName::CrInterceptControl,
                    u64::from(intercept_control),
                )],
            )
            .expect("setting intercept control succeeds");
    }

    fn is_interrupt_pending(
        this: &mut UhProcessor<'_, Self>,
        vtl: GuestVtl,
        check_rflags: bool,
        dev: &impl CpuIo,
    ) -> bool {
        let vmsa = this.runner.vmsa_mut(vtl);
        if vmsa.event_inject().valid()
            && vmsa.event_inject().interruption_type() == x86defs::snp::SEV_INTR_TYPE_NMI
        {
            return true;
        }

        let vmsa_priority = vmsa.v_intr_cntrl().priority() as u32;
        let lapic = &mut this.backing.cvm.lapics[vtl].lapic;
        let ppr = lapic
            .access(&mut SnpApicClient {
                partition: this.partition,
                vmsa,
                dev,
                vmtime: &this.vmtime,
                vtl,
            })
            .get_ppr();
        let ppr_priority = ppr >> 4;
        if vmsa_priority <= ppr_priority {
            return false;
        }

        let vmsa = this.runner.vmsa_mut(vtl);
        if (check_rflags && !RFlags::from_bits(vmsa.rflags()).interrupt_enable())
            || vmsa.v_intr_cntrl().intr_shadow()
            || !vmsa.v_intr_cntrl().irq()
        {
            return false;
        }

        true
    }

    fn untrusted_synic_mut(&mut self) -> Option<&mut ProcessorSynic> {
        None
    }
}

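/// Partition-wide shared state for SNP-backed processors.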
#[derive(Inspect)]
pub struct SnpBackedShared {
    #[inspect(flatten)]
    pub(crate) cvm: UhCvmPartitionState,
    invlpgb_count_max: u16,
    tsc_aux_virtualized: bool,
    #[inspect(debug)]
    sev_status: SevStatusMsr,
}

impl SnpBackedShared {
    pub(crate) fn new(
        _partition_params: &UhPartitionNewParams<'_>,
        params: BackingSharedParams<'_>,
    ) -> Result<Self, Error> {
        let cvm = params.cvm_state.unwrap();
        let invlpgb_count_max = x86defs::cpuid::ExtendedAddressSpaceSizesEdx::from(
            params
                .cpuid
                .result(CpuidFunction::ExtendedAddressSpaceSizes.0, 0, &[0; 4])[3],
        )
        .invlpgb_count_max();
        let tsc_aux_virtualized = x86defs::cpuid::ExtendedSevFeaturesEax::from(
            params
                .cpuid
                .result(CpuidFunction::ExtendedSevFeatures.0, 0, &[0; 4])[0],
        )
        .tsc_aux_virtualization();

        let msr = crate::MsrDevice::new(0).expect("open msr");
        let sev_status =
            SevStatusMsr::from(msr.read_msr(x86defs::X86X_AMD_MSR_SEV).expect("read msr"));
        tracing::info!(CVM_ALLOWED, ?sev_status, "SEV status");

        Ok(Self {
            sev_status,
            invlpgb_count_max,
            tsc_aux_virtualized,
            cvm,
        })
    }
}

#[expect(private_interfaces)]
impl BackingPrivate for SnpBacked {
    type HclBacking<'snp> = hcl::ioctl::snp::Snp<'snp>;
    type Shared = SnpBackedShared;
    type EmulationCache = ();

    fn shared(shared: &BackingShared) -> &Self::Shared {
        let BackingShared::Snp(shared) = shared else {
            unreachable!()
        };
        shared
    }

    fn new(params: BackingParams<'_, '_, Self>, shared: &SnpBackedShared) -> Result<Self, Error> {
        Ok(Self {
            hv_sint_notifications: 0,
            general_stats: VtlArray::from_fn(|_| Default::default()),
            exit_stats: VtlArray::from_fn(|_| Default::default()),
            cvm: UhCvmVpState::new(
                &shared.cvm,
                params.partition,
                params.vp_info,
                UhDirectOverlay::Count as usize,
            )?,
        })
    }

    fn init(this: &mut UhProcessor<'_, Self>) {
        let sev_status = this.vp().shared.sev_status;
        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
            init_vmsa(
                &mut this.runner.vmsa_mut(vtl),
                vtl,
                this.partition.caps.vtom,
                sev_status,
            );

            let registers = vp::Registers::at_reset(&this.partition.caps, &this.inner.vp_info);
            this.access_state(vtl.into())
                .set_registers(&registers)
                .expect("Resetting to architectural state should succeed");

            let debug_registers =
                vp::DebugRegisters::at_reset(&this.partition.caps, &this.inner.vp_info);

            this.access_state(vtl.into())
                .set_debug_regs(&debug_registers)
                .expect("Resetting to architectural state should succeed");

            let xcr0 = vp::Xcr0::at_reset(&this.partition.caps, &this.inner.vp_info);
            this.access_state(vtl.into())
                .set_xcr(&xcr0)
                .expect("Resetting to architectural state should succeed");

            let cache_control = vp::Mtrrs::at_reset(&this.partition.caps, &this.inner.vp_info);
            this.access_state(vtl.into())
                .set_mtrrs(&cache_control)
                .expect("Resetting to architectural state should succeed");
        }

        let pfns = &this.backing.cvm.direct_overlay_handle.pfns();
        let values: &[(HvX64RegisterName, u64); 3] = &[
            (
                HvX64RegisterName::Sipp,
                hvdef::HvSynicSimpSiefp::new()
                    .with_enabled(true)
                    .with_base_gpn(pfns[UhDirectOverlay::Sipp as usize])
                    .into(),
            ),
            (
                HvX64RegisterName::Sifp,
                hvdef::HvSynicSimpSiefp::new()
                    .with_enabled(true)
                    .with_base_gpn(pfns[UhDirectOverlay::Sifp as usize])
                    .into(),
            ),
            (
                HvX64RegisterName::Ghcb,
                x86defs::snp::GhcbMsr::new()
                    .with_info(x86defs::snp::GhcbInfo::REGISTER_REQUEST.0)
                    .with_pfn(pfns[UhDirectOverlay::Ghcb as usize])
                    .into(),
            ),
        ];

        this.runner
            .set_vp_registers_hvcall(Vtl::Vtl0, values)
            .expect("set_vp_registers hypercall for direct overlays should succeed");
    }

    type StateAccess<'p, 'a>
        = UhVpStateAccess<'a, 'p, Self>
    where
        Self: 'a + 'p,
        'p: 'a;

    fn access_vp_state<'a, 'p>(
        this: &'a mut UhProcessor<'p, Self>,
        vtl: GuestVtl,
    ) -> Self::StateAccess<'p, 'a> {
        UhVpStateAccess::new(this, vtl)
    }

    async fn run_vp(
        this: &mut UhProcessor<'_, Self>,
        dev: &impl CpuIo,
        _stop: &mut virt::StopVp<'_>,
    ) -> Result<(), VpHaltReason> {
        this.run_vp_snp(dev).await
    }

    fn poll_apic(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl, scan_irr: bool) {
        this.runner.vmsa_mut(vtl).v_intr_cntrl_mut().set_irq(false);

        hardware_cvm::apic::poll_apic_core(this, vtl, scan_irr)
    }

    fn request_extint_readiness(_this: &mut UhProcessor<'_, Self>) {
        unreachable!("extint managed through software apic")
    }

    fn request_untrusted_sint_readiness(this: &mut UhProcessor<'_, Self>, sints: u16) {
        let sints = this.backing.hv_sint_notifications | sints;
        if this.backing.hv_sint_notifications == sints {
            return;
        }
        let notifications = HvDeliverabilityNotificationsRegister::new().with_sints(sints);
        tracing::trace!(?notifications, "setting notifications");
        this.runner
            .set_vp_register(
                GuestVtl::Vtl0,
                HvX64RegisterName::DeliverabilityNotifications,
                u64::from(notifications).into(),
            )
            .expect("requesting deliverability is not a fallible operation");

        this.backing.hv_sint_notifications = sints;
    }

    fn inspect_extra(this: &mut UhProcessor<'_, Self>, resp: &mut inspect::Response<'_>) {
        let vtl0_vmsa = this.runner.vmsa(GuestVtl::Vtl0);
        let vtl1_vmsa = if this.backing.cvm_state().vtl1.is_some() {
            Some(this.runner.vmsa(GuestVtl::Vtl1))
        } else {
            None
        };

        let add_vmsa_inspect = |req: inspect::Request<'_>, vmsa: VmsaWrapper<'_, &SevVmsa>| {
            req.respond()
                .hex("guest_error_code", vmsa.guest_error_code())
                .hex("exit_info1", vmsa.exit_info1())
                .hex("exit_info2", vmsa.exit_info2())
                .hex("v_intr_cntrl", u64::from(vmsa.v_intr_cntrl()));
        };

        resp.child("vmsa_additional", |req| {
            req.respond()
                .child("vtl0", |inner_req| add_vmsa_inspect(inner_req, vtl0_vmsa))
                .child("vtl1", |inner_req| {
                    if let Some(vtl1_vmsa) = vtl1_vmsa {
                        add_vmsa_inspect(inner_req, vtl1_vmsa);
                    }
                });
        });
    }

    fn hv(&self, vtl: GuestVtl) -> Option<&ProcessorVtlHv> {
        Some(&self.cvm.hv[vtl])
    }

    fn hv_mut(&mut self, vtl: GuestVtl) -> Option<&mut ProcessorVtlHv> {
        Some(&mut self.cvm.hv[vtl])
    }

    fn handle_vp_start_enable_vtl_wake(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl) {
        this.hcvm_handle_vp_start_enable_vtl(vtl)
    }

    fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
        this.hcvm_vtl1_inspectable()
    }

    fn process_interrupts(
        this: &mut UhProcessor<'_, Self>,
        scan_irr: VtlArray<bool, 2>,
        first_scan_irr: &mut bool,
        dev: &impl CpuIo,
    ) -> bool {
        this.cvm_process_interrupts(scan_irr, first_scan_irr, dev)
    }
}

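// Conversions between the hypervisor segment/table register layout (attribute
// flags at bits 12..16) and the packed SEV VMSA layout (flags at bits 8..12).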
fn virt_seg_to_snp(val: SegmentRegister) -> SevSelector {
    SevSelector {
        selector: val.selector,
        attrib: (val.attributes & 0xFF) | ((val.attributes >> 4) & 0xF00),
        limit: val.limit,
        base: val.base,
    }
}

fn virt_table_to_snp(val: TableRegister) -> SevSelector {
    SevSelector {
        limit: val.limit as u32,
        base: val.base,
        ..FromZeros::new_zeroed()
    }
}

fn virt_seg_from_snp(selector: SevSelector) -> SegmentRegister {
    SegmentRegister {
        base: selector.base,
        limit: selector.limit,
        selector: selector.selector,
        attributes: (selector.attrib & 0xFF) | ((selector.attrib & 0xF00) << 4),
    }
}

fn virt_table_from_snp(selector: SevSelector) -> TableRegister {
    TableRegister {
        limit: selector.limit as u16,
        base: selector.base,
    }
}

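/// Initializes a VMSA for the given VTL: resets it, propagates the relevant
/// SEV feature bits (vTOM, BTB isolation, register protection, debug swap,
/// alternate injection), and assigns the VMPL that backs the VTL.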
fn init_vmsa(
    vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>,
    vtl: GuestVtl,
    vtom: Option<u64>,
    sev_status: SevStatusMsr,
) {
    vmsa.reset(sev_status.vmsa_reg_prot());
    vmsa.sev_features_mut()
        .set_snp_btb_isolation(sev_status.snp_btb_isolation());
    vmsa.sev_features_mut()
        .set_prevent_host_ibs(sev_status.prevent_host_ibs());
    vmsa.sev_features_mut()
        .set_vmsa_reg_prot(sev_status.vmsa_reg_prot());
    vmsa.sev_features_mut().set_snp(true);
    vmsa.sev_features_mut().set_vtom(vtom.is_some());
    vmsa.set_virtual_tom(vtom.unwrap_or(0));

    vmsa.sev_features_mut().set_alternate_injection(true);
    vmsa.sev_features_mut().set_reflect_vc(true);
    vmsa.v_intr_cntrl_mut().set_guest_busy(true);
    vmsa.sev_features_mut().set_debug_swap(true);

    let vmpl = match vtl {
        GuestVtl::Vtl0 => Vmpl::Vmpl2,
        GuestVtl::Vtl1 => Vmpl::Vmpl1,
    };
    vmsa.set_vmpl(vmpl.into());

    vmsa.set_guest_error_code(SevExitCode::INTR.0);

    vmsa.set_efer(x86defs::X64_EFER_SVME);
}

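/// Adapter that gives the software local APIC access to the SNP VMSA: TPR is
/// backed by the virtual interrupt control, and EOIs/wakeups are forwarded to
/// the device bus and partition.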
struct SnpApicClient<'a, T> {
    partition: &'a UhPartitionInner,
    vmsa: VmsaWrapper<'a, &'a mut SevVmsa>,
    dev: &'a T,
    vmtime: &'a VmTimeAccess,
    vtl: GuestVtl,
}

impl<T: CpuIo> ApicClient for SnpApicClient<'_, T> {
    fn cr8(&mut self) -> u32 {
        self.vmsa.v_intr_cntrl().tpr().into()
    }

    fn set_cr8(&mut self, value: u32) {
        self.vmsa.v_intr_cntrl_mut().set_tpr(value as u8);
    }

    fn set_apic_base(&mut self, _value: u64) {}

    fn wake(&mut self, vp_index: VpIndex) {
        self.partition.vps[vp_index.index() as usize].wake(self.vtl, WakeReason::INTCON);
    }

    fn eoi(&mut self, vector: u8) {
        debug_assert_eq!(self.vtl, GuestVtl::Vtl0);
        self.dev.handle_eoi(vector.into())
    }

    fn now(&mut self) -> vmcore::vmtime::VmTime {
        self.vmtime.now()
    }

    fn pull_offload(&mut self) -> ([u32; 8], [u32; 8]) {
        unreachable!()
    }
}

impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
    const TRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
        Self,
        [
            hv1_hypercall::HvModifySparseGpaPageHostVisibility,
            hv1_hypercall::HvQuerySparseGpaPageHostVisibility,
            hv1_hypercall::HvX64StartVirtualProcessor,
            hv1_hypercall::HvGetVpIndexFromApicId,
            hv1_hypercall::HvGetVpRegisters,
            hv1_hypercall::HvEnablePartitionVtl,
            hv1_hypercall::HvRetargetDeviceInterrupt,
            hv1_hypercall::HvPostMessage,
            hv1_hypercall::HvSignalEvent,
            hv1_hypercall::HvX64EnableVpVtl,
            hv1_hypercall::HvExtQueryCapabilities,
            hv1_hypercall::HvVtlCall,
            hv1_hypercall::HvVtlReturn,
            hv1_hypercall::HvFlushVirtualAddressList,
            hv1_hypercall::HvFlushVirtualAddressListEx,
            hv1_hypercall::HvFlushVirtualAddressSpace,
            hv1_hypercall::HvFlushVirtualAddressSpaceEx,
            hv1_hypercall::HvSetVpRegisters,
            hv1_hypercall::HvModifyVtlProtectionMask,
            hv1_hypercall::HvX64TranslateVirtualAddress,
            hv1_hypercall::HvSendSyntheticClusterIpi,
            hv1_hypercall::HvSendSyntheticClusterIpiEx,
            hv1_hypercall::HvInstallIntercept,
            hv1_hypercall::HvAssertVirtualInterrupt,
        ],
    );

    const UNTRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
        Self,
        [hv1_hypercall::HvPostMessage, hv1_hypercall::HvSignalEvent],
    );
}

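/// `HypercallIo` implementation for hypercalls issued via the GHCB page (the
/// secure enlightened ABI): parameters live in guest memory and the completion
/// status is written back to the GHCB page rather than to guest registers.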
struct GhcbEnlightenedHypercall<'a, 'b, T> {
    handler: UhHypercallHandler<'a, 'b, T, SnpBacked>,
    control: u64,
    output_gpa: u64,
    input_gpa: u64,
    result: u64,
}

impl<'a, 'b, T> hv1_hypercall::AsHandler<UhHypercallHandler<'a, 'b, T, SnpBacked>>
    for &mut GhcbEnlightenedHypercall<'a, 'b, T>
{
    fn as_handler(&mut self) -> &mut UhHypercallHandler<'a, 'b, T, SnpBacked> {
        &mut self.handler
    }
}

impl<T> HypercallIo for GhcbEnlightenedHypercall<'_, '_, T> {
    fn advance_ip(&mut self) {
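        // Nothing to do: for the GHCB-based ABI, completion is signaled by the
        // result written back to the GHCB page, not by advancing the guest RIP.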
    }

    fn retry(&mut self, control: u64) {
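        // Report a timeout along with the number of rep elements already
        // processed so the guest reissues the call for the remaining elements.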
        let control = Control::from(control);
        self.set_result(
            HypercallOutput::from(HvError::Timeout)
                .with_elements_processed(control.rep_start())
                .into(),
        );
    }

    fn control(&mut self) -> u64 {
        self.control
    }

    fn input_gpa(&mut self) -> u64 {
        self.input_gpa
    }

    fn output_gpa(&mut self) -> u64 {
        self.output_gpa
    }

    fn fast_register_pair_count(&mut self) -> usize {
        0
    }

    fn extended_fast_hypercalls_ok(&mut self) -> bool {
        false
    }

    fn fast_input(&mut self, _buf: &mut [[u64; 2]], _output_register_pairs: usize) -> usize {
        unimplemented!("not supported for secure enlightened abi")
    }

    fn fast_output(&mut self, _starting_pair_index: usize, _buf: &[[u64; 2]]) {
        unimplemented!("not supported for secure enlightened abi")
    }

    fn vtl_input(&mut self) -> u64 {
        unimplemented!("not supported for secure enlightened abi")
    }

    fn set_result(&mut self, n: u64) {
        self.result = n;
    }

    fn fast_regs(&mut self, _starting_pair_index: usize, _buf: &mut [[u64; 2]]) {
        unimplemented!("not supported for secure enlightened abi")
    }
}

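// The software APIC delivers interrupts, NMIs, and SIPIs by programming the
// VMSA's virtual interrupt control and event injection fields directly.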
impl<'b> ApicBacking<'b, SnpBacked> for UhProcessor<'b, SnpBacked> {
    fn vp(&mut self) -> &mut UhProcessor<'b, SnpBacked> {
        self
    }

    fn handle_interrupt(&mut self, vtl: GuestVtl, vector: u8) {
        let mut vmsa = self.runner.vmsa_mut(vtl);
        vmsa.v_intr_cntrl_mut().set_vector(vector);
        vmsa.v_intr_cntrl_mut().set_priority((vector >> 4).into());
        vmsa.v_intr_cntrl_mut().set_ignore_tpr(false);
        vmsa.v_intr_cntrl_mut().set_irq(true);
        self.backing.cvm.lapics[vtl].activity = MpState::Running;
    }

    fn handle_nmi(&mut self, vtl: GuestVtl) {
        let mut vmsa = self.runner.vmsa_mut(vtl);

        vmsa.set_event_inject(
            SevEventInjectInfo::new()
                .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_NMI)
                .with_vector(2)
                .with_valid(true),
        );
        self.backing.cvm.lapics[vtl].nmi_pending = false;
        self.backing.cvm.lapics[vtl].activity = MpState::Running;
    }

    fn handle_sipi(&mut self, vtl: GuestVtl, cs: SegmentRegister) {
        let mut vmsa = self.runner.vmsa_mut(vtl);
        vmsa.set_cs(virt_seg_to_snp(cs));
        vmsa.set_rip(0);
        self.backing.cvm.lapics[vtl].activity = MpState::Running;
    }
}

impl UhProcessor<'_, SnpBacked> {
    fn handle_synic_deliverable_exit(&mut self) {
        let message = self
            .runner
            .exit_message()
            .as_message::<hvdef::HvX64SynicSintDeliverableMessage>();

        tracing::trace!(
            deliverable_sints = message.deliverable_sints,
            "sint deliverable"
        );

        self.backing.hv_sint_notifications &= !message.deliverable_sints;

        self.deliver_synic_messages(GuestVtl::Vtl0, message.deliverable_sints);
    }

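    /// Handles a VMGEXIT intercept: verifies that the GHCB page the guest used
    /// matches the registered overlay page, dispatches the enlightened
    /// hypercall described by the GHCB, and writes the result back to it.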
    fn handle_vmgexit(
        &mut self,
        dev: &impl CpuIo,
        intercepted_vtl: GuestVtl,
    ) -> Result<(), SnpGhcbError> {
        let message = self
            .runner
            .exit_message()
            .as_message::<hvdef::HvX64VmgexitInterceptMessage>();

        let ghcb_msr = x86defs::snp::GhcbMsr::from(message.ghcb_msr);
        tracing::trace!(?ghcb_msr, "vmgexit intercept");

        match x86defs::snp::GhcbInfo(ghcb_msr.info()) {
            x86defs::snp::GhcbInfo::NORMAL => {
                assert!(message.flags.ghcb_page_valid());
                let ghcb_pfn = ghcb_msr.pfn();

                let ghcb_overlay =
                    self.backing.cvm.direct_overlay_handle.pfns()[UhDirectOverlay::Ghcb as usize];

                if ghcb_pfn != ghcb_overlay {
                    tracelimit::warn_ratelimited!(
                        CVM_ALLOWED,
                        vmgexit_pfn = ghcb_pfn,
                        overlay_pfn = ghcb_overlay,
                        "ghcb page used for vmgexit does not match overlay page"
                    );

                    return Err(SnpGhcbError::GhcbMisconfiguration);
                }

                match x86defs::snp::GhcbUsage(message.ghcb_page.ghcb_usage) {
                    x86defs::snp::GhcbUsage::HYPERCALL => {
                        let guest_memory = &self.shared.cvm.shared_memory;
                        let overlay_base = ghcb_overlay * HV_PAGE_SIZE;
                        let x86defs::snp::GhcbHypercallParameters {
                            output_gpa,
                            input_control,
                        } = guest_memory
                            .read_plain(
                                overlay_base
                                    + x86defs::snp::GHCB_PAGE_HYPERCALL_PARAMETERS_OFFSET as u64,
                            )
                            .map_err(SnpGhcbError::GhcbPageAccess)?;

                        let mut handler = GhcbEnlightenedHypercall {
                            handler: UhHypercallHandler {
                                vp: self,
                                bus: dev,
                                trusted: false,
                                intercepted_vtl,
                            },
                            control: input_control,
                            output_gpa,
                            input_gpa: overlay_base,
                            result: 0,
                        };

                        UhHypercallHandler::UNTRUSTED_DISPATCHER
                            .dispatch(guest_memory, &mut handler);

                        guest_memory
                            .write_at(
                                overlay_base
                                    + x86defs::snp::GHCB_PAGE_HYPERCALL_OUTPUT_OFFSET as u64,
                                handler.result.as_bytes(),
                            )
                            .map_err(SnpGhcbError::GhcbPageAccess)?;
                    }
                    usage => unimplemented!("ghcb usage {usage:?}"),
                }
            }
            info => unimplemented!("ghcb info {info:?}"),
        }

        Ok(())
    }

    fn handle_msr_access(
        &mut self,
        dev: &impl CpuIo,
        entered_from_vtl: GuestVtl,
        msr: u32,
        is_write: bool,
    ) {
        if is_write && self.cvm_try_protect_msr_write(entered_from_vtl, msr) {
            return;
        }

        let vmsa = self.runner.vmsa_mut(entered_from_vtl);
        let gp = if is_write {
            let value = (vmsa.rax() as u32 as u64) | ((vmsa.rdx() as u32 as u64) << 32);

            let r = self.backing.cvm.lapics[entered_from_vtl]
                .lapic
                .access(&mut SnpApicClient {
                    partition: self.partition,
                    vmsa,
                    dev,
                    vmtime: &self.vmtime,
                    vtl: entered_from_vtl,
                })
                .msr_write(msr, value)
                .or_else_if_unknown(|| self.write_msr_cvm(msr, value, entered_from_vtl))
                .or_else_if_unknown(|| self.write_msr_snp(dev, msr, value, entered_from_vtl));

            match r {
                Ok(()) => false,
                Err(MsrError::Unknown) => {
                    tracing::debug!(msr, value, "unknown cvm msr write");
                    false
                }
                Err(MsrError::InvalidAccess) => true,
            }
        } else {
            let r = self.backing.cvm.lapics[entered_from_vtl]
                .lapic
                .access(&mut SnpApicClient {
                    partition: self.partition,
                    vmsa,
                    dev,
                    vmtime: &self.vmtime,
                    vtl: entered_from_vtl,
                })
                .msr_read(msr)
                .or_else_if_unknown(|| self.read_msr_cvm(msr, entered_from_vtl))
                .or_else_if_unknown(|| self.read_msr_snp(dev, msr, entered_from_vtl));

            let value = match r {
                Ok(v) => Some(v),
                Err(MsrError::Unknown) => {
                    tracing::debug!(msr, "unknown cvm msr read");
                    Some(0)
                }
                Err(MsrError::InvalidAccess) => None,
            };

            if let Some(value) = value {
                let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
                vmsa.set_rax((value as u32).into());
                vmsa.set_rdx(((value >> 32) as u32).into());
                false
            } else {
                true
            }
        };

        let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
        if gp {
            vmsa.set_event_inject(
                SevEventInjectInfo::new()
                    .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                    .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
                    .with_deliver_error_code(true)
                    .with_valid(true),
            );
        } else {
            advance_to_next_instruction(&mut vmsa);
        }
    }

    fn handle_xsetbv(&mut self, entered_from_vtl: GuestVtl) {
        let vmsa = self.runner.vmsa(entered_from_vtl);
        if let Some(value) = hardware_cvm::validate_xsetbv_exit(hardware_cvm::XsetbvExitInput {
            rax: vmsa.rax(),
            rcx: vmsa.rcx(),
            rdx: vmsa.rdx(),
            cr4: vmsa.cr4(),
            cpl: vmsa.cpl(),
        }) {
            if !self.cvm_try_protect_secure_register_write(
                entered_from_vtl,
                HvX64RegisterName::Xfem,
                value,
            ) {
                let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
                vmsa.set_xcr0(value);
                advance_to_next_instruction(&mut vmsa);
            }
        } else {
            let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
            vmsa.set_event_inject(
                SevEventInjectInfo::new()
                    .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                    .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
                    .with_deliver_error_code(true)
                    .with_valid(true),
            );
        }
    }

    fn handle_crx_intercept(&mut self, entered_from_vtl: GuestVtl, reg: HvX64RegisterName) {
        let vmsa = self.runner.vmsa(entered_from_vtl);
        let mov_crx_drx = x86defs::snp::MovCrxDrxInfo::from(vmsa.exit_info1());
        let reg_value = {
            let gpr_name =
                HvX64RegisterName(HvX64RegisterName::Rax.0 + mov_crx_drx.gpr_number() as u32);

            match gpr_name {
                HvX64RegisterName::Rax => vmsa.rax(),
                HvX64RegisterName::Rbx => vmsa.rbx(),
                HvX64RegisterName::Rcx => vmsa.rcx(),
                HvX64RegisterName::Rdx => vmsa.rdx(),
                HvX64RegisterName::Rsp => vmsa.rsp(),
                HvX64RegisterName::Rbp => vmsa.rbp(),
                HvX64RegisterName::Rsi => vmsa.rsi(),
                HvX64RegisterName::Rdi => vmsa.rdi(),
                HvX64RegisterName::R8 => vmsa.r8(),
                HvX64RegisterName::R9 => vmsa.r9(),
                HvX64RegisterName::R10 => vmsa.r10(),
                HvX64RegisterName::R11 => vmsa.r11(),
                HvX64RegisterName::R12 => vmsa.r12(),
                HvX64RegisterName::R13 => vmsa.r13(),
                HvX64RegisterName::R14 => vmsa.r14(),
                HvX64RegisterName::R15 => vmsa.r15(),
                _ => unreachable!("unexpected register"),
            }
        };

        if !mov_crx_drx.mov_crx() {
            tracelimit::warn_ratelimited!(
                CVM_ALLOWED,
                "Intercepted crx access, instruction is not mov crx"
            );
            return;
        }

        if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, reg_value) {
            let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
            match reg {
                HvX64RegisterName::Cr0 => vmsa.set_cr0(reg_value),
                HvX64RegisterName::Cr4 => vmsa.set_cr4(reg_value),
                _ => unreachable!(),
            }
            advance_to_next_instruction(&mut vmsa);
        }
    }

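    /// Arms the lazy EOI flag in this VTL's hypervisor state if the APIC has a
    /// lazy EOI pending. Returns whether it was armed so the caller knows to
    /// clear it after running the VP.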
    #[must_use]
    fn sync_lazy_eoi(&mut self, vtl: GuestVtl) -> bool {
        if self.backing.cvm.lapics[vtl].lapic.is_lazy_eoi_pending() {
            return self.backing.cvm.hv[vtl].set_lazy_eoi();
        }

        false
    }

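    /// Runs the VP once: enters the guest at the VTL recorded in `exit_vtl`,
    /// then handles the resulting SEV exit code and re-arms the VMSA for the
    /// next iteration.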
    async fn run_vp_snp(&mut self, dev: &impl CpuIo) -> Result<(), VpHaltReason> {
        let next_vtl = self.backing.cvm.exit_vtl;

        let mut vmsa = self.runner.vmsa_mut(next_vtl);
        let last_interrupt_ctrl = vmsa.v_intr_cntrl();

        if vmsa.sev_features().alternate_injection() {
            vmsa.v_intr_cntrl_mut().set_guest_busy(false);
        }

        self.unlock_tlb_lock(Vtl::Vtl2);
        let tlb_halt = self.should_halt_for_tlb_unlock(next_vtl);

        let halt = self.backing.cvm.lapics[next_vtl].activity != MpState::Running || tlb_halt;

        if halt && next_vtl == GuestVtl::Vtl1 && !tlb_halt {
            tracelimit::warn_ratelimited!(CVM_ALLOWED, "halting VTL 1, which might halt the guest");
        }

        self.runner.set_halted(halt);

        self.runner.set_exit_vtl(next_vtl);

        let lazy_eoi = self.sync_lazy_eoi(next_vtl);

        let mut has_intercept = self
            .runner
            .run()
            .map_err(|e| VpHaltReason::Hypervisor(SnpRunVpError(e).into()))?;

        let entered_from_vtl = next_vtl;
        let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);

        let inject = if vmsa.sev_features().alternate_injection() {
            if vmsa.v_intr_cntrl().guest_busy() {
                self.backing.general_stats[entered_from_vtl]
                    .guest_busy
                    .increment();
                let exit_int_info = SevEventInjectInfo::from(vmsa.exit_int_info());
                assert!(
                    exit_int_info.valid(),
                    "event inject info should be valid {exit_int_info:x?}"
                );

                match exit_int_info.interruption_type() {
                    x86defs::snp::SEV_INTR_TYPE_EXCEPT => {
                        if exit_int_info.vector() != 3 && exit_int_info.vector() != 4 {
                            Some(exit_int_info)
                        } else {
                            None
                        }
                    }
                    x86defs::snp::SEV_INTR_TYPE_SW => None,
                    _ => Some(exit_int_info),
                }
            } else {
                None
            }
        } else {
            unimplemented!("Only alternate injection is supported for SNP")
        };

        if let Some(inject) = inject {
            vmsa.set_event_inject(inject);
        }
        if vmsa.sev_features().alternate_injection() {
            vmsa.v_intr_cntrl_mut().set_guest_busy(true);
        }

        if last_interrupt_ctrl.irq() && !vmsa.v_intr_cntrl().irq() {
            self.backing.general_stats[entered_from_vtl]
                .int_ack
                .increment();
            self.backing.cvm.lapics[entered_from_vtl]
                .lapic
                .acknowledge_interrupt(last_interrupt_ctrl.vector());
        }

        vmsa.v_intr_cntrl_mut().set_irq(false);

        if lazy_eoi && self.backing.cvm.hv[entered_from_vtl].clear_lazy_eoi() {
            self.backing.cvm.lapics[entered_from_vtl]
                .lapic
                .access(&mut SnpApicClient {
                    partition: self.partition,
                    vmsa,
                    dev,
                    vmtime: &self.vmtime,
                    vtl: entered_from_vtl,
                })
                .lazy_eoi();
        }

        let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
        let sev_error_code = SevExitCode(vmsa.guest_error_code());

        let stat = match sev_error_code {
            SevExitCode::CPUID => {
                let leaf = vmsa.rax() as u32;
                let subleaf = vmsa.rcx() as u32;
                let [eax, ebx, ecx, edx] = self.cvm_cpuid_result(entered_from_vtl, leaf, subleaf);
                let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
                vmsa.set_rax(eax.into());
                vmsa.set_rbx(ebx.into());
                vmsa.set_rcx(ecx.into());
                vmsa.set_rdx(edx.into());
                advance_to_next_instruction(&mut vmsa);
                &mut self.backing.exit_stats[entered_from_vtl].cpuid
            }

            SevExitCode::MSR => {
                let is_write = vmsa.exit_info1() & 1 != 0;
                let msr = vmsa.rcx() as u32;

                self.handle_msr_access(dev, entered_from_vtl, msr, is_write);

                if is_write {
                    &mut self.backing.exit_stats[entered_from_vtl].msr_write
                } else {
                    &mut self.backing.exit_stats[entered_from_vtl].msr_read
                }
            }

            SevExitCode::IOIO => {
                let io_info =
                    SevIoAccessInfo::from(self.runner.vmsa(entered_from_vtl).exit_info1() as u32);

                let access_size = if io_info.access_size32() {
                    4
                } else if io_info.access_size16() {
                    2
                } else {
                    1
                };

                let port_access_protected = self.cvm_try_protect_io_port_access(
                    entered_from_vtl,
                    io_info.port(),
                    io_info.read_access(),
                    access_size,
                    io_info.string_access(),
                    io_info.rep_access(),
                );

                let vmsa = self.runner.vmsa(entered_from_vtl);
                if !port_access_protected {
                    if io_info.string_access() || io_info.rep_access() {
                        let interruption_pending = vmsa.event_inject().valid()
                            || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();

                        self.emulate(dev, interruption_pending, entered_from_vtl, ())
                            .await?;
                    } else {
                        let mut rax = vmsa.rax();
                        emulate_io(
                            self.inner.vp_info.base.vp_index,
                            !io_info.read_access(),
                            io_info.port(),
                            &mut rax,
                            access_size,
                            dev,
                        )
                        .await;

                        let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
                        vmsa.set_rax(rax);
                        advance_to_next_instruction(&mut vmsa);
                    }
                }
                &mut self.backing.exit_stats[entered_from_vtl].ioio
            }

            SevExitCode::VMMCALL => {
                let is_64bit = self.long_mode(entered_from_vtl);
                let guest_memory = &self.partition.gm[entered_from_vtl];
                let handler = UhHypercallHandler {
                    trusted: !self.cvm_partition().hide_isolation,
                    vp: &mut *self,
                    bus: dev,
                    intercepted_vtl: entered_from_vtl,
                };

                UhHypercallHandler::TRUSTED_DISPATCHER.dispatch(
                    guest_memory,
                    hv1_hypercall::X64RegisterIo::new(handler, is_64bit),
                );
                &mut self.backing.exit_stats[entered_from_vtl].vmmcall
            }

            SevExitCode::SHUTDOWN => {
                return Err(VpHaltReason::TripleFault {
                    vtl: entered_from_vtl.into(),
                });
            }

            SevExitCode::WBINVD | SevExitCode::INVD => {
                advance_to_next_instruction(&mut vmsa);
                &mut self.backing.exit_stats[entered_from_vtl].invd
            }

            SevExitCode::NPF if has_intercept => {
                let gpa = vmsa.exit_info2();
                let interruption_pending = vmsa.event_inject().valid()
                    || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();
                let exit_info = SevNpfInfo::from(vmsa.exit_info1());
                let exit_message = self.runner.exit_message();
                let real = match exit_message.header.typ {
                    HvMessageType::HvMessageTypeExceptionIntercept => {
                        let exception_message =
                            exit_message.as_message::<hvdef::HvX64ExceptionInterceptMessage>();

                        exception_message.vector
                            == x86defs::Exception::SEV_VMM_COMMUNICATION.0 as u16
                    }
                    HvMessageType::HvMessageTypeUnmappedGpa
                    | HvMessageType::HvMessageTypeGpaIntercept
                    | HvMessageType::HvMessageTypeUnacceptedGpa => {
                        let gpa_message =
                            exit_message.as_message::<hvdef::HvX64MemoryInterceptMessage>();

                        (gpa_message.guest_physical_address >> hvdef::HV_PAGE_SHIFT)
                            == (gpa >> hvdef::HV_PAGE_SHIFT)
                    }
                    _ => false,
                };

                if real {
                    has_intercept = false;
                    if self.check_mem_fault(entered_from_vtl, gpa, exit_info.is_write(), exit_info)
                    {
                        self.emulate(dev, interruption_pending, entered_from_vtl, ())
                            .await?;
                    }
                    &mut self.backing.exit_stats[entered_from_vtl].npf
                } else {
                    &mut self.backing.exit_stats[entered_from_vtl].npf_spurious
                }
            }

            SevExitCode::NPF => &mut self.backing.exit_stats[entered_from_vtl].npf_no_intercept,

            SevExitCode::HLT => {
                self.backing.cvm.lapics[entered_from_vtl].activity = MpState::Halted;
                vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
                &mut self.backing.exit_stats[entered_from_vtl].hlt
            }

            SevExitCode::INVALID_VMCB => {
                return Err(VpHaltReason::InvalidVmState(InvalidVmcb.into()));
            }

            SevExitCode::INVLPGB | SevExitCode::ILLEGAL_INVLPGB => {
                vmsa.set_event_inject(
                    SevEventInjectInfo::new()
                        .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                        .with_vector(x86defs::Exception::INVALID_OPCODE.0)
                        .with_valid(true),
                );
                &mut self.backing.exit_stats[entered_from_vtl].invlpgb
            }

            SevExitCode::RDPMC => {
                let cr4 = vmsa.cr4();
                if ((vmsa.cpl() > 0) && (cr4 & x86defs::X64_CR4_PCE == 0))
                    || (vmsa.rcx() as u32 >= 4)
                {
                    vmsa.set_event_inject(
                        SevEventInjectInfo::new()
                            .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                            .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
                            .with_deliver_error_code(true)
                            .with_valid(true),
                    );
                } else {
                    vmsa.set_rax(0);
                    vmsa.set_rdx(0);
                    advance_to_next_instruction(&mut vmsa);
                }
                &mut self.backing.exit_stats[entered_from_vtl].rdpmc
            }

            SevExitCode::VMGEXIT if has_intercept => {
                has_intercept = false;
                match self.runner.exit_message().header.typ {
                    HvMessageType::HvMessageTypeX64SevVmgexitIntercept => {
                        self.handle_vmgexit(dev, entered_from_vtl)
                            .map_err(|e| VpHaltReason::InvalidVmState(e.into()))?;
                    }
                    _ => has_intercept = true,
                }
                &mut self.backing.exit_stats[entered_from_vtl].vmgexit
            }

            SevExitCode::NMI
            | SevExitCode::PAUSE
            | SevExitCode::SMI
            | SevExitCode::VMGEXIT
            | SevExitCode::BUSLOCK
            | SevExitCode::IDLE_HLT => {
                &mut self.backing.exit_stats[entered_from_vtl].automatic_exit
            }

            SevExitCode::VINTR => {
                unimplemented!("SevExitCode::VINTR");
            }

            SevExitCode::INTR => {
                &mut self.backing.exit_stats[entered_from_vtl].intr
            }

            SevExitCode::XSETBV => {
                self.handle_xsetbv(entered_from_vtl);
                &mut self.backing.exit_stats[entered_from_vtl].xsetbv
            }

            SevExitCode::EXCP_DB => &mut self.backing.exit_stats[entered_from_vtl].excp_db,

            SevExitCode::CR0_WRITE => {
                self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr0);
                &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
            }
            SevExitCode::CR4_WRITE => {
                self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr4);
                &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
            }

            tr_exit_code @ (SevExitCode::GDTR_WRITE
            | SevExitCode::IDTR_WRITE
            | SevExitCode::LDTR_WRITE
            | SevExitCode::TR_WRITE) => {
                let reg = match tr_exit_code {
                    SevExitCode::GDTR_WRITE => HvX64RegisterName::Gdtr,
                    SevExitCode::IDTR_WRITE => HvX64RegisterName::Idtr,
                    SevExitCode::LDTR_WRITE => HvX64RegisterName::Ldtr,
                    SevExitCode::TR_WRITE => HvX64RegisterName::Tr,
                    _ => unreachable!(),
                };

                if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, 0) {
                    panic!("unexpected secure register");
                }

                &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
            }

            _ => {
                tracing::error!(
                    CVM_CONFIDENTIAL,
                    "SEV exit code {sev_error_code:x?} sev features {:x?} v_intr_control {:x?} event inject {:x?} \
                    vmpl {:x?} cpl {:x?} exit_info1 {:x?} exit_info2 {:x?} exit_int_info {:x?} virtual_tom {:x?} \
                    efer {:x?} cr4 {:x?} cr3 {:x?} cr0 {:x?} rflag {:x?} rip {:x?} next rip {:x?}",
                    vmsa.sev_features(),
                    vmsa.v_intr_cntrl(),
                    vmsa.event_inject(),
                    vmsa.vmpl(),
                    vmsa.cpl(),
                    vmsa.exit_info1(),
                    vmsa.exit_info2(),
                    vmsa.exit_int_info(),
                    vmsa.virtual_tom(),
                    vmsa.efer(),
                    vmsa.cr4(),
                    vmsa.cr3(),
                    vmsa.cr0(),
                    vmsa.rflags(),
                    vmsa.rip(),
                    vmsa.next_rip(),
                );
                panic!("Received unexpected SEV exit code {sev_error_code:x?}");
            }
        };
        stat.increment();

        if cfg!(feature = "gdb") && sev_error_code == SevExitCode::EXCP_DB {
            return self.handle_debug_exception(entered_from_vtl);
        }

        if has_intercept {
            self.backing.general_stats[entered_from_vtl]
                .synth_int
                .increment();
            match self.runner.exit_message().header.typ {
                HvMessageType::HvMessageTypeSynicSintDeliverable => {
                    self.handle_synic_deliverable_exit();
                }
                HvMessageType::HvMessageTypeX64Halt
                | HvMessageType::HvMessageTypeExceptionIntercept => {}
                message_type => {
                    tracelimit::error_ratelimited!(
                        CVM_ALLOWED,
                        ?message_type,
                        "unknown synthetic exit"
                    );
                }
            }
        }

        self.runner
            .vmsa_mut(entered_from_vtl)
            .set_guest_error_code(SevExitCode::INTR.0);
        Ok(())
    }

    fn long_mode(&self, vtl: GuestVtl) -> bool {
        let vmsa = self.runner.vmsa(vtl);
        vmsa.cr0() & x86defs::X64_CR0_PE != 0 && vmsa.efer() & x86defs::X64_EFER_LMA != 0
    }
}

impl<T: CpuIo> X86EmulatorSupport for UhEmulationState<'_, '_, T, SnpBacked> {
    fn flush(&mut self) {
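        // No cached register state: emulator writes go straight to the VMSA.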
    }

    fn vp_index(&self) -> VpIndex {
        self.vp.vp_index()
    }

    fn vendor(&self) -> x86defs::cpuid::Vendor {
        self.vp.partition.caps.vendor
    }

    fn gp(&mut self, reg: x86emu::Gp) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        match reg {
            x86emu::Gp::RAX => vmsa.rax(),
            x86emu::Gp::RCX => vmsa.rcx(),
            x86emu::Gp::RDX => vmsa.rdx(),
            x86emu::Gp::RBX => vmsa.rbx(),
            x86emu::Gp::RSP => vmsa.rsp(),
            x86emu::Gp::RBP => vmsa.rbp(),
            x86emu::Gp::RSI => vmsa.rsi(),
            x86emu::Gp::RDI => vmsa.rdi(),
            x86emu::Gp::R8 => vmsa.r8(),
            x86emu::Gp::R9 => vmsa.r9(),
            x86emu::Gp::R10 => vmsa.r10(),
            x86emu::Gp::R11 => vmsa.r11(),
            x86emu::Gp::R12 => vmsa.r12(),
            x86emu::Gp::R13 => vmsa.r13(),
            x86emu::Gp::R14 => vmsa.r14(),
            x86emu::Gp::R15 => vmsa.r15(),
        }
    }

    fn set_gp(&mut self, reg: x86emu::Gp, v: u64) {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        match reg {
            x86emu::Gp::RAX => vmsa.set_rax(v),
            x86emu::Gp::RCX => vmsa.set_rcx(v),
            x86emu::Gp::RDX => vmsa.set_rdx(v),
            x86emu::Gp::RBX => vmsa.set_rbx(v),
            x86emu::Gp::RSP => vmsa.set_rsp(v),
            x86emu::Gp::RBP => vmsa.set_rbp(v),
            x86emu::Gp::RSI => vmsa.set_rsi(v),
            x86emu::Gp::RDI => vmsa.set_rdi(v),
            x86emu::Gp::R8 => vmsa.set_r8(v),
            x86emu::Gp::R9 => vmsa.set_r9(v),
            x86emu::Gp::R10 => vmsa.set_r10(v),
            x86emu::Gp::R11 => vmsa.set_r11(v),
            x86emu::Gp::R12 => vmsa.set_r12(v),
            x86emu::Gp::R13 => vmsa.set_r13(v),
            x86emu::Gp::R14 => vmsa.set_r14(v),
            x86emu::Gp::R15 => vmsa.set_r15(v),
        };
    }

    fn xmm(&mut self, index: usize) -> u128 {
        self.vp.runner.vmsa_mut(self.vtl).xmm_registers(index)
    }

    fn set_xmm(&mut self, index: usize, v: u128) {
        self.vp
            .runner
            .vmsa_mut(self.vtl)
            .set_xmm_registers(index, v);
    }

    fn rip(&mut self) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        vmsa.rip()
    }

    fn set_rip(&mut self, v: u64) {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        vmsa.set_rip(v);
    }

    fn segment(&mut self, index: x86emu::Segment) -> x86defs::SegmentRegister {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        match index {
            x86emu::Segment::ES => virt_seg_from_snp(vmsa.es()),
            x86emu::Segment::CS => virt_seg_from_snp(vmsa.cs()),
            x86emu::Segment::SS => virt_seg_from_snp(vmsa.ss()),
            x86emu::Segment::DS => virt_seg_from_snp(vmsa.ds()),
            x86emu::Segment::FS => virt_seg_from_snp(vmsa.fs()),
            x86emu::Segment::GS => virt_seg_from_snp(vmsa.gs()),
        }
        .into()
    }

    fn efer(&mut self) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        vmsa.efer()
    }

    fn cr0(&mut self) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        vmsa.cr0()
    }

    fn rflags(&mut self) -> RFlags {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        vmsa.rflags().into()
    }

    fn set_rflags(&mut self, v: RFlags) {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        vmsa.set_rflags(v.into());
    }

    fn instruction_bytes(&self) -> &[u8] {
        &[]
    }

    fn physical_address(&self) -> Option<u64> {
        Some(self.vp.runner.vmsa(self.vtl).exit_info2())
    }

    fn initial_gva_translation(
        &mut self,
    ) -> Option<virt_support_x86emu::emulate::InitialTranslation> {
        None
    }

    fn interruption_pending(&self) -> bool {
        self.interruption_pending
    }

    fn check_vtl_access(
        &mut self,
        _gpa: u64,
        _mode: virt_support_x86emu::emulate::TranslateMode,
    ) -> Result<(), virt_support_x86emu::emulate::EmuCheckVtlAccessError> {
        Ok(())
    }

    fn translate_gva(
        &mut self,
        gva: u64,
        mode: virt_support_x86emu::emulate::TranslateMode,
    ) -> Result<
        virt_support_x86emu::emulate::EmuTranslateResult,
        virt_support_x86emu::emulate::EmuTranslateError,
    > {
        emulate_translate_gva(self, gva, mode)
    }

    fn inject_pending_event(&mut self, event_info: hvdef::HvX64PendingEvent) {
        assert!(event_info.reg_0.event_pending());
        assert_eq!(
            event_info.reg_0.event_type(),
            hvdef::HV_X64_PENDING_EVENT_EXCEPTION
        );

        let exception = HvX64PendingExceptionEvent::from(event_info.reg_0.into_bits());
        assert!(!self.interruption_pending);

        SnpBacked::set_pending_exception(self.vp, self.vtl, exception);
    }

    fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool {
        let vtom = self.vp.partition.caps.vtom.unwrap();
        debug_assert!(vtom == 0 || vtom.is_power_of_two());
        self.vp.partition.is_gpa_mapped(gpa & !vtom, write)
    }

    fn lapic_base_address(&self) -> Option<u64> {
        self.vp.backing.cvm.lapics[self.vtl].lapic.base_address()
    }

    fn lapic_read(&mut self, address: u64, data: &mut [u8]) {
        let vtl = self.vtl;
        self.vp.backing.cvm.lapics[vtl]
            .lapic
            .access(&mut SnpApicClient {
                partition: self.vp.partition,
                vmsa: self.vp.runner.vmsa_mut(vtl),
                dev: self.devices,
                vmtime: &self.vp.vmtime,
                vtl,
            })
            .mmio_read(address, data);
    }

    fn lapic_write(&mut self, address: u64, data: &[u8]) {
        let vtl = self.vtl;
        self.vp.backing.cvm.lapics[vtl]
            .lapic
            .access(&mut SnpApicClient {
                partition: self.vp.partition,
                vmsa: self.vp.runner.vmsa_mut(vtl),
                dev: self.devices,
                vmtime: &self.vp.vmtime,
                vtl,
            })
            .mmio_write(address, data);
    }

    fn monitor_support(&self) -> Option<&dyn EmulatorMonitorSupport> {
        Some(self)
    }
}

impl<T> hv1_hypercall::X64RegisterState for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn rip(&mut self) -> u64 {
        self.vp.runner.vmsa(self.intercepted_vtl).rip()
    }

    fn set_rip(&mut self, rip: u64) {
        self.vp.runner.vmsa_mut(self.intercepted_vtl).set_rip(rip);
    }

    fn gp(&mut self, n: hv1_hypercall::X64HypercallRegister) -> u64 {
        let vmsa = self.vp.runner.vmsa(self.intercepted_vtl);
        match n {
            hv1_hypercall::X64HypercallRegister::Rax => vmsa.rax(),
            hv1_hypercall::X64HypercallRegister::Rcx => vmsa.rcx(),
            hv1_hypercall::X64HypercallRegister::Rdx => vmsa.rdx(),
            hv1_hypercall::X64HypercallRegister::Rbx => vmsa.rbx(),
            hv1_hypercall::X64HypercallRegister::Rsi => vmsa.rsi(),
            hv1_hypercall::X64HypercallRegister::Rdi => vmsa.rdi(),
            hv1_hypercall::X64HypercallRegister::R8 => vmsa.r8(),
        }
    }

    fn set_gp(&mut self, n: hv1_hypercall::X64HypercallRegister, value: u64) {
        let mut vmsa = self.vp.runner.vmsa_mut(self.intercepted_vtl);
        match n {
            hv1_hypercall::X64HypercallRegister::Rax => vmsa.set_rax(value),
            hv1_hypercall::X64HypercallRegister::Rcx => vmsa.set_rcx(value),
            hv1_hypercall::X64HypercallRegister::Rdx => vmsa.set_rdx(value),
            hv1_hypercall::X64HypercallRegister::Rbx => vmsa.set_rbx(value),
            hv1_hypercall::X64HypercallRegister::Rsi => vmsa.set_rsi(value),
            hv1_hypercall::X64HypercallRegister::Rdi => vmsa.set_rdi(value),
            hv1_hypercall::X64HypercallRegister::R8 => vmsa.set_r8(value),
        }
    }

    fn xmm(&mut self, n: usize) -> u128 {
        self.vp.runner.vmsa(self.intercepted_vtl).xmm_registers(n)
    }

    fn set_xmm(&mut self, n: usize, value: u128) {
        self.vp
            .runner
            .vmsa_mut(self.intercepted_vtl)
            .set_xmm_registers(n, value);
    }
}

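// Save/restore access to the VP's architectural state, backed by the VMSA and
// the software APIC.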
1930impl AccessVpState for UhVpStateAccess<'_, '_, SnpBacked> {
1931 type Error = vp_state::Error;
1932
1933 fn caps(&self) -> &virt::x86::X86PartitionCapabilities {
1934 &self.vp.partition.caps
1935 }
1936
1937 fn commit(&mut self) -> Result<(), Self::Error> {
1938 Ok(())
1939 }
1940
1941 fn registers(&mut self) -> Result<vp::Registers, Self::Error> {
1942 let vmsa = self.vp.runner.vmsa(self.vtl);
1943
1944 Ok(vp::Registers {
1945 rax: vmsa.rax(),
1946 rcx: vmsa.rcx(),
1947 rdx: vmsa.rdx(),
1948 rbx: vmsa.rbx(),
1949 rsp: vmsa.rsp(),
1950 rbp: vmsa.rbp(),
1951 rsi: vmsa.rsi(),
1952 rdi: vmsa.rdi(),
1953 r8: vmsa.r8(),
1954 r9: vmsa.r9(),
1955 r10: vmsa.r10(),
1956 r11: vmsa.r11(),
1957 r12: vmsa.r12(),
1958 r13: vmsa.r13(),
1959 r14: vmsa.r14(),
1960 r15: vmsa.r15(),
1961 rip: vmsa.rip(),
1962 rflags: vmsa.rflags(),
1963 cs: virt_seg_from_snp(vmsa.cs()),
1964 ds: virt_seg_from_snp(vmsa.ds()),
1965 es: virt_seg_from_snp(vmsa.es()),
1966 fs: virt_seg_from_snp(vmsa.fs()),
1967 gs: virt_seg_from_snp(vmsa.gs()),
1968 ss: virt_seg_from_snp(vmsa.ss()),
1969 tr: virt_seg_from_snp(vmsa.tr()),
1970 ldtr: virt_seg_from_snp(vmsa.ldtr()),
1971 gdtr: virt_table_from_snp(vmsa.gdtr()),
1972 idtr: virt_table_from_snp(vmsa.idtr()),
1973 cr0: vmsa.cr0(),
1974 cr2: vmsa.cr2(),
1975 cr3: vmsa.cr3(),
1976 cr4: vmsa.cr4(),
1977 cr8: vmsa.v_intr_cntrl().tpr().into(),
1978 efer: vmsa.efer(),
1979 })
1980 }
1981
1982 fn set_registers(&mut self, value: &vp::Registers) -> Result<(), Self::Error> {
1983 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1984
1985 let vp::Registers {
1986 rax,
1987 rcx,
1988 rdx,
1989 rbx,
1990 rsp,
1991 rbp,
1992 rsi,
1993 rdi,
1994 r8,
1995 r9,
1996 r10,
1997 r11,
1998 r12,
1999 r13,
2000 r14,
2001 r15,
2002 rip,
2003 rflags,
2004 cs,
2005 ds,
2006 es,
2007 fs,
2008 gs,
2009 ss,
2010 tr,
2011 ldtr,
2012 gdtr,
2013 idtr,
2014 cr0,
2015 cr2,
2016 cr3,
2017 cr4,
2018 cr8,
2019 efer,
2020 } = *value;
2021 vmsa.set_rax(rax);
2022 vmsa.set_rcx(rcx);
2023 vmsa.set_rdx(rdx);
2024 vmsa.set_rbx(rbx);
2025 vmsa.set_rsp(rsp);
2026 vmsa.set_rbp(rbp);
2027 vmsa.set_rsi(rsi);
2028 vmsa.set_rdi(rdi);
2029 vmsa.set_r8(r8);
2030 vmsa.set_r9(r9);
2031 vmsa.set_r10(r10);
2032 vmsa.set_r11(r11);
2033 vmsa.set_r12(r12);
2034 vmsa.set_r13(r13);
2035 vmsa.set_r14(r14);
2036 vmsa.set_r15(r15);
2037 vmsa.set_rip(rip);
2038 vmsa.set_rflags(rflags);
2039 vmsa.set_cs(virt_seg_to_snp(cs));
2040 vmsa.set_ds(virt_seg_to_snp(ds));
2041 vmsa.set_es(virt_seg_to_snp(es));
2042 vmsa.set_fs(virt_seg_to_snp(fs));
2043 vmsa.set_gs(virt_seg_to_snp(gs));
2044 vmsa.set_ss(virt_seg_to_snp(ss));
2045 vmsa.set_tr(virt_seg_to_snp(tr));
2046 vmsa.set_ldtr(virt_seg_to_snp(ldtr));
2047 vmsa.set_gdtr(virt_table_to_snp(gdtr));
2048 vmsa.set_idtr(virt_table_to_snp(idtr));
2049 vmsa.set_cr0(cr0);
2050 vmsa.set_cr2(cr2);
2051 vmsa.set_cr3(cr3);
2052 vmsa.set_cr4(cr4);
2053 vmsa.v_intr_cntrl_mut().set_tpr(cr8 as u8);
2054 vmsa.set_efer(SnpBacked::calculate_efer(efer, cr0));
2055 Ok(())
2056 }
2057
2058 fn activity(&mut self) -> Result<vp::Activity, Self::Error> {
2059 let lapic = &self.vp.backing.cvm.lapics[self.vtl];
2060
2061 Ok(vp::Activity {
2062 mp_state: lapic.activity,
2063 nmi_pending: lapic.nmi_pending,
2064 nmi_masked: false, interrupt_shadow: false, pending_event: None, pending_interruption: None, })
2069 }
2070
2071 fn set_activity(&mut self, value: &vp::Activity) -> Result<(), Self::Error> {
2072 let &vp::Activity {
2073 mp_state,
2074 nmi_pending,
2075 nmi_masked: _, interrupt_shadow: _, pending_event: _, pending_interruption: _, } = value;
2080 let lapic = &mut self.vp.backing.cvm.lapics[self.vtl];
2081 lapic.activity = mp_state;
2082 lapic.nmi_pending = nmi_pending;
2083
2084 Ok(())
2085 }
2086
    fn xsave(&mut self) -> Result<vp::Xsave, Self::Error> {
        Err(vp_state::Error::Unimplemented("xsave"))
    }

    fn set_xsave(&mut self, _value: &vp::Xsave) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("xsave"))
    }

    fn apic(&mut self) -> Result<vp::Apic, Self::Error> {
        Ok(self.vp.backing.cvm.lapics[self.vtl].lapic.save())
    }

    fn set_apic(&mut self, value: &vp::Apic) -> Result<(), Self::Error> {
        self.vp.backing.cvm.lapics[self.vtl]
            .lapic
            .restore(value)
            .map_err(vp_state::Error::InvalidApicBase)?;
        Ok(())
    }

    fn xcr(&mut self) -> Result<vp::Xcr0, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Xcr0 { value: vmsa.xcr0() })
    }

    fn set_xcr(&mut self, value: &vp::Xcr0) -> Result<(), Self::Error> {
        let vp::Xcr0 { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_xcr0(value);
        Ok(())
    }

    fn xss(&mut self) -> Result<vp::Xss, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Xss { value: vmsa.xss() })
    }

    fn set_xss(&mut self, value: &vp::Xss) -> Result<(), Self::Error> {
        let vp::Xss { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_xss(value);
        Ok(())
    }

    fn mtrrs(&mut self) -> Result<vp::Mtrrs, Self::Error> {
        // MTRR state is not tracked for SNP VPs; report the MTRRs as disabled
        // and silently ignore writes below.
        Ok(vp::Mtrrs {
            msr_mtrr_def_type: 0,
            fixed: [0; 11],
            variable: [0; 16],
        })
    }

    fn set_mtrrs(&mut self, _value: &vp::Mtrrs) -> Result<(), Self::Error> {
        Ok(())
    }

    fn pat(&mut self) -> Result<vp::Pat, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Pat { value: vmsa.pat() })
    }

    fn set_pat(&mut self, value: &vp::Pat) -> Result<(), Self::Error> {
        let vp::Pat { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_pat(value);
        Ok(())
    }

    fn virtual_msrs(&mut self) -> Result<vp::VirtualMsrs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);

        Ok(vp::VirtualMsrs {
            kernel_gs_base: vmsa.kernel_gs_base(),
            sysenter_cs: vmsa.sysenter_cs(),
            sysenter_eip: vmsa.sysenter_eip(),
            sysenter_esp: vmsa.sysenter_esp(),
            star: vmsa.star(),
            lstar: vmsa.lstar(),
            cstar: vmsa.cstar(),
            sfmask: vmsa.sfmask(),
        })
    }

    fn set_virtual_msrs(&mut self, value: &vp::VirtualMsrs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::VirtualMsrs {
            kernel_gs_base,
            sysenter_cs,
            sysenter_eip,
            sysenter_esp,
            star,
            lstar,
            cstar,
            sfmask,
        } = *value;
        vmsa.set_kernel_gs_base(kernel_gs_base);
        vmsa.set_sysenter_cs(sysenter_cs);
        vmsa.set_sysenter_eip(sysenter_eip);
        vmsa.set_sysenter_esp(sysenter_esp);
        vmsa.set_star(star);
        vmsa.set_lstar(lstar);
        vmsa.set_cstar(cstar);
        vmsa.set_sfmask(sfmask);

        Ok(())
    }

    fn debug_regs(&mut self) -> Result<vp::DebugRegisters, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::DebugRegisters {
            dr0: vmsa.dr0(),
            dr1: vmsa.dr1(),
            dr2: vmsa.dr2(),
            dr3: vmsa.dr3(),
            dr6: vmsa.dr6(),
            dr7: vmsa.dr7(),
        })
    }

    fn set_debug_regs(&mut self, value: &vp::DebugRegisters) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::DebugRegisters {
            dr0,
            dr1,
            dr2,
            dr3,
            dr6,
            dr7,
        } = *value;
        vmsa.set_dr0(dr0);
        vmsa.set_dr1(dr1);
        vmsa.set_dr2(dr2);
        vmsa.set_dr3(dr3);
        vmsa.set_dr6(dr6);
        vmsa.set_dr7(dr7);
        Ok(())
    }

    fn tsc(&mut self) -> Result<vp::Tsc, Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn set_tsc(&mut self, _value: &vp::Tsc) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn tsc_aux(&mut self) -> Result<vp::TscAux, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::TscAux {
            value: vmsa.tsc_aux() as u64,
        })
    }

    fn set_tsc_aux(&mut self, value: &vp::TscAux) -> Result<(), Self::Error> {
        let vp::TscAux { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_tsc_aux(value as u32);
        Ok(())
    }

    fn cet(&mut self) -> Result<vp::Cet, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Cet { scet: vmsa.s_cet() })
    }

    fn set_cet(&mut self, value: &vp::Cet) -> Result<(), Self::Error> {
        let vp::Cet { scet } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_s_cet(scet);
        Ok(())
    }

    fn cet_ss(&mut self) -> Result<vp::CetSs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::CetSs {
            ssp: vmsa.ssp(),
            interrupt_ssp_table_addr: vmsa.interrupt_ssp_table_addr(),
        })
    }

    fn set_cet_ss(&mut self, value: &vp::CetSs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::CetSs {
            ssp,
            interrupt_ssp_table_addr,
        } = *value;
        vmsa.set_ssp(ssp);
        vmsa.set_interrupt_ssp_table_addr(interrupt_ssp_table_addr);
        Ok(())
    }

    // Synic state accessors are not implemented for the SNP backing.
    fn synic_msrs(&mut self) -> Result<vp::SyntheticMsrs, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn set_synic_msrs(&mut self, _value: &vp::SyntheticMsrs) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn synic_message_page(&mut self) -> Result<vp::SynicMessagePage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn set_synic_message_page(&mut self, _value: &vp::SynicMessagePage) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn synic_event_flags_page(&mut self) -> Result<vp::SynicEventFlagsPage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn set_synic_event_flags_page(
        &mut self,
        _value: &vp::SynicEventFlagsPage,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn synic_message_queues(&mut self) -> Result<vp::SynicMessageQueues, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn set_synic_message_queues(
        &mut self,
        _value: &vp::SynicMessageQueues,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn synic_timers(&mut self) -> Result<vp::SynicTimers, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }

    fn set_synic_timers(&mut self, _value: &vp::SynicTimers) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }
}

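/// Advances the guest past the intercepted instruction by moving RIP to the
/// hardware-reported next RIP and clearing the interrupt shadow recorded in
/// the VMSA.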
fn advance_to_next_instruction(vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>) {
    vmsa.set_rip(vmsa.next_rip());
    vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
}

impl UhProcessor<'_, SnpBacked> {
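    /// Emulates an intercepted MSR read, sourcing architectural MSRs from the
    /// VMSA of the given VTL. Returns `MsrError::Unknown` for MSRs not handled
    /// here.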
    fn read_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        vtl: GuestVtl,
    ) -> Result<u64, MsrError> {
        let vmsa = self.runner.vmsa(vtl);
        let value = match msr {
            x86defs::X64_MSR_FS_BASE => vmsa.fs().base,
            x86defs::X64_MSR_GS_BASE => vmsa.gs().base,
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.kernel_gs_base(),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.tsc_aux() as u64
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.spec_ctrl(),
            x86defs::X86X_MSR_U_CET => vmsa.u_cet(),
            x86defs::X86X_MSR_S_CET => vmsa.s_cet(),
            x86defs::X86X_MSR_PL0_SSP => vmsa.pl0_ssp(),
            x86defs::X86X_MSR_PL1_SSP => vmsa.pl1_ssp(),
            x86defs::X86X_MSR_PL2_SSP => vmsa.pl2_ssp(),
            x86defs::X86X_MSR_PL3_SSP => vmsa.pl3_ssp(),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.interrupt_ssp_table_addr(),
            x86defs::X86X_MSR_CR_PAT => vmsa.pat(),
            x86defs::X86X_MSR_EFER => vmsa.efer(),
            x86defs::X86X_MSR_STAR => vmsa.star(),
            x86defs::X86X_MSR_LSTAR => vmsa.lstar(),
            x86defs::X86X_MSR_CSTAR => vmsa.cstar(),
            x86defs::X86X_MSR_SFMASK => vmsa.sfmask(),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.sysenter_cs(),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.sysenter_esp(),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.sysenter_eip(),
            x86defs::X86X_MSR_XSS => vmsa.xss(),
            x86defs::X86X_AMD_MSR_VM_CR => 0,
            x86defs::X86X_MSR_TSC => safe_intrinsics::rdtsc(),
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => 0xffff_ffff,
            x86defs::X86X_MSR_MTRR_CAP => {
                // Report no fixed-range or variable-range MTRRs, with
                // write-combining supported.
                0x400
            }
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {
                // MTRRs are reported as disabled; writes to this MSR are
                // ignored below.
                0
            }
            x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP
            | x86defs::X86X_MSR_MCG_STATUS => 0,

            hvdef::HV_X64_MSR_GUEST_IDLE => {
                self.backing.cvm.lapics[vtl].activity = MpState::Idle;
                let mut vmsa = self.runner.vmsa_mut(vtl);
                vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
                0
            }
            _ => return Err(MsrError::Unknown),
        };
        Ok(value)
    }

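    /// Emulates an intercepted MSR write, updating the VMSA of the given VTL.
    /// Writes to MSRs that are not recognized here are traced and otherwise
    /// ignored.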
    fn write_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        value: u64,
        vtl: GuestVtl,
    ) -> Result<(), MsrError> {
        let mut vmsa = self.runner.vmsa_mut(vtl);
        match msr {
            x86defs::X64_MSR_FS_BASE => {
                let fs = vmsa.fs();
                vmsa.set_fs(SevSelector {
                    attrib: fs.attrib,
                    selector: fs.selector,
                    limit: fs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_GS_BASE => {
                let gs = vmsa.gs();
                vmsa.set_gs(SevSelector {
                    attrib: gs.attrib,
                    selector: gs.selector,
                    limit: gs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.set_kernel_gs_base(value),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.set_tsc_aux(value as u32);
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.set_spec_ctrl(value),
            x86defs::X86X_MSR_U_CET => vmsa.set_u_cet(value),
            x86defs::X86X_MSR_S_CET => vmsa.set_s_cet(value),
            x86defs::X86X_MSR_PL0_SSP => vmsa.set_pl0_ssp(value),
            x86defs::X86X_MSR_PL1_SSP => vmsa.set_pl1_ssp(value),
            x86defs::X86X_MSR_PL2_SSP => vmsa.set_pl2_ssp(value),
            x86defs::X86X_MSR_PL3_SSP => vmsa.set_pl3_ssp(value),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.set_interrupt_ssp_table_addr(value),

            x86defs::X86X_MSR_CR_PAT => vmsa.set_pat(value),
            x86defs::X86X_MSR_EFER => vmsa.set_efer(SnpBacked::calculate_efer(value, vmsa.cr0())),

            x86defs::X86X_MSR_STAR => vmsa.set_star(value),
            x86defs::X86X_MSR_LSTAR => vmsa.set_lstar(value),
            x86defs::X86X_MSR_CSTAR => vmsa.set_cstar(value),
            x86defs::X86X_MSR_SFMASK => vmsa.set_sfmask(value),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.set_sysenter_cs(value),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.set_sysenter_esp(value),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.set_sysenter_eip(value),
            x86defs::X86X_MSR_XSS => vmsa.set_xss(value),

            // Writes to these MSRs are silently dropped.
            x86defs::X86X_MSR_TSC => {}
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => {}
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {}

            x86defs::X86X_AMD_MSR_VM_CR
            | x86defs::X86X_MSR_MTRR_CAP
            | x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP => return Err(MsrError::InvalidAccess),

            x86defs::X86X_MSR_MCG_STATUS => {
                // Writes are accepted as long as the reserved bits are clear.
                if x86defs::X86xMcgStatusRegister::from(value).reserved0() != 0 {
                    return Err(MsrError::InvalidAccess);
                }
            }
            _ => {
                tracing::debug!(msr, value, "unknown cvm msr write");
            }
        }
        Ok(())
    }
}

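// Register operations used by the hv1 hypercall layer when handling VTL
// switches: advancing the instruction pointer and injecting an invalid-opcode
// (#UD) fault into the intercepted VTL.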
impl<T: CpuIo> hv1_hypercall::VtlSwitchOps for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn advance_ip(&mut self) {
        let is_64bit = self.vp.long_mode(self.intercepted_vtl);
        let mut io = hv1_hypercall::X64RegisterIo::new(self, is_64bit);
        io.advance_ip();
    }

    fn inject_invalid_opcode_fault(&mut self) {
        self.vp
            .runner
            .vmsa_mut(self.intercepted_vtl)
            .set_event_inject(
                SevEventInjectInfo::new()
                    .with_valid(true)
                    .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                    .with_vector(x86defs::Exception::INVALID_OPCODE.0),
            );
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressList for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn flush_virtual_address_list(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        hv1_hypercall::FlushVirtualAddressListEx::flush_virtual_address_list_ex(
            self,
            processor_set,
            flags,
            gva_ranges,
        )
    }
}

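// HvFlushVirtualAddressListEx: after validating the request, flush the listed
// GVA ranges, unless the request is large enough that flushing the whole
// address space is simpler.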
impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressListEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_list_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        self.hcvm_validate_flush_inputs(processor_set, flags, true)
            .map_err(|e| (e, 0))?;

        // If there are too many ranges, or any single range covers too many
        // pages, fall back to flushing the entire address space rather than
        // flushing range by range.
        let too_large = gva_ranges.len() > 16
            || gva_ranges.iter().any(|range| {
                let additional_pages = if flags.use_extended_range_format() {
                    range.as_extended().additional_pages()
                } else {
                    range.as_simple().additional_pages()
                };
                additional_pages > 16
            });
        if too_large {
            self.do_flush_virtual_address_space(processor_set, flags);
        } else {
            self.do_flush_virtual_address_list(flags, gva_ranges);
        }

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);
        Ok(())
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpace
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        hv1_hypercall::FlushVirtualAddressSpaceEx::flush_virtual_address_space_ex(
            self,
            processor_set,
            flags,
        )
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpaceEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        self.hcvm_validate_flush_inputs(processor_set, flags, false)?;

        self.do_flush_virtual_address_space(processor_set, flags);

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);
        Ok(())
    }
}

impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
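    /// Flushes each requested GVA range with INVLPGB, splitting each range into
    /// chunks bounded by `invlpgb_count_max`, then issues TLBSYNC to wait for
    /// the broadcast invalidations to complete.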
    fn do_flush_virtual_address_list(&mut self, flags: HvFlushFlags, gva_ranges: &[HvGvaRange]) {
        for range in gva_ranges {
            let mut rax = SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_va_valid(true)
                .with_global(!flags.non_global_mappings_only());
            let mut ecx = SevInvlpgbEcx::new();
            let mut count;
            let mut gpn;

            if flags.use_extended_range_format() && range.as_extended().large_page() {
                ecx.set_large_page(true);
                if range.as_extended_large_page().page_size() {
                    let range = range.as_extended_large_page();
                    count = range.additional_pages();
                    gpn = range.gva_large_page_number();
                } else {
                    let range = range.as_extended();
                    count = range.additional_pages();
                    gpn = range.gva_page_number();
                }
            } else {
                let range = range.as_simple();
                count = range.additional_pages();
                gpn = range.gva_page_number();
            }
            // The range encodes the count as pages in addition to the first,
            // so the total number of pages is one more.
            count += 1;

            while count > 0 {
                rax.set_virtual_page_number(gpn);
                ecx.set_additional_count(std::cmp::min(
                    count - 1,
                    self.vp.shared.invlpgb_count_max.into(),
                ));

                let edx = SevInvlpgbEdx::new();
                self.vp
                    .partition
                    .hcl
                    .invlpgb(rax.into(), edx.into(), ecx.into());

                count -= ecx.additional_count() + 1;
                gpn += ecx.additional_count() + 1;
            }
        }

        self.vp.partition.hcl.tlbsync();
    }

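    /// Flushes the entire virtual address space. When the request targets only
    /// the current processor and only non-global mappings, a local flush is
    /// requested by clearing the VMSA's PCPU id; otherwise a global INVLPGB
    /// broadcast plus TLBSYNC is issued.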
    fn do_flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) {
        let only_self = [self.vp.vp_index().index()].into_iter().eq(processor_set);
        if only_self && flags.non_global_mappings_only() {
            self.vp.runner.vmsa_mut(self.intercepted_vtl).set_pcpu_id(0);
        } else {
            self.vp.partition.hcl.invlpgb(
                SevInvlpgbRax::new()
                    .with_asid_valid(true)
                    .with_global(!flags.non_global_mappings_only())
                    .into(),
                SevInvlpgbEdx::new().into(),
                SevInvlpgbEcx::new().into(),
            );
            self.vp.partition.hcl.tlbsync();
        }
    }
}

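/// TLB flush and lock access for SNP partitions. Flushes are issued as global
/// INVLPGB broadcasts followed by TLBSYNC; when a VP index is available,
/// TLB-lock waiting is also requested for that VP.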
struct SnpTlbLockFlushAccess<'a> {
    vp_index: Option<VpIndex>,
    partition: &'a UhPartitionInner,
    shared: &'a SnpBackedShared,
}

impl TlbFlushLockAccess for SnpTlbLockFlushAccess<'_> {
    fn flush(&mut self, vtl: GuestVtl) {
        // The INVLPGB broadcast here is not scoped to a single VTL; only the
        // TLB-lock wait is limited to the requested VTL.
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        self.set_wait_for_tlb_locks(vtl);
    }

    fn flush_entire(&mut self) {
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
            self.set_wait_for_tlb_locks(vtl);
        }
    }

    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl) {
        if let Some(vp_index) = self.vp_index {
            hardware_cvm::tlb_lock::TlbLockAccess {
                vp_index,
                cvm_partition: &self.shared.cvm,
            }
            .set_wait_for_tlb_locks(vtl);
        }
    }
}

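/// Saved-state support for SNP processors. VP state save/restore is not
/// supported for this backing, so `SavedStateNotSupported` is used and
/// `restore` is statically unreachable.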
mod save_restore {
    use super::SnpBacked;
    use super::UhProcessor;
    use vmcore::save_restore::RestoreError;
    use vmcore::save_restore::SaveError;
    use vmcore::save_restore::SaveRestore;
    use vmcore::save_restore::SavedStateNotSupported;

    impl SaveRestore for UhProcessor<'_, SnpBacked> {
        type SavedState = SavedStateNotSupported;

        fn save(&mut self) -> Result<Self::SavedState, SaveError> {
            Err(SaveError::NotSupported)
        }

        fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
            match state {}
        }
    }
}