use super::BackingParams;
use super::BackingPrivate;
use super::BackingSharedParams;
use super::HardwareIsolatedBacking;
use super::InterceptMessageOptionalState;
use super::InterceptMessageState;
use super::UhEmulationState;
use super::hardware_cvm;
use super::vp_state;
use super::vp_state::UhVpStateAccess;
use crate::BackingShared;
use crate::Error;
use crate::GuestVtl;
use crate::TlbFlushLockAccess;
use crate::UhCvmPartitionState;
use crate::UhCvmVpState;
use crate::UhPartitionInner;
use crate::UhPartitionNewParams;
use crate::WakeReason;
use crate::processor::UhHypercallHandler;
use crate::processor::UhProcessor;
use crate::processor::hardware_cvm::apic::ApicBacking;
use cvm_tracing::CVM_ALLOWED;
use cvm_tracing::CVM_CONFIDENTIAL;
use hcl::vmsa::VmsaWrapper;
use hv1_emulator::hv::ProcessorVtlHv;
use hv1_emulator::synic::ProcessorSynic;
use hv1_hypercall::HvRepResult;
use hv1_hypercall::HypercallIo;
use hv1_structs::ProcessorSet;
use hv1_structs::VtlArray;
use hvdef::HV_PAGE_SIZE;
use hvdef::HvDeliverabilityNotificationsRegister;
use hvdef::HvError;
use hvdef::HvMessageType;
use hvdef::HvX64PendingExceptionEvent;
use hvdef::HvX64RegisterName;
use hvdef::Vtl;
use hvdef::hypercall::Control;
use hvdef::hypercall::HvFlushFlags;
use hvdef::hypercall::HvGvaRange;
use hvdef::hypercall::HypercallOutput;
use inspect::Inspect;
use inspect::InspectMut;
use inspect_counters::Counter;
use virt::EmulatorMonitorSupport;
use virt::Processor;
use virt::VpHaltReason;
use virt::VpIndex;
use virt::io::CpuIo;
use virt::state::StateElement;
use virt::vp;
use virt::vp::AccessVpState;
use virt::vp::MpState;
use virt::x86::MsrError;
use virt::x86::MsrErrorExt;
use virt::x86::SegmentRegister;
use virt::x86::TableRegister;
use virt_support_apic::ApicClient;
use virt_support_x86emu::emulate::EmulatorSupport as X86EmulatorSupport;
use virt_support_x86emu::emulate::emulate_io;
use virt_support_x86emu::emulate::emulate_translate_gva;
use virt_support_x86emu::translate::TranslationRegisters;
use vmcore::vmtime::VmTimeAccess;
use x86defs::RFlags;
use x86defs::cpuid::CpuidFunction;
use x86defs::snp::SevEventInjectInfo;
use x86defs::snp::SevExitCode;
use x86defs::snp::SevInvlpgbEcx;
use x86defs::snp::SevInvlpgbEdx;
use x86defs::snp::SevInvlpgbRax;
use x86defs::snp::SevIoAccessInfo;
use x86defs::snp::SevNpfInfo;
use x86defs::snp::SevSelector;
use x86defs::snp::SevStatusMsr;
use x86defs::snp::SevVmsa;
use x86defs::snp::Vmpl;
use zerocopy::FromZeros;
use zerocopy::IntoBytes;
85
86#[derive(Debug, Error)]
87#[error("invalid vmcb")]
88struct InvalidVmcb;
89
90#[derive(Debug, Error)]
91enum SnpGhcbError {
92 #[error("failed to access GHCB page")]
93 GhcbPageAccess(#[source] guestmem::GuestMemoryError),
94 #[error("ghcb page used for vmgexit does not match overlay page")]
95 GhcbMisconfiguration,
96}
97
98#[derive(Debug, Error)]
99#[error("failed to run")]
100struct SnpRunVpError(#[source] hcl::ioctl::Error);
101
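/// Backing state for a virtual processor of an SEV-SNP isolated partition.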
102#[derive(InspectMut)]
104pub struct SnpBacked {
105 #[inspect(hex)]
106 hv_sint_notifications: u16,
107 general_stats: VtlArray<GeneralStats, 2>,
108 exit_stats: VtlArray<ExitStats, 2>,
109 #[inspect(flatten)]
110 cvm: UhCvmVpState,
111}
112
113#[derive(Inspect, Default)]
114struct GeneralStats {
115 guest_busy: Counter,
116 int_ack: Counter,
117 synth_int: Counter,
118}
119
120#[derive(Inspect, Default)]
121struct ExitStats {
122 automatic_exit: Counter,
123 cpuid: Counter,
124 hlt: Counter,
125 intr: Counter,
126 invd: Counter,
127 invlpgb: Counter,
128 ioio: Counter,
129 msr_read: Counter,
130 msr_write: Counter,
131 npf: Counter,
132 npf_no_intercept: Counter,
133 npf_spurious: Counter,
134 rdpmc: Counter,
135 vmgexit: Counter,
136 vmmcall: Counter,
137 xsetbv: Counter,
138 excp_db: Counter,
139 secure_reg_write: Counter,
140}
141
142enum UhDirectOverlay {
143 Sipp,
144 Sifp,
145 Ghcb,
146 Count,
147}
148
149impl SnpBacked {
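    /// Computes the effective EFER value for a VMSA: EFER.LMA is derived from
    /// EFER.LME && CR0.PG (for example, LME=1 with paging disabled clears
    /// LMA), and EFER.SVME is always forced on, since SEV guests must run
    /// with SVM enabled.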
150 fn calculate_efer(efer: u64, cr0: u64) -> u64 {
152 let new_efer = if efer & x86defs::X64_EFER_LME != 0 && cr0 & x86defs::X64_CR0_PG != 0 {
153 efer | x86defs::X64_EFER_LMA
154 } else {
155 efer & !x86defs::X64_EFER_LMA
156 };
157 new_efer | x86defs::X64_EFER_SVME
158 }
159
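    /// Returns the number of shared (host-visible) pages that must be
    /// allocated per CPU for the direct overlay pages: the synic message
    /// page, the synic event flags page, and the GHCB page.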
160 pub fn shared_pages_required_per_cpu() -> u64 {
163 UhDirectOverlay::Count as u64
164 }
165}
166
167impl HardwareIsolatedBacking for SnpBacked {
168 fn cvm_state(&self) -> &UhCvmVpState {
169 &self.cvm
170 }
171
172 fn cvm_state_mut(&mut self) -> &mut UhCvmVpState {
173 &mut self.cvm
174 }
175
176 fn cvm_partition_state(shared: &Self::Shared) -> &UhCvmPartitionState {
177 &shared.cvm
178 }
179
180 fn switch_vtl(this: &mut UhProcessor<'_, Self>, source_vtl: GuestVtl, target_vtl: GuestVtl) {
181 let [vmsa0, vmsa1] = this.runner.vmsas_mut();
182 let (current_vmsa, mut target_vmsa) = match (source_vtl, target_vtl) {
183 (GuestVtl::Vtl0, GuestVtl::Vtl1) => (vmsa0, vmsa1),
184 (GuestVtl::Vtl1, GuestVtl::Vtl0) => (vmsa1, vmsa0),
185 _ => unreachable!(),
186 };
187
188 target_vmsa.set_rax(current_vmsa.rax());
189 target_vmsa.set_rbx(current_vmsa.rbx());
190 target_vmsa.set_rcx(current_vmsa.rcx());
191 target_vmsa.set_rdx(current_vmsa.rdx());
192 target_vmsa.set_rbp(current_vmsa.rbp());
193 target_vmsa.set_rsi(current_vmsa.rsi());
194 target_vmsa.set_rdi(current_vmsa.rdi());
195 target_vmsa.set_r8(current_vmsa.r8());
196 target_vmsa.set_r9(current_vmsa.r9());
197 target_vmsa.set_r10(current_vmsa.r10());
198 target_vmsa.set_r11(current_vmsa.r11());
199 target_vmsa.set_r12(current_vmsa.r12());
200 target_vmsa.set_r13(current_vmsa.r13());
201 target_vmsa.set_r14(current_vmsa.r14());
202 target_vmsa.set_r15(current_vmsa.r15());
203 target_vmsa.set_xcr0(current_vmsa.xcr0());
204
205 target_vmsa.set_cr2(current_vmsa.cr2());
206
207 target_vmsa.set_dr0(current_vmsa.dr0());
209 target_vmsa.set_dr1(current_vmsa.dr1());
210 target_vmsa.set_dr2(current_vmsa.dr2());
211 target_vmsa.set_dr3(current_vmsa.dr3());
212
213 target_vmsa.set_pl0_ssp(current_vmsa.pl0_ssp());
214 target_vmsa.set_pl1_ssp(current_vmsa.pl1_ssp());
215 target_vmsa.set_pl2_ssp(current_vmsa.pl2_ssp());
216 target_vmsa.set_pl3_ssp(current_vmsa.pl3_ssp());
217 target_vmsa.set_u_cet(current_vmsa.u_cet());
218
        target_vmsa.set_x87_registers(&current_vmsa.x87_registers());
220
221 let vec_reg_count = 16;
222 for i in 0..vec_reg_count {
223 target_vmsa.set_xmm_registers(i, current_vmsa.xmm_registers(i));
224 target_vmsa.set_ymm_registers(i, current_vmsa.ymm_registers(i));
225 }
226
227 this.backing.cvm_state_mut().exit_vtl = target_vtl;
228 }
229
230 fn translation_registers(
231 &self,
232 this: &UhProcessor<'_, Self>,
233 vtl: GuestVtl,
234 ) -> TranslationRegisters {
235 let vmsa = this.runner.vmsa(vtl);
236 TranslationRegisters {
237 cr0: vmsa.cr0(),
238 cr4: vmsa.cr4(),
239 efer: vmsa.efer(),
240 cr3: vmsa.cr3(),
241 rflags: vmsa.rflags(),
242 ss: virt_seg_from_snp(vmsa.ss()).into(),
243 encryption_mode: virt_support_x86emu::translate::EncryptionMode::Vtom(
244 this.partition.caps.vtom.unwrap(),
245 ),
246 }
247 }
248
249 fn tlb_flush_lock_access<'a>(
250 vp_index: Option<VpIndex>,
251 partition: &'a UhPartitionInner,
252 shared: &'a Self::Shared,
253 ) -> impl TlbFlushLockAccess + 'a {
254 SnpTlbLockFlushAccess {
255 vp_index,
256 partition,
257 shared,
258 }
259 }
260
261 fn pending_event_vector(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> Option<u8> {
262 let event_inject = this.runner.vmsa(vtl).event_inject();
263 if event_inject.valid() {
264 Some(event_inject.vector())
265 } else {
266 None
267 }
268 }
269
270 fn set_pending_exception(
271 this: &mut UhProcessor<'_, Self>,
272 vtl: GuestVtl,
273 event: HvX64PendingExceptionEvent,
274 ) {
275 let inject_info = SevEventInjectInfo::new()
276 .with_valid(true)
277 .with_deliver_error_code(event.deliver_error_code())
278 .with_error_code(event.error_code())
279 .with_vector(event.vector().try_into().unwrap())
280 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT);
281
282 this.runner.vmsa_mut(vtl).set_event_inject(inject_info);
283 }
284
285 fn cr0(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
286 this.runner.vmsa(vtl).cr0()
287 }
288
289 fn cr4(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
290 this.runner.vmsa(vtl).cr4()
291 }
292
293 fn intercept_message_state(
294 this: &UhProcessor<'_, Self>,
295 vtl: GuestVtl,
296 include_optional_state: bool,
297 ) -> InterceptMessageState {
298 let vmsa = this.runner.vmsa(vtl);
299
300 let instr_len = if SevExitCode(vmsa.guest_error_code()) == SevExitCode::NPF {
302 0
303 } else {
304 (vmsa.next_rip() - vmsa.rip()) as u8
305 };
306
307 InterceptMessageState {
308 instruction_length_and_cr8: instr_len,
309 cpl: vmsa.cpl(),
310 efer_lma: vmsa.efer() & x86defs::X64_EFER_LMA != 0,
311 cs: virt_seg_from_snp(vmsa.cs()).into(),
312 rip: vmsa.rip(),
313 rflags: vmsa.rflags(),
314 rax: vmsa.rax(),
315 rdx: vmsa.rdx(),
316 optional: if include_optional_state {
317 Some(InterceptMessageOptionalState {
318 ds: virt_seg_from_snp(vmsa.ds()).into(),
319 es: virt_seg_from_snp(vmsa.es()).into(),
320 })
321 } else {
322 None
323 },
324 rcx: vmsa.rcx(),
325 rsi: vmsa.rsi(),
326 rdi: vmsa.rdi(),
327 }
328 }
329
330 fn cr_intercept_registration(
331 this: &mut UhProcessor<'_, Self>,
332 intercept_control: hvdef::HvRegisterCrInterceptControl,
333 ) {
334 this.runner
339 .set_vp_registers_hvcall(
340 Vtl::Vtl1,
341 [(
342 HvX64RegisterName::CrInterceptControl,
343 u64::from(intercept_control),
344 )],
345 )
346 .expect("setting intercept control succeeds");
347 }
348
349 fn is_interrupt_pending(
350 this: &mut UhProcessor<'_, Self>,
351 vtl: GuestVtl,
352 check_rflags: bool,
353 dev: &impl CpuIo,
354 ) -> bool {
355 let vmsa = this.runner.vmsa_mut(vtl);
356 if vmsa.event_inject().valid()
357 && vmsa.event_inject().interruption_type() == x86defs::snp::SEV_INTR_TYPE_NMI
358 {
359 return true;
360 }
361
362 let vmsa_priority = vmsa.v_intr_cntrl().priority() as u32;
363 let lapic = &mut this.backing.cvm.lapics[vtl].lapic;
364 let ppr = lapic
365 .access(&mut SnpApicClient {
366 partition: this.partition,
367 vmsa,
368 dev,
369 vmtime: &this.vmtime,
370 vtl,
371 })
372 .get_ppr();
373 let ppr_priority = ppr >> 4;
374 if vmsa_priority <= ppr_priority {
375 return false;
376 }
377
378 let vmsa = this.runner.vmsa_mut(vtl);
379 if (check_rflags && !RFlags::from_bits(vmsa.rflags()).interrupt_enable())
380 || vmsa.v_intr_cntrl().intr_shadow()
381 || !vmsa.v_intr_cntrl().irq()
382 {
383 return false;
384 }
385
386 true
387 }
388
389 fn untrusted_synic_mut(&mut self) -> Option<&mut ProcessorSynic> {
390 None
391 }
392}
393
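/// Partition-wide shared state for SNP-backed virtual processors.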
394#[derive(Inspect)]
396pub struct SnpBackedShared {
397 #[inspect(flatten)]
398 pub(crate) cvm: UhCvmPartitionState,
399 invlpgb_count_max: u16,
400 tsc_aux_virtualized: bool,
401 #[inspect(debug)]
402 sev_status: SevStatusMsr,
403}
404
405impl SnpBackedShared {
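    /// Builds the partition-wide SNP state: reads the INVLPGB count limit and
    /// TSC_AUX virtualization support from the extended CPUID leaves, and
    /// snapshots the SEV_STATUS MSR so each VP's VMSA can mirror the enabled
    /// SEV features.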
406 pub(crate) fn new(
407 _partition_params: &UhPartitionNewParams<'_>,
408 params: BackingSharedParams<'_>,
409 ) -> Result<Self, Error> {
410 let cvm = params.cvm_state.unwrap();
411 let invlpgb_count_max = x86defs::cpuid::ExtendedAddressSpaceSizesEdx::from(
412 params
413 .cpuid
414 .result(CpuidFunction::ExtendedAddressSpaceSizes.0, 0, &[0; 4])[3],
415 )
416 .invlpgb_count_max();
417 let tsc_aux_virtualized = x86defs::cpuid::ExtendedSevFeaturesEax::from(
418 params
419 .cpuid
420 .result(CpuidFunction::ExtendedSevFeatures.0, 0, &[0; 4])[0],
421 )
422 .tsc_aux_virtualization();
423
424 let msr = crate::MsrDevice::new(0).expect("open msr");
427 let sev_status =
428 SevStatusMsr::from(msr.read_msr(x86defs::X86X_AMD_MSR_SEV).expect("read msr"));
429 tracing::info!(CVM_ALLOWED, ?sev_status, "SEV status");
430
431 Ok(Self {
432 sev_status,
433 invlpgb_count_max,
434 tsc_aux_virtualized,
435 cvm,
436 })
437 }
438}
439
440#[expect(private_interfaces)]
441impl BackingPrivate for SnpBacked {
442 type HclBacking<'snp> = hcl::ioctl::snp::Snp<'snp>;
443 type Shared = SnpBackedShared;
444 type EmulationCache = ();
445
446 fn shared(shared: &BackingShared) -> &Self::Shared {
447 let BackingShared::Snp(shared) = shared else {
448 unreachable!()
449 };
450 shared
451 }
452
453 fn new(params: BackingParams<'_, '_, Self>, shared: &SnpBackedShared) -> Result<Self, Error> {
454 Ok(Self {
455 hv_sint_notifications: 0,
456 general_stats: VtlArray::from_fn(|_| Default::default()),
457 exit_stats: VtlArray::from_fn(|_| Default::default()),
458 cvm: UhCvmVpState::new(
459 &shared.cvm,
460 params.partition,
461 params.vp_info,
462 UhDirectOverlay::Count as usize,
463 )?,
464 })
465 }
466
467 fn init(this: &mut UhProcessor<'_, Self>) {
468 let sev_status = this.vp().shared.sev_status;
469 for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
470 init_vmsa(
471 &mut this.runner.vmsa_mut(vtl),
472 vtl,
473 this.partition.caps.vtom,
474 sev_status,
475 );
476
477 let registers = vp::Registers::at_reset(&this.partition.caps, &this.inner.vp_info);
479 this.access_state(vtl.into())
                .set_registers(&registers)
481 .expect("Resetting to architectural state should succeed");
482
483 let debug_registers =
484 vp::DebugRegisters::at_reset(&this.partition.caps, &this.inner.vp_info);
485
486 this.access_state(vtl.into())
487 .set_debug_regs(&debug_registers)
488 .expect("Resetting to architectural state should succeed");
489
490 let xcr0 = vp::Xcr0::at_reset(&this.partition.caps, &this.inner.vp_info);
491 this.access_state(vtl.into())
492 .set_xcr(&xcr0)
493 .expect("Resetting to architectural state should succeed");
494
495 let cache_control = vp::Mtrrs::at_reset(&this.partition.caps, &this.inner.vp_info);
496 this.access_state(vtl.into())
497 .set_mtrrs(&cache_control)
498 .expect("Resetting to architectural state should succeed");
499 }
500
501 let pfns = &this.backing.cvm.direct_overlay_handle.pfns();
504 let values: &[(HvX64RegisterName, u64); 3] = &[
505 (
506 HvX64RegisterName::Sipp,
507 hvdef::HvSynicSimpSiefp::new()
508 .with_enabled(true)
509 .with_base_gpn(pfns[UhDirectOverlay::Sipp as usize])
510 .into(),
511 ),
512 (
513 HvX64RegisterName::Sifp,
514 hvdef::HvSynicSimpSiefp::new()
515 .with_enabled(true)
516 .with_base_gpn(pfns[UhDirectOverlay::Sifp as usize])
517 .into(),
518 ),
519 (
520 HvX64RegisterName::Ghcb,
521 x86defs::snp::GhcbMsr::new()
522 .with_info(x86defs::snp::GhcbInfo::REGISTER_REQUEST.0)
523 .with_pfn(pfns[UhDirectOverlay::Ghcb as usize])
524 .into(),
525 ),
526 ];
527
528 this.runner
529 .set_vp_registers_hvcall(Vtl::Vtl0, values)
530 .expect("set_vp_registers hypercall for direct overlays should succeed");
531 }
532
533 type StateAccess<'p, 'a>
534 = UhVpStateAccess<'a, 'p, Self>
535 where
536 Self: 'a + 'p,
537 'p: 'a;
538
539 fn access_vp_state<'a, 'p>(
540 this: &'a mut UhProcessor<'p, Self>,
541 vtl: GuestVtl,
542 ) -> Self::StateAccess<'p, 'a> {
543 UhVpStateAccess::new(this, vtl)
544 }
545
546 async fn run_vp(
547 this: &mut UhProcessor<'_, Self>,
548 dev: &impl CpuIo,
549 _stop: &mut virt::StopVp<'_>,
550 ) -> Result<(), VpHaltReason> {
551 this.run_vp_snp(dev).await
552 }
553
554 fn poll_apic(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl, scan_irr: bool) {
555 this.runner.vmsa_mut(vtl).v_intr_cntrl_mut().set_irq(false);
557
558 hardware_cvm::apic::poll_apic_core(this, vtl, scan_irr)
559 }
560
561 fn request_extint_readiness(_this: &mut UhProcessor<'_, Self>) {
562 unreachable!("extint managed through software apic")
563 }
564
565 fn request_untrusted_sint_readiness(this: &mut UhProcessor<'_, Self>, sints: u16) {
566 let sints = this.backing.hv_sint_notifications | sints;
567 if this.backing.hv_sint_notifications == sints {
568 return;
569 }
570 let notifications = HvDeliverabilityNotificationsRegister::new().with_sints(sints);
571 tracing::trace!(?notifications, "setting notifications");
572 this.runner
573 .set_vp_register(
574 GuestVtl::Vtl0,
575 HvX64RegisterName::DeliverabilityNotifications,
576 u64::from(notifications).into(),
577 )
            .expect("requesting deliverability is not a fallible operation");
579
580 this.backing.hv_sint_notifications = sints;
581 }
582
583 fn inspect_extra(this: &mut UhProcessor<'_, Self>, resp: &mut inspect::Response<'_>) {
584 let vtl0_vmsa = this.runner.vmsa(GuestVtl::Vtl0);
585 let vtl1_vmsa = if this.backing.cvm_state().vtl1.is_some() {
586 Some(this.runner.vmsa(GuestVtl::Vtl1))
587 } else {
588 None
589 };
590
591 let add_vmsa_inspect = |req: inspect::Request<'_>, vmsa: VmsaWrapper<'_, &SevVmsa>| {
592 req.respond()
593 .hex("guest_error_code", vmsa.guest_error_code())
594 .hex("exit_info1", vmsa.exit_info1())
595 .hex("exit_info2", vmsa.exit_info2())
596 .hex("v_intr_cntrl", u64::from(vmsa.v_intr_cntrl()));
597 };
598
599 resp.child("vmsa_additional", |req| {
600 req.respond()
601 .child("vtl0", |inner_req| add_vmsa_inspect(inner_req, vtl0_vmsa))
602 .child("vtl1", |inner_req| {
603 if let Some(vtl1_vmsa) = vtl1_vmsa {
604 add_vmsa_inspect(inner_req, vtl1_vmsa);
605 }
606 });
607 });
608 }
609
610 fn hv(&self, vtl: GuestVtl) -> Option<&ProcessorVtlHv> {
611 Some(&self.cvm.hv[vtl])
612 }
613
614 fn hv_mut(&mut self, vtl: GuestVtl) -> Option<&mut ProcessorVtlHv> {
615 Some(&mut self.cvm.hv[vtl])
616 }
617
618 fn handle_vp_start_enable_vtl_wake(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl) {
619 this.hcvm_handle_vp_start_enable_vtl(vtl)
620 }
621
622 fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
623 this.hcvm_vtl1_inspectable()
624 }
625
626 fn process_interrupts(
627 this: &mut UhProcessor<'_, Self>,
628 scan_irr: VtlArray<bool, 2>,
629 first_scan_irr: &mut bool,
630 dev: &impl CpuIo,
631 ) -> bool {
632 this.cvm_process_interrupts(scan_irr, first_scan_irr, dev)
633 }
634}
635
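// The VMSA holds segment attributes in the packed SVM format: descriptor
// attribute bits 0..=7 (type, S, DPL, P) stay in place while bits 12..=15
// (AVL, L, D/B, G) are shifted down into bits 8..=11. For example, a flat
// 64-bit code segment with attributes 0xA09B packs to 0xA9B and unpacks back
// to 0xA09B.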
636fn virt_seg_to_snp(val: SegmentRegister) -> SevSelector {
637 SevSelector {
638 selector: val.selector,
639 attrib: (val.attributes & 0xFF) | ((val.attributes >> 4) & 0xF00),
640 limit: val.limit,
641 base: val.base,
642 }
643}
644
645fn virt_table_to_snp(val: TableRegister) -> SevSelector {
646 SevSelector {
647 limit: val.limit as u32,
648 base: val.base,
649 ..FromZeros::new_zeroed()
650 }
651}
652
653fn virt_seg_from_snp(selector: SevSelector) -> SegmentRegister {
654 SegmentRegister {
655 base: selector.base,
656 limit: selector.limit,
657 selector: selector.selector,
658 attributes: (selector.attrib & 0xFF) | ((selector.attrib & 0xF00) << 4),
659 }
660}
661
662fn virt_table_from_snp(selector: SevSelector) -> TableRegister {
663 TableRegister {
664 limit: selector.limit as u16,
665 base: selector.base,
666 }
667}
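
// Illustrative unit test sketching the attribute packing above; a minimal,
// hypothetical check that assumes `SevSelector` and `SegmentRegister` expose
// the plain integer fields used by this module.
#[cfg(test)]
mod segment_conversion_tests {
    use super::*;

    #[test]
    fn segment_attributes_round_trip() {
        // 0xA9B is the packed form of 0xA09B (a flat 64-bit code segment).
        let attrib = 0xa9b;
        let sel = SevSelector {
            attrib,
            ..FromZeros::new_zeroed()
        };
        // Unpacking to a SegmentRegister and packing back preserves the
        // attribute bits.
        assert_eq!(virt_seg_to_snp(virt_seg_from_snp(sel)).attrib, attrib);
    }
}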
668
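/// Initializes a VMSA to its architectural reset state, mirroring selected
/// SEV features reported by the host's SEV_STATUS MSR, enabling alternate
/// injection and #VC reflection, and assigning the VMPL that corresponds to
/// the target VTL (VTL0 runs at VMPL2, VTL1 at VMPL1; VMPL0 is reserved for
/// the paravisor itself).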
669fn init_vmsa(
670 vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>,
671 vtl: GuestVtl,
672 vtom: Option<u64>,
673 sev_status: SevStatusMsr,
674) {
675 vmsa.reset(sev_status.vmsa_reg_prot());
679 vmsa.sev_features_mut()
680 .set_snp_btb_isolation(sev_status.snp_btb_isolation());
681 vmsa.sev_features_mut()
682 .set_ibpb_on_entry(sev_status.ibpb_on_entry());
683 vmsa.sev_features_mut()
684 .set_prevent_host_ibs(sev_status.prevent_host_ibs());
685 vmsa.sev_features_mut()
686 .set_vmsa_reg_prot(sev_status.vmsa_reg_prot());
687 vmsa.sev_features_mut().set_snp(true);
688 vmsa.sev_features_mut().set_vtom(vtom.is_some());
689 vmsa.set_virtual_tom(vtom.unwrap_or(0));
690
691 vmsa.sev_features_mut().set_alternate_injection(true);
694 vmsa.sev_features_mut().set_reflect_vc(true);
695 vmsa.v_intr_cntrl_mut().set_guest_busy(true);
696 vmsa.sev_features_mut().set_debug_swap(true);
697
698 let vmpl = match vtl {
699 GuestVtl::Vtl0 => Vmpl::Vmpl2,
700 GuestVtl::Vtl1 => Vmpl::Vmpl1,
701 };
702 vmsa.set_vmpl(vmpl.into());
703
704 vmsa.set_guest_error_code(SevExitCode::INTR.0);
707
708 vmsa.set_efer(x86defs::X64_EFER_SVME);
711}
712
713struct SnpApicClient<'a, T> {
714 partition: &'a UhPartitionInner,
715 vmsa: VmsaWrapper<'a, &'a mut SevVmsa>,
716 dev: &'a T,
717 vmtime: &'a VmTimeAccess,
718 vtl: GuestVtl,
719}
720
721impl<T: CpuIo> ApicClient for SnpApicClient<'_, T> {
722 fn cr8(&mut self) -> u32 {
723 self.vmsa.v_intr_cntrl().tpr().into()
724 }
725
726 fn set_cr8(&mut self, value: u32) {
727 self.vmsa.v_intr_cntrl_mut().set_tpr(value as u8);
728 }
729
730 fn set_apic_base(&mut self, _value: u64) {
731 }
733
734 fn wake(&mut self, vp_index: VpIndex) {
735 self.partition.vps[vp_index.index() as usize].wake(self.vtl, WakeReason::INTCON);
736 }
737
738 fn eoi(&mut self, vector: u8) {
739 debug_assert_eq!(self.vtl, GuestVtl::Vtl0);
740 self.dev.handle_eoi(vector.into())
741 }
742
743 fn now(&mut self) -> vmcore::vmtime::VmTime {
744 self.vmtime.now()
745 }
746
747 fn pull_offload(&mut self) -> ([u32; 8], [u32; 8]) {
748 unreachable!()
749 }
750}
751
752impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
753 const TRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
755 Self,
756 [
757 hv1_hypercall::HvModifySparseGpaPageHostVisibility,
758 hv1_hypercall::HvQuerySparseGpaPageHostVisibility,
759 hv1_hypercall::HvX64StartVirtualProcessor,
760 hv1_hypercall::HvGetVpIndexFromApicId,
761 hv1_hypercall::HvGetVpRegisters,
762 hv1_hypercall::HvEnablePartitionVtl,
763 hv1_hypercall::HvRetargetDeviceInterrupt,
764 hv1_hypercall::HvPostMessage,
765 hv1_hypercall::HvSignalEvent,
766 hv1_hypercall::HvX64EnableVpVtl,
767 hv1_hypercall::HvExtQueryCapabilities,
768 hv1_hypercall::HvVtlCall,
769 hv1_hypercall::HvVtlReturn,
770 hv1_hypercall::HvFlushVirtualAddressList,
771 hv1_hypercall::HvFlushVirtualAddressListEx,
772 hv1_hypercall::HvFlushVirtualAddressSpace,
773 hv1_hypercall::HvFlushVirtualAddressSpaceEx,
774 hv1_hypercall::HvSetVpRegisters,
775 hv1_hypercall::HvModifyVtlProtectionMask,
776 hv1_hypercall::HvX64TranslateVirtualAddress,
777 hv1_hypercall::HvSendSyntheticClusterIpi,
778 hv1_hypercall::HvSendSyntheticClusterIpiEx,
779 hv1_hypercall::HvInstallIntercept,
780 hv1_hypercall::HvAssertVirtualInterrupt,
781 ],
782 );
783
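    // Hypercalls issued via the shared GHCB page are not trusted, so only the
    // host-facing synic hypercalls are dispatched on this path.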
784 const UNTRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
787 Self,
788 [hv1_hypercall::HvPostMessage, hv1_hypercall::HvSignalEvent],
789 );
790}
791
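/// A hypercall made through the GHCB page. The guest supplies the hypercall
/// control word and an output GPA in the GHCB; the input page is the GHCB
/// page itself, and the 64-bit completion status is written back into the
/// GHCB once the hypercall has been dispatched.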
792struct GhcbEnlightenedHypercall<'a, 'b, T> {
793 handler: UhHypercallHandler<'a, 'b, T, SnpBacked>,
794 control: u64,
795 output_gpa: u64,
796 input_gpa: u64,
797 result: u64,
798}
799
800impl<'a, 'b, T> hv1_hypercall::AsHandler<UhHypercallHandler<'a, 'b, T, SnpBacked>>
801 for &mut GhcbEnlightenedHypercall<'a, 'b, T>
802{
803 fn as_handler(&mut self) -> &mut UhHypercallHandler<'a, 'b, T, SnpBacked> {
804 &mut self.handler
805 }
806}
807
808impl<T> HypercallIo for GhcbEnlightenedHypercall<'_, '_, T> {
809 fn advance_ip(&mut self) {
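        // Nothing to advance here: the guest reached this hypercall via
        // VMGEXIT, and completion is reported through the result written back
        // to the GHCB rather than by adjusting RIP in this handler.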
810 }
812
813 fn retry(&mut self, control: u64) {
814 let control = Control::from(control);
823 self.set_result(
824 HypercallOutput::from(HvError::Timeout)
825 .with_elements_processed(control.rep_start())
826 .into(),
827 );
828 }
829
830 fn control(&mut self) -> u64 {
831 self.control
832 }
833
834 fn input_gpa(&mut self) -> u64 {
835 self.input_gpa
836 }
837
838 fn output_gpa(&mut self) -> u64 {
839 self.output_gpa
840 }
841
842 fn fast_register_pair_count(&mut self) -> usize {
843 0
844 }
845
846 fn extended_fast_hypercalls_ok(&mut self) -> bool {
847 false
848 }
849
850 fn fast_input(&mut self, _buf: &mut [[u64; 2]], _output_register_pairs: usize) -> usize {
851 unimplemented!("not supported for secure enlightened abi")
852 }
853
854 fn fast_output(&mut self, _starting_pair_index: usize, _buf: &[[u64; 2]]) {
855 unimplemented!("not supported for secure enlightened abi")
856 }
857
858 fn vtl_input(&mut self) -> u64 {
859 unimplemented!("not supported for secure enlightened abi")
860 }
861
862 fn set_result(&mut self, n: u64) {
863 self.result = n;
864 }
865
866 fn fast_regs(&mut self, _starting_pair_index: usize, _buf: &mut [[u64; 2]]) {
867 unimplemented!("not supported for secure enlightened abi")
868 }
869}
870
871impl<'b> ApicBacking<'b, SnpBacked> for UhProcessor<'b, SnpBacked> {
872 fn vp(&mut self) -> &mut UhProcessor<'b, SnpBacked> {
873 self
874 }
875
876 fn handle_interrupt(&mut self, vtl: GuestVtl, vector: u8) {
877 let mut vmsa = self.runner.vmsa_mut(vtl);
878 vmsa.v_intr_cntrl_mut().set_vector(vector);
879 vmsa.v_intr_cntrl_mut().set_priority((vector >> 4).into());
880 vmsa.v_intr_cntrl_mut().set_ignore_tpr(false);
881 vmsa.v_intr_cntrl_mut().set_irq(true);
882 self.backing.cvm.lapics[vtl].activity = MpState::Running;
883 }
884
885 fn handle_nmi(&mut self, vtl: GuestVtl) {
886 let mut vmsa = self.runner.vmsa_mut(vtl);
890
891 vmsa.set_event_inject(
895 SevEventInjectInfo::new()
896 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_NMI)
897 .with_vector(2)
898 .with_valid(true),
899 );
900 self.backing.cvm.lapics[vtl].nmi_pending = false;
901 self.backing.cvm.lapics[vtl].activity = MpState::Running;
902 }
903
904 fn handle_sipi(&mut self, vtl: GuestVtl, cs: SegmentRegister) {
905 let mut vmsa = self.runner.vmsa_mut(vtl);
906 vmsa.set_cs(virt_seg_to_snp(cs));
907 vmsa.set_rip(0);
908 self.backing.cvm.lapics[vtl].activity = MpState::Running;
909 }
910}
911
912impl UhProcessor<'_, SnpBacked> {
913 fn handle_synic_deliverable_exit(&mut self) {
914 let message = self
915 .runner
916 .exit_message()
917 .as_message::<hvdef::HvX64SynicSintDeliverableMessage>();
918
919 tracing::trace!(
920 deliverable_sints = message.deliverable_sints,
921 "sint deliverable"
922 );
923
924 self.backing.hv_sint_notifications &= !message.deliverable_sints;
925
926 self.deliver_synic_messages(GuestVtl::Vtl0, message.deliverable_sints);
928 }
929
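    /// Handles a VMGEXIT intercept. Only the standard GHCB protocol is
    /// supported, the GHCB page supplied by the guest must match the
    /// registered overlay page, and only the hypercall GHCB usage is
    /// implemented: the hypercall parameters are read from the GHCB page and
    /// the 64-bit result is written back into it.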
930 fn handle_vmgexit(
931 &mut self,
932 dev: &impl CpuIo,
933 intercepted_vtl: GuestVtl,
934 ) -> Result<(), SnpGhcbError> {
935 let message = self
936 .runner
937 .exit_message()
938 .as_message::<hvdef::HvX64VmgexitInterceptMessage>();
939
940 let ghcb_msr = x86defs::snp::GhcbMsr::from(message.ghcb_msr);
941 tracing::trace!(?ghcb_msr, "vmgexit intercept");
942
943 match x86defs::snp::GhcbInfo(ghcb_msr.info()) {
944 x86defs::snp::GhcbInfo::NORMAL => {
945 assert!(message.flags.ghcb_page_valid());
946 let ghcb_pfn = ghcb_msr.pfn();
947
948 let ghcb_overlay =
949 self.backing.cvm.direct_overlay_handle.pfns()[UhDirectOverlay::Ghcb as usize];
950
951 if ghcb_pfn != ghcb_overlay {
953 tracelimit::warn_ratelimited!(
954 CVM_ALLOWED,
955 vmgexit_pfn = ghcb_pfn,
956 overlay_pfn = ghcb_overlay,
957 "ghcb page used for vmgexit does not match overlay page"
958 );
959
960 return Err(SnpGhcbError::GhcbMisconfiguration);
961 }
962
963 match x86defs::snp::GhcbUsage(message.ghcb_page.ghcb_usage) {
964 x86defs::snp::GhcbUsage::HYPERCALL => {
965 let guest_memory = &self.shared.cvm.shared_memory;
966 let overlay_base = ghcb_overlay * HV_PAGE_SIZE;
969 let x86defs::snp::GhcbHypercallParameters {
970 output_gpa,
971 input_control,
972 } = guest_memory
973 .read_plain(
974 overlay_base
975 + x86defs::snp::GHCB_PAGE_HYPERCALL_PARAMETERS_OFFSET as u64,
976 )
977 .map_err(SnpGhcbError::GhcbPageAccess)?;
978
979 let mut handler = GhcbEnlightenedHypercall {
980 handler: UhHypercallHandler {
981 vp: self,
982 bus: dev,
983 trusted: false,
984 intercepted_vtl,
985 },
986 control: input_control,
987 output_gpa,
988 input_gpa: overlay_base,
989 result: 0,
990 };
991
992 UhHypercallHandler::UNTRUSTED_DISPATCHER
993 .dispatch(guest_memory, &mut handler);
994
995 guest_memory
1003 .write_at(
1004 overlay_base
1005 + x86defs::snp::GHCB_PAGE_HYPERCALL_OUTPUT_OFFSET as u64,
1006 handler.result.as_bytes(),
1007 )
1008 .map_err(SnpGhcbError::GhcbPageAccess)?;
1009 }
1010 usage => unimplemented!("ghcb usage {usage:?}"),
1011 }
1012 }
1013 info => unimplemented!("ghcb info {info:?}"),
1014 }
1015
1016 Ok(())
1017 }
1018
1019 fn handle_msr_access(
1020 &mut self,
1021 dev: &impl CpuIo,
1022 entered_from_vtl: GuestVtl,
1023 msr: u32,
1024 is_write: bool,
1025 ) {
1026 if is_write && self.cvm_try_protect_msr_write(entered_from_vtl, msr) {
1027 return;
1028 }
1029
1030 let vmsa = self.runner.vmsa_mut(entered_from_vtl);
1031 let gp = if is_write {
1032 let value = (vmsa.rax() as u32 as u64) | ((vmsa.rdx() as u32 as u64) << 32);
1033
1034 let r = self.backing.cvm.lapics[entered_from_vtl]
1035 .lapic
1036 .access(&mut SnpApicClient {
1037 partition: self.partition,
1038 vmsa,
1039 dev,
1040 vmtime: &self.vmtime,
1041 vtl: entered_from_vtl,
1042 })
1043 .msr_write(msr, value)
1044 .or_else_if_unknown(|| self.write_msr_cvm(msr, value, entered_from_vtl))
1045 .or_else_if_unknown(|| self.write_msr_snp(dev, msr, value, entered_from_vtl));
1046
1047 match r {
1048 Ok(()) => false,
1049 Err(MsrError::Unknown) => {
1050 tracing::debug!(msr, value, "unknown cvm msr write");
1051 false
1052 }
1053 Err(MsrError::InvalidAccess) => true,
1054 }
1055 } else {
1056 let r = self.backing.cvm.lapics[entered_from_vtl]
1057 .lapic
1058 .access(&mut SnpApicClient {
1059 partition: self.partition,
1060 vmsa,
1061 dev,
1062 vmtime: &self.vmtime,
1063 vtl: entered_from_vtl,
1064 })
1065 .msr_read(msr)
1066 .or_else_if_unknown(|| self.read_msr_cvm(msr, entered_from_vtl))
1067 .or_else_if_unknown(|| self.read_msr_snp(dev, msr, entered_from_vtl));
1068
1069 let value = match r {
1070 Ok(v) => Some(v),
1071 Err(MsrError::Unknown) => {
1072 tracing::debug!(msr, "unknown cvm msr read");
1073 Some(0)
1074 }
1075 Err(MsrError::InvalidAccess) => None,
1076 };
1077
1078 if let Some(value) = value {
1079 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1080 vmsa.set_rax((value as u32).into());
1081 vmsa.set_rdx(((value >> 32) as u32).into());
1082 false
1083 } else {
1084 true
1085 }
1086 };
1087
1088 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1089 if gp {
1090 vmsa.set_event_inject(
1091 SevEventInjectInfo::new()
1092 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1093 .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
1094 .with_deliver_error_code(true)
1095 .with_valid(true),
1096 );
1097 } else {
1098 advance_to_next_instruction(&mut vmsa);
1099 }
1100 }
1101
1102 fn handle_xsetbv(&mut self, entered_from_vtl: GuestVtl) {
1103 let vmsa = self.runner.vmsa(entered_from_vtl);
1104 if let Some(value) = hardware_cvm::validate_xsetbv_exit(hardware_cvm::XsetbvExitInput {
1105 rax: vmsa.rax(),
1106 rcx: vmsa.rcx(),
1107 rdx: vmsa.rdx(),
1108 cr4: vmsa.cr4(),
1109 cpl: vmsa.cpl(),
1110 }) {
1111 if !self.cvm_try_protect_secure_register_write(
1112 entered_from_vtl,
1113 HvX64RegisterName::Xfem,
1114 value,
1115 ) {
1116 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1117 vmsa.set_xcr0(value);
1118 advance_to_next_instruction(&mut vmsa);
1119 }
1120 } else {
1121 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1122 vmsa.set_event_inject(
1123 SevEventInjectInfo::new()
1124 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1125 .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
1126 .with_deliver_error_code(true)
1127 .with_valid(true),
1128 );
1129 }
1130 }
1131
1132 fn handle_crx_intercept(&mut self, entered_from_vtl: GuestVtl, reg: HvX64RegisterName) {
1133 let vmsa = self.runner.vmsa(entered_from_vtl);
1134 let mov_crx_drx = x86defs::snp::MovCrxDrxInfo::from(vmsa.exit_info1());
1135 let reg_value = {
1136 let gpr_name =
1137 HvX64RegisterName(HvX64RegisterName::Rax.0 + mov_crx_drx.gpr_number() as u32);
1138
1139 match gpr_name {
1140 HvX64RegisterName::Rax => vmsa.rax(),
1141 HvX64RegisterName::Rbx => vmsa.rbx(),
1142 HvX64RegisterName::Rcx => vmsa.rcx(),
1143 HvX64RegisterName::Rdx => vmsa.rdx(),
1144 HvX64RegisterName::Rsp => vmsa.rsp(),
1145 HvX64RegisterName::Rbp => vmsa.rbp(),
1146 HvX64RegisterName::Rsi => vmsa.rsi(),
1147 HvX64RegisterName::Rdi => vmsa.rdi(),
1148 HvX64RegisterName::R8 => vmsa.r8(),
1149 HvX64RegisterName::R9 => vmsa.r9(),
1150 HvX64RegisterName::R10 => vmsa.r10(),
1151 HvX64RegisterName::R11 => vmsa.r11(),
1152 HvX64RegisterName::R12 => vmsa.r12(),
1153 HvX64RegisterName::R13 => vmsa.r13(),
1154 HvX64RegisterName::R14 => vmsa.r14(),
1155 HvX64RegisterName::R15 => vmsa.r15(),
1156 _ => unreachable!("unexpected register"),
1157 }
1158 };
1159
1160 if !mov_crx_drx.mov_crx() {
1167 tracelimit::warn_ratelimited!(
1168 CVM_ALLOWED,
1169 "Intercepted crx access, instruction is not mov crx"
1170 );
1171 return;
1172 }
1173
1174 if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, reg_value) {
1175 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1176 match reg {
1177 HvX64RegisterName::Cr0 => vmsa.set_cr0(reg_value),
1178 HvX64RegisterName::Cr4 => vmsa.set_cr4(reg_value),
1179 _ => unreachable!(),
1180 }
1181 advance_to_next_instruction(&mut vmsa);
1182 }
1183 }
1184
1185 #[must_use]
1186 fn sync_lazy_eoi(&mut self, vtl: GuestVtl) -> bool {
1187 if self.backing.cvm.lapics[vtl].lapic.is_lazy_eoi_pending() {
1188 return self.backing.cvm.hv[vtl].set_lazy_eoi();
1189 }
1190
1191 false
1192 }
1193
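    /// Runs this VP until the next exit and handles that exit. Since only
    /// alternate injection is supported, the VMSA guest-busy bit is cleared
    /// before entry and re-checked afterwards: if it is still set, the event
    /// recorded in EXITINTINFO was not delivered and is reinjected, except for
    /// software interrupts and the #BP/#OF software exceptions, which will be
    /// regenerated when their instruction re-executes.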
1194 async fn run_vp_snp(&mut self, dev: &impl CpuIo) -> Result<(), VpHaltReason> {
1195 let next_vtl = self.backing.cvm.exit_vtl;
1196
1197 let mut vmsa = self.runner.vmsa_mut(next_vtl);
1198 let last_interrupt_ctrl = vmsa.v_intr_cntrl();
1199
1200 if vmsa.sev_features().alternate_injection() {
1201 vmsa.v_intr_cntrl_mut().set_guest_busy(false);
1202 }
1203
1204 self.unlock_tlb_lock(Vtl::Vtl2);
1205 let tlb_halt = self.should_halt_for_tlb_unlock(next_vtl);
1206
1207 let halt = self.backing.cvm.lapics[next_vtl].activity != MpState::Running || tlb_halt;
1208
1209 if halt && next_vtl == GuestVtl::Vtl1 && !tlb_halt {
1210 tracelimit::warn_ratelimited!(CVM_ALLOWED, "halting VTL 1, which might halt the guest");
1211 }
1212
1213 self.runner.set_halted(halt);
1214
1215 self.runner.set_exit_vtl(next_vtl);
1216
1217 let lazy_eoi = self.sync_lazy_eoi(next_vtl);
1219
1220 let mut has_intercept = self
1221 .runner
1222 .run()
1223 .map_err(|e| dev.fatal_error(SnpRunVpError(e).into()))?;
1224
1225 let entered_from_vtl = next_vtl;
1226 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1227
1228 let inject = if vmsa.sev_features().alternate_injection() {
1230 if vmsa.v_intr_cntrl().guest_busy() {
1231 self.backing.general_stats[entered_from_vtl]
1232 .guest_busy
1233 .increment();
1234 let exit_int_info = SevEventInjectInfo::from(vmsa.exit_int_info());
1242 assert!(
1243 exit_int_info.valid(),
1244 "event inject info should be valid {exit_int_info:x?}"
1245 );
1246
1247 match exit_int_info.interruption_type() {
1248 x86defs::snp::SEV_INTR_TYPE_EXCEPT => {
1249 if exit_int_info.vector() != 3 && exit_int_info.vector() != 4 {
1250 Some(exit_int_info)
1252 } else {
1253 None
1254 }
1255 }
1256 x86defs::snp::SEV_INTR_TYPE_SW => None,
1257 _ => Some(exit_int_info),
1258 }
1259 } else {
1260 None
1261 }
1262 } else {
1263 unimplemented!("Only alternate injection is supported for SNP")
1264 };
1265
1266 if let Some(inject) = inject {
1267 vmsa.set_event_inject(inject);
1268 }
1269 if vmsa.sev_features().alternate_injection() {
1270 vmsa.v_intr_cntrl_mut().set_guest_busy(true);
1271 }
1272
1273 if last_interrupt_ctrl.irq() && !vmsa.v_intr_cntrl().irq() {
1274 self.backing.general_stats[entered_from_vtl]
1275 .int_ack
1276 .increment();
1277 self.backing.cvm.lapics[entered_from_vtl]
1279 .lapic
1280 .acknowledge_interrupt(last_interrupt_ctrl.vector());
1281 }
1282
1283 vmsa.v_intr_cntrl_mut().set_irq(false);
1284
1285 if lazy_eoi && self.backing.cvm.hv[entered_from_vtl].clear_lazy_eoi() {
1287 self.backing.cvm.lapics[entered_from_vtl]
1288 .lapic
1289 .access(&mut SnpApicClient {
1290 partition: self.partition,
1291 vmsa,
1292 dev,
1293 vmtime: &self.vmtime,
1294 vtl: entered_from_vtl,
1295 })
1296 .lazy_eoi();
1297 }
1298
1299 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1300 let sev_error_code = SevExitCode(vmsa.guest_error_code());
1301
1302 let stat = match sev_error_code {
1303 SevExitCode::CPUID => {
1304 self.handle_cpuid(entered_from_vtl);
1305 &mut self.backing.exit_stats[entered_from_vtl].cpuid
1306 }
1307
1308 SevExitCode::MSR => {
1309 let is_write = vmsa.exit_info1() & 1 != 0;
1310 let msr = vmsa.rcx() as u32;
1311
1312 self.handle_msr_access(dev, entered_from_vtl, msr, is_write);
1313
1314 if is_write {
1315 &mut self.backing.exit_stats[entered_from_vtl].msr_write
1316 } else {
1317 &mut self.backing.exit_stats[entered_from_vtl].msr_read
1318 }
1319 }
1320
1321 SevExitCode::IOIO => {
1322 let io_info =
1323 SevIoAccessInfo::from(self.runner.vmsa(entered_from_vtl).exit_info1() as u32);
1324
1325 let access_size = if io_info.access_size32() {
1326 4
1327 } else if io_info.access_size16() {
1328 2
1329 } else {
1330 1
1331 };
1332
1333 let port_access_protected = self.cvm_try_protect_io_port_access(
1334 entered_from_vtl,
1335 io_info.port(),
1336 io_info.read_access(),
1337 access_size,
1338 io_info.string_access(),
1339 io_info.rep_access(),
1340 );
1341
1342 let vmsa = self.runner.vmsa(entered_from_vtl);
1343 if !port_access_protected {
1344 if io_info.string_access() || io_info.rep_access() {
1345 let interruption_pending = vmsa.event_inject().valid()
1346 || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();
1347
1348 self.emulate(dev, interruption_pending, entered_from_vtl, ())
1353 .await?;
1354 } else {
1355 let mut rax = vmsa.rax();
1356 emulate_io(
1357 self.inner.vp_info.base.vp_index,
1358 !io_info.read_access(),
1359 io_info.port(),
1360 &mut rax,
1361 access_size,
1362 dev,
1363 )
1364 .await;
1365
1366 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1367 vmsa.set_rax(rax);
1368 advance_to_next_instruction(&mut vmsa);
1369 }
1370 }
1371 &mut self.backing.exit_stats[entered_from_vtl].ioio
1372 }
1373
1374 SevExitCode::VMMCALL => {
1375 let is_64bit = self.long_mode(entered_from_vtl);
1376 let guest_memory = &self.partition.gm[entered_from_vtl];
1377 let handler = UhHypercallHandler {
1378 trusted: !self.cvm_partition().hide_isolation,
1379 vp: &mut *self,
1380 bus: dev,
1381 intercepted_vtl: entered_from_vtl,
1382 };
1383
1384 UhHypercallHandler::TRUSTED_DISPATCHER.dispatch(
1387 guest_memory,
1388 hv1_hypercall::X64RegisterIo::new(handler, is_64bit),
1389 );
1390 &mut self.backing.exit_stats[entered_from_vtl].vmmcall
1391 }
1392
1393 SevExitCode::SHUTDOWN => {
1394 return Err(VpHaltReason::TripleFault {
1395 vtl: entered_from_vtl.into(),
1396 });
1397 }
1398
1399 SevExitCode::WBINVD | SevExitCode::INVD => {
1400 advance_to_next_instruction(&mut vmsa);
1404 &mut self.backing.exit_stats[entered_from_vtl].invd
1405 }
1406
1407 SevExitCode::NPF if has_intercept => {
1408 let gpa = vmsa.exit_info2();
1427 let interruption_pending = vmsa.event_inject().valid()
1428 || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();
1429 let exit_info = SevNpfInfo::from(vmsa.exit_info1());
1430 let exit_message = self.runner.exit_message();
1431 let real = match exit_message.header.typ {
1432 HvMessageType::HvMessageTypeExceptionIntercept => {
1433 let exception_message =
1434 exit_message.as_message::<hvdef::HvX64ExceptionInterceptMessage>();
1435
1436 exception_message.vector
1437 == x86defs::Exception::SEV_VMM_COMMUNICATION.0 as u16
1438 }
1439 HvMessageType::HvMessageTypeUnmappedGpa
1440 | HvMessageType::HvMessageTypeGpaIntercept
1441 | HvMessageType::HvMessageTypeUnacceptedGpa => {
1442 let gpa_message =
1443 exit_message.as_message::<hvdef::HvX64MemoryInterceptMessage>();
1444
1445 (gpa_message.guest_physical_address >> hvdef::HV_PAGE_SHIFT)
1447 == (gpa >> hvdef::HV_PAGE_SHIFT)
1448 }
1449 _ => false,
1450 };
1451
1452 if real {
1453 has_intercept = false;
1454 if self.check_mem_fault(entered_from_vtl, gpa, exit_info.is_write(), exit_info)
1455 {
1456 self.emulate(dev, interruption_pending, entered_from_vtl, ())
1457 .await?;
1458 }
1459 &mut self.backing.exit_stats[entered_from_vtl].npf
1460 } else {
1461 &mut self.backing.exit_stats[entered_from_vtl].npf_spurious
1462 }
1463 }
1464
1465 SevExitCode::NPF => &mut self.backing.exit_stats[entered_from_vtl].npf_no_intercept,
1466
1467 SevExitCode::HLT => {
1468 self.backing.cvm.lapics[entered_from_vtl].activity = MpState::Halted;
1469 vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
1471 &mut self.backing.exit_stats[entered_from_vtl].hlt
1472 }
1473
1474 SevExitCode::INVALID_VMCB => {
1475 return Err(dev.fatal_error(InvalidVmcb.into()));
1476 }
1477
1478 SevExitCode::INVLPGB | SevExitCode::ILLEGAL_INVLPGB => {
1479 vmsa.set_event_inject(
1480 SevEventInjectInfo::new()
1481 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1482 .with_vector(x86defs::Exception::INVALID_OPCODE.0)
1483 .with_valid(true),
1484 );
1485 &mut self.backing.exit_stats[entered_from_vtl].invlpgb
1486 }
1487
1488 SevExitCode::RDPMC => {
1489 let cr4 = vmsa.cr4();
1492 if ((vmsa.cpl() > 0) && (cr4 & x86defs::X64_CR4_PCE == 0))
1493 || (vmsa.rcx() as u32 >= 4)
1494 {
1495 vmsa.set_event_inject(
1496 SevEventInjectInfo::new()
1497 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1498 .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
1499 .with_deliver_error_code(true)
1500 .with_valid(true),
1501 );
1502 } else {
1503 vmsa.set_rax(0);
1504 vmsa.set_rdx(0);
1505 advance_to_next_instruction(&mut vmsa);
1506 }
1507 &mut self.backing.exit_stats[entered_from_vtl].rdpmc
1508 }
1509
1510 SevExitCode::VMGEXIT if has_intercept => {
1511 has_intercept = false;
1512 match self.runner.exit_message().header.typ {
1513 HvMessageType::HvMessageTypeX64SevVmgexitIntercept => {
1514 self.handle_vmgexit(dev, entered_from_vtl)
1515 .map_err(|e| dev.fatal_error(e.into()))?;
1516 }
1517 _ => has_intercept = true,
1518 }
1519 &mut self.backing.exit_stats[entered_from_vtl].vmgexit
1520 }
1521
1522 SevExitCode::NMI
1523 | SevExitCode::PAUSE
1524 | SevExitCode::SMI
1525 | SevExitCode::VMGEXIT
1526 | SevExitCode::BUSLOCK
1527 | SevExitCode::IDLE_HLT => {
1528 &mut self.backing.exit_stats[entered_from_vtl].automatic_exit
1530 }
1531
1532 SevExitCode::VINTR => {
1533 unimplemented!("SevExitCode::VINTR");
1539 }
1540
1541 SevExitCode::INTR => {
1542 &mut self.backing.exit_stats[entered_from_vtl].intr
1545 }
1546
1547 SevExitCode::XSETBV => {
1548 self.handle_xsetbv(entered_from_vtl);
1549 &mut self.backing.exit_stats[entered_from_vtl].xsetbv
1550 }
1551
1552 SevExitCode::EXCP_DB => &mut self.backing.exit_stats[entered_from_vtl].excp_db,
1553
1554 SevExitCode::CR0_WRITE => {
1555 self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr0);
1556 &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
1557 }
1558 SevExitCode::CR4_WRITE => {
1559 self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr4);
1560 &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
1561 }
1562
1563 tr_exit_code @ (SevExitCode::GDTR_WRITE
1564 | SevExitCode::IDTR_WRITE
1565 | SevExitCode::LDTR_WRITE
1566 | SevExitCode::TR_WRITE) => {
1567 let reg = match tr_exit_code {
1568 SevExitCode::GDTR_WRITE => HvX64RegisterName::Gdtr,
1569 SevExitCode::IDTR_WRITE => HvX64RegisterName::Idtr,
1570 SevExitCode::LDTR_WRITE => HvX64RegisterName::Ldtr,
1571 SevExitCode::TR_WRITE => HvX64RegisterName::Tr,
1572 _ => unreachable!(),
1573 };
1574
1575 if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, 0) {
1576 panic!("unexpected secure register");
1583 }
1584
1585 &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
1586 }
1587
1588 _ => {
1589 tracing::error!(
1590 CVM_CONFIDENTIAL,
1591 "SEV exit code {sev_error_code:x?} sev features {:x?} v_intr_control {:x?} event inject {:x?} \
1592 vmpl {:x?} cpl {:x?} exit_info1 {:x?} exit_info2 {:x?} exit_int_info {:x?} virtual_tom {:x?} \
1593 efer {:x?} cr4 {:x?} cr3 {:x?} cr0 {:x?} rflag {:x?} rip {:x?} next rip {:x?}",
1594 vmsa.sev_features(),
1595 vmsa.v_intr_cntrl(),
1596 vmsa.event_inject(),
1597 vmsa.vmpl(),
1598 vmsa.cpl(),
1599 vmsa.exit_info1(),
1600 vmsa.exit_info2(),
1601 vmsa.exit_int_info(),
1602 vmsa.virtual_tom(),
1603 vmsa.efer(),
1604 vmsa.cr4(),
1605 vmsa.cr3(),
1606 vmsa.cr0(),
1607 vmsa.rflags(),
1608 vmsa.rip(),
1609 vmsa.next_rip(),
1610 );
1611 panic!("Received unexpected SEV exit code {sev_error_code:x?}");
1612 }
1613 };
1614 stat.increment();
1615
1616 if cfg!(feature = "gdb") && sev_error_code == SevExitCode::EXCP_DB {
1618 return self.handle_debug_exception(dev, entered_from_vtl);
1619 }
1620
1621 if has_intercept {
1625 self.backing.general_stats[entered_from_vtl]
1626 .synth_int
1627 .increment();
1628 match self.runner.exit_message().header.typ {
1629 HvMessageType::HvMessageTypeSynicSintDeliverable => {
1630 self.handle_synic_deliverable_exit();
1631 }
1632 HvMessageType::HvMessageTypeX64Halt
1633 | HvMessageType::HvMessageTypeExceptionIntercept => {
1634 }
1638 message_type => {
1639 tracelimit::error_ratelimited!(
1640 CVM_ALLOWED,
1641 ?message_type,
1642 "unknown synthetic exit"
1643 );
1644 }
1645 }
1646 }
1647
1648 self.runner
1657 .vmsa_mut(entered_from_vtl)
1658 .set_guest_error_code(SevExitCode::INTR.0);
1659 Ok(())
1660 }
1661
1662 fn long_mode(&self, vtl: GuestVtl) -> bool {
1663 let vmsa = self.runner.vmsa(vtl);
1664 vmsa.cr0() & x86defs::X64_CR0_PE != 0 && vmsa.efer() & x86defs::X64_EFER_LMA != 0
1665 }
1666
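    /// Handles a CPUID intercept using the CVM CPUID results, then adjusts
    /// the AMD processor topology leaf so the extended APIC ID, compute unit
    /// ID, and node ID reflect this partition's VP topology, and zeroes the
    /// extended SEV features leaf before returning the values to the guest.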
1667 fn handle_cpuid(&mut self, vtl: GuestVtl) {
1668 let vmsa = self.runner.vmsa(vtl);
1669 let leaf = vmsa.rax() as u32;
1670 let subleaf = vmsa.rcx() as u32;
1671 let [mut eax, mut ebx, mut ecx, mut edx] = self.cvm_cpuid_result(vtl, leaf, subleaf);
1672
1673 match CpuidFunction(leaf) {
1681 CpuidFunction::ProcessorTopologyDefinition => {
1682 let apic_id = self.inner.vp_info.apic_id;
1683 let vps_per_socket = self.cvm_partition().vps_per_socket;
1684 eax = x86defs::cpuid::ProcessorTopologyDefinitionEax::from(eax)
1685 .with_extended_apic_id(apic_id)
1686 .into();
1687
1688 let topology_ebx = x86defs::cpuid::ProcessorTopologyDefinitionEbx::from(ebx);
1689 let mut new_unit_id = apic_id & (vps_per_socket - 1);
1690
1691 if topology_ebx.threads_per_compute_unit() > 0 {
1692 new_unit_id /= 2;
1693 }
1694
1695 ebx = topology_ebx.with_compute_unit_id(new_unit_id as u8).into();
1696
1697 let amd_nodes_per_socket = 1u32;
1700
1701 let node_id = apic_id
1702 >> (vps_per_socket
1703 .trailing_zeros()
1704 .saturating_sub(amd_nodes_per_socket.trailing_zeros()));
1705 let nodes_per_processor = amd_nodes_per_socket - 1;
1707
1708 ecx = x86defs::cpuid::ProcessorTopologyDefinitionEcx::from(ecx)
1709 .with_node_id(node_id as u8)
1710 .with_nodes_per_processor(nodes_per_processor as u8)
1711 .into();
1712 }
1713 CpuidFunction::ExtendedSevFeatures => {
1714 eax = 0;
1718 ebx = 0;
1719 ecx = 0;
1720 edx = 0;
1721 }
1722 _ => {}
1723 }
1724
1725 let mut vmsa = self.runner.vmsa_mut(vtl);
1726 vmsa.set_rax(eax.into());
1727 vmsa.set_rbx(ebx.into());
1728 vmsa.set_rcx(ecx.into());
1729 vmsa.set_rdx(edx.into());
1730 advance_to_next_instruction(&mut vmsa);
1731 }
1732}
1733
1734impl<T: CpuIo> X86EmulatorSupport for UhEmulationState<'_, '_, T, SnpBacked> {
1735 fn flush(&mut self) {
1736 }
1738
1739 fn vp_index(&self) -> VpIndex {
1740 self.vp.vp_index()
1741 }
1742
1743 fn vendor(&self) -> x86defs::cpuid::Vendor {
1744 self.vp.partition.caps.vendor
1745 }
1746
1747 fn gp(&mut self, reg: x86emu::Gp) -> u64 {
1748 let vmsa = self.vp.runner.vmsa(self.vtl);
1749 match reg {
1750 x86emu::Gp::RAX => vmsa.rax(),
1751 x86emu::Gp::RCX => vmsa.rcx(),
1752 x86emu::Gp::RDX => vmsa.rdx(),
1753 x86emu::Gp::RBX => vmsa.rbx(),
1754 x86emu::Gp::RSP => vmsa.rsp(),
1755 x86emu::Gp::RBP => vmsa.rbp(),
1756 x86emu::Gp::RSI => vmsa.rsi(),
1757 x86emu::Gp::RDI => vmsa.rdi(),
1758 x86emu::Gp::R8 => vmsa.r8(),
1759 x86emu::Gp::R9 => vmsa.r9(),
1760 x86emu::Gp::R10 => vmsa.r10(),
1761 x86emu::Gp::R11 => vmsa.r11(),
1762 x86emu::Gp::R12 => vmsa.r12(),
1763 x86emu::Gp::R13 => vmsa.r13(),
1764 x86emu::Gp::R14 => vmsa.r14(),
1765 x86emu::Gp::R15 => vmsa.r15(),
1766 }
1767 }
1768
1769 fn set_gp(&mut self, reg: x86emu::Gp, v: u64) {
1770 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1771 match reg {
1772 x86emu::Gp::RAX => vmsa.set_rax(v),
1773 x86emu::Gp::RCX => vmsa.set_rcx(v),
1774 x86emu::Gp::RDX => vmsa.set_rdx(v),
1775 x86emu::Gp::RBX => vmsa.set_rbx(v),
1776 x86emu::Gp::RSP => vmsa.set_rsp(v),
1777 x86emu::Gp::RBP => vmsa.set_rbp(v),
1778 x86emu::Gp::RSI => vmsa.set_rsi(v),
1779 x86emu::Gp::RDI => vmsa.set_rdi(v),
1780 x86emu::Gp::R8 => vmsa.set_r8(v),
1781 x86emu::Gp::R9 => vmsa.set_r9(v),
1782 x86emu::Gp::R10 => vmsa.set_r10(v),
1783 x86emu::Gp::R11 => vmsa.set_r11(v),
1784 x86emu::Gp::R12 => vmsa.set_r12(v),
1785 x86emu::Gp::R13 => vmsa.set_r13(v),
1786 x86emu::Gp::R14 => vmsa.set_r14(v),
1787 x86emu::Gp::R15 => vmsa.set_r15(v),
1788 };
1789 }
1790
1791 fn xmm(&mut self, index: usize) -> u128 {
1792 self.vp.runner.vmsa_mut(self.vtl).xmm_registers(index)
1793 }
1794
1795 fn set_xmm(&mut self, index: usize, v: u128) {
1796 self.vp
1797 .runner
1798 .vmsa_mut(self.vtl)
1799 .set_xmm_registers(index, v);
1800 }
1801
1802 fn rip(&mut self) -> u64 {
1803 let vmsa = self.vp.runner.vmsa(self.vtl);
1804 vmsa.rip()
1805 }
1806
1807 fn set_rip(&mut self, v: u64) {
1808 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1809 vmsa.set_rip(v);
1810 }
1811
1812 fn segment(&mut self, index: x86emu::Segment) -> x86defs::SegmentRegister {
1813 let vmsa = self.vp.runner.vmsa(self.vtl);
1814 match index {
1815 x86emu::Segment::ES => virt_seg_from_snp(vmsa.es()),
1816 x86emu::Segment::CS => virt_seg_from_snp(vmsa.cs()),
1817 x86emu::Segment::SS => virt_seg_from_snp(vmsa.ss()),
1818 x86emu::Segment::DS => virt_seg_from_snp(vmsa.ds()),
1819 x86emu::Segment::FS => virt_seg_from_snp(vmsa.fs()),
1820 x86emu::Segment::GS => virt_seg_from_snp(vmsa.gs()),
1821 }
1822 .into()
1823 }
1824
1825 fn efer(&mut self) -> u64 {
1826 let vmsa = self.vp.runner.vmsa(self.vtl);
1827 vmsa.efer()
1828 }
1829
1830 fn cr0(&mut self) -> u64 {
1831 let vmsa = self.vp.runner.vmsa(self.vtl);
1832 vmsa.cr0()
1833 }
1834
1835 fn rflags(&mut self) -> RFlags {
1836 let vmsa = self.vp.runner.vmsa(self.vtl);
1837 vmsa.rflags().into()
1838 }
1839
1840 fn set_rflags(&mut self, v: RFlags) {
1841 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1842 vmsa.set_rflags(v.into());
1843 }
1844
1845 fn instruction_bytes(&self) -> &[u8] {
1846 &[]
1847 }
1848
1849 fn physical_address(&self) -> Option<u64> {
1850 Some(self.vp.runner.vmsa(self.vtl).exit_info2())
1851 }
1852
1853 fn initial_gva_translation(
1854 &mut self,
1855 ) -> Option<virt_support_x86emu::emulate::InitialTranslation> {
1856 None
1857 }
1858
1859 fn interruption_pending(&self) -> bool {
1860 self.interruption_pending
1861 }
1862
1863 fn check_vtl_access(
1864 &mut self,
1865 _gpa: u64,
1866 _mode: virt_support_x86emu::emulate::TranslateMode,
1867 ) -> Result<(), virt_support_x86emu::emulate::EmuCheckVtlAccessError> {
1868 Ok(())
1870 }
1871
1872 fn translate_gva(
1873 &mut self,
1874 gva: u64,
1875 mode: virt_support_x86emu::emulate::TranslateMode,
1876 ) -> Result<
1877 virt_support_x86emu::emulate::EmuTranslateResult,
1878 virt_support_x86emu::emulate::EmuTranslateError,
1879 > {
1880 emulate_translate_gva(self, gva, mode)
1881 }
1882
1883 fn inject_pending_event(&mut self, event_info: hvdef::HvX64PendingEvent) {
1884 assert!(event_info.reg_0.event_pending());
1885 assert_eq!(
1886 event_info.reg_0.event_type(),
1887 hvdef::HV_X64_PENDING_EVENT_EXCEPTION
1888 );
1889
1890 let exception = HvX64PendingExceptionEvent::from(event_info.reg_0.into_bits());
1891 assert!(!self.interruption_pending);
1892
1893 SnpBacked::set_pending_exception(self.vp, self.vtl, exception);
1896 }
1897
1898 fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool {
1899 let vtom = self.vp.partition.caps.vtom.unwrap();
1902 debug_assert!(vtom == 0 || vtom.is_power_of_two());
1903 self.vp.partition.is_gpa_mapped(gpa & !vtom, write)
1904 }
1905
1906 fn lapic_base_address(&self) -> Option<u64> {
1907 self.vp.backing.cvm.lapics[self.vtl].lapic.base_address()
1908 }
1909
1910 fn lapic_read(&mut self, address: u64, data: &mut [u8]) {
1911 let vtl = self.vtl;
1912 self.vp.backing.cvm.lapics[vtl]
1913 .lapic
1914 .access(&mut SnpApicClient {
1915 partition: self.vp.partition,
1916 vmsa: self.vp.runner.vmsa_mut(vtl),
1917 dev: self.devices,
1918 vmtime: &self.vp.vmtime,
1919 vtl,
1920 })
1921 .mmio_read(address, data);
1922 }
1923
1924 fn lapic_write(&mut self, address: u64, data: &[u8]) {
1925 let vtl = self.vtl;
1926 self.vp.backing.cvm.lapics[vtl]
1927 .lapic
1928 .access(&mut SnpApicClient {
1929 partition: self.vp.partition,
1930 vmsa: self.vp.runner.vmsa_mut(vtl),
1931 dev: self.devices,
1932 vmtime: &self.vp.vmtime,
1933 vtl,
1934 })
1935 .mmio_write(address, data);
1936 }
1937
1938 fn monitor_support(&self) -> Option<&dyn EmulatorMonitorSupport> {
1939 Some(self)
1940 }
1941}
1942
1943impl<T> hv1_hypercall::X64RegisterState for UhHypercallHandler<'_, '_, T, SnpBacked> {
1944 fn rip(&mut self) -> u64 {
1945 self.vp.runner.vmsa(self.intercepted_vtl).rip()
1946 }
1947
1948 fn set_rip(&mut self, rip: u64) {
1949 self.vp.runner.vmsa_mut(self.intercepted_vtl).set_rip(rip);
1950 }
1951
1952 fn gp(&mut self, n: hv1_hypercall::X64HypercallRegister) -> u64 {
1953 let vmsa = self.vp.runner.vmsa(self.intercepted_vtl);
1954 match n {
1955 hv1_hypercall::X64HypercallRegister::Rax => vmsa.rax(),
1956 hv1_hypercall::X64HypercallRegister::Rcx => vmsa.rcx(),
1957 hv1_hypercall::X64HypercallRegister::Rdx => vmsa.rdx(),
1958 hv1_hypercall::X64HypercallRegister::Rbx => vmsa.rbx(),
1959 hv1_hypercall::X64HypercallRegister::Rsi => vmsa.rsi(),
1960 hv1_hypercall::X64HypercallRegister::Rdi => vmsa.rdi(),
1961 hv1_hypercall::X64HypercallRegister::R8 => vmsa.r8(),
1962 }
1963 }
1964
1965 fn set_gp(&mut self, n: hv1_hypercall::X64HypercallRegister, value: u64) {
1966 let mut vmsa = self.vp.runner.vmsa_mut(self.intercepted_vtl);
1967 match n {
1968 hv1_hypercall::X64HypercallRegister::Rax => vmsa.set_rax(value),
1969 hv1_hypercall::X64HypercallRegister::Rcx => vmsa.set_rcx(value),
1970 hv1_hypercall::X64HypercallRegister::Rdx => vmsa.set_rdx(value),
1971 hv1_hypercall::X64HypercallRegister::Rbx => vmsa.set_rbx(value),
1972 hv1_hypercall::X64HypercallRegister::Rsi => vmsa.set_rsi(value),
1973 hv1_hypercall::X64HypercallRegister::Rdi => vmsa.set_rdi(value),
1974 hv1_hypercall::X64HypercallRegister::R8 => vmsa.set_r8(value),
1975 }
1976 }
1977
1978 fn xmm(&mut self, n: usize) -> u128 {
1979 self.vp.runner.vmsa(self.intercepted_vtl).xmm_registers(n)
1980 }
1981
1982 fn set_xmm(&mut self, n: usize, value: u128) {
1983 self.vp
1984 .runner
1985 .vmsa_mut(self.intercepted_vtl)
1986 .set_xmm_registers(n, value);
1987 }
1988}
1989
1990impl AccessVpState for UhVpStateAccess<'_, '_, SnpBacked> {
1991 type Error = vp_state::Error;
1992
1993 fn caps(&self) -> &virt::x86::X86PartitionCapabilities {
1994 &self.vp.partition.caps
1995 }
1996
1997 fn commit(&mut self) -> Result<(), Self::Error> {
1998 Ok(())
1999 }
2000
2001 fn registers(&mut self) -> Result<vp::Registers, Self::Error> {
2002 let vmsa = self.vp.runner.vmsa(self.vtl);
2003
2004 Ok(vp::Registers {
2005 rax: vmsa.rax(),
2006 rcx: vmsa.rcx(),
2007 rdx: vmsa.rdx(),
2008 rbx: vmsa.rbx(),
2009 rsp: vmsa.rsp(),
2010 rbp: vmsa.rbp(),
2011 rsi: vmsa.rsi(),
2012 rdi: vmsa.rdi(),
2013 r8: vmsa.r8(),
2014 r9: vmsa.r9(),
2015 r10: vmsa.r10(),
2016 r11: vmsa.r11(),
2017 r12: vmsa.r12(),
2018 r13: vmsa.r13(),
2019 r14: vmsa.r14(),
2020 r15: vmsa.r15(),
2021 rip: vmsa.rip(),
2022 rflags: vmsa.rflags(),
2023 cs: virt_seg_from_snp(vmsa.cs()),
2024 ds: virt_seg_from_snp(vmsa.ds()),
2025 es: virt_seg_from_snp(vmsa.es()),
2026 fs: virt_seg_from_snp(vmsa.fs()),
2027 gs: virt_seg_from_snp(vmsa.gs()),
2028 ss: virt_seg_from_snp(vmsa.ss()),
2029 tr: virt_seg_from_snp(vmsa.tr()),
2030 ldtr: virt_seg_from_snp(vmsa.ldtr()),
2031 gdtr: virt_table_from_snp(vmsa.gdtr()),
2032 idtr: virt_table_from_snp(vmsa.idtr()),
2033 cr0: vmsa.cr0(),
2034 cr2: vmsa.cr2(),
2035 cr3: vmsa.cr3(),
2036 cr4: vmsa.cr4(),
2037 cr8: vmsa.v_intr_cntrl().tpr().into(),
2038 efer: vmsa.efer(),
2039 })
2040 }
2041
    fn set_registers(&mut self, value: &vp::Registers) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);

        let vp::Registers {
            rax,
            rcx,
            rdx,
            rbx,
            rsp,
            rbp,
            rsi,
            rdi,
            r8,
            r9,
            r10,
            r11,
            r12,
            r13,
            r14,
            r15,
            rip,
            rflags,
            cs,
            ds,
            es,
            fs,
            gs,
            ss,
            tr,
            ldtr,
            gdtr,
            idtr,
            cr0,
            cr2,
            cr3,
            cr4,
            cr8,
            efer,
        } = *value;
        vmsa.set_rax(rax);
        vmsa.set_rcx(rcx);
        vmsa.set_rdx(rdx);
        vmsa.set_rbx(rbx);
        vmsa.set_rsp(rsp);
        vmsa.set_rbp(rbp);
        vmsa.set_rsi(rsi);
        vmsa.set_rdi(rdi);
        vmsa.set_r8(r8);
        vmsa.set_r9(r9);
        vmsa.set_r10(r10);
        vmsa.set_r11(r11);
        vmsa.set_r12(r12);
        vmsa.set_r13(r13);
        vmsa.set_r14(r14);
        vmsa.set_r15(r15);
        vmsa.set_rip(rip);
        vmsa.set_rflags(rflags);
        vmsa.set_cs(virt_seg_to_snp(cs));
        vmsa.set_ds(virt_seg_to_snp(ds));
        vmsa.set_es(virt_seg_to_snp(es));
        vmsa.set_fs(virt_seg_to_snp(fs));
        vmsa.set_gs(virt_seg_to_snp(gs));
        vmsa.set_ss(virt_seg_to_snp(ss));
        vmsa.set_tr(virt_seg_to_snp(tr));
        vmsa.set_ldtr(virt_seg_to_snp(ldtr));
        vmsa.set_gdtr(virt_table_to_snp(gdtr));
        vmsa.set_idtr(virt_table_to_snp(idtr));
        vmsa.set_cr0(cr0);
        vmsa.set_cr2(cr2);
        vmsa.set_cr3(cr3);
        vmsa.set_cr4(cr4);
        vmsa.v_intr_cntrl_mut().set_tpr(cr8 as u8);
        vmsa.set_efer(SnpBacked::calculate_efer(efer, cr0));
        Ok(())
    }

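    // Only the MP state and the NMI-pending flag are tracked per VTL by this
    // backing; the remaining activity fields are reported as inactive here and
    // ignored by set_activity below.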
    fn activity(&mut self) -> Result<vp::Activity, Self::Error> {
        let lapic = &self.vp.backing.cvm.lapics[self.vtl];

        Ok(vp::Activity {
            mp_state: lapic.activity,
            nmi_pending: lapic.nmi_pending,
            nmi_masked: false,
            interrupt_shadow: false,
            pending_event: None,
            pending_interruption: None,
        })
    }

    fn set_activity(&mut self, value: &vp::Activity) -> Result<(), Self::Error> {
        let &vp::Activity {
            mp_state,
            nmi_pending,
            nmi_masked: _,
            interrupt_shadow: _,
            pending_event: _,
            pending_interruption: _,
        } = value;
        let lapic = &mut self.vp.backing.cvm.lapics[self.vtl];
        lapic.activity = mp_state;
        lapic.nmi_pending = nmi_pending;

        Ok(())
    }

    fn xsave(&mut self) -> Result<vp::Xsave, Self::Error> {
        Err(vp_state::Error::Unimplemented("xsave"))
    }

    fn set_xsave(&mut self, _value: &vp::Xsave) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("xsave"))
    }

    fn apic(&mut self) -> Result<vp::Apic, Self::Error> {
        Ok(self.vp.backing.cvm.lapics[self.vtl].lapic.save())
    }

    fn set_apic(&mut self, value: &vp::Apic) -> Result<(), Self::Error> {
        self.vp.backing.cvm.lapics[self.vtl]
            .lapic
            .restore(value)
            .map_err(vp_state::Error::InvalidApicBase)?;
        Ok(())
    }

    fn xcr(&mut self) -> Result<vp::Xcr0, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Xcr0 { value: vmsa.xcr0() })
    }

    fn set_xcr(&mut self, value: &vp::Xcr0) -> Result<(), Self::Error> {
        let vp::Xcr0 { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_xcr0(value);
        Ok(())
    }

    fn xss(&mut self) -> Result<vp::Xss, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Xss { value: vmsa.xss() })
    }

    fn set_xss(&mut self, value: &vp::Xss) -> Result<(), Self::Error> {
        let vp::Xss { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_xss(value);
        Ok(())
    }

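    // MTRRs are not tracked for SNP VPs: reads report a zeroed configuration
    // and writes are accepted but discarded.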
    fn mtrrs(&mut self) -> Result<vp::Mtrrs, Self::Error> {
        Ok(vp::Mtrrs {
            msr_mtrr_def_type: 0,
            fixed: [0; 11],
            variable: [0; 16],
        })
    }

    fn set_mtrrs(&mut self, _value: &vp::Mtrrs) -> Result<(), Self::Error> {
        Ok(())
    }

    fn pat(&mut self) -> Result<vp::Pat, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Pat { value: vmsa.pat() })
    }

    fn set_pat(&mut self, value: &vp::Pat) -> Result<(), Self::Error> {
        let vp::Pat { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_pat(value);
        Ok(())
    }

    fn virtual_msrs(&mut self) -> Result<vp::VirtualMsrs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);

        Ok(vp::VirtualMsrs {
            kernel_gs_base: vmsa.kernel_gs_base(),
            sysenter_cs: vmsa.sysenter_cs(),
            sysenter_eip: vmsa.sysenter_eip(),
            sysenter_esp: vmsa.sysenter_esp(),
            star: vmsa.star(),
            lstar: vmsa.lstar(),
            cstar: vmsa.cstar(),
            sfmask: vmsa.sfmask(),
        })
    }

    fn set_virtual_msrs(&mut self, value: &vp::VirtualMsrs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::VirtualMsrs {
            kernel_gs_base,
            sysenter_cs,
            sysenter_eip,
            sysenter_esp,
            star,
            lstar,
            cstar,
            sfmask,
        } = *value;
        vmsa.set_kernel_gs_base(kernel_gs_base);
        vmsa.set_sysenter_cs(sysenter_cs);
        vmsa.set_sysenter_eip(sysenter_eip);
        vmsa.set_sysenter_esp(sysenter_esp);
        vmsa.set_star(star);
        vmsa.set_lstar(lstar);
        vmsa.set_cstar(cstar);
        vmsa.set_sfmask(sfmask);

        Ok(())
    }

    fn debug_regs(&mut self) -> Result<vp::DebugRegisters, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::DebugRegisters {
            dr0: vmsa.dr0(),
            dr1: vmsa.dr1(),
            dr2: vmsa.dr2(),
            dr3: vmsa.dr3(),
            dr6: vmsa.dr6(),
            dr7: vmsa.dr7(),
        })
    }

    fn set_debug_regs(&mut self, value: &vp::DebugRegisters) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::DebugRegisters {
            dr0,
            dr1,
            dr2,
            dr3,
            dr6,
            dr7,
        } = *value;
        vmsa.set_dr0(dr0);
        vmsa.set_dr1(dr1);
        vmsa.set_dr2(dr2);
        vmsa.set_dr3(dr3);
        vmsa.set_dr6(dr6);
        vmsa.set_dr7(dr7);
        Ok(())
    }

    fn tsc(&mut self) -> Result<vp::Tsc, Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn set_tsc(&mut self, _value: &vp::Tsc) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn tsc_aux(&mut self) -> Result<vp::TscAux, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::TscAux {
            value: vmsa.tsc_aux() as u64,
        })
    }

    fn set_tsc_aux(&mut self, value: &vp::TscAux) -> Result<(), Self::Error> {
        let vp::TscAux { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_tsc_aux(value as u32);
        Ok(())
    }

    fn cet(&mut self) -> Result<vp::Cet, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Cet { scet: vmsa.s_cet() })
    }

    fn set_cet(&mut self, value: &vp::Cet) -> Result<(), Self::Error> {
        let vp::Cet { scet } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_s_cet(scet);
        Ok(())
    }

    fn cet_ss(&mut self) -> Result<vp::CetSs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::CetSs {
            ssp: vmsa.ssp(),
            interrupt_ssp_table_addr: vmsa.interrupt_ssp_table_addr(),
        })
    }

    fn set_cet_ss(&mut self, value: &vp::CetSs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::CetSs {
            ssp,
            interrupt_ssp_table_addr,
        } = *value;
        vmsa.set_ssp(ssp);
        vmsa.set_interrupt_ssp_table_addr(interrupt_ssp_table_addr);
        Ok(())
    }

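    // Synic state is not exposed through this interface for the SNP backing.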
    fn synic_msrs(&mut self) -> Result<vp::SyntheticMsrs, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn set_synic_msrs(&mut self, _value: &vp::SyntheticMsrs) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn synic_message_page(&mut self) -> Result<vp::SynicMessagePage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn set_synic_message_page(&mut self, _value: &vp::SynicMessagePage) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn synic_event_flags_page(&mut self) -> Result<vp::SynicEventFlagsPage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn set_synic_event_flags_page(
        &mut self,
        _value: &vp::SynicEventFlagsPage,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn synic_message_queues(&mut self) -> Result<vp::SynicMessageQueues, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn set_synic_message_queues(
        &mut self,
        _value: &vp::SynicMessageQueues,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn synic_timers(&mut self) -> Result<vp::SynicTimers, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }

    fn set_synic_timers(&mut self, _value: &vp::SynicTimers) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }
}

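// Completes an intercepted instruction by advancing RIP to the next-RIP value
// recorded in the VMSA and clearing the interrupt shadow.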
fn advance_to_next_instruction(vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>) {
    vmsa.set_rip(vmsa.next_rip());
    vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
}

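// MSR intercept handling for SNP. MSRs backed by VMSA fields are read and
// written directly; reads of unrecognized MSRs return MsrError::Unknown, while
// unrecognized writes are logged and ignored.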
impl UhProcessor<'_, SnpBacked> {
    fn read_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        vtl: GuestVtl,
    ) -> Result<u64, MsrError> {
        let vmsa = self.runner.vmsa(vtl);
        let value = match msr {
            x86defs::X64_MSR_FS_BASE => vmsa.fs().base,
            x86defs::X64_MSR_GS_BASE => vmsa.gs().base,
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.kernel_gs_base(),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.tsc_aux() as u64
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.spec_ctrl(),
            x86defs::X86X_MSR_U_CET => vmsa.u_cet(),
            x86defs::X86X_MSR_S_CET => vmsa.s_cet(),
            x86defs::X86X_MSR_PL0_SSP => vmsa.pl0_ssp(),
            x86defs::X86X_MSR_PL1_SSP => vmsa.pl1_ssp(),
            x86defs::X86X_MSR_PL2_SSP => vmsa.pl2_ssp(),
            x86defs::X86X_MSR_PL3_SSP => vmsa.pl3_ssp(),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.interrupt_ssp_table_addr(),
            x86defs::X86X_MSR_CR_PAT => vmsa.pat(),
            x86defs::X86X_MSR_EFER => vmsa.efer(),
            x86defs::X86X_MSR_STAR => vmsa.star(),
            x86defs::X86X_MSR_LSTAR => vmsa.lstar(),
            x86defs::X86X_MSR_CSTAR => vmsa.cstar(),
            x86defs::X86X_MSR_SFMASK => vmsa.sfmask(),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.sysenter_cs(),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.sysenter_esp(),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.sysenter_eip(),
            x86defs::X86X_MSR_XSS => vmsa.xss(),
            x86defs::X86X_AMD_MSR_VM_CR => 0,
            x86defs::X86X_MSR_TSC => safe_intrinsics::rdtsc(),
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => 0xffff_ffff,
            x86defs::X86X_MSR_MTRR_CAP => {
                0x400
            }
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {
                0
            }
            x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP
            | x86defs::X86X_MSR_MCG_STATUS => 0,

            hvdef::HV_X64_MSR_GUEST_IDLE => {
                self.backing.cvm.lapics[vtl].activity = MpState::Idle;
                let mut vmsa = self.runner.vmsa_mut(vtl);
                vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
                0
            }
            _ => return Err(MsrError::Unknown),
        };
        Ok(value)
    }

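    // Writes to the FS/GS base MSRs rebuild the full SevSelector, preserving
    // the existing attributes, selector, and limit while replacing the base.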
    fn write_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        value: u64,
        vtl: GuestVtl,
    ) -> Result<(), MsrError> {
        let mut vmsa = self.runner.vmsa_mut(vtl);
        match msr {
            x86defs::X64_MSR_FS_BASE => {
                let fs = vmsa.fs();
                vmsa.set_fs(SevSelector {
                    attrib: fs.attrib,
                    selector: fs.selector,
                    limit: fs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_GS_BASE => {
                let gs = vmsa.gs();
                vmsa.set_gs(SevSelector {
                    attrib: gs.attrib,
                    selector: gs.selector,
                    limit: gs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.set_kernel_gs_base(value),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.set_tsc_aux(value as u32);
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.set_spec_ctrl(value),
            x86defs::X86X_MSR_U_CET => vmsa.set_u_cet(value),
            x86defs::X86X_MSR_S_CET => vmsa.set_s_cet(value),
            x86defs::X86X_MSR_PL0_SSP => vmsa.set_pl0_ssp(value),
            x86defs::X86X_MSR_PL1_SSP => vmsa.set_pl1_ssp(value),
            x86defs::X86X_MSR_PL2_SSP => vmsa.set_pl2_ssp(value),
            x86defs::X86X_MSR_PL3_SSP => vmsa.set_pl3_ssp(value),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.set_interrupt_ssp_table_addr(value),

            x86defs::X86X_MSR_CR_PAT => vmsa.set_pat(value),
            x86defs::X86X_MSR_EFER => vmsa.set_efer(SnpBacked::calculate_efer(value, vmsa.cr0())),

            x86defs::X86X_MSR_STAR => vmsa.set_star(value),
            x86defs::X86X_MSR_LSTAR => vmsa.set_lstar(value),
            x86defs::X86X_MSR_CSTAR => vmsa.set_cstar(value),
            x86defs::X86X_MSR_SFMASK => vmsa.set_sfmask(value),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.set_sysenter_cs(value),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.set_sysenter_esp(value),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.set_sysenter_eip(value),
            x86defs::X86X_MSR_XSS => vmsa.set_xss(value),

            x86defs::X86X_MSR_TSC => {}
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => {}
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {}

            x86defs::X86X_AMD_MSR_VM_CR
            | x86defs::X86X_MSR_MTRR_CAP
            | x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP => return Err(MsrError::InvalidAccess),

            x86defs::X86X_MSR_MCG_STATUS => {
                if x86defs::X86xMcgStatusRegister::from(value).reserved0() != 0 {
                    return Err(MsrError::InvalidAccess);
                }
            }
            _ => {
                tracing::debug!(msr, value, "unknown cvm msr write");
            }
        }
        Ok(())
    }
}

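// VTL switch support: advancing the instruction pointer reuses the hypercall
// register I/O helper, and failures are reported to the guest by injecting #UD
// through the VMSA event-injection field.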
impl<T: CpuIo> hv1_hypercall::VtlSwitchOps for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn advance_ip(&mut self) {
        let is_64bit = self.vp.long_mode(self.intercepted_vtl);
        let mut io = hv1_hypercall::X64RegisterIo::new(self, is_64bit);
        io.advance_ip();
    }

    fn inject_invalid_opcode_fault(&mut self) {
        self.vp
            .runner
            .vmsa_mut(self.intercepted_vtl)
            .set_event_inject(
                SevEventInjectInfo::new()
                    .with_valid(true)
                    .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                    .with_vector(x86defs::Exception::INVALID_OPCODE.0),
            );
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressList for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn flush_virtual_address_list(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        hv1_hypercall::FlushVirtualAddressListEx::flush_virtual_address_list_ex(
            self,
            processor_set,
            flags,
            gva_ranges,
        )
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressListEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_list_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        self.hcvm_validate_flush_inputs(processor_set, flags, true)
            .map_err(|e| (e, 0))?;

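        // Large requests (more than 16 ranges, or any range spanning more than
        // 16 additional pages) are handled as a full address-space flush.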
        if gva_ranges.len() > 16
            || gva_ranges.iter().any(|range| if flags.use_extended_range_format() {
                range.as_extended().additional_pages()
            } else {
                range.as_simple().additional_pages()
            } > 16)
        {
            self.do_flush_virtual_address_space(processor_set, flags);
        } else {
            self.do_flush_virtual_address_list(flags, gva_ranges);
        }

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);
        Ok(())
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpace
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        hv1_hypercall::FlushVirtualAddressSpaceEx::flush_virtual_address_space_ex(
            self,
            processor_set,
            flags,
        )
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpaceEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        self.hcvm_validate_flush_inputs(processor_set, flags, false)?;

        self.do_flush_virtual_address_space(processor_set, flags);

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);
        Ok(())
    }
}

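// TLB flush helpers built on the SEV INVLPGB/TLBSYNC instructions, issued
// through the HCL.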
impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn do_flush_virtual_address_list(&mut self, flags: HvFlushFlags, gva_ranges: &[HvGvaRange]) {
        for range in gva_ranges {
            let mut rax = SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_va_valid(true)
                .with_global(!flags.non_global_mappings_only());
            let mut ecx = SevInvlpgbEcx::new();
            let mut count;
            let mut gpn;

            if flags.use_extended_range_format() && range.as_extended().large_page() {
                ecx.set_large_page(true);
                if range.as_extended_large_page().page_size() {
                    let range = range.as_extended_large_page();
                    count = range.additional_pages();
                    gpn = range.gva_large_page_number();
                } else {
                    let range = range.as_extended();
                    count = range.additional_pages();
                    gpn = range.gva_page_number();
                }
            } else {
                let range = range.as_simple();
                count = range.additional_pages();
                gpn = range.gva_page_number();
            }
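            // INVLPGB flushes at most `invlpgb_count_max` additional pages per
            // invocation, so long ranges are issued in batches; the TLBSYNC
            // after the loop waits for the flushes to complete.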
            count += 1;
            while count > 0 {
                rax.set_virtual_page_number(gpn);
                ecx.set_additional_count(std::cmp::min(
                    count - 1,
                    self.vp.shared.invlpgb_count_max.into(),
                ));

                let edx = SevInvlpgbEdx::new();
                self.vp
                    .partition
                    .hcl
                    .invlpgb(rax.into(), edx.into(), ecx.into());

                count -= ecx.additional_count() + 1;
                gpn += ecx.additional_count() + 1;
            }
        }

        self.vp.partition.hcl.tlbsync();
    }

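    // If the request targets only the current VP and only non-global mappings,
    // the VMSA PCPU ID is cleared instead of broadcasting an INVLPGB;
    // otherwise an INVLPGB covering the requested scope is issued and TLBSYNC
    // waits for it to complete everywhere.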
    fn do_flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) {
        let only_self = [self.vp.vp_index().index()].into_iter().eq(processor_set);
        if only_self && flags.non_global_mappings_only() {
            self.vp.runner.vmsa_mut(self.intercepted_vtl).set_pcpu_id(0);
        } else {
            self.vp.partition.hcl.invlpgb(
                SevInvlpgbRax::new()
                    .with_asid_valid(true)
                    .with_global(!flags.non_global_mappings_only())
                    .into(),
                SevInvlpgbEdx::new().into(),
                SevInvlpgbEcx::new().into(),
            );
            self.vp.partition.hcl.tlbsync();
        }
    }
}

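// TLB flush/lock access that may be used without an associated VP; when
// `vp_index` is None, the TLB-lock wait in set_wait_for_tlb_locks is skipped.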
struct SnpTlbLockFlushAccess<'a> {
    vp_index: Option<VpIndex>,
    partition: &'a UhPartitionInner,
    shared: &'a SnpBackedShared,
}

impl TlbFlushLockAccess for SnpTlbLockFlushAccess<'_> {
    fn flush(&mut self, vtl: GuestVtl) {
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        self.set_wait_for_tlb_locks(vtl);
    }

    fn flush_entire(&mut self) {
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
            self.set_wait_for_tlb_locks(vtl);
        }
    }

    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl) {
        if let Some(vp_index) = self.vp_index {
            hardware_cvm::tlb_lock::TlbLockAccess {
                vp_index,
                cvm_partition: &self.shared.cvm,
            }
            .set_wait_for_tlb_locks(vtl);
        }
    }
}

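// Per-VP saved-state support is not implemented for the SNP backing: save
// fails with NotSupported and restore is unreachable.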
mod save_restore {
    use super::SnpBacked;
    use super::UhProcessor;
    use vmcore::save_restore::RestoreError;
    use vmcore::save_restore::SaveError;
    use vmcore::save_restore::SaveRestore;
    use vmcore::save_restore::SavedStateNotSupported;

    impl SaveRestore for UhProcessor<'_, SnpBacked> {
        type SavedState = SavedStateNotSupported;

        fn save(&mut self) -> Result<Self::SavedState, SaveError> {
            Err(SaveError::NotSupported)
        }

        fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
            match state {}
        }
    }
}