1use super::BackingParams;
7use super::BackingPrivate;
8use super::BackingSharedParams;
9use super::HardwareIsolatedBacking;
10use super::InterceptMessageOptionalState;
11use super::InterceptMessageState;
12use super::UhEmulationState;
13use super::UhRunVpError;
14use super::hardware_cvm;
15use super::vp_state;
16use super::vp_state::UhVpStateAccess;
17use crate::BackingShared;
18use crate::Error;
19use crate::GuestVtl;
20use crate::TlbFlushLockAccess;
21use crate::UhCvmPartitionState;
22use crate::UhCvmVpState;
23use crate::UhPartitionInner;
24use crate::UhPartitionNewParams;
25use crate::WakeReason;
26use crate::processor::UhHypercallHandler;
27use crate::processor::UhProcessor;
28use crate::processor::hardware_cvm::apic::ApicBacking;
29use cvm_tracing::CVM_ALLOWED;
30use cvm_tracing::CVM_CONFIDENTIAL;
31use hcl::vmsa::VmsaWrapper;
32use hv1_emulator::hv::ProcessorVtlHv;
33use hv1_emulator::synic::ProcessorSynic;
34use hv1_hypercall::HvRepResult;
35use hv1_hypercall::HypercallIo;
36use hv1_structs::ProcessorSet;
37use hv1_structs::VtlArray;
38use hvdef::HV_PAGE_SIZE;
39use hvdef::HvDeliverabilityNotificationsRegister;
40use hvdef::HvError;
41use hvdef::HvMessageType;
42use hvdef::HvX64PendingExceptionEvent;
43use hvdef::HvX64RegisterName;
44use hvdef::Vtl;
45use hvdef::hypercall::Control;
46use hvdef::hypercall::HvFlushFlags;
47use hvdef::hypercall::HvGvaRange;
48use hvdef::hypercall::HypercallOutput;
49use inspect::Inspect;
50use inspect::InspectMut;
51use inspect_counters::Counter;
52use virt::Processor;
53use virt::VpHaltReason;
54use virt::VpIndex;
55use virt::io::CpuIo;
56use virt::state::StateElement;
57use virt::vp;
58use virt::vp::AccessVpState;
59use virt::vp::MpState;
60use virt::x86::MsrError;
61use virt::x86::MsrErrorExt;
62use virt::x86::SegmentRegister;
63use virt::x86::TableRegister;
64use virt_support_apic::ApicClient;
65use virt_support_x86emu::emulate::EmulatorSupport as X86EmulatorSupport;
66use virt_support_x86emu::emulate::emulate_io;
67use virt_support_x86emu::emulate::emulate_translate_gva;
68use virt_support_x86emu::translate::TranslationRegisters;
69use vmcore::vmtime::VmTimeAccess;
70use x86defs::RFlags;
71use x86defs::cpuid::CpuidFunction;
72use x86defs::snp::SevEventInjectInfo;
73use x86defs::snp::SevExitCode;
74use x86defs::snp::SevInvlpgbEcx;
75use x86defs::snp::SevInvlpgbEdx;
76use x86defs::snp::SevInvlpgbRax;
77use x86defs::snp::SevIoAccessInfo;
78use x86defs::snp::SevSelector;
79use x86defs::snp::SevStatusMsr;
80use x86defs::snp::SevVmsa;
81use x86defs::snp::Vmpl;
82use zerocopy::FromZeros;
83use zerocopy::IntoBytes;
84
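/// Per-processor backing state for hardware-isolated (SEV-SNP) partitions.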
85#[derive(InspectMut)]
87pub struct SnpBacked {
88 #[inspect(hex)]
89 hv_sint_notifications: u16,
90 general_stats: VtlArray<GeneralStats, 2>,
91 exit_stats: VtlArray<ExitStats, 2>,
92 #[inspect(flatten)]
93 cvm: UhCvmVpState,
94}
95
96#[derive(Inspect, Default)]
97struct GeneralStats {
98 guest_busy: Counter,
99 int_ack: Counter,
100 synth_int: Counter,
101}
102
103#[derive(Inspect, Default)]
104struct ExitStats {
105 automatic_exit: Counter,
106 cpuid: Counter,
107 hlt: Counter,
108 intr: Counter,
109 invd: Counter,
110 invlpgb: Counter,
111 ioio: Counter,
112 msr_read: Counter,
113 msr_write: Counter,
114 npf: Counter,
115 npf_no_intercept: Counter,
116 npf_spurious: Counter,
117 rdpmc: Counter,
118 vmgexit: Counter,
119 vmmcall: Counter,
120 xsetbv: Counter,
121 excp_db: Counter,
122 secure_reg_write: Counter,
123}
124
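/// Indexes of the per-VP direct overlay pages (synic message page, synic event
/// flags page, and GHCB) within the shared page allocation.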
125enum UhDirectOverlay {
126 Sipp,
127 Sifp,
128 Ghcb,
129 Count,
130}
131
132impl SnpBacked {
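    /// Computes the effective EFER value: LMA is derived from LME and CR0.PG,
    /// and SVME is always forced on.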
133 fn calculate_efer(efer: u64, cr0: u64) -> u64 {
135 let new_efer = if efer & x86defs::X64_EFER_LME != 0 && cr0 & x86defs::X64_CR0_PG != 0 {
136 efer | x86defs::X64_EFER_LMA
137 } else {
138 efer & !x86defs::X64_EFER_LMA
139 };
140 new_efer | x86defs::X64_EFER_SVME
141 }
142
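    /// Returns the number of shared pages required per cpu.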
143 pub fn shared_pages_required_per_cpu() -> u64 {
146 UhDirectOverlay::Count as u64
147 }
148}
149
150impl HardwareIsolatedBacking for SnpBacked {
151 fn cvm_state(&self) -> &UhCvmVpState {
152 &self.cvm
153 }
154
155 fn cvm_state_mut(&mut self) -> &mut UhCvmVpState {
156 &mut self.cvm
157 }
158
159 fn cvm_partition_state(shared: &Self::Shared) -> &UhCvmPartitionState {
160 &shared.cvm
161 }
162
163 fn switch_vtl(this: &mut UhProcessor<'_, Self>, source_vtl: GuestVtl, target_vtl: GuestVtl) {
164 let [vmsa0, vmsa1] = this.runner.vmsas_mut();
165 let (current_vmsa, mut target_vmsa) = match (source_vtl, target_vtl) {
166 (GuestVtl::Vtl0, GuestVtl::Vtl1) => (vmsa0, vmsa1),
167 (GuestVtl::Vtl1, GuestVtl::Vtl0) => (vmsa1, vmsa0),
168 _ => unreachable!(),
169 };
170
171 target_vmsa.set_rax(current_vmsa.rax());
172 target_vmsa.set_rbx(current_vmsa.rbx());
173 target_vmsa.set_rcx(current_vmsa.rcx());
174 target_vmsa.set_rdx(current_vmsa.rdx());
175 target_vmsa.set_rbp(current_vmsa.rbp());
176 target_vmsa.set_rsi(current_vmsa.rsi());
177 target_vmsa.set_rdi(current_vmsa.rdi());
178 target_vmsa.set_r8(current_vmsa.r8());
179 target_vmsa.set_r9(current_vmsa.r9());
180 target_vmsa.set_r10(current_vmsa.r10());
181 target_vmsa.set_r11(current_vmsa.r11());
182 target_vmsa.set_r12(current_vmsa.r12());
183 target_vmsa.set_r13(current_vmsa.r13());
184 target_vmsa.set_r14(current_vmsa.r14());
185 target_vmsa.set_r15(current_vmsa.r15());
186 target_vmsa.set_xcr0(current_vmsa.xcr0());
187
188 target_vmsa.set_cr2(current_vmsa.cr2());
189
190 target_vmsa.set_dr0(current_vmsa.dr0());
192 target_vmsa.set_dr1(current_vmsa.dr1());
193 target_vmsa.set_dr2(current_vmsa.dr2());
194 target_vmsa.set_dr3(current_vmsa.dr3());
195
196 target_vmsa.set_pl0_ssp(current_vmsa.pl0_ssp());
197 target_vmsa.set_pl1_ssp(current_vmsa.pl1_ssp());
198 target_vmsa.set_pl2_ssp(current_vmsa.pl2_ssp());
199 target_vmsa.set_pl3_ssp(current_vmsa.pl3_ssp());
200 target_vmsa.set_u_cet(current_vmsa.u_cet());
201
        target_vmsa.set_x87_registers(&current_vmsa.x87_registers());
203
204 let vec_reg_count = 16;
205 for i in 0..vec_reg_count {
206 target_vmsa.set_xmm_registers(i, current_vmsa.xmm_registers(i));
207 target_vmsa.set_ymm_registers(i, current_vmsa.ymm_registers(i));
208 }
209
210 this.backing.cvm_state_mut().exit_vtl = target_vtl;
211 }
212
213 fn translation_registers(
214 &self,
215 this: &UhProcessor<'_, Self>,
216 vtl: GuestVtl,
217 ) -> TranslationRegisters {
218 let vmsa = this.runner.vmsa(vtl);
219 TranslationRegisters {
220 cr0: vmsa.cr0(),
221 cr4: vmsa.cr4(),
222 efer: vmsa.efer(),
223 cr3: vmsa.cr3(),
224 rflags: vmsa.rflags(),
225 ss: virt_seg_from_snp(vmsa.ss()).into(),
226 encryption_mode: virt_support_x86emu::translate::EncryptionMode::Vtom(
227 this.partition.caps.vtom.unwrap(),
228 ),
229 }
230 }
231
232 fn tlb_flush_lock_access<'a>(
233 vp_index: Option<VpIndex>,
234 partition: &'a UhPartitionInner,
235 shared: &'a Self::Shared,
236 ) -> impl TlbFlushLockAccess + 'a {
237 SnpTlbLockFlushAccess {
238 vp_index,
239 partition,
240 shared,
241 }
242 }
243
244 fn pending_event_vector(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> Option<u8> {
245 let event_inject = this.runner.vmsa(vtl).event_inject();
246 if event_inject.valid() {
247 Some(event_inject.vector())
248 } else {
249 None
250 }
251 }
252
253 fn set_pending_exception(
254 this: &mut UhProcessor<'_, Self>,
255 vtl: GuestVtl,
256 event: HvX64PendingExceptionEvent,
257 ) {
258 let inject_info = SevEventInjectInfo::new()
259 .with_valid(true)
260 .with_deliver_error_code(event.deliver_error_code())
261 .with_error_code(event.error_code())
262 .with_vector(event.vector().try_into().unwrap())
263 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT);
264
265 this.runner.vmsa_mut(vtl).set_event_inject(inject_info);
266 }
267
268 fn cr0(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
269 this.runner.vmsa(vtl).cr0()
270 }
271
272 fn cr4(this: &UhProcessor<'_, Self>, vtl: GuestVtl) -> u64 {
273 this.runner.vmsa(vtl).cr4()
274 }
275
276 fn intercept_message_state(
277 this: &UhProcessor<'_, Self>,
278 vtl: GuestVtl,
279 include_optional_state: bool,
280 ) -> InterceptMessageState {
281 let vmsa = this.runner.vmsa(vtl);
282
283 InterceptMessageState {
284 instruction_length_and_cr8: (vmsa.next_rip() - vmsa.rip()) as u8,
285 cpl: vmsa.cpl(),
286 efer_lma: vmsa.efer() & x86defs::X64_EFER_LMA != 0,
287 cs: virt_seg_from_snp(vmsa.cs()).into(),
288 rip: vmsa.rip(),
289 rflags: vmsa.rflags(),
290 rax: vmsa.rax(),
291 rdx: vmsa.rdx(),
292 optional: if include_optional_state {
293 Some(InterceptMessageOptionalState {
294 ds: virt_seg_from_snp(vmsa.ds()).into(),
295 es: virt_seg_from_snp(vmsa.es()).into(),
296 })
297 } else {
298 None
299 },
300 rcx: vmsa.rcx(),
301 rsi: vmsa.rsi(),
302 rdi: vmsa.rdi(),
303 }
304 }
305
306 fn cr_intercept_registration(
307 this: &mut UhProcessor<'_, Self>,
308 intercept_control: hvdef::HvRegisterCrInterceptControl,
309 ) {
310 this.runner
315 .set_vp_registers_hvcall(
316 Vtl::Vtl1,
317 [(
318 HvX64RegisterName::CrInterceptControl,
319 u64::from(intercept_control),
320 )],
321 )
322 .expect("setting intercept control succeeds");
323 }
324
325 fn is_interrupt_pending(
326 this: &mut UhProcessor<'_, Self>,
327 vtl: GuestVtl,
328 check_rflags: bool,
329 dev: &impl CpuIo,
330 ) -> bool {
331 let vmsa = this.runner.vmsa_mut(vtl);
332 if vmsa.event_inject().valid()
333 && vmsa.event_inject().interruption_type() == x86defs::snp::SEV_INTR_TYPE_NMI
334 {
335 return true;
336 }
337
338 let vmsa_priority = vmsa.v_intr_cntrl().priority() as u32;
339 let lapic = &mut this.backing.cvm.lapics[vtl].lapic;
340 let ppr = lapic
341 .access(&mut SnpApicClient {
342 partition: this.partition,
343 vmsa,
344 dev,
345 vmtime: &this.vmtime,
346 vtl,
347 })
348 .get_ppr();
349 let ppr_priority = ppr >> 4;
350 if vmsa_priority <= ppr_priority {
351 return false;
352 }
353
354 let vmsa = this.runner.vmsa_mut(vtl);
355 if (check_rflags && !RFlags::from_bits(vmsa.rflags()).interrupt_enable())
356 || vmsa.v_intr_cntrl().intr_shadow()
357 || !vmsa.v_intr_cntrl().irq()
358 {
359 return false;
360 }
361
362 true
363 }
364
365 fn untrusted_synic_mut(&mut self) -> Option<&mut ProcessorSynic> {
366 None
367 }
368}
369
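/// Partition-wide state shared by all SNP-backed processors.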
370#[derive(Inspect)]
372pub struct SnpBackedShared {
373 #[inspect(flatten)]
374 pub(crate) cvm: UhCvmPartitionState,
375 invlpgb_count_max: u16,
376 tsc_aux_virtualized: bool,
377 #[inspect(debug)]
378 sev_status: SevStatusMsr,
379}
380
381impl SnpBackedShared {
382 pub(crate) fn new(
383 _partition_params: &UhPartitionNewParams<'_>,
384 params: BackingSharedParams<'_>,
385 ) -> Result<Self, Error> {
386 let cvm = params.cvm_state.unwrap();
387 let invlpgb_count_max = x86defs::cpuid::ExtendedAddressSpaceSizesEdx::from(
388 params
389 .cpuid
390 .result(CpuidFunction::ExtendedAddressSpaceSizes.0, 0, &[0; 4])[3],
391 )
392 .invlpgb_count_max();
393 let tsc_aux_virtualized = x86defs::cpuid::ExtendedSevFeaturesEax::from(
394 params
395 .cpuid
396 .result(CpuidFunction::ExtendedSevFeatures.0, 0, &[0; 4])[0],
397 )
398 .tsc_aux_virtualization();
399
400 let msr = crate::MsrDevice::new(0).expect("open msr");
403 let sev_status =
404 SevStatusMsr::from(msr.read_msr(x86defs::X86X_AMD_MSR_SEV).expect("read msr"));
405 tracing::info!(CVM_ALLOWED, ?sev_status, "SEV status");
406
407 Ok(Self {
408 sev_status,
409 invlpgb_count_max,
410 tsc_aux_virtualized,
411 cvm,
412 })
413 }
414}
415
416#[expect(private_interfaces)]
417impl BackingPrivate for SnpBacked {
418 type HclBacking<'snp> = hcl::ioctl::snp::Snp<'snp>;
419 type Shared = SnpBackedShared;
420 type EmulationCache = ();
421
422 fn shared(shared: &BackingShared) -> &Self::Shared {
423 let BackingShared::Snp(shared) = shared else {
424 unreachable!()
425 };
426 shared
427 }
428
429 fn new(params: BackingParams<'_, '_, Self>, shared: &SnpBackedShared) -> Result<Self, Error> {
430 Ok(Self {
431 hv_sint_notifications: 0,
432 general_stats: VtlArray::from_fn(|_| Default::default()),
433 exit_stats: VtlArray::from_fn(|_| Default::default()),
434 cvm: UhCvmVpState::new(
435 &shared.cvm,
436 params.partition,
437 params.vp_info,
438 UhDirectOverlay::Count as usize,
439 )?,
440 })
441 }
442
443 fn init(this: &mut UhProcessor<'_, Self>) {
444 let sev_status = this.vp().shared.sev_status;
445 for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
446 init_vmsa(
447 &mut this.runner.vmsa_mut(vtl),
448 vtl,
449 this.partition.caps.vtom,
450 sev_status,
451 );
452
453 let registers = vp::Registers::at_reset(&this.partition.caps, &this.inner.vp_info);
455 this.access_state(vtl.into())
            .set_registers(&registers)
457 .expect("Resetting to architectural state should succeed");
458
459 let debug_registers =
460 vp::DebugRegisters::at_reset(&this.partition.caps, &this.inner.vp_info);
461
462 this.access_state(vtl.into())
463 .set_debug_regs(&debug_registers)
464 .expect("Resetting to architectural state should succeed");
465
466 let xcr0 = vp::Xcr0::at_reset(&this.partition.caps, &this.inner.vp_info);
467 this.access_state(vtl.into())
468 .set_xcr(&xcr0)
469 .expect("Resetting to architectural state should succeed");
470
471 let cache_control = vp::Mtrrs::at_reset(&this.partition.caps, &this.inner.vp_info);
472 this.access_state(vtl.into())
473 .set_mtrrs(&cache_control)
474 .expect("Resetting to architectural state should succeed");
475 }
476
477 let pfns = &this.backing.cvm.direct_overlay_handle.pfns();
480 let values: &[(HvX64RegisterName, u64); 3] = &[
481 (
482 HvX64RegisterName::Sipp,
483 hvdef::HvSynicSimpSiefp::new()
484 .with_enabled(true)
485 .with_base_gpn(pfns[UhDirectOverlay::Sipp as usize])
486 .into(),
487 ),
488 (
489 HvX64RegisterName::Sifp,
490 hvdef::HvSynicSimpSiefp::new()
491 .with_enabled(true)
492 .with_base_gpn(pfns[UhDirectOverlay::Sifp as usize])
493 .into(),
494 ),
495 (
496 HvX64RegisterName::Ghcb,
497 x86defs::snp::GhcbMsr::new()
498 .with_info(x86defs::snp::GhcbInfo::REGISTER_REQUEST.0)
499 .with_pfn(pfns[UhDirectOverlay::Ghcb as usize])
500 .into(),
501 ),
502 ];
503
504 this.runner
505 .set_vp_registers_hvcall(Vtl::Vtl0, values)
506 .expect("set_vp_registers hypercall for direct overlays should succeed");
507 }
508
509 type StateAccess<'p, 'a>
510 = UhVpStateAccess<'a, 'p, Self>
511 where
512 Self: 'a + 'p,
513 'p: 'a;
514
515 fn access_vp_state<'a, 'p>(
516 this: &'a mut UhProcessor<'p, Self>,
517 vtl: GuestVtl,
518 ) -> Self::StateAccess<'p, 'a> {
519 UhVpStateAccess::new(this, vtl)
520 }
521
522 async fn run_vp(
523 this: &mut UhProcessor<'_, Self>,
524 dev: &impl CpuIo,
525 _stop: &mut virt::StopVp<'_>,
526 ) -> Result<(), VpHaltReason<UhRunVpError>> {
527 this.run_vp_snp(dev).await
528 }
529
530 fn poll_apic(
531 this: &mut UhProcessor<'_, Self>,
532 vtl: GuestVtl,
533 scan_irr: bool,
534 ) -> Result<(), UhRunVpError> {
535 this.runner.vmsa_mut(vtl).v_intr_cntrl_mut().set_irq(false);
537
538 hardware_cvm::apic::poll_apic_core(this, vtl, scan_irr)
539 }
540
541 fn request_extint_readiness(_this: &mut UhProcessor<'_, Self>) {
542 unreachable!("extint managed through software apic")
543 }
544
545 fn request_untrusted_sint_readiness(this: &mut UhProcessor<'_, Self>, sints: u16) {
546 let sints = this.backing.hv_sint_notifications | sints;
547 if this.backing.hv_sint_notifications == sints {
548 return;
549 }
550 let notifications = HvDeliverabilityNotificationsRegister::new().with_sints(sints);
551 tracing::trace!(?notifications, "setting notifications");
552 this.runner
553 .set_vp_register(
554 GuestVtl::Vtl0,
555 HvX64RegisterName::DeliverabilityNotifications,
556 u64::from(notifications).into(),
557 )
            .expect("requesting deliverability is not a fallible operation");
559
560 this.backing.hv_sint_notifications = sints;
561 }
562
563 fn inspect_extra(this: &mut UhProcessor<'_, Self>, resp: &mut inspect::Response<'_>) {
564 let vtl0_vmsa = this.runner.vmsa(GuestVtl::Vtl0);
565 let vtl1_vmsa = if this.backing.cvm_state().vtl1.is_some() {
566 Some(this.runner.vmsa(GuestVtl::Vtl1))
567 } else {
568 None
569 };
570
571 let add_vmsa_inspect = |req: inspect::Request<'_>, vmsa: VmsaWrapper<'_, &SevVmsa>| {
572 req.respond()
573 .hex("guest_error_code", vmsa.guest_error_code())
574 .hex("exit_info1", vmsa.exit_info1())
575 .hex("exit_info2", vmsa.exit_info2())
576 .hex("v_intr_cntrl", u64::from(vmsa.v_intr_cntrl()));
577 };
578
579 resp.child("vmsa_additional", |req| {
580 req.respond()
581 .child("vtl0", |inner_req| add_vmsa_inspect(inner_req, vtl0_vmsa))
582 .child("vtl1", |inner_req| {
583 if let Some(vtl1_vmsa) = vtl1_vmsa {
584 add_vmsa_inspect(inner_req, vtl1_vmsa);
585 }
586 });
587 });
588 }
589
590 fn hv(&self, vtl: GuestVtl) -> Option<&ProcessorVtlHv> {
591 Some(&self.cvm.hv[vtl])
592 }
593
594 fn hv_mut(&mut self, vtl: GuestVtl) -> Option<&mut ProcessorVtlHv> {
595 Some(&mut self.cvm.hv[vtl])
596 }
597
598 fn handle_vp_start_enable_vtl_wake(
599 this: &mut UhProcessor<'_, Self>,
600 vtl: GuestVtl,
601 ) -> Result<(), UhRunVpError> {
602 this.hcvm_handle_vp_start_enable_vtl(vtl)
603 }
604
605 fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
606 this.hcvm_vtl1_inspectable()
607 }
608
609 fn process_interrupts(
610 this: &mut UhProcessor<'_, Self>,
611 scan_irr: VtlArray<bool, 2>,
612 first_scan_irr: &mut bool,
613 dev: &impl CpuIo,
614 ) -> Result<bool, VpHaltReason<UhRunVpError>> {
615 this.cvm_process_interrupts(scan_irr, first_scan_irr, dev)
616 }
617}
618
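// Conversions between the generic segment/table register types and the packed
// SEV VMSA selector encoding. The VMSA attribute word has no slot for bits
// 8..=11 of the 16-bit x86 attribute encoding, so those bits are dropped.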
619fn virt_seg_to_snp(val: SegmentRegister) -> SevSelector {
620 SevSelector {
621 selector: val.selector,
622 attrib: (val.attributes & 0xFF) | ((val.attributes >> 4) & 0xF00),
623 limit: val.limit,
624 base: val.base,
625 }
626}
627
628fn virt_table_to_snp(val: TableRegister) -> SevSelector {
629 SevSelector {
630 limit: val.limit as u32,
631 base: val.base,
632 ..FromZeros::new_zeroed()
633 }
634}
635
636fn virt_seg_from_snp(selector: SevSelector) -> SegmentRegister {
637 SegmentRegister {
638 base: selector.base,
639 limit: selector.limit,
640 selector: selector.selector,
641 attributes: (selector.attrib & 0xFF) | ((selector.attrib & 0xF00) << 4),
642 }
643}
644
645fn virt_table_from_snp(selector: SevSelector) -> TableRegister {
646 TableRegister {
647 limit: selector.limit as u16,
648 base: selector.base,
649 }
650}
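
// Illustrative only (not part of the original module): a minimal sketch of unit
// tests for the pure helpers above, assuming the standard `cargo test` harness.
// The constants and field names are the ones already used in this file.
#[cfg(test)]
mod helper_tests {
    use super::*;

    #[test]
    fn segment_attributes_round_trip() {
        // Typical 64-bit code segment attributes: present code segment with the
        // long-mode and granularity bits set.
        let attributes: u16 = 0xa09b;
        let snp = virt_seg_to_snp(SegmentRegister {
            base: 0,
            limit: 0xffff_ffff,
            selector: 0x08,
            attributes,
        });
        // Bits 8..=11 of the x86 attribute word have no slot in the packed SEV
        // encoding; everything else survives the round trip.
        assert_eq!(virt_seg_from_snp(snp).attributes, attributes & !0x0f00);
    }

    #[test]
    fn efer_forces_svme_and_derives_lma() {
        let efer = SnpBacked::calculate_efer(x86defs::X64_EFER_LME, x86defs::X64_CR0_PG);
        assert_ne!(efer & x86defs::X64_EFER_LMA, 0);
        assert_ne!(efer & x86defs::X64_EFER_SVME, 0);

        // Without CR0.PG, LMA is cleared even if the caller passed it in.
        let efer = SnpBacked::calculate_efer(x86defs::X64_EFER_LME | x86defs::X64_EFER_LMA, 0);
        assert_eq!(efer & x86defs::X64_EFER_LMA, 0);
    }
}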
651
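/// Initializes a VMSA for the given VTL: resets it, applies the SEV feature
/// bits derived from the SEV status MSR (vTOM, BTB isolation, alternate
/// injection, reflect-#VC, debug swap), and maps VTL0/VTL1 to VMPL2/VMPL1.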
652fn init_vmsa(
653 vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>,
654 vtl: GuestVtl,
655 vtom: Option<u64>,
656 sev_status: SevStatusMsr,
657) {
658 vmsa.reset(sev_status.vmsa_reg_prot());
662 vmsa.sev_features_mut()
663 .set_snp_btb_isolation(sev_status.snp_btb_isolation());
664 vmsa.sev_features_mut()
665 .set_prevent_host_ibs(sev_status.prevent_host_ibs());
666 vmsa.sev_features_mut()
667 .set_vmsa_reg_prot(sev_status.vmsa_reg_prot());
668 vmsa.sev_features_mut().set_snp(true);
669 vmsa.sev_features_mut().set_vtom(vtom.is_some());
670 vmsa.set_virtual_tom(vtom.unwrap_or(0));
671
672 vmsa.sev_features_mut().set_alternate_injection(true);
675 vmsa.sev_features_mut().set_reflect_vc(true);
676 vmsa.v_intr_cntrl_mut().set_guest_busy(true);
677 vmsa.sev_features_mut().set_debug_swap(true);
678
679 let vmpl = match vtl {
680 GuestVtl::Vtl0 => Vmpl::Vmpl2,
681 GuestVtl::Vtl1 => Vmpl::Vmpl1,
682 };
683 vmsa.set_vmpl(vmpl.into());
684
685 vmsa.set_guest_error_code(SevExitCode::INTR.0);
688
689 vmsa.set_efer(x86defs::X64_EFER_SVME);
692}
693
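/// APIC access adapter backed by the VMSA: CR8/TPR maps to the virtual
/// interrupt control field, EOIs are forwarded to the device, and wake
/// requests go through the partition's VP list.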
694struct SnpApicClient<'a, T> {
695 partition: &'a UhPartitionInner,
696 vmsa: VmsaWrapper<'a, &'a mut SevVmsa>,
697 dev: &'a T,
698 vmtime: &'a VmTimeAccess,
699 vtl: GuestVtl,
700}
701
702impl<T: CpuIo> ApicClient for SnpApicClient<'_, T> {
703 fn cr8(&mut self) -> u32 {
704 self.vmsa.v_intr_cntrl().tpr().into()
705 }
706
707 fn set_cr8(&mut self, value: u32) {
708 self.vmsa.v_intr_cntrl_mut().set_tpr(value as u8);
709 }
710
711 fn set_apic_base(&mut self, _value: u64) {
712 }
714
715 fn wake(&mut self, vp_index: VpIndex) {
716 self.partition.vps[vp_index.index() as usize].wake(self.vtl, WakeReason::INTCON);
717 }
718
719 fn eoi(&mut self, vector: u8) {
720 debug_assert_eq!(self.vtl, GuestVtl::Vtl0);
721 self.dev.handle_eoi(vector.into())
722 }
723
724 fn now(&mut self) -> vmcore::vmtime::VmTime {
725 self.vmtime.now()
726 }
727
728 fn pull_offload(&mut self) -> ([u32; 8], [u32; 8]) {
729 unreachable!()
730 }
731}
732
733impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
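    /// Hypercalls allowed from the guest on the trusted (vmmcall) path.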
734 const TRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
736 Self,
737 [
738 hv1_hypercall::HvModifySparseGpaPageHostVisibility,
739 hv1_hypercall::HvQuerySparseGpaPageHostVisibility,
740 hv1_hypercall::HvX64StartVirtualProcessor,
741 hv1_hypercall::HvGetVpIndexFromApicId,
742 hv1_hypercall::HvGetVpRegisters,
743 hv1_hypercall::HvEnablePartitionVtl,
744 hv1_hypercall::HvRetargetDeviceInterrupt,
745 hv1_hypercall::HvPostMessage,
746 hv1_hypercall::HvSignalEvent,
747 hv1_hypercall::HvX64EnableVpVtl,
748 hv1_hypercall::HvExtQueryCapabilities,
749 hv1_hypercall::HvVtlCall,
750 hv1_hypercall::HvVtlReturn,
751 hv1_hypercall::HvFlushVirtualAddressList,
752 hv1_hypercall::HvFlushVirtualAddressListEx,
753 hv1_hypercall::HvFlushVirtualAddressSpace,
754 hv1_hypercall::HvFlushVirtualAddressSpaceEx,
755 hv1_hypercall::HvSetVpRegisters,
756 hv1_hypercall::HvModifyVtlProtectionMask,
757 hv1_hypercall::HvX64TranslateVirtualAddress,
758 hv1_hypercall::HvSendSyntheticClusterIpi,
759 hv1_hypercall::HvSendSyntheticClusterIpiEx,
760 hv1_hypercall::HvInstallIntercept,
761 hv1_hypercall::HvAssertVirtualInterrupt,
762 ],
763 );
764
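    /// Hypercalls allowed over the GHCB (host-visible) path, limited to synic
    /// message and event posting.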
765 const UNTRUSTED_DISPATCHER: hv1_hypercall::Dispatcher<Self> = hv1_hypercall::dispatcher!(
768 Self,
769 [hv1_hypercall::HvPostMessage, hv1_hypercall::HvSignalEvent],
770 );
771}
772
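/// `HypercallIo` implementation for hypercalls issued through the GHCB page,
/// where the control word, input/output GPAs, and completion status live in
/// the GHCB rather than in guest registers.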
773struct GhcbEnlightenedHypercall<'a, 'b, T> {
774 handler: UhHypercallHandler<'a, 'b, T, SnpBacked>,
775 control: u64,
776 output_gpa: u64,
777 input_gpa: u64,
778 result: u64,
779}
780
781impl<'a, 'b, T> hv1_hypercall::AsHandler<UhHypercallHandler<'a, 'b, T, SnpBacked>>
782 for &mut GhcbEnlightenedHypercall<'a, 'b, T>
783{
784 fn as_handler(&mut self) -> &mut UhHypercallHandler<'a, 'b, T, SnpBacked> {
785 &mut self.handler
786 }
787}
788
789impl<T> HypercallIo for GhcbEnlightenedHypercall<'_, '_, T> {
790 fn advance_ip(&mut self) {
791 }
793
794 fn retry(&mut self, control: u64) {
795 let control = Control::from(control);
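        // There is no instruction pointer to rewind on this path, so report a
        // timeout status carrying the number of rep elements already processed;
        // the guest is expected to reissue the call for the remainder.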
804 self.set_result(
805 HypercallOutput::from(HvError::Timeout)
806 .with_elements_processed(control.rep_start())
807 .into(),
808 );
809 }
810
811 fn control(&mut self) -> u64 {
812 self.control
813 }
814
815 fn input_gpa(&mut self) -> u64 {
816 self.input_gpa
817 }
818
819 fn output_gpa(&mut self) -> u64 {
820 self.output_gpa
821 }
822
823 fn fast_register_pair_count(&mut self) -> usize {
824 0
825 }
826
827 fn extended_fast_hypercalls_ok(&mut self) -> bool {
828 false
829 }
830
831 fn fast_input(&mut self, _buf: &mut [[u64; 2]], _output_register_pairs: usize) -> usize {
832 unimplemented!("not supported for secure enlightened abi")
833 }
834
835 fn fast_output(&mut self, _starting_pair_index: usize, _buf: &[[u64; 2]]) {
836 unimplemented!("not supported for secure enlightened abi")
837 }
838
839 fn vtl_input(&mut self) -> u64 {
840 unimplemented!("not supported for secure enlightened abi")
841 }
842
843 fn set_result(&mut self, n: u64) {
844 self.result = n;
845 }
846
847 fn fast_regs(&mut self, _starting_pair_index: usize, _buf: &mut [[u64; 2]]) {
848 unimplemented!("not supported for secure enlightened abi")
849 }
850}
851
852impl<'b> ApicBacking<'b, SnpBacked> for UhProcessor<'b, SnpBacked> {
853 fn vp(&mut self) -> &mut UhProcessor<'b, SnpBacked> {
854 self
855 }
856
857 fn handle_interrupt(&mut self, vtl: GuestVtl, vector: u8) -> Result<(), UhRunVpError> {
858 let mut vmsa = self.runner.vmsa_mut(vtl);
859 vmsa.v_intr_cntrl_mut().set_vector(vector);
860 vmsa.v_intr_cntrl_mut().set_priority((vector >> 4).into());
861 vmsa.v_intr_cntrl_mut().set_ignore_tpr(false);
862 vmsa.v_intr_cntrl_mut().set_irq(true);
863 self.backing.cvm.lapics[vtl].activity = MpState::Running;
864 Ok(())
865 }
866
867 fn handle_nmi(&mut self, vtl: GuestVtl) -> Result<(), UhRunVpError> {
868 let mut vmsa = self.runner.vmsa_mut(vtl);
872
873 vmsa.set_event_inject(
877 SevEventInjectInfo::new()
878 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_NMI)
879 .with_vector(2)
880 .with_valid(true),
881 );
882 self.backing.cvm.lapics[vtl].nmi_pending = false;
883 self.backing.cvm.lapics[vtl].activity = MpState::Running;
884 Ok(())
885 }
886
887 fn handle_sipi(&mut self, vtl: GuestVtl, cs: SegmentRegister) -> Result<(), UhRunVpError> {
888 let mut vmsa = self.runner.vmsa_mut(vtl);
889 vmsa.set_cs(virt_seg_to_snp(cs));
890 vmsa.set_rip(0);
891 self.backing.cvm.lapics[vtl].activity = MpState::Running;
892
893 Ok(())
894 }
895}
896
897impl UhProcessor<'_, SnpBacked> {
898 fn handle_synic_deliverable_exit(&mut self) {
899 let message = self
900 .runner
901 .exit_message()
902 .as_message::<hvdef::HvX64SynicSintDeliverableMessage>();
903
904 tracing::trace!(
905 deliverable_sints = message.deliverable_sints,
906 "sint deliverable"
907 );
908
909 self.backing.hv_sint_notifications &= !message.deliverable_sints;
910
911 self.deliver_synic_messages(GuestVtl::Vtl0, message.deliverable_sints);
913 }
914
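    /// Handles a #VMGEXIT intercept: validates that the guest used the GHCB
    /// overlay page registered for this VP, reads the hypercall parameters
    /// from the page, dispatches them through the untrusted dispatcher, and
    /// writes the result back to the page.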
915 fn handle_vmgexit(
916 &mut self,
917 dev: &impl CpuIo,
918 intercepted_vtl: GuestVtl,
919 ) -> Result<(), UhRunVpError> {
920 let message = self
921 .runner
922 .exit_message()
923 .as_message::<hvdef::HvX64VmgexitInterceptMessage>();
924
925 let ghcb_msr = x86defs::snp::GhcbMsr::from(message.ghcb_msr);
926 tracing::trace!(?ghcb_msr, "vmgexit intercept");
927
928 match x86defs::snp::GhcbInfo(ghcb_msr.info()) {
929 x86defs::snp::GhcbInfo::NORMAL => {
930 assert!(message.flags.ghcb_page_valid());
931 let ghcb_pfn = ghcb_msr.pfn();
932
933 let ghcb_overlay =
934 self.backing.cvm.direct_overlay_handle.pfns()[UhDirectOverlay::Ghcb as usize];
935
936 if ghcb_pfn != ghcb_overlay {
938 tracelimit::warn_ratelimited!(
939 CVM_ALLOWED,
940 vmgexit_pfn = ghcb_pfn,
941 overlay_pfn = ghcb_overlay,
942 "ghcb page used for vmgexit does not match overlay page"
943 );
944
945 return Err(UhRunVpError::EmulationState(
946 hcl::ioctl::Error::InvalidRegisterValue,
947 ));
948 }
949
950 match x86defs::snp::GhcbUsage(message.ghcb_page.ghcb_usage) {
951 x86defs::snp::GhcbUsage::HYPERCALL => {
952 let guest_memory = &self.shared.cvm.shared_memory;
953 let overlay_base = ghcb_overlay * HV_PAGE_SIZE;
956 let x86defs::snp::GhcbHypercallParameters {
957 output_gpa,
958 input_control,
959 } = guest_memory
960 .read_plain(
961 overlay_base
962 + x86defs::snp::GHCB_PAGE_HYPERCALL_PARAMETERS_OFFSET as u64,
963 )
964 .map_err(UhRunVpError::HypercallParameters)?;
965
966 let mut handler = GhcbEnlightenedHypercall {
967 handler: UhHypercallHandler {
968 vp: self,
969 bus: dev,
970 trusted: false,
971 intercepted_vtl,
972 },
973 control: input_control,
974 output_gpa,
975 input_gpa: overlay_base,
976 result: 0,
977 };
978
979 UhHypercallHandler::UNTRUSTED_DISPATCHER
980 .dispatch(guest_memory, &mut handler);
981
982 guest_memory
990 .write_at(
991 overlay_base
992 + x86defs::snp::GHCB_PAGE_HYPERCALL_OUTPUT_OFFSET as u64,
993 handler.result.as_bytes(),
994 )
995 .map_err(UhRunVpError::HypercallResult)?;
996 }
997 usage => unimplemented!("ghcb usage {usage:?}"),
998 }
999 }
1000 info => unimplemented!("ghcb info {info:?}"),
1001 }
1002
1003 Ok(())
1004 }
1005
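    /// Handles an intercepted MSR access: the access is routed through the
    /// APIC, then the common CVM handlers, then the SNP-specific handlers.
    /// Writes that a higher VTL has asked to intercept are reflected instead.
    /// Unknown MSRs are ignored (reads return 0); invalid accesses inject #GP.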
1006 fn handle_msr_access(
1007 &mut self,
1008 dev: &impl CpuIo,
1009 entered_from_vtl: GuestVtl,
1010 msr: u32,
1011 is_write: bool,
1012 ) {
1013 if is_write && self.cvm_try_protect_msr_write(entered_from_vtl, msr) {
1014 return;
1015 }
1016
1017 let vmsa = self.runner.vmsa_mut(entered_from_vtl);
1018 let gp = if is_write {
1019 let value = (vmsa.rax() as u32 as u64) | ((vmsa.rdx() as u32 as u64) << 32);
1020
1021 let r = self.backing.cvm.lapics[entered_from_vtl]
1022 .lapic
1023 .access(&mut SnpApicClient {
1024 partition: self.partition,
1025 vmsa,
1026 dev,
1027 vmtime: &self.vmtime,
1028 vtl: entered_from_vtl,
1029 })
1030 .msr_write(msr, value)
1031 .or_else_if_unknown(|| self.write_msr_cvm(msr, value, entered_from_vtl))
1032 .or_else_if_unknown(|| self.write_msr_snp(dev, msr, value, entered_from_vtl));
1033
1034 match r {
1035 Ok(()) => false,
1036 Err(MsrError::Unknown) => {
1037 tracing::debug!(msr, value, "unknown cvm msr write");
1038 false
1039 }
1040 Err(MsrError::InvalidAccess) => true,
1041 }
1042 } else {
1043 let r = self.backing.cvm.lapics[entered_from_vtl]
1044 .lapic
1045 .access(&mut SnpApicClient {
1046 partition: self.partition,
1047 vmsa,
1048 dev,
1049 vmtime: &self.vmtime,
1050 vtl: entered_from_vtl,
1051 })
1052 .msr_read(msr)
1053 .or_else_if_unknown(|| self.read_msr_cvm(msr, entered_from_vtl))
1054 .or_else_if_unknown(|| self.read_msr_snp(dev, msr, entered_from_vtl));
1055
1056 let value = match r {
1057 Ok(v) => Some(v),
1058 Err(MsrError::Unknown) => {
1059 tracing::debug!(msr, "unknown cvm msr read");
1060 Some(0)
1061 }
1062 Err(MsrError::InvalidAccess) => None,
1063 };
1064
1065 if let Some(value) = value {
1066 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1067 vmsa.set_rax((value as u32).into());
1068 vmsa.set_rdx(((value >> 32) as u32).into());
1069 false
1070 } else {
1071 true
1072 }
1073 };
1074
1075 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1076 if gp {
1077 vmsa.set_event_inject(
1078 SevEventInjectInfo::new()
1079 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1080 .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
1081 .with_deliver_error_code(true)
1082 .with_valid(true),
1083 );
1084 } else {
1085 advance_to_next_instruction(&mut vmsa);
1086 }
1087 }
1088
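    /// Handles an XSETBV exit: if the common CVM validation accepts the exit,
    /// the new XCR0 value is applied unless VTL 1 intercepts the write;
    /// otherwise #GP is injected.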
1089 fn handle_xsetbv(&mut self, entered_from_vtl: GuestVtl) {
1090 let vmsa = self.runner.vmsa(entered_from_vtl);
1091 if let Some(value) = hardware_cvm::validate_xsetbv_exit(hardware_cvm::XsetbvExitInput {
1092 rax: vmsa.rax(),
1093 rcx: vmsa.rcx(),
1094 rdx: vmsa.rdx(),
1095 cr4: vmsa.cr4(),
1096 cpl: vmsa.cpl(),
1097 }) {
1098 if !self.cvm_try_protect_secure_register_write(
1099 entered_from_vtl,
1100 HvX64RegisterName::Xfem,
1101 value,
1102 ) {
1103 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1104 vmsa.set_xcr0(value);
1105 advance_to_next_instruction(&mut vmsa);
1106 }
1107 } else {
1108 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1109 vmsa.set_event_inject(
1110 SevEventInjectInfo::new()
1111 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1112 .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
1113 .with_deliver_error_code(true)
1114 .with_valid(true),
1115 );
1116 }
1117 }
1118
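    /// Handles a CR0/CR4 write intercept: decodes the source GPR from
    /// exit_info1 and, unless a higher VTL has registered an intercept for the
    /// register, applies the write and advances past the instruction.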
1119 fn handle_crx_intercept(&mut self, entered_from_vtl: GuestVtl, reg: HvX64RegisterName) {
1120 let vmsa = self.runner.vmsa(entered_from_vtl);
1121 let mov_crx_drx = x86defs::snp::MovCrxDrxInfo::from(vmsa.exit_info1());
1122 let reg_value = {
1123 let gpr_name =
1124 HvX64RegisterName(HvX64RegisterName::Rax.0 + mov_crx_drx.gpr_number() as u32);
1125
1126 match gpr_name {
1127 HvX64RegisterName::Rax => vmsa.rax(),
1128 HvX64RegisterName::Rbx => vmsa.rbx(),
1129 HvX64RegisterName::Rcx => vmsa.rcx(),
1130 HvX64RegisterName::Rdx => vmsa.rdx(),
1131 HvX64RegisterName::Rsp => vmsa.rsp(),
1132 HvX64RegisterName::Rbp => vmsa.rbp(),
1133 HvX64RegisterName::Rsi => vmsa.rsi(),
1134 HvX64RegisterName::Rdi => vmsa.rdi(),
1135 HvX64RegisterName::R8 => vmsa.r8(),
1136 HvX64RegisterName::R9 => vmsa.r9(),
1137 HvX64RegisterName::R10 => vmsa.r10(),
1138 HvX64RegisterName::R11 => vmsa.r11(),
1139 HvX64RegisterName::R12 => vmsa.r12(),
1140 HvX64RegisterName::R13 => vmsa.r13(),
1141 HvX64RegisterName::R14 => vmsa.r14(),
1142 HvX64RegisterName::R15 => vmsa.r15(),
1143 _ => unreachable!("unexpected register"),
1144 }
1145 };
1146
1147 if !mov_crx_drx.mov_crx() {
1154 tracelimit::warn_ratelimited!(
1155 CVM_ALLOWED,
1156 "Intercepted crx access, instruction is not mov crx"
1157 );
1158 return;
1159 }
1160
1161 if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, reg_value) {
1162 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1163 match reg {
1164 HvX64RegisterName::Cr0 => vmsa.set_cr0(reg_value),
1165 HvX64RegisterName::Cr4 => vmsa.set_cr4(reg_value),
1166 _ => unreachable!(),
1167 }
1168 advance_to_next_instruction(&mut vmsa);
1169 }
1170 }
1171
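    /// Arms the lazy EOI flag for the VTL's hv emulator when the APIC has one
    /// pending; returns whether it was set so the caller can clear and apply
    /// it after the exit.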
1172 #[must_use]
1173 fn sync_lazy_eoi(&mut self, vtl: GuestVtl) -> bool {
1174 if self.backing.cvm.lapics[vtl].lapic.is_lazy_eoi_pending() {
1175 return self.backing.cvm.hv[vtl].set_lazy_eoi();
1176 }
1177
1178 false
1179 }
1180
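    /// Runs the VP once: prepares the VMSA of the VTL being entered (guest
    /// busy flag, halt state, lazy EOI), enters the guest, re-injects any
    /// event that was interrupted by the exit, and then dispatches on the SEV
    /// exit code.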
1181 async fn run_vp_snp(&mut self, dev: &impl CpuIo) -> Result<(), VpHaltReason<UhRunVpError>> {
1182 let next_vtl = self.backing.cvm.exit_vtl;
1183
1184 let mut vmsa = self.runner.vmsa_mut(next_vtl);
1185 let last_interrupt_ctrl = vmsa.v_intr_cntrl();
1186
1187 if vmsa.sev_features().alternate_injection() {
1188 vmsa.v_intr_cntrl_mut().set_guest_busy(false);
1189 }
1190
1191 self.unlock_tlb_lock(Vtl::Vtl2);
1192 let tlb_halt = self.should_halt_for_tlb_unlock(next_vtl);
1193
1194 let halt = self.backing.cvm.lapics[next_vtl].activity != MpState::Running || tlb_halt;
1195
1196 if halt && next_vtl == GuestVtl::Vtl1 && !tlb_halt {
1197 tracelimit::warn_ratelimited!(CVM_ALLOWED, "halting VTL 1, which might halt the guest");
1198 }
1199
1200 self.runner.set_halted(halt);
1201
1202 self.runner.set_exit_vtl(next_vtl);
1203
1204 let lazy_eoi = self.sync_lazy_eoi(next_vtl);
1206
1207 let mut has_intercept = self
1208 .runner
1209 .run()
1210 .map_err(|err| VpHaltReason::Hypervisor(UhRunVpError::Run(err)))?;
1211
1212 let entered_from_vtl = next_vtl;
1213 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1214
1215 let inject = if vmsa.sev_features().alternate_injection() {
1217 if vmsa.v_intr_cntrl().guest_busy() {
1218 self.backing.general_stats[entered_from_vtl]
1219 .guest_busy
1220 .increment();
1221 let exit_int_info = SevEventInjectInfo::from(vmsa.exit_int_info());
1229 assert!(
1230 exit_int_info.valid(),
1231 "event inject info should be valid {exit_int_info:x?}"
1232 );
1233
1234 match exit_int_info.interruption_type() {
1235 x86defs::snp::SEV_INTR_TYPE_EXCEPT => {
1236 if exit_int_info.vector() != 3 && exit_int_info.vector() != 4 {
1237 Some(exit_int_info)
1239 } else {
1240 None
1241 }
1242 }
1243 x86defs::snp::SEV_INTR_TYPE_SW => None,
1244 _ => Some(exit_int_info),
1245 }
1246 } else {
1247 None
1248 }
1249 } else {
1250 unimplemented!("Only alternate injection is supported for SNP")
1251 };
1252
1253 if let Some(inject) = inject {
1254 vmsa.set_event_inject(inject);
1255 }
1256 if vmsa.sev_features().alternate_injection() {
1257 vmsa.v_intr_cntrl_mut().set_guest_busy(true);
1258 }
1259
1260 if last_interrupt_ctrl.irq() && !vmsa.v_intr_cntrl().irq() {
1261 self.backing.general_stats[entered_from_vtl]
1262 .int_ack
1263 .increment();
1264 self.backing.cvm.lapics[entered_from_vtl]
1266 .lapic
1267 .acknowledge_interrupt(last_interrupt_ctrl.vector());
1268 }
1269
1270 vmsa.v_intr_cntrl_mut().set_irq(false);
1271
1272 if lazy_eoi && self.backing.cvm.hv[entered_from_vtl].clear_lazy_eoi() {
1274 self.backing.cvm.lapics[entered_from_vtl]
1275 .lapic
1276 .access(&mut SnpApicClient {
1277 partition: self.partition,
1278 vmsa,
1279 dev,
1280 vmtime: &self.vmtime,
1281 vtl: entered_from_vtl,
1282 })
1283 .lazy_eoi();
1284 }
1285
1286 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1287 let sev_error_code = SevExitCode(vmsa.guest_error_code());
1288
1289 let stat = match sev_error_code {
1290 SevExitCode::CPUID => {
1291 let leaf = vmsa.rax() as u32;
1292 let subleaf = vmsa.rcx() as u32;
1293 let [eax, ebx, ecx, edx] = self.cvm_cpuid_result(entered_from_vtl, leaf, subleaf);
1294 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1295 vmsa.set_rax(eax.into());
1296 vmsa.set_rbx(ebx.into());
1297 vmsa.set_rcx(ecx.into());
1298 vmsa.set_rdx(edx.into());
1299 advance_to_next_instruction(&mut vmsa);
1300 &mut self.backing.exit_stats[entered_from_vtl].cpuid
1301 }
1302
1303 SevExitCode::MSR => {
1304 let is_write = vmsa.exit_info1() & 1 != 0;
1305 let msr = vmsa.rcx() as u32;
1306
1307 self.handle_msr_access(dev, entered_from_vtl, msr, is_write);
1308
1309 if is_write {
1310 &mut self.backing.exit_stats[entered_from_vtl].msr_write
1311 } else {
1312 &mut self.backing.exit_stats[entered_from_vtl].msr_read
1313 }
1314 }
1315
1316 SevExitCode::IOIO => {
1317 let io_info =
1318 SevIoAccessInfo::from(self.runner.vmsa(entered_from_vtl).exit_info1() as u32);
1319
1320 let access_size = if io_info.access_size32() {
1321 4
1322 } else if io_info.access_size16() {
1323 2
1324 } else {
1325 1
1326 };
1327
1328 let port_access_protected = self.cvm_try_protect_io_port_access(
1329 entered_from_vtl,
1330 io_info.port(),
1331 io_info.read_access(),
1332 access_size,
1333 io_info.string_access(),
1334 io_info.rep_access(),
1335 );
1336
1337 let vmsa = self.runner.vmsa(entered_from_vtl);
1338 if !port_access_protected {
1339 if io_info.string_access() || io_info.rep_access() {
1340 let interruption_pending = vmsa.event_inject().valid()
1341 || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();
1342
1343 self.emulate(dev, interruption_pending, entered_from_vtl, ())
1348 .await?;
1349 } else {
1350 let mut rax = vmsa.rax();
1351 emulate_io(
1352 self.inner.vp_info.base.vp_index,
1353 !io_info.read_access(),
1354 io_info.port(),
1355 &mut rax,
1356 access_size,
1357 dev,
1358 )
1359 .await;
1360
1361 let mut vmsa = self.runner.vmsa_mut(entered_from_vtl);
1362 vmsa.set_rax(rax);
1363 advance_to_next_instruction(&mut vmsa);
1364 }
1365 }
1366 &mut self.backing.exit_stats[entered_from_vtl].ioio
1367 }
1368
1369 SevExitCode::VMMCALL => {
1370 let is_64bit = self.long_mode(entered_from_vtl);
1371 let guest_memory = &self.partition.gm[entered_from_vtl];
1372 let handler = UhHypercallHandler {
1373 trusted: !self.cvm_partition().hide_isolation,
1374 vp: &mut *self,
1375 bus: dev,
1376 intercepted_vtl: entered_from_vtl,
1377 };
1378
1379 UhHypercallHandler::TRUSTED_DISPATCHER.dispatch(
1382 guest_memory,
1383 hv1_hypercall::X64RegisterIo::new(handler, is_64bit),
1384 );
1385 &mut self.backing.exit_stats[entered_from_vtl].vmmcall
1386 }
1387
1388 SevExitCode::SHUTDOWN => {
1389 return Err(VpHaltReason::TripleFault {
1390 vtl: entered_from_vtl.into(),
1391 });
1392 }
1393
1394 SevExitCode::WBINVD | SevExitCode::INVD => {
1395 advance_to_next_instruction(&mut vmsa);
1399 &mut self.backing.exit_stats[entered_from_vtl].invd
1400 }
1401
1402 SevExitCode::NPF if has_intercept => {
1403 let exit_info2 = vmsa.exit_info2();
1422 let interruption_pending = vmsa.event_inject().valid()
1423 || SevEventInjectInfo::from(vmsa.exit_int_info()).valid();
1424 let exit_message = self.runner.exit_message();
1425 let emulate = match exit_message.header.typ {
1426 HvMessageType::HvMessageTypeExceptionIntercept => {
1427 let exception_message =
1428 exit_message.as_message::<hvdef::HvX64ExceptionInterceptMessage>();
1429
1430 exception_message.vector
1431 == x86defs::Exception::SEV_VMM_COMMUNICATION.0 as u16
1432 }
1433 HvMessageType::HvMessageTypeUnmappedGpa
1434 | HvMessageType::HvMessageTypeGpaIntercept
1435 | HvMessageType::HvMessageTypeUnacceptedGpa => {
1436 let gpa_message =
1440 exit_message.as_message::<hvdef::HvX64MemoryInterceptMessage>();
1441
1442 (gpa_message.guest_physical_address >> hvdef::HV_PAGE_SHIFT)
1444 == (exit_info2 >> hvdef::HV_PAGE_SHIFT)
1445 }
1446 _ => false,
1447 };
1448
1449 if emulate {
1450 has_intercept = false;
1451 self.emulate(dev, interruption_pending, entered_from_vtl, ())
1452 .await?;
1453 &mut self.backing.exit_stats[entered_from_vtl].npf
1454 } else {
1455 &mut self.backing.exit_stats[entered_from_vtl].npf_spurious
1456 }
1457 }
1458
1459 SevExitCode::NPF => &mut self.backing.exit_stats[entered_from_vtl].npf_no_intercept,
1460
1461 SevExitCode::HLT => {
1462 self.backing.cvm.lapics[entered_from_vtl].activity = MpState::Halted;
1463 vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
1465 &mut self.backing.exit_stats[entered_from_vtl].hlt
1466 }
1467
1468 SevExitCode::INVALID_VMCB => {
1469 return Err(VpHaltReason::InvalidVmState(UhRunVpError::InvalidVmcb));
1470 }
1471
1472 SevExitCode::INVLPGB | SevExitCode::ILLEGAL_INVLPGB => {
1473 vmsa.set_event_inject(
1474 SevEventInjectInfo::new()
1475 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1476 .with_vector(x86defs::Exception::INVALID_OPCODE.0)
1477 .with_valid(true),
1478 );
1479 &mut self.backing.exit_stats[entered_from_vtl].invlpgb
1480 }
1481
1482 SevExitCode::RDPMC => {
1483 let cr4 = vmsa.cr4();
1486 if ((vmsa.cpl() > 0) && (cr4 & x86defs::X64_CR4_PCE == 0))
1487 || (vmsa.rcx() as u32 >= 4)
1488 {
1489 vmsa.set_event_inject(
1490 SevEventInjectInfo::new()
1491 .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
1492 .with_vector(x86defs::Exception::GENERAL_PROTECTION_FAULT.0)
1493 .with_deliver_error_code(true)
1494 .with_valid(true),
1495 );
1496 } else {
1497 vmsa.set_rax(0);
1498 vmsa.set_rdx(0);
1499 advance_to_next_instruction(&mut vmsa);
1500 }
1501 &mut self.backing.exit_stats[entered_from_vtl].rdpmc
1502 }
1503
1504 SevExitCode::VMGEXIT if has_intercept => {
1505 has_intercept = false;
1506 match self.runner.exit_message().header.typ {
1507 HvMessageType::HvMessageTypeX64SevVmgexitIntercept => {
1508 self.handle_vmgexit(dev, entered_from_vtl)
1509 .map_err(VpHaltReason::InvalidVmState)?;
1510 }
1511 _ => has_intercept = true,
1512 }
1513 &mut self.backing.exit_stats[entered_from_vtl].vmgexit
1514 }
1515
1516 SevExitCode::NMI
1517 | SevExitCode::PAUSE
1518 | SevExitCode::SMI
1519 | SevExitCode::VMGEXIT
1520 | SevExitCode::BUSLOCK
1521 | SevExitCode::IDLE_HLT => {
1522 &mut self.backing.exit_stats[entered_from_vtl].automatic_exit
1524 }
1525
1526 SevExitCode::VINTR => {
1527 unimplemented!("SevExitCode::VINTR");
1533 }
1534
1535 SevExitCode::INTR => {
1536 &mut self.backing.exit_stats[entered_from_vtl].intr
1539 }
1540
1541 SevExitCode::XSETBV => {
1542 self.handle_xsetbv(entered_from_vtl);
1543 &mut self.backing.exit_stats[entered_from_vtl].xsetbv
1544 }
1545
1546 SevExitCode::EXCP_DB => &mut self.backing.exit_stats[entered_from_vtl].excp_db,
1547
1548 SevExitCode::CR0_WRITE => {
1549 self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr0);
1550 &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
1551 }
1552 SevExitCode::CR4_WRITE => {
1553 self.handle_crx_intercept(entered_from_vtl, HvX64RegisterName::Cr4);
1554 &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
1555 }
1556
1557 tr_exit_code @ (SevExitCode::GDTR_WRITE
1558 | SevExitCode::IDTR_WRITE
1559 | SevExitCode::LDTR_WRITE
1560 | SevExitCode::TR_WRITE) => {
1561 let reg = match tr_exit_code {
1562 SevExitCode::GDTR_WRITE => HvX64RegisterName::Gdtr,
1563 SevExitCode::IDTR_WRITE => HvX64RegisterName::Idtr,
1564 SevExitCode::LDTR_WRITE => HvX64RegisterName::Ldtr,
1565 SevExitCode::TR_WRITE => HvX64RegisterName::Tr,
1566 _ => unreachable!(),
1567 };
1568
1569 if !self.cvm_try_protect_secure_register_write(entered_from_vtl, reg, 0) {
1570 panic!("unexpected secure register");
1577 }
1578
1579 &mut self.backing.exit_stats[entered_from_vtl].secure_reg_write
1580 }
1581
1582 _ => {
1583 tracing::error!(
1584 CVM_CONFIDENTIAL,
1585 "SEV exit code {sev_error_code:x?} sev features {:x?} v_intr_control {:x?} event inject {:x?} \
1586 vmpl {:x?} cpl {:x?} exit_info1 {:x?} exit_info2 {:x?} exit_int_info {:x?} virtual_tom {:x?} \
1587 efer {:x?} cr4 {:x?} cr3 {:x?} cr0 {:x?} rflag {:x?} rip {:x?} next rip {:x?}",
1588 vmsa.sev_features(),
1589 vmsa.v_intr_cntrl(),
1590 vmsa.event_inject(),
1591 vmsa.vmpl(),
1592 vmsa.cpl(),
1593 vmsa.exit_info1(),
1594 vmsa.exit_info2(),
1595 vmsa.exit_int_info(),
1596 vmsa.virtual_tom(),
1597 vmsa.efer(),
1598 vmsa.cr4(),
1599 vmsa.cr3(),
1600 vmsa.cr0(),
1601 vmsa.rflags(),
1602 vmsa.rip(),
1603 vmsa.next_rip(),
1604 );
1605 panic!("Received unexpected SEV exit code {sev_error_code:x?}");
1606 }
1607 };
1608 stat.increment();
1609
1610 if cfg!(feature = "gdb") && sev_error_code == SevExitCode::EXCP_DB {
1612 return self.handle_debug_exception(entered_from_vtl);
1613 }
1614
1615 if has_intercept {
1619 self.backing.general_stats[entered_from_vtl]
1620 .synth_int
1621 .increment();
1622 match self.runner.exit_message().header.typ {
1623 HvMessageType::HvMessageTypeSynicSintDeliverable => {
1624 self.handle_synic_deliverable_exit();
1625 }
1626 HvMessageType::HvMessageTypeX64Halt
1627 | HvMessageType::HvMessageTypeExceptionIntercept => {
1628 }
1632 message_type => {
1633 tracelimit::error_ratelimited!(
1634 CVM_ALLOWED,
1635 ?message_type,
1636 "unknown synthetic exit"
1637 );
1638 }
1639 }
1640 }
1641
1642 self.runner
1651 .vmsa_mut(entered_from_vtl)
1652 .set_guest_error_code(SevExitCode::INTR.0);
1653 Ok(())
1654 }
1655
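    /// Returns true if the VTL is in long mode (CR0.PE and EFER.LMA set).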
1656 fn long_mode(&self, vtl: GuestVtl) -> bool {
1657 let vmsa = self.runner.vmsa(vtl);
1658 vmsa.cr0() & x86defs::X64_CR0_PE != 0 && vmsa.efer() & x86defs::X64_EFER_LMA != 0
1659 }
1660}
1661
1662impl<T: CpuIo> X86EmulatorSupport for UhEmulationState<'_, '_, T, SnpBacked> {
1663 type Error = UhRunVpError;
1664
1665 fn flush(&mut self) -> Result<(), Self::Error> {
1666 Ok(())
1667 }
1669
1670 fn vp_index(&self) -> VpIndex {
1671 self.vp.vp_index()
1672 }
1673
1674 fn vendor(&self) -> x86defs::cpuid::Vendor {
1675 self.vp.partition.caps.vendor
1676 }
1677
1678 fn gp(&mut self, reg: x86emu::Gp) -> u64 {
1679 let vmsa = self.vp.runner.vmsa(self.vtl);
1680 match reg {
1681 x86emu::Gp::RAX => vmsa.rax(),
1682 x86emu::Gp::RCX => vmsa.rcx(),
1683 x86emu::Gp::RDX => vmsa.rdx(),
1684 x86emu::Gp::RBX => vmsa.rbx(),
1685 x86emu::Gp::RSP => vmsa.rsp(),
1686 x86emu::Gp::RBP => vmsa.rbp(),
1687 x86emu::Gp::RSI => vmsa.rsi(),
1688 x86emu::Gp::RDI => vmsa.rdi(),
1689 x86emu::Gp::R8 => vmsa.r8(),
1690 x86emu::Gp::R9 => vmsa.r9(),
1691 x86emu::Gp::R10 => vmsa.r10(),
1692 x86emu::Gp::R11 => vmsa.r11(),
1693 x86emu::Gp::R12 => vmsa.r12(),
1694 x86emu::Gp::R13 => vmsa.r13(),
1695 x86emu::Gp::R14 => vmsa.r14(),
1696 x86emu::Gp::R15 => vmsa.r15(),
1697 }
1698 }
1699
1700 fn set_gp(&mut self, reg: x86emu::Gp, v: u64) {
1701 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1702 match reg {
1703 x86emu::Gp::RAX => vmsa.set_rax(v),
1704 x86emu::Gp::RCX => vmsa.set_rcx(v),
1705 x86emu::Gp::RDX => vmsa.set_rdx(v),
1706 x86emu::Gp::RBX => vmsa.set_rbx(v),
1707 x86emu::Gp::RSP => vmsa.set_rsp(v),
1708 x86emu::Gp::RBP => vmsa.set_rbp(v),
1709 x86emu::Gp::RSI => vmsa.set_rsi(v),
1710 x86emu::Gp::RDI => vmsa.set_rdi(v),
1711 x86emu::Gp::R8 => vmsa.set_r8(v),
1712 x86emu::Gp::R9 => vmsa.set_r9(v),
1713 x86emu::Gp::R10 => vmsa.set_r10(v),
1714 x86emu::Gp::R11 => vmsa.set_r11(v),
1715 x86emu::Gp::R12 => vmsa.set_r12(v),
1716 x86emu::Gp::R13 => vmsa.set_r13(v),
1717 x86emu::Gp::R14 => vmsa.set_r14(v),
1718 x86emu::Gp::R15 => vmsa.set_r15(v),
1719 };
1720 }
1721
1722 fn xmm(&mut self, index: usize) -> u128 {
1723 self.vp.runner.vmsa_mut(self.vtl).xmm_registers(index)
1724 }
1725
1726 fn set_xmm(&mut self, index: usize, v: u128) -> Result<(), Self::Error> {
1727 self.vp
1728 .runner
1729 .vmsa_mut(self.vtl)
1730 .set_xmm_registers(index, v);
1731 Ok(())
1732 }
1733
1734 fn rip(&mut self) -> u64 {
1735 let vmsa = self.vp.runner.vmsa(self.vtl);
1736 vmsa.rip()
1737 }
1738
1739 fn set_rip(&mut self, v: u64) {
1740 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1741 vmsa.set_rip(v);
1742 }
1743
1744 fn segment(&mut self, index: x86emu::Segment) -> x86defs::SegmentRegister {
1745 let vmsa = self.vp.runner.vmsa(self.vtl);
1746 match index {
1747 x86emu::Segment::ES => virt_seg_from_snp(vmsa.es()),
1748 x86emu::Segment::CS => virt_seg_from_snp(vmsa.cs()),
1749 x86emu::Segment::SS => virt_seg_from_snp(vmsa.ss()),
1750 x86emu::Segment::DS => virt_seg_from_snp(vmsa.ds()),
1751 x86emu::Segment::FS => virt_seg_from_snp(vmsa.fs()),
1752 x86emu::Segment::GS => virt_seg_from_snp(vmsa.gs()),
1753 }
1754 .into()
1755 }
1756
1757 fn efer(&mut self) -> u64 {
1758 let vmsa = self.vp.runner.vmsa(self.vtl);
1759 vmsa.efer()
1760 }
1761
1762 fn cr0(&mut self) -> u64 {
1763 let vmsa = self.vp.runner.vmsa(self.vtl);
1764 vmsa.cr0()
1765 }
1766
1767 fn rflags(&mut self) -> RFlags {
1768 let vmsa = self.vp.runner.vmsa(self.vtl);
1769 vmsa.rflags().into()
1770 }
1771
1772 fn set_rflags(&mut self, v: RFlags) {
1773 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1774 vmsa.set_rflags(v.into());
1775 }
1776
1777 fn instruction_bytes(&self) -> &[u8] {
1778 &[]
1779 }
1780
1781 fn physical_address(&self) -> Option<u64> {
1782 Some(self.vp.runner.vmsa(self.vtl).exit_info2())
1783 }
1784
1785 fn initial_gva_translation(
1786 &mut self,
1787 ) -> Option<virt_support_x86emu::emulate::InitialTranslation> {
1788 None
1789 }
1790
1791 fn interruption_pending(&self) -> bool {
1792 self.interruption_pending
1793 }
1794
1795 fn check_vtl_access(
1796 &mut self,
1797 _gpa: u64,
1798 _mode: virt_support_x86emu::emulate::TranslateMode,
1799 ) -> Result<(), virt_support_x86emu::emulate::EmuCheckVtlAccessError<Self::Error>> {
1800 Ok(())
1802 }
1803
1804 fn translate_gva(
1805 &mut self,
1806 gva: u64,
1807 mode: virt_support_x86emu::emulate::TranslateMode,
1808 ) -> Result<
1809 Result<
1810 virt_support_x86emu::emulate::EmuTranslateResult,
1811 virt_support_x86emu::emulate::EmuTranslateError,
1812 >,
1813 Self::Error,
1814 > {
1815 emulate_translate_gva(self, gva, mode)
1816 }
1817
1818 fn inject_pending_event(&mut self, event_info: hvdef::HvX64PendingEvent) {
1819 assert!(event_info.reg_0.event_pending());
1820 assert_eq!(
1821 event_info.reg_0.event_type(),
1822 hvdef::HV_X64_PENDING_EVENT_EXCEPTION
1823 );
1824
1825 let exception = HvX64PendingExceptionEvent::from(event_info.reg_0.into_bits());
1826 assert!(!self.interruption_pending);
1827
1828 SnpBacked::set_pending_exception(self.vp, self.vtl, exception);
1831 }
1832
1833 fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool {
1834 let vtom = self.vp.partition.caps.vtom.unwrap();
1837 debug_assert!(vtom == 0 || vtom.is_power_of_two());
1838 self.vp.partition.is_gpa_mapped(gpa & !vtom, write)
1839 }
1840
1841 fn lapic_base_address(&self) -> Option<u64> {
1842 self.vp.backing.cvm.lapics[self.vtl].lapic.base_address()
1843 }
1844
1845 fn lapic_read(&mut self, address: u64, data: &mut [u8]) {
1846 let vtl = self.vtl;
1847 self.vp.backing.cvm.lapics[vtl]
1848 .lapic
1849 .access(&mut SnpApicClient {
1850 partition: self.vp.partition,
1851 vmsa: self.vp.runner.vmsa_mut(vtl),
1852 dev: self.devices,
1853 vmtime: &self.vp.vmtime,
1854 vtl,
1855 })
1856 .mmio_read(address, data);
1857 }
1858
1859 fn lapic_write(&mut self, address: u64, data: &[u8]) {
1860 let vtl = self.vtl;
1861 self.vp.backing.cvm.lapics[vtl]
1862 .lapic
1863 .access(&mut SnpApicClient {
1864 partition: self.vp.partition,
1865 vmsa: self.vp.runner.vmsa_mut(vtl),
1866 dev: self.devices,
1867 vmtime: &self.vp.vmtime,
1868 vtl,
1869 })
1870 .mmio_write(address, data);
1871 }
1872}
1873
1874impl<T> hv1_hypercall::X64RegisterState for UhHypercallHandler<'_, '_, T, SnpBacked> {
1875 fn rip(&mut self) -> u64 {
1876 self.vp.runner.vmsa(self.intercepted_vtl).rip()
1877 }
1878
1879 fn set_rip(&mut self, rip: u64) {
1880 self.vp.runner.vmsa_mut(self.intercepted_vtl).set_rip(rip);
1881 }
1882
1883 fn gp(&mut self, n: hv1_hypercall::X64HypercallRegister) -> u64 {
1884 let vmsa = self.vp.runner.vmsa(self.intercepted_vtl);
1885 match n {
1886 hv1_hypercall::X64HypercallRegister::Rax => vmsa.rax(),
1887 hv1_hypercall::X64HypercallRegister::Rcx => vmsa.rcx(),
1888 hv1_hypercall::X64HypercallRegister::Rdx => vmsa.rdx(),
1889 hv1_hypercall::X64HypercallRegister::Rbx => vmsa.rbx(),
1890 hv1_hypercall::X64HypercallRegister::Rsi => vmsa.rsi(),
1891 hv1_hypercall::X64HypercallRegister::Rdi => vmsa.rdi(),
1892 hv1_hypercall::X64HypercallRegister::R8 => vmsa.r8(),
1893 }
1894 }
1895
1896 fn set_gp(&mut self, n: hv1_hypercall::X64HypercallRegister, value: u64) {
1897 let mut vmsa = self.vp.runner.vmsa_mut(self.intercepted_vtl);
1898 match n {
1899 hv1_hypercall::X64HypercallRegister::Rax => vmsa.set_rax(value),
1900 hv1_hypercall::X64HypercallRegister::Rcx => vmsa.set_rcx(value),
1901 hv1_hypercall::X64HypercallRegister::Rdx => vmsa.set_rdx(value),
1902 hv1_hypercall::X64HypercallRegister::Rbx => vmsa.set_rbx(value),
1903 hv1_hypercall::X64HypercallRegister::Rsi => vmsa.set_rsi(value),
1904 hv1_hypercall::X64HypercallRegister::Rdi => vmsa.set_rdi(value),
1905 hv1_hypercall::X64HypercallRegister::R8 => vmsa.set_r8(value),
1906 }
1907 }
1908
1909 fn xmm(&mut self, n: usize) -> u128 {
1910 self.vp.runner.vmsa(self.intercepted_vtl).xmm_registers(n)
1911 }
1912
1913 fn set_xmm(&mut self, n: usize, value: u128) {
1914 self.vp
1915 .runner
1916 .vmsa_mut(self.intercepted_vtl)
1917 .set_xmm_registers(n, value);
1918 }
1919}
1920
1921impl AccessVpState for UhVpStateAccess<'_, '_, SnpBacked> {
1922 type Error = vp_state::Error;
1923
1924 fn caps(&self) -> &virt::x86::X86PartitionCapabilities {
1925 &self.vp.partition.caps
1926 }
1927
1928 fn commit(&mut self) -> Result<(), Self::Error> {
1929 Ok(())
1930 }
1931
1932 fn registers(&mut self) -> Result<vp::Registers, Self::Error> {
1933 let vmsa = self.vp.runner.vmsa(self.vtl);
1934
1935 Ok(vp::Registers {
1936 rax: vmsa.rax(),
1937 rcx: vmsa.rcx(),
1938 rdx: vmsa.rdx(),
1939 rbx: vmsa.rbx(),
1940 rsp: vmsa.rsp(),
1941 rbp: vmsa.rbp(),
1942 rsi: vmsa.rsi(),
1943 rdi: vmsa.rdi(),
1944 r8: vmsa.r8(),
1945 r9: vmsa.r9(),
1946 r10: vmsa.r10(),
1947 r11: vmsa.r11(),
1948 r12: vmsa.r12(),
1949 r13: vmsa.r13(),
1950 r14: vmsa.r14(),
1951 r15: vmsa.r15(),
1952 rip: vmsa.rip(),
1953 rflags: vmsa.rflags(),
1954 cs: virt_seg_from_snp(vmsa.cs()),
1955 ds: virt_seg_from_snp(vmsa.ds()),
1956 es: virt_seg_from_snp(vmsa.es()),
1957 fs: virt_seg_from_snp(vmsa.fs()),
1958 gs: virt_seg_from_snp(vmsa.gs()),
1959 ss: virt_seg_from_snp(vmsa.ss()),
1960 tr: virt_seg_from_snp(vmsa.tr()),
1961 ldtr: virt_seg_from_snp(vmsa.ldtr()),
1962 gdtr: virt_table_from_snp(vmsa.gdtr()),
1963 idtr: virt_table_from_snp(vmsa.idtr()),
1964 cr0: vmsa.cr0(),
1965 cr2: vmsa.cr2(),
1966 cr3: vmsa.cr3(),
1967 cr4: vmsa.cr4(),
1968 cr8: vmsa.v_intr_cntrl().tpr().into(),
1969 efer: vmsa.efer(),
1970 })
1971 }
1972
1973 fn set_registers(&mut self, value: &vp::Registers) -> Result<(), Self::Error> {
1974 let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
1975
1976 let vp::Registers {
1977 rax,
1978 rcx,
1979 rdx,
1980 rbx,
1981 rsp,
1982 rbp,
1983 rsi,
1984 rdi,
1985 r8,
1986 r9,
1987 r10,
1988 r11,
1989 r12,
1990 r13,
1991 r14,
1992 r15,
1993 rip,
1994 rflags,
1995 cs,
1996 ds,
1997 es,
1998 fs,
1999 gs,
2000 ss,
2001 tr,
2002 ldtr,
2003 gdtr,
2004 idtr,
2005 cr0,
2006 cr2,
2007 cr3,
2008 cr4,
2009 cr8,
2010 efer,
2011 } = *value;
2012 vmsa.set_rax(rax);
2013 vmsa.set_rcx(rcx);
2014 vmsa.set_rdx(rdx);
2015 vmsa.set_rbx(rbx);
2016 vmsa.set_rsp(rsp);
2017 vmsa.set_rbp(rbp);
2018 vmsa.set_rsi(rsi);
2019 vmsa.set_rdi(rdi);
2020 vmsa.set_r8(r8);
2021 vmsa.set_r9(r9);
2022 vmsa.set_r10(r10);
2023 vmsa.set_r11(r11);
2024 vmsa.set_r12(r12);
2025 vmsa.set_r13(r13);
2026 vmsa.set_r14(r14);
2027 vmsa.set_r15(r15);
2028 vmsa.set_rip(rip);
2029 vmsa.set_rflags(rflags);
2030 vmsa.set_cs(virt_seg_to_snp(cs));
2031 vmsa.set_ds(virt_seg_to_snp(ds));
2032 vmsa.set_es(virt_seg_to_snp(es));
2033 vmsa.set_fs(virt_seg_to_snp(fs));
2034 vmsa.set_gs(virt_seg_to_snp(gs));
2035 vmsa.set_ss(virt_seg_to_snp(ss));
2036 vmsa.set_tr(virt_seg_to_snp(tr));
2037 vmsa.set_ldtr(virt_seg_to_snp(ldtr));
2038 vmsa.set_gdtr(virt_table_to_snp(gdtr));
2039 vmsa.set_idtr(virt_table_to_snp(idtr));
2040 vmsa.set_cr0(cr0);
2041 vmsa.set_cr2(cr2);
2042 vmsa.set_cr3(cr3);
2043 vmsa.set_cr4(cr4);
2044 vmsa.v_intr_cntrl_mut().set_tpr(cr8 as u8);
2045 vmsa.set_efer(SnpBacked::calculate_efer(efer, cr0));
2046 Ok(())
2047 }
2048
2049 fn activity(&mut self) -> Result<vp::Activity, Self::Error> {
2050 let lapic = &self.vp.backing.cvm.lapics[self.vtl];
2051
2052 Ok(vp::Activity {
2053 mp_state: lapic.activity,
2054 nmi_pending: lapic.nmi_pending,
            nmi_masked: false,
            interrupt_shadow: false,
            pending_event: None,
            pending_interruption: None,
        })
2060 }
2061
2062 fn set_activity(&mut self, value: &vp::Activity) -> Result<(), Self::Error> {
2063 let &vp::Activity {
2064 mp_state,
2065 nmi_pending,
            nmi_masked: _,
            interrupt_shadow: _,
            pending_event: _,
            pending_interruption: _,
        } = value;
2071 let lapic = &mut self.vp.backing.cvm.lapics[self.vtl];
2072 lapic.activity = mp_state;
2073 lapic.nmi_pending = nmi_pending;
2074
2075 Ok(())
2076 }
2077
2078 fn xsave(&mut self) -> Result<vp::Xsave, Self::Error> {
2079 Err(vp_state::Error::Unimplemented("xsave"))
2080 }
2081
2082 fn set_xsave(&mut self, _value: &vp::Xsave) -> Result<(), Self::Error> {
2083 Err(vp_state::Error::Unimplemented("xsave"))
2084 }
2085
2086 fn apic(&mut self) -> Result<vp::Apic, Self::Error> {
2087 Ok(self.vp.backing.cvm.lapics[self.vtl].lapic.save())
2088 }
2089
2090 fn set_apic(&mut self, value: &vp::Apic) -> Result<(), Self::Error> {
2091 self.vp.backing.cvm.lapics[self.vtl]
2092 .lapic
2093 .restore(value)
2094 .map_err(vp_state::Error::InvalidApicBase)?;
2095 Ok(())
2096 }
2097
2098 fn xcr(&mut self) -> Result<vp::Xcr0, Self::Error> {
2099 let vmsa = self.vp.runner.vmsa(self.vtl);
2100 Ok(vp::Xcr0 { value: vmsa.xcr0() })
2101 }
2102
2103 fn set_xcr(&mut self, value: &vp::Xcr0) -> Result<(), Self::Error> {
2104 let vp::Xcr0 { value } = *value;
2105 self.vp.runner.vmsa_mut(self.vtl).set_xcr0(value);
2106 Ok(())
2107 }
2108
2109 fn xss(&mut self) -> Result<vp::Xss, Self::Error> {
2110 let vmsa = self.vp.runner.vmsa(self.vtl);
2111 Ok(vp::Xss { value: vmsa.xss() })
2112 }
2113
2114 fn set_xss(&mut self, value: &vp::Xss) -> Result<(), Self::Error> {
2115 let vp::Xss { value } = *value;
2116 self.vp.runner.vmsa_mut(self.vtl).set_xss(value);
2117 Ok(())
2118 }
2119
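    // MTRR state is not backed by the VMSA: reads report an all-zero
    // configuration and restores are accepted but not applied.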
    fn mtrrs(&mut self) -> Result<vp::Mtrrs, Self::Error> {
        Ok(vp::Mtrrs {
            msr_mtrr_def_type: 0,
            fixed: [0; 11],
            variable: [0; 16],
        })
    }

    fn set_mtrrs(&mut self, _value: &vp::Mtrrs) -> Result<(), Self::Error> {
        Ok(())
    }

    fn pat(&mut self) -> Result<vp::Pat, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Pat { value: vmsa.pat() })
    }

    fn set_pat(&mut self, value: &vp::Pat) -> Result<(), Self::Error> {
        let vp::Pat { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_pat(value);
        Ok(())
    }

    fn virtual_msrs(&mut self) -> Result<vp::VirtualMsrs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);

        Ok(vp::VirtualMsrs {
            kernel_gs_base: vmsa.kernel_gs_base(),
            sysenter_cs: vmsa.sysenter_cs(),
            sysenter_eip: vmsa.sysenter_eip(),
            sysenter_esp: vmsa.sysenter_esp(),
            star: vmsa.star(),
            lstar: vmsa.lstar(),
            cstar: vmsa.cstar(),
            sfmask: vmsa.sfmask(),
        })
    }

    fn set_virtual_msrs(&mut self, value: &vp::VirtualMsrs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::VirtualMsrs {
            kernel_gs_base,
            sysenter_cs,
            sysenter_eip,
            sysenter_esp,
            star,
            lstar,
            cstar,
            sfmask,
        } = *value;
        vmsa.set_kernel_gs_base(kernel_gs_base);
        vmsa.set_sysenter_cs(sysenter_cs);
        vmsa.set_sysenter_eip(sysenter_eip);
        vmsa.set_sysenter_esp(sysenter_esp);
        vmsa.set_star(star);
        vmsa.set_lstar(lstar);
        vmsa.set_cstar(cstar);
        vmsa.set_sfmask(sfmask);

        Ok(())
    }

    fn debug_regs(&mut self) -> Result<vp::DebugRegisters, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::DebugRegisters {
            dr0: vmsa.dr0(),
            dr1: vmsa.dr1(),
            dr2: vmsa.dr2(),
            dr3: vmsa.dr3(),
            dr6: vmsa.dr6(),
            dr7: vmsa.dr7(),
        })
    }

    fn set_debug_regs(&mut self, value: &vp::DebugRegisters) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::DebugRegisters {
            dr0,
            dr1,
            dr2,
            dr3,
            dr6,
            dr7,
        } = *value;
        vmsa.set_dr0(dr0);
        vmsa.set_dr1(dr1);
        vmsa.set_dr2(dr2);
        vmsa.set_dr3(dr3);
        vmsa.set_dr6(dr6);
        vmsa.set_dr7(dr7);
        Ok(())
    }

    fn tsc(&mut self) -> Result<vp::Tsc, Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn set_tsc(&mut self, _value: &vp::Tsc) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("tsc"))
    }

    fn tsc_aux(&mut self) -> Result<vp::TscAux, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::TscAux {
            value: vmsa.tsc_aux() as u64,
        })
    }

    fn set_tsc_aux(&mut self, value: &vp::TscAux) -> Result<(), Self::Error> {
        let vp::TscAux { value } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_tsc_aux(value as u32);
        Ok(())
    }

    fn cet(&mut self) -> Result<vp::Cet, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::Cet { scet: vmsa.s_cet() })
    }

    fn set_cet(&mut self, value: &vp::Cet) -> Result<(), Self::Error> {
        let vp::Cet { scet } = *value;
        self.vp.runner.vmsa_mut(self.vtl).set_s_cet(scet);
        Ok(())
    }

    fn cet_ss(&mut self) -> Result<vp::CetSs, Self::Error> {
        let vmsa = self.vp.runner.vmsa(self.vtl);
        Ok(vp::CetSs {
            ssp: vmsa.ssp(),
            interrupt_ssp_table_addr: vmsa.interrupt_ssp_table_addr(),
        })
    }

    fn set_cet_ss(&mut self, value: &vp::CetSs) -> Result<(), Self::Error> {
        let mut vmsa = self.vp.runner.vmsa_mut(self.vtl);
        let vp::CetSs {
            ssp,
            interrupt_ssp_table_addr,
        } = *value;
        vmsa.set_ssp(ssp);
        vmsa.set_interrupt_ssp_table_addr(interrupt_ssp_table_addr);
        Ok(())
    }

    fn synic_msrs(&mut self) -> Result<vp::SyntheticMsrs, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn set_synic_msrs(&mut self, _value: &vp::SyntheticMsrs) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_msrs"))
    }

    fn synic_message_page(&mut self) -> Result<vp::SynicMessagePage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn set_synic_message_page(&mut self, _value: &vp::SynicMessagePage) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_page"))
    }

    fn synic_event_flags_page(&mut self) -> Result<vp::SynicEventFlagsPage, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn set_synic_event_flags_page(
        &mut self,
        _value: &vp::SynicEventFlagsPage,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_event_flags_page"))
    }

    fn synic_message_queues(&mut self) -> Result<vp::SynicMessageQueues, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn set_synic_message_queues(
        &mut self,
        _value: &vp::SynicMessageQueues,
    ) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_message_queues"))
    }

    fn synic_timers(&mut self) -> Result<vp::SynicTimers, Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }

    fn set_synic_timers(&mut self, _value: &vp::SynicTimers) -> Result<(), Self::Error> {
        Err(vp_state::Error::Unimplemented("synic_timers"))
    }
}

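/// Advances the guest RIP to the instruction following the intercepted one and
/// clears the VMSA interrupt shadow.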
fn advance_to_next_instruction(vmsa: &mut VmsaWrapper<'_, &mut SevVmsa>) {
    vmsa.set_rip(vmsa.next_rip());
    vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
}

impl UhProcessor<'_, SnpBacked> {
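    /// Reads an MSR from the SNP VMSA or from locally emulated state; MSRs not
    /// handled here return `MsrError::Unknown`.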
    fn read_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        vtl: GuestVtl,
    ) -> Result<u64, MsrError> {
        let vmsa = self.runner.vmsa(vtl);
        let value = match msr {
            x86defs::X64_MSR_FS_BASE => vmsa.fs().base,
            x86defs::X64_MSR_GS_BASE => vmsa.gs().base,
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.kernel_gs_base(),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.tsc_aux() as u64
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.spec_ctrl(),
            x86defs::X86X_MSR_U_CET => vmsa.u_cet(),
            x86defs::X86X_MSR_S_CET => vmsa.s_cet(),
            x86defs::X86X_MSR_PL0_SSP => vmsa.pl0_ssp(),
            x86defs::X86X_MSR_PL1_SSP => vmsa.pl1_ssp(),
            x86defs::X86X_MSR_PL2_SSP => vmsa.pl2_ssp(),
            x86defs::X86X_MSR_PL3_SSP => vmsa.pl3_ssp(),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.interrupt_ssp_table_addr(),
            x86defs::X86X_MSR_CR_PAT => vmsa.pat(),
            x86defs::X86X_MSR_EFER => vmsa.efer(),
            x86defs::X86X_MSR_STAR => vmsa.star(),
            x86defs::X86X_MSR_LSTAR => vmsa.lstar(),
            x86defs::X86X_MSR_CSTAR => vmsa.cstar(),
            x86defs::X86X_MSR_SFMASK => vmsa.sfmask(),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.sysenter_cs(),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.sysenter_esp(),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.sysenter_eip(),
            x86defs::X86X_MSR_XSS => vmsa.xss(),
            x86defs::X86X_AMD_MSR_VM_CR => 0,
            x86defs::X86X_MSR_TSC => safe_intrinsics::rdtsc(),
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => 0xffff_ffff,
            x86defs::X86X_MSR_MTRR_CAP => {
                // Report only bit 10 (write combining supported); no fixed or
                // variable MTRR ranges are exposed to the guest.
                0x400
            }
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {
                // MTRRs are not implemented here; reads return zero and writes
                // are ignored (see write_msr_snp).
                0
            }
            x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP
            | x86defs::X86X_MSR_MCG_STATUS => 0,

            hvdef::HV_X64_MSR_GUEST_IDLE => {
                self.backing.cvm.lapics[vtl].activity = MpState::Idle;
                let mut vmsa = self.runner.vmsa_mut(vtl);
                vmsa.v_intr_cntrl_mut().set_intr_shadow(false);
                0
            }
            _ => return Err(MsrError::Unknown),
        };
        Ok(value)
    }

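    /// Writes an MSR that is backed by the SNP VMSA or emulated locally;
    /// unknown MSRs are logged and the write is ignored.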
    fn write_msr_snp(
        &mut self,
        _dev: &impl CpuIo,
        msr: u32,
        value: u64,
        vtl: GuestVtl,
    ) -> Result<(), MsrError> {
        let mut vmsa = self.runner.vmsa_mut(vtl);
        match msr {
            x86defs::X64_MSR_FS_BASE => {
                let fs = vmsa.fs();
                vmsa.set_fs(SevSelector {
                    attrib: fs.attrib,
                    selector: fs.selector,
                    limit: fs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_GS_BASE => {
                let gs = vmsa.gs();
                vmsa.set_gs(SevSelector {
                    attrib: gs.attrib,
                    selector: gs.selector,
                    limit: gs.limit,
                    base: value,
                });
            }
            x86defs::X64_MSR_KERNEL_GS_BASE => vmsa.set_kernel_gs_base(value),
            x86defs::X86X_MSR_TSC_AUX => {
                if self.shared.tsc_aux_virtualized {
                    vmsa.set_tsc_aux(value as u32);
                } else {
                    return Err(MsrError::InvalidAccess);
                }
            }
            x86defs::X86X_MSR_SPEC_CTRL => vmsa.set_spec_ctrl(value),
            x86defs::X86X_MSR_U_CET => vmsa.set_u_cet(value),
            x86defs::X86X_MSR_S_CET => vmsa.set_s_cet(value),
            x86defs::X86X_MSR_PL0_SSP => vmsa.set_pl0_ssp(value),
            x86defs::X86X_MSR_PL1_SSP => vmsa.set_pl1_ssp(value),
            x86defs::X86X_MSR_PL2_SSP => vmsa.set_pl2_ssp(value),
            x86defs::X86X_MSR_PL3_SSP => vmsa.set_pl3_ssp(value),
            x86defs::X86X_MSR_INTERRUPT_SSP_TABLE_ADDR => vmsa.set_interrupt_ssp_table_addr(value),

            x86defs::X86X_MSR_CR_PAT => vmsa.set_pat(value),
            x86defs::X86X_MSR_EFER => vmsa.set_efer(SnpBacked::calculate_efer(value, vmsa.cr0())),

            x86defs::X86X_MSR_STAR => vmsa.set_star(value),
            x86defs::X86X_MSR_LSTAR => vmsa.set_lstar(value),
            x86defs::X86X_MSR_CSTAR => vmsa.set_cstar(value),
            x86defs::X86X_MSR_SFMASK => vmsa.set_sfmask(value),
            x86defs::X86X_MSR_SYSENTER_CS => vmsa.set_sysenter_cs(value),
            x86defs::X86X_MSR_SYSENTER_ESP => vmsa.set_sysenter_esp(value),
            x86defs::X86X_MSR_SYSENTER_EIP => vmsa.set_sysenter_eip(value),
            x86defs::X86X_MSR_XSS => vmsa.set_xss(value),

            // Writes to these MSRs are silently ignored.
            x86defs::X86X_MSR_TSC => {}
            x86defs::X86X_MSR_MC_UPDATE_PATCH_LEVEL => {}
            x86defs::X86X_MSR_MTRR_DEF_TYPE => {}

            x86defs::X86X_AMD_MSR_VM_CR
            | x86defs::X86X_MSR_MTRR_CAP
            | x86defs::X86X_AMD_MSR_SYSCFG
            | x86defs::X86X_MSR_MCG_CAP => return Err(MsrError::InvalidAccess),

            x86defs::X86X_MSR_MCG_STATUS => {
                // The write is dropped, but setting reserved bits is still an error.
                if x86defs::X86xMcgStatusRegister::from(value).reserved0() != 0 {
                    return Err(MsrError::InvalidAccess);
                }
            }
            _ => {
                tracing::debug!(msr, value, "unknown cvm msr write");
            }
        }
        Ok(())
    }
}

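// Hypercall plumbing for VTL switches: advance the instruction pointer on
// success, or inject #UD into the intercepted VTL on failure.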
impl<T: CpuIo> hv1_hypercall::VtlSwitchOps for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn advance_ip(&mut self) {
        let is_64bit = self.vp.long_mode(self.intercepted_vtl);
        let mut io = hv1_hypercall::X64RegisterIo::new(self, is_64bit);
        io.advance_ip();
    }

    fn inject_invalid_opcode_fault(&mut self) {
        self.vp
            .runner
            .vmsa_mut(self.intercepted_vtl)
            .set_event_inject(
                SevEventInjectInfo::new()
                    .with_valid(true)
                    .with_interruption_type(x86defs::snp::SEV_INTR_TYPE_EXCEPT)
                    .with_vector(x86defs::Exception::INVALID_OPCODE.0),
            );
    }
}

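// The HvFlushVirtualAddressList{,Ex} and HvFlushVirtualAddressSpace{,Ex}
// hypercalls are implemented on top of AMD INVLPGB/TLBSYNC; the non-Ex
// variants simply forward to their Ex counterparts.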
impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressList for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn flush_virtual_address_list(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        hv1_hypercall::FlushVirtualAddressListEx::flush_virtual_address_list_ex(
            self,
            processor_set,
            flags,
            gva_ranges,
        )
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressListEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_list_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        gva_ranges: &[HvGvaRange],
    ) -> HvRepResult {
        self.hcvm_validate_flush_inputs(processor_set, flags, true)
            .map_err(|e| (e, 0))?;

        // For large requests, flushing the entire address space is cheaper
        // than walking every range individually.
        let large_request = gva_ranges.len() > 16
            || gva_ranges.iter().any(|range| {
                let additional_pages = if flags.use_extended_range_format() {
                    range.as_extended().additional_pages()
                } else {
                    range.as_simple().additional_pages()
                };
                additional_pages > 16
            });
        if large_request {
            self.do_flush_virtual_address_space(processor_set, flags);
        } else {
            self.do_flush_virtual_address_list(flags, gva_ranges);
        }

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);
        Ok(())
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpace
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        hv1_hypercall::FlushVirtualAddressSpaceEx::flush_virtual_address_space_ex(
            self,
            processor_set,
            flags,
        )
    }
}

impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressSpaceEx
    for UhHypercallHandler<'_, '_, T, SnpBacked>
{
    fn flush_virtual_address_space_ex(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) -> hvdef::HvResult<()> {
        self.hcvm_validate_flush_inputs(processor_set, flags, false)?;

        self.do_flush_virtual_address_space(processor_set, flags);

        self.vp.set_wait_for_tlb_locks(self.intercepted_vtl);
        Ok(())
    }
}

impl<T: CpuIo> UhHypercallHandler<'_, '_, T, SnpBacked> {
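    /// Flushes the given GVA ranges with INVLPGB, chunking each range by the
    /// CPU's advertised maximum additional-page count, then issues TLBSYNC to
    /// wait for completion on all cores.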
    fn do_flush_virtual_address_list(&mut self, flags: HvFlushFlags, gva_ranges: &[HvGvaRange]) {
        for range in gva_ranges {
            let mut rax = SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_va_valid(true)
                .with_global(!flags.non_global_mappings_only());
            let mut ecx = SevInvlpgbEcx::new();
            let mut count;
            let mut gpn;

            if flags.use_extended_range_format() && range.as_extended().large_page() {
                ecx.set_large_page(true);
                if range.as_extended_large_page().page_size() {
                    let range = range.as_extended_large_page();
                    count = range.additional_pages();
                    gpn = range.gva_large_page_number();
                } else {
                    let range = range.as_extended();
                    count = range.additional_pages();
                    gpn = range.gva_page_number();
                }
            } else {
                let range = range.as_simple();
                count = range.additional_pages();
                gpn = range.gva_page_number();
            }
            count += 1;

            while count > 0 {
                rax.set_virtual_page_number(gpn);
                ecx.set_additional_count(std::cmp::min(
                    count - 1,
                    self.vp.shared.invlpgb_count_max.into(),
                ));

                let edx = SevInvlpgbEdx::new();
                self.vp
                    .partition
                    .hcl
                    .invlpgb(rax.into(), edx.into(), ecx.into());

                count -= ecx.additional_count() + 1;
                gpn += ecx.additional_count() + 1;
            }
        }

        self.vp.partition.hcl.tlbsync();
    }

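    /// Flushes the entire address space. When only the current VP is targeted
    /// and only non-global mappings need flushing, the VMSA is updated instead
    /// of broadcasting INVLPGB to every core.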
    fn do_flush_virtual_address_space(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
    ) {
        let only_self = [self.vp.vp_index().index()].into_iter().eq(processor_set);
        if only_self && flags.non_global_mappings_only() {
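            // Reset the saved pCPU id; this should cause this VP's TLB to be
            // refreshed on its next entry, avoiding a broadcast INVLPGB for a
            // self-only, non-global flush.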
            self.vp.runner.vmsa_mut(self.intercepted_vtl).set_pcpu_id(0);
        } else {
            self.vp.partition.hcl.invlpgb(
                SevInvlpgbRax::new()
                    .with_asid_valid(true)
                    .with_global(!flags.non_global_mappings_only())
                    .into(),
                SevInvlpgbEdx::new().into(),
                SevInvlpgbEcx::new().into(),
            );
            self.vp.partition.hcl.tlbsync();
        }
    }
}

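/// TLB flush/lock access for an SNP partition, optionally scoped to a single
/// VP (when `vp_index` is `Some`).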
struct SnpTlbLockFlushAccess<'a> {
    vp_index: Option<VpIndex>,
    partition: &'a UhPartitionInner,
    shared: &'a SnpBackedShared,
}

impl TlbFlushLockAccess for SnpTlbLockFlushAccess<'_> {
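    // Both flush() and flush_entire() issue a global INVLPGB followed by
    // TLBSYNC; the VTL argument only controls which VTL(s) subsequently wait
    // for TLB locks.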
    fn flush(&mut self, vtl: GuestVtl) {
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        self.set_wait_for_tlb_locks(vtl);
    }

    fn flush_entire(&mut self) {
        self.partition.hcl.invlpgb(
            SevInvlpgbRax::new()
                .with_asid_valid(true)
                .with_global(true)
                .into(),
            SevInvlpgbEdx::new().into(),
            SevInvlpgbEcx::new().into(),
        );
        self.partition.hcl.tlbsync();
        for vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
            self.set_wait_for_tlb_locks(vtl);
        }
    }

    fn set_wait_for_tlb_locks(&mut self, vtl: GuestVtl) {
        if let Some(vp_index) = self.vp_index {
            hardware_cvm::tlb_lock::TlbLockAccess {
                vp_index,
                cvm_partition: &self.shared.cvm,
            }
            .set_wait_for_tlb_locks(vtl);
        }
    }
}

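// Live save/restore is not supported for SNP-backed processors; the saved
// state type is uninhabited, so restore can never be reached with a value.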
mod save_restore {
    use super::SnpBacked;
    use super::UhProcessor;
    use vmcore::save_restore::RestoreError;
    use vmcore::save_restore::SaveError;
    use vmcore::save_restore::SaveRestore;
    use vmcore::save_restore::SavedStateNotSupported;

    impl SaveRestore for UhProcessor<'_, SnpBacked> {
        type SavedState = SavedStateNotSupported;

        fn save(&mut self) -> Result<Self::SavedState, SaveError> {
            Err(SaveError::NotSupported)
        }

        fn restore(&mut self, state: Self::SavedState) -> Result<(), RestoreError> {
            match state {}
        }
    }
}