// virt_mshv_vtl/processor/mshv/tlb_lock.rs

use crate::HypervisorBacked;
use crate::UhProcessor;
use hcl::GuestVtl;
use hvdef::HvAllArchRegisterName;
use hvdef::Vtl;

12impl UhProcessor<'_, HypervisorBacked> {
13 #[expect(dead_code)]
17 pub(crate) fn set_wait_for_tlb_locks(&mut self, target_vtl: Vtl) {
18 let reg = [(
19 HvAllArchRegisterName::VsmVpWaitForTlbLock,
20 u64::from(hvdef::HvRegisterVsmWpWaitForTlbLock::new().with_wait(true)),
21 )];
22 self.runner
23 .set_vp_registers_hvcall(target_vtl, reg)
24 .expect("set_vp_registers hypercall for waiting for tlb lock should not fail");
25 }
26
27 #[expect(dead_code)]
29 pub(crate) fn set_tlb_lock(&mut self, requesting_vtl: Vtl, target_vtl: GuestVtl) {
30 debug_assert_eq!(requesting_vtl, Vtl::Vtl2);
31
32 if self.vtls_tlb_locked.get(requesting_vtl, target_vtl) {
33 return;
34 }
35
36 let reg = [(
37 HvAllArchRegisterName(
38 HvAllArchRegisterName::VsmVpSecureConfigVtl0.0 + target_vtl as u32,
39 ),
40 u64::from(hvdef::HvRegisterVsmVpSecureVtlConfig::new().with_tlb_locked(true)),
41 )];
42 self.runner
43 .set_vp_registers_hvcall(requesting_vtl, reg)
44 .expect("set_vp_registers hypercall for setting tlb lock should not fail");
45
46 self.vtls_tlb_locked.set(requesting_vtl, target_vtl, true);
47 }
48
49 pub(crate) fn mark_tlb_locked(&mut self, requesting_vtl: Vtl, target_vtl: GuestVtl) {
53 debug_assert_eq!(requesting_vtl, Vtl::Vtl2);
54 debug_assert!(self.is_tlb_locked_in_hypervisor(target_vtl));
55 self.vtls_tlb_locked.set(requesting_vtl, target_vtl, true);
56 }
57
58 pub(crate) fn is_tlb_locked(&self, requesting_vtl: Vtl, target_vtl: GuestVtl) -> bool {
60 assert!(cfg!(debug_assertions));
62 debug_assert_eq!(requesting_vtl, Vtl::Vtl2);
63 let local_status = self.vtls_tlb_locked.get(requesting_vtl, target_vtl);
64 if local_status {
66 debug_assert!(self.is_tlb_locked_in_hypervisor(target_vtl));
67 }
68 local_status
69 }
70
71 fn is_tlb_locked_in_hypervisor(&self, target_vtl: GuestVtl) -> bool {
72 assert!(cfg!(debug_assertions));
74 let name = HvAllArchRegisterName(
75 HvAllArchRegisterName::VsmVpSecureConfigVtl0.0 + target_vtl as u32,
76 );
77 let result = self
78 .partition
79 .hcl
80 .get_vp_register(name, hvdef::hypercall::HvInputVtl::CURRENT_VTL)
81 .expect("failure is a misconfiguration");
82 let config = hvdef::HvRegisterVsmVpSecureVtlConfig::from(result.as_u64());
83 config.tlb_locked()
84 }
85
86 pub(crate) fn unlock_tlb_lock(&mut self, unlocking_vtl: Vtl) {
89 debug_assert_eq!(unlocking_vtl, Vtl::Vtl2);
90 self.vtls_tlb_locked.fill(unlocking_vtl, false);
91 }
92}