virt_mshv_vtl/processor/mshv/tlb_lock.rs
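//! TLB lock support for Microsoft hypervisor-backed (`HypervisorBacked`) partitions:
//! per-VP tracking and setting of TLB locks taken against lower VTLs.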
use crate::HypervisorBacked;
use crate::UhProcessor;
use hcl::GuestVtl;
use hvdef::HvAllArchRegisterName;
use hvdef::Vtl;
use hvdef::hypercall::HvInputVtl;
impl UhProcessor<'_, HypervisorBacked> {
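    /// Causes the target VTL on this VP to wait for all TLB locks to be
    /// released before execution returns to that VTL, by setting the
    /// `VsmVpWaitForTlbLock` register for that VTL.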
    #[expect(dead_code)]
    pub(crate) fn set_wait_for_tlb_locks(&mut self, target_vtl: Vtl) {
        let reg = [(
            HvAllArchRegisterName::VsmVpWaitForTlbLock,
            u64::from(hvdef::HvRegisterVsmWpWaitForTlbLock::new().with_wait(true)),
        )];
        self.runner
            .set_vp_registers_hvcall(target_vtl, reg)
            .expect("set_vp_registers hypercall for waiting for tlb lock should not fail");
    }
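    /// Locks the TLB of the target VTL on behalf of the requesting VTL (always
    /// VTL 2 here) and records the lock in the local tracking state.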
    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    pub(crate) fn set_tlb_lock(&mut self, requesting_vtl: Vtl, target_vtl: GuestVtl) {
        debug_assert_eq!(requesting_vtl, Vtl::Vtl2);
        // Nothing to do if this VP already holds the lock for the target VTL.
        if self.is_tlb_locked(requesting_vtl, target_vtl) {
            return;
        }
        // The secure config register for a given VTL is VsmVpSecureConfigVtl0
        // offset by the target VTL's index.
        let reg = [(
            HvAllArchRegisterName(
                HvAllArchRegisterName::VsmVpSecureConfigVtl0.0 + target_vtl as u32,
            ),
            u64::from(hvdef::HvRegisterVsmVpSecureVtlConfig::new().with_tlb_locked(true)),
        )];
        self.runner
            .set_vp_registers_hvcall(requesting_vtl, reg)
            .expect("set_vp_registers hypercall for setting tlb lock should not fail");
        self.vtls_tlb_locked.set(requesting_vtl, target_vtl, true);
    }
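    /// Returns whether the requesting VTL (always VTL 2 here) currently holds
    /// the TLB lock for the target VTL on this VP, per local tracking.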
    pub(crate) fn is_tlb_locked(&mut self, requesting_vtl: Vtl, target_vtl: GuestVtl) -> bool {
        debug_assert_eq!(requesting_vtl, Vtl::Vtl2);
        let local_status = self.vtls_tlb_locked.get(requesting_vtl, target_vtl);
        // The local flag should never claim a lock that the hypervisor does not
        // report as held.
        if local_status {
            debug_assert!(self.is_tlb_locked_in_hypervisor(target_vtl));
        }
        local_status
    }
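    /// Queries the hypervisor for the target VTL's secure config register and
    /// returns whether its TLB lock bit is set. Used to cross-check the local
    /// tracking state in debug builds.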
    fn is_tlb_locked_in_hypervisor(&self, target_vtl: GuestVtl) -> bool {
        let name = HvAllArchRegisterName(
            HvAllArchRegisterName::VsmVpSecureConfigVtl0.0 + target_vtl as u32,
        );
        let result = self
            .partition
            .hcl
            .get_vp_register(name, HvInputVtl::CURRENT_VTL)
            .expect("failure is a misconfiguration");
        let config = hvdef::HvRegisterVsmVpSecureVtlConfig::from(result.as_u64());
        config.tlb_locked()
    }
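    /// Clears this VP's local tracking of all TLB locks taken by the unlocking
    /// VTL (always VTL 2 here).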
    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    pub(crate) fn unlock_tlb_lock(&mut self, unlocking_vtl: Vtl) {
        debug_assert_eq!(unlocking_vtl, Vtl::Vtl2);
        self.vtls_tlb_locked.fill(unlocking_vtl, false);
    }
}