virt_mshv_vtl/processor/mshv/tlb_lock.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! TLB lock infrastructure support for Microsoft hypervisor-backed partitions.

use crate::HypervisorBacked;
use crate::UhProcessor;
use hcl::GuestVtl;
use hvdef::HvAllArchRegisterName;
use hvdef::Vtl;

impl UhProcessor<'_, HypervisorBacked> {
    /// Causes the specified VTL on the current VP to wait on all TLB locks.
    /// This is typically used to synchronize VTL permission changes with
    /// concurrent instruction emulation.
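    ///
    /// A minimal usage sketch (hypothetical caller; assumes `vp` is a
    /// `UhProcessor<'_, HypervisorBacked>`):
    ///
    /// ```ignore
    /// // After updating VTL protections, make VTL 0 wait on outstanding TLB
    /// // locks before it resumes.
    /// vp.set_wait_for_tlb_locks(Vtl::Vtl0);
    /// ```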
    #[expect(dead_code)]
    pub(crate) fn set_wait_for_tlb_locks(&mut self, target_vtl: Vtl) {
        let reg = [(
            HvAllArchRegisterName::VsmVpWaitForTlbLock,
            u64::from(hvdef::HvRegisterVsmWpWaitForTlbLock::new().with_wait(true)),
        )];
        self.runner
            .set_vp_registers_hvcall(target_vtl, reg)
            .expect("set_vp_registers hypercall for waiting for tlb lock should not fail");
    }

    /// Lock the TLB of the target VTL on the current VP.
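    ///
    /// A minimal sketch of the intended lock/unlock pairing (hypothetical
    /// caller; assumes `vp` is a `UhProcessor<'_, HypervisorBacked>`):
    ///
    /// ```ignore
    /// // Freeze VTL 0's address translations for the duration of the update.
    /// vp.set_tlb_lock(Vtl::Vtl2, GuestVtl::Vtl0);
    /// // ... emulate the instruction or change VTL protections ...
    /// // Clear local tracking; the hypervisor unlocks on VTL exit.
    /// vp.unlock_tlb_lock(Vtl::Vtl2);
    /// ```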
    #[expect(dead_code)]
    pub(crate) fn set_tlb_lock(&mut self, requesting_vtl: Vtl, target_vtl: GuestVtl) {
        debug_assert_eq!(requesting_vtl, Vtl::Vtl2);
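        // Nothing to do if this VP has already locked the target VTL's TLB.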
        if self.vtls_tlb_locked.get(requesting_vtl, target_vtl) {
            return;
        }
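        // The secure config registers for the guest VTLs are numbered
        // consecutively, so offsetting VsmVpSecureConfigVtl0 by the target
        // VTL index selects that VTL's register.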
        let reg = [(
            HvAllArchRegisterName(
                HvAllArchRegisterName::VsmVpSecureConfigVtl0.0 + target_vtl as u32,
            ),
            u64::from(hvdef::HvRegisterVsmVpSecureVtlConfig::new().with_tlb_locked(true)),
        )];
        self.runner
            .set_vp_registers_hvcall(requesting_vtl, reg)
            .expect("set_vp_registers hypercall for setting tlb lock should not fail");
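        // Track locally that this VP holds the lock so a repeated request can
        // return early.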
        self.vtls_tlb_locked.set(requesting_vtl, target_vtl, true);
    }

    /// Mark the TLB of the target VTL on the current VP as locked without
    /// informing the hypervisor. This should only be used when the hypervisor
    /// is expected to have already locked the TLB.
    pub(crate) fn mark_tlb_locked(&mut self, requesting_vtl: Vtl, target_vtl: GuestVtl) {
        debug_assert_eq!(requesting_vtl, Vtl::Vtl2);
        debug_assert!(self.is_tlb_locked_in_hypervisor(target_vtl));
        self.vtls_tlb_locked.set(requesting_vtl, target_vtl, true);
    }

    /// Check the status of the TLB lock of the target VTL on the current VP.
    pub(crate) fn is_tlb_locked(&self, requesting_vtl: Vtl, target_vtl: GuestVtl) -> bool {
        // This function should only be called in debug assertions.
        assert!(cfg!(debug_assertions));
        debug_assert_eq!(requesting_vtl, Vtl::Vtl2);
        let local_status = self.vtls_tlb_locked.get(requesting_vtl, target_vtl);
        // The hypervisor may lock the TLB without us knowing, but the inverse should never happen.
        if local_status {
            debug_assert!(self.is_tlb_locked_in_hypervisor(target_vtl));
        }
        local_status
    }

    fn is_tlb_locked_in_hypervisor(&self, target_vtl: GuestVtl) -> bool {
        // This function should only be called in debug assertions.
        assert!(cfg!(debug_assertions));
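        // The secure config register for the target VTL sits at a fixed
        // offset from VsmVpSecureConfigVtl0 (see set_tlb_lock above).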
        let name = HvAllArchRegisterName(
            HvAllArchRegisterName::VsmVpSecureConfigVtl0.0 + target_vtl as u32,
        );
        let result = self
            .partition
            .hcl
            .get_vp_register(name, hvdef::hypercall::HvInputVtl::CURRENT_VTL)
            .expect("failure is a misconfiguration");
        let config = hvdef::HvRegisterVsmVpSecureVtlConfig::from(result.as_u64());
        config.tlb_locked()
    }

    /// Marks the TLBs of all lower VTLs as unlocked.
    /// The hypervisor does the actual unlocking required upon VTL exit.
    pub(crate) fn unlock_tlb_lock(&mut self, unlocking_vtl: Vtl) {
        debug_assert_eq!(unlocking_vtl, Vtl::Vtl2);
        self.vtls_tlb_locked.fill(unlocking_vtl, false);
    }
}