virt_mshv_vtl/processor/mshv/tlb_lock.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! TLB lock infrastructure support for Microsoft hypervisor-backed partitions.

use crate::HypervisorBacked;
use crate::UhProcessor;
use hcl::GuestVtl;
use hvdef::HvAllArchRegisterName;
use hvdef::Vtl;
use hvdef::hypercall::HvInputVtl;

impl UhProcessor<'_, HypervisorBacked> {
    /// Causes the specified VTL on the current VP to wait on all TLB locks.
    /// This is typically used to synchronize VTL permission changes with
    /// concurrent instruction emulation.
    #[expect(dead_code)]
    pub(crate) fn set_wait_for_tlb_locks(&mut self, target_vtl: Vtl) {
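        // Set the wait-for-TLB-lock VP register for the target VTL so that the
        // VTL waits on all outstanding TLB locks held against this VP before
        // resuming.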
        let reg = [(
            HvAllArchRegisterName::VsmVpWaitForTlbLock,
            u64::from(hvdef::HvRegisterVsmWpWaitForTlbLock::new().with_wait(true)),
        )];
        self.runner
            .set_vp_registers_hvcall(target_vtl, reg)
            .expect("set_vp_registers hypercall for waiting for tlb lock should not fail");
    }

    /// Lock the TLB of the target VTL on the current VP.
    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    pub(crate) fn set_tlb_lock(&mut self, requesting_vtl: Vtl, target_vtl: GuestVtl) {
        debug_assert_eq!(requesting_vtl, Vtl::Vtl2);

        if self.is_tlb_locked(requesting_vtl, target_vtl) {
            return;
        }

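        // The per-VTL secure config registers are laid out contiguously, so the
        // register for the target VTL is VsmVpSecureConfigVtl0 offset by the VTL
        // index. Setting its `tlb_locked` bit takes the lock in the hypervisor.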
        let reg = [(
            HvAllArchRegisterName(
                HvAllArchRegisterName::VsmVpSecureConfigVtl0.0 + target_vtl as u32,
            ),
            u64::from(hvdef::HvRegisterVsmVpSecureVtlConfig::new().with_tlb_locked(true)),
        )];
        self.runner
            .set_vp_registers_hvcall(requesting_vtl, reg)
            .expect("set_vp_registers hypercall for setting tlb lock should not fail");

        self.vtls_tlb_locked.set(requesting_vtl, target_vtl, true);
    }

    /// Check the status of the TLB lock of the target VTL on the current VP.
    pub(crate) fn is_tlb_locked(&mut self, requesting_vtl: Vtl, target_vtl: GuestVtl) -> bool {
        debug_assert_eq!(requesting_vtl, Vtl::Vtl2);
        let local_status = self.vtls_tlb_locked.get(requesting_vtl, target_vtl);
        // The hypervisor may lock the TLB without us knowing, but the inverse should never happen.
        if local_status {
            debug_assert!(self.is_tlb_locked_in_hypervisor(target_vtl))
        };
        local_status
    }

    fn is_tlb_locked_in_hypervisor(&self, target_vtl: GuestVtl) -> bool {
        let name = HvAllArchRegisterName(
            HvAllArchRegisterName::VsmVpSecureConfigVtl0.0 + target_vtl as u32,
        );
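        // Query the hypervisor directly (from the current VTL's view) and check
        // the `tlb_locked` bit of the target VTL's secure config register.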
        let result = self
            .partition
            .hcl
            .get_vp_register(name, HvInputVtl::CURRENT_VTL)
            .expect("failure is a misconfiguration");
        let config = hvdef::HvRegisterVsmVpSecureVtlConfig::from(result.as_u64());
        config.tlb_locked()
    }

    /// Marks the TLBs of all lower VTLs as unlocked.
    /// The hypervisor does the actual unlocking required upon VTL exit.
    #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
    pub(crate) fn unlock_tlb_lock(&mut self, unlocking_vtl: Vtl) {
        debug_assert_eq!(unlocking_vtl, Vtl::Vtl2);
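        // Only the local bookkeeping is cleared here; the hypervisor releases
        // the actual locks when VTL2 exits.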
        self.vtls_tlb_locked.fill(unlocking_vtl, false);
    }
}
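
// A minimal usage sketch (not part of the original file; `adjust_vtl0_protections`
// is hypothetical) showing the intended flow from VTL2: take the lower VTL's TLB
// lock before changing protections, optionally verify it, and clear the local
// state once the work is done. The hypervisor performs the real unlock on VTL exit.
//
// fn adjust_vtl0_protections(vp: &mut UhProcessor<'_, HypervisorBacked>) {
//     // Acquire the VTL0 TLB lock on this VP; a no-op if already held locally.
//     vp.set_tlb_lock(Vtl::Vtl2, GuestVtl::Vtl0);
//     debug_assert!(vp.is_tlb_locked(Vtl::Vtl2, GuestVtl::Vtl0));
//
//     // ... apply VTL permission changes here ...
//
//     // Drop the local bookkeeping; the hypervisor unlocks on VTL2 exit.
//     vp.unlock_tlb_lock(Vtl::Vtl2);
// }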