virt_mshv_vtl/processor/hardware_cvm/apic.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::UhProcessor;
use crate::processor::HardwareIsolatedBacking;
use cvm_tracing::CVM_ALLOWED;
use hcl::GuestVtl;
use virt::Processor;
use virt::vp::MpState;
use virt::x86::SegmentRegister;
use virt_support_apic::ApicWork;

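/// Backing-specific dispatch of APIC work items (INIT, SIPI, NMI, fixed
/// interrupts, and ExtInt) scanned out of the software local APIC by
/// [`poll_apic_core`].
///
/// A minimal sketch of an implementor, assuming a hypothetical backing type
/// `MyBacked` and wrapper `MyApicClient` (illustrative names only; the method
/// signatures are the trait's):
///
/// ```ignore
/// struct MyApicClient<'a, 'b> {
///     vp: &'a mut UhProcessor<'b, MyBacked>,
/// }
///
/// impl<'a, 'b> ApicBacking<'b, MyBacked> for MyApicClient<'a, 'b> {
///     fn vp(&mut self) -> &mut UhProcessor<'b, MyBacked> {
///         self.vp
///     }
///
///     fn handle_sipi(&mut self, vtl: GuestVtl, cs: SegmentRegister) {
///         // Load CS with the startup segment and resume in real mode.
///     }
///
///     fn handle_nmi(&mut self, vtl: GuestVtl) {
///         // Request an NMI window and inject once the guest can take it.
///     }
///
///     fn handle_interrupt(&mut self, vtl: GuestVtl, vector: u8) {
///         // Inject the fixed interrupt through the backing's virtual APIC.
///     }
/// }
/// ```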
pub(crate) trait ApicBacking<'b, B: HardwareIsolatedBacking> {
    fn vp(&mut self) -> &mut UhProcessor<'b, B>;

    fn handle_init(&mut self, vtl: GuestVtl) {
        // Reset the VP's architectural state to the INIT state.
        let vp_info = self.vp().inner.vp_info;
        let mut access = self.vp().access_state(vtl.into());
        virt::vp::x86_init(&mut access, &vp_info).unwrap();
    }

    fn handle_sipi(&mut self, vtl: GuestVtl, cs: SegmentRegister);
    fn handle_nmi(&mut self, vtl: GuestVtl);
    fn handle_interrupt(&mut self, vtl: GuestVtl, vector: u8);

    fn handle_extint(&mut self, vtl: GuestVtl) {
        // ExtInt is not supported for hardware-isolated VMs; log and drop it.
        tracelimit::warn_ratelimited!(CVM_ALLOWED, ?vtl, "extint not supported");
    }
}

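/// Polls the software local APIC for `vtl` and dispatches any pending work
/// (INIT, SIPI, NMI, fixed interrupts, ExtInt) to the backing.
///
/// A sketch of the expected call site, assuming the hypothetical
/// `MyApicClient` wrapper from the trait example above (illustrative only):
///
/// ```ignore
/// fn poll_apic(&mut self, vtl: GuestVtl, scan_irr: bool) {
///     poll_apic_core(&mut MyApicClient { vp: self }, vtl, scan_irr);
/// }
/// ```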
pub(crate) fn poll_apic_core<'b, B: HardwareIsolatedBacking, T: ApicBacking<'b, B>>(
    apic_backing: &mut T,
    vtl: GuestVtl,
    scan_irr: bool,
) {
    // Check for interrupt requests from the host and kernel offload.
    if vtl == GuestVtl::Vtl0 {
        if let Some(irr) = apic_backing.vp().runner.proxy_irr_vtl0() {
            // These interrupts can't be put directly into offload (where
            // supported) because the TMR state might need to be cleared
            // first. That can happen if a vector was previously used for a
            // level-triggered interrupt and is now being used for an
            // edge-triggered one.
            apic_backing.vp().backing.cvm_state_mut().lapics[vtl]
                .lapic
                .request_fixed_interrupts(irr);
        }
    }

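    // Scan the local APIC for pending work items.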
    let vp = apic_backing.vp();
    let ApicWork {
        init,
        extint,
        sipi,
        nmi,
        interrupt,
    } = vp.backing.cvm_state_mut().lapics[vtl]
        .lapic
        .scan(&mut vp.vmtime, scan_irr);

    // Check VTL permissions inside each block to avoid taking a lock on the
    // hot path; INIT and SIPI are quite cold.
    if init {
        if !apic_backing
            .vp()
            .cvm_partition()
            .is_lower_vtl_startup_denied()
        {
            apic_backing.handle_init(vtl);
        }
    }

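    // A SIPI vector selects a 4 KiB-aligned real-mode startup page: the CS
    // base is vector << 12, and the matching real-mode selector is base >> 4
    // (i.e. vector << 8). Attributes 0x9b describe a present, accessed,
    // execute/read code segment.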
    if let Some(vector) = sipi {
        if apic_backing.vp().backing.cvm_state_mut().lapics[vtl].activity == MpState::WaitForSipi {
            if !apic_backing
                .vp()
                .cvm_partition()
                .is_lower_vtl_startup_denied()
            {
                let base = (vector as u64) << 12;
                let selector = (vector as u16) << 8;
                apic_backing.handle_sipi(
                    vtl,
                    SegmentRegister {
                        base,
                        limit: 0xffff,
                        selector,
                        attributes: 0x9b,
                    },
                );
            }
        }
    }

    // Interrupts are ignored while waiting for SIPI.
    let lapic = &mut apic_backing.vp().backing.cvm_state_mut().lapics[vtl];
    if lapic.activity != MpState::WaitForSipi {
        if nmi || lapic.nmi_pending {
            // Latch the NMI so that it is retried on later polls if the
            // backing cannot deliver it immediately.
            lapic.nmi_pending = true;
            apic_backing.handle_nmi(vtl);
        }

        if let Some(vector) = interrupt {
            apic_backing.handle_interrupt(vtl, vector);
        }

        if extint {
            apic_backing.handle_extint(vtl);
        }
    }
}