virt_mshv_vtl/processor/hardware_cvm/apic.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![cfg(guest_arch = "x86_64")]

use crate::UhProcessor;
use crate::processor::HardwareIsolatedBacking;
use cvm_tracing::CVM_ALLOWED;
use hcl::GuestVtl;
use virt::Processor;
use virt::vp::MpState;
use virt::x86::SegmentRegister;
use virt_support_apic::ApicWork;

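/// Per-backing hooks for delivering local APIC work (INIT, SIPI, NMI, fixed
/// interrupts, and ExtINT) to a hardware-isolated virtual processor.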
pub(crate) trait ApicBacking<'b, B: HardwareIsolatedBacking> {
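    /// Returns a mutable reference to the processor being driven.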
    fn vp(&mut self) -> &mut UhProcessor<'b, B>;

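    /// Handles an INIT by resetting the architectural state of the VP for the
    /// given VTL.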
    fn handle_init(&mut self, vtl: GuestVtl) {
        let vp_info = self.vp().inner.vp_info;
        let mut access = self.vp().access_state(vtl.into());
        virt::vp::x86_init(&mut access, &vp_info).unwrap();
    }

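    /// Handles a SIPI by starting the VP for the given VTL with the given
    /// real-mode CS.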
    fn handle_sipi(&mut self, vtl: GuestVtl, cs: SegmentRegister);
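    /// Handles a pending NMI for the given VTL.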
    fn handle_nmi(&mut self, vtl: GuestVtl);
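    /// Handles a pending fixed interrupt with the given vector for the given VTL.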
    fn handle_interrupt(&mut self, vtl: GuestVtl, vector: u8);

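    /// Handles an ExtINT request. The default implementation only warns, since
    /// ExtINT is not supported here.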
    fn handle_extint(&mut self, vtl: GuestVtl) {
        tracelimit::warn_ratelimited!(CVM_ALLOWED, ?vtl, "extint not supported");
    }
}

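/// Scans the local APIC of the given VTL for pending work and dispatches it to
/// the backing via [`ApicBacking`].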
pub(crate) fn poll_apic_core<'b, B: HardwareIsolatedBacking, T: ApicBacking<'b, B>>(
    apic_backing: &mut T,
    vtl: GuestVtl,
    scan_irr: bool,
) {
    // Check for interrupt requests from the host and kernel offload.
    if vtl == GuestVtl::Vtl0 {
        if let Some(irr) = apic_backing.vp().runner.proxy_irr_vtl0() {
            // We can't put the interrupts directly into offload (where supported) because we might
            // need to clear the TMR state. This can happen if a vector was previously used for a
            // level-triggered interrupt and is now being used for an edge-triggered interrupt.
            apic_backing.vp().backing.cvm_state_mut().lapics[vtl]
                .lapic
                .request_fixed_interrupts(irr);
        }
    }

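    // Scan the APIC and collect any pending work (INIT, SIPI, NMI, fixed
    // interrupt, ExtINT).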
    let vp = apic_backing.vp();
    let ApicWork {
        init,
        extint,
        sipi,
        nmi,
        interrupt,
    } = vp.backing.cvm_state_mut().lapics[vtl]
        .lapic
        .scan(&mut vp.vmtime, scan_irr);

    // Check VTL permissions inside each block to avoid taking a lock on the hot path;
    // INIT and SIPI are quite cold.
    if init {
        if !apic_backing
            .vp()
            .cvm_partition()
            .is_lower_vtl_startup_denied()
        {
            apic_backing.handle_init(vtl);
        }
    }

    if let Some(vector) = sipi {
        if apic_backing.vp().backing.cvm_state_mut().lapics[vtl].activity == MpState::WaitForSipi {
            if !apic_backing
                .vp()
                .cvm_partition()
                .is_lower_vtl_startup_denied()
            {
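                // Derive the real-mode CS from the SIPI vector: selector is
                // vector << 8, base is vector << 12, and attributes 0x9b is a
                // present, accessed, execute/read code segment.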
                let base = (vector as u64) << 12;
                let selector = (vector as u16) << 8;
                apic_backing.handle_sipi(
                    vtl,
                    SegmentRegister {
                        base,
                        limit: 0xffff,
                        selector,
                        attributes: 0x9b,
                    },
                );
            }
        }
    }

    // Interrupts are ignored while waiting for SIPI.
    let lapic = &mut apic_backing.vp().backing.cvm_state_mut().lapics[vtl];
    if lapic.activity != MpState::WaitForSipi {
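        // Record the NMI as pending and let the backing attempt to inject it.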
        if nmi || lapic.nmi_pending {
            lapic.nmi_pending = true;
            apic_backing.handle_nmi(vtl);
        }

        if let Some(vector) = interrupt {
            apic_backing.handle_interrupt(vtl, vector);
        }

        if extint {
            apic_backing.handle_extint(vtl);
        }
    }
}