virt_mshv_vtl/processor/hardware_cvm/apic.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![cfg(guest_arch = "x86_64")]

use super::UhRunVpError;
use crate::UhProcessor;
use crate::processor::HardwareIsolatedBacking;
use cvm_tracing::CVM_ALLOWED;
use hcl::GuestVtl;
use virt::Processor;
use virt::vp::MpState;
use virt::x86::SegmentRegister;
use virt_support_apic::ApicWork;

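/// Backing-specific handlers for the APIC work items produced by a local APIC
/// scan (INIT, SIPI, NMI, fixed interrupts, and ExtInt) on a hardware-isolated VP.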
pub(crate) trait ApicBacking<'b, B: HardwareIsolatedBacking> {
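    /// Returns the VP being processed.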
    fn vp(&mut self) -> &mut UhProcessor<'b, B>;

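    /// Handles INIT by resetting the target VTL's architectural state.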
    fn handle_init(&mut self, vtl: GuestVtl) -> Result<(), UhRunVpError> {
        let vp_info = self.vp().inner.vp_info;
        let mut access = self.vp().access_state(vtl.into());
        virt::vp::x86_init(&mut access, &vp_info).map_err(UhRunVpError::State)?;
        Ok(())
    }

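    /// Handles a SIPI for the target VTL, with `cs` derived from the SIPI vector.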
    fn handle_sipi(&mut self, vtl: GuestVtl, cs: SegmentRegister) -> Result<(), UhRunVpError>;
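    /// Handles a pending NMI for the target VTL.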
    fn handle_nmi(&mut self, vtl: GuestVtl) -> Result<(), UhRunVpError>;
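    /// Handles a pending fixed interrupt with the given vector for the target VTL.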
    fn handle_interrupt(&mut self, vtl: GuestVtl, vector: u8) -> Result<(), UhRunVpError>;

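    /// Handles an ExtInt request. The default implementation only warns, since
    /// ExtInt delivery is not supported here.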
    fn handle_extint(&mut self, vtl: GuestVtl) -> Result<(), UhRunVpError> {
        tracelimit::warn_ratelimited!(CVM_ALLOWED, ?vtl, "extint not supported");
        Ok(())
    }
}

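/// Polls the given VTL's local APIC for pending work and dispatches each item
/// (INIT, SIPI, NMI, fixed interrupt, ExtInt) to the backing's `ApicBacking` handlers.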
pub(crate) fn poll_apic_core<'b, B: HardwareIsolatedBacking, T: ApicBacking<'b, B>>(
    apic_backing: &mut T,
    vtl: GuestVtl,
    scan_irr: bool,
) -> Result<(), UhRunVpError> {
    // Check for interrupt requests from the host and kernel offload.
    if vtl == GuestVtl::Vtl0 {
        if let Some(irr) = apic_backing.vp().runner.proxy_irr_vtl0() {
            // We can't put the interrupts directly into offload (where supported) because we
            // might need to clear the TMR state. This can happen if a vector was previously
            // used for a level-triggered interrupt and is now being used for an edge-triggered
            // interrupt.
            apic_backing.vp().backing.cvm_state_mut().lapics[vtl]
                .lapic
                .request_fixed_interrupts(irr);
        }
    }

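    // Scan the local APIC for newly requested work: INIT, SIPI, NMI, ExtInt, and the
    // pending fixed-interrupt vector, if any.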
    let vp = apic_backing.vp();
    let ApicWork {
        init,
        extint,
        sipi,
        nmi,
        interrupt,
    } = vp.backing.cvm_state_mut().lapics[vtl]
        .lapic
        .scan(&mut vp.vmtime, scan_irr);

    // Check VTL permissions inside each block to avoid taking a lock on the hot path;
    // INIT and SIPI are quite cold.
    if init {
        if !apic_backing
            .vp()
            .cvm_partition()
            .is_lower_vtl_startup_denied()
        {
            apic_backing.handle_init(vtl)?;
        }
    }

    if let Some(vector) = sipi {
        if apic_backing.vp().backing.cvm_state_mut().lapics[vtl].activity == MpState::WaitForSipi {
            if !apic_backing
                .vp()
                .cvm_partition()
                .is_lower_vtl_startup_denied()
            {
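                // The SIPI vector selects the real-mode startup segment:
                // CS base = vector << 12 and CS selector = vector << 8.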
                let base = (vector as u64) << 12;
                let selector = (vector as u16) << 8;
                apic_backing.handle_sipi(
                    vtl,
                    SegmentRegister {
                        base,
                        limit: 0xffff,
                        selector,
                        attributes: 0x9b,
                    },
                )?;
            }
        }
    }

    // Interrupts are ignored while waiting for SIPI.
    let lapic = &mut apic_backing.vp().backing.cvm_state_mut().lapics[vtl];
    if lapic.activity != MpState::WaitForSipi {
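        // Record the NMI request as pending before asking the backing to inject it.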
        if nmi || lapic.nmi_pending {
            lapic.nmi_pending = true;
            apic_backing.handle_nmi(vtl)?;
        }

        if let Some(vector) = interrupt {
            apic_backing.handle_interrupt(vtl, vector)?;
        }

        if extint {
            apic_backing.handle_extint(vtl)?;
        }
    }

    Ok(())
}