hcl/ioctl/
aarch64.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Backing for non-hardware-isolated ARM64 partitions.

6use super::Hcl;
7use super::HclVp;
8use super::NoRunner;
9use super::ProcessorRunner;
10use super::TranslateGvaToGpaError;
11use super::TranslateResult;
12use crate::GuestVtl;
13use crate::protocol::hcl_cpu_context_aarch64;
14use hvdef::HV_PARTITION_ID_SELF;
15use hvdef::HV_VP_INDEX_SELF;
16use hvdef::HvAarch64RegisterPage;
17use hvdef::HvArm64RegisterName;
18use hvdef::HvRegisterName;
19use hvdef::HvRegisterValue;
20use hvdef::HypercallCode;
21use sidecar_client::SidecarVp;
22use std::cell::UnsafeCell;
23use thiserror::Error;
24use zerocopy::FromZeros;
25
/// Result when the translate gva hypercall returns a code indicating
/// the translation was unsuccessful.
#[derive(Error, Debug)]
#[error("translate gva to gpa returned non-successful code {code:?}")]
pub struct TranslateErrorAarch64 {
    /// The code returned by the translate gva hypercall.
    /// (Raw `TranslateGvaResultCode` value; any code other than `SUCCESS`
    /// produces this error.)
    pub code: u32,
}
34
/// Runner backing for non-hardware-isolated ARM64 partitions.
#[non_exhaustive]
pub struct MshvArm64<'a> {
    // The hypervisor register page for this VP, if one is mapped. Wrapped in
    // `UnsafeCell` because the hypervisor may also write it; see the SAFETY
    // comments at the access sites for when it is safe to dereference.
    reg_page: Option<&'a UnsafeCell<HvAarch64RegisterPage>>,
}
40
impl<'a> ProcessorRunner<'a, MshvArm64<'a>> {
    /// Returns a shared reference to the register page, or `None` if no
    /// register page is mapped or the page is not currently marked valid.
    fn reg_page(&self) -> Option<&HvAarch64RegisterPage> {
        // SAFETY: the register page will not be concurrently accessed by the
        // hypervisor while this VP is in VTL2.
        let reg_page = unsafe { &*self.state.reg_page?.get() };
        if reg_page.is_valid != 0 {
            Some(reg_page)
        } else {
            None
        }
    }

    /// Returns a mutable reference to the register page, or `None` if no
    /// register page is mapped or the page is not currently marked valid.
    fn reg_page_mut(&mut self) -> Option<&mut HvAarch64RegisterPage> {
        // SAFETY: the register page will not be concurrently accessed by the
        // hypervisor while this VP is in VTL2.
        let reg_page = unsafe { &mut *self.state.reg_page?.get() };
        if reg_page.is_valid != 0 {
            Some(reg_page)
        } else {
            None
        }
    }

    /// Returns a reference to the current VTL's CPU context.
    pub fn cpu_context(&self) -> &hcl_cpu_context_aarch64 {
        // SAFETY: the cpu context will not be concurrently accessed by the
        // hypervisor while this VP is in VTL2.
        // The raw pointer into the run page is cast to the aarch64 context
        // layout; presumably `context` is declared as an opaque/byte field —
        // TODO(review) confirm the layouts match.
        unsafe { &*(&raw mut (*self.run.get()).context).cast() }
    }

    /// Returns a mutable reference to the current VTL's CPU context.
    pub fn cpu_context_mut(&mut self) -> &mut hcl_cpu_context_aarch64 {
        // SAFETY: the cpu context will not be concurrently accessed by the
        // hypervisor while this VP is in VTL2.
        unsafe { &mut *(&raw mut (*self.run.get()).context).cast() }
    }

    /// Translate the following gva to a gpa page.
    ///
    /// The caller must ensure `control_flags.input_vtl()` is set to a specific
    /// VTL.
    ///
    /// The outer `Result` reports hypervisor/hypercall failure; the inner
    /// `Result` reports an unsuccessful translation result code returned by
    /// an otherwise-successful hypercall.
    ///
    /// # Panics
    ///
    /// Panics if `control_flags` does not specify a target VTL.
    pub fn translate_gva_to_gpa(
        &self,
        gva: u64,
        control_flags: hvdef::hypercall::TranslateGvaControlFlagsArm64,
    ) -> Result<Result<TranslateResult, TranslateErrorAarch64>, TranslateGvaToGpaError> {
        use hvdef::hypercall;

        assert!(
            control_flags.input_vtl().use_target_vtl(),
            "did not specify a target VTL"
        );

        // The hypercall takes a page number, not a byte address.
        let gvn = gva >> hvdef::HV_PAGE_SHIFT;
        let header = hypercall::TranslateVirtualAddressArm64 {
            partition_id: HV_PARTITION_ID_SELF,
            vp_index: HV_VP_INDEX_SELF,
            reserved: 0,
            control_flags,
            gva_page: gvn,
        };

        let mut output: hypercall::TranslateVirtualAddressExOutputArm64 = FromZeros::new_zeroed();

        // SAFETY: The input header and slice are the correct types for this hypercall.
        //         The hypercall output is validated right after the hypercall is issued.
        let status = unsafe {
            self.hcl
                .mshv_hvcall
                .hvcall(
                    HypercallCode::HvCallTranslateVirtualAddressEx,
                    &header,
                    &mut output,
                )
                .expect("translate can never fail")
        };

        // Any hypercall-level failure is surfaced via the outer Result.
        status
            .result()
            .map_err(|hv_error| TranslateGvaToGpaError::Hypervisor { gva, hv_error })?;

        // Note: WHP doesn't currently support TranslateVirtualAddressEx, so overlay_page, cache_type,
        // event_info aren't trustworthy values if the results came from WHP.
        match output.translation_result.result.result_code() {
            c if c == hypercall::TranslateGvaResultCode::SUCCESS.0 => Ok(Ok(TranslateResult {
                gpa_page: output.gpa_page,
                overlay_page: output.translation_result.result.overlay_page(),
            })),
            // Non-success codes are reported via the inner Result.
            x => Ok(Err(TranslateErrorAarch64 { code: x })),
        }
    }
}
133
134impl<'a> super::BackingPrivate<'a> for MshvArm64<'a> {
135    fn new(vp: &'a HclVp, sidecar: Option<&SidecarVp<'_>>, _hcl: &Hcl) -> Result<Self, NoRunner> {
136        assert!(sidecar.is_none());
137        let super::BackingState::MshvAarch64 { reg_page } = &vp.backing else {
138            unreachable!()
139        };
140        Ok(Self {
141            reg_page: reg_page.as_ref().map(|x| x.as_ref()),
142        })
143    }
144
145    fn try_set_reg(
146        runner: &mut ProcessorRunner<'a, Self>,
147        vtl: GuestVtl,
148        name: HvRegisterName,
149        value: HvRegisterValue,
150    ) -> bool {
151        // Try to set the register in the CPU context, the fastest path. Only
152        // VTL-shared registers can be set this way: the CPU context only
153        // exposes the last VTL, and if we entered VTL2 on an interrupt,
154        // OpenHCL doesn't know what the last VTL is.
155        // NOTE: x18 is omitted here as it is managed by the hypervisor.
156        let set = match name.into() {
157            HvArm64RegisterName::X0
158            | HvArm64RegisterName::X1
159            | HvArm64RegisterName::X2
160            | HvArm64RegisterName::X3
161            | HvArm64RegisterName::X4
162            | HvArm64RegisterName::X5
163            | HvArm64RegisterName::X6
164            | HvArm64RegisterName::X7
165            | HvArm64RegisterName::X8
166            | HvArm64RegisterName::X9
167            | HvArm64RegisterName::X10
168            | HvArm64RegisterName::X11
169            | HvArm64RegisterName::X12
170            | HvArm64RegisterName::X13
171            | HvArm64RegisterName::X14
172            | HvArm64RegisterName::X15
173            | HvArm64RegisterName::X16
174            | HvArm64RegisterName::X17
175            | HvArm64RegisterName::X19
176            | HvArm64RegisterName::X20
177            | HvArm64RegisterName::X21
178            | HvArm64RegisterName::X22
179            | HvArm64RegisterName::X23
180            | HvArm64RegisterName::X24
181            | HvArm64RegisterName::X25
182            | HvArm64RegisterName::X26
183            | HvArm64RegisterName::X27
184            | HvArm64RegisterName::X28
185            | HvArm64RegisterName::XFp
186            | HvArm64RegisterName::XLr => {
187                runner.cpu_context_mut().x[(name.0 - HvArm64RegisterName::X0.0) as usize] =
188                    value.as_u64();
189                true
190            }
191            HvArm64RegisterName::X18 => {
192                // TODO: handle X18 for VTL1
193                runner.cpu_context_mut().x[18] = value.as_u64();
194                false
195            }
196            _ => false,
197        };
198        if set {
199            return true;
200        }
201
202        if let Some(reg_page) = runner.reg_page_mut() {
203            if reg_page.vtl == vtl as u8 {
204                let set = match name.into() {
205                    HvArm64RegisterName::XPc => {
206                        reg_page.pc = value.as_u64();
207                        reg_page.dirty.set_instruction_pointer(true);
208                        true
209                    }
210                    HvArm64RegisterName::Cpsr => {
211                        reg_page.cpsr = value.as_u64();
212                        reg_page.dirty.set_processor_state(true);
213                        true
214                    }
215                    HvArm64RegisterName::SctlrEl1 => {
216                        reg_page.sctlr_el1 = value.as_u64();
217                        reg_page.dirty.set_control_registers(true);
218                        true
219                    }
220                    HvArm64RegisterName::TcrEl1 => {
221                        reg_page.tcr_el1 = value.as_u64();
222                        reg_page.dirty.set_control_registers(true);
223                        true
224                    }
225                    _ => false,
226                };
227                if set {
228                    return true;
229                }
230            }
231        };
232        false
233    }
234
235    fn must_flush_regs_on(_runner: &ProcessorRunner<'a, Self>, _name: HvRegisterName) -> bool {
236        false
237    }
238
239    fn try_get_reg(
240        runner: &ProcessorRunner<'a, Self>,
241        vtl: GuestVtl,
242        name: HvRegisterName,
243    ) -> Option<HvRegisterValue> {
244        // Try to get the register from the CPU context, the fastest path.
245        // NOTE: x18 is omitted here as it is managed by the hypervisor.
246        let value = match name.into() {
247            HvArm64RegisterName::X0
248            | HvArm64RegisterName::X1
249            | HvArm64RegisterName::X2
250            | HvArm64RegisterName::X3
251            | HvArm64RegisterName::X4
252            | HvArm64RegisterName::X5
253            | HvArm64RegisterName::X6
254            | HvArm64RegisterName::X7
255            | HvArm64RegisterName::X8
256            | HvArm64RegisterName::X9
257            | HvArm64RegisterName::X10
258            | HvArm64RegisterName::X11
259            | HvArm64RegisterName::X12
260            | HvArm64RegisterName::X13
261            | HvArm64RegisterName::X14
262            | HvArm64RegisterName::X15
263            | HvArm64RegisterName::X16
264            | HvArm64RegisterName::X17
265            | HvArm64RegisterName::X19
266            | HvArm64RegisterName::X20
267            | HvArm64RegisterName::X21
268            | HvArm64RegisterName::X22
269            | HvArm64RegisterName::X23
270            | HvArm64RegisterName::X24
271            | HvArm64RegisterName::X25
272            | HvArm64RegisterName::X26
273            | HvArm64RegisterName::X27
274            | HvArm64RegisterName::X28
275            | HvArm64RegisterName::XFp
276            | HvArm64RegisterName::XLr => {
277                Some(runner.cpu_context().x[(name.0 - HvArm64RegisterName::X0.0) as usize].into())
278            }
279            _ => None,
280        };
281        if value.is_some() {
282            return value;
283        }
284
285        if let Some(reg_page) = runner.reg_page() {
286            if reg_page.vtl == vtl as u8 {
287                let value = match name.into() {
288                    HvArm64RegisterName::XPc => Some(HvRegisterValue((reg_page.pc).into())),
289                    HvArm64RegisterName::Cpsr => Some(HvRegisterValue((reg_page.cpsr).into())),
290                    HvArm64RegisterName::SctlrEl1 => {
291                        Some(HvRegisterValue((reg_page.sctlr_el1).into()))
292                    }
293                    HvArm64RegisterName::TcrEl1 => Some(HvRegisterValue((reg_page.tcr_el1).into())),
294                    _ => None,
295                };
296                if value.is_some() {
297                    return value;
298                }
299            }
300        };
301        None
302    }
303
304    fn flush_register_page(runner: &mut ProcessorRunner<'a, Self>) {
305        let Some(reg_page) = runner.reg_page_mut() else {
306            return;
307        };
308
309        // Collect any dirty registers.
310        let mut regs: Vec<(HvArm64RegisterName, HvRegisterValue)> = Vec::new();
311        if reg_page.dirty.instruction_pointer() {
312            regs.push((HvArm64RegisterName::XPc, reg_page.pc.into()));
313        }
314        if reg_page.dirty.processor_state() {
315            regs.push((HvArm64RegisterName::Cpsr, reg_page.cpsr.into()));
316        }
317        if reg_page.dirty.control_registers() {
318            regs.push((HvArm64RegisterName::SctlrEl1, reg_page.sctlr_el1.into()));
319            regs.push((HvArm64RegisterName::TcrEl1, reg_page.tcr_el1.into()));
320        }
321
322        // Disable the reg page so future writes do not use it (until the state
323        // is reset at the next VTL transition).
324        reg_page.is_valid = 0;
325        reg_page.dirty = 0.into();
326
327        // Set the registers now that the register page is marked invalid.
328        if let Err(err) = runner.set_vp_registers(GuestVtl::Vtl0, regs.as_slice()) {
329            panic!(
330                "Failed to flush register page: {}",
331                &err as &dyn std::error::Error
332            );
333        }
334    }
335}