hcl/ioctl/
aarch64.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Backing for non-hardware-isolated ARM64 partitions.
5
6use super::Hcl;
7use super::HclVp;
8use super::NoRunner;
9use super::ProcessorRunner;
10use crate::GuestVtl;
11use crate::protocol::hcl_cpu_context_aarch64;
12use hvdef::HvAarch64RegisterPage;
13use hvdef::HvArm64RegisterName;
14use hvdef::HvRegisterName;
15use hvdef::HvRegisterValue;
16use sidecar_client::SidecarVp;
17use std::cell::UnsafeCell;
18use thiserror::Error;
19
/// Result when the translate gva hypercall returns a code indicating
/// the translation was unsuccessful.
///
/// The raw code is preserved so callers can inspect or log the exact
/// hypervisor result.
// NOTE(review): presumably this is an HvError-style result code taken from
// the hypercall output — confirm against the call site that constructs it.
#[derive(Error, Debug)]
#[error("translate gva to gpa returned non-successful code {code:?}")]
pub struct TranslateErrorAarch64 {
    /// The code returned by the translate gva hypercall.
    pub code: u32,
}
28
/// Runner backing for non-hardware-isolated ARM64 partitions.
#[non_exhaustive]
pub struct MshvArm64<'a> {
    // The hypervisor register page for this VP, if one is mapped. Wrapped in
    // `UnsafeCell` because the page is shared with the hypervisor; each access
    // carries a SAFETY comment noting that the hypervisor does not touch the
    // page while the VP is in VTL2.
    reg_page: Option<&'a UnsafeCell<HvAarch64RegisterPage>>,
}
34
35impl<'a> ProcessorRunner<'a, MshvArm64<'a>> {
36    fn reg_page(&self) -> Option<&HvAarch64RegisterPage> {
37        // SAFETY: the register page will not be concurrently accessed by the
38        // hypervisor while this VP is in VTL2.
39        let reg_page = unsafe { &*self.state.reg_page?.get() };
40        if reg_page.is_valid != 0 {
41            Some(reg_page)
42        } else {
43            None
44        }
45    }
46
47    fn reg_page_mut(&mut self) -> Option<&mut HvAarch64RegisterPage> {
48        // SAFETY: the register page will not be concurrently accessed by the
49        // hypervisor while this VP is in VTL2.
50        let reg_page = unsafe { &mut *self.state.reg_page?.get() };
51        if reg_page.is_valid != 0 {
52            Some(reg_page)
53        } else {
54            None
55        }
56    }
57
58    /// Returns a reference to the current VTL's CPU context.
59    pub fn cpu_context(&self) -> &hcl_cpu_context_aarch64 {
60        // SAFETY: the cpu context will not be concurrently accessed by the
61        // hypervisor while this VP is in VTL2.
62        unsafe { &*(&raw mut (*self.run.get()).context).cast() }
63    }
64
65    /// Returns a mutable reference to the current VTL's CPU context.
66    pub fn cpu_context_mut(&mut self) -> &mut hcl_cpu_context_aarch64 {
67        // SAFETY: the cpu context will not be concurrently accessed by the
68        // hypervisor while this VP is in VTL2.
69        unsafe { &mut *(&raw mut (*self.run.get()).context).cast() }
70    }
71}
72
73impl<'a> super::BackingPrivate<'a> for MshvArm64<'a> {
74    fn new(vp: &'a HclVp, sidecar: Option<&SidecarVp<'_>>, _hcl: &Hcl) -> Result<Self, NoRunner> {
75        assert!(sidecar.is_none());
76        let super::BackingState::MshvAarch64 { reg_page } = &vp.backing else {
77            unreachable!()
78        };
79        Ok(Self {
80            reg_page: reg_page.as_ref().map(|x| x.as_ref()),
81        })
82    }
83
84    fn try_set_reg(
85        runner: &mut ProcessorRunner<'a, Self>,
86        vtl: GuestVtl,
87        name: HvRegisterName,
88        value: HvRegisterValue,
89    ) -> Result<bool, super::Error> {
90        // Try to set the register in the CPU context, the fastest path. Only
91        // VTL-shared registers can be set this way: the CPU context only
92        // exposes the last VTL, and if we entered VTL2 on an interrupt,
93        // OpenHCL doesn't know what the last VTL is.
94        // NOTE: x18 is omitted here as it is managed by the hypervisor.
95        let set = match name.into() {
96            HvArm64RegisterName::X0
97            | HvArm64RegisterName::X1
98            | HvArm64RegisterName::X2
99            | HvArm64RegisterName::X3
100            | HvArm64RegisterName::X4
101            | HvArm64RegisterName::X5
102            | HvArm64RegisterName::X6
103            | HvArm64RegisterName::X7
104            | HvArm64RegisterName::X8
105            | HvArm64RegisterName::X9
106            | HvArm64RegisterName::X10
107            | HvArm64RegisterName::X11
108            | HvArm64RegisterName::X12
109            | HvArm64RegisterName::X13
110            | HvArm64RegisterName::X14
111            | HvArm64RegisterName::X15
112            | HvArm64RegisterName::X16
113            | HvArm64RegisterName::X17
114            | HvArm64RegisterName::X19
115            | HvArm64RegisterName::X20
116            | HvArm64RegisterName::X21
117            | HvArm64RegisterName::X22
118            | HvArm64RegisterName::X23
119            | HvArm64RegisterName::X24
120            | HvArm64RegisterName::X25
121            | HvArm64RegisterName::X26
122            | HvArm64RegisterName::X27
123            | HvArm64RegisterName::X28
124            | HvArm64RegisterName::XFp
125            | HvArm64RegisterName::XLr => {
126                runner.cpu_context_mut().x[(name.0 - HvArm64RegisterName::X0.0) as usize] =
127                    value.as_u64();
128                true
129            }
130            HvArm64RegisterName::X18 => {
131                // TODO: handle X18 for VTL1
132                runner.cpu_context_mut().x[18] = value.as_u64();
133                false
134            }
135            _ => false,
136        };
137        if set {
138            return Ok(true);
139        }
140
141        if let Some(reg_page) = runner.reg_page_mut() {
142            if reg_page.vtl == vtl as u8 {
143                let set = match name.into() {
144                    HvArm64RegisterName::XPc => {
145                        reg_page.pc = value.as_u64();
146                        reg_page.dirty.set_instruction_pointer(true);
147                        true
148                    }
149                    HvArm64RegisterName::Cpsr => {
150                        reg_page.cpsr = value.as_u64();
151                        reg_page.dirty.set_processor_state(true);
152                        true
153                    }
154                    HvArm64RegisterName::SctlrEl1 => {
155                        reg_page.sctlr_el1 = value.as_u64();
156                        reg_page.dirty.set_control_registers(true);
157                        true
158                    }
159                    HvArm64RegisterName::TcrEl1 => {
160                        reg_page.tcr_el1 = value.as_u64();
161                        reg_page.dirty.set_control_registers(true);
162                        true
163                    }
164                    _ => false,
165                };
166                if set {
167                    return Ok(true);
168                }
169            }
170        };
171
172        Ok(false)
173    }
174
175    fn must_flush_regs_on(_runner: &ProcessorRunner<'a, Self>, _name: HvRegisterName) -> bool {
176        false
177    }
178
179    fn try_get_reg(
180        runner: &ProcessorRunner<'a, Self>,
181        vtl: GuestVtl,
182        name: HvRegisterName,
183    ) -> Result<Option<HvRegisterValue>, super::Error> {
184        // Try to get the register from the CPU context, the fastest path.
185        // NOTE: x18 is omitted here as it is managed by the hypervisor.
186        let value = match name.into() {
187            HvArm64RegisterName::X0
188            | HvArm64RegisterName::X1
189            | HvArm64RegisterName::X2
190            | HvArm64RegisterName::X3
191            | HvArm64RegisterName::X4
192            | HvArm64RegisterName::X5
193            | HvArm64RegisterName::X6
194            | HvArm64RegisterName::X7
195            | HvArm64RegisterName::X8
196            | HvArm64RegisterName::X9
197            | HvArm64RegisterName::X10
198            | HvArm64RegisterName::X11
199            | HvArm64RegisterName::X12
200            | HvArm64RegisterName::X13
201            | HvArm64RegisterName::X14
202            | HvArm64RegisterName::X15
203            | HvArm64RegisterName::X16
204            | HvArm64RegisterName::X17
205            | HvArm64RegisterName::X19
206            | HvArm64RegisterName::X20
207            | HvArm64RegisterName::X21
208            | HvArm64RegisterName::X22
209            | HvArm64RegisterName::X23
210            | HvArm64RegisterName::X24
211            | HvArm64RegisterName::X25
212            | HvArm64RegisterName::X26
213            | HvArm64RegisterName::X27
214            | HvArm64RegisterName::X28
215            | HvArm64RegisterName::XFp
216            | HvArm64RegisterName::XLr => {
217                Some(runner.cpu_context().x[(name.0 - HvArm64RegisterName::X0.0) as usize].into())
218            }
219            _ => None,
220        };
221        if value.is_some() {
222            return Ok(value);
223        }
224
225        if let Some(reg_page) = runner.reg_page() {
226            if reg_page.vtl == vtl as u8 {
227                let value = match name.into() {
228                    HvArm64RegisterName::XPc => Some(HvRegisterValue((reg_page.pc).into())),
229                    HvArm64RegisterName::Cpsr => Some(HvRegisterValue((reg_page.cpsr).into())),
230                    HvArm64RegisterName::SctlrEl1 => {
231                        Some(HvRegisterValue((reg_page.sctlr_el1).into()))
232                    }
233                    HvArm64RegisterName::TcrEl1 => Some(HvRegisterValue((reg_page.tcr_el1).into())),
234                    _ => None,
235                };
236                if value.is_some() {
237                    return Ok(value);
238                }
239            }
240        };
241        Ok(None)
242    }
243
244    fn flush_register_page(runner: &mut ProcessorRunner<'a, Self>) {
245        let Some(reg_page) = runner.reg_page_mut() else {
246            return;
247        };
248
249        // Collect any dirty registers.
250        let mut regs: Vec<(HvArm64RegisterName, HvRegisterValue)> = Vec::new();
251        if reg_page.dirty.instruction_pointer() {
252            regs.push((HvArm64RegisterName::XPc, reg_page.pc.into()));
253        }
254        if reg_page.dirty.processor_state() {
255            regs.push((HvArm64RegisterName::Cpsr, reg_page.cpsr.into()));
256        }
257        if reg_page.dirty.control_registers() {
258            regs.push((HvArm64RegisterName::SctlrEl1, reg_page.sctlr_el1.into()));
259            regs.push((HvArm64RegisterName::TcrEl1, reg_page.tcr_el1.into()));
260        }
261
262        // Disable the reg page so future writes do not use it (until the state
263        // is reset at the next VTL transition).
264        reg_page.is_valid = 0;
265        reg_page.dirty = 0.into();
266
267        // Set the registers now that the register page is marked invalid.
268        if let Err(err) = runner.set_vp_registers(GuestVtl::Vtl0, regs.as_slice()) {
269            panic!(
270                "Failed to flush register page: {}",
271                &err as &dyn std::error::Error
272            );
273        }
274    }
275}