use super::BackingState;
use super::Error;
use super::GuestVtl;
use super::Hcl;
use super::HclVp;
use super::NoRunner;
use super::ProcessorRunner;
use super::TranslateGvaToGpaError;
use super::TranslateResult;
use super::private::BackingPrivate;
use crate::protocol::hcl_cpu_context_x64;
use hvdef::HV_PARTITION_ID_SELF;
use hvdef::HV_VP_INDEX_SELF;
use hvdef::HvRegisterName;
use hvdef::HvRegisterValue;
use hvdef::HvX64RegisterName;
use hvdef::HvX64RegisterPage;
use hvdef::HypercallCode;
use sidecar_client::SidecarVp;
use std::cell::UnsafeCell;
use thiserror::Error;
use zerocopy::FromZeros;

/// Error returned when the hypervisor reports an unsuccessful result code for
/// a GVA-to-GPA translation.
#[derive(Error, Debug)]
#[error("translate gva to gpa returned non-successful code {code:?}")]
pub struct TranslateErrorX64 {
    /// The result code returned by the translate hypercall.
    pub code: u32,
    /// The pending event reported with the failed translation.
    pub event_info: hvdef::HvX64PendingEvent,
}

/// Error returned when reading the guest VTL from the register page fails.
#[derive(Error, Debug)]
#[expect(missing_docs)]
pub enum RegisterPageVtlError {
    #[error("no register page")]
    NoRegisterPage,
    #[error("invalid guest vtl {0}")]
    InvalidVtl(u8),
}

/// Runner backing for non-hardware-isolated x64 partitions.
pub struct MshvX64<'a> {
    reg_page: Option<&'a UnsafeCell<HvX64RegisterPage>>,
    cpu_context: &'a UnsafeCell<hcl_cpu_context_x64>,
}

impl<'a> ProcessorRunner<'a, MshvX64<'a>> {
    fn reg_page(&self) -> Option<&HvX64RegisterPage> {
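        // SAFETY: self.state.reg_page, when present, points to a register
        // page mapping that remains valid for the lifetime of self.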
        let reg_page = unsafe { &*self.state.reg_page?.get() };
        if reg_page.is_valid != 0 {
            Some(reg_page)
        } else {
            None
        }
    }

    fn reg_page_mut(&mut self) -> Option<&mut HvX64RegisterPage> {
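        // SAFETY: as in reg_page above; the mutable borrow of self ensures
        // no other reference to the page is produced from safe code.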
        let reg_page = unsafe { &mut *self.state.reg_page?.get() };
        if reg_page.is_valid != 0 {
            Some(reg_page)
        } else {
            None
        }
    }

    /// Returns the guest VTL recorded in the register page.
    pub fn reg_page_vtl(&self) -> Result<GuestVtl, RegisterPageVtlError> {
        let vtl = self
            .reg_page()
            .ok_or(RegisterPageVtlError::NoRegisterPage)?
            .vtl;
        vtl.try_into()
            .map_err(|_| RegisterPageVtlError::InvalidVtl(vtl))
    }

    /// Returns a reference to the VP's CPU context.
    pub fn cpu_context(&self) -> &hcl_cpu_context_x64 {
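        // SAFETY: the CPU context mapping remains valid for the lifetime of
        // self, and it is not written by the kernel while the VP is not
        // running.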
        unsafe { &*self.state.cpu_context.get() }
    }

    /// Returns a mutable reference to the VP's CPU context.
    pub fn cpu_context_mut(&mut self) -> &mut hcl_cpu_context_x64 {
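        // SAFETY: as above, plus &mut self guarantees exclusive access from
        // safe code.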
        unsafe { &mut *self.state.cpu_context.get() }
    }

    /// Translates the guest virtual address `gva` to a guest physical address
    /// via the hypervisor, using the supplied control flags.
    ///
    /// `control_flags` must specify a target VTL.
    pub fn translate_gva_to_gpa(
        &mut self,
        gva: u64,
        control_flags: hvdef::hypercall::TranslateGvaControlFlagsX64,
    ) -> Result<Result<TranslateResult, TranslateErrorX64>, TranslateGvaToGpaError> {
        use hvdef::hypercall;

        assert!(
            control_flags.input_vtl().use_target_vtl(),
            "did not specify a target VTL"
        );

        let gvn = gva >> hvdef::HV_PAGE_SHIFT;
        let output = if let Some(sidecar) = &mut self.sidecar {
            sidecar
                .translate_gva(gvn, control_flags)
                .map_err(|err| TranslateGvaToGpaError::Sidecar { error: err, gva })?
        } else {
            let header = hypercall::TranslateVirtualAddressX64 {
                partition_id: HV_PARTITION_ID_SELF,
                vp_index: HV_VP_INDEX_SELF,
                reserved: 0,
                control_flags,
                gva_page: gvn,
            };

            let mut output: hypercall::TranslateVirtualAddressExOutputX64 = FromZeros::new_zeroed();

            // SAFETY: `header` and `output` are correctly sized input and
            // output buffers for the TranslateVirtualAddressEx hypercall.
            let status = unsafe {
                self.hcl
                    .mshv_hvcall
                    .hvcall(
                        HypercallCode::HvCallTranslateVirtualAddressEx,
                        &header,
                        &mut output,
                    )
                    .expect("translate can never fail")
            };

            status
                .result()
                .map_err(|hv_error| TranslateGvaToGpaError::Hypervisor { gva, hv_error })?;

            output
        };

        // A successful result code yields the translated GPA page; any other
        // code is surfaced to the caller along with the pending event info.
        match output.translation_result.result.result_code() {
            c if c == hypercall::TranslateGvaResultCode::SUCCESS.0 => Ok(Ok(TranslateResult {
                gpa_page: output.gpa_page,
                overlay_page: output.translation_result.result.overlay_page(),
            })),
            x => Ok(Err(TranslateErrorX64 {
                code: x,
                event_info: output.translation_result.event_info,
            })),
        }
    }
}

impl<'a> BackingPrivate<'a> for MshvX64<'a> {
    fn new(vp: &'a HclVp, sidecar: Option<&SidecarVp<'a>>, _hcl: &Hcl) -> Result<Self, NoRunner> {
        let BackingState::Mshv { reg_page } = &vp.backing else {
            return Err(NoRunner::MismatchedIsolation);
        };

        unsafe {
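            // SAFETY: the sidecar (or HCL run page) mappings outlive 'a, so
            // the references constructed below remain valid.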
            let this = if let Some(sidecar) = sidecar {
                Self {
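                    // Use the sidecar's register page mapping, but only if
                    // the kernel actually provided a register page for this
                    // VP.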
                    reg_page: reg_page.is_some().then(|| &*sidecar.register_page().cast()),
                    cpu_context: &*sidecar.cpu_context().cast(),
                }
            } else {
                Self {
                    reg_page: reg_page.as_ref().map(|x| x.as_ref()),
                    cpu_context: &*(&raw mut (*vp.run.as_ptr()).context).cast(),
                }
            };

            Ok(this)
        }
    }

    fn try_set_reg(
        runner: &mut ProcessorRunner<'a, Self>,
        vtl: GuestVtl,
        name: HvRegisterName,
        value: HvRegisterValue,
    ) -> Result<bool, Error> {
        let name = name.into();
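        // Try the CPU context first: these registers are shared with the
        // kernel and can be set without a hypercall.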
        let set = match name {
            HvX64RegisterName::Rax
            | HvX64RegisterName::Rcx
            | HvX64RegisterName::Rdx
            | HvX64RegisterName::Rbx
            | HvX64RegisterName::Rbp
            | HvX64RegisterName::Rsi
            | HvX64RegisterName::Rdi
            | HvX64RegisterName::R8
            | HvX64RegisterName::R9
            | HvX64RegisterName::R10
            | HvX64RegisterName::R11
            | HvX64RegisterName::R12
            | HvX64RegisterName::R13
            | HvX64RegisterName::R14
            | HvX64RegisterName::R15 => {
                runner.cpu_context_mut().gps[(name.0 - HvX64RegisterName::Rax.0) as usize] =
                    value.as_u64();
                true
            }

            HvX64RegisterName::Cr2 => {
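                // CR2 shares storage with RSP in the CPU context; RSP itself
                // goes through the register page instead.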
                runner.cpu_context_mut().gps
                    [(HvX64RegisterName::Rsp.0 - HvX64RegisterName::Rax.0) as usize] =
                    value.as_u64();
                true
            }

            HvX64RegisterName::Xmm0
            | HvX64RegisterName::Xmm1
            | HvX64RegisterName::Xmm2
            | HvX64RegisterName::Xmm3
            | HvX64RegisterName::Xmm4
            | HvX64RegisterName::Xmm5 => {
                runner.cpu_context_mut().fx_state.xmm
                    [(name.0 - HvX64RegisterName::Xmm0.0) as usize] = value.as_u128().to_ne_bytes();
                true
            }
            _ => false,
        };
        if set {
            return Ok(true);
        }

        if let Some(reg_page) = runner.reg_page_mut() {
            if reg_page.vtl == vtl as u8 {
                let set = match name {
                    HvX64RegisterName::Rsp => {
                        reg_page.gp_registers[(name.0 - HvX64RegisterName::Rax.0) as usize] =
                            value.as_u64();
                        reg_page.dirty.set_general_purpose(true);
                        true
                    }
                    HvX64RegisterName::Rip => {
                        reg_page.rip = value.as_u64();
                        reg_page.dirty.set_instruction_pointer(true);
                        true
                    }
                    HvX64RegisterName::Rflags => {
                        reg_page.rflags = value.as_u64();
                        reg_page.dirty.set_flags(true);
                        true
                    }
                    HvX64RegisterName::Es
                    | HvX64RegisterName::Cs
                    | HvX64RegisterName::Ss
                    | HvX64RegisterName::Ds
                    | HvX64RegisterName::Fs
                    | HvX64RegisterName::Gs => {
                        reg_page.segment[(name.0 - HvX64RegisterName::Es.0) as usize] =
                            value.as_u128();
                        reg_page.dirty.set_segments(true);
                        true
                    }

                    // Control registers cannot be written through the register
                    // page; report success only when the value is unchanged,
                    // so the caller falls back to a hypercall otherwise.
                    HvX64RegisterName::Cr0 => reg_page.cr0 == value.as_u64(),
                    HvX64RegisterName::Cr3 => reg_page.cr3 == value.as_u64(),
                    HvX64RegisterName::Cr4 => reg_page.cr4 == value.as_u64(),
                    HvX64RegisterName::Cr8 => reg_page.cr8 == value.as_u64(),
                    HvX64RegisterName::Efer => reg_page.efer == value.as_u64(),
                    HvX64RegisterName::Dr7 => reg_page.dr7 == value.as_u64(),
                    _ => false,
                };
                if set {
                    return Ok(true);
                }
            }
        }

        Ok(false)
    }

    fn must_flush_regs_on(runner: &ProcessorRunner<'a, Self>, name: HvRegisterName) -> bool {
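        // When a register page is mapped, rflags is cached there, so pending
        // register page state must be flushed before setting it directly.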
        matches!(HvX64RegisterName::from(name), HvX64RegisterName::Rflags)
            && runner.reg_page().is_some()
    }

    fn try_get_reg(
        runner: &ProcessorRunner<'a, Self>,
        vtl: GuestVtl,
        name: HvRegisterName,
    ) -> Result<Option<HvRegisterValue>, Error> {
        let name = name.into();

        let value = match name {
            HvX64RegisterName::Rax
            | HvX64RegisterName::Rcx
            | HvX64RegisterName::Rdx
            | HvX64RegisterName::Rbx
            | HvX64RegisterName::Rbp
            | HvX64RegisterName::Rsi
            | HvX64RegisterName::Rdi
            | HvX64RegisterName::R8
            | HvX64RegisterName::R9
            | HvX64RegisterName::R10
            | HvX64RegisterName::R11
            | HvX64RegisterName::R12
            | HvX64RegisterName::R13
            | HvX64RegisterName::R14
            | HvX64RegisterName::R15 => {
                Some(runner.cpu_context().gps[(name.0 - HvX64RegisterName::Rax.0) as usize].into())
            }

            HvX64RegisterName::Cr2 => {
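                // CR2 shares storage with RSP in the CPU context (see
                // try_set_reg above).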
                Some(
                    runner.cpu_context().gps
                        [(HvX64RegisterName::Rsp.0 - HvX64RegisterName::Rax.0) as usize]
                        .into(),
                )
            }

            HvX64RegisterName::Xmm0
            | HvX64RegisterName::Xmm1
            | HvX64RegisterName::Xmm2
            | HvX64RegisterName::Xmm3
            | HvX64RegisterName::Xmm4
            | HvX64RegisterName::Xmm5 => Some(
                u128::from_ne_bytes(
                    runner.cpu_context().fx_state.xmm
                        [(name.0 - HvX64RegisterName::Xmm0.0) as usize],
                )
                .into(),
            ),
            _ => None,
        };
        if let Some(value) = value {
            return Ok(Some(value));
        }

        if let Some(reg_page) = runner.reg_page() {
            if reg_page.vtl == vtl as u8 {
                let value = match name {
                    HvX64RegisterName::Rsp => Some(HvRegisterValue(
                        reg_page.gp_registers[(name.0 - HvX64RegisterName::Rax.0) as usize].into(),
                    )),
                    HvX64RegisterName::Rip => Some(HvRegisterValue((reg_page.rip).into())),
                    HvX64RegisterName::Rflags => Some(HvRegisterValue((reg_page.rflags).into())),
                    HvX64RegisterName::Es
                    | HvX64RegisterName::Cs
                    | HvX64RegisterName::Ss
                    | HvX64RegisterName::Ds
                    | HvX64RegisterName::Fs
                    | HvX64RegisterName::Gs => Some(HvRegisterValue(
                        reg_page.segment[(name.0 - HvX64RegisterName::Es.0) as usize].into(),
                    )),
                    HvX64RegisterName::Cr0 => Some(HvRegisterValue((reg_page.cr0).into())),
                    HvX64RegisterName::Cr3 => Some(HvRegisterValue((reg_page.cr3).into())),
                    HvX64RegisterName::Cr4 => Some(HvRegisterValue((reg_page.cr4).into())),
                    HvX64RegisterName::Cr8 => Some(HvRegisterValue((reg_page.cr8).into())),
                    HvX64RegisterName::Efer => Some(HvRegisterValue((reg_page.efer).into())),
                    HvX64RegisterName::Dr7 => Some(HvRegisterValue((reg_page.dr7).into())),
                    HvX64RegisterName::InstructionEmulationHints => Some(HvRegisterValue(
                        (u64::from(reg_page.instruction_emulation_hints)).into(),
                    )),
                    HvX64RegisterName::PendingInterruption => {
                        Some(u64::from(reg_page.pending_interruption).into())
                    }
                    HvX64RegisterName::InterruptState => {
                        Some(u64::from(reg_page.interrupt_state).into())
                    }
                    _ => None,
                };
                if let Some(value) = value {
                    return Ok(Some(value));
                }
            }
        }

        Ok(None)
    }

    fn flush_register_page(runner: &mut ProcessorRunner<'a, Self>) {
        let Some(reg_page) = runner.reg_page_mut() else {
            return;
        };

        // Collect the registers marked dirty so they can be set through the
        // regular hypercall path instead.
        let mut regs: Vec<(HvX64RegisterName, HvRegisterValue)> = Vec::new();
        if reg_page.dirty.instruction_pointer() {
            regs.push((HvX64RegisterName::Rip, reg_page.rip.into()));
        }
        if reg_page.dirty.general_purpose() {
            regs.push((
                HvX64RegisterName::Rsp,
                reg_page.gp_registers
                    [(HvX64RegisterName::Rsp.0 - HvX64RegisterName::Rax.0) as usize]
                    .into(),
            ));
        }
        if reg_page.dirty.flags() {
            regs.push((HvX64RegisterName::Rflags, reg_page.rflags.into()));
        }
        if reg_page.dirty.segments() {
            let segment_regs = reg_page
                .segment
                .iter()
                .copied()
                .enumerate()
                .map(|(i, val)| {
                    (
                        HvX64RegisterName::from(HvRegisterName(HvX64RegisterName::Es.0 + i as u32)),
                        HvRegisterValue::from(val),
                    )
                });
            regs.extend(segment_regs);
        }

        // Invalidate the register page and clear the dirty bits so the stale
        // contents are not used after this point.
        reg_page.is_valid = 0;
        reg_page.dirty = 0.into();

        if let Err(err) = runner.set_vp_registers(GuestVtl::Vtl0, regs.as_slice()) {
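            // The dirty state has already been cleared from the page, so
            // failing to set it via hypercall would lose guest register
            // state; treat this as fatal.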
            panic!(
                "Failed to flush register page: {}",
                &err as &dyn std::error::Error
            );
        }
    }
}