virt_support_aarch64emu/emulate.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Wrapper around aarch64emu for emulating single instructions to handle VM exits.

use crate::translate::TranslationRegisters;
use aarch64defs::EsrEl2;
use aarch64defs::FaultStatusCode;
use aarch64defs::IssInstructionAbort;
use aarch64emu::AccessCpuState;
use aarch64emu::InterceptState;
use guestmem::GuestMemory;
use guestmem::GuestMemoryError;
use hvdef::HV_PAGE_SIZE;
use hvdef::HvAarch64PendingEvent;
use hvdef::HvAarch64PendingEventType;
use hvdef::HvInterceptAccessType;
use hvdef::HvMapGpaFlags;
use thiserror::Error;
use virt::EmulatorMonitorSupport;
use virt::VpHaltReason;
use virt::io::CpuIo;
use vm_topology::processor::VpIndex;
use zerocopy::FromBytes;
use zerocopy::IntoBytes;

/// Support routines for the emulator.
pub trait EmulatorSupport: AccessCpuState {
    /// The current VP index.
    fn vp_index(&self) -> VpIndex;

    /// The physical address that caused the fault.
    fn physical_address(&self) -> Option<u64>;

    /// The gva translation included in the intercept message header, if valid.
    fn initial_gva_translation(&mut self) -> Option<InitialTranslation>;

    /// Returns true if an interrupt is marked pending in the intercept message.
    fn interruption_pending(&self) -> bool;

    /// Checks that the current VTL is allowed to access the given GPA with the
    /// given access mode. Returns `Ok(())` if the access is allowed.
    fn check_vtl_access(
        &mut self,
        gpa: u64,
        mode: TranslateMode,
    ) -> Result<(), EmuCheckVtlAccessError>;

    /// Translates a GVA to a GPA.
    fn translate_gva(
        &mut self,
        gva: u64,
        mode: TranslateMode,
    ) -> Result<EmuTranslateResult, EmuTranslateError>;

    /// Generates an event (exception, guest nested page fault, etc.) in the guest.
    fn inject_pending_event(&mut self, event_info: HvAarch64PendingEvent);

    /// Get access to monitor support for the emulator, if it supports it.
    fn monitor_support(&self) -> Option<&dyn EmulatorMonitorSupport> {
        None
    }

    /// Returns true if `gpa` is mapped for the specified permissions.
    ///
    /// If true, then the emulator will use [`GuestMemory`] to access the GPA,
    /// and any failures will be fatal to the VM.
    ///
    /// If false, then the emulator will use [`CpuIo`] to access the GPA as
    /// MMIO.
    fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool;
}

pub trait TranslateGvaSupport {
    type Error;

    /// Gets the object used to access the guest memory.
    fn guest_memory(&self) -> &GuestMemory;

    /// Acquires the TLB lock for this processor.
    fn acquire_tlb_lock(&mut self);

    /// Returns the registers used to walk the page table.
    fn registers(&mut self) -> Result<TranslationRegisters, Self::Error>;
}

/// The result of translate_gva on [`EmulatorSupport`].
pub struct EmuTranslateResult {
    /// The GPA result of the translation.
    pub gpa: u64,
    /// Whether the page is an overlay page.
    /// Not all implementations report this yet, so the value is optional.
    pub overlay_page: Option<bool>,
}

/// The translation, if any, provided in the intercept message and provided by [`EmulatorSupport`].
#[derive(Debug)]
pub struct InitialTranslation {
    /// GVA for the translation.
    pub gva: u64,
    /// Translated gpa for the gva.
    pub gpa: u64,
    /// The access mode (read, write, or execute) the translation was made for.
    pub translate_mode: TranslateMode,
}

#[derive(Error, Debug)]
pub enum EmuCheckVtlAccessError {
    #[error("failed vtl permissions access for vtl {vtl:?} and access flags {denied_flags:?}")]
    AccessDenied {
        vtl: hvdef::Vtl,
        denied_flags: HvMapGpaFlags,
    },
}

/// Error for a failed gva translation from [`EmulatorSupport`].
#[derive(Error, Debug)]
#[error("translate gva to gpa returned non-successful code {code:?}")]
pub struct EmuTranslateError {
    /// The failing translation code, of type
    /// [`hvdef::hypercall::TranslateGvaResultCode`]; never `SUCCESS`.
    pub code: hvdef::hypercall::TranslateGvaResultCode,
    /// Pending event, if any, returned by the hypervisor to go with the translate code.
    pub event_info: Option<EsrEl2>,
}

/// The access type for a gva translation for [`EmulatorSupport`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum TranslateMode {
    /// A read operation.
    Read,
    /// A write operation.
    Write,
    /// An execute operation.
    Execute,
}

/// The requested intercept access type isn't supported.
#[derive(Debug)]
pub struct UnsupportedInterceptAccessType;

impl TryFrom<HvInterceptAccessType> for TranslateMode {
    type Error = UnsupportedInterceptAccessType;

    fn try_from(access_type: HvInterceptAccessType) -> Result<Self, Self::Error> {
        match access_type {
            HvInterceptAccessType::READ => Ok(TranslateMode::Read),
            HvInterceptAccessType::WRITE => Ok(TranslateMode::Write),
            HvInterceptAccessType::EXECUTE => Ok(TranslateMode::Execute),
            _ => Err(UnsupportedInterceptAccessType),
        }
    }
}

#[derive(Debug, Error)]
enum EmulationError {
    #[error("an interrupt caused the memory access exit")]
    InterruptionPending,
    #[error("emulator error (instruction {bytes:02x?})")]
    Emulator {
        bytes: Vec<u8>,
        #[source]
        error: aarch64emu::Error<Error>,
    },
}

/// Emulates an instruction.
pub async fn emulate<T: EmulatorSupport>(
    support: &mut T,
    intercept_state: &InterceptState,
    emu_mem: &GuestMemory,
    dev: &impl CpuIo,
) -> Result<(), VpHaltReason> {
    emulate_core(support, intercept_state, emu_mem, dev)
        .await
        .map_err(|e| VpHaltReason::EmulationFailure(e.into()))
}

async fn emulate_core<T: EmulatorSupport>(
    support: &mut T,
    intercept_state: &InterceptState,
    gm: &GuestMemory,
    dev: &impl CpuIo,
) -> Result<(), EmulationError> {
    tracing::trace!(physical_address = support.physical_address(), "emulating");

    if support.interruption_pending() {
        // This means a fault or interruption *caused* the intercept
        // (and only really applies to memory intercept handling).
        // An example of how this could happen is if the
        // interrupt vector table itself is in mmio space; taking an
        // interrupt at that point requires that the processor reads the
        // vector out of the table, which generates an mmio intercept,
        // but not one associated with any particular instruction.
        // Therefore, there is nothing to emulate.
        //
        // A fault can't be injected into the guest because that could
        // cause an infinite loop (as the processor tries to get the trap
        // vector out of the mmio-ed vector table). Just give up.

        return Err(EmulationError::InterruptionPending);
    }

    let mut cpu = EmulatorCpu::new(gm, dev, support, intercept_state.syndrome);
    let pc = cpu.pc();
    let result = {
        let mut emu = aarch64emu::Emulator::new(&mut cpu, intercept_state);
        emu.run().await
    };

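    // Every aarch64 instruction is four bytes wide, so substitute a zeroed
    // placeholder when the intercept did not capture the instruction bytes.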
    let instruction_bytes = if intercept_state.instruction_byte_count > 0 {
        intercept_state.instruction_bytes.to_vec()
    } else {
        vec![0; 4]
    };
    cpu.commit();

    if let Err(e) = result {
        match *e {
            aarch64emu::Error::MemoryAccess(addr, kind, err) => {
                if inject_memory_access_fault(addr, &err, support, intercept_state.syndrome) {
                    return Ok(());
                } else {
                    return Err(EmulationError::Emulator {
                        bytes: instruction_bytes,
                        error: aarch64emu::Error::MemoryAccess(addr, kind, err),
                    });
                }
            }
            err => {
                tracing::error!(
                    err = &err as &dyn std::error::Error,
                    len = instruction_bytes.len(),
                    physical_address = cpu.support.physical_address(),
                    "failed to emulate instruction"
                );
                let syndrome: EsrEl2 = IssInstructionAbort::new().into();
                cpu.support
                    .inject_pending_event(make_exception_event(syndrome, pc));
            }
        }
    }

    Ok(())
}

/// For storing gva to gpa translations in a cache in [`EmulatorCpu`].
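/// Translations are cached at page granularity: with 4 KiB pages
/// (`HV_PAGE_SHIFT` == 12), e.g. a gva of 0x1234_5678 is cached as gva_page
/// 0x12345 together with the gpa page it resolved to.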
struct GvaGpaCacheEntry {
    gva_page: u64,
    gpa_page: u64,
    translate_mode: TranslateMode,
}

impl GvaGpaCacheEntry {
    pub fn new(gva: u64, gpa: u64, translate_mode: TranslateMode) -> Self {
        GvaGpaCacheEntry {
            gva_page: gva >> hvdef::HV_PAGE_SHIFT,
            gpa_page: gpa >> hvdef::HV_PAGE_SHIFT,
            translate_mode,
        }
    }
}

struct EmulatorCpu<'a, T, U> {
    gm: &'a GuestMemory,
    support: &'a mut T,
    dev: &'a U,
    cached_translation: Option<GvaGpaCacheEntry>,
    syndrome: EsrEl2,
}

#[derive(Debug, Error)]
enum Error {
    #[error("translation error")]
    Translate(#[source] TranslateGvaError, Option<EsrEl2>),
    #[error("vtl permissions denied access for gpa {gpa}")]
    NoVtlAccess {
        gpa: u64,
        intercepting_vtl: hvdef::Vtl,
        denied_flags: HvMapGpaFlags,
    },
    #[error("failed to access mapped memory")]
    Memory(#[source] GuestMemoryError),
}

/// Result of a gva translation in [`EmulatorCpu`].
#[derive(Error, Debug)]
enum TranslateGvaError {
    #[error("gpa access denied code {0:?}")]
    AccessDenied(hvdef::hypercall::TranslateGvaResultCode),
    #[error("write on overlay page")]
    OverlayPageWrite,
    #[error("translation failed with unknown code {0:?}")]
    UnknownCode(hvdef::hypercall::TranslateGvaResultCode),
    #[error("translation failed with an intercept code")]
    Intercept,
    #[error("translation failed with a page fault-related code {0:?}")]
    PageFault(hvdef::hypercall::TranslateGvaResultCode),
}

impl<T: EmulatorSupport, U> EmulatorCpu<'_, T, U> {
    pub fn new<'a>(
        gm: &'a GuestMemory,
        dev: &'a U,
        support: &'a mut T,
        syndrome: EsrEl2,
    ) -> EmulatorCpu<'a, T, U> {
        let init_cache = {
            if let Some(InitialTranslation {
                gva,
                gpa,
                translate_mode,
            }) = support.initial_gva_translation()
            {
                tracing::trace!(
                    ?gva,
                    ?gpa,
                    ?translate_mode,
                    "adding initial translation to cache"
                );
                Some(GvaGpaCacheEntry::new(gva, gpa, translate_mode))
            } else {
                None
            }
        };

        EmulatorCpu {
            gm,
            dev,
            support,
            cached_translation: init_cache,
            syndrome,
        }
    }

    pub fn translate_gva(&mut self, gva: u64, mode: TranslateMode) -> Result<u64, Error> {
        type TranslateCode = hvdef::hypercall::TranslateGvaResultCode;

        // Note about invalid accesses at user mode: the exception code will
        // distinguish user vs kernel via _LOWER (e.g. kernel -> DATA_ABORT,
        // user -> DATA_ABORT_LOWER). We don't track that here though because
        // Hyper-V only takes the general version and will convert it depending
        // on the last execution state it has recorded.

        if let Some(GvaGpaCacheEntry {
            gva_page: cached_gva_page,
            gpa_page: cached_gpa_page,
            translate_mode: cached_mode,
        }) = self.cached_translation
        {
            if ((gva >> hvdef::HV_PAGE_SHIFT) == cached_gva_page) && (cached_mode == mode) {
                tracing::trace!(
                    ?gva,
                    ?cached_gva_page,
                    cached_gpa_page,
                    ?cached_mode,
                    "using cached entry"
                );
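                // Reconstruct the gpa from the cached page number plus the
                // requested offset within the page.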
                return Ok((cached_gpa_page << hvdef::HV_PAGE_SHIFT) + (gva & (HV_PAGE_SIZE - 1)));
            }
        }

        match self.support.translate_gva(gva, mode) {
            Ok(EmuTranslateResult { gpa, overlay_page }) => {
                if overlay_page == Some(true) && mode == TranslateMode::Write {
                    // Parity: Reads of overlay pages are allowed for x64.
                    let mut syndrome: EsrEl2 = crate::translate::Error::GpaUnmapped(3).into();
                    syndrome.set_il(self.syndrome.il());
                    return Err(Error::Translate(
                        TranslateGvaError::OverlayPageWrite,
                        Some(syndrome),
                    ));
                }

                let new_cache_entry = GvaGpaCacheEntry::new(gva, gpa, mode);

                self.cached_translation = Some(new_cache_entry);
                Ok(gpa)
            }
            Err(EmuTranslateError { code, event_info }) => match code {
                TranslateCode::INTERCEPT => {
                    tracing::trace!("translate gva to gpa returned an intercept event");
                    Err(Error::Translate(TranslateGvaError::Intercept, event_info))
                }
                TranslateCode::GPA_NO_READ_ACCESS
                | TranslateCode::GPA_NO_WRITE_ACCESS
                | TranslateCode::GPA_UNMAPPED
                | TranslateCode::GPA_ILLEGAL_OVERLAY_ACCESS
                | TranslateCode::GPA_UNACCEPTED => {
                    tracing::trace!("translate gva to gpa returned no access to page {:?}", code);
                    Err(Error::Translate(
                        TranslateGvaError::AccessDenied(code),
                        event_info,
                    ))
                }
                TranslateCode::PAGE_NOT_PRESENT
                | TranslateCode::PRIVILEGE_VIOLATION
                | TranslateCode::INVALID_PAGE_TABLE_FLAGS => {
                    tracing::trace!(gva, ?code, "translate gva to gpa returned");
                    Err(Error::Translate(
                        TranslateGvaError::PageFault(code),
                        event_info,
                    ))
                }
                TranslateCode::SUCCESS => unreachable!(),
                _ => {
                    tracing::trace!(
                        "translate error: unknown translation result code {:?}",
                        code
                    );

                    Err(Error::Translate(TranslateGvaError::UnknownCode(code), None))
                }
            },
        }
    }

    pub fn check_vtl_access(&mut self, gpa: u64, mode: TranslateMode) -> Result<(), Error> {
        self.support
            .check_vtl_access(gpa, mode)
            .map_err(|e| match e {
                EmuCheckVtlAccessError::AccessDenied { vtl, denied_flags } => Error::NoVtlAccess {
                    gpa,
                    intercepting_vtl: vtl,
                    denied_flags,
                },
            })
    }

    fn check_monitor_write(&self, gpa: u64, bytes: &[u8]) -> bool {
        if let Some(monitor_support) = self.support.monitor_support() {
            monitor_support.check_write(gpa, bytes)
        } else {
            false
        }
    }

    fn check_monitor_read(&self, gpa: u64, bytes: &mut [u8]) -> bool {
        if let Some(monitor_support) = self.support.monitor_support() {
            monitor_support.check_read(gpa, bytes)
        } else {
            false
        }
    }
}

impl<T: EmulatorSupport, U: CpuIo> aarch64emu::Cpu for EmulatorCpu<'_, T, U> {
    type Error = Error;

    async fn read_instruction(&mut self, gva: u64, bytes: &mut [u8]) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Execute)?;
        self.read_physical_memory(gpa, bytes).await
    }

    async fn read_memory(&mut self, gva: u64, bytes: &mut [u8]) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Read)?;
        self.read_physical_memory(gpa, bytes).await
    }

    async fn read_physical_memory(
        &mut self,
        gpa: u64,
        bytes: &mut [u8],
    ) -> Result<(), Self::Error> {
        self.check_vtl_access(gpa, TranslateMode::Read)?;

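        // Prefer the monitor page, then mapped guest memory; anything else is
        // treated as MMIO and routed to the device backend.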
        if self.check_monitor_read(gpa, bytes) {
            Ok(())
        } else if self.support.is_gpa_mapped(gpa, false) {
            self.gm.read_at(gpa, bytes).map_err(Self::Error::Memory)
        } else {
            self.dev
                .read_mmio(self.support.vp_index(), gpa, bytes)
                .await;
            Ok(())
        }
    }

    async fn write_memory(&mut self, gva: u64, bytes: &[u8]) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Write)?;
        self.write_physical_memory(gpa, bytes).await
    }

    async fn write_physical_memory(&mut self, gpa: u64, bytes: &[u8]) -> Result<(), Self::Error> {
        self.check_vtl_access(gpa, TranslateMode::Write)?;

        if self.support.is_gpa_mapped(gpa, true) {
            self.gm.write_at(gpa, bytes).map_err(Self::Error::Memory)?;
        } else {
            self.dev
                .write_mmio(self.support.vp_index(), gpa, bytes)
                .await;
        }
        Ok(())
    }

    async fn compare_and_write_memory(
        &mut self,
        gva: u64,
        current: &[u8],
        new: &[u8],
        success: &mut bool,
    ) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Write)?;

        self.check_vtl_access(gpa, TranslateMode::Write)?;

        if self.check_monitor_write(gpa, new) {
            *success = true;
            Ok(())
        } else if self.support.is_gpa_mapped(gpa, true) {
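            // Dispatch on the access width; guest memory compare-exchange is
            // typed, so each width uses its native integer representation.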
            *success = match (current.len(), new.len()) {
                (1, 1) => self
                    .gm
                    .compare_exchange(gpa, current[0], new[0])
                    .map(|r| r.is_ok()),
                (2, 2) => self
                    .gm
                    .compare_exchange(
                        gpa,
                        u16::from_ne_bytes(current.try_into().unwrap()),
                        u16::from_ne_bytes(new.try_into().unwrap()),
                    )
                    .map(|r| r.is_ok()),
                (4, 4) => self
                    .gm
                    .compare_exchange(
                        gpa,
                        u32::from_ne_bytes(current.try_into().unwrap()),
                        u32::from_ne_bytes(new.try_into().unwrap()),
                    )
                    .map(|r| r.is_ok()),
                (8, 8) => self
                    .gm
                    .compare_exchange(
                        gpa,
                        u64::from_ne_bytes(current.try_into().unwrap()),
                        u64::from_ne_bytes(new.try_into().unwrap()),
                    )
                    .map(|r| r.is_ok()),
                _ => panic!("unsupported size for compare and write memory"),
            }
            .map_err(Self::Error::Memory)?;
            Ok(())
        } else {
            // Ignore the comparison aspect for device MMIO.
            *success = true;
            self.dev
                .write_mmio(self.support.vp_index(), gpa, new)
                .await;
            Ok(())
        }
    }
}

impl<T: AccessCpuState, U: CpuIo> AccessCpuState for EmulatorCpu<'_, T, U> {
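    // All register accesses delegate to the backing `EmulatorSupport`
    // implementation.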
    fn commit(&mut self) {
        self.support.commit()
    }
    fn x(&mut self, index: u8) -> u64 {
        self.support.x(index)
    }
    fn update_x(&mut self, index: u8, data: u64) {
        self.support.update_x(index, data)
    }
    fn q(&self, index: u8) -> u128 {
        self.support.q(index)
    }
    fn update_q(&mut self, index: u8, data: u128) {
        self.support.update_q(index, data)
    }
    fn d(&self, index: u8) -> u64 {
        self.support.d(index)
    }
    fn update_d(&mut self, index: u8, data: u64) {
        self.support.update_d(index, data)
    }
    fn h(&self, index: u8) -> u32 {
        self.support.h(index)
    }
    fn update_h(&mut self, index: u8, data: u32) {
        self.support.update_h(index, data)
    }
    fn s(&self, index: u8) -> u16 {
        self.support.s(index)
    }
    fn update_s(&mut self, index: u8, data: u16) {
        self.support.update_s(index, data)
    }
    fn b(&self, index: u8) -> u8 {
        self.support.b(index)
    }
    fn update_b(&mut self, index: u8, data: u8) {
        self.support.update_b(index, data)
    }
    fn sp(&mut self) -> u64 {
        self.support.sp()
    }
    fn update_sp(&mut self, data: u64) {
        self.support.update_sp(data)
    }
    fn fp(&mut self) -> u64 {
        self.support.fp()
    }
    fn update_fp(&mut self, data: u64) {
        self.support.update_fp(data)
    }
    fn lr(&mut self) -> u64 {
        self.support.lr()
    }
    fn update_lr(&mut self, data: u64) {
        self.support.update_lr(data)
    }
    fn pc(&mut self) -> u64 {
        self.support.pc()
    }
    fn update_pc(&mut self, data: u64) {
        self.support.update_pc(data)
    }
    fn cpsr(&mut self) -> aarch64defs::Cpsr64 {
        self.support.cpsr()
    }
}

/// Creates a pending event for the exception type.
pub fn make_exception_event(syndrome: EsrEl2, fault_address: u64) -> HvAarch64PendingEvent {
    let exception_event = hvdef::HvAarch64PendingExceptionEvent {
        header: hvdef::HvAarch64PendingEventHeader::new()
            .with_event_pending(true)
            .with_event_type(HvAarch64PendingEventType::EXCEPTION),
        syndrome: syndrome.into(),
        fault_address,
        _padding: Default::default(),
    };
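    // The pending event register is 32 bytes wide; copy the exception event
    // into the front and leave the remainder zeroed.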
    let exception_event_bytes = exception_event.as_bytes();
    let mut event = [0u8; 32];
    event.as_mut_slice()[..exception_event_bytes.len()].copy_from_slice(exception_event_bytes);
    HvAarch64PendingEvent::read_from_bytes(&event[..]).unwrap()
}

/// Injects an event into the guest if appropriate.
///
/// Returns true if an event was injected into the guest.
/// In the case of false being returned, the caller can
/// return the appropriate error code.
#[must_use]
fn inject_memory_access_fault<T: EmulatorSupport>(
    gva: u64,
    result: &Error,
    support: &mut T,
    syndrome: EsrEl2,
) -> bool {
    match result {
        Error::Translate(e, event) => {
            tracing::trace!(
                error = e as &dyn std::error::Error,
                "translation failed, injecting event"
            );

            if let Some(event_info) = event {
                support.inject_pending_event(make_exception_event(*event_info, gva));

                // The emulation did what it was supposed to do, which is throw a fault, so the emulation is done.
                return true;
            }
            false
        }
        Error::NoVtlAccess {
            gpa,
            intercepting_vtl: _,
            denied_flags,
        } => {
            tracing::trace!(
                error = result as &dyn std::error::Error,
                ?gva,
                ?gpa,
                "vtl permissions checking failed"
            );

            let event = vtl_access_event(gva, *denied_flags, &syndrome);
            support.inject_pending_event(event);
            true
        }
        Error::Memory(_) => false,
    }
}

/// Generates the appropriate event for a VTL access error based
/// on the intercepting VTL.
fn vtl_access_event(
    gva: u64,
    denied_access: HvMapGpaFlags,
    cur_syndrome: &EsrEl2,
) -> HvAarch64PendingEvent {
    assert!(denied_access.kernel_executable() || denied_access.user_executable());
    let inst_abort = IssInstructionAbort::new().with_ifsc(FaultStatusCode::PERMISSION_FAULT_LEVEL2);
    let mut syndrome: EsrEl2 = inst_abort.into();
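    // Preserve the IL bit (16-bit vs. 32-bit instruction length) from the
    // intercepted instruction's syndrome.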
    syndrome.set_il(cur_syndrome.il());
    make_exception_event(syndrome, gva)
}

/// Tries to emulate monitor page writes without taking the slower, full
/// emulation path.
///
/// The caller must have already validated that the fault was due to a write to
/// a monitor page GPA.
///
/// Returns the bitmask being set within the monitor page.
pub fn emulate_mnf_write_fast_path<T: EmulatorSupport>(
    opcode: u32,
    support: &mut T,
    gm: &GuestMemory,
    dev: &impl CpuIo,
) -> Option<u64> {
    if support.interruption_pending() {
        return None;
    }

    // LDSETx / STSETx. A "fast path" is possible because we can assume the
    // MNF page is always zero-filled.
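    // Encoding: bits 31:30 hold the size field, bits 20:16 Rs (the bitmask
    // operand), and bits 4:0 Rt (the result register; 31 selects the zero
    // register, which gives the STSET alias). For example, 0xf821_301f is
    // STSET X1, [X0] (LDSET X1, XZR, [X0]): a 64-bit access with rs = 1 and
    // rt = 31.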
    if (opcode & 0x38203c00) == 0x38203000 {
        let mut cpu = EmulatorCpu::new(gm, dev, support, EsrEl2::from_bits(0));
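        // The size field encodes an access width of 1, 2, 4, or 8 bytes;
        // convert it to bits so the bitmask can be narrowed to that width.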
        let size = (1 << (opcode >> 30)) * 8;
        let rs = (opcode >> 16) as u8 & 0x1f;
        let bitmask = if rs < 31 { cpu.x(rs) } else { 0 };
        let bitmask = if size == 64 {
            bitmask
        } else {
            bitmask & ((1 << size) - 1)
        };
        let rt = opcode as u8 & 0x1f;
        if rt != 31 {
            // The page is assumed zero-filled, so the value loaded by LDSETx
            // is always zero.
            cpu.update_x(rt, 0);
        }

        let new_pc = cpu.pc().wrapping_add(4);
        cpu.update_pc(new_pc);
        cpu.commit();
        Some(bitmask)
    } else {
        tracelimit::warn_ratelimited!(
            opcode = format!("{:x}", opcode),
            "MNF fast path unknown opcode"
        );
        None
    }
}