virt_support_aarch64emu/emulate.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Wrapper around aarch64emu for emulating single instructions to handle VM exits.

use crate::translate::TranslationRegisters;
use aarch64defs::EsrEl2;
use aarch64defs::FaultStatusCode;
use aarch64defs::IssInstructionAbort;
use aarch64emu::AccessCpuState;
use aarch64emu::InterceptState;
use guestmem::GuestMemory;
use guestmem::GuestMemoryError;
use hvdef::HV_PAGE_SIZE;
use hvdef::HvAarch64PendingEvent;
use hvdef::HvAarch64PendingEventType;
use hvdef::HvInterceptAccessType;
use hvdef::HvMapGpaFlags;
use thiserror::Error;
use virt::VpHaltReason;
use virt::io::CpuIo;
use vm_topology::processor::VpIndex;
use zerocopy::FromBytes;
use zerocopy::IntoBytes;

/// Support routines for the emulator.
pub trait EmulatorSupport: AccessCpuState {
    /// The hypervisor error type.
    type Error: 'static + std::error::Error + Send + Sync;

    /// The current VP index.
    fn vp_index(&self) -> VpIndex;

    /// The physical address that caused the fault.
    fn physical_address(&self) -> Option<u64>;

    /// The gva translation included in the intercept message header, if valid.
    fn initial_gva_translation(&mut self) -> Option<InitialTranslation>;

    /// Whether the intercept message indicates that an interruption is pending.
    fn interruption_pending(&self) -> bool;

    /// Checks that the given GPA is valid for the current VTL to access with
    /// the given mode. Returns `Ok(())` if the access is allowed.
    fn check_vtl_access(
        &mut self,
        gpa: u64,
        mode: TranslateMode,
    ) -> Result<(), EmuCheckVtlAccessError<Self::Error>>;

    /// Translates a GVA to a GPA.
    fn translate_gva(
        &mut self,
        gva: u64,
        mode: TranslateMode,
    ) -> Result<Result<EmuTranslateResult, EmuTranslateError>, Self::Error>;

    /// Generates an event (exception, guest nested page fault, etc.) in the guest.
    fn inject_pending_event(&mut self, event_info: HvAarch64PendingEvent);

    /// Check if the specified write is wholly inside the monitor page, and signal the associated
    /// connected ID if it is.
    fn check_monitor_write(&self, gpa: u64, bytes: &[u8]) -> bool {
        let _ = (gpa, bytes);
        false
    }

    /// Returns true if `gpa` is mapped for the specified permissions.
    ///
    /// If true, then the emulator will use [`GuestMemory`] to access the GPA,
    /// and any failures will be fatal to the VM.
    ///
    /// If false, then the emulator will use [`CpuIo`] to access the GPA as
    /// MMIO.
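    ///
    /// A minimal sketch of the resulting dispatch, mirroring
    /// `read_physical_memory` below (`support`, `gm`, and `dev` are
    /// illustrative local names):
    ///
    /// ```ignore
    /// if support.is_gpa_mapped(gpa, false) {
    ///     gm.read_at(gpa, bytes)?; // backed by RAM; failures are fatal
    /// } else {
    ///     dev.read_mmio(support.vp_index(), gpa, bytes).await; // MMIO path
    /// }
    /// ```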
    fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool;
}

/// Support routines for walking the guest's page tables to translate a GVA.
pub trait TranslateGvaSupport {
    /// The error type returned by the support routines.
    type Error;

    /// Gets the object used to access the guest memory.
    fn guest_memory(&self) -> &GuestMemory;

    /// Acquires the TLB lock for this processor.
    fn acquire_tlb_lock(&mut self);

    /// Returns the registers used to walk the page table.
    fn registers(&mut self) -> Result<TranslationRegisters, Self::Error>;
}

/// The result of translate_gva on [`EmulatorSupport`].
pub struct EmuTranslateResult {
    /// The GPA result of the translation.
    pub gpa: u64,
    /// Whether the page is an overlay page.
    /// Not all implementations report the overlay page yet, so this value is
    /// optional.
    pub overlay_page: Option<bool>,
}

/// The translation, if any, provided in the intercept message and returned by [`EmulatorSupport`].
#[derive(Debug)]
pub struct InitialTranslation {
    /// GVA for the translation.
    pub gva: u64,
    /// Translated gpa for the gva.
    pub gpa: u64,
    /// Whether the translation has read, write, or execute permissions.
    pub translate_mode: TranslateMode,
}

#[derive(Error, Debug)]
pub enum EmuCheckVtlAccessError<E> {
    #[error(transparent)]
    Hypervisor(#[from] E),
    #[error("failed vtl permissions access for vtl {vtl:?} and access flags {denied_flags:?}")]
    AccessDenied {
        vtl: hvdef::Vtl,
        denied_flags: HvMapGpaFlags,
    },
}

/// Error for a failed gva translation from [`EmulatorSupport`].
#[derive(Error, Debug)]
#[error("translate gva to gpa returned non-successful code {code:?}")]
pub struct EmuTranslateError {
    /// The failing translate code, of type
    /// [`hvdef::hypercall::TranslateGvaResultCode`]. Never `SUCCESS`.
    pub code: hvdef::hypercall::TranslateGvaResultCode,
    /// Pending event, if any, returned by the hypervisor to accompany the
    /// translate code.
    pub event_info: Option<EsrEl2>,
}

/// The access type for a gva translation for [`EmulatorSupport`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum TranslateMode {
    /// A read operation.
    Read,
    /// A write operation.
    Write,
    /// An execute operation.
    Execute,
}

/// The requested intercept access type isn't supported.
#[derive(Debug)]
pub struct UnsupportedInterceptAccessType;

impl TryFrom<HvInterceptAccessType> for TranslateMode {
    type Error = UnsupportedInterceptAccessType;

    fn try_from(access_type: HvInterceptAccessType) -> Result<Self, Self::Error> {
        match access_type {
            HvInterceptAccessType::READ => Ok(TranslateMode::Read),
            HvInterceptAccessType::WRITE => Ok(TranslateMode::Write),
            HvInterceptAccessType::EXECUTE => Ok(TranslateMode::Execute),
            _ => Err(UnsupportedInterceptAccessType),
        }
    }
}

#[derive(Debug, Error)]
enum EmulationError<E> {
    #[error("an interrupt caused the memory access exit")]
    InterruptionPending,
    #[error("emulator error (instruction {bytes:02x?})")]
    Emulator {
        bytes: Vec<u8>,
        #[source]
        error: aarch64emu::Error<E>,
    },
}

/// Emulates an instruction.
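///
/// A minimal sketch of a VM-exit handler driving this routine (the
/// surrounding `support`, `state`, `gm`, and `dev` values are illustrative):
///
/// ```ignore
/// // On an intercepted memory access, emulate the faulting instruction.
/// emulate(&mut support, &state, &gm, &dev).await?;
/// ```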
pub async fn emulate<T: EmulatorSupport>(
    support: &mut T,
    intercept_state: &InterceptState,
    gm: &GuestMemory,
    dev: &impl CpuIo,
) -> Result<(), VpHaltReason<T::Error>> {
    tracing::trace!(physical_address = support.physical_address(), "emulating");

    if support.interruption_pending() {
        // This means a fault or interruption *caused* the intercept
        // (and only really applies to memory intercept handling).
        // An example of how this could happen is if the
        // interrupt vector table itself is in mmio space; taking an
        // interrupt at that point requires that the processor reads the
        // vector out of the table, which generates an mmio intercept,
        // but not one associated with any particular instruction.
        // Therefore, there is nothing to emulate.
        //
        // A fault can't be injected into the guest because that could
        // cause an infinite loop (as the processor tries to get the trap
        // vector out of the mmio-ed vector table). Just give up.

        return Err(VpHaltReason::EmulationFailure(
            EmulationError::<T::Error>::InterruptionPending.into(),
        ));
    }

    let mut cpu = EmulatorCpu::new(gm, dev, support, intercept_state.syndrome);
    let pc = cpu.pc();
    let result = {
        let mut emu = aarch64emu::Emulator::new(&mut cpu, intercept_state);
        emu.run().await
    };

    let instruction_bytes = if intercept_state.instruction_byte_count > 0 {
        intercept_state.instruction_bytes.to_vec()
    } else {
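        // The intercept did not capture any instruction bytes; report a
        // single zeroed 4-byte instruction slot in any error below.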
        vec![0, 0, 0, 0]
    };
    cpu.commit();

    if let Err(e) = result {
        match *e {
            aarch64emu::Error::MemoryAccess(addr, kind, err) => {
                if inject_memory_access_fault(addr, &err, support, intercept_state.syndrome) {
                    return Ok(());
                } else {
                    return Err(VpHaltReason::EmulationFailure(
                        EmulationError::Emulator {
                            bytes: instruction_bytes,
                            error: aarch64emu::Error::MemoryAccess(addr, kind, err),
                        }
                        .into(),
                    ));
                }
            }
            err => {
                tracing::error!(
                    err = &err as &dyn std::error::Error,
                    len = instruction_bytes.len(),
                    physical_address = cpu.support.physical_address(),
                    "failed to emulate instruction"
                );
                let syndrome: EsrEl2 = IssInstructionAbort::new().into();
                cpu.support
                    .inject_pending_event(make_exception_event(syndrome, pc));
            }
        }
    }

    Ok(())
}

/// A cached gva-to-gpa translation, stored at page granularity, used by
/// [`EmulatorCpu`] to avoid repeated walks of the same page.
struct GvaGpaCacheEntry {
    gva_page: u64,
    gpa_page: u64,
    translate_mode: TranslateMode,
}

impl GvaGpaCacheEntry {
    pub fn new(gva: u64, gpa: u64, translate_mode: TranslateMode) -> Self {
        GvaGpaCacheEntry {
            gva_page: gva >> hvdef::HV_PAGE_SHIFT,
            gpa_page: gpa >> hvdef::HV_PAGE_SHIFT,
            translate_mode,
        }
    }
}

struct EmulatorCpu<'a, T, U> {
    gm: &'a GuestMemory,
    support: &'a mut T,
    dev: &'a U,
    cached_translation: Option<GvaGpaCacheEntry>,
    syndrome: EsrEl2,
}

#[derive(Debug, Error)]
enum Error<E> {
    #[error(transparent)]
    Hypervisor(#[from] E),
    #[error("translation error")]
    Translate(#[source] TranslateGvaError, Option<EsrEl2>),
    #[error("vtl permissions denied access for gpa {gpa}")]
    NoVtlAccess {
        gpa: u64,
        intercepting_vtl: hvdef::Vtl,
        denied_flags: HvMapGpaFlags,
    },
    #[error("failed to access mapped memory")]
    Memory(#[source] GuestMemoryError),
}

/// Failure modes of a gva translation in [`EmulatorCpu`].
#[derive(Error, Debug)]
enum TranslateGvaError {
    #[error("gpa access denied code {0:?}")]
    AccessDenied(hvdef::hypercall::TranslateGvaResultCode),
    #[error("write on overlay page")]
    OverlayPageWrite,
    #[error("translation failed with unknown code {0:?}")]
    UnknownCode(hvdef::hypercall::TranslateGvaResultCode),
    #[error("translation failed with an intercept code")]
    Intercept,
    #[error("translation failed with a page fault-related code {0:?}")]
    PageFault(hvdef::hypercall::TranslateGvaResultCode),
}

impl<T: EmulatorSupport, U> EmulatorCpu<'_, T, U> {
    pub fn new<'a>(
        gm: &'a GuestMemory,
        dev: &'a U,
        support: &'a mut T,
        syndrome: EsrEl2,
    ) -> EmulatorCpu<'a, T, U> {
        let init_cache = {
            if let Some(InitialTranslation {
                gva,
                gpa,
                translate_mode,
            }) = support.initial_gva_translation()
            {
                tracing::trace!(
                    ?gva,
                    ?gpa,
                    ?translate_mode,
                    "adding initial translation to cache"
                );
                Some(GvaGpaCacheEntry::new(gva, gpa, translate_mode))
            } else {
                None
            }
        };

        EmulatorCpu {
            gm,
            dev,
            support,
            cached_translation: init_cache,
            syndrome,
        }
    }

    pub fn translate_gva(&mut self, gva: u64, mode: TranslateMode) -> Result<u64, Error<T::Error>> {
        type TranslateCode = hvdef::hypercall::TranslateGvaResultCode;

        // Note about invalid accesses at user mode: the exception code will
        // distinguish user vs kernel via _LOWER (e.g. kernel -> DATA_ABORT,
        // user -> DATA_ABORT_LOWER). We don't track that here though because
        // Hyper-V only takes the general version and will convert it depending
        // on the last execution state it has recorded.

        if let Some(GvaGpaCacheEntry {
            gva_page: cached_gva_page,
            gpa_page: cached_gpa_page,
            translate_mode: cached_mode,
        }) = self.cached_translation
        {
            if ((gva >> hvdef::HV_PAGE_SHIFT) == cached_gva_page) && (cached_mode == mode) {
                tracing::trace!(
                    ?gva,
                    ?cached_gva_page,
                    cached_gpa_page,
                    ?cached_mode,
                    "using cached entry"
                );
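                // Splice the page offset back onto the cached GPA page.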
                return Ok((cached_gpa_page << hvdef::HV_PAGE_SHIFT) + (gva & (HV_PAGE_SIZE - 1)));
            }
        }

        match self.support.translate_gva(gva, mode) {
            Ok(Ok(EmuTranslateResult { gpa, overlay_page })) => {
                if overlay_page == Some(true) && mode == TranslateMode::Write {
                    // Parity: Reads of overlay pages are allowed for x64.
                    let mut syndrome: EsrEl2 = crate::translate::Error::GpaUnmapped(3).into();
                    syndrome.set_il(self.syndrome.il());
                    return Err(Error::Translate(
                        TranslateGvaError::OverlayPageWrite,
                        Some(syndrome),
                    ));
                }

                self.cached_translation = Some(GvaGpaCacheEntry::new(gva, gpa, mode));
                Ok(gpa)
            }
            Ok(Err(EmuTranslateError { code, event_info })) => match code {
                TranslateCode::INTERCEPT => {
                    tracing::trace!("translate gva to gpa returned an intercept event");
                    Err(Error::Translate(TranslateGvaError::Intercept, event_info))
                }
                TranslateCode::GPA_NO_READ_ACCESS
                | TranslateCode::GPA_NO_WRITE_ACCESS
                | TranslateCode::GPA_UNMAPPED
                | TranslateCode::GPA_ILLEGAL_OVERLAY_ACCESS
                | TranslateCode::GPA_UNACCEPTED => {
                    tracing::trace!("translate gva to gpa returned no access to page {:?}", code);
                    Err(Error::Translate(
                        TranslateGvaError::AccessDenied(code),
                        event_info,
                    ))
                }
                TranslateCode::PAGE_NOT_PRESENT
                | TranslateCode::PRIVILEGE_VIOLATION
                | TranslateCode::INVALID_PAGE_TABLE_FLAGS => {
                    tracing::trace!(gva, ?code, "translate gva to gpa returned");
                    Err(Error::Translate(
                        TranslateGvaError::PageFault(code),
                        event_info,
                    ))
                }
                TranslateCode::SUCCESS => unreachable!(),
                _ => {
                    tracing::trace!(
                        "translate error: unknown translation result code {:?}",
                        code
                    );

                    Err(Error::Translate(TranslateGvaError::UnknownCode(code), None))
                }
            },
            Err(e) => {
                tracing::trace!("translate error {:?}", e);
                Err(Error::Hypervisor(e))
            }
        }
    }

    pub fn check_vtl_access(
        &mut self,
        gpa: u64,
        mode: TranslateMode,
    ) -> Result<(), Error<T::Error>> {
        self.support
            .check_vtl_access(gpa, mode)
            .map_err(|e| match e {
                EmuCheckVtlAccessError::Hypervisor(hv_err) => Error::Hypervisor(hv_err),
                EmuCheckVtlAccessError::AccessDenied { vtl, denied_flags } => Error::NoVtlAccess {
                    gpa,
                    intercepting_vtl: vtl,
                    denied_flags,
                },
            })
    }
}

impl<T: EmulatorSupport, U: CpuIo> aarch64emu::Cpu for EmulatorCpu<'_, T, U> {
    type Error = Error<T::Error>;

    async fn read_instruction(&mut self, gva: u64, bytes: &mut [u8]) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Execute)?;
        self.read_physical_memory(gpa, bytes).await
    }

    async fn read_memory(&mut self, gva: u64, bytes: &mut [u8]) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Read)?;
        self.read_physical_memory(gpa, bytes).await
    }

    async fn read_physical_memory(
        &mut self,
        gpa: u64,
        bytes: &mut [u8],
    ) -> Result<(), Self::Error> {
        self.check_vtl_access(gpa, TranslateMode::Read)?;

        if self.support.is_gpa_mapped(gpa, false) {
            self.gm.read_at(gpa, bytes).map_err(Self::Error::Memory)?;
        } else {
            self.dev
                .read_mmio(self.support.vp_index(), gpa, bytes)
                .await;
        }
        Ok(())
    }

    async fn write_memory(&mut self, gva: u64, bytes: &[u8]) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Write)?;
        self.write_physical_memory(gpa, bytes).await
    }

    async fn write_physical_memory(&mut self, gpa: u64, bytes: &[u8]) -> Result<(), Self::Error> {
        self.check_vtl_access(gpa, TranslateMode::Write)?;

        if self.support.is_gpa_mapped(gpa, true) {
            self.gm.write_at(gpa, bytes).map_err(Self::Error::Memory)?;
        } else {
            self.dev
                .write_mmio(self.support.vp_index(), gpa, bytes)
                .await;
        }
        Ok(())
    }

    async fn compare_and_write_memory(
        &mut self,
        gva: u64,
        current: &[u8],
        new: &[u8],
        success: &mut bool,
    ) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Write)?;

        self.check_vtl_access(gpa, TranslateMode::Write)?;

        if self.support.check_monitor_write(gpa, new) {
            *success = true;
            Ok(())
        } else if self.support.is_gpa_mapped(gpa, true) {
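            // compare_exchange_bytes needs a mutable buffer for the expected
            // value, so stage the caller's `current` bytes in a local copy.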
            let buf = &mut [0; 16][..current.len()];
            buf.copy_from_slice(current);
            match self.gm.compare_exchange_bytes(gpa, buf, new) {
                Ok(swapped) => {
                    *success = swapped;
                    Ok(())
                }
                Err(e) => Err(Self::Error::Memory(e)),
            }
        } else {
            // Ignore the comparison aspect for device MMIO.
            *success = true;
            self.dev.write_mmio(self.support.vp_index(), gpa, new).await;
            Ok(())
        }
    }
}

impl<T: AccessCpuState, U: CpuIo> AccessCpuState for EmulatorCpu<'_, T, U> {
    fn commit(&mut self) {
        self.support.commit()
    }
    fn x(&mut self, index: u8) -> u64 {
        self.support.x(index)
    }
    fn update_x(&mut self, index: u8, data: u64) {
        self.support.update_x(index, data)
    }
    fn q(&self, index: u8) -> u128 {
        self.support.q(index)
    }
    fn update_q(&mut self, index: u8, data: u128) {
        self.support.update_q(index, data)
    }
    fn d(&self, index: u8) -> u64 {
        self.support.d(index)
    }
    fn update_d(&mut self, index: u8, data: u64) {
        self.support.update_d(index, data)
    }
    fn h(&self, index: u8) -> u16 {
        self.support.h(index)
    }
    fn update_h(&mut self, index: u8, data: u16) {
        self.support.update_h(index, data)
    }
    fn s(&self, index: u8) -> u32 {
        self.support.s(index)
    }
    fn update_s(&mut self, index: u8, data: u32) {
        self.support.update_s(index, data)
    }
    fn b(&self, index: u8) -> u8 {
        self.support.b(index)
    }
    fn update_b(&mut self, index: u8, data: u8) {
        self.support.update_b(index, data)
    }
    fn sp(&mut self) -> u64 {
        self.support.sp()
    }
    fn update_sp(&mut self, data: u64) {
        self.support.update_sp(data)
    }
    fn fp(&mut self) -> u64 {
        self.support.fp()
    }
    fn update_fp(&mut self, data: u64) {
        self.support.update_fp(data)
    }
    fn lr(&mut self) -> u64 {
        self.support.lr()
    }
    fn update_lr(&mut self, data: u64) {
        self.support.update_lr(data)
    }
    fn pc(&mut self) -> u64 {
        self.support.pc()
    }
    fn update_pc(&mut self, data: u64) {
        self.support.update_pc(data)
    }
    fn cpsr(&mut self) -> aarch64defs::Cpsr64 {
        self.support.cpsr()
    }
}

/// Creates a pending event for the exception type.
pub fn make_exception_event(syndrome: EsrEl2, fault_address: u64) -> HvAarch64PendingEvent {
    let exception_event = hvdef::HvAarch64PendingExceptionEvent {
        header: hvdef::HvAarch64PendingEventHeader::new()
            .with_event_pending(true)
            .with_event_type(HvAarch64PendingEventType::EXCEPTION),
        syndrome: syndrome.into(),
        fault_address,
        _padding: Default::default(),
    };
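    // Pack the typed exception event into the generic 32-byte pending-event
    // wrapper expected by the hypervisor interface.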
    let exception_event_bytes = exception_event.as_bytes();
    let mut event = [0u8; 32];
    event.as_mut_slice()[..exception_event_bytes.len()].copy_from_slice(exception_event_bytes);
    HvAarch64PendingEvent::read_from_bytes(&event[..]).unwrap()
}

/// Injects an event into the guest if appropriate.
///
/// Returns true if an event was injected into the guest. If false is
/// returned, the caller should surface the underlying error instead.
#[must_use]
fn inject_memory_access_fault<T: EmulatorSupport>(
    gva: u64,
    result: &Error<T::Error>,
    support: &mut T,
    syndrome: EsrEl2,
) -> bool {
    match result {
        Error::Translate(e, event) => {
            tracing::trace!(
                error = e as &dyn std::error::Error,
                "translation failed, injecting event"
            );

            if let Some(event_info) = event {
                support.inject_pending_event(make_exception_event(*event_info, gva));

                // The emulation did what it was supposed to do, which is throw a fault, so the emulation is done.
                return true;
            }
            false
        }
        Error::NoVtlAccess {
            gpa,
            intercepting_vtl: _,
            denied_flags,
        } => {
            tracing::trace!(
                error = result as &dyn std::error::Error,
                ?gva,
                ?gpa,
                "Vtl permissions checking failed"
            );

            let event = vtl_access_event(gva, *denied_flags, &syndrome);
            support.inject_pending_event(event);
            true
        }
        Error::Hypervisor(_) | Error::Memory(_) => false,
    }
}

/// Generates the appropriate event for a VTL access error based
/// on the intercepting VTL.
fn vtl_access_event(
    gva: u64,
    denied_access: HvMapGpaFlags,
    cur_syndrome: &EsrEl2,
) -> HvAarch64PendingEvent {
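    // Only execute-permission denials are expected here, so the injected
    // event is always an instruction abort.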
    assert!(denied_access.kernel_executable() || denied_access.user_executable());
    let inst_abort = IssInstructionAbort::new().with_ifsc(FaultStatusCode::PERMISSION_FAULT_LEVEL2);
    let mut syndrome: EsrEl2 = inst_abort.into();
    syndrome.set_il(cur_syndrome.il());
    make_exception_event(syndrome, gva)
}

/// Tries to emulate monitor page writes without taking the slower, full
/// emulation path.
///
/// The caller must have already validated that the fault was due to a write to
/// a monitor page GPA.
///
/// Returns the mask of bits being set within the monitor page.
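///
/// A minimal usage sketch (`opcode`, `support`, `gm`, and `dev` are
/// illustrative values from the intercept handler):
///
/// ```ignore
/// if let Some(bits) = emulate_mnf_write_fast_path(opcode, &mut support, &gm, &dev) {
///     // Signal the monitor-page connections corresponding to `bits`.
/// }
/// ```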
pub fn emulate_mnf_write_fast_path<T: EmulatorSupport>(
    opcode: u32,
    support: &mut T,
    gm: &GuestMemory,
    dev: &impl CpuIo,
) -> Option<u64> {
    if support.interruption_pending() {
        return None;
    }

    // LDSETx / STSETx. A "fast path" is possible because we can assume the
    // MNF page is always zero-filled.
    if (opcode & 0x38203c00) == 0x38203000 {
        let mut cpu = EmulatorCpu::new(gm, dev, support, EsrEl2::from_bits(0));
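        // Bits [31:30] encode the access size (0 = 8-bit .. 3 = 64-bit);
        // convert it to a width in bits.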
        let size = (1 << (opcode >> 30)) * 8;
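        // Rs (bits [20:16]) holds the bits to set; register 31 reads as zero.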
        let rs = (opcode >> 16) as u8 & 0x1f;
        let bitmask = if rs < 31 { cpu.x(rs) } else { 0 };
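        // Truncate the mask to the operand size for sub-64-bit forms.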
        let bitmask = if size == 64 {
            bitmask
        } else {
            bitmask & ((1 << size) - 1)
        };
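        // Rt (bits [4:0]) receives the loaded value; since the MNF page is
        // assumed zero-filled, the old value is always 0.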
        let rt = opcode as u8 & 0x1f;
        if rt != 31 {
            cpu.update_x(rt, 0);
        }

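        // Advance the PC past the emulated 4-byte instruction and flush state.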
        let new_pc = cpu.pc().wrapping_add(4);
        cpu.update_pc(new_pc);
        cpu.commit();
        Some(bitmask)
    } else {
        tracelimit::warn_ratelimited!(
            opcode = format!("{:x}", opcode),
            "MNF fast path unknown opcode"
        );
        None
    }
}