use crate::translate::TranslateFlags;
use crate::translate::TranslatePrivilegeCheck;
use crate::translate::translate_gva_to_gpa;
use guestmem::GuestMemory;
use guestmem::GuestMemoryError;
use hvdef::HV_PAGE_SIZE;
use hvdef::HvInterceptAccessType;
use hvdef::HvMapGpaFlags;
use thiserror::Error;
use virt::VpHaltReason;
use virt::io::CpuIo;
use vm_topology::processor::VpIndex;
use x86defs::Exception;
use x86defs::RFlags;
use x86defs::SegmentRegister;
use x86emu::Gp;
use x86emu::RegisterIndex;
use x86emu::Segment;
use zerocopy::FromBytes;
use zerocopy::IntoBytes;
/// Platform access the x86 instruction emulator needs from a virtual
/// processor: register state, captured instruction bytes, translation and
/// intercept context, and local APIC access.
pub trait EmulatorSupport {
    /// Error type produced by the backing platform.
    type Error: 'static + std::error::Error + Send + Sync;
    /// The index of the current virtual processor.
    fn vp_index(&self) -> VpIndex;
    /// The processor vendor, used by the emulator for vendor-specific
    /// instruction behavior.
    fn vendor(&self) -> x86defs::cpuid::Vendor;
    /// Reads a general-purpose register.
    fn gp(&mut self, index: Gp) -> u64;
    /// Writes a general-purpose register.
    fn set_gp(&mut self, reg: Gp, v: u64);
    /// Reads the instruction pointer.
    fn rip(&mut self) -> u64;
    /// Writes the instruction pointer.
    fn set_rip(&mut self, v: u64);
    /// Reads a segment register.
    fn segment(&mut self, index: Segment) -> SegmentRegister;
    /// Reads the EFER MSR.
    fn efer(&mut self) -> u64;
    /// Reads CR0.
    fn cr0(&mut self) -> u64;
    /// Reads RFLAGS.
    fn rflags(&mut self) -> RFlags;
    /// Writes RFLAGS.
    fn set_rflags(&mut self, v: RFlags);
    /// Reads an XMM register.
    fn xmm(&mut self, reg: usize) -> u128;
    /// Writes an XMM register.
    fn set_xmm(&mut self, reg: usize, value: u128) -> Result<(), Self::Error>;
    /// Flushes any cached register state back to the platform.
    fn flush(&mut self) -> Result<(), Self::Error>;
    /// The instruction bytes captured at intercept time. May be empty or
    /// partial; [`emulate`] fetches additional bytes from guest memory as
    /// the decoder needs them.
    fn instruction_bytes(&self) -> &[u8];
    /// The physical address of the intercepted access, if known (used for
    /// diagnostics).
    fn physical_address(&self) -> Option<u64>;
    /// A GVA->GPA translation provided with the intercept, if any, used to
    /// pre-seed the emulator's translation cache.
    fn initial_gva_translation(&self) -> Option<InitialTranslation>;
    /// Whether an interruption is currently being delivered; emulation
    /// cannot proceed in that case.
    fn interruption_pending(&self) -> bool;
    /// Checks that the current VTL is permitted to access `gpa` with the
    /// given access mode.
    fn check_vtl_access(
        &mut self,
        gpa: u64,
        mode: TranslateMode,
    ) -> Result<(), EmuCheckVtlAccessError<Self::Error>>;
    /// Translates a guest virtual address to a guest physical address for
    /// the given access mode. The outer `Result` is a platform failure; the
    /// inner one is the translation outcome.
    fn translate_gva(
        &mut self,
        gva: u64,
        mode: TranslateMode,
    ) -> Result<Result<EmuTranslateResult, EmuTranslateError>, Self::Error>;
    /// Queues an event (exception or memory intercept) for delivery to the
    /// guest.
    fn inject_pending_event(&mut self, event_info: hvdef::HvX64PendingEvent);
    /// Returns true if the write to `gpa` was handled as a monitor-page
    /// write. The default implementation handles nothing.
    fn check_monitor_write(&self, gpa: u64, bytes: &[u8]) -> bool {
        let _ = (gpa, bytes);
        false
    }
    /// Whether `gpa` is backed by mapped guest memory (as opposed to MMIO).
    fn is_gpa_mapped(&self, gpa: u64, write: bool) -> bool;
    /// The base address of the local APIC MMIO page, if APIC accesses should
    /// be routed to `lapic_read`/`lapic_write` instead of memory.
    fn lapic_base_address(&self) -> Option<u64>;
    /// Handles a read from the local APIC page.
    fn lapic_read(&mut self, address: u64, data: &mut [u8]);
    /// Handles a write to the local APIC page.
    fn lapic_write(&mut self, address: u64, data: &[u8]);
}
/// State access needed by [`emulate_translate_gva`] to perform a software
/// page-table walk on behalf of the guest.
pub trait TranslateGvaSupport {
    /// Error type produced by the platform.
    type Error;
    /// The guest memory used to read page tables.
    fn guest_memory(&self) -> &GuestMemory;
    /// Acquires the TLB lock for this processor so the resulting
    /// translation remains stable while it is used.
    fn acquire_tlb_lock(&mut self);
    /// Reads the register state required for the page-table walk.
    fn registers(&mut self) -> Result<crate::translate::TranslationRegisters, Self::Error>;
}
/// Emulates a GVA->GPA translation via a software page-table walk.
///
/// Acquires the TLB lock, builds the validation flags matching the access
/// `mode`, and walks the page tables through [`translate_gva_to_gpa`].
/// Walk failures are reported in the inner `Err` as an
/// [`EmuTranslateError`]; only `support.registers()` failures surface as
/// the outer error.
pub fn emulate_translate_gva<T: TranslateGvaSupport>(
    support: &mut T,
    gva: u64,
    mode: TranslateMode,
) -> Result<Result<EmuTranslateResult, EmuTranslateError>, T::Error> {
    // Hold the TLB lock so the translation stays valid while the emulated
    // access that requested it is performed.
    support.acquire_tlb_lock();
    let flags = TranslateFlags {
        validate_execute: matches!(mode, TranslateMode::Execute),
        // Writes imply readability checks as well.
        validate_read: matches!(mode, TranslateMode::Read | TranslateMode::Write),
        validate_write: matches!(mode, TranslateMode::Write),
        override_smap: false,
        enforce_smap: false,
        privilege_check: TranslatePrivilegeCheck::CurrentPrivilegeLevel,
        // Set accessed/dirty bits as the hardware walker would.
        set_page_table_bits: true,
    };
    let registers = support.registers()?;
    // Fix: the previous revision contained the mojibake token `®isters`
    // (a mangled `&registers`), which fails to compile.
    let r = match translate_gva_to_gpa(support.guest_memory(), gva, &registers, flags) {
        Ok(crate::translate::TranslateResult { gpa, cache_info: _ }) => Ok(EmuTranslateResult {
            gpa,
            // The software walker does not report overlay pages.
            overlay_page: None,
        }),
        Err(err) => Err(EmuTranslateError {
            code: err.into(),
            event_info: None,
        }),
    };
    Ok(r)
}
/// Successful result of a GVA->GPA translation.
pub struct EmuTranslateResult {
    /// The translated guest physical address.
    pub gpa: u64,
    /// Whether the address refers to an overlay page; `None` when the
    /// translation source does not report overlay information.
    pub overlay_page: Option<bool>,
}
/// A translation captured with the intercept, used to pre-populate the
/// emulator's single-entry GVA->GPA cache.
pub struct InitialTranslation {
    /// Guest virtual address of the intercepted access.
    pub gva: u64,
    /// Guest physical address it translates to.
    pub gpa: u64,
    /// The access mode the translation is valid for.
    pub translate_mode: TranslateMode,
}
/// Error returned by [`EmulatorSupport::check_vtl_access`].
#[derive(Error, Debug)]
pub enum EmuCheckVtlAccessError<E> {
    /// The platform failed while performing the check.
    #[error(transparent)]
    Hypervisor(#[from] E),
    /// VTL permissions deny the access.
    #[error("failed vtl permissions access for vtl {vtl:?} and access flags {denied_flags:?}")]
    AccessDenied {
        /// The VTL whose permissions denied the access.
        vtl: hvdef::Vtl,
        /// The access flags that were denied.
        denied_flags: HvMapGpaFlags,
    },
}
/// Failure result of a GVA->GPA translation.
#[derive(Error, Debug)]
#[error("translate gva to gpa returned non-successful code {code:?}")]
pub struct EmuTranslateError {
    /// The non-successful translation result code.
    pub code: hvdef::hypercall::TranslateGvaResultCode,
    /// An event to inject into the guest, if the translation source
    /// provided one.
    pub event_info: Option<hvdef::HvX64PendingEvent>,
}
/// The access type of an emulated memory operation.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum TranslateMode {
    /// A data read.
    Read,
    /// A data write.
    Write,
    /// An instruction fetch.
    Execute,
}
/// Error for [`HvInterceptAccessType`] values that have no
/// [`TranslateMode`] equivalent.
#[derive(Debug)]
pub struct UnsupportedInterceptAccessType;
impl TryFrom<HvInterceptAccessType> for TranslateMode {
    type Error = UnsupportedInterceptAccessType;

    /// Maps a hypervisor intercept access type onto the corresponding
    /// translate mode, rejecting any unrecognized value.
    fn try_from(access_type: HvInterceptAccessType) -> Result<Self, Self::Error> {
        let mode = match access_type {
            HvInterceptAccessType::READ => TranslateMode::Read,
            HvInterceptAccessType::WRITE => TranslateMode::Write,
            HvInterceptAccessType::EXECUTE => TranslateMode::Execute,
            _ => return Err(UnsupportedInterceptAccessType),
        };
        Ok(mode)
    }
}
/// Internal failure modes of [`emulate`], surfaced to the caller via
/// `VpHaltReason::EmulationFailure`.
#[derive(Debug, Error)]
enum EmulationError<E> {
    /// Emulation was requested while an interruption was being delivered.
    #[error("an interrupt caused the memory access exit")]
    InterruptionPending,
    /// The next instruction byte to fetch lies past the CS segment limit.
    #[error("linear IP was not within CS segment limit")]
    LinearIpPastCsLimit,
    /// Flushing cached register state back to the platform failed.
    #[error("failed to flush the emulator cache")]
    CacheFlushFailed(#[source] E),
    /// Translating or checking access for the instruction stream failed.
    #[error("failed to read instruction stream")]
    InstructionRead(#[source] E),
    /// The instruction emulator itself failed.
    #[error("emulator error (instruction {bytes:02x?})")]
    Emulator {
        // The instruction bytes being emulated when the error occurred.
        bytes: Vec<u8>,
        #[source]
        error: x86emu::Error<E>,
    },
}
/// Emulates a single intercepted instruction.
///
/// Seeds the emulator with the instruction bytes captured at intercept
/// time, fetching additional bytes from guest memory whenever the decoder
/// reports it does not have enough. Guest-visible faults (translation
/// failures, VTL access denials, architectural exceptions raised by the
/// emulated instruction) are injected into the guest and reported as
/// `Ok(())`; unrecoverable problems are returned as
/// `VpHaltReason::EmulationFailure`.
pub async fn emulate<T: EmulatorSupport>(
    support: &mut T,
    gm: &GuestMemory,
    dev: &impl CpuIo,
) -> Result<(), VpHaltReason<T::Error>> {
    let vendor = support.vendor();
    // Instruction buffer: up to 16 bytes, of which only the first
    // `valid_bytes` are meaningful at any point.
    let mut bytes = [0; 16];
    let mut valid_bytes;
    {
        let instruction_bytes = support.instruction_bytes();
        valid_bytes = instruction_bytes.len();
        bytes[..valid_bytes].copy_from_slice(instruction_bytes);
    }
    let instruction_bytes = &bytes[..valid_bytes];
    tracing::trace!(
        ?instruction_bytes,
        physical_address = support.physical_address(),
        "emulating"
    );
    // Emulation cannot make progress while an interruption is being
    // delivered; the caller must resolve that first.
    if support.interruption_pending() {
        return Err(VpHaltReason::EmulationFailure(
            EmulationError::<T::Error>::InterruptionPending.into(),
        ));
    }
    // Snapshot the AC flag so we can assert below that emulation left it
    // unchanged.
    let initial_alignment_check = support.rflags().alignment_check();
    let mut cpu = EmulatorCpu::new(gm, dev, support);
    // Run the emulator, re-running with more instruction bytes whenever it
    // reports NotEnoughBytes.
    let result = loop {
        let instruction_bytes = &bytes[..valid_bytes];
        let mut emu = x86emu::Emulator::new(&mut cpu, vendor, instruction_bytes);
        let res = emu.run().await;
        if let Err(e) = &res {
            if let x86emu::Error::NotEnoughBytes = **e {
                // The decoder needs more bytes; there must be room left in
                // the 16-byte buffer.
                assert!(valid_bytes < bytes.len());
                let linear_ip =
                    emu.linear_ip(valid_bytes as u64)
                        .ok_or(VpHaltReason::EmulationFailure(
                            EmulationError::<T::Error>::LinearIpPastCsLimit.into(),
                        ))?;
                let is_user_mode = emu.is_user_mode();
                // Translate the next chunk of the instruction stream.
                let translate_result =
                    cpu.translate_gva(linear_ip, TranslateMode::Execute, is_user_mode);
                let phys_ip = match translate_result {
                    Ok(ip) => ip,
                    Err(translate_error) => {
                        // Inject the fault when it maps to one; otherwise
                        // report the failure to the caller.
                        if inject_memory_access_fault(linear_ip, &translate_error, support) {
                            return Ok(());
                        } else {
                            return Err(VpHaltReason::EmulationFailure(
                                EmulationError::InstructionRead(translate_error).into(),
                            ));
                        }
                    }
                };
                if let Err(err) = cpu.check_vtl_access(phys_ip, TranslateMode::Execute) {
                    if inject_memory_access_fault(linear_ip, &err, support) {
                        return Ok(());
                    } else {
                        return Err(VpHaltReason::EmulationFailure(
                            EmulationError::InstructionRead(err).into(),
                        ));
                    };
                }
                tracing::trace!(linear_ip, phys_ip, "fetching instruction bytes");
                // Read up to the remaining buffer capacity, but never past
                // the end of the current page (the next page would need its
                // own translation).
                let len = (bytes.len() - valid_bytes)
                    .min((HV_PAGE_SIZE - (phys_ip & (HV_PAGE_SIZE - 1))) as usize);
                if let Err(err) = cpu
                    .gm
                    .read_at(phys_ip, &mut bytes[valid_bytes..valid_bytes + len])
                {
                    // Failure to read the instruction stream is surfaced to
                    // the guest as a #GP.
                    tracing::error!(error = &err as &dyn std::error::Error, "read failed");
                    support.inject_pending_event(gpf_event());
                    return Ok(());
                }
                valid_bytes += len;
                continue;
            }
        }
        break res;
    };
    // Write back any cached register state before inspecting the result.
    cpu.support.flush().map_err(|err| {
        VpHaltReason::EmulationFailure(EmulationError::<T::Error>::CacheFlushFailed(err).into())
    })?;
    // Invariant: emulation must not have toggled the alignment-check flag.
    assert_eq!(
        initial_alignment_check,
        cpu.support.rflags().alignment_check()
    );
    let instruction_bytes = &bytes[..valid_bytes];
    if let Err(e) = result {
        match *e {
            err @ (x86emu::Error::DecodeFailure | x86emu::Error::UnsupportedInstruction { .. }) => {
                // Undecodable or unsupported instruction: inject #UD.
                tracelimit::error_ratelimited!(
                    error = &err as &dyn std::error::Error,
                    ?instruction_bytes,
                    physical_address = cpu.support.physical_address(),
                    "unsupported instruction"
                );
                cpu.support.inject_pending_event(make_exception_event(
                    Exception::INVALID_OPCODE,
                    None,
                    None,
                ));
            }
            err @ x86emu::Error::NonMemoryOrPortInstruction { .. } => {
                tracelimit::error_ratelimited!(
                    error = &err as &dyn std::error::Error,
                    ?instruction_bytes,
                    physical_address = cpu.support.physical_address(),
                    "given an instruction that we shouldn't have been asked to emulate - likely a bug in the caller"
                );
                return Err(VpHaltReason::EmulationFailure(
                    EmulationError::Emulator {
                        bytes: instruction_bytes.to_vec(),
                        error: err,
                    }
                    .into(),
                ));
            }
            x86emu::Error::InstructionException(exception, error_code, cause) => {
                // The emulated instruction architecturally faulted; deliver
                // the exception to the guest.
                tracing::trace!(
                    ?exception,
                    ?error_code,
                    ?cause,
                    "emulated instruction caused exception"
                );
                cpu.support
                    .inject_pending_event(make_exception_event(exception, error_code, None));
            }
            x86emu::Error::MemoryAccess(addr, kind, err) => {
                // Inject a fault when the memory error maps to one;
                // otherwise report an emulation failure.
                if !inject_memory_access_fault(addr, &err, support) {
                    return Err(VpHaltReason::EmulationFailure(
                        EmulationError::Emulator {
                            bytes: instruction_bytes.to_vec(),
                            error: x86emu::Error::MemoryAccess(addr, kind, err),
                        }
                        .into(),
                    ));
                }
            }
            err @ (x86emu::Error::IoPort { .. } | x86emu::Error::XmmRegister { .. }) => {
                return Err(VpHaltReason::EmulationFailure(
                    EmulationError::Emulator {
                        bytes: instruction_bytes.to_vec(),
                        error: err,
                    }
                    .into(),
                ));
            }
            // Fully handled by the fetch loop above.
            x86emu::Error::NotEnoughBytes => unreachable!(),
        }
    }
    Ok(())
}
/// A single-entry cache of the most recent successful GVA->GPA
/// translation, keyed by virtual page number and access mode.
struct GvaGpaCacheEntry {
    gva_page: u64, // guest virtual page number (gva >> HV_PAGE_SHIFT)
    gpa_page: u64, // guest physical page number (gpa >> HV_PAGE_SHIFT)
    translate_mode: TranslateMode, // access mode the translation is valid for
}
impl GvaGpaCacheEntry {
    /// Builds a cache entry from a GVA/GPA pair, storing only the page
    /// numbers; the page offset is recomputed from the GVA on lookup.
    pub fn new(gva: u64, gpa: u64, translate_mode: TranslateMode) -> Self {
        GvaGpaCacheEntry {
            gva_page: gva >> hvdef::HV_PAGE_SHIFT,
            gpa_page: gpa >> hvdef::HV_PAGE_SHIFT,
            translate_mode,
        }
    }
}
/// The emulator's CPU backend: routes register access to `support`, memory
/// access to guest memory or MMIO, and port I/O to `dev`.
struct EmulatorCpu<'a, T, U> {
    gm: &'a GuestMemory,
    support: &'a mut T,
    dev: &'a U,
    // Most recent successful translation, reused for repeat accesses to the
    // same page with the same access mode.
    cached_translation: Option<GvaGpaCacheEntry>,
}
/// Errors produced by [`EmulatorCpu`] memory and translation operations.
#[derive(Debug, Error)]
enum Error<E> {
    /// The platform failed.
    #[error(transparent)]
    Hypervisor(#[from] E),
    /// GVA->GPA translation failed; carries the event (if any) to inject
    /// into the guest.
    #[error("translation error")]
    Translate(
        #[source] TranslateGvaError,
        Option<hvdef::HvX64PendingEvent>,
    ),
    /// VTL permissions denied the access.
    #[error("vtl permissions denied access for gpa {gpa}")]
    NoVtlAccess {
        gpa: u64,
        intercepting_vtl: hvdef::Vtl,
        denied_flags: HvMapGpaFlags,
    },
    /// Reading or writing mapped guest memory failed.
    #[error("failed to access mapped memory")]
    Memory(#[source] GuestMemoryError),
}
/// Classification of GVA->GPA translation failures, grouped by how the
/// resulting guest event is constructed.
#[derive(Error, Debug)]
enum TranslateGvaError {
    /// The GPA exists but access was denied (injected as #GP).
    #[error("gpa access denied code {0:?}")]
    AccessDenied(hvdef::hypercall::TranslateGvaResultCode),
    /// A write targeted an overlay page (injected as #GP).
    #[error("write on overlay page")]
    OverlayPageWrite,
    /// An unrecognized result code; no event is injected.
    #[error("translation failed with unknown code {0:?}")]
    UnknownCode(hvdef::hypercall::TranslateGvaResultCode),
    /// The translation produced an intercept; the event comes from the
    /// translation source.
    #[error("translation failed with an intercept code")]
    Intercept,
    /// The page-table walk failed (injected as #PF).
    #[error("translation failed with a page fault-related code {0:?}")]
    PageFault(hvdef::hypercall::TranslateGvaResultCode),
}
impl<T: EmulatorSupport, U> EmulatorCpu<'_, T, U> {
    /// Creates a new emulator CPU, seeding the translation cache with the
    /// intercept-provided translation when one is available.
    pub fn new<'a>(gm: &'a GuestMemory, dev: &'a U, support: &'a mut T) -> EmulatorCpu<'a, T, U> {
        let init_cache = {
            if let Some(InitialTranslation {
                gva,
                gpa,
                translate_mode,
            }) = support.initial_gva_translation()
            {
                tracing::trace!(
                    ?gva,
                    ?gpa,
                    ?translate_mode,
                    "adding initial translation to cache"
                );
                Some(GvaGpaCacheEntry::new(gva, gpa, translate_mode))
            } else {
                None
            }
        };
        EmulatorCpu {
            gm,
            dev,
            support,
            cached_translation: init_cache,
        }
    }

    /// Translates `gva` for the given access `mode`, consulting the
    /// single-entry cache first.
    ///
    /// On failure, returns an [`Error::Translate`] carrying the event (if
    /// any) the caller should inject into the guest.
    pub fn translate_gva(
        &mut self,
        gva: u64,
        mode: TranslateMode,
        is_user_mode: bool,
    ) -> Result<u64, Error<T::Error>> {
        type TranslateCode = hvdef::hypercall::TranslateGvaResultCode;
        // Fast path: reuse the cached translation when the virtual page and
        // access mode both match.
        if let Some(GvaGpaCacheEntry {
            gva_page: cached_gva_page,
            gpa_page: cached_gpa_page,
            translate_mode: cached_mode,
        }) = self.cached_translation
        {
            if ((gva >> hvdef::HV_PAGE_SHIFT) == cached_gva_page) && (cached_mode == mode) {
                tracing::trace!(
                    ?gva,
                    ?cached_gva_page,
                    cached_gpa_page,
                    ?cached_mode,
                    "using cached entry"
                );
                // Recombine the cached physical page with the page offset.
                return Ok((cached_gpa_page << hvdef::HV_PAGE_SHIFT) + (gva & (HV_PAGE_SIZE - 1)));
            }
        };
        match self.support.translate_gva(gva, mode) {
            Ok(Ok(EmuTranslateResult { gpa, overlay_page })) => {
                // Writes to overlay pages are disallowed; deliver a #GP.
                // (Simplified from `is_some()` + `expect()`, which carried
                // an unnecessary panic path.)
                if overlay_page == Some(true) && mode == TranslateMode::Write {
                    return Err(Error::Translate(
                        TranslateGvaError::OverlayPageWrite,
                        Some(gpf_event()),
                    ));
                }
                // Cache the successful translation for subsequent accesses.
                let new_cache_entry = GvaGpaCacheEntry::new(gva, gpa, mode);
                self.cached_translation = Some(new_cache_entry);
                Ok(gpa)
            }
            Ok(Err(EmuTranslateError { code, event_info })) => {
                match code {
                    TranslateCode::INTERCEPT => {
                        // The translation source supplied the event itself.
                        tracing::trace!("translate gva to gpa returned an intercept event");
                        Err(Error::Translate(TranslateGvaError::Intercept, event_info))
                    }
                    TranslateCode::GPA_NO_READ_ACCESS
                    | TranslateCode::GPA_NO_WRITE_ACCESS
                    | TranslateCode::GPA_UNMAPPED
                    | TranslateCode::GPA_ILLEGAL_OVERLAY_ACCESS
                    | TranslateCode::GPA_UNACCEPTED => {
                        // GPA-level denials are reported to the guest as #GP.
                        tracing::trace!(
                            "translate gva to gpa returned no access to page {:?}",
                            code
                        );
                        Err(Error::Translate(
                            TranslateGvaError::AccessDenied(code),
                            Some(gpf_event()),
                        ))
                    }
                    TranslateCode::PAGE_NOT_PRESENT
                    | TranslateCode::PRIVILEGE_VIOLATION
                    | TranslateCode::INVALID_PAGE_TABLE_FLAGS => {
                        tracelimit::warn_ratelimited!(gva, ?code, "page table walk failed");
                        // Build a #PF error code reflecting the failure
                        // reason and the attempted access.
                        let mut error = x86defs::PageFaultErrorCode::new();
                        match code {
                            TranslateCode::PAGE_NOT_PRESENT => (),
                            TranslateCode::PRIVILEGE_VIOLATION => error.set_present(true),
                            TranslateCode::INVALID_PAGE_TABLE_FLAGS => {
                                error.set_present(true);
                                error.set_reserved(true);
                            }
                            _ => unreachable!(),
                        };
                        match mode {
                            TranslateMode::Execute => error.set_fetch(true),
                            TranslateMode::Write => error.set_write(true),
                            _ => (),
                        };
                        if is_user_mode {
                            error.set_user(true);
                        }
                        let event = make_exception_event(
                            Exception::PAGE_FAULT,
                            Some(error.into()),
                            Some(gva),
                        );
                        Err(Error::Translate(
                            TranslateGvaError::PageFault(code),
                            Some(event),
                        ))
                    }
                    // A success code cannot appear in the error branch.
                    TranslateCode::SUCCESS => unreachable!(),
                    _ => {
                        tracing::trace!(
                            "translate error: unknown translation result code {:?}",
                            code
                        );
                        Err(Error::Translate(TranslateGvaError::UnknownCode(code), None))
                    }
                }
            }
            Err(e) => {
                tracing::trace!("translate error {:?}", e);
                Err(Error::Hypervisor(e))
            }
        }
    }

    /// Checks VTL access permissions for `gpa`, converting a denial into
    /// [`Error::NoVtlAccess`] with the denied flags attached.
    pub fn check_vtl_access(
        &mut self,
        gpa: u64,
        mode: TranslateMode,
    ) -> Result<(), Error<T::Error>> {
        self.support
            .check_vtl_access(gpa, mode)
            .map_err(|e| match e {
                EmuCheckVtlAccessError::Hypervisor(hv_err) => Error::Hypervisor(hv_err),
                EmuCheckVtlAccessError::AccessDenied { vtl, denied_flags } => Error::NoVtlAccess {
                    gpa,
                    intercepting_vtl: vtl,
                    denied_flags,
                },
            })
    }
}
impl<T: EmulatorSupport, U: CpuIo> x86emu::Cpu for EmulatorCpu<'_, T, U> {
    type Error = Error<T::Error>;

    /// Reads guest memory at `gva`, routing local-APIC-page accesses to the
    /// platform handler and unmapped GPAs to MMIO.
    async fn read_memory(
        &mut self,
        gva: u64,
        bytes: &mut [u8],
        is_user_mode: bool,
    ) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Read, is_user_mode)?;
        // APIC page accesses bypass guest memory and the VTL check.
        if Some(gpa & !0xfff) == self.support.lapic_base_address() {
            self.support.lapic_read(gpa, bytes);
            return Ok(());
        }
        self.check_vtl_access(gpa, TranslateMode::Read)?;
        if self.support.is_gpa_mapped(gpa, false) {
            self.gm.read_at(gpa, bytes).map_err(Error::Memory)?;
        } else {
            // Unmapped GPA: treat as MMIO.
            self.dev
                .read_mmio(self.support.vp_index(), gpa, bytes)
                .await;
        }
        Ok(())
    }

    /// Writes guest memory at `gva`, routing local-APIC-page accesses to
    /// the platform handler and unmapped GPAs to MMIO.
    async fn write_memory(
        &mut self,
        gva: u64,
        bytes: &[u8],
        is_user_mode: bool,
    ) -> Result<(), Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Write, is_user_mode)?;
        // APIC page accesses bypass guest memory and the VTL check.
        if Some(gpa & !0xfff) == self.support.lapic_base_address() {
            self.support.lapic_write(gpa, bytes);
            return Ok(());
        }
        self.check_vtl_access(gpa, TranslateMode::Write)?;
        if self.support.is_gpa_mapped(gpa, true) {
            self.gm.write_at(gpa, bytes).map_err(Error::Memory)?;
        } else {
            // Unmapped GPA: treat as MMIO.
            self.dev
                .write_mmio(self.support.vp_index(), gpa, bytes)
                .await;
        }
        Ok(())
    }

    /// Performs a compare-and-write at `gva`. Returns whether the write was
    /// committed (false when the compare failed).
    async fn compare_and_write_memory(
        &mut self,
        gva: u64,
        current: &[u8],
        new: &[u8],
        is_user_mode: bool,
    ) -> Result<bool, Self::Error> {
        let gpa = self.translate_gva(gva, TranslateMode::Write, is_user_mode)?;
        self.check_vtl_access(gpa, TranslateMode::Write)?;
        let success = if self.support.check_monitor_write(gpa, new) {
            // Handled as a monitor-page write.
            true
        } else if self.support.is_gpa_mapped(gpa, true) {
            // NOTE(review): the stack buffer is 16 bytes; this slices and
            // will panic if `current` is longer — presumably the emulator
            // never passes more. Confirm against x86emu.
            let buf = &mut [0; 16][..current.len()];
            buf.copy_from_slice(current);
            self.gm
                .compare_exchange_bytes(gpa, buf, new)
                .map_err(Error::Memory)?
        } else {
            // MMIO cannot compare-exchange; write unconditionally and
            // report success.
            self.dev.write_mmio(self.support.vp_index(), gpa, new).await;
            true
        };
        Ok(success)
    }

    /// Reads from an I/O port via the device dispatcher.
    async fn read_io(&mut self, io_port: u16, bytes: &mut [u8]) -> Result<(), Self::Error> {
        self.dev
            .read_io(self.support.vp_index(), io_port, bytes)
            .await;
        Ok(())
    }

    /// Writes to an I/O port via the device dispatcher.
    async fn write_io(&mut self, io_port: u16, bytes: &[u8]) -> Result<(), Self::Error> {
        self.dev
            .write_io(self.support.vp_index(), io_port, bytes)
            .await;
        Ok(())
    }

    /// Reads a general-purpose register, applying the operand sizing
    /// encoded in `reg`.
    fn gp(&mut self, reg: RegisterIndex) -> u64 {
        let extended_register = self.support.gp(reg.extended_index);
        reg.apply_sizing(extended_register)
    }

    /// Reads a general-purpose register sign-extended per the sizing in
    /// `reg`.
    fn gp_sign_extend(&mut self, reg: RegisterIndex) -> i64 {
        let extended_register = self.support.gp(reg.extended_index);
        reg.apply_sizing_signed(extended_register)
    }

    /// Writes a general-purpose register, merging `v` into the existing
    /// value per the sizing rules encoded in `reg`.
    fn set_gp(&mut self, reg: RegisterIndex, v: u64) {
        let register_value = self.gp(reg);
        let updated_register_value = reg.apply_update(register_value, v);
        self.support
            .set_gp(reg.extended_index, updated_register_value);
    }

    fn rip(&mut self) -> u64 {
        self.support.rip()
    }

    fn set_rip(&mut self, v: u64) {
        self.support.set_rip(v);
    }

    fn segment(&mut self, index: Segment) -> SegmentRegister {
        self.support.segment(index)
    }

    fn efer(&mut self) -> u64 {
        self.support.efer()
    }

    fn cr0(&mut self) -> u64 {
        self.support.cr0()
    }

    fn rflags(&mut self) -> RFlags {
        self.support.rflags()
    }

    fn set_rflags(&mut self, v: RFlags) {
        self.support.set_rflags(v);
    }

    fn xmm(&mut self, reg: usize) -> u128 {
        self.support.xmm(reg)
    }

    fn set_xmm(&mut self, reg: usize, value: u128) -> Result<(), Self::Error> {
        self.support.set_xmm(reg, value).map_err(Error::Hypervisor)
    }
}
/// Emulates an x86 IN/OUT port access of `len` bytes against the low bytes
/// of `rax`.
pub async fn emulate_io(
    vp_index: VpIndex,
    is_write: bool,
    port: u16,
    rax: &mut u64,
    len: u8,
    dev: &impl CpuIo,
) {
    let width = usize::from(len);
    if is_write {
        // OUT: send the low `width` bytes of rax to the port.
        let data = rax.to_ne_bytes();
        dev.write_io(vp_index, port, &data[..width]).await;
    } else {
        // IN: overwrite only the low `width` bytes. The remaining bytes of
        // eax are preserved, while the upper 32 bits of rax are cleared by
        // the zero-extending store below.
        let mut data = (*rax as u32).to_ne_bytes();
        dev.read_io(vp_index, port, &mut data[..width]).await;
        *rax = u64::from(u32::from_ne_bytes(data));
    }
}
/// If `result` corresponds to a guest-visible fault, injects the matching
/// event and returns true. Returns false when the error carries no event
/// and must instead be reported by the caller.
#[must_use]
fn inject_memory_access_fault<T: EmulatorSupport>(
    gva: u64,
    result: &Error<T::Error>,
    support: &mut T,
) -> bool {
    match result {
        Error::Translate(e, event) => {
            tracing::trace!(
                error = e as &dyn std::error::Error,
                "translation failed, injecting event"
            );
            // Only some translation failures carry an injectable event.
            if let Some(event_info) = event {
                support.inject_pending_event(*event_info);
                return true;
            }
            false
        }
        Error::NoVtlAccess {
            gpa,
            intercepting_vtl,
            denied_flags,
        } => {
            tracing::trace!(
                error = result as &dyn std::error::Error,
                ?gva,
                ?gpa,
                "Vtl permissions checking failed"
            );
            // VTL denials always produce an event (memory intercept or #GP).
            let event = vtl_access_event(gva, *gpa, *intercepting_vtl, *denied_flags);
            support.inject_pending_event(event);
            true
        }
        // Platform and guest-memory errors are not guest faults.
        Error::Hypervisor(_) | Error::Memory(_) => false,
    }
}
/// Builds a pending exception event for the given vector, with an optional
/// error code and exception parameter.
fn make_exception_event(
    exception: Exception,
    error_code: Option<u32>,
    exception_parameter: Option<u64>,
) -> hvdef::HvX64PendingEvent {
    // The error code is delivered only when one was supplied.
    let deliver_error_code = error_code.is_some();
    let event = hvdef::HvX64PendingExceptionEvent::new()
        .with_event_pending(true)
        .with_event_type(hvdef::HV_X64_PENDING_EVENT_EXCEPTION)
        .with_deliver_error_code(deliver_error_code)
        .with_error_code(error_code.unwrap_or(0))
        .with_vector(exception.0.into())
        .with_exception_parameter(exception_parameter.unwrap_or(0));
    event.into()
}
/// Builds a general-protection fault (#GP) event with error code 0.
fn gpf_event() -> hvdef::HvX64PendingEvent {
    make_exception_event(Exception::GENERAL_PROTECTION_FAULT, Some(0), None)
}
/// Builds the event to inject when VTL permissions deny an access: a
/// page-aligned memory-intercept event targeting the intercepting VTL, or
/// a #GP when the intercepting VTL is VTL2.
fn vtl_access_event(
    gva: u64,
    gpa: u64,
    intercepting_vtl: hvdef::Vtl,
    denied_access: HvMapGpaFlags,
) -> hvdef::HvX64PendingEvent {
    if intercepting_vtl != hvdef::Vtl::Vtl2 {
        let event_header = hvdef::HvX64PendingEventMemoryInterceptPendingEventHeader::new()
            .with_event_pending(true)
            .with_event_type(hvdef::HV_X64_PENDING_EVENT_MEMORY_INTERCEPT);
        let access_flags = hvdef::HvX64PendingEventMemoryInterceptAccessFlags::new()
            .with_guest_linear_address_valid(true)
            .with_caused_by_gpa_access(true);
        // Classify the denied access, preferring execute over write over
        // read when multiple flags are denied.
        let access_type = if denied_access.kernel_executable() || denied_access.user_executable() {
            HvInterceptAccessType::EXECUTE
        } else if denied_access.writable() {
            HvInterceptAccessType::WRITE
        } else {
            HvInterceptAccessType::READ
        };
        let memory_event = hvdef::HvX64PendingEventMemoryIntercept {
            event_header,
            target_vtl: intercepting_vtl.into(),
            access_type,
            access_flags,
            _reserved2: 0,
            // Both addresses are reported page-aligned.
            guest_linear_address: (gva >> hvdef::HV_PAGE_SHIFT) << hvdef::HV_PAGE_SHIFT,
            guest_physical_address: (gpa >> hvdef::HV_PAGE_SHIFT) << hvdef::HV_PAGE_SHIFT,
            _reserved3: 0,
        };
        // Reinterpret the memory-intercept layout as the generic pending
        // event; the two types must be the same size.
        hvdef::HvX64PendingEvent::read_from_bytes(memory_event.as_bytes())
            .expect("memory event and pending event should be the same size")
    } else {
        gpf_event()
    }
}
/// Attempts fast-path emulation of a monitor (MNF) page set-bit write
/// without running the full instruction emulator.
///
/// Returns `Ok(None)` when the fast path cannot be taken: an interruption
/// is pending, the TLB lock is not held, no instruction bytes were
/// captured, or the instruction is not a recognized set-bit pattern.
pub fn emulate_mnf_write_fast_path<T: EmulatorSupport>(
    support: &mut T,
    gm: &GuestMemory,
    dev: &impl CpuIo,
    interruption_pending: bool,
    tlb_lock_held: bool,
) -> Result<Option<u32>, VpHaltReason<T::Error>> {
    // Bail out before doing any other work. (Previously the instruction
    // bytes were fetched twice and the EmulatorCpu — including its initial
    // translation lookup — was constructed even on this early-return path.)
    if interruption_pending || !tlb_lock_held {
        return Ok(None);
    }
    // Copy the captured instruction bytes once into a fixed 16-byte buffer.
    let mut bytes = [0; 16];
    let valid_bytes = {
        let instruction_bytes = support.instruction_bytes();
        let valid_bytes = instruction_bytes.len();
        if valid_bytes == 0 {
            return Ok(None);
        }
        bytes[..valid_bytes].copy_from_slice(instruction_bytes);
        valid_bytes
    };
    let mut cpu = EmulatorCpu::new(gm, dev, support);
    let bit = x86emu::fast_path::emulate_fast_path_set_bit(&bytes[..valid_bytes], &mut cpu);
    // Write any cached register updates back to the platform.
    support.flush().map_err(|err| {
        VpHaltReason::EmulationFailure(EmulationError::<T::Error>::CacheFlushFailed(err).into())
    })?;
    Ok(bit)
}