pub mod apic;
pub mod tlb_lock;
use super::UhEmulationState;
use super::UhProcessor;
use super::UhRunVpError;
use crate::CvmVtl1State;
use crate::GuestVsmState;
use crate::GuestVtl;
use crate::InitialVpContextOperation;
use crate::TlbFlushLockAccess;
use crate::VpStartEnableVtl;
use crate::WakeReason;
use crate::processor::HardwareIsolatedBacking;
use crate::processor::UhHypercallHandler;
use crate::validate_vtl_gpa_flags;
use guestmem::GuestMemory;
use hv1_emulator::RequestInterrupt;
use hv1_hypercall::HvRepResult;
use hv1_structs::ProcessorSet;
use hvdef::HvCacheType;
use hvdef::HvError;
use hvdef::HvMapGpaFlags;
use hvdef::HvRegisterVsmPartitionConfig;
use hvdef::HvRegisterVsmVpSecureVtlConfig;
use hvdef::HvResult;
use hvdef::HvSynicSint;
use hvdef::HvVtlEntryReason;
use hvdef::HvX64RegisterName;
use hvdef::Vtl;
use hvdef::hypercall::HostVisibilityType;
use hvdef::hypercall::HvFlushFlags;
use hvdef::hypercall::TranslateGvaResultCode;
use std::iter::zip;
use virt::Processor;
use virt::io::CpuIo;
use virt::vp::AccessVpState;
use virt::x86::MsrError;
use virt_support_x86emu::emulate::TranslateGvaSupport;
use virt_support_x86emu::translate::TranslateCachingInfo;
use virt_support_x86emu::translate::TranslationRegisters;
use zerocopy::FromZeros;
impl<T, B: HardwareIsolatedBacking> UhHypercallHandler<'_, '_, T, B> {
fn validate_register_access(
&mut self,
vtl: GuestVtl,
name: hvdef::HvRegisterName,
) -> HvResult<()> {
match name.into() {
HvX64RegisterName::Star
| HvX64RegisterName::Lstar
| HvX64RegisterName::Cstar
| HvX64RegisterName::SysenterCs
| HvX64RegisterName::SysenterEip
| HvX64RegisterName::SysenterEsp
| HvX64RegisterName::Sfmask
| HvX64RegisterName::Xfem
| HvX64RegisterName::KernelGsBase
| HvX64RegisterName::Efer
| HvX64RegisterName::Cr0
| HvX64RegisterName::Cr2
| HvX64RegisterName::Cr3
| HvX64RegisterName::Cr4
| HvX64RegisterName::Cr8
| HvX64RegisterName::Dr0
| HvX64RegisterName::Dr1
| HvX64RegisterName::Dr2
| HvX64RegisterName::Dr3
| HvX64RegisterName::Dr7
| HvX64RegisterName::Es
| HvX64RegisterName::Cs
| HvX64RegisterName::Ss
| HvX64RegisterName::Ds
| HvX64RegisterName::Fs
| HvX64RegisterName::Gs
| HvX64RegisterName::Tr
| HvX64RegisterName::Ldtr
| HvX64RegisterName::Gdtr
| HvX64RegisterName::Idtr
| HvX64RegisterName::Rip
| HvX64RegisterName::Rflags
| HvX64RegisterName::Rax
| HvX64RegisterName::Rcx
| HvX64RegisterName::Rdx
| HvX64RegisterName::Rbx
| HvX64RegisterName::Rsp
| HvX64RegisterName::Rbp
| HvX64RegisterName::Rsi
| HvX64RegisterName::Rdi
| HvX64RegisterName::R8
| HvX64RegisterName::R9
| HvX64RegisterName::R10
| HvX64RegisterName::R11
| HvX64RegisterName::R12
| HvX64RegisterName::R13
| HvX64RegisterName::R14
| HvX64RegisterName::R15
| HvX64RegisterName::Pat => {
if vtl >= self.intercepted_vtl {
return Err(HvError::AccessDenied);
}
Ok(())
}
HvX64RegisterName::TscAux => {
if vtl >= self.intercepted_vtl {
return Err(HvError::AccessDenied);
}
if self.vp.partition.caps.tsc_aux {
Ok(())
} else {
Err(HvError::InvalidParameter)
}
}
_ => Ok(()),
}
}
fn reg_access_error_to_hv_err(err: crate::processor::vp_state::Error) -> HvError {
tracing::trace!(?err, "failed on register access");
match err {
super::vp_state::Error::SetRegisters(_) => HvError::OperationFailed,
super::vp_state::Error::GetRegisters(_) => HvError::OperationFailed,
super::vp_state::Error::SetEfer(_, _) => HvError::InvalidRegisterValue,
super::vp_state::Error::Unimplemented(_) => HvError::InvalidParameter,
super::vp_state::Error::InvalidApicBase(_) => HvError::InvalidRegisterValue,
}
}
fn get_vp_register(
&mut self,
vtl: GuestVtl,
name: hvdef::HvRegisterName,
) -> HvResult<hvdef::HvRegisterValue> {
self.validate_register_access(vtl, name)?;
match name.into() {
HvX64RegisterName::VsmCodePageOffsets => Ok(u64::from(
self.vp.backing.cvm_state_mut().hv[vtl].vsm_code_page_offsets(true),
)
.into()),
HvX64RegisterName::VsmCapabilities => Ok(u64::from(
hvdef::HvRegisterVsmCapabilities::new()
.with_deny_lower_vtl_startup(true)
.with_dr6_shared(self.vp.partition.hcl.dr6_shared()),
)
.into()),
HvX64RegisterName::VsmVpSecureConfigVtl0 => {
Ok(u64::from(self.vp.get_vsm_vp_secure_config_vtl(vtl, GuestVtl::Vtl0)?).into())
}
HvX64RegisterName::VpAssistPage => Ok(self.vp.backing.cvm_state_mut().hv[vtl]
.vp_assist_page()
.into()),
virt_msr @ (HvX64RegisterName::Star
| HvX64RegisterName::Lstar
| HvX64RegisterName::Cstar
| HvX64RegisterName::SysenterCs
| HvX64RegisterName::SysenterEip
| HvX64RegisterName::SysenterEsp
| HvX64RegisterName::Sfmask
| HvX64RegisterName::KernelGsBase) => {
let msrs = self
.vp
.access_state(vtl.into())
.virtual_msrs()
.map_err(Self::reg_access_error_to_hv_err)?;
match virt_msr {
HvX64RegisterName::Star => Ok(msrs.star.into()),
HvX64RegisterName::Lstar => Ok(msrs.lstar.into()),
HvX64RegisterName::Cstar => Ok(msrs.cstar.into()),
HvX64RegisterName::SysenterCs => Ok(msrs.sysenter_cs.into()),
HvX64RegisterName::SysenterEip => Ok(msrs.sysenter_eip.into()),
HvX64RegisterName::SysenterEsp => Ok(msrs.sysenter_esp.into()),
HvX64RegisterName::Sfmask => Ok(msrs.sfmask.into()),
HvX64RegisterName::KernelGsBase => Ok(msrs.kernel_gs_base.into()),
_ => unreachable!(),
}
}
HvX64RegisterName::Xfem => Ok(self
.vp
.access_state(vtl.into())
.xcr()
.map_err(Self::reg_access_error_to_hv_err)?
.value
.into()),
HvX64RegisterName::TscAux => Ok(self
.vp
.access_state(vtl.into())
.tsc_aux()
.map_err(Self::reg_access_error_to_hv_err)?
.value
.into()),
register @ (HvX64RegisterName::Efer
| HvX64RegisterName::Cr0
| HvX64RegisterName::Cr2
| HvX64RegisterName::Cr3
| HvX64RegisterName::Cr4
| HvX64RegisterName::Cr8
| HvX64RegisterName::Es
| HvX64RegisterName::Cs
| HvX64RegisterName::Ss
| HvX64RegisterName::Ds
| HvX64RegisterName::Fs
| HvX64RegisterName::Gs
| HvX64RegisterName::Tr
| HvX64RegisterName::Ldtr
| HvX64RegisterName::Gdtr
| HvX64RegisterName::Idtr
| HvX64RegisterName::Rip
| HvX64RegisterName::Rflags
| HvX64RegisterName::Rax
| HvX64RegisterName::Rcx
| HvX64RegisterName::Rdx
| HvX64RegisterName::Rbx
| HvX64RegisterName::Rsp
| HvX64RegisterName::Rbp
| HvX64RegisterName::Rsi
| HvX64RegisterName::Rdi
| HvX64RegisterName::R8
| HvX64RegisterName::R9
| HvX64RegisterName::R10
| HvX64RegisterName::R11
| HvX64RegisterName::R12
| HvX64RegisterName::R13
| HvX64RegisterName::R14
| HvX64RegisterName::R15) => {
let registers = self
.vp
.access_state(vtl.into())
.registers()
.map_err(Self::reg_access_error_to_hv_err)?;
match register {
HvX64RegisterName::Efer => Ok(registers.efer.into()),
HvX64RegisterName::Cr0 => Ok(registers.cr0.into()),
HvX64RegisterName::Cr2 => Ok(registers.cr2.into()),
HvX64RegisterName::Cr3 => Ok(registers.cr3.into()),
HvX64RegisterName::Cr4 => Ok(registers.cr4.into()),
HvX64RegisterName::Cr8 => Ok(registers.cr8.into()),
HvX64RegisterName::Es => {
Ok(hvdef::HvX64SegmentRegister::from(registers.es).into())
}
HvX64RegisterName::Cs => {
Ok(hvdef::HvX64SegmentRegister::from(registers.cs).into())
}
HvX64RegisterName::Ss => {
Ok(hvdef::HvX64SegmentRegister::from(registers.ss).into())
}
HvX64RegisterName::Ds => {
Ok(hvdef::HvX64SegmentRegister::from(registers.ds).into())
}
HvX64RegisterName::Fs => {
Ok(hvdef::HvX64SegmentRegister::from(registers.fs).into())
}
HvX64RegisterName::Gs => {
Ok(hvdef::HvX64SegmentRegister::from(registers.gs).into())
}
HvX64RegisterName::Tr => {
Ok(hvdef::HvX64SegmentRegister::from(registers.tr).into())
}
HvX64RegisterName::Ldtr => {
Ok(hvdef::HvX64SegmentRegister::from(registers.ldtr).into())
}
HvX64RegisterName::Gdtr => {
Ok(hvdef::HvX64TableRegister::from(registers.gdtr).into())
}
HvX64RegisterName::Idtr => {
Ok(hvdef::HvX64TableRegister::from(registers.idtr).into())
}
HvX64RegisterName::Rip => Ok(registers.rip.into()),
HvX64RegisterName::Rflags => Ok(registers.rflags.into()),
HvX64RegisterName::Rax => Ok(registers.rax.into()),
HvX64RegisterName::Rcx => Ok(registers.rcx.into()),
HvX64RegisterName::Rdx => Ok(registers.rdx.into()),
HvX64RegisterName::Rbx => Ok(registers.rbx.into()),
HvX64RegisterName::Rsp => Ok(registers.rsp.into()),
HvX64RegisterName::Rbp => Ok(registers.rbp.into()),
HvX64RegisterName::Rsi => Ok(registers.rsi.into()),
HvX64RegisterName::Rdi => Ok(registers.rdi.into()),
HvX64RegisterName::R8 => Ok(registers.r8.into()),
HvX64RegisterName::R9 => Ok(registers.r9.into()),
HvX64RegisterName::R10 => Ok(registers.r10.into()),
HvX64RegisterName::R11 => Ok(registers.r11.into()),
HvX64RegisterName::R12 => Ok(registers.r12.into()),
HvX64RegisterName::R13 => Ok(registers.r13.into()),
HvX64RegisterName::R14 => Ok(registers.r14.into()),
HvX64RegisterName::R15 => Ok(registers.r15.into()),
_ => unreachable!(),
}
}
debug_reg @ (HvX64RegisterName::Dr0
| HvX64RegisterName::Dr1
| HvX64RegisterName::Dr2
| HvX64RegisterName::Dr3
| HvX64RegisterName::Dr7) => {
let debug_regs = self
.vp
.access_state(vtl.into())
.debug_regs()
.map_err(Self::reg_access_error_to_hv_err)?;
match debug_reg {
HvX64RegisterName::Dr0 => Ok(debug_regs.dr0.into()),
HvX64RegisterName::Dr1 => Ok(debug_regs.dr1.into()),
HvX64RegisterName::Dr2 => Ok(debug_regs.dr2.into()),
HvX64RegisterName::Dr3 => Ok(debug_regs.dr3.into()),
HvX64RegisterName::Dr7 => Ok(debug_regs.dr7.into()),
_ => unreachable!(),
}
}
HvX64RegisterName::Pat => Ok(self
.vp
.access_state(vtl.into())
.pat()
.map_err(Self::reg_access_error_to_hv_err)?
.value
.into()),
synic_reg @ (HvX64RegisterName::Sint0
| HvX64RegisterName::Sint1
| HvX64RegisterName::Sint2
| HvX64RegisterName::Sint3
| HvX64RegisterName::Sint4
| HvX64RegisterName::Sint5
| HvX64RegisterName::Sint6
| HvX64RegisterName::Sint7
| HvX64RegisterName::Sint8
| HvX64RegisterName::Sint9
| HvX64RegisterName::Sint10
| HvX64RegisterName::Sint11
| HvX64RegisterName::Sint12
| HvX64RegisterName::Sint13
| HvX64RegisterName::Sint14
| HvX64RegisterName::Sint15
| HvX64RegisterName::Scontrol
| HvX64RegisterName::Sversion
| HvX64RegisterName::Sifp
| HvX64RegisterName::Sipp
| HvX64RegisterName::Eom
| HvX64RegisterName::Stimer0Config
| HvX64RegisterName::Stimer0Count
| HvX64RegisterName::Stimer1Config
| HvX64RegisterName::Stimer1Count
| HvX64RegisterName::Stimer2Config
| HvX64RegisterName::Stimer2Count
| HvX64RegisterName::Stimer3Config
| HvX64RegisterName::Stimer3Count
| HvX64RegisterName::VsmVina) => self.vp.backing.cvm_state_mut().hv[vtl]
.synic
.read_reg(synic_reg.into()),
HvX64RegisterName::ApicBase => Ok(self.vp.backing.cvm_state_mut().lapics[vtl]
.lapic
.apic_base()
.into()),
_ => {
tracing::error!(
?name,
"guest invoked getvpregister with unsupported register"
);
Err(HvError::InvalidParameter)
}
}
}
fn set_vp_register(
&mut self,
vtl: GuestVtl,
reg: &hvdef::hypercall::HvRegisterAssoc,
) -> HvResult<()> {
self.validate_register_access(vtl, reg.name)?;
match HvX64RegisterName::from(reg.name) {
HvX64RegisterName::VsmPartitionConfig => self.vp.set_vsm_partition_config(
HvRegisterVsmPartitionConfig::from(reg.value.as_u64()),
vtl,
),
HvX64RegisterName::VsmVpSecureConfigVtl0 => self.vp.set_vsm_vp_secure_config_vtl(
vtl,
GuestVtl::Vtl0,
HvRegisterVsmVpSecureVtlConfig::from(reg.value.as_u64()),
),
HvX64RegisterName::VpAssistPage => self.vp.backing.cvm_state_mut().hv[vtl]
.msr_write_vp_assist_page(reg.value.as_u64())
.map_err(|_| HvError::InvalidRegisterValue),
virt_msr @ (HvX64RegisterName::Star
| HvX64RegisterName::Cstar
| HvX64RegisterName::Lstar
| HvX64RegisterName::SysenterCs
| HvX64RegisterName::SysenterEip
| HvX64RegisterName::SysenterEsp
| HvX64RegisterName::Sfmask) => {
let mut msrs = self
.vp
.access_state(vtl.into())
.virtual_msrs()
.map_err(Self::reg_access_error_to_hv_err)?;
match virt_msr {
HvX64RegisterName::Star => msrs.star = reg.value.as_u64(),
HvX64RegisterName::Cstar => msrs.cstar = reg.value.as_u64(),
HvX64RegisterName::Lstar => msrs.lstar = reg.value.as_u64(),
HvX64RegisterName::SysenterCs => msrs.sysenter_cs = reg.value.as_u64(),
HvX64RegisterName::SysenterEip => msrs.sysenter_eip = reg.value.as_u64(),
HvX64RegisterName::SysenterEsp => msrs.sysenter_esp = reg.value.as_u64(),
HvX64RegisterName::Sfmask => msrs.sfmask = reg.value.as_u64(),
_ => unreachable!(),
}
self.vp
.access_state(vtl.into())
.set_virtual_msrs(&msrs)
.map_err(Self::reg_access_error_to_hv_err)
}
HvX64RegisterName::TscAux => self
.vp
.access_state(vtl.into())
.set_tsc_aux(&virt::vp::TscAux {
value: reg.value.as_u64(),
})
.map_err(Self::reg_access_error_to_hv_err),
debug_reg @ (HvX64RegisterName::Dr3 | HvX64RegisterName::Dr7) => {
let mut debug_registers = self
.vp
.access_state(vtl.into())
.debug_regs()
.map_err(Self::reg_access_error_to_hv_err)?;
match debug_reg {
HvX64RegisterName::Dr3 => debug_registers.dr3 = reg.value.as_u64(),
HvX64RegisterName::Dr7 => debug_registers.dr7 = reg.value.as_u64(),
_ => unreachable!(),
}
self.vp
.access_state(vtl.into())
.set_debug_regs(&debug_registers)
.map_err(Self::reg_access_error_to_hv_err)
}
HvX64RegisterName::Pat => self
.vp
.access_state(vtl.into())
.set_pat(&virt::vp::Pat {
value: reg.value.as_u64(),
})
.map_err(Self::reg_access_error_to_hv_err),
register @ (HvX64RegisterName::Efer
| HvX64RegisterName::Cr0
| HvX64RegisterName::Cr4
| HvX64RegisterName::Cr8
| HvX64RegisterName::Ldtr
| HvX64RegisterName::Gdtr
| HvX64RegisterName::Idtr
| HvX64RegisterName::Rip
| HvX64RegisterName::Rflags
| HvX64RegisterName::Rsp) => {
let mut registers = self
.vp
.access_state(vtl.into())
.registers()
.map_err(Self::reg_access_error_to_hv_err)?;
match register {
HvX64RegisterName::Efer => registers.efer = reg.value.as_u64(),
HvX64RegisterName::Cr0 => registers.cr0 = reg.value.as_u64(),
HvX64RegisterName::Cr4 => registers.cr4 = reg.value.as_u64(),
HvX64RegisterName::Cr8 => registers.cr8 = reg.value.as_u64(),
HvX64RegisterName::Ldtr => {
registers.ldtr = hvdef::HvX64SegmentRegister::from(reg.value).into()
}
HvX64RegisterName::Gdtr => {
registers.gdtr = hvdef::HvX64TableRegister::from(reg.value).into()
}
HvX64RegisterName::Idtr => {
registers.idtr = hvdef::HvX64TableRegister::from(reg.value).into()
}
HvX64RegisterName::Rip => registers.rip = reg.value.as_u64(),
HvX64RegisterName::Rflags => registers.rflags = reg.value.as_u64(),
HvX64RegisterName::Rsp => registers.rsp = reg.value.as_u64(),
_ => unreachable!(),
}
self.vp
.access_state(vtl.into())
.set_registers(®isters)
.map_err(Self::reg_access_error_to_hv_err)
}
synic_reg @ (HvX64RegisterName::Sint0
| HvX64RegisterName::Sint1
| HvX64RegisterName::Sint2
| HvX64RegisterName::Sint3
| HvX64RegisterName::Sint4
| HvX64RegisterName::Sint5
| HvX64RegisterName::Sint6
| HvX64RegisterName::Sint7
| HvX64RegisterName::Sint8
| HvX64RegisterName::Sint9
| HvX64RegisterName::Sint10
| HvX64RegisterName::Sint11
| HvX64RegisterName::Sint12
| HvX64RegisterName::Sint13
| HvX64RegisterName::Sint14
| HvX64RegisterName::Sint15
| HvX64RegisterName::Scontrol
| HvX64RegisterName::Sversion
| HvX64RegisterName::Sifp
| HvX64RegisterName::Sipp
| HvX64RegisterName::Eom
| HvX64RegisterName::Stimer0Config
| HvX64RegisterName::Stimer0Count
| HvX64RegisterName::Stimer1Config
| HvX64RegisterName::Stimer1Count
| HvX64RegisterName::Stimer2Config
| HvX64RegisterName::Stimer2Count
| HvX64RegisterName::Stimer3Config
| HvX64RegisterName::Stimer3Count
| HvX64RegisterName::VsmVina) => self.vp.backing.cvm_state_mut().hv[vtl]
.synic
.write_reg(&self.vp.partition.gm[vtl], synic_reg.into(), reg.value),
HvX64RegisterName::ApicBase => {
let current = self.vp.backing.cvm_state_mut().lapics[vtl]
.lapic
.apic_base();
if reg.value.as_u64() != current {
return Err(HvError::InvalidParameter);
}
Ok(())
}
_ => {
tracing::error!(
?reg,
"guest invoked SetVpRegisters with unsupported register",
);
Err(HvError::InvalidParameter)
}
}
}
}
impl<T: CpuIo, B: HardwareIsolatedBacking> hv1_hypercall::ModifySparseGpaPageHostVisibility
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvModifySparseGpaPageHostVisibility: changes whether the given
    /// guest physical pages are shared with the host or private to the guest.
    fn modify_gpa_visibility(
        &mut self,
        partition_id: u64,
        visibility: HostVisibilityType,
        gpa_pages: &[u64],
    ) -> HvRepResult {
        // Only the self-partition may be targeted.
        if partition_id != hvdef::HV_PARTITION_ID_SELF {
            return Err((HvError::AccessDenied, 0));
        }
        tracing::debug!(
            ?visibility,
            pages = gpa_pages.len(),
            "modify_gpa_visibility"
        );
        // Visibility changes are denied when isolation is hidden from the guest.
        if self.vp.cvm_partition().hide_isolation {
            return Err((HvError::AccessDenied, 0));
        }
        // Map the hypercall visibility type onto shared (true) / private (false).
        let shared = match visibility {
            HostVisibilityType::SHARED => true,
            HostVisibilityType::PRIVATE => false,
            _ => return Err((HvError::InvalidParameter, 0)),
        };
        let protector = &self.vp.cvm_partition().isolated_memory_protector;
        protector.change_host_visibility(shared, gpa_pages, &mut self.vp.tlb_flush_lock_access())
    }
}
impl<T: CpuIo, B: HardwareIsolatedBacking> UhHypercallHandler<'_, '_, T, B> {
    /// Retargets a physical (host-delivered) MSI device interrupt to the
    /// given set of processors via the HCL.
    fn retarget_physical_interrupt(
        &mut self,
        device_id: u64,
        address: u64,
        data: u32,
        vector: u32,
        multicast: bool,
        target_processors: ProcessorSet<'_>,
    ) -> HvResult<()> {
        // Request that other VPs refresh their proxy IRR filters for this
        // vector (presumably so the retargeted vector is not filtered out —
        // confirm against request_proxy_irr_filter_update), then refresh this
        // VP's own filter before programming the HCL.
        self.vp.partition.request_proxy_irr_filter_update(
            self.intercepted_vtl,
            vector as u8,
            self.vp.vp_index(),
        );
        self.vp.update_proxy_irr_filter(self.intercepted_vtl);
        self.vp.partition.hcl.retarget_device_interrupt(
            device_id,
            hvdef::hypercall::InterruptEntry {
                source: hvdef::hypercall::HvInterruptSource::MSI,
                rsvd: 0,
                // MSI address (low 32 bits) and data payload.
                data: [address as u32, data],
            },
            vector,
            multicast,
            target_processors,
        )
    }
    /// Validates the inputs common to the HvFlushVirtualAddress* hypercalls.
    ///
    /// Fails with `InvalidParameter` if any unsupported flag is set, or if no
    /// target processors were specified without `all_processors`.
    pub(crate) fn hcvm_validate_flush_inputs(
        &mut self,
        processor_set: ProcessorSet<'_>,
        flags: HvFlushFlags,
        allow_extended_ranges: bool,
    ) -> HvResult<()> {
        // The extended-range flag is only valid for the Ex/list variants.
        let valid_flags = HvFlushFlags::new()
            .with_all_processors(true)
            .with_all_virtual_address_spaces(true)
            .with_non_global_mappings_only(true)
            .with_use_extended_range_format(allow_extended_ranges);
        if u64::from(flags) & !u64::from(valid_flags) != 0 {
            return Err(HvError::InvalidParameter);
        }
        // An empty target set is only meaningful with all_processors.
        if processor_set.is_empty() && !flags.all_processors() {
            return Err(HvError::InvalidParameter);
        }
        Ok(())
    }
}
impl<T, B: HardwareIsolatedBacking> hv1_hypercall::GetVpRegisters
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvGetVpRegisters. Only the self-partition and the calling VP
    /// may be targeted, and the target VTL must not exceed the intercepting
    /// VTL.
    fn get_vp_registers(
        &mut self,
        partition_id: u64,
        vp_index: u32,
        vtl: Option<Vtl>,
        registers: &[hvdef::HvRegisterName],
        output: &mut [hvdef::HvRegisterValue],
    ) -> HvRepResult {
        if partition_id != hvdef::HV_PARTITION_ID_SELF {
            return Err((HvError::AccessDenied, 0));
        }
        let targets_self =
            vp_index == hvdef::HV_VP_INDEX_SELF || vp_index == self.vp.vp_index().index();
        if !targets_self {
            return Err((HvError::AccessDenied, 0));
        }
        // Default to the intercepting VTL; reject attempts to reach a higher one.
        let requested = vtl.unwrap_or_else(|| self.intercepted_vtl.into());
        let vtl = self.target_vtl_no_higher(requested).map_err(|e| (e, 0))?;
        // On failure, report the index of the failing register as the
        // reps-completed count.
        for (i, (&name, slot)) in registers.iter().zip(output.iter_mut()).enumerate() {
            *slot = self.get_vp_register(vtl, name).map_err(|e| (e, i))?;
        }
        Ok(())
    }
}
impl<T: CpuIo, B: HardwareIsolatedBacking> hv1_hypercall::RetargetDeviceInterrupt
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvRetargetDeviceInterrupt by attempting both the physical
    /// (host-delivered) and virtual (emulated) retarget; the overall call
    /// succeeds if either does.
    fn retarget_interrupt(
        &mut self,
        device_id: u64,
        address: u64,
        data: u32,
        params: hv1_hypercall::HvInterruptParameters<'_>,
    ) -> HvResult<()> {
        let vector = params.vector;
        let multicast = params.multicast;
        let target_processors = params.target_processors;
        // Both retargets run unconditionally; the physical result takes
        // precedence when it succeeds.
        let physical_result = self.retarget_physical_interrupt(
            device_id,
            address,
            data,
            vector,
            multicast,
            target_processors,
        );
        let virtual_result = self.retarget_virtual_interrupt(
            device_id,
            address,
            data,
            vector,
            multicast,
            target_processors,
        );
        physical_result.or(virtual_result)
    }
}
impl<T, B: HardwareIsolatedBacking> hv1_hypercall::SetVpRegisters
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvSetVpRegisters for the calling VP on the self-partition.
    fn set_vp_registers(
        &mut self,
        partition_id: u64,
        vp_index: u32,
        vtl: Option<Vtl>,
        registers: &[hvdef::hypercall::HvRegisterAssoc],
    ) -> HvRepResult {
        if partition_id != hvdef::HV_PARTITION_ID_SELF {
            return Err((HvError::AccessDenied, 0));
        }
        if vp_index != hvdef::HV_VP_INDEX_SELF && vp_index != self.vp.vp_index().index() {
            return Err((HvError::InvalidVpIndex, 0));
        }
        // Default to the intercepting VTL when no VTL was specified; a VTL
        // outside the guest range is an invalid parameter.
        let target_vtl = match vtl {
            None => self.intercepted_vtl,
            Some(vtl) => vtl
                .try_into()
                .map_err(|_| (HvError::InvalidParameter, 0))?,
        };
        // On failure, report the index of the failing register as the
        // reps-completed count.
        for (i, reg) in registers.iter().enumerate() {
            self.set_vp_register(target_vtl, reg).map_err(|e| (e, i))?;
        }
        Ok(())
    }
}
impl<T, B: HardwareIsolatedBacking> hv1_hypercall::VtlCall for UhHypercallHandler<'_, '_, T, B> {
    /// A VTL call is permitted only from VTL 0, and only once VTL 1 has been
    /// enabled on this VP.
    fn is_vtl_call_allowed(&self) -> bool {
        tracing::trace!("checking if vtl call is allowed");
        if self.intercepted_vtl != GuestVtl::Vtl0 {
            tracelimit::warn_ratelimited!(
                "vtl call not allowed from vtl {:?}",
                self.intercepted_vtl
            );
            return false;
        }
        if !self.vp.backing.cvm_state().vtl1_enabled {
            tracelimit::warn_ratelimited!("vtl call not allowed because vtl 1 is not enabled");
            return false;
        }
        true
    }
    /// Enters VTL 1 with a VTL_CALL entry reason.
    fn vtl_call(&mut self) {
        tracing::trace!("handling vtl call");
        let raised = self.vp.raise_vtl(
            self.intercepted_vtl,
            GuestVtl::Vtl1,
            HvVtlEntryReason::VTL_CALL,
        );
        raised.unwrap();
    }
}
impl<T, B: HardwareIsolatedBacking> hv1_hypercall::VtlReturn for UhHypercallHandler<'_, '_, T, B> {
    /// A VTL return is permitted only from a VTL above 0; a return attempted
    /// from a VTL other than 1 is logged.
    fn is_vtl_return_allowed(&self) -> bool {
        tracing::trace!("checking if vtl return is allowed");
        if self.intercepted_vtl != GuestVtl::Vtl1 {
            tracelimit::warn_ratelimited!(
                "vtl return not allowed from vtl {:?}",
                self.intercepted_vtl
            );
        }
        self.intercepted_vtl != GuestVtl::Vtl0
    }
    /// Returns from VTL 1 to VTL 0: releases the VTL 1 TLB lock, auto-resets
    /// VINA if configured, switches VTLs, and for a non-fast return restores
    /// rax/rcx from the values saved at VTL entry.
    fn vtl_return(&mut self, fast: bool) {
        tracing::trace!("handling vtl return");
        // VTL 1 gives up any TLB locks it holds on return.
        self.vp.unlock_tlb_lock(Vtl::Vtl1);
        let hv = &mut self.vp.backing.cvm_state_mut().hv[GuestVtl::Vtl1];
        if hv.synic.vina().auto_reset() {
            hv.set_vina_asserted(false).unwrap();
        }
        B::switch_vtl(self.vp, self.intercepted_vtl, GuestVtl::Vtl0);
        if !fast {
            let [rax, rcx] = self.vp.backing.cvm_state_mut().hv[GuestVtl::Vtl1]
                .return_registers()
                .expect("getting return registers shouldn't fail");
            let mut vp_state = self.vp.access_state(Vtl::Vtl0);
            let mut registers = vp_state
                .registers()
                .expect("getting registers shouldn't fail");
            registers.rax = rax;
            registers.rcx = rcx;
            // BUGFIX: was mojibake `®isters` (HTML-entity-decoded
            // `&registers`), which does not compile.
            vp_state
                .set_registers(&registers)
                .expect("setting registers shouldn't fail");
        }
    }
}
impl<T, B: HardwareIsolatedBacking>
    hv1_hypercall::StartVirtualProcessor<hvdef::hypercall::InitialVpContextX64>
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvStartVirtualProcessor: records the initial context for the
    /// target VP/VTL and wakes the target VP so it applies it.
    fn start_virtual_processor(
        &mut self,
        partition_id: u64,
        target_vp: u32,
        target_vtl: Vtl,
        vp_context: &hvdef::hypercall::InitialVpContextX64,
    ) -> HvResult<()> {
        tracing::debug!(
            vp_index = self.vp.vp_index().index(),
            target_vp,
            ?target_vtl,
            "HvStartVirtualProcessor"
        );
        if partition_id != hvdef::HV_PARTITION_ID_SELF {
            return Err(HvError::InvalidPartitionId);
        }
        // A VP cannot start itself, and the index must be in range.
        if target_vp == self.vp.vp_index().index()
            || target_vp as usize >= self.vp.partition.vps.len()
        {
            return Err(HvError::InvalidVpIndex);
        }
        let target_vtl = self.target_vtl_no_higher(target_vtl)?;
        let target_vp_inner = self.vp.cvm_partition().vp_inner(target_vp);
        // Starting directly into VTL 1 requires VTL 1 to have been enabled on
        // the target VP first.
        if target_vtl == GuestVtl::Vtl1 && !*target_vp_inner.vtl1_enable_called.lock() {
            return Err(HvError::InvalidVpState);
        }
        // VTL 1 may have denied VTL 0 the ability to start VPs.
        if self.intercepted_vtl == GuestVtl::Vtl0
            && self.vp.cvm_partition().is_lower_vtl_startup_denied()
        {
            return Err(HvError::AccessDenied);
        }
        // Atomically claim the started flag; a VP may only be started once.
        if target_vp_inner
            .started
            .compare_exchange(
                false,
                true,
                std::sync::atomic::Ordering::Relaxed,
                std::sync::atomic::Ordering::Relaxed,
            )
            .is_err()
        {
            return Err(HvError::InvalidVpState);
        }
        // Stash the initial context where the target VP will pick it up.
        let start_state = VpStartEnableVtl {
            operation: InitialVpContextOperation::StartVp,
            context: *vp_context,
        };
        *self
            .vp
            .cvm_partition()
            .vp_inner(target_vp)
            .hv_start_enable_vtl_vp[target_vtl]
            .lock() = Some(Box::new(start_state));
        // Wake the target VP so it applies the pending context.
        self.vp.partition.vps[target_vp as usize]
            .wake(target_vtl, WakeReason::HV_START_ENABLE_VP_VTL);
        Ok(())
    }
}
impl<T, B: HardwareIsolatedBacking> hv1_hypercall::ModifyVtlProtectionMask
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvModifyVtlProtectionMask: applies `map_flags` to VTL 0's view
    /// of the listed guest physical pages.
    fn modify_vtl_protection_mask(
        &mut self,
        partition_id: u64,
        map_flags: HvMapGpaFlags,
        target_vtl: Option<Vtl>,
        gpa_pages: &[u64],
    ) -> HvRepResult {
        if partition_id != hvdef::HV_PARTITION_ID_SELF {
            return Err((HvError::AccessDenied, 0));
        }
        let target_vtl = self
            .target_vtl_no_higher(target_vtl.unwrap_or(self.intercepted_vtl.into()))
            .map_err(|e| (e, 0))?;
        // Protections cannot be applied on behalf of VTL 0 itself.
        if target_vtl == GuestVtl::Vtl0 {
            return Err((HvError::InvalidParameter, 0));
        }
        let protector = &self.vp.cvm_partition().isolated_memory_protector;
        // A VTL cannot change protections for itself until VTL 1 protections
        // have been enabled (via the VsmPartitionConfig register).
        if target_vtl == self.intercepted_vtl && !protector.vtl1_protections_enabled() {
            return Err((HvError::AccessDenied, 0));
        }
        // Guest VSM must be enabled to validate the flag combination against
        // the negotiated MBEC/shadow-stack state.
        let guest_vsm_lock = self.vp.cvm_partition().guest_vsm.read();
        let GuestVsmState::Enabled { vtl1, .. } = &*guest_vsm_lock else {
            return Err((HvError::InvalidVtlState, 0));
        };
        if !validate_vtl_gpa_flags(
            map_flags,
            vtl1.mbec_enabled,
            vtl1.shadow_supervisor_stack_enabled,
        ) {
            return Err((HvError::InvalidRegisterValue, 0));
        }
        // Apply the protections to VTL 0's view of the pages.
        protector.change_vtl_protections(
            GuestVtl::Vtl0,
            gpa_pages,
            map_flags,
            &mut self.vp.tlb_flush_lock_access(),
        )
    }
}
impl<T: CpuIo, B: HardwareIsolatedBacking> hv1_hypercall::QuerySparseGpaPageHostVisibility
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvQuerySparseGpaPageHostVisibility: reports the current host
    /// visibility of each listed guest physical page.
    fn query_gpa_visibility(
        &mut self,
        partition_id: u64,
        gpa_pages: &[u64],
        host_visibility: &mut [HostVisibilityType],
    ) -> HvRepResult {
        // Both a foreign partition id and hidden isolation are access errors.
        let denied = partition_id != hvdef::HV_PARTITION_ID_SELF
            || self.vp.cvm_partition().hide_isolation;
        if denied {
            return Err((HvError::AccessDenied, 0));
        }
        let protector = &self.vp.cvm_partition().isolated_memory_protector;
        protector.query_host_visibility(gpa_pages, host_visibility)
    }
}
impl<T, B: HardwareIsolatedBacking> hv1_hypercall::EnablePartitionVtl
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvEnablePartitionVtl: enables VTL 1 for the partition and
    /// grants it full access to lower-VTL memory.
    fn enable_partition_vtl(
        &mut self,
        partition_id: u64,
        target_vtl: Vtl,
        flags: hvdef::hypercall::EnablePartitionVtlFlags,
    ) -> HvResult<()> {
        if partition_id != hvdef::HV_PARTITION_ID_SELF {
            return Err(HvError::InvalidPartitionId);
        }
        // Only VTL 1 may be enabled.
        let target_vtl = GuestVtl::try_from(target_vtl).map_err(|_| HvError::AccessDenied)?;
        if target_vtl != GuestVtl::Vtl1 {
            return Err(HvError::AccessDenied);
        }
        // Supervisor shadow stack and hardware HVPT are not supported.
        if flags.enable_supervisor_shadow_stack() || flags.enable_hardware_hvpt() {
            return Err(HvError::InvalidParameter);
        }
        // Hold the write lock across the state check, the HCL call, and the
        // state update so concurrent enables cannot interleave.
        let mut gvsm_state = self.vp.cvm_partition().guest_vsm.write();
        match *gvsm_state {
            GuestVsmState::NotPlatformSupported => return Err(HvError::AccessDenied),
            GuestVsmState::NotGuestEnabled => (),
            GuestVsmState::Enabled { vtl1: _ } => {
                return Err(HvError::VtlAlreadyEnabled);
            }
        }
        // NOTE(review): the flags forwarded to the HCL are hard-coded to
        // zero — confirm that intentionally none of the guest flags (e.g.
        // MBEC) are passed through here.
        self.vp.partition.hcl.enable_partition_vtl(
            target_vtl,
            0.into(),
        )?;
        *gvsm_state = GuestVsmState::Enabled {
            vtl1: CvmVtl1State {
                mbec_enabled: flags.enable_mbec(),
                ..Default::default()
            },
        };
        // VTL 1 starts out with full access to lower-VTL memory.
        let protector = &self.vp.cvm_partition().isolated_memory_protector;
        tracing::debug!("Granting VTL 1 access to lower VTL memory");
        protector.change_default_vtl_protections(
            GuestVtl::Vtl1,
            hvdef::HV_MAP_GPA_PERMISSIONS_ALL,
            &mut self.vp.tlb_flush_lock_access(),
        )?;
        tracing::debug!("Successfully granted vtl 1 access to lower vtl memory");
        tracing::info!("Enabled vtl 1 on the partition");
        Ok(())
    }
}
impl<T, B: HardwareIsolatedBacking>
    hv1_hypercall::EnableVpVtl<hvdef::hypercall::InitialVpContextX64>
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvEnableVpVtl: enables VTL 1 on the target VP and records the
    /// guest-provided initial VTL 1 context for it to apply.
    fn enable_vp_vtl(
        &mut self,
        partition_id: u64,
        vp_index: u32,
        vtl: Vtl,
        vp_context: &hvdef::hypercall::InitialVpContextX64,
    ) -> HvResult<()> {
        tracing::debug!(
            vp_index = self.vp.vp_index().index(),
            target_vp = vp_index,
            ?vtl,
            "HvEnableVpVtl"
        );
        if partition_id != hvdef::HV_PARTITION_ID_SELF {
            return Err(HvError::InvalidPartitionId);
        }
        if vp_index as usize >= self.vp.partition.vps.len() {
            return Err(HvError::InvalidVpIndex);
        }
        // Only VTL 1 may be enabled this way.
        let vtl = GuestVtl::try_from(vtl).map_err(|_| HvError::InvalidParameter)?;
        if vtl != GuestVtl::Vtl1 {
            return Err(HvError::InvalidParameter);
        }
        let gvsm_state = {
            // Guest VSM must already be enabled at the partition level; map
            // the write guard down to the VTL 1 state.
            let guest_vsm_lock = self.vp.cvm_partition().guest_vsm.write();
            let vtl1 = parking_lot::RwLockWriteGuard::try_map(guest_vsm_lock, |gvsm| {
                if let GuestVsmState::Enabled { vtl1, .. } = &mut *gvsm {
                    Some(vtl1)
                } else {
                    None
                }
            })
            .map_err(|_| HvError::InvalidVtlState)?;
            let current_vp_index = self.vp.vp_index().index();
            if self.intercepted_vtl < GuestVtl::Vtl1 {
                // A lower-VTL caller may only enable VTL 1 on itself, and
                // only as the very first VP to do so; afterwards VTL 1 must
                // perform the enables.
                if vtl1.enabled_on_any_vp || vp_index != current_vp_index {
                    return Err(HvError::AccessDenied);
                }
                // Keep the mapped guard so enabled_on_any_vp can be set once
                // the enable succeeds.
                Some(vtl1)
            } else {
                // A VTL 1 caller implies VTL 1 is already enabled somewhere.
                assert!(vtl1.enabled_on_any_vp);
                None
            }
        };
        // Per-VP flag recording that enable was called; its lock also
        // serializes concurrent enables on the same VP.
        let mut vtl1_enabled = self
            .vp
            .cvm_partition()
            .vp_inner(vp_index)
            .vtl1_enable_called
            .lock();
        if *vtl1_enabled {
            return Err(HvError::VtlAlreadyEnabled);
        }
        // Build the context handed to the hypervisor. For SNP it carries the
        // VTL 1 VMSA SEV control; for TDX no context is needed.
        let hv_vp_context = match self.vp.partition.isolation {
            virt::IsolationType::None | virt::IsolationType::Vbs => unreachable!(),
            virt::IsolationType::Snp => {
                let vmsa_pfn = self.vp.partition.hcl.vtl1_vmsa_pfn(vp_index);
                let sev_control = hvdef::HvX64RegisterSevControl::new()
                    .with_enable_encrypted_state(true)
                    .with_vmsa_gpa_page_number(vmsa_pfn);
                let mut hv_vp_context = hvdef::hypercall::InitialVpContextX64::new_zeroed();
                // NOTE(review): the SEV control value is carried in the rip
                // field — presumably a transport convention of the
                // enable_vp_vtl interface; confirm against the HCL.
                hv_vp_context.rip = sev_control.into();
                hv_vp_context
            }
            virt::IsolationType::Tdx => hvdef::hypercall::InitialVpContextX64::new_zeroed(),
        };
        self.vp
            .partition
            .hcl
            .enable_vp_vtl(vp_index, vtl, hv_vp_context)?;
        // Only now (enable succeeded) record partition- and VP-level state.
        if let Some(mut vtl1) = gvsm_state {
            vtl1.enabled_on_any_vp = true;
        }
        *vtl1_enabled = true;
        // Stash the guest-provided context for the target VP to apply.
        let enable_vp_vtl_state = VpStartEnableVtl {
            operation: InitialVpContextOperation::EnableVpVtl,
            context: *vp_context,
        };
        *self
            .vp
            .cvm_partition()
            .vp_inner(vp_index)
            .hv_start_enable_vtl_vp[vtl]
            .lock() = Some(Box::new(enable_vp_vtl_state));
        self.vp.partition.vps[vp_index as usize].wake(vtl, WakeReason::HV_START_ENABLE_VP_VTL);
        tracing::debug!(vp_index, "enabled vtl 1 on vp");
        Ok(())
    }
}
impl<T, B: HardwareIsolatedBacking> hv1_hypercall::TranslateVirtualAddressX64
    for UhHypercallHandler<'_, '_, T, B>
{
    /// Handles HvTranslateVirtualAddress: translates a GVA page in the target
    /// VTL's address space, reporting the GPA page, the cache type, and
    /// whether the page is the hypercall overlay page.
    fn translate_virtual_address(
        &mut self,
        partition_id: u64,
        vp_index: u32,
        control_flags: hvdef::hypercall::TranslateGvaControlFlagsX64,
        gva_page: u64,
    ) -> HvResult<hvdef::hypercall::TranslateVirtualAddressOutput> {
        if partition_id != hvdef::HV_PARTITION_ID_SELF {
            return Err(HvError::AccessDenied);
        }
        if vp_index != hvdef::HV_VP_INDEX_SELF && vp_index != self.vp.vp_index().index() {
            return Err(HvError::AccessDenied);
        }
        // Default to the intercepting VTL; higher VTLs are unreachable.
        let target_vtl = self
            .target_vtl_no_higher(
                control_flags
                    .input_vtl()
                    .target_vtl()?
                    .unwrap_or(self.intercepted_vtl.into()),
            )
            .map_err(|_| HvError::AccessDenied)?;
        // Translating for one's own VTL is not permitted.
        if self.intercepted_vtl == target_vtl {
            return Err(HvError::AccessDenied);
        }
        let gva = gva_page * hvdef::HV_PAGE_SIZE;
        // Flush-inhibit takes the target VTL's TLB lock so the translation
        // remains stable for the caller.
        if control_flags.tlb_flush_inhibit() {
            self.vp
                .set_tlb_lock(self.intercepted_vtl.into(), target_vtl);
        }
        match virt_support_x86emu::translate::translate_gva_to_gpa(
            &self.vp.partition.gm[target_vtl], gva,
            &self.vp.backing.translation_registers(self.vp, target_vtl),
            virt_support_x86emu::translate::TranslateFlags::from_hv_flags(control_flags),
        ) {
            Ok(virt_support_x86emu::translate::TranslateResult { gpa, cache_info }) => {
                // The hypercall overlay page (from the hypercall MSR) is
                // called out specially in the result.
                let overlay_page = hvdef::hypercall::MsrHypercallContents::from(
                    self.vp.backing.cvm_state_mut().hv[target_vtl]
                        .msr_read(hvdef::HV_X64_MSR_HYPERCALL)
                        .unwrap(),
                )
                .gpn();
                let cache_type = match cache_info {
                    // Without paging the memory type is write-back.
                    TranslateCachingInfo::NoPaging => HvCacheType::HvCacheTypeWriteBack.0 as u8,
                    // With paging, extract the memory type byte from the
                    // target VTL's PAT MSR at the translated PAT index.
                    TranslateCachingInfo::Paging { pat_index } => {
                        ((self.vp.access_state(target_vtl.into()).pat().unwrap().value
                            >> (pat_index * 8))
                            & 0xff) as u8
                    }
                };
                let gpn = gpa / hvdef::HV_PAGE_SIZE;
                Ok(hvdef::hypercall::TranslateVirtualAddressOutput {
                    translation_result: hvdef::hypercall::TranslateGvaResult::new()
                        .with_result_code(TranslateGvaResultCode::SUCCESS.0)
                        .with_overlay_page(gpn == overlay_page)
                        .with_cache_type(cache_type),
                    gpa_page: gpn,
                })
            }
            // Translation failures are reported in the output structure, not
            // as a hypercall error.
            Err(err) => Ok(hvdef::hypercall::TranslateVirtualAddressOutput {
                translation_result: hvdef::hypercall::TranslateGvaResult::new()
                    .with_result_code(TranslateGvaResultCode::from(err).0),
                gpa_page: 0,
            }),
        }
    }
}
/// Adapter handed to the hv1 emulator so it can (un)protect the hypercall
/// overlay page in a given VTL's view of memory.
struct HypercallOverlayAccess<'a> {
    /// VTL whose hypercall overlay is being managed.
    vtl: GuestVtl,
    /// Partition memory protector that applies the overlay protections.
    protector: &'a dyn crate::ProtectIsolatedMemory,
    /// TLB flush/lock access used when protections change.
    tlb_access: &'a mut dyn TlbFlushLockAccess,
}
impl hv1_emulator::hv::VtlProtectHypercallOverlay for HypercallOverlayAccess<'_> {
    /// Moves the protected hypercall overlay to the page at `gpn`.
    fn change_overlay(&mut self, gpn: u64) {
        let vtl = self.vtl;
        self.protector
            .change_hypercall_overlay(vtl, gpn, self.tlb_access);
    }
    /// Stops protecting any hypercall overlay page for this VTL.
    fn disable_overlay(&mut self) {
        let vtl = self.vtl;
        self.protector.disable_hypercall_overlay(vtl, self.tlb_access);
    }
}
#[expect(private_bounds)]
impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
/// Writes a synthetic (Hyper-V) MSR on behalf of the guest at `vtl`.
///
/// Routes the write through the hv1 emulator (with access to the hypercall
/// overlay protector), and refreshes the proxy IRR filter when a SINT's
/// proxy state may have changed and the write was actually handled.
pub(crate) fn write_msr_cvm(
    &mut self,
    msr: u32,
    value: u64,
    vtl: GuestVtl,
) -> Result<(), MsrError> {
    let self_index = self.vp_index();
    let hv = &mut self.backing.cvm_state_mut().hv[vtl];
    // A SINT write that changes proxy state (in either direction) requires
    // the proxy IRR filter to be recomputed after a successful write.
    // (Rewritten from a mutable flag + nested ifs to a single expression;
    // clippy::collapsible_if.)
    let irr_filter_update = matches!(msr, hvdef::HV_X64_MSR_SINT0..=hvdef::HV_X64_MSR_SINT15)
        && {
            let sint_curr =
                HvSynicSint::from(hv.synic.sint((msr - hvdef::HV_X64_MSR_SINT0) as u8));
            let sint_new = HvSynicSint::from(value);
            sint_curr.proxy() || sint_new.proxy()
        };
    let mut access = HypercallOverlayAccess {
        vtl,
        protector: B::cvm_partition_state(self.shared)
            .isolated_memory_protector
            .as_ref(),
        tlb_access: &mut B::tlb_flush_lock_access(self_index, self.partition, self.shared),
    };
    let r = hv.msr_write(msr, value, &mut access);
    // Skip the refresh when the MSR wasn't handled at all.
    if irr_filter_update && !matches!(r, Err(MsrError::Unknown)) {
        self.update_proxy_irr_filter(vtl);
    }
    r
}
fn set_vsm_partition_config(
&mut self,
value: HvRegisterVsmPartitionConfig,
vtl: GuestVtl,
) -> Result<(), HvError> {
if vtl != GuestVtl::Vtl1 {
return Err(HvError::InvalidParameter);
}
assert!(self.partition.isolation.is_isolated());
let allowed_bits = HvRegisterVsmPartitionConfig::new()
.with_enable_vtl_protection(true)
.with_default_vtl_protection_mask(0xf)
.with_zero_memory_on_reset(true)
.with_deny_lower_vtl_startup(true);
if (!u64::from(allowed_bits) & u64::from(value)) != 0 {
return Err(HvError::InvalidRegisterValue);
}
let mut guest_vsm_lock = self.cvm_partition().guest_vsm.write();
let GuestVsmState::Enabled { vtl1, .. } = &mut *guest_vsm_lock else {
return Err(HvError::InvalidVtlState);
};
let protections = HvMapGpaFlags::from(value.default_vtl_protection_mask() as u32);
let protector = &self.cvm_partition().isolated_memory_protector;
if !value.enable_vtl_protection() && protector.vtl1_protections_enabled() {
return Err(HvError::InvalidRegisterValue);
}
if !validate_vtl_gpa_flags(
protections,
vtl1.mbec_enabled,
vtl1.shadow_supervisor_stack_enabled,
) {
return Err(HvError::InvalidRegisterValue);
}
if !(protections.readable() && protections.writable()) {
return Err(HvError::InvalidRegisterValue);
}
let targeted_vtl = GuestVtl::Vtl0;
if protector.vtl1_protections_enabled() {
let current_protections = protector.default_vtl0_protections();
if protections != current_protections {
return Err(HvError::InvalidRegisterValue);
}
}
protector.change_default_vtl_protections(
targeted_vtl,
protections,
&mut self.tlb_flush_lock_access(),
)?;
protector.set_vtl1_protections_enabled();
vtl1.zero_memory_on_reset = value.zero_memory_on_reset();
vtl1.deny_lower_vtl_startup = value.deny_lower_vtl_startup();
Ok(())
}
pub(crate) fn cvm_partition(&self) -> &'_ crate::UhCvmPartitionState {
B::cvm_partition_state(self.shared)
}
pub(crate) fn cvm_vp_inner(&self) -> &'_ crate::UhCvmVpInner {
self.cvm_partition().vp_inner(self.vp_index().index())
}
pub(crate) fn tlb_flush_lock_access(&self) -> impl TlbFlushLockAccess + use<'_, B> {
B::tlb_flush_lock_access(self.vp_index(), self.partition, self.shared)
}
pub(crate) fn cvm_handle_cross_vtl_interrupts(
&mut self,
is_interrupt_pending: impl Fn(&mut Self, GuestVtl, bool) -> bool,
) -> Result<bool, UhRunVpError> {
let cvm_state = self.backing.cvm_state();
if !cvm_state.vtl1_enabled {
return Ok(false);
}
if cvm_state.exit_vtl == GuestVtl::Vtl0 && is_interrupt_pending(self, GuestVtl::Vtl1, false)
{
self.raise_vtl(GuestVtl::Vtl0, GuestVtl::Vtl1, HvVtlEntryReason::INTERRUPT)?;
}
let mut reprocessing_required = false;
if self.backing.cvm_state().exit_vtl == GuestVtl::Vtl1
&& is_interrupt_pending(self, GuestVtl::Vtl0, true)
{
let hv = &self.backing.cvm_state().hv[GuestVtl::Vtl1];
let vina = hv.synic.vina();
if vina.enabled() && !hv.vina_asserted().map_err(UhRunVpError::VpAssistPage)? {
hv.set_vina_asserted(true)
.map_err(UhRunVpError::VpAssistPage)?;
self.partition
.synic_interrupt(self.vp_index(), GuestVtl::Vtl1)
.request_interrupt(vina.vector().into(), vina.auto_eoi());
reprocessing_required = true;
}
}
Ok(reprocessing_required)
}
pub(crate) fn hcvm_handle_vp_start_enable_vtl(
&mut self,
vtl: GuestVtl,
) -> Result<(), UhRunVpError> {
let context = {
self.cvm_vp_inner().hv_start_enable_vtl_vp[vtl]
.lock()
.take()
};
if let Some(start_enable_vtl_state) = context {
if vtl == GuestVtl::Vtl1 {
assert!(*self.cvm_vp_inner().vtl1_enable_called.lock());
if let InitialVpContextOperation::EnableVpVtl = start_enable_vtl_state.operation {
self.backing.cvm_state_mut().vtl1_enabled = true;
}
}
tracing::debug!(
vp_index = self.vp_index().index(),
?vtl,
?start_enable_vtl_state.operation,
"setting up vp with initial registers"
);
hv1_emulator::hypercall::set_x86_vp_context(
&mut self.access_state(vtl.into()),
&(start_enable_vtl_state.context),
)
.map_err(UhRunVpError::State)?;
if let InitialVpContextOperation::StartVp = start_enable_vtl_state.operation {
match vtl {
GuestVtl::Vtl0 => {
if self.backing.cvm_state().vtl1_enabled {
self.backing.cvm_state_mut().exit_vtl = GuestVtl::Vtl1;
}
}
GuestVtl::Vtl1 => {
self.backing.cvm_state_mut().exit_vtl = GuestVtl::Vtl1;
}
}
}
}
Ok(())
}
pub(crate) fn hcvm_vtl1_inspectable(&self) -> bool {
self.backing.cvm_state().vtl1_enabled
}
fn get_vsm_vp_secure_config_vtl(
&mut self,
requesting_vtl: GuestVtl,
target_vtl: GuestVtl,
) -> Result<HvRegisterVsmVpSecureVtlConfig, HvError> {
if requesting_vtl <= target_vtl {
return Err(HvError::AccessDenied);
}
let requesting_vtl = requesting_vtl.into();
let guest_vsm_lock = self.cvm_partition().guest_vsm.read();
let GuestVsmState::Enabled { vtl1, .. } = &*guest_vsm_lock else {
return Err(HvError::InvalidVtlState);
};
let tlb_locked = self.vtls_tlb_locked.get(requesting_vtl, target_vtl);
Ok(HvRegisterVsmVpSecureVtlConfig::new()
.with_mbec_enabled(vtl1.mbec_enabled)
.with_tlb_locked(tlb_locked))
}
fn set_vsm_vp_secure_config_vtl(
&mut self,
requesting_vtl: GuestVtl,
target_vtl: GuestVtl,
config: HvRegisterVsmVpSecureVtlConfig,
) -> Result<(), HvError> {
tracing::debug!(
?requesting_vtl,
?target_vtl,
"setting vsm vp secure config vtl"
);
if requesting_vtl <= target_vtl {
return Err(HvError::AccessDenied);
}
if config.supervisor_shadow_stack_enabled() || config.hardware_hvpt_enabled() {
return Err(HvError::InvalidRegisterValue);
}
let requesting_vtl = requesting_vtl.into();
{
let guest_vsm_lock = self.cvm_partition().guest_vsm.read();
let GuestVsmState::Enabled { vtl1, .. } = &*guest_vsm_lock else {
return Err(HvError::InvalidVtlState);
};
if config.mbec_enabled() != vtl1.mbec_enabled {
return Err(HvError::InvalidRegisterValue);
}
}
let tlb_locked = self.vtls_tlb_locked.get(requesting_vtl, target_vtl);
match (tlb_locked, config.tlb_locked()) {
(true, false) => self.unlock_tlb_lock_target(requesting_vtl, target_vtl),
(false, true) => self.set_tlb_lock(requesting_vtl, target_vtl),
_ => (), };
Ok(())
}
fn raise_vtl(
&mut self,
source_vtl: GuestVtl,
target_vtl: GuestVtl,
entry_reason: HvVtlEntryReason,
) -> Result<(), UhRunVpError> {
assert!(source_vtl < target_vtl);
B::switch_vtl(self, source_vtl, target_vtl);
self.backing.cvm_state_mut().hv[target_vtl]
.set_return_reason(entry_reason)
.map_err(UhRunVpError::VpAssistPage)
}
}
/// Register state captured at an xsetbv intercept, consumed by
/// [`validate_xsetbv_exit`].
pub(crate) struct XsetbvExitInput {
    // rax at the exit: low 32 bits of the requested XCR value.
    pub rax: u64,
    // rcx at the exit: the XCR index; only 0 (XCR0) is accepted.
    pub rcx: u64,
    // rdx at the exit: high 32 bits of the requested XCR value.
    pub rdx: u64,
    // Guest cr4, used to check the OSXSAVE bit.
    pub cr4: u64,
    // Current privilege level; xsetbv is only accepted at CPL 0.
    pub cpl: u8,
}
/// Validates the register state captured at an xsetbv exit.
///
/// Returns the requested XFEM (XCR0) value when the guest's write is
/// well-formed, or `None` (after emitting a rate-limited warning) when the
/// attempt should be rejected by the caller.
pub(crate) fn validate_xsetbv_exit(input: XsetbvExitInput) -> Option<u64> {
    let XsetbvExitInput { rax, rcx, rdx, cr4, cpl } = input;

    // Only XCR0 (index 0) is a valid target.
    if rcx != 0 {
        tracelimit::warn_ratelimited!(rcx, "xsetbv exit: rcx is not set to 0");
        return None;
    }

    // xsetbv is privileged.
    if cpl != 0 {
        tracelimit::warn_ratelimited!(cpl, "xsetbv exit: invalid cpl");
        return None;
    }

    // CR4.OSXSAVE must be set for xsetbv to be legal.
    if (cr4 & x86defs::X64_CR4_OSXSAVE) == 0 {
        tracelimit::warn_ratelimited!(cr4, "xsetbv exit: cr4 osxsave not set");
        return None;
    }

    // The requested value arrives split across edx:eax.
    let xfem = (rdx << 32) | (rax & 0xffff_ffff);

    // The legacy x87 bit must always be set in XCR0.
    if (xfem & x86defs::xsave::XFEATURE_X87) == 0 {
        tracelimit::warn_ratelimited!(xfem, "xsetbv exit: xfem legacy x87 bit not set");
        return None;
    }

    Some(xfem)
}
/// Provides the emulator with virtual-address translation support backed by
/// this VP's state.
impl<T: CpuIo, B: HardwareIsolatedBacking> TranslateGvaSupport for UhEmulationState<'_, '_, T, B> {
    type Error = UhRunVpError;

    // Guest memory mapping for the VTL being emulated.
    fn guest_memory(&self) -> &GuestMemory {
        &self.vp.partition.gm[self.vtl]
    }

    // Takes the TLB lock on the emulated VTL on behalf of VTL2.
    fn acquire_tlb_lock(&mut self) {
        self.vp.set_tlb_lock(Vtl::Vtl2, self.vtl)
    }

    // Gathers the registers needed to walk page tables for this VTL.
    fn registers(&mut self) -> Result<TranslationRegisters, Self::Error> {
        Ok(self.vp.backing.translation_registers(self.vp, self.vtl))
    }
}