use super::synic::GlobalSynic;
use super::synic::ProcessorSynic;
use guestmem::GuestMemory;
use guestmem::GuestMemoryError;
use hv1_structs::VtlArray;
use hvdef::HV_PAGE_SIZE;
use hvdef::HV_PAGE_SIZE_USIZE;
use hvdef::HV_REFERENCE_TSC_SEQUENCE_INVALID;
use hvdef::HvRegisterVpAssistPage;
use hvdef::HvVpVtlControl;
use hvdef::HvVtlEntryReason;
use hvdef::Vtl;
use inspect::Inspect;
use parking_lot::Mutex;
use std::mem::offset_of;
use std::sync::Arc;
use virt::x86::MsrError;
use vm_topology::processor::VpIndex;
use vmcore::reference_time_source::ReferenceTimeSource;
use x86defs::cpuid::Vendor;
use zerocopy::FromZeros;
/// Partition-wide Hyper-V (hv1) emulation state.
#[derive(Inspect)]
pub struct GlobalHv {
    /// Immutable partition-wide configuration, shared with every VP.
    #[inspect(flatten)]
    partition_state: Arc<GlobalHvState>,
    /// Mutable per-VTL state (hypercall MSR, guest OS ID, reference TSC),
    /// shared with every VP of the corresponding VTL.
    vtl_mutable_state: VtlArray<Arc<Mutex<MutableHvState>>, 2>,
    /// Per-VTL synthetic interrupt controller state.
    pub synic: VtlArray<GlobalSynic, 2>,
}
/// Immutable partition-wide state.
#[derive(Inspect)]
struct GlobalHvState {
    /// CPU vendor; selects the hypercall instruction encoding
    /// (`vmmcall` for AMD-compatible, `vmcall` for Intel-compatible).
    #[inspect(display)]
    vendor: Vendor,
    /// Source of the partition reference time (100ns units).
    #[inspect(skip)]
    ref_time: Box<dyn ReferenceTimeSource>,
    /// TSC frequency in ticks per second (used to compute the reference
    /// TSC page's scale factor).
    tsc_frequency: u64,
    /// Whether `ref_time` is derived from the TSC; if not, the reference
    /// TSC page is left marked invalid so the guest uses another clock.
    is_ref_time_backed_by_tsc: bool,
}
/// Mutable per-VTL state, shared across the VPs of a VTL behind a mutex.
#[derive(Inspect)]
struct MutableHvState {
    /// Last value written to HV_X64_MSR_HYPERCALL.
    #[inspect(with = "|x| inspect::AsHex(u64::from(*x))")]
    hypercall: hvdef::hypercall::MsrHypercallContents,
    /// Last value written to HV_X64_MSR_GUEST_OS_ID.
    #[inspect(with = "|x| inspect::AsHex(u64::from(*x))")]
    guest_os_id: hvdef::hypercall::HvGuestOsId,
    /// Last value written to HV_X64_MSR_REFERENCE_TSC.
    #[inspect(with = "|x| inspect::AsHex(u64::from(*x))")]
    reference_tsc: hvdef::HvRegisterReferenceTsc,
    /// Sequence number last published to the reference TSC page; bumped
    /// (skipping HV_REFERENCE_TSC_SEQUENCE_INVALID) each time the page is
    /// rewritten.
    tsc_sequence: u32,
}
impl MutableHvState {
    /// Returns the power-on state: all MSR-backed registers zeroed and the
    /// reference TSC sequence reset.
    fn new() -> Self {
        Self {
            hypercall: hvdef::hypercall::MsrHypercallContents::new(),
            guest_os_id: hvdef::hypercall::HvGuestOsId::new(),
            reference_tsc: hvdef::HvRegisterReferenceTsc::new(),
            tsc_sequence: 0,
        }
    }

    /// Resets the state back to [`MutableHvState::new`], first tearing down
    /// the hypercall code page overlay so no stale overlay stays mapped.
    fn reset(&mut self, overlay_access: &mut dyn VtlProtectHypercallOverlay) {
        overlay_access.disable_overlay();
        // Reassign wholesale from `new()` so reset and construction cannot
        // drift apart when fields are added.
        *self = Self::new();
    }
}
/// Parameters for constructing a [`GlobalHv`].
pub struct GlobalHvParams {
    /// The maximum number of VPs the per-VTL synic must support.
    pub max_vp_count: u32,
    /// The processor vendor (selects the hypercall instruction encoding).
    pub vendor: Vendor,
    /// The TSC frequency, in ticks per second.
    pub tsc_frequency: u64,
    /// The reference time source for the partition.
    pub ref_time: Box<dyn ReferenceTimeSource>,
}
impl GlobalHv {
pub fn new(params: GlobalHvParams) -> Self {
Self {
partition_state: Arc::new(GlobalHvState {
vendor: params.vendor,
tsc_frequency: params.tsc_frequency,
is_ref_time_backed_by_tsc: params.ref_time.is_backed_by_tsc(),
ref_time: params.ref_time,
}),
vtl_mutable_state: VtlArray::from_fn(|_| Arc::new(Mutex::new(MutableHvState::new()))),
synic: VtlArray::from_fn(|_| GlobalSynic::new(params.max_vp_count)),
}
}
pub fn add_vp(&self, guest_memory: GuestMemory, vp_index: VpIndex, vtl: Vtl) -> ProcessorVtlHv {
ProcessorVtlHv {
vp_index,
partition_state: self.partition_state.clone(),
vtl_state: self.vtl_mutable_state[vtl].clone(),
synic: self.synic[vtl].add_vp(vp_index),
vp_assist_page: 0.into(),
guest_memory,
}
}
pub fn reset(&self, mut overlay_access: VtlArray<&mut dyn VtlProtectHypercallOverlay, 2>) {
for (state, overlay_access) in self.vtl_mutable_state.iter().zip(overlay_access.iter_mut())
{
state.lock().reset(*overlay_access);
}
}
pub fn guest_os_id(&self, vtl: Vtl) -> hvdef::hypercall::HvGuestOsId {
self.vtl_mutable_state[vtl].lock().guest_os_id
}
}
/// Per-VP, per-VTL Hyper-V emulation state.
#[derive(Inspect)]
pub struct ProcessorVtlHv {
    /// This VP's index.
    vp_index: VpIndex,
    /// Shared immutable partition state.
    #[inspect(skip)]
    partition_state: Arc<GlobalHvState>,
    /// Mutable state shared with the other VPs of this VTL.
    vtl_state: Arc<Mutex<MutableHvState>>,
    /// Guest memory access for overlay pages (hypercall, assist, TSC).
    guest_memory: GuestMemory,
    /// This VP's synthetic interrupt controller state.
    pub synic: ProcessorSynic,
    /// Last value written to HV_X64_MSR_VP_ASSIST_PAGE.
    #[inspect(with = "|x| inspect::AsHex(u64::from(*x))")]
    vp_assist_page: HvRegisterVpAssistPage,
}
impl ProcessorVtlHv {
    /// The current partition reference time, in 100ns units.
    pub fn ref_time_now(&self) -> u64 {
        self.partition_state.ref_time.now_100ns()
    }

    /// Resets this VP's state to its power-on values.
    ///
    /// State shared with other VPs (`partition_state`, `vtl_state`) is left
    /// alone; it is reset separately via the partition-level reset.
    pub fn reset(&mut self) {
        // Exhaustive destructuring so adding a field forces this function
        // to be revisited.
        let Self {
            vp_index: _,
            partition_state: _,
            vtl_state: _,
            guest_memory: _,
            synic,
            vp_assist_page,
        } = self;
        synic.reset();
        *vp_assist_page = Default::default();
    }

    /// Handles a guest write to HV_X64_MSR_GUEST_OS_ID.
    pub fn msr_write_guest_os_id(&mut self, v: u64) {
        self.vtl_state.lock().guest_os_id = v.into();
    }

    /// Handles a guest write to HV_X64_MSR_VP_ASSIST_PAGE.
    ///
    /// Fails with `InvalidAccess` if reserved bits are set or the page
    /// cannot be cleared in guest memory.
    pub fn msr_write_vp_assist_page(&mut self, v: u64) -> Result<(), MsrError> {
        // Reject writes that set any bit outside the enable bit and the
        // GPA page number field.
        if v & !u64::from(
            HvRegisterVpAssistPage::new()
                .with_enabled(true)
                .with_gpa_page_number(!0 >> 12),
        ) != 0
        {
            return Err(MsrError::InvalidAccess);
        }
        let vp_assist_page = HvRegisterVpAssistPage::from(v);
        // Zero the page whenever it is newly enabled or moved to a
        // different GPA, so the guest does not observe stale contents.
        if vp_assist_page.enabled()
            && (!self.vp_assist_page.enabled()
                || vp_assist_page.gpa_page_number() != self.vp_assist_page.gpa_page_number())
        {
            let gpa = vp_assist_page.gpa_page_number() * HV_PAGE_SIZE;
            if let Err(err) = self.guest_memory.fill_at(gpa, 0, HV_PAGE_SIZE_USIZE) {
                tracelimit::warn_ratelimited!(
                    gpa,
                    error = &err as &dyn std::error::Error,
                    "failed to clear vp assist page"
                );
                return Err(MsrError::InvalidAccess);
            }
        }
        self.vp_assist_page = vp_assist_page;
        Ok(())
    }

    /// Emulates a guest write to a synthetic MSR.
    ///
    /// `overlay_access` is used to (re)map or unmap the hypercall code page
    /// overlay when HV_X64_MSR_HYPERCALL changes.
    pub fn msr_write(
        &mut self,
        n: u32,
        v: u64,
        overlay_access: &mut dyn VtlProtectHypercallOverlay,
    ) -> Result<(), MsrError> {
        match n {
            hvdef::HV_X64_MSR_GUEST_OS_ID => {
                self.msr_write_guest_os_id(v);
            }
            hvdef::HV_X64_MSR_HYPERCALL => {
                let mut mutable = self.vtl_state.lock();
                // Once the guest sets the lock bit, further writes fail.
                if mutable.hypercall.locked() {
                    return Err(MsrError::InvalidAccess);
                }
                let hc = hvdef::hypercall::MsrHypercallContents::from(v);
                if hc.reserved_p() != 0 {
                    return Err(MsrError::InvalidAccess);
                }
                // (Re)write the code page only when it is being enabled for
                // the first time or moved to a different guest page.
                if hc.enable()
                    && (!mutable.hypercall.enable() || hc.gpn() != mutable.hypercall.gpn())
                {
                    let gpa = hc.gpn() * HV_PAGE_SIZE;
                    if let Err(err) = self.write_hypercall_page(gpa) {
                        tracelimit::warn_ratelimited!(
                            gpa,
                            error = &err as &dyn std::error::Error,
                            "failed to write hypercall page"
                        );
                        return Err(MsrError::InvalidAccess);
                    }
                    overlay_access.change_overlay(hc.gpn());
                } else if !hc.enable() {
                    overlay_access.disable_overlay();
                }
                mutable.hypercall = hc;
            }
            // Read-only MSRs: writes are rejected.
            hvdef::HV_X64_MSR_VP_INDEX => return Err(MsrError::InvalidAccess),
            hvdef::HV_X64_MSR_TIME_REF_COUNT => return Err(MsrError::InvalidAccess),
            hvdef::HV_X64_MSR_REFERENCE_TSC => {
                let mut mutable = self.vtl_state.lock();
                let v = hvdef::HvRegisterReferenceTsc::from(v);
                if v.reserved_p() != 0 {
                    return Err(MsrError::InvalidAccess);
                }
                // Rewrite the page only when enabled at a new GPN.
                // NOTE(review): a disable/re-enable at the same GPN skips the
                // rewrite, relying on the previous page contents still being
                // valid — confirm this is intentional.
                if v.enable() && mutable.reference_tsc.gpn() != v.gpn() {
                    let gm = &self.guest_memory;
                    let gpa = v.gpn() * HV_PAGE_SIZE;
                    // Publish the invalid sequence first so the guest never
                    // consumes a partially rewritten page.
                    if let Err(err) = gm.write_plain(gpa, &HV_REFERENCE_TSC_SEQUENCE_INVALID) {
                        tracelimit::warn_ratelimited!(
                            gpa,
                            error = &err as &dyn std::error::Error,
                            "failed to write reference tsc page"
                        );
                        return Err(MsrError::InvalidAccess);
                    }
                    // If reference time is not TSC-backed, the page is left
                    // with the invalid sequence, telling the guest to use a
                    // different clock source.
                    if self.partition_state.is_ref_time_backed_by_tsc {
                        // 64.64 fixed-point multiplier converting TSC ticks
                        // to 100ns units (10,000,000 such units per second).
                        let tsc_scale = (((10_000_000_u128) << 64)
                            / self.partition_state.tsc_frequency as u128)
                            as u64;
                        let reference_page = hvdef::HvReferenceTscPage {
                            tsc_scale,
                            ..FromZeros::new_zeroed()
                        };
                        // Write the page body (sequence field still invalid).
                        if let Err(err) = gm.write_plain(gpa, &reference_page) {
                            tracelimit::warn_ratelimited!(
                                gpa,
                                error = &err as &dyn std::error::Error,
                                "failed to write reference tsc page"
                            );
                            return Err(MsrError::InvalidAccess);
                        }
                        // Advance the sequence, skipping the invalid value.
                        mutable.tsc_sequence = mutable.tsc_sequence.wrapping_add(1);
                        if mutable.tsc_sequence == HV_REFERENCE_TSC_SEQUENCE_INVALID {
                            mutable.tsc_sequence = mutable.tsc_sequence.wrapping_add(1);
                        }
                        // Finally publish the new (valid) sequence.
                        if let Err(err) = gm.write_plain(gpa, &mutable.tsc_sequence) {
                            tracelimit::warn_ratelimited!(
                                gpa,
                                error = &err as &dyn std::error::Error,
                                "failed to write reference tsc page"
                            );
                            return Err(MsrError::InvalidAccess);
                        }
                    }
                }
                mutable.reference_tsc = v;
            }
            hvdef::HV_X64_MSR_TSC_FREQUENCY => return Err(MsrError::InvalidAccess),
            hvdef::HV_X64_MSR_VP_ASSIST_PAGE => self.msr_write_vp_assist_page(v)?,
            // Synic MSR range is delegated to the per-VP synic.
            msr @ hvdef::HV_X64_MSR_SCONTROL..=hvdef::HV_X64_MSR_STIMER3_COUNT => {
                self.synic.write_msr(&self.guest_memory, msr, v)?
            }
            _ => return Err(MsrError::Unknown),
        }
        Ok(())
    }

    /// Writes the vendor-appropriate hypercall code page at `gpa`, padding
    /// the rest of the page with `int3` (0xcc) bytes.
    fn write_hypercall_page(&self, gpa: u64) -> Result<(), GuestMemoryError> {
        let page_contents: &[u8] = if self.partition_state.vendor.is_amd_compatible() {
            &AMD_HYPERCALL_PAGE.page
        } else if self.partition_state.vendor.is_intel_compatible() {
            &INTEL_HYPERCALL_PAGE.page
        } else {
            // Vendor is validated at partition construction time.
            unreachable!()
        };
        self.guest_memory.write_at(gpa, page_contents)?;
        // Fill the remainder of the page with breakpoints so stray jumps
        // into it trap rather than executing garbage.
        let int3 = 0xcc;
        self.guest_memory.fill_at(
            gpa + page_contents.len() as u64,
            int3,
            HV_PAGE_SIZE_USIZE - page_contents.len(),
        )?;
        Ok(())
    }

    /// Returns the VSM call/return offsets within the hypercall code page
    /// for 64-bit (`bit64`) or 32-bit callers.
    pub fn vsm_code_page_offsets(&self, bit64: bool) -> hvdef::HvRegisterVsmCodePageOffsets {
        // The offsets vary per-vendor since the sequences are different lengths.
        let page = if self.partition_state.vendor.is_amd_compatible() {
            &AMD_HYPERCALL_PAGE
        } else if self.partition_state.vendor.is_intel_compatible() {
            &INTEL_HYPERCALL_PAGE
        } else {
            unreachable!()
        };
        if bit64 {
            page.offsets64
        } else {
            page.offsets32
        }
    }

    /// Emulates a guest read of a synthetic MSR.
    pub fn msr_read(&self, msr: u32) -> Result<u64, MsrError> {
        let v = match msr {
            hvdef::HV_X64_MSR_GUEST_OS_ID => self.vtl_state.lock().guest_os_id.into(),
            hvdef::HV_X64_MSR_HYPERCALL => self.vtl_state.lock().hypercall.into(),
            hvdef::HV_X64_MSR_VP_INDEX => self.vp_index.index() as u64,
            hvdef::HV_X64_MSR_TIME_REF_COUNT => self.partition_state.ref_time.now_100ns(),
            hvdef::HV_X64_MSR_REFERENCE_TSC => self.vtl_state.lock().reference_tsc.into(),
            hvdef::HV_X64_MSR_TSC_FREQUENCY => self.partition_state.tsc_frequency,
            hvdef::HV_X64_MSR_VP_ASSIST_PAGE => self.vp_assist_page.into(),
            msr @ hvdef::HV_X64_MSR_SCONTROL..=hvdef::HV_X64_MSR_STIMER3_COUNT => {
                self.synic.read_msr(msr)?
            }
            _ => {
                return Err(MsrError::Unknown);
            }
        };
        Ok(v)
    }

    /// Returns the raw HV_X64_MSR_VP_ASSIST_PAGE register value.
    pub fn vp_assist_page(&self) -> u64 {
        self.vp_assist_page.into()
    }

    /// Sets the lazy-EOI ("no EOI required") flag in the VP assist page.
    ///
    /// Returns whether the flag was successfully written; `false` if the
    /// assist page is disabled or the guest memory write fails.
    #[must_use]
    pub fn set_lazy_eoi(&mut self) -> bool {
        if !self.vp_assist_page.enabled() {
            return false;
        }
        let gpa = self.vp_assist_page.gpa_page_number() * HV_PAGE_SIZE
            + offset_of!(hvdef::HvVpAssistPage, apic_assist) as u64;
        let v = 1u32;
        match self.guest_memory.write_plain(gpa, &v) {
            Ok(()) => true,
            Err(err) => {
                tracelimit::warn_ratelimited!(
                    error = &err as &dyn std::error::Error,
                    "failed to write lazy eoi to assist page"
                );
                false
            }
        }
    }

    /// Clears the lazy-EOI flag in the VP assist page.
    ///
    /// Returns `true` if the flag had already been cleared by the guest
    /// (NOTE(review): presumably meaning the caller must now perform the
    /// EOI on the guest's behalf — confirm against call sites); returns
    /// `false` otherwise or on any guest memory access failure.
    #[must_use]
    pub fn clear_lazy_eoi(&mut self) -> bool {
        // NOTE(review): unlike set_lazy_eoi, this does not check
        // vp_assist_page.enabled() — confirm callers only invoke it after a
        // successful set_lazy_eoi.
        let gpa = self.vp_assist_page.gpa_page_number() * HV_PAGE_SIZE
            + offset_of!(hvdef::HvVpAssistPage, apic_assist) as u64;
        let v: u32 = match self.guest_memory.read_plain(gpa) {
            Ok(v) => v,
            Err(err) => {
                tracelimit::warn_ratelimited!(
                    error = &err as &dyn std::error::Error,
                    "failed to read lazy eoi from assist page"
                );
                return false;
            }
        };
        if v & 1 == 0 {
            true
        } else {
            // Flag still set: clear it, preserving the other bits.
            let v = v & !1;
            if let Err(err) = self.guest_memory.write_plain(gpa, &v) {
                tracelimit::warn_ratelimited!(
                    error = &err as &dyn std::error::Error,
                    "failed to clear lazy eoi from assist page"
                );
            }
            false
        }
    }

    /// Reads the VTL return registers from the assist page's VTL control area.
    ///
    /// NOTE(review): no `enabled()` check — if the assist page is disabled
    /// this reads from page 0; confirm callers guard against that.
    pub fn return_registers(&self) -> Result<[u64; 2], GuestMemoryError> {
        let gpa = (self.vp_assist_page.gpa_page_number() * HV_PAGE_SIZE)
            + offset_of!(hvdef::HvVpAssistPage, vtl_control) as u64
            + offset_of!(HvVpVtlControl, registers) as u64;
        self.guest_memory.read_plain(gpa)
    }

    /// Writes the VTL entry reason into the assist page's VTL control area.
    pub fn set_return_reason(&self, reason: HvVtlEntryReason) -> Result<(), GuestMemoryError> {
        let gpa = (self.vp_assist_page.gpa_page_number() * HV_PAGE_SIZE)
            + offset_of!(hvdef::HvVpAssistPage, vtl_control) as u64
            + offset_of!(HvVpVtlControl, entry_reason) as u64;
        self.guest_memory.write_plain(gpa, &(reason.0))
    }

    /// Reads whether VINA is asserted from the assist page's VTL control area.
    pub fn vina_asserted(&self) -> Result<bool, GuestMemoryError> {
        let gpa = (self.vp_assist_page.gpa_page_number() * HV_PAGE_SIZE)
            + offset_of!(hvdef::HvVpAssistPage, vtl_control) as u64
            + offset_of!(HvVpVtlControl, vina_status) as u64;
        self.guest_memory.read_plain(gpa).map(|v: u8| v != 0)
    }

    /// Writes the VINA asserted flag into the assist page's VTL control area.
    pub fn set_vina_asserted(&self, value: bool) -> Result<(), GuestMemoryError> {
        let gpa = (self.vp_assist_page.gpa_page_number() * HV_PAGE_SIZE)
            + offset_of!(hvdef::HvVpAssistPage, vtl_control) as u64
            + offset_of!(HvVpVtlControl, vina_status) as u64;
        self.guest_memory.write_plain(gpa, &(value as u8))
    }
}
/// The hypercall code page contents plus the VSM call/return offsets
/// within it.
struct HypercallPage {
    /// The code bytes; the remainder of the guest page is filled with
    /// `int3` when the page is written.
    page: [u8; 50],
    /// VSM call/return offsets for 32-bit callers.
    offsets32: hvdef::HvRegisterVsmCodePageOffsets,
    /// VSM call/return offsets for 64-bit callers.
    offsets64: hvdef::HvRegisterVsmCodePageOffsets,
}
/// Builds the hypercall code page for the given vendor instruction
/// (`vmmcall` when `use_vmmcall`, otherwise `vmcall`), along with the VSM
/// call/return offsets into it.
const fn hypercall_page(use_vmmcall: bool) -> HypercallPage {
    // The vendor-specific three-byte hypercall instruction encoding.
    let [hc0, hc1, hc2] = if use_vmmcall {
        [0x0f, 0x01, 0xd9] // vmmcall
    } else {
        [0x0f, 0x01, 0xc1] // vmcall
    };
    #[rustfmt::skip]
    let page = [
        // 0x00: plain hypercall entry point.
        hc0, hc1, hc2,                // hypercall
        0xc3,                         // ret
        // 0x04: VTL call, 32-bit caller.
        0x89, 0xc1,                   // mov ecx, eax
        0xb8, 0x11, 0x00, 0x00, 0x00, // mov eax, 0x11
        hc0, hc1, hc2,                // hypercall
        0xc3,                         // ret
        // 0x0f: VTL call, 64-bit caller.
        0x48, 0x89, 0xc8,             // mov rax, rcx
        0xb9, 0x11, 0x00, 0x00, 0x00, // mov ecx, 0x11
        hc0, hc1, hc2,                // hypercall
        0xc3,                         // ret
        // 0x1b: VTL return, 32-bit caller.
        0x89, 0xc1,                   // mov ecx, eax
        0xb8, 0x12, 0x00, 0x00, 0x00, // mov eax, 0x12
        hc0, hc1, hc2,                // hypercall
        0xc3,                         // ret
        // 0x26: VTL return, 64-bit caller.
        0x48, 0x89, 0xc8,             // mov rax, rcx
        0xb9, 0x12, 0x00, 0x00, 0x00, // mov ecx, 0x12
        hc0, hc1, hc2,                // hypercall
        0xc3,                         // ret
    ];
    HypercallPage {
        page,
        offsets32: hvdef::HvRegisterVsmCodePageOffsets::new()
            .with_call_offset(0x4)
            .with_return_offset(0x1b),
        offsets64: hvdef::HvRegisterVsmCodePageOffsets::new()
            .with_call_offset(0xf)
            .with_return_offset(0x26),
    }
}
/// Hypercall page using `vmmcall`, for AMD-compatible vendors.
const AMD_HYPERCALL_PAGE: HypercallPage = hypercall_page(true);
/// Hypercall page using `vmcall`, for Intel-compatible vendors.
const INTEL_HYPERCALL_PAGE: HypercallPage = hypercall_page(false);
/// Callbacks for managing the hypercall code page overlay as the guest
/// enables, moves, or disables the hypercall MSR.
pub trait VtlProtectHypercallOverlay {
    /// Moves the overlay to the given guest page number.
    fn change_overlay(&mut self, gpn: u64);
    /// Disables the overlay.
    fn disable_overlay(&mut self);
}