hcl/ioctl/
register.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Routines for getting and setting register values.
5
6use super::Backing;
7use super::Hcl;
8use super::HvcallRepInput;
9use super::IsolationType;
10use super::MshvHvcall;
11use super::ProcessorRunner;
12use super::hcl_get_vp_register;
13use super::hcl_set_vp_register;
14use super::ioctls::mshv_vp_registers;
15use crate::GuestVtl;
16use arrayvec::ArrayVec;
17use hvdef::HV_PARTITION_ID_SELF;
18use hvdef::HV_VP_INDEX_SELF;
19use hvdef::HvError;
20use hvdef::HvRegisterValue;
21use hvdef::HypercallCode;
22use hvdef::Vtl;
23use hvdef::hypercall::HvRegisterAssoc;
24use std::os::fd::AsRawFd;
25use thiserror::Error;
26use zerocopy::FromZeros;
27
// `HvArchRegisterName` is the architecture-specific register-name enum for
// whichever guest architecture this binary targets, letting the rest of this
// module be written arch-neutrally.
#[cfg(guest_arch = "x86_64")]
type HvArchRegisterName = hvdef::HvX64RegisterName;

#[cfg(guest_arch = "aarch64")]
type HvArchRegisterName = hvdef::HvArm64RegisterName;
33
34#[derive(Error, Debug)]
35#[expect(missing_docs)]
36pub enum GetRegError {
37    #[error("failed to get VP register from ioctl")]
38    Ioctl(#[source] nix::Error),
39    #[error("failed to get VP register from hypercall")]
40    Hypercall(#[source] HvError),
41    #[error("failed to get VP register from sidecar")]
42    Sidecar(#[source] sidecar_client::SidecarError),
43}
44
45#[derive(Error, Debug)]
46#[expect(missing_docs)]
47pub enum SetRegError {
48    #[error("failed to set VP register via ioctl")]
49    Ioctl(#[source] nix::Error),
50    #[error("failed to set VP register via hypercall")]
51    Hypercall(#[source] HvError),
52    #[error("failed to set VP register via sidecar")]
53    Sidecar(#[source] sidecar_client::SidecarError),
54}
55
impl<'a, T: Backing<'a>> ProcessorRunner<'a, T> {
    /// Get the given register on the current VP for the given VTL.
    pub fn get_vp_register(
        &mut self,
        vtl: GuestVtl,
        name: HvArchRegisterName,
    ) -> Result<HvRegisterValue, GetRegError> {
        let mut value = [FromZeros::new_zeroed(); 1];
        self.get_regs(vtl.into(), &[name], &mut value)?;
        Ok(value[0])
    }

    /// Set the given register on the current VP for the given VTL.
    pub fn set_vp_register(
        &mut self,
        vtl: GuestVtl,
        name: HvArchRegisterName,
        value: HvRegisterValue,
    ) -> Result<(), SetRegError> {
        self.set_regs(vtl.into(), [(name, value)])
    }

    /// Get the given registers on the current VP for the given VTL.
    ///
    /// # Panics
    /// Panics if `names.len() != values.len()`.
    pub fn get_vp_registers(
        &mut self,
        vtl: GuestVtl,
        names: &[HvArchRegisterName],
        values: &mut [HvRegisterValue],
    ) -> Result<(), GetRegError> {
        self.get_regs(vtl.into(), names, values)
    }

    /// Get the given register on the VP for VTL 2 via hypercall.
    /// Only a select set of registers are supported; others will cause a panic.
    pub fn get_vp_vtl2_register(
        &mut self,
        name: HvArchRegisterName,
    ) -> Result<HvRegisterValue, GetRegError> {
        // Slow-path guard: anything outside this allowlist is a programming
        // error by the caller, not a runtime condition.
        assert!(matches!(
            name,
            HvArchRegisterName::VsmVpSecureConfigVtl0 | HvArchRegisterName::VsmVpSecureConfigVtl1
        ));

        // Go through get_regs to ensure proper sidecar handling, even though
        // we know this will never end up calling the ioctl.
        let mut value = [FromZeros::new_zeroed(); 1];
        self.get_regs(Vtl::Vtl2, &[name], &mut value)?;
        Ok(value[0])
    }

    /// Set the given registers on the current VP for the given VTL.
    pub fn set_vp_registers<I>(&mut self, vtl: GuestVtl, regs: I) -> Result<(), SetRegError>
    where
        I: IntoIterator,
        I::Item: Into<HvRegisterAssoc>,
    {
        self.set_regs(vtl.into(), regs)
    }

    /// Get the given registers on the current VP for the given VTL via
    /// ioctl/hypercall, as appropriate.
    ///
    /// Routing, in priority order:
    /// 1. If this VP is serviced by a sidecar, forward the whole request.
    /// 2. If the backing `T` can supply the value directly, use it.
    /// 3. If the register is kernel-managed (VTL-shared), use the dedicated
    ///    ioctl, one register at a time (see TODO below).
    /// 4. Otherwise, accumulate it into a batched get hypercall.
    fn get_regs(
        &mut self,
        vtl: Vtl,
        names: &[HvArchRegisterName],
        values: &mut [HvRegisterValue],
    ) -> Result<(), GetRegError> {
        assert_eq!(names.len(), values.len());

        if let Some(sidecar) = &mut self.sidecar {
            return sidecar
                .get_vp_registers(vtl.into(), zerocopy::transmute_ref!(names), values)
                .map_err(GetRegError::Sidecar);
        }

        // Registers destined for the hypercall path are accumulated and
        // issued in batches of at most this many entries.
        const MAX_REGS_PER_HVCALL: usize = 32;
        let mut hv_names: ArrayVec<_, MAX_REGS_PER_HVCALL> = ArrayVec::new();
        let mut hv_values: ArrayVec<_, MAX_REGS_PER_HVCALL> = ArrayVec::new();

        // Issues one batched get hypercall for the pending names, copies the
        // results back out through the saved `&mut` destination slots, and
        // clears both batches for reuse.
        let do_hvcall =
            |hv_names: &mut ArrayVec<_, _>, hv_values: &mut ArrayVec<&mut HvRegisterValue, _>| {
                let mut values: ArrayVec<_, MAX_REGS_PER_HVCALL> = ArrayVec::from_iter(
                    std::iter::repeat_n(FromZeros::new_zeroed(), hv_names.len()),
                );
                self.hcl
                    .mshv_hvcall
                    .get_vp_registers_hypercall(vtl, hv_names, &mut values)
                    .map_err(GetRegError::Hypercall)?;

                for (dest, value) in hv_values.iter_mut().zip(values.into_iter()) {
                    **dest = value;
                }
                hv_names.clear();
                hv_values.clear();
                Ok(())
            };

        for (&name, value) in names.iter().zip(values.iter_mut()) {
            // Fast path: the backing only serves guest VTLs (the `try_into`
            // to `GuestVtl` fails for VTL2), and only for registers it knows
            // how to read.
            if let Ok(vtl) = vtl.try_into()
                && let Some(v) = T::try_get_reg(self, vtl, name.into())
            {
                *value = v;
            } else if self.is_kernel_managed(name) {
                // TODO: group up to MSHV_VP_MAX_REGISTERS regs. The kernel
                // currently has a bug where it only supports one register at a
                // time. Once that's fixed, this code could get a group of
                // registers in one ioctl.
                let mut reg = HvRegisterAssoc {
                    name: name.into(),
                    pad: Default::default(),
                    value: HvRegisterValue::new_zeroed(),
                };
                let mut mshv_vp_register_args = mshv_vp_registers {
                    count: 1,
                    regs: &mut reg,
                };
                // SAFETY: we know that our file is a vCPU fd, we know the kernel will only read the
                // correct amount of memory from our pointer, and we verify the return result.
                unsafe {
                    hcl_get_vp_register(
                        self.hcl.mshv_vtl.file.as_raw_fd(),
                        &mut mshv_vp_register_args,
                    )
                    .map_err(GetRegError::Ioctl)?;
                }
                *value = reg.value;
            } else {
                hv_names.push(name);
                hv_values.push(value);

                if hv_names.is_full() {
                    do_hvcall(&mut hv_names, &mut hv_values)?;
                }
            }
        }

        // Flush any partially-filled final batch.
        if !hv_names.is_empty() {
            do_hvcall(&mut hv_names, &mut hv_values)?;
        }

        Ok(())
    }

    /// Set the given registers on the current VP for the given VTL via
    /// ioctl/hypercall, as appropriate.
    fn set_regs<I>(&mut self, vtl: Vtl, regs: I) -> Result<(), SetRegError>
    where
        I: IntoIterator,
        I::Item: Into<HvRegisterAssoc>,
    {
        // Erase the iterator type so the real work is compiled once rather
        // than per-instantiation.
        self.set_regs_nongeneric(vtl, &mut regs.into_iter().map(Into::into))
    }

    /// Set the given registers on the current VP for the given VTL via
    /// ioctl/hypercall, as appropriate.
    ///
    /// Mirrors the routing in [`Self::get_regs`]: sidecar first, then per
    /// register the backing fast path, the kernel ioctl for VTL-shared
    /// registers, and finally batched hypercalls for the rest.
    fn set_regs_nongeneric(
        &mut self,
        vtl: Vtl,
        regs: &mut dyn Iterator<Item = HvRegisterAssoc>,
    ) -> Result<(), SetRegError> {
        if let Some(sidecar) = &mut self.sidecar {
            // TODO: Optimize this call to not need the heap?
            let regs: Vec<HvRegisterAssoc> = regs.collect();
            return sidecar
                .set_vp_registers(vtl.into(), &regs)
                .map_err(SetRegError::Sidecar);
        }

        const MAX_REGS_PER_HVCALL: usize = 32;
        let mut hv_regs: ArrayVec<_, MAX_REGS_PER_HVCALL> = ArrayVec::new();

        // Issues one batched set hypercall for the pending registers and
        // clears the batch for reuse.
        let do_hvcall = |hv_regs: &mut ArrayVec<_, _>| {
            self.hcl
                .mshv_hvcall
                .set_vp_registers_hypercall(vtl, hv_regs)
                .map_err(SetRegError::Hypercall)?;
            hv_regs.clear();
            Ok(())
        };

        for reg in regs {
            if let Ok(vtl) = vtl.try_into()
                && !T::must_flush_regs_on(self, reg.name)
                && T::try_set_reg(self, vtl, reg.name, reg.value)
            {
                // Handled directly by the backing; nothing more to do.
            } else if self.is_kernel_managed(reg.name.into()) {
                // TODO: group up to MSHV_VP_MAX_REGISTERS regs. The kernel
                // currently has a bug where it only supports one register at a
                // time. Once that's fixed, this code could set a group of
                // registers in one ioctl.
                let mshv_vp_register_args = mshv_vp_registers {
                    count: 1,
                    regs: std::ptr::from_ref(&reg).cast_mut(),
                };
                // SAFETY: we know that our file is a vCPU fd, we know the kernel will only read the
                // correct amount of memory from our pointer, and we verify the return result.
                unsafe {
                    hcl_set_vp_register(self.hcl.mshv_vtl.file.as_raw_fd(), &mshv_vp_register_args)
                        .map_err(SetRegError::Ioctl)?;
                }
            } else {
                hv_regs.push(reg);

                if hv_regs.is_full() {
                    do_hvcall(&mut hv_regs)?;
                }
            }
        }

        // Flush any partially-filled final batch.
        if !hv_regs.is_empty() {
            do_hvcall(&mut hv_regs)?;
        }

        Ok(())
    }

    /// Indicate whether the given register is managed by our kernel.
    fn is_kernel_managed(&self, name: HvArchRegisterName) -> bool {
        // DR6 is special: whether it is VTL-shared (and thus kernel-managed)
        // depends on the processor, reported via `Hcl::dr6_shared`.
        #[cfg(guest_arch = "x86_64")]
        if name == HvArchRegisterName::Dr6 {
            return self.hcl.dr6_shared();
        }

        is_vtl_shared_reg(name)
    }

    /// Sets the following registers on the current VP and given VTL using a
    /// direct hypercall.
    ///
    /// This should not be used on the fast path. Therefore only a select set of
    /// registers are supported, and others will cause a panic.
    ///
    /// This function can be used with VTL2 as a target.
    pub fn set_vp_registers_hvcall<I>(&mut self, vtl: Vtl, values: I) -> Result<(), HvError>
    where
        I: IntoIterator,
        I::Item: Into<HvRegisterAssoc> + Clone,
    {
        let registers: Vec<HvRegisterAssoc> = values.into_iter().map(Into::into).collect();

        // Per-arch extension to the allowlist below; only x86_64 adds one.
        #[cfg(guest_arch = "x86_64")]
        let per_arch = |name| matches!(name, HvArchRegisterName::CrInterceptControl);

        #[cfg(guest_arch = "aarch64")]
        let per_arch = |_: HvArchRegisterName| false;

        // Panic (by design) if any register is outside the allowlist: this
        // slow path is only meant for these control registers.
        assert!(registers.iter().all(
            |HvRegisterAssoc {
                 name,
                 pad: _,
                 value: _,
             }| matches!(
                (*name).into(),
                HvArchRegisterName::PendingEvent0
                    | HvArchRegisterName::PendingEvent1
                    | HvArchRegisterName::Sipp
                    | HvArchRegisterName::Sifp
                    | HvArchRegisterName::Ghcb
                    | HvArchRegisterName::VsmPartitionConfig
                    | HvArchRegisterName::VsmVpWaitForTlbLock
                    | HvArchRegisterName::VsmVpSecureConfigVtl0
                    | HvArchRegisterName::VsmVpSecureConfigVtl1
            ) || per_arch((*name).into())
        ));
        self.hcl
            .mshv_hvcall
            .set_vp_registers_hypercall(vtl, &registers)
    }
}
328
impl Hcl {
    /// Gets the current hypervisor reference time.
    pub fn reference_time(&self) -> Result<u64, GetRegError> {
        Ok(self
            .get_partition_vtl2_register(HvArchRegisterName::TimeRefCount)?
            .as_u64())
    }

    /// Read the vsm capabilities register for VTL2.
    pub fn get_vsm_capabilities(&self) -> Result<hvdef::HvRegisterVsmCapabilities, GetRegError> {
        let caps = hvdef::HvRegisterVsmCapabilities::from(
            self.get_partition_vtl2_register(HvArchRegisterName::VsmCapabilities)?
                .as_u64(),
        );

        // For hardware-isolated partitions, rebuild the capability set from
        // scratch, keeping only an explicit allowlist of the reported bits.
        // NOTE(review): presumably the hypervisor-reported capabilities are
        // not all applicable/trusted under SNP/TDX — confirm.
        let caps = match self.isolation {
            IsolationType::None | IsolationType::Vbs => caps,
            IsolationType::Snp => hvdef::HvRegisterVsmCapabilities::new()
                .with_deny_lower_vtl_startup(caps.deny_lower_vtl_startup())
                .with_intercept_page_available(caps.intercept_page_available()),
            // TDX additionally forces dr6_shared and passes through proxy
            // interrupt redirect availability.
            IsolationType::Tdx => hvdef::HvRegisterVsmCapabilities::new()
                .with_deny_lower_vtl_startup(caps.deny_lower_vtl_startup())
                .with_intercept_page_available(caps.intercept_page_available())
                .with_dr6_shared(true)
                .with_proxy_interrupt_redirect_available(caps.proxy_interrupt_redirect_available()),
        };

        // Our local notion of DR6 sharing must agree with the capability
        // bits we are about to hand out.
        assert_eq!(caps.dr6_shared(), self.dr6_shared());

        Ok(caps)
    }

    /// Get the [`hvdef::HvRegisterGuestVsmPartitionConfig`] register for VTL2.
    pub fn get_guest_vsm_partition_config(
        &self,
    ) -> Result<hvdef::HvRegisterGuestVsmPartitionConfig, GetRegError> {
        Ok(hvdef::HvRegisterGuestVsmPartitionConfig::from(
            self.get_partition_vtl2_register(HvArchRegisterName::GuestVsmPartitionConfig)?
                .as_u64(),
        ))
    }

    /// Get the [`hvdef::HvRegisterVsmPartitionStatus`] register for VTL2.
    pub fn get_vsm_partition_status(
        &self,
    ) -> Result<hvdef::HvRegisterVsmPartitionStatus, GetRegError> {
        Ok(hvdef::HvRegisterVsmPartitionStatus::from(
            self.get_partition_vtl2_register(HvArchRegisterName::VsmPartitionStatus)?
                .as_u64(),
        ))
    }

    /// Get the [`hvdef::HvPartitionPrivilege`] info. On x86_64, this uses
    /// CPUID. On aarch64, it uses get_vp_register.
    pub fn get_privileges_and_features_info(
        &self,
    ) -> Result<hvdef::HvPartitionPrivilege, GetRegError> {
        #[cfg(guest_arch = "x86_64")]
        {
            // The 64-bit privilege mask is split across EAX (low 32 bits)
            // and EBX (high 32 bits) of the features CPUID leaf.
            let result = safe_intrinsics::cpuid(hvdef::HV_CPUID_FUNCTION_MS_HV_FEATURES, 0);
            let num = result.eax as u64 | ((result.ebx as u64) << 32);
            Ok(hvdef::HvPartitionPrivilege::from(num))
        }

        #[cfg(guest_arch = "aarch64")]
        {
            Ok(hvdef::HvPartitionPrivilege::from(
                self.get_partition_vtl2_register(HvArchRegisterName::PrivilegesAndFeaturesInfo)?
                    .as_u64(),
            ))
        }
    }

    /// Get the [`hvdef::hypercall::HvGuestOsId`] register for the given VTL.
    pub fn get_guest_os_id(
        &self,
        vtl: GuestVtl,
    ) -> Result<hvdef::hypercall::HvGuestOsId, GetRegError> {
        Ok(hvdef::hypercall::HvGuestOsId::from(
            self.mshv_hvcall
                .get_vp_register_hypercall(vtl.into(), HvArchRegisterName::GuestOsId)
                .map_err(GetRegError::Hypercall)?
                .as_u64(),
        ))
    }

    /// Set the [`hvdef::HvRegisterVsmPartitionConfig`] register.
    pub fn set_vtl2_vsm_partition_config(
        &self,
        vsm_config: hvdef::HvRegisterVsmPartitionConfig,
    ) -> Result<(), SetRegError> {
        self.set_partition_vtl2_register(
            HvArchRegisterName::VsmPartitionConfig,
            HvRegisterValue::from(u64::from(vsm_config)),
        )
    }

    /// Configure guest VSM.
    /// The only configuration attribute currently supported is changing the maximum number of
    /// guest-visible virtual trust levels for the partition. (VTL 1)
    pub fn set_guest_vsm_partition_config(
        &self,
        enable_guest_vsm: bool,
    ) -> Result<(), SetRegError> {
        // maximum_vtl is the highest guest-visible VTL: 1 when guest VSM is
        // enabled, 0 otherwise.
        let register_value = hvdef::HvRegisterGuestVsmPartitionConfig::new()
            .with_maximum_vtl(if enable_guest_vsm { 1 } else { 0 })
            .with_reserved(0);

        tracing::trace!(enable_guest_vsm, "set_guest_vsm_partition_config");
        // Not supported on hardware-isolated partitions; this is a
        // programming error, not a runtime condition.
        if self.isolation.is_hardware_isolated() {
            unimplemented!("set_guest_vsm_partition_config");
        }

        self.set_partition_vtl2_register(
            HvArchRegisterName::GuestVsmPartitionConfig,
            HvRegisterValue::from(u64::from(register_value)),
        )
    }

    /// Sets the Power Management Timer assist in the hypervisor.
    #[cfg(guest_arch = "x86_64")]
    pub fn set_pm_timer_assist(&self, port: Option<u16>) -> Result<(), SetRegError> {
        tracing::debug!(?port, "set_pm_timer_assist");
        // On hardware-isolated partitions only *enabling* the assist is
        // unsupported; disabling (port == None) falls through.
        if self.isolation.is_hardware_isolated() {
            if port.is_some() {
                unimplemented!("set_pm_timer_assist");
            }
        }

        // None disables the assist by writing an all-zero value.
        let val = HvRegisterValue::from(u64::from(match port {
            Some(p) => hvdef::HvPmTimerInfo::new()
                .with_port(p)
                .with_enabled(true)
                .with_width_24(false),
            None => 0.into(),
        }));

        self.set_partition_vtl2_register(HvArchRegisterName::PmTimerAssist, val)
    }

    /// Sets the Power Management Timer assist in the hypervisor.
    #[cfg(guest_arch = "aarch64")]
    pub fn set_pm_timer_assist(&self, port: Option<u16>) -> Result<(), SetRegError> {
        // The PM timer assist register is x86-only; report an error rather
        // than panicking so callers can handle it gracefully.
        tracing::debug!(?port, "set_pm_timer_assist unimplemented on aarch64");
        Err(SetRegError::Hypercall(HvError::UnknownRegisterName))
    }

    /// Get the given register on the partition for VTL 2 via hypercall.
    /// Only a select set of registers are supported; others will cause a panic.
    fn get_partition_vtl2_register(
        &self,
        name: HvArchRegisterName,
    ) -> Result<HvRegisterValue, GetRegError> {
        // Per-arch extension to the allowlist; only aarch64 adds one.
        #[cfg(guest_arch = "x86_64")]
        let per_arch = false;

        #[cfg(guest_arch = "aarch64")]
        let per_arch = matches!(name, HvArchRegisterName::PrivilegesAndFeaturesInfo);

        // Slow-path guard: only this fixed set of partition-wide registers
        // is expected here; anything else is a programming error.
        assert!(
            matches!(
                name,
                HvArchRegisterName::GuestVsmPartitionConfig
                    | HvArchRegisterName::VsmPartitionConfig
                    | HvArchRegisterName::VsmPartitionStatus
                    | HvArchRegisterName::VsmCapabilities
                    | HvArchRegisterName::TimeRefCount
            ) || per_arch
        );
        self.mshv_hvcall
            .get_vp_register_hypercall(Vtl::Vtl2, name)
            .map_err(GetRegError::Hypercall)
    }

    /// Set the given register on the partition for VTL 2 via hypercall.
    /// Only a select set of registers are supported; others will cause a panic.
    fn set_partition_vtl2_register(
        &self,
        name: HvArchRegisterName,
        value: HvRegisterValue,
    ) -> Result<(), SetRegError> {
        // Per-arch extension to the allowlist; only x86_64 adds one.
        #[cfg(guest_arch = "x86_64")]
        let per_arch = matches!(name, HvArchRegisterName::PmTimerAssist);

        #[cfg(guest_arch = "aarch64")]
        let per_arch = false;

        // Slow-path guard: only this fixed set of partition-wide registers
        // is expected here; anything else is a programming error.
        assert!(
            matches!(
                name,
                HvArchRegisterName::GuestVsmPartitionConfig
                    | HvArchRegisterName::VsmPartitionConfig
            ) || per_arch
        );

        self.mshv_hvcall
            .set_vp_registers_hypercall(
                Vtl::Vtl2,
                &[HvRegisterAssoc {
                    name: name.into(),
                    pad: Default::default(),
                    value,
                }],
            )
            .map_err(SetRegError::Hypercall)
    }
}
536
impl MshvHvcall {
    /// Get the given register on the current VP for the given VTL via hypercall.
    ///
    /// Only VTL-private registers can go through this path. VTL-shared registers
    /// have to go through the kernel (either via the CPU context page or via the
    /// dedicated ioctl), as they may require special handling there.
    fn get_vp_register_hypercall(
        &self,
        vtl: Vtl,
        name: HvArchRegisterName,
    ) -> Result<HvRegisterValue, HvError> {
        // Single-register convenience wrapper over the rep form.
        let mut value = [FromZeros::new_zeroed(); 1];
        self.get_vp_registers_hypercall(vtl, &[name], &mut value)?;
        Ok(value[0])
    }

    /// Get the given registers on the current VP for the given VTL via hypercall.
    ///
    /// Only VTL-private registers can go through this path. VTL-shared registers
    /// have to go through the kernel (either via the CPU context page or via the
    /// dedicated ioctl), as they may require special handling there.
    fn get_vp_registers_hypercall(
        &self,
        vtl: Vtl,
        names: &[HvArchRegisterName],
        values: &mut [HvRegisterValue],
    ) -> Result<(), HvError> {
        assert_eq!(names.len(), values.len());

        // SELF partition/VP ids: this hypercall always targets the VP it is
        // issued from.
        let header = hvdef::hypercall::GetSetVpRegisters {
            partition_id: HV_PARTITION_ID_SELF,
            vp_index: HV_VP_INDEX_SELF,
            target_vtl: vtl.into(),
            rsvd: [0; 3],
        };

        // SAFETY: The input header and rep slice are the correct types for this hypercall.
        //         The hypercall output is validated right after the hypercall is issued.
        let status = unsafe {
            self.hvcall_rep(
                HypercallCode::HvCallGetVpRegisters,
                &header,
                HvcallRepInput::Elements(names),
                Some(values),
            )
            .expect("get_vp_registers hypercall should not fail")
        };

        // Status must be success with all elements completed
        status.result()?;
        // A rep hypercall can partially complete; treat anything short of
        // full completion as a bug.
        assert_eq!(status.elements_processed(), names.len());

        Ok(())
    }

    /// Set the given registers on the current VP for the given VTL via hypercall.
    ///
    /// Only VTL-private registers can go through this path. VTL-shared registers
    /// have to go through the kernel (either via the CPU context page or via the
    /// dedicated ioctl), as they may require special handling there.
    fn set_vp_registers_hypercall(
        &self,
        vtl: Vtl,
        registers: &[HvRegisterAssoc],
    ) -> Result<(), HvError> {
        // SELF partition/VP ids: this hypercall always targets the VP it is
        // issued from.
        let header = hvdef::hypercall::GetSetVpRegisters {
            partition_id: HV_PARTITION_ID_SELF,
            vp_index: HV_VP_INDEX_SELF,
            target_vtl: vtl.into(),
            rsvd: [0; 3],
        };

        // SAFETY: The input header and rep slice are the correct types for this hypercall.
        //         The hypercall output is validated right after the hypercall is issued.
        // The turbofish pins the (unused) output rep type to `u8` since this
        // hypercall produces no output elements.
        let status = unsafe {
            self.hvcall_rep::<hvdef::hypercall::GetSetVpRegisters, HvRegisterAssoc, u8>(
                HypercallCode::HvCallSetVpRegisters,
                &header,
                HvcallRepInput::Elements(registers),
                None,
            )
            .expect("set_vp_registers hypercall should not fail")
        };

        // Status must be success
        status.result()?;
        Ok(())
    }
}
626
/// Indicate whether reg is shared across VTLs.
///
/// This function is not complete: DR6 may or may not be shared, depending on
/// the processor type; the caller needs to check HvRegisterVsmCapabilities.
/// Some MSRs are not included here as they are not represented in
/// HvArchRegisterName, including MSR_TSC_FREQUENCY, MSR_MCG_CAP,
/// MSR_MCG_STATUS, MSR_RESET, MSR_GUEST_IDLE, and MSR_DEBUG_DEVICE_OPTIONS.
fn is_vtl_shared_reg(reg: HvArchRegisterName) -> bool {
    #[cfg(guest_arch = "x86_64")]
    {
        matches!(
            reg,
            // Hypervisor bookkeeping registers.
            HvArchRegisterName::VpIndex
                | HvArchRegisterName::VpRuntime
                | HvArchRegisterName::TimeRefCount
                // General-purpose registers (note RSP/RIP/RFLAGS are not
                // listed here) plus CR2.
                | HvArchRegisterName::Rax
                | HvArchRegisterName::Rbx
                | HvArchRegisterName::Rcx
                | HvArchRegisterName::Rdx
                | HvArchRegisterName::Rsi
                | HvArchRegisterName::Rdi
                | HvArchRegisterName::Rbp
                | HvArchRegisterName::Cr2
                | HvArchRegisterName::R8
                | HvArchRegisterName::R9
                | HvArchRegisterName::R10
                | HvArchRegisterName::R11
                | HvArchRegisterName::R12
                | HvArchRegisterName::R13
                | HvArchRegisterName::R14
                | HvArchRegisterName::R15
                // Debug address registers DR0-DR3. DR6 is deliberately
                // absent: its sharing is processor-dependent (see the doc
                // comment above).
                | HvArchRegisterName::Dr0
                | HvArchRegisterName::Dr1
                | HvArchRegisterName::Dr2
                | HvArchRegisterName::Dr3
                // SSE/FP state: XMM registers, x87/MMX registers, and their
                // control/status registers, plus XFEM (XCR0).
                | HvArchRegisterName::Xmm0
                | HvArchRegisterName::Xmm1
                | HvArchRegisterName::Xmm2
                | HvArchRegisterName::Xmm3
                | HvArchRegisterName::Xmm4
                | HvArchRegisterName::Xmm5
                | HvArchRegisterName::Xmm6
                | HvArchRegisterName::Xmm7
                | HvArchRegisterName::Xmm8
                | HvArchRegisterName::Xmm9
                | HvArchRegisterName::Xmm10
                | HvArchRegisterName::Xmm11
                | HvArchRegisterName::Xmm12
                | HvArchRegisterName::Xmm13
                | HvArchRegisterName::Xmm14
                | HvArchRegisterName::Xmm15
                | HvArchRegisterName::FpMmx0
                | HvArchRegisterName::FpMmx1
                | HvArchRegisterName::FpMmx2
                | HvArchRegisterName::FpMmx3
                | HvArchRegisterName::FpMmx4
                | HvArchRegisterName::FpMmx5
                | HvArchRegisterName::FpMmx6
                | HvArchRegisterName::FpMmx7
                | HvArchRegisterName::FpControlStatus
                | HvArchRegisterName::XmmControlStatus
                | HvArchRegisterName::Xfem
                // MTRR MSRs: capability, default type, the 16 variable
                // base/mask pairs, and the fixed-range MTRRs.
                | HvArchRegisterName::MsrMtrrCap
                | HvArchRegisterName::MsrMtrrDefType
                | HvArchRegisterName::MsrMtrrPhysBase0
                | HvArchRegisterName::MsrMtrrPhysBase1
                | HvArchRegisterName::MsrMtrrPhysBase2
                | HvArchRegisterName::MsrMtrrPhysBase3
                | HvArchRegisterName::MsrMtrrPhysBase4
                | HvArchRegisterName::MsrMtrrPhysBase5
                | HvArchRegisterName::MsrMtrrPhysBase6
                | HvArchRegisterName::MsrMtrrPhysBase7
                | HvArchRegisterName::MsrMtrrPhysBase8
                | HvArchRegisterName::MsrMtrrPhysBase9
                | HvArchRegisterName::MsrMtrrPhysBaseA
                | HvArchRegisterName::MsrMtrrPhysBaseB
                | HvArchRegisterName::MsrMtrrPhysBaseC
                | HvArchRegisterName::MsrMtrrPhysBaseD
                | HvArchRegisterName::MsrMtrrPhysBaseE
                | HvArchRegisterName::MsrMtrrPhysBaseF
                | HvArchRegisterName::MsrMtrrPhysMask0
                | HvArchRegisterName::MsrMtrrPhysMask1
                | HvArchRegisterName::MsrMtrrPhysMask2
                | HvArchRegisterName::MsrMtrrPhysMask3
                | HvArchRegisterName::MsrMtrrPhysMask4
                | HvArchRegisterName::MsrMtrrPhysMask5
                | HvArchRegisterName::MsrMtrrPhysMask6
                | HvArchRegisterName::MsrMtrrPhysMask7
                | HvArchRegisterName::MsrMtrrPhysMask8
                | HvArchRegisterName::MsrMtrrPhysMask9
                | HvArchRegisterName::MsrMtrrPhysMaskA
                | HvArchRegisterName::MsrMtrrPhysMaskB
                | HvArchRegisterName::MsrMtrrPhysMaskC
                | HvArchRegisterName::MsrMtrrPhysMaskD
                | HvArchRegisterName::MsrMtrrPhysMaskE
                | HvArchRegisterName::MsrMtrrPhysMaskF
                | HvArchRegisterName::MsrMtrrFix64k00000
                | HvArchRegisterName::MsrMtrrFix16k80000
                | HvArchRegisterName::MsrMtrrFix16kA0000
                | HvArchRegisterName::MsrMtrrFix4kC0000
                | HvArchRegisterName::MsrMtrrFix4kC8000
                | HvArchRegisterName::MsrMtrrFix4kD0000
                | HvArchRegisterName::MsrMtrrFix4kD8000
                | HvArchRegisterName::MsrMtrrFix4kE0000
                | HvArchRegisterName::MsrMtrrFix4kE8000
                | HvArchRegisterName::MsrMtrrFix4kF0000
                | HvArchRegisterName::MsrMtrrFix4kF8000
        )
    }

    #[cfg(guest_arch = "aarch64")]
    {
        matches!(
            reg,
            // General-purpose registers X0-X28 plus frame pointer (X29) and
            // link register (X30). X18 is intentionally absent —
            // NOTE(review): presumably because it is the reserved platform
            // register on aarch64; confirm.
            HvArchRegisterName::X0
                | HvArchRegisterName::X1
                | HvArchRegisterName::X2
                | HvArchRegisterName::X3
                | HvArchRegisterName::X4
                | HvArchRegisterName::X5
                | HvArchRegisterName::X6
                | HvArchRegisterName::X7
                | HvArchRegisterName::X8
                | HvArchRegisterName::X9
                | HvArchRegisterName::X10
                | HvArchRegisterName::X11
                | HvArchRegisterName::X12
                | HvArchRegisterName::X13
                | HvArchRegisterName::X14
                | HvArchRegisterName::X15
                | HvArchRegisterName::X16
                | HvArchRegisterName::X17
                | HvArchRegisterName::X19
                | HvArchRegisterName::X20
                | HvArchRegisterName::X21
                | HvArchRegisterName::X22
                | HvArchRegisterName::X23
                | HvArchRegisterName::X24
                | HvArchRegisterName::X25
                | HvArchRegisterName::X26
                | HvArchRegisterName::X27
                | HvArchRegisterName::X28
                | HvArchRegisterName::XFp
                | HvArchRegisterName::XLr
        )
    }
}