Skip to main content

virt_mshv/x86_64/
vp_state.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4use crate::Error;
5use crate::ErrorInner;
6use crate::MshvProcessor;
7use crate::VcpuFdExt;
8use hvdef::HvX64RegisterName;
9use hvdef::hypercall::HvRegisterAssoc;
10use mshv_bindings::MSHV_VP_STATE_SIEFP;
11use mshv_bindings::MSHV_VP_STATE_SIMP;
12use mshv_bindings::MSHV_VP_STATE_SYNTHETIC_TIMERS;
13use mshv_bindings::mshv_get_set_vp_state;
14use std::ptr::NonNull;
15use std::sync::OnceLock;
16use virt::state::HvRegisterState;
17use virt::vp::ApicRegisters;
18use virt::x86::vp;
19use virt::x86::vp::AccessVpState;
20use zerocopy::FromZeros;
21use zerocopy::IntoBytes;
22
23impl MshvProcessor<'_> {
24    pub(crate) fn set_register_state<T, const N: usize>(&self, regs: &T) -> Result<(), Error>
25    where
26        T: HvRegisterState<HvX64RegisterName, N>,
27    {
28        let mut assoc = regs.names().map(|name| HvRegisterAssoc {
29            name: name.into(),
30            pad: [0; 3],
31            value: FromZeros::new_zeroed(),
32        });
33
34        regs.get_values(assoc.iter_mut().map(|assoc| &mut assoc.value));
35
36        self.runner
37            .vcpufd
38            .set_hvdef_regs(&assoc[..])
39            .map_err(ErrorInner::Register)?;
40
41        Ok(())
42    }
43
44    pub(crate) fn get_register_state<T, const N: usize>(&self) -> Result<T, Error>
45    where
46        T: HvRegisterState<HvX64RegisterName, N>,
47    {
48        let mut regs = T::default();
49        let mut assoc = regs.names().map(|name| HvRegisterAssoc {
50            name: name.into(),
51            pad: [0; 3],
52            value: FromZeros::new_zeroed(),
53        });
54
55        self.runner
56            .vcpufd
57            .get_hvdef_regs(&mut assoc[..])
58            .map_err(ErrorInner::Register)?;
59
60        regs.set_values(assoc.iter().map(|assoc| assoc.value));
61        Ok(regs)
62    }
63
64    fn set_state(&self, ty: u32, data: &[u8]) -> Result<(), Error> {
65        // The kernel requires a page-aligned buffer for VP state operations.
66        let mut buf = PageAlignedBuffer::new(data.len());
67        buf.as_mut_bytes().copy_from_slice(data);
68
69        let vp_state = mshv_get_set_vp_state {
70            type_: ty as u8,
71            buf_sz: buf.aligned_len() as u32,
72            buf_ptr: buf.as_ptr() as u64,
73            ..Default::default()
74        };
75        self.runner
76            .vcpufd
77            .set_vp_state_ioctl(&vp_state)
78            .map_err(|e| ErrorInner::SetVpState {
79                error: e.into(),
80                ty: ty as u8,
81            })?;
82        Ok(())
83    }
84
85    fn get_fixed_state<T: zerocopy::FromBytes>(&self, ty: u32) -> Result<T, Error> {
86        let state = self.get_state(ty, size_of::<T>())?;
87        Ok(T::read_from_prefix(state.as_bytes()).unwrap().0)
88    }
89
90    fn get_state(&self, ty: u32, size: usize) -> Result<PageAlignedBuffer, Error> {
91        // The kernel requires a page-aligned buffer for VP state operations.
92        let mut buf = PageAlignedBuffer::new(size);
93        let mut vp_state = mshv_get_set_vp_state {
94            type_: ty as u8,
95            buf_sz: buf.aligned_len() as u32,
96            buf_ptr: buf.as_mut_ptr() as u64,
97            ..Default::default()
98        };
99        self.runner
100            .vcpufd
101            .get_vp_state_ioctl(&mut vp_state)
102            .map_err(|e| ErrorInner::GetVpState {
103                error: e.into(),
104                ty: ty as u8,
105            })?;
106        Ok(buf)
107    }
108
109    fn get_lapic(&self) -> Result<ApicRegisters, Error> {
110        let hv_state: hvdef::HvX64InterruptControllerState =
111            self.get_fixed_state(mshv_bindings::MSHV_VP_STATE_LAPIC)?;
112
113        Ok(ApicRegisters::from(hv_state))
114    }
115
116    fn set_lapic(&self, lapic: &ApicRegisters) -> Result<(), Error> {
117        let hv_state: hvdef::HvX64InterruptControllerState = (*lapic).into();
118        self.set_state(mshv_bindings::MSHV_VP_STATE_LAPIC, hv_state.as_bytes())
119    }
120}
121
/// An owned heap allocation whose start address and size are aligned to the
/// system page size, as the mshv VP state ioctls require page-aligned
/// buffers.
struct PageAlignedBuffer {
    // Start of the allocation; never null (allocation failure aborts via
    // `handle_alloc_error`).
    ptr: NonNull<u8>,
    // The caller-requested length in bytes; may be smaller than the
    // page-rounded allocation size (`layout.size()`).
    len: usize,
    // The layout used at allocation time, retained so `Drop` can pass the
    // identical layout to `dealloc`.
    layout: std::alloc::Layout,
}
127
128impl PageAlignedBuffer {
129    fn page_size() -> usize {
130        static PAGE_SIZE: OnceLock<usize> = OnceLock::new();
131        // SAFETY: sysconf(_SC_PAGESIZE) is always safe to call.
132        *PAGE_SIZE.get_or_init(|| unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as usize)
133    }
134
135    fn new(len: usize) -> Self {
136        let page_size = Self::page_size();
137        let layout =
138            std::alloc::Layout::from_size_align(len.next_multiple_of(page_size), page_size)
139                .unwrap();
140        // SAFETY: layout has non-zero size and page alignment.
141        let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
142        let Some(ptr) = NonNull::new(ptr) else {
143            std::alloc::handle_alloc_error(layout);
144        };
145        Self { ptr, len, layout }
146    }
147
148    fn aligned_len(&self) -> usize {
149        self.layout.size()
150    }
151
152    fn as_ptr(&self) -> *const u8 {
153        self.ptr.as_ptr()
154    }
155
156    fn as_mut_ptr(&mut self) -> *mut u8 {
157        self.ptr.as_ptr()
158    }
159
160    fn as_bytes(&self) -> &[u8] {
161        // SAFETY: ptr is valid for layout.size() >= self.len bytes and is
162        // uniquely owned.
163        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len) }
164    }
165
166    fn as_mut_bytes(&mut self) -> &mut [u8] {
167        // SAFETY: ptr is valid for layout.size() >= self.len bytes, and &mut
168        // self guarantees exclusive access.
169        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
170    }
171}
172
impl Drop for PageAlignedBuffer {
    /// Releases the page-aligned allocation back to the global allocator.
    fn drop(&mut self) {
        // SAFETY: ptr was allocated with this layout via alloc_zeroed.
        unsafe { std::alloc::dealloc(self.ptr.as_ptr(), self.layout) };
    }
}
179
// VP state save/restore accessors. Most register-backed components simply
// round-trip through `get_register_state`/`set_register_state`; the
// page-sized components (XSAVE, LAPIC, SynIC pages/timers) go through the
// VP state ioctls instead.
impl AccessVpState for &'_ mut MshvProcessor<'_> {
    type Error = Error;

    fn caps(&self) -> &virt::PartitionCapabilities {
        &self.partition.caps
    }

    fn commit(&mut self) -> Result<(), Self::Error> {
        // The setters in this impl apply state eagerly (via ioctls or
        // in-memory updates), so there is nothing buffered to flush here.
        Ok(())
    }

    fn registers(&mut self) -> Result<vp::Registers, Self::Error> {
        self.get_register_state()
    }

    fn set_registers(&mut self, value: &vp::Registers) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn activity(&mut self) -> Result<vp::Activity, Self::Error> {
        let mut activity: vp::Activity = self.get_register_state()?;
        // The NMI pending bit is not part of the register state; it lives
        // in the APIC page.
        activity.nmi_pending = self.get_lapic()?.hv_apic_nmi_pending();
        Ok(activity)
    }

    fn set_activity(&mut self, value: &vp::Activity) -> Result<(), Self::Error> {
        self.set_register_state(value)?;
        // The NMI pending bit is not part of the register state; it must
        // be set via the APIC page. Only rewrite the page if the bit
        // actually changes, to avoid a redundant LAPIC state ioctl.
        let mut lapic = self.get_lapic()?;
        if lapic.hv_apic_nmi_pending() != value.nmi_pending {
            lapic.set_hv_apic_nmi_pending(value.nmi_pending);
            self.set_lapic(&lapic)?;
        }
        Ok(())
    }

    fn xsave(&mut self) -> Result<vp::Xsave, Self::Error> {
        // Size the buffer from the partition's reported compact XSAVE
        // length; the kernel returns the state in compact format.
        let xsave = self.get_state(
            mshv_bindings::MSHV_VP_STATE_XSAVE,
            self.partition.caps.xsave.compact_len as usize,
        )?;
        Ok(vp::Xsave::from_compact(
            xsave.as_bytes(),
            &self.partition.caps,
        ))
    }

    fn set_xsave(&mut self, value: &vp::Xsave) -> Result<(), Self::Error> {
        self.set_state(mshv_bindings::MSHV_VP_STATE_XSAVE, value.compact())?;
        Ok(())
    }

    fn apic(&mut self) -> Result<vp::Apic, Self::Error> {
        // Get the APIC base register.
        let mut assoc = [HvRegisterAssoc {
            name: HvX64RegisterName::ApicBase.into(),
            pad: [0; 3],
            value: FromZeros::new_zeroed(),
        }];
        self.runner
            .vcpufd
            .get_hvdef_regs(&mut assoc)
            .map_err(ErrorInner::Register)?;
        let apic_base = assoc[0].value.as_u64();

        // Get the LAPIC state page.
        let mut lapic = self.get_lapic()?;
        // Clear the non-architectural NMI pending bit so it does not leak
        // into the saved architectural APIC state.
        lapic.set_hv_apic_nmi_pending(false);
        Ok(vp::Apic::new(apic_base.into(), lapic, [0; 8]))
    }

    fn set_apic(&mut self, value: &vp::Apic) -> Result<(), Self::Error> {
        // Set the APIC base register first to set the APIC mode before
        // updating the APIC register state.
        self.runner
            .vcpufd
            .set_hvdef_regs(&[HvRegisterAssoc::from((
                HvX64RegisterName::ApicBase,
                value.apic_base,
            ))])
            .map_err(ErrorInner::Register)?;

        // Preserve the current NMI pending state across the restore.
        let nmi_pending = self.get_lapic()?.hv_apic_nmi_pending();

        // Set the LAPIC state page, restoring the NMI pending bit.
        let mut lapic = *value.registers();
        lapic.set_hv_apic_nmi_pending(nmi_pending);
        self.set_lapic(&lapic)?;

        Ok(())
    }

    fn xcr(&mut self) -> Result<vp::Xcr0, Self::Error> {
        self.get_register_state()
    }

    fn set_xcr(&mut self, value: &vp::Xcr0) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn xss(&mut self) -> Result<vp::Xss, Self::Error> {
        self.get_register_state()
    }

    fn set_xss(&mut self, value: &vp::Xss) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn mtrrs(&mut self) -> Result<vp::Mtrrs, Self::Error> {
        self.get_register_state()
    }

    fn set_mtrrs(&mut self, value: &vp::Mtrrs) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn pat(&mut self) -> Result<vp::Pat, Self::Error> {
        self.get_register_state()
    }

    fn set_pat(&mut self, value: &vp::Pat) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn virtual_msrs(&mut self) -> Result<vp::VirtualMsrs, Self::Error> {
        self.get_register_state()
    }

    fn set_virtual_msrs(&mut self, value: &vp::VirtualMsrs) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn debug_regs(&mut self) -> Result<vp::DebugRegisters, Self::Error> {
        self.get_register_state()
    }

    fn set_debug_regs(&mut self, value: &vp::DebugRegisters) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn tsc(&mut self) -> Result<vp::Tsc, Self::Error> {
        self.get_register_state()
    }

    fn set_tsc(&mut self, value: &vp::Tsc) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn cet(&mut self) -> Result<vp::Cet, Self::Error> {
        self.get_register_state()
    }

    fn set_cet(&mut self, value: &vp::Cet) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn cet_ss(&mut self) -> Result<vp::CetSs, Self::Error> {
        self.get_register_state()
    }

    fn set_cet_ss(&mut self, value: &vp::CetSs) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn tsc_aux(&mut self) -> Result<vp::TscAux, Self::Error> {
        self.get_register_state()
    }

    fn set_tsc_aux(&mut self, value: &vp::TscAux) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn synic_msrs(&mut self) -> Result<vp::SyntheticMsrs, Self::Error> {
        self.get_register_state()
    }

    fn set_synic_msrs(&mut self, value: &vp::SyntheticMsrs) -> Result<(), Self::Error> {
        self.set_register_state(value)
    }

    fn synic_timers(&mut self) -> Result<vp::SynicTimers, Self::Error> {
        // Synthetic timer state is transferred as a fixed-size VP state
        // blob rather than through individual registers.
        Ok(vp::SynicTimers::from_hv(
            self.get_fixed_state(MSHV_VP_STATE_SYNTHETIC_TIMERS)?,
        ))
    }

    fn set_synic_timers(&mut self, value: &vp::SynicTimers) -> Result<(), Self::Error> {
        self.set_state(MSHV_VP_STATE_SYNTHETIC_TIMERS, value.as_hv().as_bytes())?;
        Ok(())
    }

    fn synic_message_queues(&mut self) -> Result<vp::SynicMessageQueues, Self::Error> {
        // Message queues are maintained in this process, not by the kernel,
        // so they save/restore through the in-memory queues.
        Ok(self.inner.message_queues.save())
    }

    fn set_synic_message_queues(
        &mut self,
        value: &vp::SynicMessageQueues,
    ) -> Result<(), Self::Error> {
        self.inner.message_queues.restore(value);
        Ok(())
    }

    fn synic_message_page(&mut self) -> Result<vp::SynicMessagePage, Self::Error> {
        let data = self.get_fixed_state(MSHV_VP_STATE_SIMP)?;
        Ok(vp::SynicMessagePage { data })
    }

    fn set_synic_message_page(&mut self, value: &vp::SynicMessagePage) -> Result<(), Self::Error> {
        self.set_state(MSHV_VP_STATE_SIMP, &value.data)
    }

    fn synic_event_flags_page(&mut self) -> Result<vp::SynicEventFlagsPage, Self::Error> {
        let data = self.get_fixed_state(MSHV_VP_STATE_SIEFP)?;
        Ok(vp::SynicEventFlagsPage { data })
    }

    fn set_synic_event_flags_page(
        &mut self,
        value: &vp::SynicEventFlagsPage,
    ) -> Result<(), Self::Error> {
        self.set_state(MSHV_VP_STATE_SIEFP, &value.data)
    }
}