virt/x86/vp.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Per-VP state.

use super::SegmentRegister;
use super::TableRegister;
use super::X86PartitionCapabilities;
use crate::state::HvRegisterState;
use crate::state::StateElement;
use crate::state::state_trait;
use hvdef::HV_MESSAGE_SIZE;
use hvdef::HvInternalActivityRegister;
use hvdef::HvRegisterValue;
use hvdef::HvX64InterruptStateRegister;
use hvdef::HvX64PendingEventReg0;
use hvdef::HvX64PendingExceptionEvent;
use hvdef::HvX64PendingExtIntEvent;
use hvdef::HvX64PendingInterruptionRegister;
use hvdef::HvX64PendingInterruptionType;
use hvdef::HvX64RegisterName;
use hvdef::HvX64SegmentRegister;
use hvdef::HvX64TableRegister;
use inspect::Inspect;
use mesh_protobuf::Protobuf;
use std::fmt::Debug;
use vm_topology::processor::x86::X86VpInfo;
use x86defs::RFlags;
use x86defs::X64_CR0_CD;
use x86defs::X64_CR0_ET;
use x86defs::X64_CR0_NW;
use x86defs::X64_EFER_NXE;
use x86defs::X86X_MSR_DEFAULT_PAT;
use x86defs::apic::APIC_BASE_PAGE;
use x86defs::apic::ApicBase;
use x86defs::apic::ApicVersion;
use x86defs::xsave::DEFAULT_MXCSR;
use x86defs::xsave::Fxsave;
use x86defs::xsave::INIT_FCW;
use x86defs::xsave::XCOMP_COMPRESSED;
use x86defs::xsave::XFEATURE_SSE;
use x86defs::xsave::XFEATURE_X87;
use x86defs::xsave::XFEATURE_YMM;
use x86defs::xsave::XSAVE_LEGACY_LEN;
use x86defs::xsave::XSAVE_VARIABLE_OFFSET;
use x86defs::xsave::XsaveHeader;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;
use zerocopy::Ref;

#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
pub struct Registers {
    #[inspect(hex)]
    #[mesh(1)]
    pub rax: u64,
    #[inspect(hex)]
    #[mesh(2)]
    pub rcx: u64,
    #[inspect(hex)]
    #[mesh(3)]
    pub rdx: u64,
    #[inspect(hex)]
    #[mesh(4)]
    pub rbx: u64,
    #[inspect(hex)]
    #[mesh(5)]
    pub rsp: u64,
    #[inspect(hex)]
    #[mesh(6)]
    pub rbp: u64,
    #[inspect(hex)]
    #[mesh(7)]
    pub rsi: u64,
    #[inspect(hex)]
    #[mesh(8)]
    pub rdi: u64,
    #[inspect(hex)]
    #[mesh(9)]
    pub r8: u64,
    #[inspect(hex)]
    #[mesh(10)]
    pub r9: u64,
    #[inspect(hex)]
    #[mesh(11)]
    pub r10: u64,
    #[inspect(hex)]
    #[mesh(12)]
    pub r11: u64,
    #[inspect(hex)]
    #[mesh(13)]
    pub r12: u64,
    #[inspect(hex)]
    #[mesh(14)]
    pub r13: u64,
    #[inspect(hex)]
    #[mesh(15)]
    pub r14: u64,
    #[inspect(hex)]
    #[mesh(16)]
    pub r15: u64,
    #[inspect(hex)]
    #[mesh(17)]
    pub rip: u64,
    #[inspect(hex)]
    #[mesh(18)]
    pub rflags: u64,
    #[mesh(19)]
    pub cs: SegmentRegister,
    #[mesh(20)]
    pub ds: SegmentRegister,
    #[mesh(21)]
    pub es: SegmentRegister,
    #[mesh(22)]
    pub fs: SegmentRegister,
    #[mesh(23)]
    pub gs: SegmentRegister,
    #[mesh(24)]
    pub ss: SegmentRegister,
    #[mesh(25)]
    pub tr: SegmentRegister,
    #[mesh(26)]
    pub ldtr: SegmentRegister,
    #[mesh(27)]
    pub gdtr: TableRegister,
    #[mesh(28)]
    pub idtr: TableRegister,
    #[inspect(hex)]
    #[mesh(29)]
    pub cr0: u64,
    #[inspect(hex)]
    #[mesh(30)]
    pub cr2: u64,
    #[inspect(hex)]
    #[mesh(31)]
    pub cr3: u64,
    #[inspect(hex)]
    #[mesh(32)]
    pub cr4: u64,
    #[inspect(hex)]
    #[mesh(33)]
    pub cr8: u64,
    #[inspect(hex)]
    #[mesh(34)]
    pub efer: u64,
}

impl HvRegisterState<HvX64RegisterName, 34> for Registers {
    fn names(&self) -> &'static [HvX64RegisterName; 34] {
        &[
            HvX64RegisterName::Rax,
            HvX64RegisterName::Rcx,
            HvX64RegisterName::Rdx,
            HvX64RegisterName::Rbx,
            HvX64RegisterName::Rsp,
            HvX64RegisterName::Rbp,
            HvX64RegisterName::Rsi,
            HvX64RegisterName::Rdi,
            HvX64RegisterName::R8,
            HvX64RegisterName::R9,
            HvX64RegisterName::R10,
            HvX64RegisterName::R11,
            HvX64RegisterName::R12,
            HvX64RegisterName::R13,
            HvX64RegisterName::R14,
            HvX64RegisterName::R15,
            HvX64RegisterName::Rip,
            HvX64RegisterName::Rflags,
            HvX64RegisterName::Cr0,
            HvX64RegisterName::Cr2,
            HvX64RegisterName::Cr3,
            HvX64RegisterName::Cr4,
            HvX64RegisterName::Cr8,
            HvX64RegisterName::Efer,
            HvX64RegisterName::Cs,
            HvX64RegisterName::Ds,
            HvX64RegisterName::Es,
            HvX64RegisterName::Fs,
            HvX64RegisterName::Gs,
            HvX64RegisterName::Ss,
            HvX64RegisterName::Tr,
            HvX64RegisterName::Ldtr,
            HvX64RegisterName::Gdtr,
            HvX64RegisterName::Idtr,
        ]
    }

    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
        for (dest, src) in it.zip([
            self.rax.into(),
            self.rcx.into(),
            self.rdx.into(),
            self.rbx.into(),
            self.rsp.into(),
            self.rbp.into(),
            self.rsi.into(),
            self.rdi.into(),
            self.r8.into(),
            self.r9.into(),
            self.r10.into(),
            self.r11.into(),
            self.r12.into(),
            self.r13.into(),
            self.r14.into(),
            self.r15.into(),
            self.rip.into(),
            self.rflags.into(),
            self.cr0.into(),
            self.cr2.into(),
            self.cr3.into(),
            self.cr4.into(),
            self.cr8.into(),
            self.efer.into(),
            HvX64SegmentRegister::from(self.cs).into(),
            HvX64SegmentRegister::from(self.ds).into(),
            HvX64SegmentRegister::from(self.es).into(),
            HvX64SegmentRegister::from(self.fs).into(),
            HvX64SegmentRegister::from(self.gs).into(),
            HvX64SegmentRegister::from(self.ss).into(),
            HvX64SegmentRegister::from(self.tr).into(),
            HvX64SegmentRegister::from(self.ldtr).into(),
            HvX64TableRegister::from(self.gdtr).into(),
            HvX64TableRegister::from(self.idtr).into(),
        ]) {
            *dest = src;
        }
    }

    fn set_values(&mut self, mut it: impl Iterator<Item = HvRegisterValue>) {
        for (dest, src) in [
            &mut self.rax,
            &mut self.rcx,
            &mut self.rdx,
            &mut self.rbx,
            &mut self.rsp,
            &mut self.rbp,
            &mut self.rsi,
            &mut self.rdi,
            &mut self.r8,
            &mut self.r9,
            &mut self.r10,
            &mut self.r11,
            &mut self.r12,
            &mut self.r13,
            &mut self.r14,
            &mut self.r15,
            &mut self.rip,
            &mut self.rflags,
            &mut self.cr0,
            &mut self.cr2,
            &mut self.cr3,
            &mut self.cr4,
            &mut self.cr8,
            &mut self.efer,
        ]
        .into_iter()
        .zip(&mut it)
        {
            *dest = src.as_u64();
        }

        for (dest, src) in [
            &mut self.cs,
            &mut self.ds,
            &mut self.es,
            &mut self.fs,
            &mut self.gs,
            &mut self.ss,
            &mut self.tr,
            &mut self.ldtr,
        ]
        .into_iter()
        .zip(&mut it)
        {
            *dest = src.as_segment().into();
        }

        for (dest, src) in [&mut self.gdtr, &mut self.idtr].into_iter().zip(it) {
            *dest = src.as_table().into();
        }
    }
}

impl StateElement<X86PartitionCapabilities, X86VpInfo> for Registers {
    fn is_present(_caps: &X86PartitionCapabilities) -> bool {
        true
    }

    fn at_reset(caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
        let cs = SegmentRegister {
            base: 0xffff0000,
            limit: 0xffff,
            selector: 0xf000,
            attributes: 0x9b,
        };
        let ds = SegmentRegister {
            base: 0,
            limit: 0xffff,
            selector: 0,
            attributes: 0x93,
        };
        let tr = SegmentRegister {
            base: 0,
            limit: 0xffff,
            selector: 0,
            attributes: 0x8b,
        };
        let ldtr = SegmentRegister {
            base: 0,
            limit: 0xffff,
            selector: 0,
            attributes: 0x82,
        };
        let gdtr = TableRegister {
            base: 0,
            limit: 0xffff,
        };
        let efer = if caps.nxe_forced_on { X64_EFER_NXE } else { 0 };
        Self {
            rax: 0,
            rcx: 0,
            rdx: caps.reset_rdx,
            rbx: 0,
            rbp: 0,
            rsp: 0,
            rsi: 0,
            rdi: 0,
            r8: 0,
            r9: 0,
            r10: 0,
            r11: 0,
            r12: 0,
            r13: 0,
            r14: 0,
            r15: 0,
            rip: 0xfff0,
            rflags: RFlags::at_reset().into(),
            cs,
            ds,
            es: ds,
            fs: ds,
            gs: ds,
            ss: ds,
            tr,
            ldtr,
            gdtr,
            idtr: gdtr,
            cr0: X64_CR0_ET | X64_CR0_CD | X64_CR0_NW,
            cr2: 0,
            cr3: 0,
            cr4: 0,
            cr8: 0,
            efer,
        }
    }
}

#[derive(Default, Debug, PartialEq, Eq, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
pub struct Activity {
    #[mesh(1)]
    pub mp_state: MpState,
    #[mesh(2)]
    pub nmi_pending: bool,
    #[mesh(3)]
    pub nmi_masked: bool,
    #[mesh(4)]
    pub interrupt_shadow: bool,
    #[mesh(5)]
    pub pending_event: Option<PendingEvent>,
    #[mesh(6)]
    pub pending_interruption: Option<PendingInterruption>,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
pub enum MpState {
    #[mesh(1)]
    Running,
    #[mesh(2)]
    WaitForSipi,
    #[mesh(3)]
    Halted,
    #[mesh(4)]
    Idle,
}

impl Default for MpState {
    fn default() -> Self {
        Self::Running
    }
}

// N.B. This does not include the NMI pending bit, which must be read and
//      written via the APIC page.
impl HvRegisterState<HvX64RegisterName, 4> for Activity {
    fn names(&self) -> &'static [HvX64RegisterName; 4] {
        &[
            HvX64RegisterName::InternalActivityState,
            HvX64RegisterName::PendingInterruption,
            HvX64RegisterName::InterruptState,
            HvX64RegisterName::PendingEvent0,
        ]
    }

    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
        let mut activity = HvInternalActivityRegister::from(0);
        match self.mp_state {
            MpState::Running => {}
            MpState::WaitForSipi => {
                activity.set_startup_suspend(true);
            }
            MpState::Halted => {
                activity.set_halt_suspend(true);
            }
            MpState::Idle => {
                activity.set_idle_suspend(true);
            }
        };

        let pending_event = if let Some(event) = self.pending_event {
            match event {
                PendingEvent::Exception {
                    vector,
                    error_code,
                    parameter,
                } => HvX64PendingExceptionEvent::new()
                    .with_event_pending(true)
                    .with_event_type(hvdef::HV_X64_PENDING_EVENT_EXCEPTION)
                    .with_vector(vector.into())
                    .with_deliver_error_code(error_code.is_some())
                    .with_error_code(error_code.unwrap_or(0))
                    .with_exception_parameter(parameter)
                    .into(),

                PendingEvent::ExtInt { vector } => HvX64PendingExtIntEvent::new()
                    .with_event_pending(true)
                    .with_event_type(hvdef::HV_X64_PENDING_EVENT_EXT_INT)
                    .with_vector(vector)
                    .into(),
            }
        } else {
            0
        };

        let mut pending_interruption = HvX64PendingInterruptionRegister::new();
        if let Some(interruption) = self.pending_interruption {
            pending_interruption.set_interruption_pending(true);
            let ty = match interruption {
                PendingInterruption::Exception { vector, error_code } => {
                    pending_interruption.set_interruption_vector(vector.into());
                    pending_interruption.set_deliver_error_code(error_code.is_some());
                    pending_interruption.set_error_code(error_code.unwrap_or(0));
                    HvX64PendingInterruptionType::HV_X64_PENDING_EXCEPTION
                }
                PendingInterruption::Interrupt { vector } => {
                    pending_interruption.set_interruption_vector(vector.into());
                    HvX64PendingInterruptionType::HV_X64_PENDING_INTERRUPT
                }
                PendingInterruption::Nmi => HvX64PendingInterruptionType::HV_X64_PENDING_NMI,
            };
            pending_interruption.set_interruption_type(ty.0);
        }

        let interrupt_state = HvX64InterruptStateRegister::new()
            .with_nmi_masked(self.nmi_masked)
            .with_interrupt_shadow(self.interrupt_shadow);

        for (dest, src) in it.zip([
            HvRegisterValue::from(u64::from(activity)),
            u64::from(pending_interruption).into(),
            u64::from(interrupt_state).into(),
            pending_event.into(),
        ]) {
            *dest = src;
        }
    }

    fn set_values(&mut self, mut it: impl Iterator<Item = HvRegisterValue>) {
        let activity = HvInternalActivityRegister::from(it.next().unwrap().as_u64());
        let interruption = HvX64PendingInterruptionRegister::from(it.next().unwrap().as_u64());
        let interrupt_state = HvX64InterruptStateRegister::from(it.next().unwrap().as_u64());
        let event = HvX64PendingEventReg0::from(it.next().unwrap().as_u128());

        let mp_state = if activity.startup_suspend() {
            MpState::WaitForSipi
        } else if activity.halt_suspend() {
            MpState::Halted
        } else if activity.idle_suspend() {
            MpState::Idle
        } else {
            MpState::Running
        };

        let pending_event = event.event_pending().then(|| match event.event_type() {
            hvdef::HV_X64_PENDING_EVENT_EXCEPTION => {
                let event = HvX64PendingExceptionEvent::from(u128::from(event));
                PendingEvent::Exception {
                    vector: event.vector().try_into().expect("exception code is 8 bits"),
                    error_code: event.deliver_error_code().then(|| event.error_code()),
                    parameter: event.exception_parameter(),
                }
            }
            hvdef::HV_X64_PENDING_EVENT_EXT_INT => {
                let event = HvX64PendingExtIntEvent::from(u128::from(event));
                PendingEvent::ExtInt {
                    vector: event.vector(),
                }
            }
            ty => panic!("unhandled event type: {}", ty),
        });

        let pending_interruption = interruption.interruption_pending().then(|| {
            match HvX64PendingInterruptionType(interruption.interruption_type()) {
                HvX64PendingInterruptionType::HV_X64_PENDING_INTERRUPT => {
                    PendingInterruption::Interrupt {
                        vector: interruption
                            .interruption_vector()
                            .try_into()
                            .expect("x86 vector is 8 bits"),
                    }
                }
                HvX64PendingInterruptionType::HV_X64_PENDING_NMI => PendingInterruption::Nmi,
                HvX64PendingInterruptionType::HV_X64_PENDING_EXCEPTION => {
                    PendingInterruption::Exception {
                        vector: interruption
                            .interruption_vector()
                            .try_into()
                            .expect("exception code is 8 bits"),
                        error_code: interruption
                            .deliver_error_code()
                            .then(|| interruption.error_code()),
                    }
                }
                ty => panic!("unhandled interruption type: {ty:?}"),
            }
        });

        *self = Self {
            mp_state,
            nmi_pending: false,
            nmi_masked: interrupt_state.nmi_masked(),
            interrupt_shadow: interrupt_state.interrupt_shadow(),
            pending_event,
            pending_interruption,
        };
    }
}

impl StateElement<X86PartitionCapabilities, X86VpInfo> for Activity {
    fn is_present(_caps: &X86PartitionCapabilities) -> bool {
        true
    }

    fn at_reset(_caps: &X86PartitionCapabilities, vp_info: &X86VpInfo) -> Self {
        let mp_state = if vp_info.base.is_bsp() {
            MpState::Running
        } else {
            // FUTURE: we should really emulate INIT and SIPI to have
            // finer-grained control over the states.
            MpState::WaitForSipi
        };
        Self {
            mp_state,
            nmi_pending: false,
            nmi_masked: false,
            interrupt_shadow: false,
            pending_event: None,
            pending_interruption: None,
        }
    }
}

#[derive(Debug, PartialEq, Eq, Copy, Clone, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
#[inspect(external_tag)]
pub enum PendingEvent {
    #[mesh(1)]
    Exception {
        #[mesh(1)]
        vector: u8,
        #[mesh(2)]
        error_code: Option<u32>,
        #[mesh(3)]
        parameter: u64,
    },
    #[mesh(2)]
    ExtInt {
        #[mesh(1)]
        vector: u8,
    },
}

#[derive(Debug, PartialEq, Eq, Copy, Clone, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
#[inspect(external_tag)]
pub enum PendingInterruption {
    #[mesh(1)]
    Exception {
        #[mesh(1)]
        vector: u8,
        #[mesh(2)]
        error_code: Option<u32>,
    },
    #[mesh(2)]
    Interrupt {
        #[mesh(1)]
        vector: u8,
    },
    #[mesh(3)]
    Nmi,
}

#[derive(Debug, Default, PartialEq, Eq, Copy, Clone, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
pub struct DebugRegisters {
    #[mesh(1)]
    #[inspect(hex)]
    pub dr0: u64,
    #[mesh(2)]
    #[inspect(hex)]
    pub dr1: u64,
    #[mesh(3)]
    #[inspect(hex)]
    pub dr2: u64,
    #[mesh(4)]
    #[inspect(hex)]
    pub dr3: u64,
    #[mesh(5)]
    #[inspect(hex)]
    pub dr6: u64,
    #[mesh(6)]
    #[inspect(hex)]
    pub dr7: u64,
}

impl HvRegisterState<HvX64RegisterName, 6> for DebugRegisters {
    fn names(&self) -> &'static [HvX64RegisterName; 6] {
        &[
            HvX64RegisterName::Dr0,
            HvX64RegisterName::Dr1,
            HvX64RegisterName::Dr2,
            HvX64RegisterName::Dr3,
            HvX64RegisterName::Dr6,
            HvX64RegisterName::Dr7,
        ]
    }

    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
        for (dest, src) in it.zip([
            self.dr0.into(),
            self.dr1.into(),
            self.dr2.into(),
            self.dr3.into(),
            self.dr6.into(),
            self.dr7.into(),
        ]) {
            *dest = src;
        }
    }

    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
        for (src, dest) in it.zip([
            &mut self.dr0,
            &mut self.dr1,
            &mut self.dr2,
            &mut self.dr3,
            &mut self.dr6,
            &mut self.dr7,
        ]) {
            *dest = src.as_u64();
        }
    }
}

impl StateElement<X86PartitionCapabilities, X86VpInfo> for DebugRegisters {
    fn is_present(_caps: &X86PartitionCapabilities) -> bool {
        true
    }

    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
        Self {
            dr0: 0,
            dr1: 0,
            dr2: 0,
            dr3: 0,
            dr6: 0xffff0ff0,
            dr7: 0x400,
        }
    }

    fn can_compare(caps: &X86PartitionCapabilities) -> bool {
        // Some machines support clearing bit 16 for some TSX debugging feature,
        // but the hypervisor does not support restoring DR6 into this state.
        // Ignore comparison failures in this case.
        !caps.dr6_tsx_broken
    }
}

#[derive(PartialEq, Eq, Protobuf)]
#[mesh(package = "virt.x86")]
pub struct Xsave {
    #[mesh(1)]
    pub data: Vec<u64>,
}

impl Xsave {
    fn normalize(&mut self) {
        let (mut fxsave, data) = Ref::<_, Fxsave>::from_prefix(self.data.as_mut_bytes()).unwrap();
        let header = XsaveHeader::mut_from_prefix(data).unwrap().0; // TODO: zerocopy: ref-from-prefix: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)

        // Clear the mxcsr mask since it's ignored in the restore process and
        // will only cause xsave comparisons to fail.
        fxsave.mxcsr_mask = 0;

        // Clear SSE state if it's not actually set to anything interesting.
        // This normalizes behavior between mshv (which always sets SSE in
        // xstate_bv) and KVM (which does not).
        if header.xstate_bv & XFEATURE_SSE != 0 {
            if fxsave.xmm.iter().eq(std::iter::repeat_n(&[0; 16], 16))
                && fxsave.mxcsr == DEFAULT_MXCSR
            {
                header.xstate_bv &= !XFEATURE_SSE;
            }
        } else {
            fxsave.xmm.fill(Default::default());
        }

        if header.xstate_bv & (XFEATURE_SSE | XFEATURE_YMM) == 0 {
            fxsave.mxcsr = 0;
        }

        // Clear init FPU state as well.
        if header.xstate_bv & XFEATURE_X87 != 0 {
            if fxsave.fcw == INIT_FCW
                && fxsave.fsw == 0
                && fxsave.ftw == 0
                && fxsave.fop == 0
                && fxsave.fip == 0
                && fxsave.fdp == 0
                && fxsave.st == [[0; 16]; 8]
            {
                fxsave.fcw = 0;
                header.xstate_bv &= !XFEATURE_X87;
            }
        } else {
            fxsave.fcw = 0;
            fxsave.fsw = 0;
            fxsave.ftw = 0;
            fxsave.fop = 0;
            fxsave.fip = 0;
            fxsave.fdp = 0;
            fxsave.st.fill(Default::default());
        }

        // Clear the portion of the xsave legacy region that's specified not to
        // be used by the processor. Newer versions of KVM put garbage values in
        // here for some (possibly incorrect) reason.
        fxsave.unused.fill(0);
    }

    /// Construct from the xsave compact format.
    pub fn from_compact(data: &[u8], caps: &X86PartitionCapabilities) -> Self {
        assert_eq!(data.len() % 8, 0);
        let mut aligned = vec![0; data.len() / 8];
        aligned.as_mut_bytes().copy_from_slice(data);
        let mut this = Self { data: aligned };

        this.normalize();

        // Some versions of the MS hypervisor fail to set xstate_bv for
        // supervisor states. In this case, force-enable them--this is always
        // safe (since their init state == zero) and does not have a performance
        // penalty.
        if caps.xsaves_state_bv_broken {
            let header =
                XsaveHeader::mut_from_prefix(&mut this.data.as_mut_bytes()[XSAVE_LEGACY_LEN..])
                    .unwrap()
                    .0; // TODO: zerocopy: ref-from-prefix: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)

            // Just enable supervisor states that were possible when the
            // hypervisor had the bug. Future ones will only be supported by
            // fixed hypervisors.
            header.xstate_bv |= header.xcomp_bv & 0x1c00;
        }

        this
    }

    /// Construct from standard (non-compact) xsave format.
    pub fn from_standard(src: &[u8], caps: &X86PartitionCapabilities) -> Self {
        let mut this = Self {
            data: vec![0; caps.xsave.compact_len as usize / 8],
        };
        this.data.as_mut_bytes()[..XSAVE_VARIABLE_OFFSET]
            .copy_from_slice(&src[..XSAVE_VARIABLE_OFFSET]);

        let (mut header, data) =
            Ref::<_, XsaveHeader>::from_prefix(&mut this.data.as_mut_bytes()[XSAVE_LEGACY_LEN..])
                .unwrap();

        header.xcomp_bv = caps.xsave.features | caps.xsave.supervisor_features | XCOMP_COMPRESSED;
        let mut cur = 0;
        for i in 2..63 {
            if header.xcomp_bv & (1 << i) != 0 {
                let feature = &caps.xsave.feature_info[i];
                let offset = feature.offset as usize;
                let len = feature.len as usize;
                if feature.align {
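                    // Components with the align bit set start on a 64-byte
                    // boundary in the compacted layout, so round `cur` up to
                    // the next multiple of 64.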
                    cur = (cur + 63) & !63;
                }
                if header.xstate_bv & (1 << i) != 0 {
                    data[cur..cur + len].copy_from_slice(&src[offset..offset + len]);
                }
                cur += len;
            }
        }
        this.normalize();
        this
    }

    /// Write out to standard (non-compact) xsave format.
    pub fn write_standard(&self, data: &mut [u8], caps: &X86PartitionCapabilities) {
        // Copy the legacy region including default values for disabled features.
        data[..XSAVE_LEGACY_LEN].copy_from_slice(self.fxsave().as_bytes());

        // Copy the xsave header but clear xcomp_bv.
        let header = self.xsave_header();
        data[XSAVE_LEGACY_LEN..XSAVE_VARIABLE_OFFSET].copy_from_slice(
            XsaveHeader {
                xcomp_bv: 0,
                ..*header
            }
            .as_bytes(),
        );

        // Copy the features.
        let mut cur = XSAVE_VARIABLE_OFFSET;
        for i in 2..63 {
            if header.xcomp_bv & (1 << i) != 0 {
                let feature = &caps.xsave.feature_info[i];
                let offset = feature.offset as usize;
                let len = feature.len as usize;
                if feature.align {
                    cur = (cur + 63) & !63;
                }
                if header.xstate_bv & (1 << i) != 0 {
                    data[offset..offset + len]
                        .copy_from_slice(&self.data.as_bytes()[cur..cur + len]);
                }
                cur += len;
            }
        }
    }

    /// Returns the compact form.
    pub fn compact(&self) -> &[u8] {
        self.data.as_bytes()
    }

    /// Returns the legacy fxsave state only.
    ///
    /// Since this does not include `xstate_bv`, fields for disabled features
    /// will be set to their default values.
    pub fn fxsave(&self) -> Fxsave {
        let mut fxsave = Fxsave::read_from_prefix(self.data.as_bytes()).unwrap().0; // TODO: zerocopy: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
        let header = self.xsave_header();
        if header.xstate_bv & XFEATURE_X87 == 0 {
            fxsave.fcw = INIT_FCW;
        }
        if header.xstate_bv & (XFEATURE_SSE | XFEATURE_YMM) == 0 {
            fxsave.mxcsr = DEFAULT_MXCSR;
        }
        fxsave
    }

    fn xsave_header(&self) -> &XsaveHeader {
        XsaveHeader::ref_from_prefix(&self.data.as_bytes()[XSAVE_LEGACY_LEN..])
            .unwrap()
            .0 // TODO: zerocopy: ref-from-prefix: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
    }
}
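
// A minimal conversion sketch (not part of this module), assuming `caps` is the
// partition's `X86PartitionCapabilities` and `standard` holds an architectural
// (non-compact) XSAVE image of the size the partition expects:
//
//     let xsave = Xsave::from_standard(&standard, &caps);
//     let mut out = vec![0u8; standard.len()];
//     xsave.write_standard(&mut out, &caps);
//     // `out` now holds the normalized state in standard format, while
//     // `xsave.compact()` exposes the same state in the compact layout.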

impl Debug for Xsave {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Xsave")
            .field("legacy", &format_args!("{:x?}", self.fxsave()))
            .field("header", &format_args!("{:x?}", self.xsave_header()))
            .field("data", &&self.data[XSAVE_VARIABLE_OFFSET / 8..])
            .finish()
    }
}

impl Inspect for Xsave {
    fn inspect(&self, req: inspect::Request<'_>) {
        let Fxsave {
            fcw,
            fsw,
            ftw,
            reserved: _,
            fop,
            fip,
            fdp,
            mxcsr,
            mxcsr_mask,
            st,
            xmm,
            reserved2: _,
            unused: _,
        } = self.fxsave();

        let &XsaveHeader {
            xstate_bv,
            xcomp_bv,
            reserved: _,
        } = self.xsave_header();

        let mut resp = req.respond();
        resp.hex("fcw", fcw)
            .hex("fsw", fsw)
            .hex("ftw", ftw)
            .hex("fop", fop)
            .hex("fip", fip)
            .hex("fdp", fdp)
            .hex("mxcsr", mxcsr)
            .hex("mxcsr_mask", mxcsr_mask)
            .hex("xstate_bv", xstate_bv)
            .hex("xcomp_bv", xcomp_bv);

        for (st, name) in st
            .iter()
            .zip(["st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7"])
        {
            resp.field(name, st);
        }

        for (xmm, name) in xmm.iter().zip([
            "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9",
            "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
        ]) {
            resp.field(name, xmm);
        }
    }
}

impl StateElement<X86PartitionCapabilities, X86VpInfo> for Xsave {
    fn is_present(_caps: &X86PartitionCapabilities) -> bool {
        true
    }

    fn at_reset(caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
        let mut data = vec![0; caps.xsave.compact_len as usize];
        *XsaveHeader::mut_from_prefix(&mut data[XSAVE_LEGACY_LEN..])
            .unwrap()
            .0 = XsaveHeader {
            // TODO: zerocopy: ref-from-prefix: use-rest-of-range (https://github.com/microsoft/openvmm/issues/759)
            xstate_bv: 0,
            xcomp_bv: XCOMP_COMPRESSED | caps.xsave.features | caps.xsave.supervisor_features,
            reserved: [0; 6],
        };
        Self::from_compact(&data, caps)
    }
}

#[derive(PartialEq, Eq, Clone, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
pub struct Apic {
    #[inspect(hex)]
    #[mesh(1)]
    pub apic_base: u64,
    #[inspect(with = "ApicRegisters::from")]
    #[mesh(2)]
    pub registers: [u32; 64],
    #[inspect(with = "|x| inspect::iter_by_index(x.iter().map(inspect::AsHex))")]
    #[mesh(3)]
    pub auto_eoi: [u32; 8],
}

impl Debug for Apic {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self {
            apic_base,
            registers,
            auto_eoi,
        } = self;
        f.debug_struct("Apic")
            .field("apic_base", &format_args!("{:#x}", apic_base))
            .field("registers", &format_args!("{:#x?}", registers))
            .field("auto_eoi", &format_args!("{:#x?}", auto_eoi))
            .finish()
    }
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes, Inspect)]
pub struct ApicRegisters {
    #[inspect(skip)]
    pub reserved_0: [u32; 2],
    #[inspect(hex)]
    pub id: u32,
    #[inspect(hex)]
    pub version: u32,
    #[inspect(skip)]
    pub reserved_4: [u32; 4],
    #[inspect(hex)]
    pub tpr: u32, // Task Priority Register
    #[inspect(hex)]
    pub apr: u32, // Arbitration Priority Register
    #[inspect(hex)]
    pub ppr: u32, // Processor Priority Register
    #[inspect(hex)]
    pub eoi: u32, // End Of Interrupt Register
    #[inspect(hex)]
    pub rrd: u32, // Remote Read Register
    #[inspect(hex)]
    pub ldr: u32, // Logical Destination Register
    #[inspect(hex)]
    pub dfr: u32, // Destination Format Register
    #[inspect(hex)]
    pub svr: u32, // Spurious Interrupt Vector
    #[inspect(with = "|x| inspect::iter_by_index(x.iter().map(inspect::AsHex))")]
    pub isr: [u32; 8], // In-Service Register
    #[inspect(with = "|x| inspect::iter_by_index(x.iter().map(inspect::AsHex))")]
    pub tmr: [u32; 8], // Trigger Mode Register
    #[inspect(with = "|x| inspect::iter_by_index(x.iter().map(inspect::AsHex))")]
    pub irr: [u32; 8], // Interrupt Request Register
    #[inspect(hex)]
    pub esr: u32, // Error Status Register
    #[inspect(skip)]
    pub reserved_29: [u32; 6],
    #[inspect(hex)]
    pub lvt_cmci: u32,
    #[inspect(with = "|x| inspect::iter_by_index(x.iter().map(inspect::AsHex))")]
    pub icr: [u32; 2], // Interrupt Command Register
    #[inspect(hex)]
    pub lvt_timer: u32,
    #[inspect(hex)]
    pub lvt_thermal: u32,
    #[inspect(hex)]
    pub lvt_pmc: u32,
    #[inspect(hex)]
    pub lvt_lint0: u32,
    #[inspect(hex)]
    pub lvt_lint1: u32,
    #[inspect(hex)]
    pub lvt_error: u32,
    #[inspect(hex)]
    pub timer_icr: u32, // Initial Count Register
    #[inspect(hex)]
    pub timer_ccr: u32, // Current Count Register
    #[inspect(skip)]
    pub reserved_3a: [u32; 4],
    #[inspect(hex)]
    pub timer_dcr: u32, // Divide Configuration Register
    #[inspect(skip)]
    pub reserved_3f: u32,
}

const _: () = assert!(size_of::<ApicRegisters>() == 0x100);

impl From<&'_ [u32; 64]> for ApicRegisters {
    fn from(value: &'_ [u32; 64]) -> Self {
        Self::read_from_bytes(value.as_bytes()).unwrap()
    }
}

impl From<ApicRegisters> for [u32; 64] {
    fn from(value: ApicRegisters) -> Self {
        Self::read_from_bytes(value.as_bytes()).unwrap()
    }
}

#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
struct ApicRegister {
    value: u32,
    zero: [u32; 3],
}

// The IRR bit number corresponding to NMI pending in the Hyper-V exo APIC saved
// state.
const HV_IRR_NMI_PENDING_SHIFT: u32 = 2;

impl Apic {
    pub fn as_page(&self) -> [u8; 1024] {
        let mut bytes = [0; 1024];
        self.registers
            .map(|value| ApicRegister {
                value,
                zero: [0; 3],
            })
            .write_to(bytes.as_mut_slice())
            .unwrap();
        bytes
    }

    /// Convert from an APIC page.
    ///
    /// N.B. The MS hypervisor's APIC page format includes a non-architectural
    /// NMI pending bit that should be stripped first.
    pub fn from_page(apic_base: u64, page: &[u8; 1024]) -> Self {
        let registers = <[ApicRegister; 64]>::read_from_bytes(page.as_slice()).unwrap();
        Self {
            apic_base,
            registers: registers.map(|reg| reg.value),
            auto_eoi: [0; 8],
        }
    }
}
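
// A minimal round-trip sketch, assuming `apic` is an `Apic` value: `from_page`
// only recovers the architectural registers, so `auto_eoi` comes back zeroed
// and must be restored separately if needed.
//
//     let page = apic.as_page();
//     let restored = Apic::from_page(apic.apic_base, &page);
//     assert_eq!(restored.registers, apic.registers);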

impl StateElement<X86PartitionCapabilities, X86VpInfo> for Apic {
    fn is_present(_caps: &X86PartitionCapabilities) -> bool {
        true
    }

    fn at_reset(caps: &X86PartitionCapabilities, vp_info: &X86VpInfo) -> Self {
        let x2apic = caps.x2apic_enabled;

        let mut regs = ApicRegisters::new_zeroed();
        regs.id = if x2apic {
            vp_info.apic_id
        } else {
            vp_info.apic_id << 24
        };
        regs.version = ApicVersion::new()
            .with_version(0x14)
            .with_max_lvt_entry(5)
            .into();
        if x2apic {
            regs.ldr = ((vp_info.apic_id << 12) & 0xffff0000) | (1 << (vp_info.apic_id & 0xf));
        } else {
            regs.dfr = !0;
        }
        regs.svr = 0xff;
        regs.lvt_timer = 0x10000;
        regs.lvt_thermal = 0x10000;
        regs.lvt_pmc = 0x10000;
        regs.lvt_lint0 = 0x10000;
        regs.lvt_lint1 = 0x10000;
        regs.lvt_error = 0x10000;

        let apic_base = ApicBase::new()
            .with_base_page(APIC_BASE_PAGE)
            .with_bsp(vp_info.base.is_bsp())
            .with_x2apic(x2apic)
            .with_enable(true);

        Apic {
            apic_base: apic_base.into(),
            registers: regs.into(),
            auto_eoi: [0; 8],
        }
    }

    fn can_compare(caps: &X86PartitionCapabilities) -> bool {
        // If a partition (i.e., KVM) cannot freeze time, one of the APIC timer
        // values will continue counting up after restore. For now, disallow
        // comparing the whole Apic structure in that case.
        caps.can_freeze_time
    }
}

/// Sets the non-architectural Hyper-V NMI pending bit in the APIC page.
pub fn set_hv_apic_nmi_pending(page: &mut [u8], pending: bool) {
    page[0x200] &= !(1 << HV_IRR_NMI_PENDING_SHIFT);
    page[0x200] |= (pending as u8) << HV_IRR_NMI_PENDING_SHIFT;
}

/// Gets the non-architectural Hyper-V NMI pending bit from the APIC page.
pub fn hv_apic_nmi_pending(page: &[u8]) -> bool {
    page[0x200] & (1 << HV_IRR_NMI_PENDING_SHIFT) != 0
}
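
// A minimal usage sketch, assuming `page` is a mutable `[u8; 1024]` copy of the
// hypervisor's APIC page and `apic_base` is a hypothetical MSR value: capture
// and strip the non-architectural NMI pending bit before converting the page.
//
//     let nmi_pending = hv_apic_nmi_pending(&page);
//     set_hv_apic_nmi_pending(&mut page, false);
//     let apic = Apic::from_page(apic_base, &page);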
1175
1176#[derive(Debug, Default, PartialEq, Eq, Protobuf, Inspect)]
1177#[mesh(package = "virt.x86")]
1178pub struct Xcr0 {
1179    #[mesh(1)]
1180    #[inspect(hex)]
1181    pub value: u64,
1182}
1183
1184impl HvRegisterState<HvX64RegisterName, 1> for Xcr0 {
1185    fn names(&self) -> &'static [HvX64RegisterName; 1] {
1186        &[HvX64RegisterName::Xfem]
1187    }
1188
1189    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1190        for (dest, src) in it.zip([self.value]) {
1191            *dest = src.into();
1192        }
1193    }
1194
1195    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1196        for (src, dest) in it.zip([&mut self.value]) {
1197            *dest = src.as_u64();
1198        }
1199    }
1200}
1201
1202impl StateElement<X86PartitionCapabilities, X86VpInfo> for Xcr0 {
1203    fn is_present(caps: &X86PartitionCapabilities) -> bool {
1204        caps.xsave.features != 0
1205    }
1206
1207    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1208        Self { value: 1 }
1209    }
1210}
1211
1212#[derive(Debug, Default, PartialEq, Eq, Protobuf, Inspect)]
1213#[mesh(package = "virt.x86")]
1214pub struct Xss {
1215    #[mesh(1)]
1216    #[inspect(hex)]
1217    pub value: u64,
1218}
1219
1220impl HvRegisterState<HvX64RegisterName, 1> for Xss {
1221    fn names(&self) -> &'static [HvX64RegisterName; 1] {
1222        &[HvX64RegisterName::Xss]
1223    }
1224
1225    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1226        for (dest, src) in it.zip([self.value]) {
1227            *dest = src.into();
1228        }
1229    }
1230
1231    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1232        for (src, dest) in it.zip([&mut self.value]) {
1233            *dest = src.as_u64();
1234        }
1235    }
1236}
1237
1238impl StateElement<X86PartitionCapabilities, X86VpInfo> for Xss {
1239    fn is_present(caps: &X86PartitionCapabilities) -> bool {
1240        caps.xsave.supervisor_features != 0
1241    }
1242
1243    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1244        Self { value: 0 }
1245    }
1246}
1247
1248#[repr(C)]
1249#[derive(Default, Debug, PartialEq, Eq, Protobuf, Inspect)]
1250#[mesh(package = "virt.x86")]
1251pub struct Pat {
1252    #[mesh(1)]
1253    #[inspect(hex)]
1254    pub value: u64,
1255}
1256
1257impl HvRegisterState<HvX64RegisterName, 1> for Pat {
1258    fn names(&self) -> &'static [HvX64RegisterName; 1] {
1259        &[HvX64RegisterName::Pat]
1260    }
1261
1262    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1263        for (dest, src) in it.zip([self.value]) {
1264            *dest = src.into();
1265        }
1266    }
1267
1268    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1269        for (src, dest) in it.zip([&mut self.value]) {
1270            *dest = src.as_u64();
1271        }
1272    }
1273}
1274
1275impl StateElement<X86PartitionCapabilities, X86VpInfo> for Pat {
1276    fn is_present(_caps: &X86PartitionCapabilities) -> bool {
1277        true
1278    }
1279
1280    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1281        Self {
1282            value: X86X_MSR_DEFAULT_PAT,
1283        }
1284    }
1285}
1286
1287#[repr(C)]
1288#[derive(Default, Debug, PartialEq, Eq, Protobuf, Inspect)]
1289#[mesh(package = "virt.x86")]
1290pub struct Mtrrs {
1291    #[mesh(1)]
1292    #[inspect(hex)]
1293    pub msr_mtrr_def_type: u64,
1294    #[mesh(2)]
1295    #[inspect(with = "|x| inspect::iter_by_index(x.iter().map(inspect::AsHex))")]
1296    pub fixed: [u64; 11],
1297    #[mesh(3)]
1298    #[inspect(with = "|x| inspect::iter_by_index(x.iter().map(inspect::AsHex))")]
1299    pub variable: [u64; 16],
1300}
1301
1302impl HvRegisterState<HvX64RegisterName, 28> for Mtrrs {
1303    fn names(&self) -> &'static [HvX64RegisterName; 28] {
1304        &[
1305            HvX64RegisterName::MsrMtrrDefType,
1306            HvX64RegisterName::MsrMtrrFix64k00000,
1307            HvX64RegisterName::MsrMtrrFix16k80000,
1308            HvX64RegisterName::MsrMtrrFix16kA0000,
1309            HvX64RegisterName::MsrMtrrFix4kC0000,
1310            HvX64RegisterName::MsrMtrrFix4kC8000,
1311            HvX64RegisterName::MsrMtrrFix4kD0000,
1312            HvX64RegisterName::MsrMtrrFix4kD8000,
1313            HvX64RegisterName::MsrMtrrFix4kE0000,
1314            HvX64RegisterName::MsrMtrrFix4kE8000,
1315            HvX64RegisterName::MsrMtrrFix4kF0000,
1316            HvX64RegisterName::MsrMtrrFix4kF8000,
1317            HvX64RegisterName::MsrMtrrPhysBase0,
1318            HvX64RegisterName::MsrMtrrPhysMask0,
1319            HvX64RegisterName::MsrMtrrPhysBase1,
1320            HvX64RegisterName::MsrMtrrPhysMask1,
1321            HvX64RegisterName::MsrMtrrPhysBase2,
1322            HvX64RegisterName::MsrMtrrPhysMask2,
1323            HvX64RegisterName::MsrMtrrPhysBase3,
1324            HvX64RegisterName::MsrMtrrPhysMask3,
1325            HvX64RegisterName::MsrMtrrPhysBase4,
1326            HvX64RegisterName::MsrMtrrPhysMask4,
1327            HvX64RegisterName::MsrMtrrPhysBase5,
1328            HvX64RegisterName::MsrMtrrPhysMask5,
1329            HvX64RegisterName::MsrMtrrPhysBase6,
1330            HvX64RegisterName::MsrMtrrPhysMask6,
1331            HvX64RegisterName::MsrMtrrPhysBase7,
1332            HvX64RegisterName::MsrMtrrPhysMask7,
1333        ]
1334    }
1335
1336    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1337        for (dest, src) in it.zip(
1338            [self.msr_mtrr_def_type]
1339                .into_iter()
1340                .chain(self.fixed)
1341                .chain(self.variable),
1342        ) {
1343            *dest = src.into();
1344        }
1345    }
1346
1347    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1348        for (src, dest) in it.zip(
1349            [&mut self.msr_mtrr_def_type]
1350                .into_iter()
1351                .chain(&mut self.fixed)
1352                .chain(&mut self.variable),
1353        ) {
1354            *dest = src.as_u64();
1355        }
1356    }
1357}
1358
1359impl StateElement<X86PartitionCapabilities, X86VpInfo> for Mtrrs {
1360    fn is_present(_caps: &X86PartitionCapabilities) -> bool {
1361        true
1362    }
1363
1364    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1365        Self {
1366            msr_mtrr_def_type: 0,
1367            fixed: [0; 11],
1368            variable: [0; 16],
1369        }
1370    }
1371}
1372
1373#[repr(C)]
1374#[derive(Default, Debug, PartialEq, Eq, Protobuf, Inspect)]
1375#[mesh(package = "virt.x86")]
1376pub struct VirtualMsrs {
1377    #[mesh(1)]
1378    #[inspect(hex)]
1379    pub kernel_gs_base: u64,
1380    #[mesh(2)]
1381    #[inspect(hex)]
1382    pub sysenter_cs: u64,
1383    #[mesh(3)]
1384    #[inspect(hex)]
1385    pub sysenter_eip: u64,
1386    #[mesh(4)]
1387    #[inspect(hex)]
1388    pub sysenter_esp: u64,
1389    #[mesh(5)]
1390    #[inspect(hex)]
1391    pub star: u64,
1392    #[mesh(6)]
1393    #[inspect(hex)]
1394    pub lstar: u64,
1395    #[mesh(7)]
1396    #[inspect(hex)]
1397    pub cstar: u64,
1398    #[mesh(8)]
1399    #[inspect(hex)]
1400    pub sfmask: u64,
1401}
1402
1403impl HvRegisterState<HvX64RegisterName, 8> for VirtualMsrs {
1404    fn names(&self) -> &'static [HvX64RegisterName; 8] {
1405        &[
1406            HvX64RegisterName::KernelGsBase,
1407            HvX64RegisterName::SysenterCs,
1408            HvX64RegisterName::SysenterEsp,
1409            HvX64RegisterName::SysenterEip,
1410            HvX64RegisterName::Star,
1411            HvX64RegisterName::Lstar,
1412            HvX64RegisterName::Cstar,
1413            HvX64RegisterName::Sfmask,
1414        ]
1415    }
1416
1417    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1418        for (dest, src) in it.zip([
1419            self.kernel_gs_base,
1420            self.sysenter_cs,
1421            self.sysenter_eip,
1422            self.sysenter_esp,
1423            self.star,
1424            self.lstar,
1425            self.cstar,
1426            self.sfmask,
1427        ]) {
1428            *dest = src.into();
1429        }
1430    }
1431
1432    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1433        for (src, dest) in it.zip([
1434            &mut self.kernel_gs_base,
1435            &mut self.sysenter_cs,
1436            &mut self.sysenter_eip,
1437            &mut self.sysenter_esp,
1438            &mut self.star,
1439            &mut self.lstar,
1440            &mut self.cstar,
1441            &mut self.sfmask,
1442        ]) {
1443            *dest = src.as_u64();
1444        }
1445    }
1446}
1447
1448impl StateElement<X86PartitionCapabilities, X86VpInfo> for VirtualMsrs {
1449    fn is_present(_caps: &X86PartitionCapabilities) -> bool {
1450        true
1451    }
1452
1453    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1454        Self {
1455            kernel_gs_base: 0,
1456            sysenter_cs: 0,
1457            sysenter_eip: 0,
1458            sysenter_esp: 0,
1459            star: 0,
1460            lstar: 0,
1461            cstar: 0,
1462            sfmask: 0,
1463        }
1464    }
1465}
1466
1467#[repr(C)]
1468#[derive(Default, Debug, PartialEq, Eq, Protobuf, Inspect)]
1469#[mesh(package = "virt.x86")]
1470pub struct TscAux {
1471    #[mesh(1)]
1472    #[inspect(hex)]
1473    pub value: u64,
1474}
1475
1476impl HvRegisterState<HvX64RegisterName, 1> for TscAux {
1477    fn names(&self) -> &'static [HvX64RegisterName; 1] {
1478        &[HvX64RegisterName::TscAux]
1479    }
1480
1481    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1482        for (dest, src) in it.zip([self.value]) {
1483            *dest = src.into();
1484        }
1485    }
1486
1487    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1488        for (src, dest) in it.zip([&mut self.value]) {
1489            *dest = src.as_u64();
1490        }
1491    }
1492}
1493
1494impl StateElement<X86PartitionCapabilities, X86VpInfo> for TscAux {
1495    fn is_present(caps: &X86PartitionCapabilities) -> bool {
1496        caps.tsc_aux
1497    }
1498
1499    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1500        Default::default()
1501    }
1502}
1503
1504#[repr(C)]
1505#[derive(Default, Debug, PartialEq, Eq, Protobuf, Inspect)]
1506#[mesh(package = "virt.x86")]
1507pub struct Tsc {
1508    #[mesh(1)]
1509    #[inspect(hex)]
1510    pub value: u64,
1511}
1512
1513impl HvRegisterState<HvX64RegisterName, 1> for Tsc {
1514    fn names(&self) -> &'static [HvX64RegisterName; 1] {
1515        &[HvX64RegisterName::Tsc]
1516    }
1517
1518    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1519        for (dest, src) in it.zip([self.value]) {
1520            *dest = src.into();
1521        }
1522    }
1523
1524    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1525        for (src, dest) in it.zip([&mut self.value]) {
1526            *dest = src.as_u64();
1527        }
1528    }
1529}
1530
1531impl StateElement<X86PartitionCapabilities, X86VpInfo> for Tsc {
1532    fn is_present(_caps: &X86PartitionCapabilities) -> bool {
1533        true
1534    }
1535
1536    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1537        Self { value: 0 }
1538    }
1539
1540    fn can_compare(caps: &X86PartitionCapabilities) -> bool {
1541        caps.can_freeze_time
1542    }
1543}
1544
1545#[repr(C)]
1546#[derive(Default, Debug, PartialEq, Eq, Protobuf, Inspect)]
1547#[mesh(package = "virt.x86")]
1548pub struct Cet {
1549    #[mesh(1)]
1550    #[inspect(hex)]
1551    pub scet: u64,
1552    // Ucet is part of xsave state.
1553}
1554
1555impl HvRegisterState<HvX64RegisterName, 1> for Cet {
1556    fn names(&self) -> &'static [HvX64RegisterName; 1] {
1557        &[HvX64RegisterName::SCet]
1558    }
1559
1560    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1561        for (dest, src) in it.zip([self.scet]) {
1562            *dest = src.into();
1563        }
1564    }
1565
1566    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1567        for (src, dest) in it.zip([&mut self.scet]) {
1568            *dest = src.as_u64();
1569        }
1570    }
1571}
1572
1573impl StateElement<X86PartitionCapabilities, X86VpInfo> for Cet {
1574    fn is_present(caps: &X86PartitionCapabilities) -> bool {
1575        caps.cet
1576    }
1577
1578    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1579        Self { scet: 0 }
1580    }
1581}
1582
1583#[repr(C)]
1584#[derive(Default, Debug, PartialEq, Eq, Protobuf, Inspect)]
1585#[mesh(package = "virt.x86")]
1586pub struct CetSs {
1587    #[mesh(1)]
1588    #[inspect(hex)]
1589    pub ssp: u64,
1590    #[mesh(2)]
1591    #[inspect(hex)]
1592    pub interrupt_ssp_table_addr: u64,
1593    // Plx_ssp are part of xsave state.
1594}
1595
1596impl HvRegisterState<HvX64RegisterName, 2> for CetSs {
1597    fn names(&self) -> &'static [HvX64RegisterName; 2] {
1598        &[
1599            HvX64RegisterName::Ssp,
1600            HvX64RegisterName::InterruptSspTableAddr,
1601        ]
1602    }
1603
1604    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1605        for (dest, src) in it.zip([self.ssp, self.interrupt_ssp_table_addr]) {
1606            *dest = src.into();
1607        }
1608    }
1609
1610    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1611        for (src, dest) in it.zip([&mut self.ssp, &mut self.interrupt_ssp_table_addr]) {
1612            *dest = src.as_u64();
1613        }
1614    }
1615}
1616
1617impl StateElement<X86PartitionCapabilities, X86VpInfo> for CetSs {
1618    fn is_present(caps: &X86PartitionCapabilities) -> bool {
1619        caps.cet_ss
1620    }
1621
1622    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1623        Default::default()
1624    }
1625}
1626
1627#[repr(C)]
1628#[derive(Debug, Default, PartialEq, Eq, Protobuf, Inspect)]
1629#[mesh(package = "virt.x86")]
1630pub struct SyntheticMsrs {
1631    #[mesh(1)]
1632    #[inspect(hex)]
1633    pub vp_assist_page: u64,
1634    #[mesh(2)]
1635    #[inspect(hex)]
1636    pub scontrol: u64,
1637    #[mesh(3)]
1638    #[inspect(hex)]
1639    pub siefp: u64,
1640    #[mesh(4)]
1641    #[inspect(hex)]
1642    pub simp: u64,
1643    #[mesh(5)]
1644    #[inspect(with = "|x| inspect::iter_by_index(x.iter().map(inspect::AsHex))")]
1645    pub sint: [u64; 16],
1646}
1647
1648impl HvRegisterState<HvX64RegisterName, 20> for SyntheticMsrs {
1649    fn names(&self) -> &'static [HvX64RegisterName; 20] {
1650        &[
1651            HvX64RegisterName::VpAssistPage,
1652            HvX64RegisterName::Scontrol,
1653            HvX64RegisterName::Sifp,
1654            HvX64RegisterName::Sipp,
1655            HvX64RegisterName::Sint0,
1656            HvX64RegisterName::Sint1,
1657            HvX64RegisterName::Sint2,
1658            HvX64RegisterName::Sint3,
1659            HvX64RegisterName::Sint4,
1660            HvX64RegisterName::Sint5,
1661            HvX64RegisterName::Sint6,
1662            HvX64RegisterName::Sint7,
1663            HvX64RegisterName::Sint8,
1664            HvX64RegisterName::Sint9,
1665            HvX64RegisterName::Sint10,
1666            HvX64RegisterName::Sint11,
1667            HvX64RegisterName::Sint12,
1668            HvX64RegisterName::Sint13,
1669            HvX64RegisterName::Sint14,
1670            HvX64RegisterName::Sint15,
1671        ]
1672    }
1673    fn get_values<'a>(&self, it: impl Iterator<Item = &'a mut HvRegisterValue>) {
1674        for (dest, src) in it.zip(
1675            [self.vp_assist_page, self.scontrol, self.siefp, self.simp]
1676                .into_iter()
1677                .chain(self.sint),
1678        ) {
1679            *dest = src.into();
1680        }
1681    }
1682
1683    fn set_values(&mut self, it: impl Iterator<Item = HvRegisterValue>) {
1684        for (src, dest) in it.zip(
1685            [
1686                &mut self.vp_assist_page,
1687                &mut self.scontrol,
1688                &mut self.siefp,
1689                &mut self.simp,
1690            ]
1691            .into_iter()
1692            .chain(&mut self.sint),
1693        ) {
1694            *dest = src.as_u64();
1695        }
1696    }
1697}
1698
1699impl StateElement<X86PartitionCapabilities, X86VpInfo> for SyntheticMsrs {
1700    fn is_present(caps: &X86PartitionCapabilities) -> bool {
1701        caps.hv1
1702    }
1703
1704    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
1705        Self {
1706            vp_assist_page: 0,
            scontrol: 1,
            siefp: 0,
            simp: 0,
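            // Each SINT resets to 0x10000: masked (bit 16 set), vector 0.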
            sint: [0x10000; 16],
        }
    }
}
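
// A minimal round-trip sketch (an assumed test, not part of the original file)
// of how the `HvRegisterState` impl above is used: `get_values` writes the
// registers out in the order given by `names`, and `set_values` reads them
// back in the same order, so a round trip reproduces the state.
#[cfg(test)]
mod synthetic_msrs_roundtrip_sketch {
    use super::*;

    #[test]
    fn get_set_round_trip() {
        let src = SyntheticMsrs {
            vp_assist_page: 0x1000,
            scontrol: 1,
            siefp: 0x2000,
            simp: 0x3000,
            sint: [0x10000; 16],
        };
        // One slot per register named by `names` (20 total).
        let mut values: [HvRegisterValue; 20] = std::array::from_fn(|_| 0u64.into());
        src.get_values(values.iter_mut());
        let mut dst = SyntheticMsrs::default();
        dst.set_values(values.into_iter());
        assert_eq!(src, dst);
    }
}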

#[derive(Default, Debug, Copy, Clone, Eq, PartialEq, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
pub struct SynicTimer {
    #[mesh(1)]
    #[inspect(hex)]
    pub config: u64,
    #[mesh(2)]
    #[inspect(hex)]
    pub count: u64,
    #[mesh(3)]
    #[inspect(hex)]
    pub adjustment: u64,
    #[mesh(4)]
    #[inspect(hex)]
    pub undelivered_message_expiration_time: Option<u64>,
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
pub struct SynicTimers {
    #[mesh(1)]
    #[inspect(iter_by_index)]
    pub timers: [SynicTimer; 4],
}

impl SynicTimers {
    pub fn as_hv(&self) -> hvdef::HvSyntheticTimersState {
        let timers = self.timers.map(|timer| hvdef::HvStimerState {
            undelivered_message_pending: timer.undelivered_message_expiration_time.is_some().into(),
            reserved: 0,
            config: timer.config,
            count: timer.count,
            adjustment: timer.adjustment,
            undelivered_expiration_time: timer.undelivered_message_expiration_time.unwrap_or(0),
        });

        hvdef::HvSyntheticTimersState {
            timers,
            reserved: [0; 5],
        }
    }

    pub fn from_hv(state: hvdef::HvSyntheticTimersState) -> Self {
        let timers = state.timers.map(|timer| SynicTimer {
            config: timer.config,
            count: timer.count,
            adjustment: timer.adjustment,
            undelivered_message_expiration_time: (timer.undelivered_message_pending & 1 != 0)
                .then_some(timer.undelivered_expiration_time),
        });
        Self { timers }
    }
}

impl StateElement<X86PartitionCapabilities, X86VpInfo> for SynicTimers {
    fn is_present(caps: &X86PartitionCapabilities) -> bool {
        caps.hv1
    }

    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
        Self {
            timers: [SynicTimer::default(); 4],
        }
    }

    fn can_compare(_caps: &X86PartitionCapabilities) -> bool {
        // These can't be compared, since the hypervisor may choose to
        // immediately deliver the undelivered message.
        false
    }
}
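
// A minimal sketch (an assumed test, not part of the original file) of the
// hypervisor-format conversion above: `as_hv` folds the optional expiration
// time into the pending-flag/expiration-time pair, and `from_hv` recovers it,
// so converting to the hypervisor representation and back preserves the state.
#[cfg(test)]
mod synic_timers_roundtrip_sketch {
    use super::*;

    #[test]
    fn hv_round_trip_preserves_timers() {
        let mut state = SynicTimers {
            timers: [SynicTimer::default(); 4],
        };
        state.timers[0] = SynicTimer {
            config: 0x30001,
            count: 1234,
            adjustment: 5,
            undelivered_message_expiration_time: Some(0xdead_beef),
        };
        assert_eq!(SynicTimers::from_hv(state.as_hv()), state);
    }
}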

#[derive(Debug, Default, PartialEq, Eq, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
pub struct SynicMessageQueues {
    #[mesh(1)]
    #[inspect(with = "|x| inspect::iter_by_index(x.iter().map(Vec::len))")]
    pub queues: [Vec<[u8; HV_MESSAGE_SIZE]>; 16],
}

impl StateElement<X86PartitionCapabilities, X86VpInfo> for SynicMessageQueues {
    fn is_present(caps: &X86PartitionCapabilities) -> bool {
        caps.hv1
    }

    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
        Default::default()
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
#[inspect(skip)]
pub struct SynicMessagePage {
    #[mesh(1)]
    pub data: [u8; 4096],
}

impl StateElement<X86PartitionCapabilities, X86VpInfo> for SynicMessagePage {
    fn is_present(caps: &X86PartitionCapabilities) -> bool {
        caps.hv1
    }

    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
        Self { data: [0; 4096] }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Protobuf, Inspect)]
#[mesh(package = "virt.x86")]
#[inspect(skip)]
pub struct SynicEventFlagsPage {
    #[mesh(1)]
    pub data: [u8; 4096],
}

impl StateElement<X86PartitionCapabilities, X86VpInfo> for SynicEventFlagsPage {
    fn is_present(caps: &X86PartitionCapabilities) -> bool {
        caps.hv1
    }

    fn at_reset(_caps: &X86PartitionCapabilities, _vp_info: &X86VpInfo) -> Self {
        Self { data: [0; 4096] }
    }
}

state_trait! {
    "Per-VP state",
    AccessVpState,
    X86PartitionCapabilities,
    X86VpInfo,
    VpSavedState,
    "virt.x86",
    (1, "registers", registers, set_registers, Registers),
    (2, "activity", activity, set_activity, Activity),
    (3, "xsave", xsave, set_xsave, Xsave),
    (4, "apic", apic, set_apic, Apic),
    (5, "xcr", xcr, set_xcr, Xcr0),
    (6, "xss", xss, set_xss, Xss),
    (7, "mtrrs", mtrrs, set_mtrrs, Mtrrs),
    (8, "pat", pat, set_pat, Pat),
    (9, "msrs", virtual_msrs, set_virtual_msrs, VirtualMsrs),
    (10, "drs", debug_regs, set_debug_regs, DebugRegisters),
    (11, "tsc", tsc, set_tsc, Tsc),
    (12, "cet", cet, set_cet, Cet),
    (13, "cet_ss", cet_ss, set_cet_ss, CetSs),
    (14, "tsc_aux", tsc_aux, set_tsc_aux, TscAux),

    // Synic state
    (100, "synic", synic_msrs, set_synic_msrs, SyntheticMsrs),
    // The simp page contents must come after synic MSRs so that the SIMP page
    // register is set, but before the message queues and timers in case the
    // hypervisor decides to flush a pending message to the message page during
    // restore.
    (
        101,
        "simp",
        synic_message_page,
        set_synic_message_page,
        SynicMessagePage
    ),
    (
        102,
        "siefp",
        synic_event_flags_page,
        set_synic_event_flags_page,
        SynicEventFlagsPage
    ),
    (
        103,
        "synic_message_queues",
        synic_message_queues,
        set_synic_message_queues,
        SynicMessageQueues
    ),
    (104, "synic_timers", synic_timers, set_synic_timers, SynicTimers),
}

/// Resets register state for an x86 INIT via the APIC.
pub fn x86_init<T: AccessVpState>(access: &mut T, vp_info: &X86VpInfo) -> Result<(), T::Error> {
    // Reset core register and debug register state, but preserve a few bits of cr0.
    let cr0 = access.registers()?.cr0;
    let mut regs = Registers::at_reset(access.caps(), vp_info);
    let cr0_mask = X64_CR0_NW | X64_CR0_CD;
    regs.cr0 = (cr0 & cr0_mask) | (regs.cr0 & !cr0_mask);
    access.set_registers(&regs)?;
    access.set_debug_regs(&StateElement::at_reset(access.caps(), vp_info))?;

    // Reset the APIC state, leaving the APIC base address and APIC ID intact.
    //
    // Note that there may still be pending interrupt requests in the APIC
    // (e.g. an incoming SIPI), which this should not affect.
    let current_apic = access.apic()?;
    let mut apic = Apic::at_reset(access.caps(), vp_info);
    apic.registers[x86defs::apic::ApicRegister::ID.0 as usize] =
        current_apic.registers[x86defs::apic::ApicRegister::ID.0 as usize];
    apic.apic_base = current_apic.apic_base;
    access.set_apic(&apic)?;

    // Enable the wait-for-SIPI state.
    if !vp_info.base.is_bsp() {
        let mut activity = access.activity()?;
        activity.mp_state = MpState::WaitForSipi;
        access.set_activity(&activity)?;
    }

    Ok(())
}
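
// A minimal sketch (an assumed test, not part of the original file) of the cr0
// handling in `x86_init` above: the guest's cache-disable (CD) and
// not-write-through (NW) bits survive the INIT, while every other bit takes
// the reset value. The "reset" value used here is a stand-in for illustration,
// not necessarily what `Registers::at_reset` produces.
#[cfg(test)]
mod x86_init_cr0_sketch {
    use super::*;

    #[test]
    fn init_preserves_cd_and_nw() {
        let reset_cr0 = X64_CR0_ET; // stand-in reset value
        let guest_cr0 = X64_CR0_CD | X64_CR0_NW | 0x1; // caches disabled, PE set
        let cr0_mask = X64_CR0_NW | X64_CR0_CD;
        let cr0_after_init = (guest_cr0 & cr0_mask) | (reset_cr0 & !cr0_mask);
        assert_eq!(cr0_after_init, X64_CR0_CD | X64_CR0_NW | X64_CR0_ET);
    }
}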