// x86defs/lib.rs

// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! Definitions relating to the x86 architecture, including the core CPU and
//! its interrupt controller (APIC).

#![expect(missing_docs)]
#![no_std]
#![forbid(unsafe_code)]

pub mod apic;
pub mod cpuid;
pub mod msi;
pub mod snp;
pub mod tdx;
pub mod vmx;
pub mod xsave;

use bitfield_struct::bitfield;
use open_enum::open_enum;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

// Bits of the CR0 control register.
pub const X64_CR0_PE: u64 = 0x0000000000000001; // protection enable
pub const X64_CR0_MP: u64 = 0x0000000000000002; // math present
pub const X64_CR0_EM: u64 = 0x0000000000000004; // emulate math coprocessor
pub const X64_CR0_TS: u64 = 0x0000000000000008; // task switched
pub const X64_CR0_ET: u64 = 0x0000000000000010; // extension type (80387)
pub const X64_CR0_NE: u64 = 0x0000000000000020; // numeric error
pub const X64_CR0_WP: u64 = 0x0000000000010000; // write protect
pub const X64_CR0_AM: u64 = 0x0000000000040000; // alignment mask
pub const X64_CR0_NW: u64 = 0x0000000020000000; // not write-through
pub const X64_CR0_CD: u64 = 0x0000000040000000; // cache disable
pub const X64_CR0_PG: u64 = 0x0000000080000000; // paging

// Bits of the CR4 control register.
pub const X64_CR4_VME: u64 = 0x0000000000000001; // Virtual 8086 mode extensions
pub const X64_CR4_PVI: u64 = 0x0000000000000002; // Protected mode virtual interrupts
pub const X64_CR4_TSD: u64 = 0x0000000000000004; // Time stamp disable
pub const X64_CR4_DE: u64 = 0x0000000000000008; // Debugging extensions
pub const X64_CR4_PSE: u64 = 0x0000000000000010; // Page size extensions
pub const X64_CR4_PAE: u64 = 0x0000000000000020; // Physical address extensions
pub const X64_CR4_MCE: u64 = 0x0000000000000040; // Machine check enable
pub const X64_CR4_PGE: u64 = 0x0000000000000080; // Page global enable
pub const X64_CR4_PCE: u64 = 0x0000000000000100; // Performance Counter Enable
pub const X64_CR4_FXSR: u64 = 0x0000000000000200; // FXSR used by OS
pub const X64_CR4_XMMEXCPT: u64 = 0x0000000000000400; // XMMI used by OS
pub const X64_CR4_UMIP: u64 = 0x0000000000000800; // UMIP used by OS
pub const X64_CR4_LA57: u64 = 0x0000000000001000; // 5-level paging enabled
pub const X64_CR4_VMXE: u64 = 0x0000000000002000; // VMX enabled
pub const X64_CR4_RWFSGS: u64 = 0x0000000000010000; // RDWRFSGS (FSGSBASE) enabled by OS
pub const X64_CR4_PCIDE: u64 = 0x0000000000020000; // PCID enabled by OS
pub const X64_CR4_OSXSAVE: u64 = 0x0000000000040000; // XSAVE enabled by OS
pub const X64_CR4_SMEP: u64 = 0x0000000000100000; // Supervisor Mode Execution Protection
pub const X64_CR4_SMAP: u64 = 0x0000000000200000; // Supervisor Mode Access Protection
pub const X64_CR4_CET: u64 = 0x0000000000800000; // CET enabled by OS

// Bits of the EFER (extended feature enable register) MSR.
pub const X64_EFER_SCE: u64 = 0x0000000000000001; // Syscall Enable
pub const X64_EFER_LME: u64 = 0x0000000000000100; // Long Mode Enabled
pub const X64_EFER_LMA: u64 = 0x0000000000000400; // Long Mode Active
pub const X64_EFER_NXE: u64 = 0x0000000000000800; // No-execute Enable
pub const X64_EFER_SVME: u64 = 0x0000000000001000; // SVM enable
pub const X64_EFER_FFXSR: u64 = 0x0000000000004000; // Fast save/restore enabled

/// Reset value of the PAT MSR: each 4-entry half repeats the
/// WB(0x06)/WT(0x04)/UC-(0x07)/UC(0x00) pattern.
pub const X86X_MSR_DEFAULT_PAT: u64 = 0x0007040600070406;
/// DR7 with all hardware breakpoints disabled (bit 10 always reads as 1).
pub const X64_EMPTY_DR7: u64 = 0x0000000000000400;

/// Descriptor privilege level for user mode (ring 3).
pub const USER_MODE_DPL: u8 = 3;

/// Attributes for a flat 64-bit (long mode) code segment: present,
/// page granularity, non-system, type 0xb (execute/read, accessed).
pub const X64_DEFAULT_CODE_SEGMENT_ATTRIBUTES: SegmentAttributes = SegmentAttributes::new()
    .with_granularity(true)
    .with_long(true)
    .with_present(true)
    .with_non_system_segment(true)
    .with_segment_type(0xb);
/// Attributes for a flat data segment: present, page granularity,
/// 32-bit default operand size, non-system, type 0x3 (read/write, accessed).
pub const X64_DEFAULT_DATA_SEGMENT_ATTRIBUTES: SegmentAttributes = SegmentAttributes::new()
    .with_granularity(true)
    .with_default(true)
    .with_present(true)
    .with_non_system_segment(true)
    .with_segment_type(0x3);
/// Attributes for a busy 64-bit TSS: present system segment of type 0xb.
pub const X64_BUSY_TSS_SEGMENT_ATTRIBUTES: SegmentAttributes = SegmentAttributes::new()
    .with_present(true)
    .with_segment_type(0xb);

/// Segment descriptor attributes in the packed 16-bit format used for
/// segment register state (the descriptor's attribute bits with the
/// limit bits removed).
#[bitfield(u16)]
#[derive(PartialEq)]
pub struct SegmentAttributes {
    /// Segment type (code/data access bits, or system segment type).
    #[bits(4)]
    pub segment_type: u8,
    /// Set for code/data segments, clear for system segments.
    pub non_system_segment: bool,
    /// Descriptor privilege level (ring 0-3).
    #[bits(2)]
    pub descriptor_privilege_level: u8,
    /// Segment is present.
    pub present: bool,
    #[bits(4)]
    _reserved: u8,
    /// Available for software use.
    pub available: bool,
    /// 64-bit code segment (long mode).
    pub long: bool,
    /// Default operand size (set = 32-bit).
    pub default: bool,
    /// Limit granularity (set = 4 KiB units).
    pub granularity: bool,
}

105impl SegmentAttributes {
106    pub const fn as_bits(&self) -> u16 {
107        self.0
108    }
109}
110
#[cfg(feature = "arbitrary")]
impl<'a> arbitrary::Arbitrary<'a> for SegmentAttributes {
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        // Any 16-bit pattern is a valid attribute encoding.
        u.arbitrary::<u16>().map(Self::from)
    }
}

/// Segment selector (what goes into a segment register).
///
/// Layout: bits 1:0 RPL, bit 2 TI, bits 15:3 descriptor table index.
#[bitfield(u16)]
#[derive(PartialEq, Eq)]
pub struct SegmentSelector {
    #[bits(2)]
    /// Request Privilege Level (ring 0-3, where 0 is the highest)
    pub rpl: u8,
    /// Table indicator: 0 - GDT, 1 - LDT
    pub ti: bool,
    #[bits(13)]
    /// Index in the descriptor table
    pub index: u16,
}

133impl SegmentSelector {
134    pub const fn as_bits(&self) -> u16 {
135        self.0
136    }
137
138    pub fn from_gdt_index(index: u16, rpl: u8) -> Self {
139        Self::new().with_index(index).with_rpl(rpl).with_ti(false)
140    }
141}
142
/// A full segment register: the visible selector plus the hidden
/// descriptor cache state (base, limit, attributes).
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct SegmentRegister {
    /// Segment base address.
    pub base: u64,
    /// Segment limit.
    pub limit: u32,
    /// Visible selector value.
    pub selector: u16,
    /// Packed descriptor attributes.
    pub attributes: SegmentAttributes,
}

#[cfg(feature = "arbitrary")]
impl<'a> arbitrary::Arbitrary<'a> for SegmentRegister {
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        // Draw each field in declaration order so byte consumption from
        // `u` matches the derived form.
        let base = u.arbitrary()?;
        let limit = u.arbitrary()?;
        let selector = u.arbitrary()?;
        let attributes = u.arbitrary()?;
        Ok(Self {
            base,
            limit,
            selector,
            attributes,
        })
    }
}

/// Values for `X86X_IA32_MSR_MISC_ENABLE` MSR.
///
/// Many of these fields are undocumented or underdocumented and do not always
/// have the same meaning across different CPU models. However, this MSR must
/// have appropriate values for Linux to successfully boot.
#[bitfield(u64)]
pub struct MiscEnable {
    /// Bit 0: fast-string (REP MOVS/STOS) enable.
    pub fast_string: bool,
    pub tcc: bool,
    pub x87_compat: bool,
    pub tm1: bool,
    pub split_lock_disable: bool,
    _reserved5: bool,
    pub l3cache_disable: bool,
    /// Bit 7: performance monitoring available.
    pub emon: bool,
    pub suppress_lock: bool,
    pub prefetch_disable: bool,
    /// Bit 10: FERR# multiplexing enable.
    pub ferr: bool,
    /// Bit 11: branch trace storage unavailable.
    pub bts_unavailable: bool,
    /// Bit 12: precise event-based sampling unavailable.
    pub pebs_unavailable: bool,
    pub tm2: bool,
    _reserved14: bool,
    _reserved15: bool,
    /// Bit 16: enhanced Intel SpeedStep enable.
    pub enhanced_speedstep: bool,
    _reserved17: bool,
    /// Bit 18: MONITOR/MWAIT enable.
    pub mwait: bool,
    pub adj_prefetch_disable: bool,
    pub enable_speedstep_lock: bool,
    _reserved21: bool,
    /// Bit 22: limit the maximum CPUID leaf reported.
    pub limit_cpuid: bool,
    /// Bit 23: xTPR message disable.
    pub xtpr_disable: bool,
    pub l1d_context: bool,
    #[bits(39)]
    _reserved: u64,
}

// Architectural (Intel-defined) MSR numbers.
pub const X86X_MSR_TSC: u32 = 0x10;
pub const X86X_IA32_MSR_PLATFORM_ID: u32 = 0x17;
pub const X86X_MSR_APIC_BASE: u32 = 0x1b;
pub const X86X_MSR_EBL_CR_POWERON: u32 = 0x2a;
pub const X86X_IA32_MSR_SMI_COUNT: u32 = 0x34;
pub const X86X_IA32_MSR_FEATURE_CONTROL: u32 = 0x3a;
pub const X86X_MSR_PPIN_CTL: u32 = 0x4e;
pub const X86X_MSR_BIOS_UPDT_TRIG: u32 = 0x79;
pub const X86X_MSR_MC_UPDATE_PATCH_LEVEL: u32 = 0x8b;
pub const X86X_MSR_PLATFORM_INFO: u32 = 0xce;
pub const X86X_MSR_UMWAIT_CONTROL: u32 = 0xe1;
pub const X86X_MSR_MTRR_CAP: u32 = 0xfe;
pub const X86X_MSR_MISC_FEATURE_ENABLES: u32 = 0x140;
pub const X86X_MSR_SYSENTER_CS: u32 = 0x174;
pub const X86X_MSR_SYSENTER_ESP: u32 = 0x175;
pub const X86X_MSR_SYSENTER_EIP: u32 = 0x176;
pub const X86X_MSR_MCG_CAP: u32 = 0x179;
pub const X86X_MSR_MCG_STATUS: u32 = 0x17a;
pub const X86X_IA32_MSR_MISC_ENABLE: u32 = 0x1a0;
// First variable-range MTRR; base/mask pairs follow at 0x200 + 2n.
pub const X86X_MSR_MTRR_PHYSBASE0: u32 = 0x200;
// Fixed-range MTRRs.
pub const X86X_MSR_MTRR_FIX64K_00000: u32 = 0x0250;
pub const X86X_MSR_MTRR_FIX16K_80000: u32 = 0x0258;
pub const X86X_MSR_MTRR_FIX16K_A0000: u32 = 0x0259;
pub const X86X_MSR_MTRR_FIX4K_C0000: u32 = 0x0268;
pub const X86X_MSR_MTRR_FIX4K_C8000: u32 = 0x0269;
pub const X86X_MSR_MTRR_FIX4K_D0000: u32 = 0x026A;
pub const X86X_MSR_MTRR_FIX4K_D8000: u32 = 0x026B;
pub const X86X_MSR_MTRR_FIX4K_E0000: u32 = 0x026C;
pub const X86X_MSR_MTRR_FIX4K_E8000: u32 = 0x026D;
pub const X86X_MSR_MTRR_FIX4K_F0000: u32 = 0x026E;
pub const X86X_MSR_MTRR_FIX4K_F8000: u32 = 0x026F;
pub const X86X_MSR_CR_PAT: u32 = 0x277;
pub const X86X_MSR_MTRR_DEF_TYPE: u32 = 0x2ff;

// XSAVES supervisor state mask MSR.
pub const X86X_MSR_XSS: u32 = 0xda0;

// RAPL energy-reporting MSRs.
pub const X86X_IA32_MSR_RAPL_POWER_UNIT: u32 = 0x606;
pub const X86X_IA32_MSR_PKG_ENERGY_STATUS: u32 = 0x611;
pub const X86X_IA32_MSR_DRAM_ENERGY_STATUS: u32 = 0x619;
pub const X86X_IA32_MSR_PP0_ENERGY_STATUS: u32 = 0x639;

// CET (control-flow enforcement) shadow-stack MSRs.
pub const X86X_MSR_U_CET: u32 = 0x6a0;
pub const X86X_MSR_S_CET: u32 = 0x6a2;
pub const X86X_MSR_PL0_SSP: u32 = 0x6a4;
pub const X86X_MSR_PL1_SSP: u32 = 0x6a5;
pub const X86X_MSR_PL2_SSP: u32 = 0x6a6;
pub const X86X_MSR_PL3_SSP: u32 = 0x6a7;
pub const X86X_MSR_INTERRUPT_SSP_TABLE_ADDR: u32 = 0x6a8;

// AMD64 architectural range (0xC0000000+): syscall entry points, EFER,
// and segment base MSRs.
pub const X86X_MSR_STAR: u32 = 0xC0000081;
pub const X86X_MSR_LSTAR: u32 = 0xC0000082;
pub const X86X_MSR_CSTAR: u32 = 0xC0000083;
pub const X86X_MSR_SFMASK: u32 = 0xC0000084;

pub const X86X_MSR_EFER: u32 = 0xC0000080;
pub const X64_MSR_FS_BASE: u32 = 0xC0000100;
pub const X64_MSR_GS_BASE: u32 = 0xC0000101;
pub const X64_MSR_KERNEL_GS_BASE: u32 = 0xC0000102;

pub const X86X_MSR_TSC_AUX: u32 = 0xC0000103;

// Speculation control and extended feature disable MSRs.
pub const X86X_MSR_SPEC_CTRL: u32 = 0x48;
pub const X86X_IA32_MSR_XFD: u32 = 0x1C4;
pub const X86X_IA32_MSR_XFD_ERR: u32 = 0x1C5;

// AMD-specific MSRs.
pub const X86X_AMD_MSR_PERF_EVT_SEL0: u32 = 0xC0010000;
pub const X86X_AMD_MSR_PERF_EVT_SEL1: u32 = 0xC0010001;
pub const X86X_AMD_MSR_PERF_EVT_SEL2: u32 = 0xC0010002;
pub const X86X_AMD_MSR_PERF_EVT_SEL3: u32 = 0xC0010003;
pub const X86X_AMD_MSR_PERF_CTR0: u32 = 0xC0010004;
pub const X86X_AMD_MSR_PERF_CTR1: u32 = 0xC0010005;
pub const X86X_AMD_MSR_PERF_CTR2: u32 = 0xC0010006;
pub const X86X_AMD_MSR_PERF_CTR3: u32 = 0xC0010007;
pub const X86X_AMD_MSR_SYSCFG: u32 = 0xC0010010;
pub const X86X_AMD_MSR_HW_CFG: u32 = 0xC0010015;
pub const X86X_AMD_MSR_NB_CFG: u32 = 0xC001001F;
pub const X86X_AMD_MSR_VM_CR: u32 = 0xC0010114;
pub const X86X_AMD_MSR_GHCB: u32 = 0xC0010130;
pub const X86X_AMD_MSR_SEV: u32 = 0xC0010131;
pub const X86X_AMD_MSR_OSVW_ID_LENGTH: u32 = 0xc0010140;
pub const X86X_AMD_MSR_OSVW_ID_STATUS: u32 = 0xc0010141;
pub const X86X_AMD_MSR_DE_CFG: u32 = 0xc0011029;

// DR6 debug status register bits.
pub const DR6_BREAKPOINT_MASK: u64 = 0xf; // B0-B3: breakpoint condition detected
pub const DR6_SINGLE_STEP: u64 = 0x4000; // BS: single-step trap

/// The x86 FLAGS/EFLAGS/RFLAGS register.
#[bitfield(u64, default = false)]
#[derive(PartialEq)]
pub struct RFlags {
    // FLAGS
    pub carry: bool,
    /// Bit 1 always reads as 1.
    pub reserved_must_be_1: bool,
    pub parity: bool,
    _reserved1: bool,
    pub adjust: bool,
    _reserved2: bool,
    pub zero: bool,
    pub sign: bool,
    pub trap: bool,
    pub interrupt_enable: bool,
    pub direction: bool,
    pub overflow: bool,
    #[bits(2)]
    pub io_privilege_level: u8,
    pub nested_task: bool,
    pub mode: bool,

    // EFLAGS
    pub resume: bool,
    pub virtual_8086_mode: bool,
    pub alignment_check: bool,
    pub virtual_interrupt: bool,
    pub virtual_interrupt_pending: bool,
    /// ID flag: software's ability to toggle it indicates CPUID support.
    pub cpuid_allowed: bool,
    _reserved3: u8,
    pub aes_key_schedule_loaded: bool,
    _reserved4: bool,

    // RFLAGS
    /// Upper 32 bits are reserved.
    _reserved5: u32,
}

321impl RFlags {
322    /// Returns the reset value of the RFLAGS register.
323    pub fn at_reset() -> Self {
324        Self::new().with_reserved_must_be_1(true)
325    }
326}
327
328impl core::ops::BitAnd<RFlags> for RFlags {
329    type Output = RFlags;
330
331    fn bitand(self, rhs: RFlags) -> Self::Output {
332        RFlags(self.0 & rhs.0)
333    }
334}
335
#[cfg(feature = "arbitrary")]
impl<'a> arbitrary::Arbitrary<'a> for RFlags {
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        // Every 64-bit pattern is representable.
        u.arbitrary::<u64>().map(Self::from)
    }
}

/// A 64-bit IDT (interrupt descriptor table) entry.
#[repr(C)]
#[derive(Debug, Clone, Copy, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct IdtEntry64 {
    /// Bits 15:0 of the handler address.
    pub offset_low: u16,
    /// Code segment selector for the handler.
    pub selector: u16,
    /// Gate type, DPL, present bit, and IST index.
    pub attributes: IdtAttributes,
    /// Bits 31:16 of the handler address.
    pub offset_middle: u16,
    /// Bits 63:32 of the handler address.
    pub offset_high: u32,
    /// Reserved; must be zero.
    pub reserved: u32,
}

/// The 16-bit attribute word of a 64-bit IDT entry.
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct IdtAttributes {
    /// Interrupt stack table index (0 = no stack switch).
    #[bits(3)]
    pub ist: u8,
    #[bits(5)]
    _reserved: u8,
    /// Gate type (0xe = interrupt gate, 0xf = trap gate).
    #[bits(4)]
    pub gate_type: u8,
    _reserved2: bool,
    /// Descriptor privilege level.
    #[bits(2)]
    pub dpl: u8,
    /// Entry is present.
    pub present: bool,
}

/// A legacy 8-byte GDT descriptor.
#[repr(C)]
#[derive(Clone, Copy, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdtEntry {
    /// Bits 15:0 of the segment limit.
    pub limit_low: u16,
    /// Bits 15:0 of the segment base.
    pub base_low: u16,
    /// Bits 23:16 of the segment base.
    pub base_middle: u8,
    /// Type, DPL, and present bits.
    pub attr_low: u8,
    /// Limit bits 19:16 plus flag bits (granularity, etc.).
    pub attr_high: u8,
    /// Bits 31:24 of the segment base.
    pub base_high: u8,
}

/// A 16-byte system descriptor (e.g. a 64-bit TSS descriptor), which
/// occupies two consecutive GDT slots.
#[repr(C)]
#[derive(Clone, Copy, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct LargeGdtEntry {
    /// Bits 15:0 of the segment limit.
    pub limit_low: u16,
    /// Bits 15:0 of the base address.
    pub base_low: u16,
    /// Bits 23:16 of the base address.
    pub base_middle: u8,
    /// Type, DPL, and present bits.
    pub attr_low: u8,
    /// Limit bits 19:16 plus flag bits.
    pub attr_high: u8,
    /// Bits 31:24 of the base address.
    pub base_high: u8,
    /// Bits 63:32 of the base address.
    pub base_upper: u32,
    /// Must be zero.
    pub mbz: u32,
}

394impl LargeGdtEntry {
395    /// Get the large GDT entry as two smaller GDT entries, for building a GDT.
396    pub fn get_gdt_entries(&self) -> [GdtEntry; 2] {
397        let mut entries = [GdtEntry::new_zeroed(); 2];
398        entries.as_mut_bytes().copy_from_slice(self.as_bytes());
399        entries
400    }
401}
402
/// The 64-bit task state segment (104 bytes).
#[repr(C, packed)]
#[derive(Clone, Copy, Immutable, KnownLayout, IntoBytes, FromBytes)]
pub struct Tss64 {
    /// Reserved; must be zero.
    pub _mbz0: u32,
    /// Stack pointers for rings 0-2 (RSP0-RSP2).
    pub rsp: [u64; 3],
    /// Interrupt stack table pointers. `ist[0]` occupies the reserved
    /// qword at offset 0x1c, so `ist[n]` lines up with architectural
    /// IST n (valid n: 1-7).
    pub ist: [u64; 8],
    /// Reserved; must be zero.
    pub _mbz1: u64,
    /// Reserved; must be zero.
    pub _mbz2: u16,
    /// Offset from the TSS base to the I/O permission bitmap.
    pub io_map_base: u16,
}

414open_enum! {
415    pub enum Exception: u8 {
416        DIVIDE_ERROR = 0x0,
417        DEBUG = 0x1,
418        BREAKPOINT = 0x3,
419        OVERFLOW = 0x4,
420        BOUND_RANGE_EXCEEDED = 0x5,
421        INVALID_OPCODE = 0x6,
422        DEVICE_NOT_AVAILABLE = 0x7,
423        DOUBLE_FAULT = 0x8,
424        INVALID_TSS = 0x0A,
425        SEGMENT_NOT_PRESENT = 0x0B,
426        STACK_SEGMENT_FAULT = 0x0C,
427        GENERAL_PROTECTION_FAULT = 0x0D,
428        PAGE_FAULT = 0x0E,
429        FLOATING_POINT_EXCEPTION = 0x10,
430        ALIGNMENT_CHECK = 0x11,
431        MACHINE_CHECK = 0x12,
432        SIMD_FLOATING_POINT_EXCEPTION = 0x13,
433        CONTROL_PROTECTION_EXCEPTION = 0x15,
434        SEV_VMM_COMMUNICATION = 0x1D,
435    }
436}
437
/// The error code pushed by a page fault (#PF).
#[bitfield(u32)]
pub struct PageFaultErrorCode {
    /// Set if the fault was a protection violation on a present page;
    /// clear if the page was not present.
    pub present: bool,
    /// The faulting access was a write.
    pub write: bool,
    /// The faulting access originated in user mode.
    pub user: bool,
    /// A reserved bit was set in a paging structure.
    pub reserved: bool,
    /// The faulting access was an instruction fetch.
    pub fetch: bool,
    #[bits(27)]
    _unused: u32,
}

/// The size of a large (2 MiB) page.
pub const X64_LARGE_PAGE_SIZE: u64 = 0x200000;

/// A long-mode page table entry.
#[bitfield(u64)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Pte {
    pub present: bool,
    pub read_write: bool,
    pub user: bool,
    pub write_through: bool,
    pub cache_disable: bool,
    pub accessed: bool,
    pub dirty: bool,
    pub pat: bool,
    pub global: bool,
    /// Ignored by hardware; available for software use.
    #[bits(3)]
    pub available0: u64,
    /// Physical page frame number (address bits 51:12).
    #[bits(40)]
    pub pfn: u64,
    /// Ignored by hardware; available for software use.
    #[bits(11)]
    pub available1: u64,
    pub no_execute: bool,
}

472impl Pte {
473    pub fn address(&self) -> u64 {
474        self.pfn() << 12
475    }
476
477    pub fn with_address(self, address: u64) -> Self {
478        assert!(address & 0xfff == 0);
479        self.with_pfn(address >> 12)
480    }
481
482    pub fn set_address(&mut self, address: u64) -> &mut Self {
483        *self = self.with_address(address);
484        self
485    }
486}
487
/// A page directory entry mapping a 2 MiB large page.
#[bitfield(u64)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct LargePde {
    pub present: bool,
    pub read_write: bool,
    pub user: bool,
    pub write_through: bool,
    pub cache_disable: bool,
    pub accessed: bool,
    pub dirty: bool,
    /// PS bit: must be set for this entry to map a 2 MiB page.
    pub large_page: bool,
    pub global: bool,
    /// Ignored by hardware; available for software use.
    #[bits(3)]
    pub available0: u64,
    /// PAT bit (at bit 12 for large-page mappings).
    pub pat: bool,
    #[bits(8)]
    _reserved0: u64,
    /// Physical address bits 51:21 of the 2 MiB page.
    #[bits(31)]
    pub large_page_base: u64,
    /// Ignored by hardware; available for software use.
    #[bits(11)]
    pub available1: u64,
    pub no_execute: bool,
}

/// The IA32_MCG_STATUS global machine-check status register.
#[bitfield(u64)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct X86xMcgStatusRegister {
    pub ripv: bool, // Restart IP is valid
    pub eipv: bool, // Error IP is valid
    pub mcip: bool, // Machine check is in progress
    #[bits(61)]
    pub reserved0: u64,
}