1#![expect(missing_docs)]
8#![no_std]
9#![forbid(unsafe_code)]
10
11pub mod apic;
12pub mod cpuid;
13pub mod msi;
14pub mod snp;
15pub mod tdx;
16pub mod vmx;
17pub mod xsave;
18
19use bitfield_struct::bitfield;
20use open_enum::open_enum;
21use zerocopy::FromBytes;
22use zerocopy::FromZeros;
23use zerocopy::Immutable;
24use zerocopy::IntoBytes;
25use zerocopy::KnownLayout;
26
// CR0: processor control flags.
pub const X64_CR0_PE: u64 = 0x0000000000000001; // Protection enable
pub const X64_CR0_MP: u64 = 0x0000000000000002; // Monitor coprocessor
pub const X64_CR0_EM: u64 = 0x0000000000000004; // FPU emulation
pub const X64_CR0_TS: u64 = 0x0000000000000008; // Task switched
pub const X64_CR0_ET: u64 = 0x0000000000000010; // Extension type
pub const X64_CR0_NE: u64 = 0x0000000000000020; // Numeric error reporting
pub const X64_CR0_WP: u64 = 0x0000000000010000; // Write protect in supervisor mode
pub const X64_CR0_AM: u64 = 0x0000000000040000; // Alignment mask
pub const X64_CR0_NW: u64 = 0x0000000020000000; // Not write-through
pub const X64_CR0_CD: u64 = 0x0000000040000000; // Cache disable
pub const X64_CR0_PG: u64 = 0x0000000080000000; // Paging enable

// CR4: processor feature-control flags.
pub const X64_CR4_VME: u64 = 0x0000000000000001; // Virtual-8086 mode extensions
pub const X64_CR4_PVI: u64 = 0x0000000000000002; // Protected-mode virtual interrupts
pub const X64_CR4_TSD: u64 = 0x0000000000000004; // Time stamp disable (ring 0 only RDTSC)
pub const X64_CR4_DE: u64 = 0x0000000000000008; // Debugging extensions
pub const X64_CR4_PSE: u64 = 0x0000000000000010; // Page size extensions
pub const X64_CR4_PAE: u64 = 0x0000000000000020; // Physical address extensions
pub const X64_CR4_MCE: u64 = 0x0000000000000040; // Machine check enable
pub const X64_CR4_PGE: u64 = 0x0000000000000080; // Page global enable
pub const X64_CR4_PCE: u64 = 0x0000000000000100; // Performance counter enable (user RDPMC)
pub const X64_CR4_FXSR: u64 = 0x0000000000000200; // FXSAVE/FXRSTOR support
pub const X64_CR4_XMMEXCPT: u64 = 0x0000000000000400; // Unmasked SIMD FP exceptions
pub const X64_CR4_UMIP: u64 = 0x0000000000000800; // User-mode instruction prevention
pub const X64_CR4_LA57: u64 = 0x0000000000001000; // 5-level paging
pub const X64_CR4_VMXE: u64 = 0x0000000000002000; // VMX enable
pub const X64_CR4_RWFSGS: u64 = 0x0000000000010000; // FSGSBASE instructions
pub const X64_CR4_PCIDE: u64 = 0x0000000000020000; // Process-context identifiers
pub const X64_CR4_OSXSAVE: u64 = 0x0000000000040000; // XSAVE and extended states
pub const X64_CR4_SMEP: u64 = 0x0000000000100000; // Supervisor-mode execution prevention
pub const X64_CR4_SMAP: u64 = 0x0000000000200000; // Supervisor-mode access prevention
pub const X64_CR4_CET: u64 = 0x0000000000800000; // Control-flow enforcement

// EFER: extended feature enable register (MSR 0xC0000080).
pub const X64_EFER_SCE: u64 = 0x0000000000000001; // SYSCALL enable
pub const X64_EFER_LME: u64 = 0x0000000000000100; // Long mode enable
pub const X64_EFER_LMA: u64 = 0x0000000000000400; // Long mode active
pub const X64_EFER_NXE: u64 = 0x0000000000000800; // No-execute enable
pub const X64_EFER_SVME: u64 = 0x0000000000001000; // Secure virtual machine enable
pub const X64_EFER_FFXSR: u64 = 0x0000000000004000; // Fast FXSAVE/FXRSTOR

/// Power-on default value of the IA32_PAT MSR.
pub const X86X_MSR_DEFAULT_PAT: u64 = 0x0007040600070406;
/// DR7 value with all breakpoints disabled (bit 10 is reserved, reads as 1).
pub const X64_EMPTY_DR7: u64 = 0x0000000000000400;

/// Descriptor privilege level for user-mode (ring 3) segments.
pub const USER_MODE_DPL: u8 = 3;
71
72pub const X64_DEFAULT_CODE_SEGMENT_ATTRIBUTES: SegmentAttributes = SegmentAttributes::new()
73 .with_granularity(true)
74 .with_long(true)
75 .with_present(true)
76 .with_non_system_segment(true)
77 .with_segment_type(0xb);
78pub const X64_DEFAULT_DATA_SEGMENT_ATTRIBUTES: SegmentAttributes = SegmentAttributes::new()
79 .with_granularity(true)
80 .with_default(true)
81 .with_present(true)
82 .with_non_system_segment(true)
83 .with_segment_type(0x3);
84pub const X64_BUSY_TSS_SEGMENT_ATTRIBUTES: SegmentAttributes = SegmentAttributes::new()
85 .with_present(true)
86 .with_segment_type(0xb);
87
/// Segment descriptor attributes packed into 16 bits: the descriptor access
/// byte in bits 0-7 and the upper flag nibble (AVL/L/D/G) in bits 12-15.
#[bitfield(u16)]
#[derive(PartialEq)]
pub struct SegmentAttributes {
    /// Descriptor type field (e.g. 0xb = execute/read code, accessed).
    #[bits(4)]
    pub segment_type: u8,
    /// S flag: set for code/data segments, clear for system segments.
    pub non_system_segment: bool,
    /// DPL: descriptor privilege level.
    #[bits(2)]
    pub descriptor_privilege_level: u8,
    /// P flag: segment is present.
    pub present: bool,
    #[bits(4)]
    _reserved: u8,
    /// AVL: available for software use.
    pub available: bool,
    /// L flag: 64-bit code segment.
    pub long: bool,
    /// D/B flag: default operation size.
    pub default: bool,
    /// G flag: limit granularity (4 KiB units when set).
    pub granularity: bool,
}
104
impl SegmentAttributes {
    /// Returns the raw 16-bit attribute encoding.
    pub const fn as_bits(&self) -> u16 {
        self.0
    }
}
110
#[cfg(feature = "arbitrary")]
impl<'a> arbitrary::Arbitrary<'a> for SegmentAttributes {
    /// Builds attributes from an arbitrary raw 16-bit value.
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        u.arbitrary::<u16>().map(Self::from)
    }
}
118
/// A segment selector: a descriptor table index plus privilege information,
/// as loaded into a segment register.
#[bitfield(u16)]
#[derive(PartialEq, Eq)]
pub struct SegmentSelector {
    /// RPL: requested privilege level.
    #[bits(2)]
    pub rpl: u8,
    /// Table indicator: clear selects the GDT, set selects the LDT.
    pub ti: bool,
    /// Descriptor index within the selected table.
    #[bits(13)]
    pub index: u16,
}
132
133impl SegmentSelector {
134 pub const fn as_bits(&self) -> u16 {
135 self.0
136 }
137
138 pub fn from_gdt_index(index: u16, rpl: u8) -> Self {
139 Self::new().with_index(index).with_rpl(rpl).with_ti(false)
140 }
141}
142
/// The full (unpacked) state of a segment register: hidden descriptor cache
/// fields plus the visible selector.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct SegmentRegister {
    /// Segment base address.
    pub base: u64,
    /// Segment limit.
    pub limit: u32,
    /// Visible selector value.
    pub selector: u16,
    /// Cached descriptor attributes.
    pub attributes: SegmentAttributes,
}
150
#[cfg(feature = "arbitrary")]
impl<'a> arbitrary::Arbitrary<'a> for SegmentRegister {
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        // Draw each field in declaration order so the bytes consumed from
        // `u` match the previous implementation exactly.
        let base = u.arbitrary()?;
        let limit = u.arbitrary()?;
        let selector = u.arbitrary()?;
        let attributes = u.arbitrary()?;
        Ok(SegmentRegister {
            base,
            limit,
            selector,
            attributes,
        })
    }
}
162
/// Bit layout of the IA32_MISC_ENABLE MSR (see [`X86X_IA32_MSR_MISC_ENABLE`],
/// 0x1a0). NOTE(review): several of these bits are model-specific rather
/// than architectural — consult the Intel SDM for the exact model before
/// relying on a given bit.
#[bitfield(u64)]
pub struct MiscEnable {
    /// Fast-strings enable.
    pub fast_string: bool,
    pub tcc: bool,
    pub x87_compat: bool,
    pub tm1: bool,
    pub split_lock_disable: bool,
    _reserved5: bool,
    pub l3cache_disable: bool,
    /// Performance monitoring available.
    pub emon: bool,
    pub suppress_lock: bool,
    pub prefetch_disable: bool,
    pub ferr: bool,
    /// Branch trace storage unavailable.
    pub bts_unavailable: bool,
    /// Precise event-based sampling unavailable.
    pub pebs_unavailable: bool,
    pub tm2: bool,
    _reserved14: bool,
    _reserved15: bool,
    pub enhanced_speedstep: bool,
    _reserved17: bool,
    /// MONITOR/MWAIT enable.
    pub mwait: bool,
    pub adj_prefetch_disable: bool,
    pub enable_speedstep_lock: bool,
    _reserved21: bool,
    /// Limit the maximum CPUID leaf reported.
    pub limit_cpuid: bool,
    pub xtpr_disable: bool,
    pub l1d_context: bool,
    #[bits(39)]
    _reserved: u64,
}
198
// Architectural and common Intel MSR numbers.
pub const X86X_MSR_TSC: u32 = 0x10;
pub const X86X_IA32_MSR_PLATFORM_ID: u32 = 0x17;
pub const X86X_MSR_APIC_BASE: u32 = 0x1b;
pub const X86X_MSR_EBL_CR_POWERON: u32 = 0x2a;
pub const X86X_IA32_MSR_SMI_COUNT: u32 = 0x34;
pub const X86X_IA32_MSR_FEATURE_CONTROL: u32 = 0x3a;
pub const X86X_MSR_PPIN_CTL: u32 = 0x4e;
pub const X86X_MSR_BIOS_UPDT_TRIG: u32 = 0x79;
pub const X86X_MSR_MC_UPDATE_PATCH_LEVEL: u32 = 0x8b;
pub const X86X_MSR_PLATFORM_INFO: u32 = 0xce;
pub const X86X_MSR_UMWAIT_CONTROL: u32 = 0xe1;
pub const X86X_MSR_MTRR_CAP: u32 = 0xfe;
pub const X86X_MSR_MISC_FEATURE_ENABLES: u32 = 0x140;
// SYSENTER/SYSEXIT target registers.
pub const X86X_MSR_SYSENTER_CS: u32 = 0x174;
pub const X86X_MSR_SYSENTER_ESP: u32 = 0x175;
pub const X86X_MSR_SYSENTER_EIP: u32 = 0x176;
// Machine check global registers.
pub const X86X_MSR_MCG_CAP: u32 = 0x179;
pub const X86X_MSR_MCG_STATUS: u32 = 0x17a;
pub const X86X_IA32_MSR_MISC_ENABLE: u32 = 0x1a0;
// Memory type range registers: first variable-range base, plus the
// fixed-range registers covering the low 1 MiB.
pub const X86X_MSR_MTRR_PHYSBASE0: u32 = 0x200;
pub const X86X_MSR_MTRR_FIX64K_00000: u32 = 0x0250;
pub const X86X_MSR_MTRR_FIX16K_80000: u32 = 0x0258;
pub const X86X_MSR_MTRR_FIX16K_A0000: u32 = 0x0259;
pub const X86X_MSR_MTRR_FIX4K_C0000: u32 = 0x0268;
pub const X86X_MSR_MTRR_FIX4K_C8000: u32 = 0x0269;
pub const X86X_MSR_MTRR_FIX4K_D0000: u32 = 0x026A;
pub const X86X_MSR_MTRR_FIX4K_D8000: u32 = 0x026B;
pub const X86X_MSR_MTRR_FIX4K_E0000: u32 = 0x026C;
pub const X86X_MSR_MTRR_FIX4K_E8000: u32 = 0x026D;
pub const X86X_MSR_MTRR_FIX4K_F0000: u32 = 0x026E;
pub const X86X_MSR_MTRR_FIX4K_F8000: u32 = 0x026F;
// Page attribute table and MTRR default type.
pub const X86X_MSR_CR_PAT: u32 = 0x277;
pub const X86X_MSR_MTRR_DEF_TYPE: u32 = 0x2ff;

// Extended supervisor state mask (XSAVES/XRSTORS).
pub const X86X_MSR_XSS: u32 = 0xda0;

// Running average power limit (RAPL) energy reporting.
pub const X86X_IA32_MSR_RAPL_POWER_UNIT: u32 = 0x606;
pub const X86X_IA32_MSR_PKG_ENERGY_STATUS: u32 = 0x611;
pub const X86X_IA32_MSR_DRAM_ENERGY_STATUS: u32 = 0x619;
pub const X86X_IA32_MSR_PP0_ENERGY_STATUS: u32 = 0x639;

// Control-flow enforcement technology (CET) state.
pub const X86X_MSR_U_CET: u32 = 0x6a0;
pub const X86X_MSR_S_CET: u32 = 0x6a2;
pub const X86X_MSR_PL0_SSP: u32 = 0x6a4;
pub const X86X_MSR_PL1_SSP: u32 = 0x6a5;
pub const X86X_MSR_PL2_SSP: u32 = 0x6a6;
pub const X86X_MSR_PL3_SSP: u32 = 0x6a7;
pub const X86X_MSR_INTERRUPT_SSP_TABLE_ADDR: u32 = 0x6a8;

// SYSCALL/SYSRET target registers.
pub const X86X_MSR_STAR: u32 = 0xC0000081;
pub const X86X_MSR_LSTAR: u32 = 0xC0000082;
pub const X86X_MSR_CSTAR: u32 = 0xC0000083;
pub const X86X_MSR_SFMASK: u32 = 0xC0000084;

// Extended feature enables and segment base registers.
pub const X86X_MSR_EFER: u32 = 0xC0000080;
pub const X64_MSR_FS_BASE: u32 = 0xC0000100;
pub const X64_MSR_GS_BASE: u32 = 0xC0000101;
pub const X64_MSR_KERNEL_GS_BASE: u32 = 0xC0000102;

pub const X86X_MSR_TSC_AUX: u32 = 0xC0000103;

// Speculation control and extended feature disable.
pub const X86X_MSR_SPEC_CTRL: u32 = 0x48;
pub const X86X_IA32_MSR_XFD: u32 = 0x1C4;
pub const X86X_IA32_MSR_XFD_ERR: u32 = 0x1C5;

// AMD-specific MSRs (0xC001xxxx range).
pub const X86X_AMD_MSR_PERF_EVT_SEL0: u32 = 0xC0010000;
pub const X86X_AMD_MSR_PERF_EVT_SEL1: u32 = 0xC0010001;
pub const X86X_AMD_MSR_PERF_EVT_SEL2: u32 = 0xC0010002;
pub const X86X_AMD_MSR_PERF_EVT_SEL3: u32 = 0xC0010003;
pub const X86X_AMD_MSR_PERF_CTR0: u32 = 0xC0010004;
pub const X86X_AMD_MSR_PERF_CTR1: u32 = 0xC0010005;
pub const X86X_AMD_MSR_PERF_CTR2: u32 = 0xC0010006;
pub const X86X_AMD_MSR_PERF_CTR3: u32 = 0xC0010007;
pub const X86X_AMD_MSR_SYSCFG: u32 = 0xC0010010;
pub const X86X_AMD_MSR_HW_CFG: u32 = 0xC0010015;
pub const X86X_AMD_MSR_NB_CFG: u32 = 0xC001001F;
pub const X86X_AMD_MSR_VM_CR: u32 = 0xC0010114;
// SEV / SNP guest-hypervisor communication and state.
pub const X86X_AMD_MSR_GHCB: u32 = 0xC0010130;
pub const X86X_AMD_MSR_SEV: u32 = 0xC0010131;
pub const X86X_AMD_MSR_SECURE_AVIC_CONTROL: u32 = 0xc0010138;
pub const X86X_AMD_MSR_OSVW_ID_LENGTH: u32 = 0xc0010140;
pub const X86X_AMD_MSR_OSVW_ID_STATUS: u32 = 0xc0010141;
pub const X86X_AMD_MSR_DE_CFG: u32 = 0xc0011029;

// DR6 debug status register bits.
pub const DR6_BREAKPOINT_MASK: u64 = 0xf;
pub const DR6_SINGLE_STEP: u64 = 0x4000;
285
/// The RFLAGS register.
#[bitfield(u64, default = false)]
#[derive(PartialEq)]
pub struct RFlags {
    /// CF: carry flag.
    pub carry: bool,
    /// Bit 1 is architecturally fixed to 1.
    pub reserved_must_be_1: bool,
    /// PF: parity flag.
    pub parity: bool,
    _reserved1: bool,
    /// AF: auxiliary carry (adjust) flag.
    pub adjust: bool,
    _reserved2: bool,
    /// ZF: zero flag.
    pub zero: bool,
    /// SF: sign flag.
    pub sign: bool,
    /// TF: single-step trap flag.
    pub trap: bool,
    /// IF: maskable interrupt enable.
    pub interrupt_enable: bool,
    /// DF: string-operation direction flag.
    pub direction: bool,
    /// OF: overflow flag.
    pub overflow: bool,
    /// IOPL: I/O privilege level.
    #[bits(2)]
    pub io_privilege_level: u8,
    /// NT: nested task flag.
    pub nested_task: bool,
    // NOTE(review): bit 15 is architecturally reserved; "mode" appears to be
    // a non-architectural use — confirm intended meaning with callers.
    pub mode: bool,

    /// RF: resume flag (suppresses instruction breakpoints for one step).
    pub resume: bool,
    /// VM: virtual-8086 mode.
    pub virtual_8086_mode: bool,
    /// AC: alignment check / SMAP access control.
    pub alignment_check: bool,
    /// VIF: virtual interrupt flag.
    pub virtual_interrupt: bool,
    /// VIP: virtual interrupt pending.
    pub virtual_interrupt_pending: bool,
    /// ID: software's ability to toggle this bit indicates CPUID support.
    pub cpuid_allowed: bool,
    _reserved3: u8,
    // NOTE(review): bit 30 is architecturally reserved; presumably an
    // implementation-specific flag — confirm before use.
    pub aes_key_schedule_loaded: bool,
    _reserved4: bool,

    _reserved5: u32,
}
321
impl RFlags {
    /// The RFLAGS value at processor reset: only the always-one bit (bit 1)
    /// is set.
    pub fn at_reset() -> Self {
        Self::new().with_reserved_must_be_1(true)
    }
}
328
329impl core::ops::BitAnd<RFlags> for RFlags {
330 type Output = RFlags;
331
332 fn bitand(self, rhs: RFlags) -> Self::Output {
333 RFlags(self.0 & rhs.0)
334 }
335}
336
#[cfg(feature = "arbitrary")]
impl<'a> arbitrary::Arbitrary<'a> for RFlags {
    /// Builds a flags value from an arbitrary raw 64-bit value.
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        u.arbitrary::<u64>().map(Self::from)
    }
}
344
/// A 16-byte long-mode IDT gate descriptor.
#[repr(C)]
#[derive(Debug, Clone, Copy, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct IdtEntry64 {
    /// Bits 15:0 of the handler address.
    pub offset_low: u16,
    /// Code segment selector for the handler.
    pub selector: u16,
    /// Gate type, DPL, present bit, and IST index.
    pub attributes: IdtAttributes,
    /// Bits 31:16 of the handler address.
    pub offset_middle: u16,
    /// Bits 63:32 of the handler address.
    pub offset_high: u32,
    /// Reserved; must be zero.
    pub reserved: u32,
}
355
/// The attribute word of a long-mode IDT gate descriptor.
#[bitfield(u16)]
#[derive(IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct IdtAttributes {
    /// Interrupt stack table index (0 = no IST stack switch).
    #[bits(3)]
    pub ist: u8,
    #[bits(5)]
    _reserved: u8,
    /// Gate type (e.g. 0xe = interrupt gate, 0xf = trap gate).
    #[bits(4)]
    pub gate_type: u8,
    _reserved2: bool,
    /// Privilege level required to invoke the gate with a software interrupt.
    #[bits(2)]
    pub dpl: u8,
    /// Set if the gate is present.
    pub present: bool,
}
370
/// A legacy 8-byte GDT descriptor, expressed as its raw byte-level fields.
#[repr(C)]
#[derive(Clone, Copy, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct GdtEntry {
    /// Bits 15:0 of the segment limit.
    pub limit_low: u16,
    /// Bits 15:0 of the segment base.
    pub base_low: u16,
    /// Bits 23:16 of the segment base.
    pub base_middle: u8,
    /// Access byte: type, S, DPL, P.
    pub attr_low: u8,
    /// Limit bits 19:16 plus the AVL/L/D/G flag nibble.
    pub attr_high: u8,
    /// Bits 31:24 of the segment base.
    pub base_high: u8,
}
381
/// A 16-byte long-mode system-segment descriptor (e.g. a 64-bit TSS), which
/// occupies two consecutive GDT slots.
#[repr(C)]
#[derive(Clone, Copy, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct LargeGdtEntry {
    /// Bits 15:0 of the segment limit.
    pub limit_low: u16,
    /// Bits 15:0 of the segment base.
    pub base_low: u16,
    /// Bits 23:16 of the segment base.
    pub base_middle: u8,
    /// Access byte: type, S, DPL, P.
    pub attr_low: u8,
    /// Limit bits 19:16 plus the flag nibble.
    pub attr_high: u8,
    /// Bits 31:24 of the segment base.
    pub base_high: u8,
    /// Bits 63:32 of the segment base.
    pub base_upper: u32,
    /// Must be zero.
    pub mbz: u32,
}
394
395impl LargeGdtEntry {
396 pub fn get_gdt_entries(&self) -> [GdtEntry; 2] {
398 let mut entries = [GdtEntry::new_zeroed(); 2];
399 entries.as_mut_bytes().copy_from_slice(self.as_bytes());
400 entries
401 }
402}
403
/// The 64-bit task state segment layout (packed, 104 bytes).
#[repr(C, packed)]
#[derive(Clone, Copy, Immutable, KnownLayout, IntoBytes, FromBytes)]
pub struct Tss64 {
    /// Reserved; must be zero.
    pub _mbz0: u32,
    /// Stack pointers loaded on a privilege change to rings 0-2.
    pub rsp: [u64; 3],
    /// Interrupt stack table pointers. Given this layout, index 0 covers the
    /// architecturally reserved qword before IST1, so `ist[n]` is ISTn.
    pub ist: [u64; 8],
    /// Reserved; must be zero.
    pub _mbz1: u64,
    /// Reserved; must be zero.
    pub _mbz2: u16,
    /// Offset from the TSS base to the I/O permission bitmap.
    pub io_map_base: u16,
}
414
open_enum! {
    // x86 hardware exception vector numbers. Not every architectural vector
    // is listed here (e.g. 0x2 NMI and 0x9 do not appear).
    pub enum Exception: u8 {
        DIVIDE_ERROR = 0x0,                  // #DE
        DEBUG = 0x1,                         // #DB
        BREAKPOINT = 0x3,                    // #BP (INT3)
        OVERFLOW = 0x4,                      // #OF (INTO)
        BOUND_RANGE_EXCEEDED = 0x5,          // #BR
        INVALID_OPCODE = 0x6,                // #UD
        DEVICE_NOT_AVAILABLE = 0x7,          // #NM
        DOUBLE_FAULT = 0x8,                  // #DF
        INVALID_TSS = 0x0A,                  // #TS
        SEGMENT_NOT_PRESENT = 0x0B,          // #NP
        STACK_SEGMENT_FAULT = 0x0C,          // #SS
        GENERAL_PROTECTION_FAULT = 0x0D,     // #GP
        PAGE_FAULT = 0x0E,                   // #PF
        FLOATING_POINT_EXCEPTION = 0x10,     // #MF (x87)
        ALIGNMENT_CHECK = 0x11,              // #AC
        MACHINE_CHECK = 0x12,                // #MC
        SIMD_FLOATING_POINT_EXCEPTION = 0x13, // #XM
        CONTROL_PROTECTION_EXCEPTION = 0x15, // #CP (CET)
        SEV_VMM_COMMUNICATION = 0x1D,        // #VC (AMD SEV-ES/SNP)
    }
}
438
439#[bitfield(u32)]
440pub struct PageFaultErrorCode {
441 pub present: bool,
442 pub write: bool,
443 pub user: bool,
444 pub reserved: bool,
445 pub fetch: bool,
446 #[bits(27)]
447 _unused: u32,
448}
449
/// Size in bytes of an x64 large page (2 MiB).
pub const X64_LARGE_PAGE_SIZE: u64 = 0x200000;
451
/// An x64 page table entry.
#[bitfield(u64)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct Pte {
    /// Entry is valid for address translation.
    pub present: bool,
    /// Writes are permitted.
    pub read_write: bool,
    /// User-mode accesses are permitted.
    pub user: bool,
    /// Page-level write-through caching.
    pub write_through: bool,
    /// Page-level cache disable.
    pub cache_disable: bool,
    /// Set by hardware when the entry is used for translation.
    pub accessed: bool,
    /// Set by hardware on a write through this entry.
    pub dirty: bool,
    /// PAT index bit for a 4 KiB leaf entry.
    pub pat: bool,
    /// Translation is global (survives CR3 reloads).
    pub global: bool,
    /// Ignored by hardware; available for software use.
    #[bits(3)]
    pub available0: u64,
    /// Physical page frame number (physical address >> 12).
    #[bits(40)]
    pub pfn: u64,
    /// Ignored by hardware; available for software use.
    #[bits(11)]
    pub available1: u64,
    /// Instruction fetches are disallowed (requires EFER.NXE).
    pub no_execute: bool,
}
472
473impl Pte {
474 pub fn address(&self) -> u64 {
475 self.pfn() << 12
476 }
477
478 pub fn with_address(self, address: u64) -> Self {
479 assert!(address & 0xfff == 0);
480 self.with_pfn(address >> 12)
481 }
482
483 pub fn set_address(&mut self, address: u64) -> &mut Self {
484 *self = self.with_address(address);
485 self
486 }
487}
488
/// An x64 page directory entry mapping a 2 MiB large page.
#[bitfield(u64)]
#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct LargePde {
    /// Entry is valid for address translation.
    pub present: bool,
    /// Writes are permitted.
    pub read_write: bool,
    /// User-mode accesses are permitted.
    pub user: bool,
    /// Page-level write-through caching.
    pub write_through: bool,
    /// Page-level cache disable.
    pub cache_disable: bool,
    /// Set by hardware when the entry is used for translation.
    pub accessed: bool,
    /// Set by hardware on a write through this entry.
    pub dirty: bool,
    /// PS flag: must be set for this entry to map a large page.
    pub large_page: bool,
    /// Translation is global (survives CR3 reloads).
    pub global: bool,
    /// Ignored by hardware; available for software use.
    #[bits(3)]
    pub available0: u64,
    /// PAT index bit (at bit 12 for large-page entries).
    pub pat: bool,
    #[bits(8)]
    _reserved0: u64,
    /// Physical base of the large page (physical address >> 21).
    #[bits(31)]
    pub large_page_base: u64,
    /// Ignored by hardware; available for software use.
    #[bits(11)]
    pub available1: u64,
    /// Instruction fetches are disallowed (requires EFER.NXE).
    pub no_execute: bool,
}
512
513#[bitfield(u64)]
514#[derive(PartialEq, Eq, IntoBytes, Immutable, KnownLayout, FromBytes)]
515pub struct X86xMcgStatusRegister {
516 pub ripv: bool, pub eipv: bool, pub mcip: bool, #[bits(61)]
520 pub reserved0: u64,
521}
522
/// A single APIC register slot as laid out in the xAPIC MMIO page: a 32-bit
/// value padded to a 16-byte stride.
#[repr(C)]
#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)]
pub struct ApicRegisterValue {
    /// The register contents.
    pub value: u32,
    _reserved: [u32; 3],
}