use acpi::dsdt;
use acpi_spec::fadt::AddressSpaceId;
use acpi_spec::fadt::AddressWidth;
use acpi_spec::fadt::GenericAddress;
use acpi_spec::madt::InterruptPolarity;
use acpi_spec::madt::InterruptTriggerMode;
use cache_topology::CacheTopology;
use chipset::ioapic;
use chipset::psp;
use inspect::Inspect;
use std::collections::BTreeMap;
use vm_topology::memory::MemoryLayout;
use vm_topology::processor::ArchTopology;
use vm_topology::processor::ProcessorTopology;
use vm_topology::processor::aarch64::Aarch64Topology;
use vm_topology::processor::x86::X86Topology;
use x86defs::apic::APIC_BASE_ADDRESS;
use zerocopy::IntoBytes;

/// The ACPI tables built by [`AcpiTablesBuilder`].
pub struct BuiltAcpiTables {
    /// The RDSP.
    pub rdsp: Vec<u8>,
    /// The remaining tables described by the RDSP.
    pub tables: Vec<u8>,
}

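/// A builder for the ACPI tables describing a VM's topology and devices.
///
/// A minimal construction sketch (mirroring the unit tests below; the
/// topology and memory layout values, `pm_base`, `acpi_irq`, and the `gpa`
/// are placeholders chosen by the caller):
///
/// ```ignore
/// let builder = AcpiTablesBuilder {
///     processor_topology: &processor_topology,
///     mem_layout: &mem_layout,
///     cache_topology: None,
///     with_ioapic: true,
///     with_pic: false,
///     with_pit: false,
///     with_psp: false,
///     pm_base: 0x400,
///     acpi_irq: 9,
/// };
/// let tables = builder.build_acpi_tables(0xe_0000, |_mem, _dsdt| {});
/// ```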
pub struct AcpiTablesBuilder<'a, T: AcpiTopology> {
    /// The VM's processor topology.
    pub processor_topology: &'a ProcessorTopology<T>,
    /// The VM's memory layout.
    pub mem_layout: &'a MemoryLayout,
    /// The cache topology, required only when building the PPTT.
    pub cache_topology: Option<&'a CacheTopology>,
    /// Include an I/O APIC entry in the MADT.
    pub with_ioapic: bool,
    /// Mark the MADT as PC-AT (dual 8259) compatible.
    pub with_pic: bool,
    /// Include a PIT interrupt source override in the MADT.
    pub with_pit: bool,
    /// Include an ASPT describing the PSP.
    pub with_psp: bool,
    /// The base IO port of the PM registers.
    pub pm_base: u16,
    /// The interrupt line used for the ACPI SCI.
    pub acpi_irq: u32,
}

pub const OEM_INFO: acpi::builder::OemInfo = acpi::builder::OemInfo {
    oem_id: *b"HVLITE",
    oem_tableid: *b"HVLITETB",
    oem_revision: 0,
    creator_id: *b"MSHV",
    creator_revision: 0,
};

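/// Architecture-specific extensions for building the SRAT and MADT.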
pub trait AcpiTopology: ArchTopology + Inspect + Sized {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>);
    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>);
}

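/// The largest APIC ID that fits in a legacy (xAPIC) MADT or SRAT entry;
/// larger IDs require x2APIC entries.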
const MAX_LEGACY_APIC_ID: u32 = 0xfe;

impl AcpiTopology for X86Topology {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
        for vp in topology.vps_arch() {
            if vp.apic_id <= MAX_LEGACY_APIC_ID {
                srat.extend_from_slice(
                    acpi_spec::srat::SratApic::new(vp.apic_id as u8, vp.base.vnode).as_bytes(),
                );
            } else {
                srat.extend_from_slice(
                    acpi_spec::srat::SratX2Apic::new(vp.apic_id, vp.base.vnode).as_bytes(),
                );
            }
        }
    }

    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
        for vp in topology.vps_arch() {
            // ACPI processor UIDs are 1-based, matching the processor
            // devices in the DSDT.
            let uid = vp.base.vp_index.index() + 1;
            // Use a legacy APIC entry when both the APIC ID and the UID fit;
            // otherwise, fall back to an x2APIC entry.
            if vp.apic_id <= MAX_LEGACY_APIC_ID && uid <= u8::MAX.into() {
                madt.extend_from_slice(
                    acpi_spec::madt::MadtApic {
                        apic_id: vp.apic_id as u8,
                        acpi_processor_uid: uid as u8,
                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
                        ..acpi_spec::madt::MadtApic::new()
                    }
                    .as_bytes(),
                );
            } else {
                madt.extend_from_slice(
                    acpi_spec::madt::MadtX2Apic {
                        x2_apic_id: vp.apic_id,
                        acpi_processor_uid: uid,
                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
                        ..acpi_spec::madt::MadtX2Apic::new()
                    }
                    .as_bytes(),
                );
            }
        }
    }
}

impl AcpiTopology for Aarch64Topology {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
        for vp in topology.vps_arch() {
            srat.extend_from_slice(
                acpi_spec::srat::SratGicc::new(vp.base.vp_index.index() + 1, vp.base.vnode)
                    .as_bytes(),
            );
        }
    }

    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
        // GIC distributor, GICv3.
        madt.extend_from_slice(
            acpi_spec::madt::MadtGicd::new(0, topology.gic_distributor_base(), 3).as_bytes(),
        );
        for vp in topology.vps_arch() {
            let uid = vp.base.vp_index.index() + 1;
            // Only the affinity fields of MPIDR_EL1 are reported.
            let mpidr = u64::from(vp.mpidr) & u64::from(aarch64defs::MpidrEl1::AFFINITY_MASK);

            let gicr = topology.gic_redistributors_base()
                + vp.base.vp_index.index() as u64 * aarch64defs::GIC_REDISTRIBUTOR_SIZE;
            madt.extend_from_slice(acpi_spec::madt::MadtGicc::new(uid, mpidr, gicr).as_bytes());
        }
    }
}

impl<T: AcpiTopology> AcpiTablesBuilder<'_, T> {
    fn with_srat<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut srat_extra: Vec<u8> = Vec::new();
        T::extend_srat(self.processor_topology, &mut srat_extra);
        for range in self.mem_layout.ram() {
            srat_extra.extend_from_slice(
                acpi_spec::srat::SratMemory::new(
                    range.range.start(),
                    range.range.len(),
                    range.vnode,
                )
                .as_bytes(),
            );
        }

        (f)(&acpi::builder::Table::new_dyn(
            acpi_spec::srat::SRAT_REVISION,
            None,
            &acpi_spec::srat::SratHeader::new(),
            &[srat_extra.as_slice()],
        ))
    }

    fn with_madt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut madt_extra: Vec<u8> = Vec::new();
        if self.with_ioapic {
            madt_extra.extend_from_slice(
                acpi_spec::madt::MadtIoApic {
                    io_apic_id: 0,
                    io_apic_address: ioapic::IOAPIC_DEVICE_MMIO_REGION_BASE_ADDRESS as u32,
                    ..acpi_spec::madt::MadtIoApic::new()
                }
                .as_bytes(),
            );
        }

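        // The SCI is level-triggered, so override the default edge-triggered
        // routing for its interrupt line.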
        madt_extra.extend_from_slice(
            acpi_spec::madt::MadtInterruptSourceOverride::new(
                self.acpi_irq.try_into().expect("should be in range"),
                self.acpi_irq,
                Some(InterruptPolarity::ActiveHigh),
                Some(InterruptTriggerMode::Level),
            )
            .as_bytes(),
        );

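        // Route the PIT's IRQ0 to I/O APIC input 2, the standard PC routing.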
        if self.with_pit {
            madt_extra.extend_from_slice(
                acpi_spec::madt::MadtInterruptSourceOverride::new(0, 2, None, None).as_bytes(),
            );
        }

        T::extend_madt(self.processor_topology, &mut madt_extra);

        let flags = if self.with_pic {
            acpi_spec::madt::MADT_PCAT_COMPAT
        } else {
            0
        };

        (f)(&acpi::builder::Table::new_dyn(
            5,
            None,
            &acpi_spec::madt::Madt {
                apic_addr: APIC_BASE_ADDRESS,
                flags,
            },
            &[madt_extra.as_slice()],
        ))
    }

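    /// Builds a PPTT describing sockets, cores, threads, and caches.
    ///
    /// Panics if `cache_topology` is `None`.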
    fn with_pptt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        use acpi_spec::pptt;

        let cache = self.cache_topology.expect("cache topology is required");

        // Offset of the next structure from the start of the table.
        let current_offset =
            |pptt_extra: &[u8]| (size_of::<acpi_spec::Header>() + pptt_extra.len()) as u32;

        // Appends a cache structure for the given level and type, if present
        // in the cache topology, returning its offset within the table.
        let cache_for = |pptt_extra: &mut Vec<u8>, level: u8, cache_type, next: Option<u32>| {
            let descriptor = cache
                .caches
                .iter()
                .find(|d| d.level == level && d.cache_type == cache_type)?;
            let offset = current_offset(pptt_extra);
            pptt_extra.extend_from_slice(
                pptt::PpttCache {
                    flags: u32::from(
                        pptt::PpttCacheFlags::new()
                            .with_size_valid(true)
                            .with_associativity_valid(true)
                            .with_cache_type_valid(true)
                            .with_line_size_valid(true),
                    )
                    .into(),
                    size: descriptor.size.into(),
                    associativity: descriptor.associativity.unwrap_or(0) as u8,
                    attributes: pptt::PpttCacheAttributes::new().with_cache_type(match descriptor
                        .cache_type
                    {
                        cache_topology::CacheType::Data => pptt::PPTT_CACHE_TYPE_DATA,
                        cache_topology::CacheType::Instruction => pptt::PPTT_CACHE_TYPE_INSTRUCTION,
                        cache_topology::CacheType::Unified => pptt::PPTT_CACHE_TYPE_UNIFIED,
                    }),
                    line_size: (descriptor.line_size as u16).into(),
                    next_level: next.unwrap_or(0).into(),
                    ..pptt::PpttCache::new()
                }
                .as_bytes(),
            );
            Some(offset)
        };

        let mut pptt_extra = Vec::new();
        let mut sockets = BTreeMap::new();
        let smt_enabled = self.processor_topology.smt_enabled();

        // Build one processor node per socket and per core, plus one per
        // thread when SMT is enabled, linking in cache nodes as they are
        // first encountered.
        for vp in self.processor_topology.vps() {
            let acpi_processor_id = vp.vp_index.index() + 1;
            let info = self.processor_topology.vp_topology(vp.vp_index);

            let &mut (socket_offset, ref mut cores) =
                sockets.entry(info.socket).or_insert_with(|| {
                    let l3 =
                        cache_for(&mut pptt_extra, 3, cache_topology::CacheType::Unified, None);
                    let socket_offset = current_offset(&pptt_extra);
                    pptt_extra.extend_from_slice(
                        pptt::PpttProcessor {
                            flags: u32::from(
                                pptt::PpttProcessorFlags::new().with_physical_package(true),
                            )
                            .into(),
                            ..pptt::PpttProcessor::new(l3.is_some() as u8)
                        }
                        .as_bytes(),
                    );

                    if let Some(l3) = l3 {
                        pptt_extra.extend_from_slice(&l3.to_ne_bytes());
                    }

                    (socket_offset, BTreeMap::new())
                });

            let core_offset = *cores.entry(info.core).or_insert_with(|| {
                let l2 = cache_for(&mut pptt_extra, 2, cache_topology::CacheType::Unified, None);
                let l1i = cache_for(
                    &mut pptt_extra,
                    1,
                    cache_topology::CacheType::Instruction,
                    l2,
                );
                let l1d = cache_for(&mut pptt_extra, 1, cache_topology::CacheType::Data, l2);

                let core_offset = current_offset(&pptt_extra);
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new()
                                .with_acpi_processor_uid_valid(!smt_enabled),
                        )
                        .into(),
                        acpi_processor_id: if !smt_enabled {
                            acpi_processor_id.into()
                        } else {
                            0u32.into()
                        },
                        parent: socket_offset.into(),
                        ..pptt::PpttProcessor::new(l1i.is_some() as u8 + l1d.is_some() as u8)
                    }
                    .as_bytes(),
                );

                if let Some(l1) = l1i {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }
                if let Some(l1) = l1d {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }

                core_offset
            });

            if smt_enabled {
                // Each thread gets a leaf node whose parent is the core.
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new().with_acpi_processor_uid_valid(true),
                        )
                        .into(),
                        acpi_processor_id: acpi_processor_id.into(),
                        parent: core_offset.into(),
                        ..pptt::PpttProcessor::new(0)
                    }
                    .as_bytes(),
                )
            }
        }

        (f)(&acpi::builder::Table::new_dyn(
            1,
            None,
            &pptt::Pptt {},
            &[pptt_extra.as_slice()],
        ))
    }

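    /// Builds the ACPI tables to load at `gpa`.
    ///
    /// The first page is reserved for the RDSP; the remaining tables are
    /// built to be loaded at `gpa + 0x1000`. `add_devices_to_dsdt` is called
    /// with the memory layout and the DSDT under construction so the caller
    /// can add VM-specific devices.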
    pub fn build_acpi_tables<F>(&self, gpa: u64, add_devices_to_dsdt: F) -> BuiltAcpiTables
    where
        F: FnOnce(&MemoryLayout, &mut dsdt::Dsdt),
    {
        let mut dsdt_data = dsdt::Dsdt::new();
        // S0: working.
        dsdt_data.add_object(&dsdt::NamedObject::new(
            b"\\_S0",
            &dsdt::Package(vec![0, 0]),
        ));
        // S5: soft off.
        dsdt_data.add_object(&dsdt::NamedObject::new(
            b"\\_S5",
            &dsdt::Package(vec![0, 0]),
        ));
        add_devices_to_dsdt(self.mem_layout, &mut dsdt_data);

        // Add a processor device for each VP, with 1-based UIDs to match the
        // MADT entries.
        for proc_index in 1..self.processor_topology.vp_count() + 1 {
            // Name the devices P000..P999, then Q000.., and so on, since
            // ACPI device names are limited to four characters.
            let c = (b'P' + (proc_index / 1000) as u8) as char;
            let name = &format!("{c}{:03}", proc_index % 1000);
            let mut proc = dsdt::Device::new(name.as_bytes());
            proc.add_object(&dsdt::NamedString::new(b"_HID", b"ACPI0007"));
            proc.add_object(&dsdt::NamedInteger::new(b"_UID", proc_index as u64));
            // _STA: present, enabled, shown in the UI, and functioning.
            let mut method = dsdt::Method::new(b"_STA");
            method.add_operation(&dsdt::ReturnOp {
                result: dsdt::encode_integer(0xf),
            });
            proc.add_object(&method);
            dsdt_data.add_object(&proc);
        }

        self.build_acpi_tables_inner(gpa, &dsdt_data.to_bytes())
    }

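    /// Builds the ACPI tables as in [`Self::build_acpi_tables`], but with a
    /// caller-provided DSDT (raw AML bytes) instead of the generated one.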
    pub fn build_acpi_tables_custom_dsdt(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        self.build_acpi_tables_inner(gpa, dsdt)
    }

    fn build_acpi_tables_inner(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        // The tables are laid out starting one page past `gpa`, leaving the
        // first page for the RDSP.
        let mut b = acpi::builder::Builder::new(gpa + 0x1000, OEM_INFO);

        let dsdt = b.append_raw(dsdt);

        b.append(&acpi::builder::Table::new(
            6,
            None,
            &acpi_spec::fadt::Fadt {
                flags: acpi_spec::fadt::FADT_WBINVD
                    | acpi_spec::fadt::FADT_PROC_C1
                    | acpi_spec::fadt::FADT_PWR_BUTTON
                    | acpi_spec::fadt::FADT_SLP_BUTTON
                    | acpi_spec::fadt::FADT_RTC_S4
                    | acpi_spec::fadt::FADT_TMR_VAL_EXT
                    | acpi_spec::fadt::FADT_RESET_REG_SUP
                    | acpi_spec::fadt::FADT_USE_PLATFORM_CLOCK,
                x_dsdt: dsdt,
                sci_int: self.acpi_irq as u16,
                // Latencies above 100/1000 indicate that C2/C3 are not
                // supported.
                p_lvl2_lat: 101,
                p_lvl3_lat: 1001,
                pm1_evt_len: 4,
                x_pm1a_evt_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::STATUS.0 as u16).into(),
                },
                pm1_cnt_len: 2,
                x_pm1a_cnt_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 16,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::CONTROL.0 as u16).into(),
                },
                gpe0_blk_len: 4,
                x_gpe0_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::GEN_PURPOSE_STATUS.0 as u16)
                        .into(),
                },
                reset_reg: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 8,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Byte,
                    address: (self.pm_base + chipset::pm::DynReg::RESET.0 as u16).into(),
                },
                reset_value: chipset::pm::RESET_VALUE,
                pm_tmr_len: 4,
                x_pm_tmr_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Dword,
                    address: (self.pm_base + chipset::pm::DynReg::TIMER.0 as u16).into(),
                },
                ..Default::default()
            },
        ));

        if self.with_psp {
            use acpi_spec::aspt;
            use acpi_spec::aspt::Aspt;
            use acpi_spec::aspt::AsptStructHeader;

            // The ASPT carries three structures: the ASP global registers,
            // the SEV mailbox registers, and the ACPI mailbox registers.
            b.append(&acpi::builder::Table::new_dyn(
                1,
                None,
                &Aspt { num_structs: 3 },
                &[
                    AsptStructHeader::new::<aspt::structs::AspGlobalRegisters>().as_bytes(),
                    aspt::structs::AspGlobalRegisters {
                        _reserved: 0,
                        feature_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::FEATURE,
                        interrupt_enable_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::INT_EN,
                        interrupt_status_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::INT_STS,
                    }
                    .as_bytes(),
                    AsptStructHeader::new::<aspt::structs::SevMailboxRegisters>().as_bytes(),
                    aspt::structs::SevMailboxRegisters {
                        mailbox_interrupt_id: 1,
                        _reserved: [0; 3],
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::CMD_RESP,
                        cmd_buf_addr_lo_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_LO,
                        cmd_buf_addr_hi_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_HI,
                    }
                    .as_bytes(),
                    AsptStructHeader::new::<aspt::structs::AcpiMailboxRegisters>().as_bytes(),
                    aspt::structs::AcpiMailboxRegisters {
                        _reserved1: 0,
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::ACPI_CMD_RESP,
                        _reserved2: [0; 2],
                    }
                    .as_bytes(),
                ],
            ));
        }

        self.with_madt(|t| b.append(t));
        self.with_srat(|t| b.append(t));
        if self.cache_topology.is_some() {
            self.with_pptt(|t| b.append(t));
        }

        let (rdsp, tables) = b.build();

        BuiltAcpiTables { rdsp, tables }
    }

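    /// Builds a standalone MADT.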
    pub fn build_madt(&self) -> Vec<u8> {
        self.with_madt(|t| t.to_vec(&OEM_INFO))
    }

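    /// Builds a standalone SRAT.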
    pub fn build_srat(&self) -> Vec<u8> {
        self.with_srat(|t| t.to_vec(&OEM_INFO))
    }

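    /// Builds a standalone PPTT.
    ///
    /// Panics if `cache_topology` is `None`.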
    pub fn build_pptt(&self) -> Vec<u8> {
        self.with_pptt(|t| t.to_vec(&OEM_INFO))
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use acpi_spec::madt::MadtParser;
    use memory_range::MemoryRange;
    use virt::VpIndex;
    use virt::VpInfo;
    use vm_topology::processor::TopologyBuilder;
    use vm_topology::processor::x86::X86VpInfo;

    const KB: u64 = 1024;
    const MB: u64 = 1024 * KB;
    const GB: u64 = 1024 * MB;
    const TB: u64 = 1024 * GB;

    const MMIO: [MemoryRange; 2] = [
        MemoryRange::new(GB..2 * GB),
        MemoryRange::new(3 * GB..4 * GB),
    ];

    fn new_mem() -> MemoryLayout {
        MemoryLayout::new(TB, &MMIO, None).unwrap()
    }

    fn new_builder<'a>(
        mem_layout: &'a MemoryLayout,
        processor_topology: &'a ProcessorTopology<X86Topology>,
    ) -> AcpiTablesBuilder<'a, X86Topology> {
        AcpiTablesBuilder {
            processor_topology,
            mem_layout,
            cache_topology: None,
            with_ioapic: true,
            with_pic: false,
            with_pit: false,
            with_psp: false,
            pm_base: 1234,
            acpi_irq: 2,
        }
    }

    #[test]
    fn test_basic_madt_cpu() {
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(16).unwrap();
        let builder = new_builder(&mem, &topology);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (0..16).map(Some).collect::<Vec<_>>());

        let topology = TopologyBuilder::new_x86()
            .apic_id_offset(13)
            .build(16)
            .unwrap();
        let builder = new_builder(&mem, &topology);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (13..29).map(Some).collect::<Vec<_>>());

        let apic_ids = [12, 58, 4823, 36];
        let topology = TopologyBuilder::new_x86()
            .build_with_vp_info(apic_ids.iter().enumerate().map(|(uid, apic)| X86VpInfo {
                base: VpInfo {
                    vp_index: VpIndex::new(uid as u32),
                    vnode: 0,
                },
                apic_id: *apic,
            }))
            .unwrap();
        let builder = new_builder(&mem, &topology);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(
            entries,
            apic_ids.iter().map(|e| Some(*e)).collect::<Vec<_>>()
        );
    }
}