use acpi::dsdt;
use acpi_spec::fadt::AddressSpaceId;
use acpi_spec::fadt::AddressWidth;
use acpi_spec::fadt::GenericAddress;
use acpi_spec::madt::InterruptPolarity;
use acpi_spec::madt::InterruptTriggerMode;
use cache_topology::CacheTopology;
use chipset::ioapic;
use chipset::psp;
use inspect::Inspect;
use std::collections::BTreeMap;
use vm_topology::memory::MemoryLayout;
use vm_topology::processor::ArchTopology;
use vm_topology::processor::ProcessorTopology;
use vm_topology::processor::aarch64::Aarch64Topology;
use vm_topology::processor::x86::X86Topology;
use x86defs::apic::APIC_BASE_ADDRESS;
use zerocopy::IntoBytes;

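/// The ACPI tables built by [`AcpiTablesBuilder`].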
pub struct BuiltAcpiTables {
    /// The RDSP table.
    pub rdsp: Vec<u8>,
    /// The remaining tables, which `build_acpi_tables` lays out starting one
    /// page (0x1000 bytes) after the RDSP.
    pub tables: Vec<u8>,
}

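/// A builder for computing a VM's ACPI tables.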
pub struct AcpiTablesBuilder<'a, T: AcpiTopology> {
    /// The processor topology for the VM.
    pub processor_topology: &'a ProcessorTopology<T>,
    /// The memory layout for the VM, used to build the SRAT's memory
    /// affinity entries.
    pub mem_layout: &'a MemoryLayout,
    /// The cache topology. Required only when building the PPTT.
    pub cache_topology: Option<&'a CacheTopology>,
    /// Whether to add an I/O APIC entry to the MADT.
    pub with_ioapic: bool,
    /// Whether to mark the MADT as PC/AT-compatible (dual 8259 PICs).
    pub with_pic: bool,
    /// Whether to add the PIT's interrupt source override to the MADT.
    pub with_pit: bool,
    /// Whether to add an ASPT describing the PSP.
    pub with_psp: bool,
    /// The base I/O port of the power management registers.
    pub pm_base: u16,
    /// The interrupt line used for the ACPI SCI.
    pub acpi_irq: u32,
}

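/// The OEM info stamped into each generated table.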
pub const OEM_INFO: acpi::builder::OemInfo = acpi::builder::OemInfo {
    oem_id: *b"HVLITE",
    oem_tableid: *b"HVLITETB",
    oem_revision: 0,
    creator_id: *b"MSHV",
    creator_revision: 0,
};

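/// Architecture-specific contributions to the SRAT and MADT.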
pub trait AcpiTopology: ArchTopology + Inspect + Sized {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>);
    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>);
}

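// xAPIC IDs are 8 bits wide (with 0xff reserved), so any APIC ID above 0xfe
// must be described with x2APIC MADT/SRAT entries rather than legacy ones.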
const MAX_LEGACY_APIC_ID: u32 = 0xfe;

impl AcpiTopology for X86Topology {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
        for vp in topology.vps_arch() {
            if vp.apic_id <= MAX_LEGACY_APIC_ID {
                srat.extend_from_slice(
                    acpi_spec::srat::SratApic::new(vp.apic_id as u8, vp.base.vnode).as_bytes(),
                );
            } else {
                srat.extend_from_slice(
                    acpi_spec::srat::SratX2Apic::new(vp.apic_id, vp.base.vnode).as_bytes(),
                );
            }
        }
    }

    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
        madt.extend_from_slice(acpi_spec::madt::MadtLocalNmiSource::new().as_bytes());

        for vp in topology.vps_arch() {
            // ACPI processor UIDs are 1-based.
            let uid = vp.base.vp_index.index() + 1;
            // Use a legacy APIC entry when both the APIC ID and the UID fit
            // in 8 bits; otherwise fall back to an x2APIC entry.
            if vp.apic_id <= MAX_LEGACY_APIC_ID && uid <= u8::MAX.into() {
                madt.extend_from_slice(
                    acpi_spec::madt::MadtApic {
                        apic_id: vp.apic_id as u8,
                        acpi_processor_uid: uid as u8,
                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
                        ..acpi_spec::madt::MadtApic::new()
                    }
                    .as_bytes(),
                );
            } else {
                madt.extend_from_slice(
                    acpi_spec::madt::MadtX2Apic {
                        x2_apic_id: vp.apic_id,
                        acpi_processor_uid: uid,
                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
                        ..acpi_spec::madt::MadtX2Apic::new()
                    }
                    .as_bytes(),
                );
            }
        }
    }
}

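// On aarch64, processors are described by GICC structures (plus a single
// structure for the GIC distributor), using the same vp_index + 1 UID
// convention as x86.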
impl AcpiTopology for Aarch64Topology {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
        for vp in topology.vps_arch() {
            srat.extend_from_slice(
                acpi_spec::srat::SratGicc::new(vp.base.vp_index.index() + 1, vp.base.vnode)
                    .as_bytes(),
            );
        }
    }

    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
        // Advertise the GIC distributor (GICv3).
        madt.extend_from_slice(
            acpi_spec::madt::MadtGicd::new(0, topology.gic_distributor_base(), 3).as_bytes(),
        );
        for vp in topology.vps_arch() {
            let uid = vp.base.vp_index.index() + 1;
            // Keep only the affinity fields of the MPIDR.
            let mpidr = u64::from(vp.mpidr) & u64::from(aarch64defs::MpidrEl1::AFFINITY_MASK);
            // Each VP's redistributor sits at a fixed stride from the base.
            let gicr = topology.gic_redistributors_base()
                + vp.base.vp_index.index() as u64 * aarch64defs::GIC_REDISTRIBUTOR_SIZE;
            let pmu_gsiv = topology.pmu_gsiv();
            madt.extend_from_slice(
                acpi_spec::madt::MadtGicc::new(uid, mpidr, gicr, pmu_gsiv).as_bytes(),
            );
        }
    }
}

impl<T: AcpiTopology> AcpiTablesBuilder<'_, T> {
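    /// Builds the SRAT, with a memory affinity entry for each RAM range and
    /// the architecture-specific processor entries, and passes it to `f`.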
    fn with_srat<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut srat_extra: Vec<u8> = Vec::new();
        T::extend_srat(self.processor_topology, &mut srat_extra);
        for range in self.mem_layout.ram() {
            srat_extra.extend_from_slice(
                acpi_spec::srat::SratMemory::new(
                    range.range.start(),
                    range.range.len(),
                    range.vnode,
                )
                .as_bytes(),
            );
        }

        (f)(&acpi::builder::Table::new_dyn(
            acpi_spec::srat::SRAT_REVISION,
            None,
            &acpi_spec::srat::SratHeader::new(),
            &[srat_extra.as_slice()],
        ))
    }

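    /// Builds the MADT and passes it to `f`.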
    fn with_madt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut madt_extra: Vec<u8> = Vec::new();
        if self.with_ioapic {
            madt_extra.extend_from_slice(
                acpi_spec::madt::MadtIoApic {
                    io_apic_id: 0,
                    io_apic_address: ioapic::IOAPIC_DEVICE_MMIO_REGION_BASE_ADDRESS as u32,
                    ..acpi_spec::madt::MadtIoApic::new()
                }
                .as_bytes(),
            );
        }

        // Mark the ACPI SCI as level-triggered, active-high.
        madt_extra.extend_from_slice(
            acpi_spec::madt::MadtInterruptSourceOverride::new(
                self.acpi_irq.try_into().expect("should be in range"),
                self.acpi_irq,
                Some(InterruptPolarity::ActiveHigh),
                Some(InterruptTriggerMode::Level),
            )
            .as_bytes(),
        );

        if self.with_pit {
            // The PIT's output (ISA IRQ 0) is routed to I/O APIC input 2.
            madt_extra.extend_from_slice(
                acpi_spec::madt::MadtInterruptSourceOverride::new(0, 2, None, None).as_bytes(),
            );
        }

        T::extend_madt(self.processor_topology, &mut madt_extra);

        let flags = if self.with_pic {
            acpi_spec::madt::MADT_PCAT_COMPAT
        } else {
            0
        };

        (f)(&acpi::builder::Table::new_dyn(
            5,
            None,
            &acpi_spec::madt::Madt {
                apic_addr: APIC_BASE_ADDRESS,
                flags,
            },
            &[madt_extra.as_slice()],
        ))
    }

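    /// Builds the PPTT and passes it to `f`.
    ///
    /// Panics if `cache_topology` is `None`.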
    fn with_pptt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        use acpi_spec::pptt;

        let cache = self.cache_topology.expect("cache topology is required");

        let current_offset =
            |pptt_extra: &[u8]| (size_of::<acpi_spec::Header>() + pptt_extra.len()) as u32;

        // Appends a cache structure for the given level and type, returning
        // its offset within the table, or `None` if no such cache exists.
        let cache_for = |pptt_extra: &mut Vec<u8>, level: u8, cache_type, next: Option<u32>| {
            let descriptor = cache
                .caches
                .iter()
                .find(|d| d.level == level && d.cache_type == cache_type)?;
            let offset = current_offset(pptt_extra);
            pptt_extra.extend_from_slice(
                pptt::PpttCache {
                    flags: u32::from(
                        pptt::PpttCacheFlags::new()
                            .with_size_valid(true)
                            .with_associativity_valid(true)
                            .with_cache_type_valid(true)
                            .with_line_size_valid(true),
                    )
                    .into(),
                    size: descriptor.size.into(),
                    associativity: descriptor.associativity.unwrap_or(0) as u8,
                    attributes: pptt::PpttCacheAttributes::new().with_cache_type(match descriptor
                        .cache_type
                    {
                        cache_topology::CacheType::Data => pptt::PPTT_CACHE_TYPE_DATA,
                        cache_topology::CacheType::Instruction => pptt::PPTT_CACHE_TYPE_INSTRUCTION,
                        cache_topology::CacheType::Unified => pptt::PPTT_CACHE_TYPE_UNIFIED,
                    }),
                    line_size: (descriptor.line_size as u16).into(),
                    next_level: next.unwrap_or(0).into(),
                    ..pptt::PpttCache::new()
                }
                .as_bytes(),
            );
            Some(offset)
        };

        let mut pptt_extra = Vec::new();
        let mut sockets = BTreeMap::new();
        let smt_enabled = self.processor_topology.smt_enabled();

        for vp in self.processor_topology.vps() {
            let acpi_processor_id = vp.vp_index.index() + 1;
            let info = self.processor_topology.vp_topology(vp.vp_index);

            // Add the socket (with its L3 cache) the first time a VP in it
            // is seen.
            let &mut (socket_offset, ref mut cores) =
                sockets.entry(info.socket).or_insert_with(|| {
                    let l3 =
                        cache_for(&mut pptt_extra, 3, cache_topology::CacheType::Unified, None);
                    let socket_offset = current_offset(&pptt_extra);
                    pptt_extra.extend_from_slice(
                        pptt::PpttProcessor {
                            flags: u32::from(
                                pptt::PpttProcessorFlags::new().with_physical_package(true),
                            )
                            .into(),
                            ..pptt::PpttProcessor::new(l3.is_some() as u8)
                        }
                        .as_bytes(),
                    );

                    if let Some(l3) = l3 {
                        pptt_extra.extend_from_slice(&l3.to_ne_bytes());
                    }

                    (socket_offset, BTreeMap::new())
                });

            // Likewise for the core, which owns the L2 and L1 caches.
            let core_offset = *cores.entry(info.core).or_insert_with(|| {
                let l2 = cache_for(&mut pptt_extra, 2, cache_topology::CacheType::Unified, None);
                let l1i = cache_for(
                    &mut pptt_extra,
                    1,
                    cache_topology::CacheType::Instruction,
                    l2,
                );
                let l1d = cache_for(&mut pptt_extra, 1, cache_topology::CacheType::Data, l2);

                let core_offset = current_offset(&pptt_extra);
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new()
                                .with_acpi_processor_uid_valid(!smt_enabled),
                        )
                        .into(),
                        acpi_processor_id: if !smt_enabled {
                            acpi_processor_id.into()
                        } else {
                            0u32.into()
                        },
                        parent: socket_offset.into(),
                        ..pptt::PpttProcessor::new(l1i.is_some() as u8 + l1d.is_some() as u8)
                    }
                    .as_bytes(),
                );

                if let Some(l1) = l1i {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }
                if let Some(l1) = l1d {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }

                core_offset
            });

            // With SMT, each thread gets a leaf node whose parent is the core.
            if smt_enabled {
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new().with_acpi_processor_uid_valid(true),
                        )
                        .into(),
                        acpi_processor_id: acpi_processor_id.into(),
                        parent: core_offset.into(),
                        ..pptt::PpttProcessor::new(0)
                    }
                    .as_bytes(),
                )
            }
        }

        (f)(&acpi::builder::Table::new_dyn(
            1,
            None,
            &pptt::Pptt {},
            &[pptt_extra.as_slice()],
        ))
    }

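    /// Builds the ACPI tables, assuming they will be loaded at guest physical
    /// address `gpa`. `add_devices_to_dsdt` is called to add any additional
    /// devices to the generated DSDT.
    ///
    /// A minimal usage sketch (the `gpa` value and empty closure here are
    /// illustrative, not required):
    ///
    /// ```ignore
    /// let tables = builder.build_acpi_tables(0x1000_0000, |_mem, _dsdt| {
    ///     // add platform devices to the DSDT here
    /// });
    /// assert!(!tables.rdsp.is_empty());
    /// ```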
    pub fn build_acpi_tables<F>(&self, gpa: u64, add_devices_to_dsdt: F) -> BuiltAcpiTables
    where
        F: FnOnce(&MemoryLayout, &mut dsdt::Dsdt),
    {
        let mut dsdt_data = dsdt::Dsdt::new();
        // Declare the S0 (working) and S5 (soft-off) sleep states.
        dsdt_data.add_object(&dsdt::NamedObject::new(
            b"\\_S0",
            &dsdt::Package(vec![0, 0]),
        ));
        dsdt_data.add_object(&dsdt::NamedObject::new(
            b"\\_S5",
            &dsdt::Package(vec![0, 0]),
        ));
        add_devices_to_dsdt(self.mem_layout, &mut dsdt_data);

        // Add a device for each processor, named P001..P999, then Q000...,
        // since ACPI names are limited to four characters.
        for proc_index in 1..self.processor_topology.vp_count() + 1 {
            let c = (b'P' + (proc_index / 1000) as u8) as char;
            let name = &format!("{c}{:03}", proc_index % 1000);
            let mut proc = dsdt::Device::new(name.as_bytes());
            proc.add_object(&dsdt::NamedString::new(b"_HID", b"ACPI0007"));
            proc.add_object(&dsdt::NamedInteger::new(b"_UID", proc_index as u64));
            // _STA: present, enabled, and functioning.
            let mut method = dsdt::Method::new(b"_STA");
            method.add_operation(&dsdt::ReturnOp {
                result: dsdt::encode_integer(0xf),
            });
            proc.add_object(&method);
            dsdt_data.add_object(&proc);
        }

        self.build_acpi_tables_inner(gpa, &dsdt_data.to_bytes())
    }

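    /// Builds the ACPI tables using a caller-provided DSDT instead of the
    /// generated one, assuming the tables will be loaded at guest physical
    /// address `gpa`.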
    pub fn build_acpi_tables_custom_dsdt(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        self.build_acpi_tables_inner(gpa, dsdt)
    }

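    // Shared implementation: `dsdt` is the raw AML to embed as the DSDT.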
    fn build_acpi_tables_inner(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        // Reserve the first page at `gpa` for the RDSP; the remaining tables
        // start one page in.
        let mut b = acpi::builder::Builder::new(gpa + 0x1000, OEM_INFO);

        let dsdt = b.append_raw(dsdt);

        b.append(&acpi::builder::Table::new(
            6,
            None,
            &acpi_spec::fadt::Fadt {
                flags: acpi_spec::fadt::FADT_WBINVD
                    | acpi_spec::fadt::FADT_PROC_C1
                    | acpi_spec::fadt::FADT_PWR_BUTTON
                    | acpi_spec::fadt::FADT_SLP_BUTTON
                    | acpi_spec::fadt::FADT_RTC_S4
                    | acpi_spec::fadt::FADT_TMR_VAL_EXT
                    | acpi_spec::fadt::FADT_RESET_REG_SUP
                    | acpi_spec::fadt::FADT_USE_PLATFORM_CLOCK,
                x_dsdt: dsdt,
                sci_int: self.acpi_irq as u16,
                p_lvl2_lat: 101,  // >100 means C2 is not supported
                p_lvl3_lat: 1001, // >1000 means C3 is not supported
                pm1_evt_len: 4,
                x_pm1a_evt_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::STATUS.0 as u16).into(),
                },
                pm1_cnt_len: 2,
                x_pm1a_cnt_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 16,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::CONTROL.0 as u16).into(),
                },
                gpe0_blk_len: 4,
                x_gpe0_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::GEN_PURPOSE_STATUS.0 as u16)
                        .into(),
                },
                reset_reg: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 8,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Byte,
                    address: (self.pm_base + chipset::pm::DynReg::RESET.0 as u16).into(),
                },
                reset_value: chipset::pm::RESET_VALUE,
                pm_tmr_len: 4,
                x_pm_tmr_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Dword,
                    address: (self.pm_base + chipset::pm::DynReg::TIMER.0 as u16).into(),
                },
                ..Default::default()
            },
        ));

        if self.with_psp {
            use acpi_spec::aspt;
            use acpi_spec::aspt::Aspt;
            use acpi_spec::aspt::AsptStructHeader;

            b.append(&acpi::builder::Table::new_dyn(
                1,
                None,
                &Aspt { num_structs: 3 },
                &[
                    // ASP global registers
                    AsptStructHeader::new::<aspt::structs::AspGlobalRegisters>().as_bytes(),
                    aspt::structs::AspGlobalRegisters {
                        _reserved: 0,
                        feature_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::FEATURE,
                        interrupt_enable_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::INT_EN,
                        interrupt_status_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::INT_STS,
                    }
                    .as_bytes(),
                    // SEV mailbox registers
                    AsptStructHeader::new::<aspt::structs::SevMailboxRegisters>().as_bytes(),
                    aspt::structs::SevMailboxRegisters {
                        mailbox_interrupt_id: 1,
                        _reserved: [0; 3],
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::CMD_RESP,
                        cmd_buf_addr_lo_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_LO,
                        cmd_buf_addr_hi_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_HI,
                    }
                    .as_bytes(),
                    // ACPI mailbox registers
                    AsptStructHeader::new::<aspt::structs::AcpiMailboxRegisters>().as_bytes(),
                    aspt::structs::AcpiMailboxRegisters {
                        _reserved1: 0,
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::ACPI_CMD_RESP,
                        _reserved2: [0; 2],
                    }
                    .as_bytes(),
                ],
            ));
        }

        self.with_madt(|t| b.append(t));
        self.with_srat(|t| b.append(t));
        if self.cache_topology.is_some() {
            self.with_pptt(|t| b.append(t));
        }

        let (rdsp, tables) = b.build();

        BuiltAcpiTables { rdsp, tables }
    }

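    /// Builds just the MADT, returning its raw bytes.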
    pub fn build_madt(&self) -> Vec<u8> {
        self.with_madt(|t| t.to_vec(&OEM_INFO))
    }

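    /// Builds just the SRAT, returning its raw bytes.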
    pub fn build_srat(&self) -> Vec<u8> {
        self.with_srat(|t| t.to_vec(&OEM_INFO))
    }

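    /// Builds just the PPTT, returning its raw bytes.
    ///
    /// Panics if `cache_topology` is `None`.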
    pub fn build_pptt(&self) -> Vec<u8> {
        self.with_pptt(|t| t.to_vec(&OEM_INFO))
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use acpi_spec::madt::MadtParser;
    use memory_range::MemoryRange;
    use virt::VpIndex;
    use virt::VpInfo;
    use vm_topology::processor::TopologyBuilder;
    use vm_topology::processor::x86::X86VpInfo;

    const KB: u64 = 1024;
    const MB: u64 = 1024 * KB;
    const GB: u64 = 1024 * MB;
    const TB: u64 = 1024 * GB;

    const MMIO: [MemoryRange; 2] = [
        MemoryRange::new(GB..2 * GB),
        MemoryRange::new(3 * GB..4 * GB),
    ];

    fn new_mem() -> MemoryLayout {
        MemoryLayout::new(TB, &MMIO, None).unwrap()
    }

    fn new_builder<'a>(
        mem_layout: &'a MemoryLayout,
        processor_topology: &'a ProcessorTopology<X86Topology>,
    ) -> AcpiTablesBuilder<'a, X86Topology> {
        AcpiTablesBuilder {
            processor_topology,
            mem_layout,
            cache_topology: None,
            with_ioapic: true,
            with_pic: false,
            with_pit: false,
            with_psp: false,
            pm_base: 1234,
            acpi_irq: 2,
        }
    }

    // Checks that the MADT's processor entries match the topology's APIC IDs.
    #[test]
    fn test_basic_madt_cpu() {
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(16).unwrap();
        let builder = new_builder(&mem, &topology);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (0..16).map(Some).collect::<Vec<_>>());

        let topology = TopologyBuilder::new_x86()
            .apic_id_offset(13)
            .build(16)
            .unwrap();
        let builder = new_builder(&mem, &topology);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (13..29).map(Some).collect::<Vec<_>>());

        let apic_ids = [12, 58, 4823, 36];
        let topology = TopologyBuilder::new_x86()
            .build_with_vp_info(apic_ids.iter().enumerate().map(|(uid, apic)| X86VpInfo {
                base: VpInfo {
                    vp_index: VpIndex::new(uid as u32),
                    vnode: 0,
                },
                apic_id: *apic,
            }))
            .unwrap();
        let builder = new_builder(&mem, &topology);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(
            entries,
            apic_ids.iter().map(|e| Some(*e)).collect::<Vec<_>>()
        );
    }
}