use acpi::dsdt;
use acpi::ssdt::Ssdt;
use acpi_spec::fadt::AddressSpaceId;
use acpi_spec::fadt::AddressWidth;
use acpi_spec::fadt::GenericAddress;
use acpi_spec::madt::InterruptPolarity;
use acpi_spec::madt::InterruptTriggerMode;
use cache_topology::CacheTopology;
use chipset::ioapic;
use chipset::psp;
use inspect::Inspect;
use std::collections::BTreeMap;
use vm_topology::memory::MemoryLayout;
use vm_topology::pcie::PcieHostBridge;
use vm_topology::processor::ArchTopology;
use vm_topology::processor::ProcessorTopology;
use vm_topology::processor::aarch64::Aarch64Topology;
use vm_topology::processor::x86::X86Topology;
use x86defs::apic::APIC_BASE_ADDRESS;
use zerocopy::IntoBytes;

/// The ACPI tables built by an [`AcpiTablesBuilder`].
pub struct BuiltAcpiTables {
    /// The RDSP (root system description pointer).
    pub rdsp: Vec<u8>,
    /// The remaining ACPI tables, to be loaded contiguously in guest memory.
    pub tables: Vec<u8>,
}

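/// A builder for guest ACPI tables.
///
/// A construction sketch (illustrative only, not compiled as a doctest; the
/// `topology`, `mem`, and `pcie` bindings and the field values shown are
/// assumptions, not defaults of this crate):
///
/// ```ignore
/// let builder = AcpiTablesBuilder {
///     processor_topology: &topology,
///     mem_layout: &mem,
///     cache_topology: None,
///     pcie_host_bridges: &pcie,
///     with_ioapic: true,
///     with_pic: false,
///     with_pit: true,
///     with_psp: false,
///     pm_base: 0x400,
///     acpi_irq: 9,
/// };
/// let built = builder.build_acpi_tables(0x1000, |_mem, _dsdt| {});
/// ```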
pub struct AcpiTablesBuilder<'a, T: AcpiTopology> {
    /// The processor topology to encode in the MADT, SRAT, and PPTT.
    pub processor_topology: &'a ProcessorTopology<T>,
    /// The memory layout to encode in the SRAT.
    pub mem_layout: &'a MemoryLayout,
    /// The cache topology; required only when building a PPTT.
    pub cache_topology: Option<&'a CacheTopology>,
    /// The PCIe host bridges to encode in the MCFG and the PCIe SSDT.
    pub pcie_host_bridges: &'a Vec<PcieHostBridge>,
    /// Whether to include an I/O APIC entry in the MADT.
    pub with_ioapic: bool,
    /// Whether to report PC/AT-compatible dual-8259 PICs in the MADT.
    pub with_pic: bool,
    /// Whether to include a PIT interrupt source override in the MADT.
    pub with_pit: bool,
    /// Whether to include an ASPT describing the AMD PSP registers.
    pub with_psp: bool,
    /// The base I/O port of the power management register block.
    pub pm_base: u16,
    /// The interrupt line used for the ACPI SCI.
    pub acpi_irq: u32,
}

pub const OEM_INFO: acpi::builder::OemInfo = acpi::builder::OemInfo {
    oem_id: *b"HVLITE",
    oem_tableid: *b"HVLITETB",
    oem_revision: 0,
    creator_id: *b"MSHV",
    creator_revision: 0,
};

/// Architecture-specific extensions for building the SRAT and MADT.
pub trait AcpiTopology: ArchTopology + Inspect + Sized {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>);
    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>);
}

// APIC IDs above this cannot be encoded in the legacy 8-bit MADT and SRAT
// entries (0xff is the broadcast ID), so x2APIC entries are used instead.
const MAX_LEGACY_APIC_ID: u32 = 0xfe;

impl AcpiTopology for X86Topology {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
        for vp in topology.vps_arch() {
            if vp.apic_id <= MAX_LEGACY_APIC_ID {
                srat.extend_from_slice(
                    acpi_spec::srat::SratApic::new(vp.apic_id as u8, vp.base.vnode).as_bytes(),
                );
            } else {
                srat.extend_from_slice(
                    acpi_spec::srat::SratX2Apic::new(vp.apic_id, vp.base.vnode).as_bytes(),
                );
            }
        }
    }

    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
        madt.extend_from_slice(acpi_spec::madt::MadtLocalNmiSource::new().as_bytes());

        for vp in topology.vps_arch() {
            // ACPI processor UIDs are one-based.
            let uid = vp.base.vp_index.index() + 1;
            if vp.apic_id <= MAX_LEGACY_APIC_ID && uid <= u8::MAX.into() {
                madt.extend_from_slice(
                    acpi_spec::madt::MadtApic {
                        apic_id: vp.apic_id as u8,
                        acpi_processor_uid: uid as u8,
                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
                        ..acpi_spec::madt::MadtApic::new()
                    }
                    .as_bytes(),
                );
            } else {
                madt.extend_from_slice(
                    acpi_spec::madt::MadtX2Apic {
                        x2_apic_id: vp.apic_id,
                        acpi_processor_uid: uid,
                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
                        ..acpi_spec::madt::MadtX2Apic::new()
                    }
                    .as_bytes(),
                );
            }
        }
    }
}

impl AcpiTopology for Aarch64Topology {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
        for vp in topology.vps_arch() {
            srat.extend_from_slice(
                acpi_spec::srat::SratGicc::new(vp.base.vp_index.index() + 1, vp.base.vnode)
                    .as_bytes(),
            );
        }
    }

    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
        // Report the GIC distributor (version 3).
        madt.extend_from_slice(
            acpi_spec::madt::MadtGicd::new(0, topology.gic_distributor_base(), 3).as_bytes(),
        );
        for vp in topology.vps_arch() {
            let uid = vp.base.vp_index.index() + 1;
            let mpidr = u64::from(vp.mpidr) & u64::from(aarch64defs::MpidrEl1::AFFINITY_MASK);
            // Each VP's redistributor frame is laid out consecutively from
            // the redistributor base.
            let gicr = topology.gic_redistributors_base()
                + vp.base.vp_index.index() as u64 * aarch64defs::GIC_REDISTRIBUTOR_SIZE;
            let pmu_gsiv = topology.pmu_gsiv();
            madt.extend_from_slice(
                acpi_spec::madt::MadtGicc::new(uid, mpidr, gicr, pmu_gsiv).as_bytes(),
            );
        }
    }
}

impl<T: AcpiTopology> AcpiTablesBuilder<'_, T> {
    /// Calls `f` with the SRAT, built from the processor topology and the
    /// memory layout.
    fn with_srat<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut srat_extra: Vec<u8> = Vec::new();
        T::extend_srat(self.processor_topology, &mut srat_extra);
        for range in self.mem_layout.ram() {
            srat_extra.extend_from_slice(
                acpi_spec::srat::SratMemory::new(
                    range.range.start(),
                    range.range.len(),
                    range.vnode,
                )
                .as_bytes(),
            );
        }

        (f)(&acpi::builder::Table::new_dyn(
            acpi_spec::srat::SRAT_REVISION,
            None,
            &acpi_spec::srat::SratHeader::new(),
            &[srat_extra.as_slice()],
        ))
    }

    /// Calls `f` with the MADT.
    fn with_madt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut madt_extra: Vec<u8> = Vec::new();
        if self.with_ioapic {
            madt_extra.extend_from_slice(
                acpi_spec::madt::MadtIoApic {
                    io_apic_id: 0,
                    io_apic_address: ioapic::IOAPIC_DEVICE_MMIO_REGION_BASE_ADDRESS as u32,
                    ..acpi_spec::madt::MadtIoApic::new()
                }
                .as_bytes(),
            );
        }

        // The SCI is level-triggered, active high.
        madt_extra.extend_from_slice(
            acpi_spec::madt::MadtInterruptSourceOverride::new(
                self.acpi_irq.try_into().expect("should be in range"),
                self.acpi_irq,
                Some(InterruptPolarity::ActiveHigh),
                Some(InterruptTriggerMode::Level),
            )
            .as_bytes(),
        );

        if self.with_pit {
            // The PIT's ISA IRQ 0 is routed to I/O APIC input 2.
            madt_extra.extend_from_slice(
                acpi_spec::madt::MadtInterruptSourceOverride::new(0, 2, None, None).as_bytes(),
            );
        }

        T::extend_madt(self.processor_topology, &mut madt_extra);

        let flags = if self.with_pic {
            acpi_spec::madt::MADT_PCAT_COMPAT
        } else {
            0
        };

        (f)(&acpi::builder::Table::new_dyn(
            5,
            None,
            &acpi_spec::madt::Madt {
                apic_addr: APIC_BASE_ADDRESS,
                flags,
            },
            &[madt_extra.as_slice()],
        ))
    }

    /// Calls `f` with the MCFG.
    fn with_mcfg<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut mcfg_extra: Vec<u8> = Vec::new();
        for bridge in self.pcie_host_bridges {
            // The MCFG base address is defined relative to bus 0 of the
            // segment, while `ecam_range` starts at `start_bus`. Each bus
            // decodes 256 * 4096 bytes of config space (256 functions of
            // 4 KiB each), so back the base up by that much per bus.
            let ecam_region_offset = (bridge.start_bus as u64) * 256 * 4096;
            mcfg_extra.extend_from_slice(
                acpi_spec::mcfg::McfgSegmentBusRange::new(
                    bridge.ecam_range.start() - ecam_region_offset,
                    bridge.segment,
                    bridge.start_bus,
                    bridge.end_bus,
                )
                .as_bytes(),
            )
        }

        (f)(&acpi::builder::Table::new_dyn(
            acpi_spec::mcfg::MCFG_REVISION,
            None,
            &acpi_spec::mcfg::McfgHeader::new(),
            &[mcfg_extra.as_slice()],
        ))
    }

    /// Calls `f` with the PPTT.
    ///
    /// Panics if `cache_topology` is `None`.
    fn with_pptt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        use acpi_spec::pptt;

        let cache = self.cache_topology.expect("cache topology is required");

        // Offsets in the PPTT are relative to the start of the table,
        // including the ACPI header.
        let current_offset =
            |pptt_extra: &[u8]| (size_of::<acpi_spec::Header>() + pptt_extra.len()) as u32;

        // Appends a cache structure for the given level and type, returning
        // its offset, or `None` if no such cache is in the topology.
        let cache_for = |pptt_extra: &mut Vec<u8>, level: u8, cache_type, next: Option<u32>| {
            let descriptor = cache
                .caches
                .iter()
                .find(|d| d.level == level && d.cache_type == cache_type)?;
            let offset = current_offset(pptt_extra);
            pptt_extra.extend_from_slice(
                pptt::PpttCache {
                    flags: u32::from(
                        pptt::PpttCacheFlags::new()
                            .with_size_valid(true)
                            .with_associativity_valid(true)
                            .with_cache_type_valid(true)
                            .with_line_size_valid(true),
                    )
                    .into(),
                    size: descriptor.size.into(),
                    associativity: descriptor.associativity.unwrap_or(0) as u8,
                    attributes: pptt::PpttCacheAttributes::new().with_cache_type(match descriptor
                        .cache_type
                    {
                        cache_topology::CacheType::Data => pptt::PPTT_CACHE_TYPE_DATA,
                        cache_topology::CacheType::Instruction => pptt::PPTT_CACHE_TYPE_INSTRUCTION,
                        cache_topology::CacheType::Unified => pptt::PPTT_CACHE_TYPE_UNIFIED,
                    }),
                    line_size: (descriptor.line_size as u16).into(),
                    next_level: next.unwrap_or(0).into(),
                    ..pptt::PpttCache::new()
                }
                .as_bytes(),
            );
            Some(offset)
        };

        let mut pptt_extra = Vec::new();
        let mut sockets = BTreeMap::new();
        let smt_enabled = self.processor_topology.smt_enabled();

        for vp in self.processor_topology.vps() {
            let acpi_processor_id = vp.vp_index.index() + 1;
            let info = self.processor_topology.vp_topology(vp.vp_index);

            // Add the socket (physical package) node, with the L3 cache
            // attached, the first time a VP in this socket is seen.
            let &mut (socket_offset, ref mut cores) =
                sockets.entry(info.socket).or_insert_with(|| {
                    let l3 =
                        cache_for(&mut pptt_extra, 3, cache_topology::CacheType::Unified, None);
                    let socket_offset = current_offset(&pptt_extra);
                    pptt_extra.extend_from_slice(
                        pptt::PpttProcessor {
                            flags: u32::from(
                                pptt::PpttProcessorFlags::new().with_physical_package(true),
                            )
                            .into(),
                            ..pptt::PpttProcessor::new(l3.is_some() as u8)
                        }
                        .as_bytes(),
                    );

                    if let Some(l3) = l3 {
                        pptt_extra.extend_from_slice(&l3.to_ne_bytes());
                    }

                    (socket_offset, BTreeMap::new())
                });

            // Add the core node, with the L2 and L1 caches attached, the
            // first time a VP in this core is seen. If SMT is disabled, the
            // core node is the leaf, so it carries the processor UID.
            let core_offset = *cores.entry(info.core).or_insert_with(|| {
                let l2 = cache_for(&mut pptt_extra, 2, cache_topology::CacheType::Unified, None);
                let l1i = cache_for(
                    &mut pptt_extra,
                    1,
                    cache_topology::CacheType::Instruction,
                    l2,
                );
                let l1d = cache_for(&mut pptt_extra, 1, cache_topology::CacheType::Data, l2);

                let core_offset = current_offset(&pptt_extra);
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new()
                                .with_acpi_processor_uid_valid(!smt_enabled),
                        )
                        .into(),
                        acpi_processor_id: if !smt_enabled {
                            acpi_processor_id.into()
                        } else {
                            0u32.into()
                        },
                        parent: socket_offset.into(),
                        ..pptt::PpttProcessor::new(l1i.is_some() as u8 + l1d.is_some() as u8)
                    }
                    .as_bytes(),
                );

                if let Some(l1) = l1i {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }
                if let Some(l1) = l1d {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }

                core_offset
            });

            // With SMT, each thread gets a leaf node under its core.
            if smt_enabled {
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new().with_acpi_processor_uid_valid(true),
                        )
                        .into(),
                        acpi_processor_id: acpi_processor_id.into(),
                        parent: core_offset.into(),
                        ..pptt::PpttProcessor::new(0)
                    }
                    .as_bytes(),
                )
            }
        }

        (f)(&acpi::builder::Table::new_dyn(
            1,
            None,
            &pptt::Pptt {},
            &[pptt_extra.as_slice()],
        ))
    }

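    /// Builds the ACPI tables, including a generated DSDT, for loading at
    /// guest physical address `gpa`. `add_devices_to_dsdt` is called so the
    /// caller can add VM-specific devices to the DSDT.
    ///
    /// A minimal usage sketch (illustrative only, not compiled as a
    /// doctest; the `builder` binding is an assumption):
    ///
    /// ```ignore
    /// let built = builder.build_acpi_tables(0x1000, |_mem, dsdt| {
    ///     // Add VM-specific devices to the DSDT here.
    /// });
    /// assert!(!built.rdsp.is_empty());
    /// ```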
    pub fn build_acpi_tables<F>(&self, gpa: u64, add_devices_to_dsdt: F) -> BuiltAcpiTables
    where
        F: FnOnce(&MemoryLayout, &mut dsdt::Dsdt),
    {
        let mut dsdt_data = dsdt::Dsdt::new();

        // Define the S0 and S5 (soft-off) sleep states.
        dsdt_data.add_object(&dsdt::NamedObject::new(
            b"\\_S0",
            &dsdt::Package(vec![0, 0]),
        ));
        dsdt_data.add_object(&dsdt::NamedObject::new(
            b"\\_S5",
            &dsdt::Package(vec![0, 0]),
        ));
        add_devices_to_dsdt(self.mem_layout, &mut dsdt_data);

        // Add a processor device for each VP, named P000-P999, Q000-Q999,
        // and so on, with one-based UIDs matching the MADT entries.
        for proc_index in 1..self.processor_topology.vp_count() + 1 {
            let c = (b'P' + (proc_index / 1000) as u8) as char;
            let name = &format!("{c}{:03}", proc_index % 1000);
            let mut proc = dsdt::Device::new(name.as_bytes());
            proc.add_object(&dsdt::NamedString::new(b"_HID", b"ACPI0007"));
            proc.add_object(&dsdt::NamedInteger::new(b"_UID", proc_index as u64));
            let mut method = dsdt::Method::new(b"_STA");
            method.add_operation(&dsdt::ReturnOp {
                result: dsdt::encode_integer(0xf),
            });
            proc.add_object(&method);
            dsdt_data.add_object(&proc);
        }

        self.build_acpi_tables_inner(gpa, &dsdt_data.to_bytes())
    }

    /// Builds the ACPI tables with a caller-provided DSDT, for loading at
    /// guest physical address `gpa`.
    pub fn build_acpi_tables_custom_dsdt(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        self.build_acpi_tables_inner(gpa, dsdt)
    }

    fn build_acpi_tables_inner(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        // The tables are placed one page past `gpa`, leaving the first page
        // for the RDSP.
        let mut b = acpi::builder::Builder::new(gpa + 0x1000, OEM_INFO);

        let dsdt = b.append_raw(dsdt);

        b.append(&acpi::builder::Table::new(
            6,
            None,
            &acpi_spec::fadt::Fadt {
                flags: acpi_spec::fadt::FADT_WBINVD
                    | acpi_spec::fadt::FADT_PROC_C1
                    | acpi_spec::fadt::FADT_PWR_BUTTON
                    | acpi_spec::fadt::FADT_SLP_BUTTON
                    | acpi_spec::fadt::FADT_RTC_S4
                    | acpi_spec::fadt::FADT_TMR_VAL_EXT
                    | acpi_spec::fadt::FADT_RESET_REG_SUP
                    | acpi_spec::fadt::FADT_USE_PLATFORM_CLOCK,
                x_dsdt: dsdt,
                sci_int: self.acpi_irq as u16,
                p_lvl2_lat: 101,  // >100 means C2 is not supported
                p_lvl3_lat: 1001, // >1000 means C3 is not supported
                pm1_evt_len: 4,
                x_pm1a_evt_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::STATUS.0 as u16).into(),
                },
                pm1_cnt_len: 2,
                x_pm1a_cnt_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 16,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::CONTROL.0 as u16).into(),
                },
                gpe0_blk_len: 4,
                x_gpe0_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::GEN_PURPOSE_STATUS.0 as u16)
                        .into(),
                },
                reset_reg: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 8,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Byte,
                    address: (self.pm_base + chipset::pm::DynReg::RESET.0 as u16).into(),
                },
                reset_value: chipset::pm::RESET_VALUE,
                pm_tmr_len: 4,
                x_pm_tmr_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Dword,
                    address: (self.pm_base + chipset::pm::DynReg::TIMER.0 as u16).into(),
                },
                ..Default::default()
            },
        ));

        if self.with_psp {
            use acpi_spec::aspt;
            use acpi_spec::aspt::Aspt;
            use acpi_spec::aspt::AsptStructHeader;

            // The ASPT describes the register layout of the AMD security
            // processor (PSP) to the guest.
            b.append(&acpi::builder::Table::new_dyn(
                1,
                None,
                &Aspt { num_structs: 3 },
                &[
                    AsptStructHeader::new::<aspt::structs::AspGlobalRegisters>().as_bytes(),
                    aspt::structs::AspGlobalRegisters {
                        _reserved: 0,
                        feature_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::FEATURE,
                        interrupt_enable_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::INT_EN,
                        interrupt_status_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::INT_STS,
                    }
                    .as_bytes(),
                    AsptStructHeader::new::<aspt::structs::SevMailboxRegisters>().as_bytes(),
                    aspt::structs::SevMailboxRegisters {
                        mailbox_interrupt_id: 1,
                        _reserved: [0; 3],
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::CMD_RESP,
                        cmd_buf_addr_lo_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_LO,
                        cmd_buf_addr_hi_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_HI,
                    }
                    .as_bytes(),
                    AsptStructHeader::new::<aspt::structs::AcpiMailboxRegisters>().as_bytes(),
                    aspt::structs::AcpiMailboxRegisters {
                        _reserved1: 0,
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::ACPI_CMD_RESP,
                        _reserved2: [0; 2],
                    }
                    .as_bytes(),
                ],
            ));
        }

        self.with_madt(|t| b.append(t));
        self.with_srat(|t| b.append(t));
        if !self.pcie_host_bridges.is_empty() {
            self.with_mcfg(|t| b.append(t));

            // Describe each PCIe host bridge in an SSDT.
            let mut ssdt = Ssdt::new();
            for bridge in self.pcie_host_bridges {
                ssdt.add_pcie(
                    bridge.index,
                    bridge.segment,
                    bridge.start_bus,
                    bridge.end_bus,
                    bridge.ecam_range,
                    bridge.low_mmio,
                    bridge.high_mmio,
                );
            }
            b.append_raw(&ssdt.to_bytes());
        }

        if self.cache_topology.is_some() {
            self.with_pptt(|t| b.append(t));
        }

        let (rdsp, tables) = b.build();

        BuiltAcpiTables { rdsp, tables }
    }

    /// Builds just the MADT, returning the serialized table.
    pub fn build_madt(&self) -> Vec<u8> {
        self.with_madt(|t| t.to_vec(&OEM_INFO))
    }

    /// Builds just the SRAT, returning the serialized table.
    pub fn build_srat(&self) -> Vec<u8> {
        self.with_srat(|t| t.to_vec(&OEM_INFO))
    }

    /// Builds just the MCFG, returning the serialized table.
    pub fn build_mcfg(&self) -> Vec<u8> {
        self.with_mcfg(|t| t.to_vec(&OEM_INFO))
    }

    /// Builds just the PPTT, returning the serialized table.
    ///
    /// Panics if `cache_topology` is `None`.
    pub fn build_pptt(&self) -> Vec<u8> {
        self.with_pptt(|t| t.to_vec(&OEM_INFO))
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use acpi_spec::madt::MadtParser;
    use acpi_spec::mcfg::parse_mcfg;
    use memory_range::MemoryRange;
    use virt::VpIndex;
    use virt::VpInfo;
    use vm_topology::processor::TopologyBuilder;
    use vm_topology::processor::x86::X86VpInfo;

    const KB: u64 = 1024;
    const MB: u64 = 1024 * KB;
    const GB: u64 = 1024 * MB;
    const TB: u64 = 1024 * GB;

    const MMIO: [MemoryRange; 2] = [
        MemoryRange::new(GB..2 * GB),
        MemoryRange::new(3 * GB..4 * GB),
    ];

    fn new_mem() -> MemoryLayout {
        MemoryLayout::new(TB, &MMIO, &[], &[], None).unwrap()
    }

    fn new_builder<'a>(
        mem_layout: &'a MemoryLayout,
        processor_topology: &'a ProcessorTopology<X86Topology>,
        pcie_host_bridges: &'a Vec<PcieHostBridge>,
    ) -> AcpiTablesBuilder<'a, X86Topology> {
        AcpiTablesBuilder {
            processor_topology,
            mem_layout,
            cache_topology: None,
            pcie_host_bridges,
            with_ioapic: true,
            with_pic: false,
            with_pit: false,
            with_psp: false,
            pm_base: 1234,
            acpi_irq: 2,
        }
    }

    #[test]
    fn test_basic_madt_cpu() {
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(16).unwrap();
        let pcie = vec![];
        let builder = new_builder(&mem, &topology, &pcie);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (0..16).map(Some).collect::<Vec<_>>());

        let topology = TopologyBuilder::new_x86()
            .apic_id_offset(13)
            .build(16)
            .unwrap();
        let builder = new_builder(&mem, &topology, &pcie);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (13..29).map(Some).collect::<Vec<_>>());

        let apic_ids = [12, 58, 4823, 36];
        let topology = TopologyBuilder::new_x86()
            .build_with_vp_info(apic_ids.iter().enumerate().map(|(uid, apic)| X86VpInfo {
                base: VpInfo {
                    vp_index: VpIndex::new(uid as u32),
                    vnode: 0,
                },
                apic_id: *apic,
            }))
            .unwrap();
        let builder = new_builder(&mem, &topology, &pcie);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(
            entries,
            apic_ids.iter().map(|e| Some(*e)).collect::<Vec<_>>()
        );
    }

    #[test]
    fn test_basic_pcie_topology() {
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(16).unwrap();
        let pcie_host_bridges = vec![
            PcieHostBridge {
                index: 0,
                segment: 0,
                start_bus: 0,
                end_bus: 255,
                ecam_range: MemoryRange::new(0..256 * 256 * 4096),
                low_mmio: MemoryRange::new(0..0),
                high_mmio: MemoryRange::new(0..0),
            },
            PcieHostBridge {
                index: 1,
                segment: 1,
                start_bus: 32,
                end_bus: 63,
                ecam_range: MemoryRange::new(5 * GB..5 * GB + 32 * 256 * 4096),
                low_mmio: MemoryRange::new(0..0),
                high_mmio: MemoryRange::new(0..0),
            },
        ];

        let builder = new_builder(&mem, &topology, &pcie_host_bridges);
        let mcfg = builder.build_mcfg();

        let mut i = 0;
        let _ = parse_mcfg(&mcfg, |sbr| match i {
            0 => {
                assert_eq!(sbr.ecam_base, 0);
                assert_eq!(sbr.segment, 0);
                assert_eq!(sbr.start_bus, 0);
                assert_eq!(sbr.end_bus, 255);
                i += 1;
            }
            1 => {
                assert_eq!(sbr.ecam_base, 5 * GB - 32 * 256 * 4096);
                assert_eq!(sbr.segment, 1);
                assert_eq!(sbr.start_bus, 32);
                assert_eq!(sbr.end_bus, 63);
                i += 1;
            }
            _ => panic!("only expected two MCFG segment bus range entries"),
        })
        .unwrap();
    }
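
    // The two tests below are added smoke tests, not part of the original
    // suite; they rely only on APIs already used elsewhere in this file.

    #[test]
    fn test_build_acpi_tables_smoke() {
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(4).unwrap();
        let pcie = vec![];
        let builder = new_builder(&mem, &topology, &pcie);

        // Build the full table set at an arbitrary guest physical address,
        // adding no extra DSDT devices.
        let built = builder.build_acpi_tables(0x1000_0000, |_mem, _dsdt| {});

        // Both the RDSP and the table blob should be non-empty.
        assert!(!built.rdsp.is_empty());
        assert!(!built.tables.is_empty());
    }

    #[test]
    fn test_srat_has_entries() {
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(2).unwrap();
        let pcie = vec![];
        let builder = new_builder(&mem, &topology, &pcie);

        // No SRAT parser is imported here, so just check that the blob is
        // larger than a bare ACPI header, implying that processor and
        // memory entries were appended.
        let srat = builder.build_srat();
        assert!(srat.len() > size_of::<acpi_spec::Header>());
    }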
}