//! Builders for the guest ACPI tables: FADT, MADT, SRAT, MCFG, PPTT, and
//! (optionally) the AMD ASPT.

use acpi::dsdt;
use acpi_spec::fadt::AddressSpaceId;
use acpi_spec::fadt::AddressWidth;
use acpi_spec::fadt::GenericAddress;
use acpi_spec::madt::InterruptPolarity;
use acpi_spec::madt::InterruptTriggerMode;
use cache_topology::CacheTopology;
use chipset::ioapic;
use chipset::psp;
use inspect::Inspect;
use std::collections::BTreeMap;
use vm_topology::memory::MemoryLayout;
use vm_topology::pcie::PcieHostBridge;
use vm_topology::processor::ArchTopology;
use vm_topology::processor::ProcessorTopology;
use vm_topology::processor::aarch64::Aarch64Topology;
use vm_topology::processor::x86::X86Topology;
use x86defs::apic::APIC_BASE_ADDRESS;
use zerocopy::IntoBytes;

/// The ACPI tables built by [`AcpiTablesBuilder`].
pub struct BuiltAcpiTables {
    /// The RSDP.
    pub rdsp: Vec<u8>,
    /// The remaining tables, laid out contiguously.
    pub tables: Vec<u8>,
}

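/// A builder for the guest's ACPI tables.
///
/// A minimal usage sketch (`topology` and `mem_layout` are assumed to come
/// from the VM configuration; the field values mirror the tests below):
///
/// ```ignore
/// let builder = AcpiTablesBuilder {
///     processor_topology: &topology,
///     mem_layout: &mem_layout,
///     cache_topology: None,
///     pcie_host_bridges: &pcie_host_bridges,
///     with_ioapic: true,
///     with_pic: false,
///     with_pit: false,
///     with_psp: false,
///     pm_base: 0x400,
///     acpi_irq: 9,
/// };
/// // `acpi_gpa` is a placeholder for wherever the loader puts the tables.
/// let tables = builder.build_acpi_tables(acpi_gpa, |_mem, _dsdt| {});
/// ```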
pub struct AcpiTablesBuilder<'a, T: AcpiTopology> {
    /// The VM's processor topology.
    pub processor_topology: &'a ProcessorTopology<T>,
    /// The VM's memory layout.
    pub mem_layout: &'a MemoryLayout,
    /// The cache topology, required to build a PPTT.
    pub cache_topology: Option<&'a CacheTopology>,
    /// The PCIe host bridges to describe in the MCFG.
    pub pcie_host_bridges: &'a Vec<PcieHostBridge>,
    /// Whether to include an I/O APIC entry in the MADT.
    pub with_ioapic: bool,
    /// Whether to mark the MADT as PC/AT-compatible (8259 PICs present).
    pub with_pic: bool,
    /// Whether to include a PIT interrupt source override in the MADT.
    pub with_pit: bool,
    /// Whether to include an ASPT describing the AMD PSP.
    pub with_psp: bool,
    /// The I/O port base of the power management registers.
    pub pm_base: u16,
    /// The interrupt line used for the ACPI SCI.
    pub acpi_irq: u32,
}

/// The OEM info stamped into each generated table.
pub const OEM_INFO: acpi::builder::OemInfo = acpi::builder::OemInfo {
    oem_id: *b"HVLITE",
    oem_tableid: *b"HVLITETB",
    oem_revision: 0,
    creator_id: *b"MSHV",
    creator_revision: 0,
};

/// Architecture-specific contributions to the ACPI tables.
pub trait AcpiTopology: ArchTopology + Inspect + Sized {
    /// Appends the per-processor SRAT entries for this architecture.
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>);
    /// Appends the per-processor MADT entries for this architecture.
    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>);
}

// APIC IDs above this value cannot be encoded in the 8-bit ID fields of the
// legacy xAPIC MADT and SRAT entries, so such processors are described with
// x2APIC entries instead.
const MAX_LEGACY_APIC_ID: u32 = 0xfe;

impl AcpiTopology for X86Topology {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
        for vp in topology.vps_arch() {
            if vp.apic_id <= MAX_LEGACY_APIC_ID {
                srat.extend_from_slice(
                    acpi_spec::srat::SratApic::new(vp.apic_id as u8, vp.base.vnode).as_bytes(),
                );
            } else {
                srat.extend_from_slice(
                    acpi_spec::srat::SratX2Apic::new(vp.apic_id, vp.base.vnode).as_bytes(),
                );
            }
        }
    }

    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
        madt.extend_from_slice(acpi_spec::madt::MadtLocalNmiSource::new().as_bytes());

        for vp in topology.vps_arch() {
            let uid = vp.base.vp_index.index() + 1;
            if vp.apic_id <= MAX_LEGACY_APIC_ID && uid <= u8::MAX.into() {
                madt.extend_from_slice(
                    acpi_spec::madt::MadtApic {
                        apic_id: vp.apic_id as u8,
                        acpi_processor_uid: uid as u8,
                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
                        ..acpi_spec::madt::MadtApic::new()
                    }
                    .as_bytes(),
                );
            } else {
                madt.extend_from_slice(
                    acpi_spec::madt::MadtX2Apic {
                        x2_apic_id: vp.apic_id,
                        acpi_processor_uid: uid,
                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
                        ..acpi_spec::madt::MadtX2Apic::new()
                    }
                    .as_bytes(),
                );
            }
        }
    }
}

impl AcpiTopology for Aarch64Topology {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
        for vp in topology.vps_arch() {
            srat.extend_from_slice(
                acpi_spec::srat::SratGicc::new(vp.base.vp_index.index() + 1, vp.base.vnode)
                    .as_bytes(),
            );
        }
    }

    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
        // Report a GICv3 distributor.
        madt.extend_from_slice(
            acpi_spec::madt::MadtGicd::new(0, topology.gic_distributor_base(), 3).as_bytes(),
        );
        for vp in topology.vps_arch() {
            let uid = vp.base.vp_index.index() + 1;
            // Only the affinity fields of the MPIDR are reported.
            let mpidr = u64::from(vp.mpidr) & u64::from(aarch64defs::MpidrEl1::AFFINITY_MASK);
            let gicr = topology.gic_redistributors_base()
                + vp.base.vp_index.index() as u64 * aarch64defs::GIC_REDISTRIBUTOR_SIZE;
            let pmu_gsiv = topology.pmu_gsiv();
            madt.extend_from_slice(
                acpi_spec::madt::MadtGicc::new(uid, mpidr, gicr, pmu_gsiv).as_bytes(),
            );
        }
    }
}

impl<T: AcpiTopology> AcpiTablesBuilder<'_, T> {
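    /// Calls `f` with the SRAT, containing processor and memory affinity
    /// entries.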
    fn with_srat<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut srat_extra: Vec<u8> = Vec::new();
        T::extend_srat(self.processor_topology, &mut srat_extra);
        for range in self.mem_layout.ram() {
            srat_extra.extend_from_slice(
                acpi_spec::srat::SratMemory::new(
                    range.range.start(),
                    range.range.len(),
                    range.vnode,
                )
                .as_bytes(),
            );
        }

        (f)(&acpi::builder::Table::new_dyn(
            acpi_spec::srat::SRAT_REVISION,
            None,
            &acpi_spec::srat::SratHeader::new(),
            &[srat_extra.as_slice()],
        ))
    }

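    /// Calls `f` with the MADT, reflecting the configured interrupt
    /// controllers and one entry per processor.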
    fn with_madt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut madt_extra: Vec<u8> = Vec::new();
        if self.with_ioapic {
            madt_extra.extend_from_slice(
                acpi_spec::madt::MadtIoApic {
                    io_apic_id: 0,
                    io_apic_address: ioapic::IOAPIC_DEVICE_MMIO_REGION_BASE_ADDRESS as u32,
                    ..acpi_spec::madt::MadtIoApic::new()
                }
                .as_bytes(),
            );
        }

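        // The SCI is identity-routed but must be reported as active high,
        // level triggered.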
        madt_extra.extend_from_slice(
            acpi_spec::madt::MadtInterruptSourceOverride::new(
                self.acpi_irq.try_into().expect("should be in range"),
                self.acpi_irq,
                Some(InterruptPolarity::ActiveHigh),
                Some(InterruptTriggerMode::Level),
            )
            .as_bytes(),
        );

        if self.with_pit {
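            // The PIT's ISA IRQ 0 is routed to I/O APIC input (GSI) 2.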
            madt_extra.extend_from_slice(
                acpi_spec::madt::MadtInterruptSourceOverride::new(0, 2, None, None).as_bytes(),
            );
        }

        T::extend_madt(self.processor_topology, &mut madt_extra);

        let flags = if self.with_pic {
            acpi_spec::madt::MADT_PCAT_COMPAT
        } else {
            0
        };

        (f)(&acpi::builder::Table::new_dyn(
            5,
            None,
            &acpi_spec::madt::Madt {
                apic_addr: APIC_BASE_ADDRESS,
                flags,
            },
            &[madt_extra.as_slice()],
        ))
    }

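    /// Calls `f` with the MCFG, containing one entry per PCIe host bridge.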
    fn with_mcfg<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut mcfg_extra: Vec<u8> = Vec::new();
        for bridge in self.pcie_host_bridges {
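            // The MCFG base address corresponds to bus 0 of the segment, but
            // the bridge's ECAM range begins at `start_bus`, so back the base
            // off by `start_bus` buses' worth of config space (256 functions
            // of 4 KiB each per bus).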
            let ecam_region_offset = (bridge.start_bus as u64) * 256 * 4096;
            mcfg_extra.extend_from_slice(
                acpi_spec::mcfg::McfgSegmentBusRange::new(
                    bridge.ecam_range.start() - ecam_region_offset,
                    bridge.segment,
                    bridge.start_bus,
                    bridge.end_bus,
                )
                .as_bytes(),
            )
        }

        (f)(&acpi::builder::Table::new_dyn(
            acpi_spec::mcfg::MCFG_REVISION,
            None,
            &acpi_spec::mcfg::McfgHeader::new(),
            &[mcfg_extra.as_slice()],
        ))
    }

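    /// Calls `f` with the PPTT, describing the processor hierarchy (socket,
    /// core, and thread when SMT is enabled) and the cache hierarchy.
    ///
    /// Panics if `cache_topology` is `None`.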
    fn with_pptt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        use acpi_spec::pptt;

        let cache = self.cache_topology.expect("cache topology is required");

        // Offsets in the PPTT are relative to the start of the table,
        // including the ACPI header.
        let current_offset =
            |pptt_extra: &[u8]| (size_of::<acpi_spec::Header>() + pptt_extra.len()) as u32;

        // Appends a cache node for the given level and type (if present in
        // the cache topology), returning its offset for use as a parent or
        // next-level link.
        let cache_for = |pptt_extra: &mut Vec<u8>, level: u8, cache_type, next: Option<u32>| {
            let descriptor = cache
                .caches
                .iter()
                .find(|d| d.level == level && d.cache_type == cache_type)?;
            let offset = current_offset(pptt_extra);
            pptt_extra.extend_from_slice(
                pptt::PpttCache {
                    flags: u32::from(
                        pptt::PpttCacheFlags::new()
                            .with_size_valid(true)
                            .with_associativity_valid(true)
                            .with_cache_type_valid(true)
                            .with_line_size_valid(true),
                    )
                    .into(),
                    size: descriptor.size.into(),
                    associativity: descriptor.associativity.unwrap_or(0) as u8,
                    attributes: pptt::PpttCacheAttributes::new().with_cache_type(match descriptor
                        .cache_type
                    {
                        cache_topology::CacheType::Data => pptt::PPTT_CACHE_TYPE_DATA,
                        cache_topology::CacheType::Instruction => pptt::PPTT_CACHE_TYPE_INSTRUCTION,
                        cache_topology::CacheType::Unified => pptt::PPTT_CACHE_TYPE_UNIFIED,
                    }),
                    line_size: (descriptor.line_size as u16).into(),
                    next_level: next.unwrap_or(0).into(),
                    ..pptt::PpttCache::new()
                }
                .as_bytes(),
            );
            Some(offset)
        };

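        // Walk the VPs, emitting a processor node per socket and per core on
        // first sight, plus a leaf node per thread when SMT is enabled; each
        // node links back to its parent and to its cache nodes.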
        let mut pptt_extra = Vec::new();
        let mut sockets = BTreeMap::new();
        let smt_enabled = self.processor_topology.smt_enabled();

        for vp in self.processor_topology.vps() {
            let acpi_processor_id = vp.vp_index.index() + 1;
            let info = self.processor_topology.vp_topology(vp.vp_index);

            let &mut (socket_offset, ref mut cores) =
                sockets.entry(info.socket).or_insert_with(|| {
                    let l3 =
                        cache_for(&mut pptt_extra, 3, cache_topology::CacheType::Unified, None);
                    let socket_offset = current_offset(&pptt_extra);
                    pptt_extra.extend_from_slice(
                        pptt::PpttProcessor {
                            flags: u32::from(
                                pptt::PpttProcessorFlags::new().with_physical_package(true),
                            )
                            .into(),
                            ..pptt::PpttProcessor::new(l3.is_some() as u8)
                        }
                        .as_bytes(),
                    );

                    if let Some(l3) = l3 {
                        pptt_extra.extend_from_slice(&l3.to_ne_bytes());
                    }

                    (socket_offset, BTreeMap::new())
                });

            let core_offset = *cores.entry(info.core).or_insert_with(|| {
                let l2 = cache_for(&mut pptt_extra, 2, cache_topology::CacheType::Unified, None);
                let l1i = cache_for(
                    &mut pptt_extra,
                    1,
                    cache_topology::CacheType::Instruction,
                    l2,
                );
                let l1d = cache_for(&mut pptt_extra, 1, cache_topology::CacheType::Data, l2);

                let core_offset = current_offset(&pptt_extra);
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new()
                                .with_acpi_processor_uid_valid(!smt_enabled),
                        )
                        .into(),
                        acpi_processor_id: if !smt_enabled {
                            acpi_processor_id.into()
                        } else {
                            0u32.into()
                        },
                        parent: socket_offset.into(),
                        ..pptt::PpttProcessor::new(l1i.is_some() as u8 + l1d.is_some() as u8)
                    }
                    .as_bytes(),
                );

                if let Some(l1) = l1i {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }
                if let Some(l1) = l1d {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }

                core_offset
            });

            if smt_enabled {
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new().with_acpi_processor_uid_valid(true),
                        )
                        .into(),
                        acpi_processor_id: acpi_processor_id.into(),
                        parent: core_offset.into(),
                        ..pptt::PpttProcessor::new(0)
                    }
                    .as_bytes(),
                )
            }
        }

        (f)(&acpi::builder::Table::new_dyn(
            1,
            None,
            &pptt::Pptt {},
            &[pptt_extra.as_slice()],
        ))
    }

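    /// Builds the ACPI tables, returning them for placement at guest physical
    /// address `gpa`.
    ///
    /// The RSDP is assumed to occupy the first page at `gpa`; the remaining
    /// tables start one page later. `add_devices_to_dsdt` is called to append
    /// VM-specific devices to the generated DSDT.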
    pub fn build_acpi_tables<F>(&self, gpa: u64, add_devices_to_dsdt: F) -> BuiltAcpiTables
    where
        F: FnOnce(&MemoryLayout, &mut dsdt::Dsdt),
    {
        let mut dsdt_data = dsdt::Dsdt::new();
        // Declare the S0 and S5 sleep states.
        dsdt_data.add_object(&dsdt::NamedObject::new(
            b"\\_S0",
            &dsdt::Package(vec![0, 0]),
        ));
        dsdt_data.add_object(&dsdt::NamedObject::new(
            b"\\_S5",
            &dsdt::Package(vec![0, 0]),
        ));
        add_devices_to_dsdt(self.mem_layout, &mut dsdt_data);

        // Add a device per processor. ACPI names are limited to four
        // characters, so name them P000..P999, then Q000..Q999, and so on.
        for proc_index in 1..self.processor_topology.vp_count() + 1 {
            let c = (b'P' + (proc_index / 1000) as u8) as char;
            let name = &format!("{c}{:03}", proc_index % 1000);
            let mut proc = dsdt::Device::new(name.as_bytes());
            proc.add_object(&dsdt::NamedString::new(b"_HID", b"ACPI0007"));
            proc.add_object(&dsdt::NamedInteger::new(b"_UID", proc_index as u64));
            let mut method = dsdt::Method::new(b"_STA");
            method.add_operation(&dsdt::ReturnOp {
                result: dsdt::encode_integer(0xf),
            });
            proc.add_object(&method);
            dsdt_data.add_object(&proc);
        }

        self.build_acpi_tables_inner(gpa, &dsdt_data.to_bytes())
    }

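    /// Builds the ACPI tables as in [`Self::build_acpi_tables`], but with a
    /// caller-provided DSDT instead of a generated one.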
    pub fn build_acpi_tables_custom_dsdt(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        self.build_acpi_tables_inner(gpa, dsdt)
    }

    fn build_acpi_tables_inner(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        // The tables are laid out starting one page after the RSDP's page.
        let mut b = acpi::builder::Builder::new(gpa + 0x1000, OEM_INFO);

        let dsdt = b.append_raw(dsdt);

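        // The FADT points at the DSDT and describes the PM registers as I/O
        // ports relative to `pm_base`.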
        b.append(&acpi::builder::Table::new(
            6,
            None,
            &acpi_spec::fadt::Fadt {
                flags: acpi_spec::fadt::FADT_WBINVD
                    | acpi_spec::fadt::FADT_PROC_C1
                    | acpi_spec::fadt::FADT_PWR_BUTTON
                    | acpi_spec::fadt::FADT_SLP_BUTTON
                    | acpi_spec::fadt::FADT_RTC_S4
                    | acpi_spec::fadt::FADT_TMR_VAL_EXT
                    | acpi_spec::fadt::FADT_RESET_REG_SUP
                    | acpi_spec::fadt::FADT_USE_PLATFORM_CLOCK,
                x_dsdt: dsdt,
                sci_int: self.acpi_irq as u16,
                // Latencies above 100/1000 indicate that C2/C3 are not
                // supported.
                p_lvl2_lat: 101,
                p_lvl3_lat: 1001,
                pm1_evt_len: 4,
                x_pm1a_evt_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::STATUS.0 as u16).into(),
                },
                pm1_cnt_len: 2,
                x_pm1a_cnt_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 16,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::CONTROL.0 as u16).into(),
                },
                gpe0_blk_len: 4,
                x_gpe0_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Word,
                    address: (self.pm_base + chipset::pm::DynReg::GEN_PURPOSE_STATUS.0 as u16)
                        .into(),
                },
                reset_reg: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 8,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Byte,
                    address: (self.pm_base + chipset::pm::DynReg::RESET.0 as u16).into(),
                },
                reset_value: chipset::pm::RESET_VALUE,
                pm_tmr_len: 4,
                x_pm_tmr_blk: GenericAddress {
                    addr_space_id: AddressSpaceId::SystemIo,
                    register_bit_width: 32,
                    register_bit_offset: 0,
                    access_size: AddressWidth::Dword,
                    address: (self.pm_base + chipset::pm::DynReg::TIMER.0 as u16).into(),
                },
                ..Default::default()
            },
        ));

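        // Describe the PSP's register layout via the ASP table (ASPT) so the
        // guest can locate the feature, interrupt, and mailbox registers.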
        if self.with_psp {
            use acpi_spec::aspt;
            use acpi_spec::aspt::Aspt;
            use acpi_spec::aspt::AsptStructHeader;

            b.append(&acpi::builder::Table::new_dyn(
                1,
                None,
                &Aspt { num_structs: 3 },
                &[
                    AsptStructHeader::new::<aspt::structs::AspGlobalRegisters>().as_bytes(),
                    aspt::structs::AspGlobalRegisters {
                        _reserved: 0,
                        feature_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::FEATURE,
                        interrupt_enable_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::INT_EN,
                        interrupt_status_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::INT_STS,
                    }
                    .as_bytes(),
                    AsptStructHeader::new::<aspt::structs::SevMailboxRegisters>().as_bytes(),
                    aspt::structs::SevMailboxRegisters {
                        mailbox_interrupt_id: 1,
                        _reserved: [0; 3],
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::CMD_RESP,
                        cmd_buf_addr_lo_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_LO,
                        cmd_buf_addr_hi_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_HI,
                    }
                    .as_bytes(),
                    AsptStructHeader::new::<aspt::structs::AcpiMailboxRegisters>().as_bytes(),
                    aspt::structs::AcpiMailboxRegisters {
                        _reserved1: 0,
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::ACPI_CMD_RESP,
                        _reserved2: [0; 2],
                    }
                    .as_bytes(),
                ],
            ));
        }

        self.with_madt(|t| b.append(t));
        self.with_srat(|t| b.append(t));
        if !self.pcie_host_bridges.is_empty() {
            self.with_mcfg(|t| b.append(t));
        }
        if self.cache_topology.is_some() {
            self.with_pptt(|t| b.append(t));
        }

        let (rdsp, tables) = b.build();

        BuiltAcpiTables { rdsp, tables }
    }

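    /// Builds the MADT alone as a standalone table, for callers that place
    /// individual tables themselves.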
    pub fn build_madt(&self) -> Vec<u8> {
        self.with_madt(|t| t.to_vec(&OEM_INFO))
    }

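    /// Builds the SRAT alone as a standalone table.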
    pub fn build_srat(&self) -> Vec<u8> {
        self.with_srat(|t| t.to_vec(&OEM_INFO))
    }

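    /// Builds the MCFG alone as a standalone table.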
    pub fn build_mcfg(&self) -> Vec<u8> {
        self.with_mcfg(|t| t.to_vec(&OEM_INFO))
    }

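    /// Builds the PPTT alone as a standalone table.
    ///
    /// Panics if `cache_topology` is `None`.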
    pub fn build_pptt(&self) -> Vec<u8> {
        self.with_pptt(|t| t.to_vec(&OEM_INFO))
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use acpi_spec::madt::MadtParser;
    use acpi_spec::mcfg::parse_mcfg;
    use memory_range::MemoryRange;
    use virt::VpIndex;
    use virt::VpInfo;
    use vm_topology::processor::TopologyBuilder;
    use vm_topology::processor::x86::X86VpInfo;

    const KB: u64 = 1024;
    const MB: u64 = 1024 * KB;
    const GB: u64 = 1024 * MB;
    const TB: u64 = 1024 * GB;

    const MMIO: [MemoryRange; 2] = [
        MemoryRange::new(GB..2 * GB),
        MemoryRange::new(3 * GB..4 * GB),
    ];

    fn new_mem() -> MemoryLayout {
        MemoryLayout::new(TB, &MMIO, None).unwrap()
    }

    fn new_builder<'a>(
        mem_layout: &'a MemoryLayout,
        processor_topology: &'a ProcessorTopology<X86Topology>,
        pcie_host_bridges: &'a Vec<PcieHostBridge>,
    ) -> AcpiTablesBuilder<'a, X86Topology> {
        AcpiTablesBuilder {
            processor_topology,
            mem_layout,
            cache_topology: None,
            pcie_host_bridges,
            with_ioapic: true,
            with_pic: false,
            with_pit: false,
            with_psp: false,
            pm_base: 1234,
            acpi_irq: 2,
        }
    }

    #[test]
    fn test_basic_madt_cpu() {
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(16).unwrap();
        let pcie = vec![];
        let builder = new_builder(&mem, &topology, &pcie);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (0..16).map(Some).collect::<Vec<_>>());

        let topology = TopologyBuilder::new_x86()
            .apic_id_offset(13)
            .build(16)
            .unwrap();
        let builder = new_builder(&mem, &topology, &pcie);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (13..29).map(Some).collect::<Vec<_>>());

        let apic_ids = [12, 58, 4823, 36];
        let topology = TopologyBuilder::new_x86()
            .build_with_vp_info(apic_ids.iter().enumerate().map(|(uid, apic)| X86VpInfo {
                base: VpInfo {
                    vp_index: VpIndex::new(uid as u32),
                    vnode: 0,
                },
                apic_id: *apic,
            }))
            .unwrap();
        let builder = new_builder(&mem, &topology, &pcie);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(
            entries,
            apic_ids.iter().map(|e| Some(*e)).collect::<Vec<_>>()
        );
    }

    #[test]
    fn test_basic_pcie_topology() {
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(16).unwrap();
        let pcie_host_bridges = vec![
            PcieHostBridge {
                index: 0,
                segment: 0,
                start_bus: 0,
                end_bus: 255,
                ecam_range: MemoryRange::new(0..256 * 256 * 4096),
                low_mmio: MemoryRange::new(0..0),
                high_mmio: MemoryRange::new(0..0),
            },
            PcieHostBridge {
                index: 1,
                segment: 1,
                start_bus: 32,
                end_bus: 63,
                ecam_range: MemoryRange::new(5 * GB..5 * GB + 32 * 256 * 4096),
                low_mmio: MemoryRange::new(0..0),
                high_mmio: MemoryRange::new(0..0),
            },
        ];

        let builder = new_builder(&mem, &topology, &pcie_host_bridges);
        let mcfg = builder.build_mcfg();

        let mut i = 0;
        let _ = parse_mcfg(&mcfg, |sbr| match i {
            0 => {
                assert_eq!(sbr.ecam_base, 0);
                assert_eq!(sbr.segment, 0);
                assert_eq!(sbr.start_bus, 0);
                assert_eq!(sbr.end_bus, 255);
                i += 1;
            }
            1 => {
                assert_eq!(sbr.ecam_base, 5 * GB - 32 * 256 * 4096);
                assert_eq!(sbr.segment, 1);
                assert_eq!(sbr.start_bus, 32);
                assert_eq!(sbr.end_bus, 63);
                i += 1;
            }
            _ => panic!("only expected two MCFG segment bus range entries"),
        })
        .unwrap();
    }
}