vmm_core/
acpi_builder.rs

1// Copyright (c) Microsoft Corporation.
2// Licensed under the MIT License.
3
4//! Construct ACPI tables for a concrete VM topology
5
6// TODO: continue to remove these hardcoded deps
7use acpi::dsdt;
8use acpi::ssdt::Ssdt;
9use acpi_spec::madt::InterruptPolarity;
10use acpi_spec::madt::InterruptTriggerMode;
11use cache_topology::CacheTopology;
12use chipset::ioapic;
13use chipset::psp;
14use inspect::Inspect;
15use std::collections::BTreeMap;
16use vm_topology::memory::MemoryLayout;
17use vm_topology::pcie::PcieHostBridge;
18use vm_topology::processor::ArchTopology;
19use vm_topology::processor::ProcessorTopology;
20use vm_topology::processor::aarch64::Aarch64Topology;
21use vm_topology::processor::x86::X86Topology;
22use x86defs::apic::APIC_BASE_ADDRESS;
23use zerocopy::IntoBytes;
24
/// Binary ACPI tables constructed by [`AcpiTablesBuilder`].
pub struct BuiltAcpiTables {
    /// The RDSP (Root System Description Pointer). Assumed to be given a
    /// whole page of its own, with the remaining tables starting one page
    /// later.
    pub rdsp: Vec<u8>,
    /// The remaining tables pointed to by the RDSP, laid out contiguously.
    pub tables: Vec<u8>,
}
32
/// Builder to construct a set of [`BuiltAcpiTables`]
pub struct AcpiTablesBuilder<'a, T: AcpiTopology> {
    /// The processor topology.
    ///
    /// It is assumed that the MADT processor UID should start at 1 and enumerate each
    /// of these APIC IDs in turn.
    pub processor_topology: &'a ProcessorTopology<T>,
    /// The memory layout of the VM. Each RAM range produces an SRAT memory
    /// affinity entry.
    pub mem_layout: &'a MemoryLayout,
    /// The cache topology of the VM.
    ///
    /// If and only if this is set, then the PPTT table will be generated.
    pub cache_topology: Option<&'a CacheTopology>,
    /// The PCIe topology.
    ///
    /// If and only if this has root complexes, then an MCFG (and a PCIe SSDT)
    /// will be generated.
    pub pcie_host_bridges: &'a Vec<PcieHostBridge>,
    /// Architecture-specific ACPI configuration; selects the FADT flavor and
    /// the MADT interrupt-controller entries.
    pub arch: AcpiArchConfig,
}
53
/// Architecture-specific ACPI configuration carried by [`AcpiTablesBuilder`].
pub enum AcpiArchConfig {
    /// x86-specific settings (IOAPIC, PIC, PIT, PSP, PM base, SCI IRQ).
    X86 {
        /// If an IOAPIC is present. When set, an IOAPIC entry is added to
        /// the MADT.
        with_ioapic: bool,
        /// If a PIC is present. When set, the MADT advertises
        /// PC-AT compatibility.
        with_pic: bool,
        /// If a PIT is present. When set, an ISA IRQ0 -> GSI 2 interrupt
        /// source override is added to the MADT.
        with_pit: bool,
        /// If a PSP is present. When set, an ASPT table is generated.
        with_psp: bool,
        /// Base I/O port address of dynamic power management device
        /// registers, referenced from the FADT.
        pm_base: u16,
        /// ACPI (SCI) IRQ number.
        acpi_irq: u32,
    },
    /// ARM64-specific settings (HW_REDUCED_ACPI FADT).
    Aarch64 {
        /// Hypervisor vendor identity for the FADT.
        /// Zero when not running under a hypervisor.
        hypervisor_vendor_identity: u64,
        /// Virtual timer PPI (GIC INTID), reported in the GTDT.
        virt_timer_ppi: u32,
    },
}
80
/// OEM identification ("HVLITE"/"MSHV") stamped into the header of every
/// ACPI table built by this module.
pub const OEM_INFO: acpi::builder::OemInfo = acpi::builder::OemInfo {
    oem_id: *b"HVLITE",
    oem_tableid: *b"HVLITETB",
    oem_revision: 0,
    creator_id: *b"MSHV",
    creator_revision: 0,
};
88
/// Architecture-specific hooks for emitting per-processor entries into the
/// SRAT and MADT tables.
pub trait AcpiTopology: ArchTopology + Inspect + Sized {
    /// Appends this architecture's per-processor SRAT (affinity) entries to
    /// `srat`.
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>);
    /// Appends this architecture's interrupt-controller entries to `madt`.
    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>);
}
93
/// The maximum ID that can be used for a legacy APIC ID in an ACPI table.
/// Anything bigger than this must use the x2apic format.
///
/// This isn't 0xff because that's the broadcast ID. See the legacy-vs-x2apic
/// checks in the x86 `extend_srat`/`extend_madt` implementations below.
const MAX_LEGACY_APIC_ID: u32 = 0xfe;
99
100impl AcpiTopology for X86Topology {
101    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
102        for vp in topology.vps_arch() {
103            if vp.apic_id <= MAX_LEGACY_APIC_ID {
104                srat.extend_from_slice(
105                    acpi_spec::srat::SratApic::new(vp.apic_id as u8, vp.base.vnode).as_bytes(),
106                );
107            } else {
108                srat.extend_from_slice(
109                    acpi_spec::srat::SratX2Apic::new(vp.apic_id, vp.base.vnode).as_bytes(),
110                );
111            }
112        }
113    }
114
115    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
116        // Add LINT1 as the local NMI source
117        madt.extend_from_slice(acpi_spec::madt::MadtLocalNmiSource::new().as_bytes());
118
119        for vp in topology.vps_arch() {
120            let uid = vp.base.vp_index.index() + 1;
121            if vp.apic_id <= MAX_LEGACY_APIC_ID && uid <= u8::MAX.into() {
122                madt.extend_from_slice(
123                    acpi_spec::madt::MadtApic {
124                        apic_id: vp.apic_id as u8,
125                        acpi_processor_uid: uid as u8,
126                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
127                        ..acpi_spec::madt::MadtApic::new()
128                    }
129                    .as_bytes(),
130                );
131            } else {
132                madt.extend_from_slice(
133                    acpi_spec::madt::MadtX2Apic {
134                        x2_apic_id: vp.apic_id,
135                        acpi_processor_uid: uid,
136                        flags: acpi_spec::madt::MADT_APIC_ENABLED,
137                        ..acpi_spec::madt::MadtX2Apic::new()
138                    }
139                    .as_bytes(),
140                );
141            }
142        }
143    }
144}
145
impl AcpiTopology for Aarch64Topology {
    fn extend_srat(topology: &ProcessorTopology<Self>, srat: &mut Vec<u8>) {
        // One GICC affinity entry per VP. The ACPI processor UID is the
        // 1-based VP index, matching the MADT GICC entries below.
        for vp in topology.vps_arch() {
            srat.extend_from_slice(
                acpi_spec::srat::SratGicc::new(vp.base.vp_index.index() + 1, vp.base.vnode)
                    .as_bytes(),
            );
        }
    }

    fn extend_madt(topology: &ProcessorTopology<Self>, madt: &mut Vec<u8>) {
        use vm_topology::processor::aarch64::GicVersion;

        // Version byte reported in the MADT GICD entry.
        let gic_acpi_version: u8 = match topology.gic_version() {
            GicVersion::V2 { .. } => 2,
            GicVersion::V3 { .. } => 3,
        };

        // A single GIC distributor entry (GIC ID 0) precedes the per-VP
        // GICC entries.
        madt.extend_from_slice(
            acpi_spec::madt::MadtGicd::new(0, topology.gic_distributor_base(), gic_acpi_version)
                .as_bytes(),
        );
        for vp in topology.vps_arch() {
            // MADT processor UIDs are 1-based.
            let uid = vp.base.vp_index.index() + 1;

            // ACPI specifies that just the MPIDR affinity fields should be included.
            let mpidr = u64::from(vp.mpidr) & u64::from(aarch64defs::MpidrEl1::AFFINITY_MASK);

            let mut gicc = acpi_spec::madt::MadtGicc::new(uid, mpidr);

            // Per-VP redistributor base, when the topology provides one.
            if let Some(gicr) = vp.gicr {
                gicc.gicr_base_address = gicr.into();
            }

            // Only GICv2 carries an MMIO CPU interface base address; for
            // other versions the field is left at its default.
            if let GicVersion::V2 { cpu_interface_base } = topology.gic_version() {
                gicc.base_address = cpu_interface_base.into();
            }

            // Performance monitor interrupt, when configured.
            if let Some(pmu_gsiv) = topology.pmu_gsiv() {
                gicc.performance_monitoring_gsiv = pmu_gsiv.into();
            }
            madt.extend_from_slice(gicc.as_bytes());
        }

        // GIC v2m MSI frame for PCIe MSI support.
        if let Some(v2m) = topology.gic_v2m() {
            madt.extend_from_slice(
                acpi_spec::madt::MadtGicMsiFrame::new(
                    0,
                    v2m.frame_base,
                    v2m.spi_base as u16,
                    v2m.spi_count as u16,
                )
                .as_bytes(),
            );
        }
    }
}
204
205impl<T: AcpiTopology> AcpiTablesBuilder<'_, T> {
206    fn with_srat<F, R>(&self, f: F) -> R
207    where
208        F: FnOnce(&acpi::builder::Table<'_>) -> R,
209    {
210        let mut srat_extra: Vec<u8> = Vec::new();
211        T::extend_srat(self.processor_topology, &mut srat_extra);
212        for range in self.mem_layout.ram() {
213            srat_extra.extend_from_slice(
214                acpi_spec::srat::SratMemory::new(
215                    range.range.start(),
216                    range.range.len(),
217                    range.vnode,
218                )
219                .as_bytes(),
220            );
221        }
222
223        (f)(&acpi::builder::Table::new_dyn(
224            acpi_spec::srat::SRAT_REVISION,
225            None,
226            &acpi_spec::srat::SratHeader::new(),
227            &[srat_extra.as_slice()],
228        ))
229    }
230
    /// Builds the MADT (interrupt controller description) table and passes
    /// it to `f`, returning `f`'s result.
    ///
    /// On x86 this includes the optional IOAPIC entry, the SCI interrupt
    /// source override, and the optional PIT override, followed by the
    /// arch-specific per-processor entries.
    fn with_madt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        let mut madt_extra: Vec<u8> = Vec::new();

        if let AcpiArchConfig::X86 {
            with_ioapic,
            acpi_irq,
            with_pit,
            ..
        } = self.arch
        {
            if with_ioapic {
                madt_extra.extend_from_slice(
                    acpi_spec::madt::MadtIoApic {
                        io_apic_id: 0,
                        io_apic_address: ioapic::IOAPIC_DEVICE_MMIO_REGION_BASE_ADDRESS as u32,
                        ..acpi_spec::madt::MadtIoApic::new()
                    }
                    .as_bytes(),
                );
            }

            // Add override for ACPI interrupt to be level triggered, active high.
            madt_extra.extend_from_slice(
                acpi_spec::madt::MadtInterruptSourceOverride::new(
                    acpi_irq.try_into().expect("should be in range"),
                    acpi_irq,
                    Some(InterruptPolarity::ActiveHigh),
                    Some(InterruptTriggerMode::Level),
                )
                .as_bytes(),
            );

            if with_pit {
                // IO-APIC IRQ0 is interrupt 2, which the PIT is attached to.
                madt_extra.extend_from_slice(
                    acpi_spec::madt::MadtInterruptSourceOverride::new(0, 2, None, None).as_bytes(),
                );
            }
        }

        // Per-processor entries (APIC/x2APIC on x86, GICD/GICC on ARM64).
        T::extend_madt(self.processor_topology, &mut madt_extra);

        // Fixed MADT header fields: the local APIC address and the PC-AT
        // compatibility (8259 PIC present) flag on x86; both zero on ARM64.
        let (apic_addr, flags) = match self.arch {
            AcpiArchConfig::X86 { with_pic, .. } => (
                APIC_BASE_ADDRESS,
                if with_pic {
                    acpi_spec::madt::MADT_PCAT_COMPAT
                } else {
                    0
                },
            ),
            AcpiArchConfig::Aarch64 { .. } => (0u32, 0u32),
        };

        (f)(&acpi::builder::Table::new_dyn(
            5, // MADT revision
            None,
            &acpi_spec::madt::Madt { apic_addr, flags },
            &[madt_extra.as_slice()],
        ))
    }
295
296    fn with_mcfg<F, R>(&self, f: F) -> R
297    where
298        F: FnOnce(&acpi::builder::Table<'_>) -> R,
299    {
300        let mut mcfg_extra: Vec<u8> = Vec::new();
301        for bridge in self.pcie_host_bridges {
302            // Note: The topology representation of the host bridge reflects
303            // the actual MMIO region regardless of starting bus number, but the
304            // address reported in the MCFG table must reflect wherever bus number
305            // 0 would be accessible even if the host bridge has a different starting
306            // bus number.
307            let ecam_region_offset = (bridge.start_bus as u64) * 256 * 4096;
308            mcfg_extra.extend_from_slice(
309                acpi_spec::mcfg::McfgSegmentBusRange::new(
310                    bridge.ecam_range.start() - ecam_region_offset,
311                    bridge.segment,
312                    bridge.start_bus,
313                    bridge.end_bus,
314                )
315                .as_bytes(),
316            )
317        }
318
319        (f)(&acpi::builder::Table::new_dyn(
320            acpi_spec::mcfg::MCFG_REVISION,
321            None,
322            &acpi_spec::mcfg::McfgHeader::new(),
323            &[mcfg_extra.as_slice()],
324        ))
325    }
326
    /// Builds the PPTT (processor/cache topology) table and passes it to
    /// `f`, returning `f`'s result.
    ///
    /// The table is a tree of processor and cache nodes linked by byte
    /// offsets from the start of the table: one node per socket (with an
    /// optional L3 cache), one per core (with optional L2/L1i/L1d caches),
    /// and — when SMT is enabled — one leaf node per thread.
    ///
    /// # Panics
    /// Panics if `self.cache_topology` is `None`.
    fn with_pptt<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&acpi::builder::Table<'_>) -> R,
    {
        use acpi_spec::pptt;

        let cache = self.cache_topology.expect("cache topology is required");

        // Offsets in the PPTT are relative to the table start, which
        // includes the common ACPI header before `pptt_extra`.
        let current_offset =
            |pptt_extra: &[u8]| (size_of::<acpi_spec::Header>() + pptt_extra.len()) as u32;

        // Appends a cache node for the given level/type (if the cache
        // topology describes one) and returns its table offset. `next` links
        // to the next cache level, per the PPTT next-level-of-cache field.
        let cache_for = |pptt_extra: &mut Vec<u8>, level: u8, cache_type, next: Option<u32>| {
            let descriptor = cache
                .caches
                .iter()
                .find(|d| d.level == level && d.cache_type == cache_type)?;
            let offset = current_offset(pptt_extra);
            pptt_extra.extend_from_slice(
                pptt::PpttCache {
                    flags: u32::from(
                        pptt::PpttCacheFlags::new()
                            .with_size_valid(true)
                            .with_associativity_valid(true)
                            .with_cache_type_valid(true)
                            .with_line_size_valid(true),
                    )
                    .into(),
                    size: descriptor.size.into(),
                    associativity: descriptor.associativity.unwrap_or(0) as u8,
                    attributes: pptt::PpttCacheAttributes::new().with_cache_type(match descriptor
                        .cache_type
                    {
                        cache_topology::CacheType::Data => pptt::PPTT_CACHE_TYPE_DATA,
                        cache_topology::CacheType::Instruction => pptt::PPTT_CACHE_TYPE_INSTRUCTION,
                        cache_topology::CacheType::Unified => pptt::PPTT_CACHE_TYPE_UNIFIED,
                    }),
                    line_size: (descriptor.line_size as u16).into(),
                    next_level: next.unwrap_or(0).into(),
                    ..pptt::PpttCache::new()
                }
                .as_bytes(),
            );
            Some(offset)
        };

        let mut pptt_extra = Vec::new();
        // socket id -> (socket node offset, core id -> core node offset)
        let mut sockets = BTreeMap::new();
        let smt_enabled = self.processor_topology.smt_enabled();

        for vp in self.processor_topology.vps() {
            // ACPI processor UIDs are 1-based (see the builder's contract).
            let acpi_processor_id = vp.vp_index.index() + 1;
            let info = self.processor_topology.vp_topology(vp.vp_index);

            // Emit the socket node (and its L3, if any) the first time this
            // socket is seen.
            let &mut (socket_offset, ref mut cores) =
                sockets.entry(info.socket).or_insert_with(|| {
                    let l3 =
                        cache_for(&mut pptt_extra, 3, cache_topology::CacheType::Unified, None);
                    let socket_offset = current_offset(&pptt_extra);
                    pptt_extra.extend_from_slice(
                        pptt::PpttProcessor {
                            flags: u32::from(
                                pptt::PpttProcessorFlags::new().with_physical_package(true),
                            )
                            .into(),
                            ..pptt::PpttProcessor::new(l3.is_some() as u8)
                        }
                        .as_bytes(),
                    );

                    // The processor node is followed by its private cache
                    // resource offsets (here: the L3, when present).
                    if let Some(l3) = l3 {
                        pptt_extra.extend_from_slice(&l3.to_ne_bytes());
                    }

                    (socket_offset, BTreeMap::new())
                });

            // Emit the core node (and its L2/L1 caches) the first time this
            // core is seen within the socket.
            let core_offset = *cores.entry(info.core).or_insert_with(|| {
                let l2 = cache_for(&mut pptt_extra, 2, cache_topology::CacheType::Unified, None);
                // Both L1 caches chain to the L2 as their next level.
                let l1i = cache_for(
                    &mut pptt_extra,
                    1,
                    cache_topology::CacheType::Instruction,
                    l2,
                );
                let l1d = cache_for(&mut pptt_extra, 1, cache_topology::CacheType::Data, l2);

                let core_offset = current_offset(&pptt_extra);
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        // Without SMT the core is the leaf, so it carries the
                        // processor UID; with SMT the UID lives on the thread
                        // nodes below instead.
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new()
                                .with_acpi_processor_uid_valid(!smt_enabled),
                        )
                        .into(),
                        acpi_processor_id: if !smt_enabled {
                            acpi_processor_id.into()
                        } else {
                            0u32.into()
                        },
                        parent: socket_offset.into(),
                        ..pptt::PpttProcessor::new(l1i.is_some() as u8 + l1d.is_some() as u8)
                    }
                    .as_bytes(),
                );

                if let Some(l1) = l1i {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }
                if let Some(l1) = l1d {
                    pptt_extra.extend_from_slice(&l1.to_ne_bytes());
                }

                core_offset
            });

            // With SMT, each VP gets a leaf thread node under its core.
            if smt_enabled {
                pptt_extra.extend_from_slice(
                    pptt::PpttProcessor {
                        flags: u32::from(
                            pptt::PpttProcessorFlags::new().with_acpi_processor_uid_valid(true),
                        )
                        .into(),
                        acpi_processor_id: acpi_processor_id.into(),
                        parent: core_offset.into(),
                        ..pptt::PpttProcessor::new(0)
                    }
                    .as_bytes(),
                )
            }
        }

        (f)(&acpi::builder::Table::new_dyn(
            1, // PPTT revision
            None,
            &pptt::Pptt {},
            &[pptt_extra.as_slice()],
        ))
    }
465
466    /// Build ACPI tables based on the supplied closure that adds devices to the DSDT.
467    ///
468    /// The RDSP is assumed to take one whole page.
469    ///
470    /// Returns tables that should be loaded at the supplied gpa.
471    pub fn build_acpi_tables<F>(&self, gpa: u64, add_devices_to_dsdt: F) -> BuiltAcpiTables
472    where
473        F: FnOnce(&MemoryLayout, &mut dsdt::Dsdt),
474    {
475        let mut dsdt_data = dsdt::Dsdt::new();
476        // Name(\_S0, Package(2){0, 0})
477        dsdt_data.add_object(&dsdt::NamedObject::new(
478            b"\\_S0",
479            &dsdt::Package(vec![0, 0]),
480        ));
481        // Name(\_S5, Package(2){0, 0})
482        dsdt_data.add_object(&dsdt::NamedObject::new(
483            b"\\_S5",
484            &dsdt::Package(vec![0, 0]),
485        ));
486        // Add any chipset devices.
487        add_devices_to_dsdt(self.mem_layout, &mut dsdt_data);
488        // Add processor devices:
489        // Device(P###) { Name(_HID, "ACPI0007") Name(_UID, #) Method(_STA, 0) { Return(0xF) } }
490        for proc_index in 1..self.processor_topology.vp_count() + 1 {
491            // To support more than 1000 processors, increment the first
492            // character of the device name beyond P999.
493            let c = (b'P' + (proc_index / 1000) as u8) as char;
494            let name = &format!("{c}{:03}", proc_index % 1000);
495            let mut proc = dsdt::Device::new(name.as_bytes());
496            proc.add_object(&dsdt::NamedString::new(b"_HID", b"ACPI0007"));
497            proc.add_object(&dsdt::NamedInteger::new(b"_UID", proc_index as u64));
498            let mut method = dsdt::Method::new(b"_STA");
499            method.add_operation(&dsdt::ReturnOp {
500                result: dsdt::encode_integer(0xf),
501            });
502            proc.add_object(&method);
503            dsdt_data.add_object(&proc);
504        }
505
506        self.build_acpi_tables_inner(gpa, &dsdt_data.to_bytes())
507    }
508
    /// Build ACPI tables based on the supplied custom DSDT.
    ///
    /// The RDSP is assumed to take one whole page.
    ///
    /// The caller is responsible for the contents of `dsdt`; it is appended
    /// as-is and referenced from the FADT.
    ///
    /// Returns tables that should be loaded at the supplied gpa.
    pub fn build_acpi_tables_custom_dsdt(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        self.build_acpi_tables_inner(gpa, dsdt)
    }
517
    /// Common tail of table construction: appends the DSDT, the
    /// arch-appropriate FADT, the optional ASPT, and then the
    /// MADT/SRAT/MCFG/SSDT/PPTT/GTDT tables as configured.
    fn build_acpi_tables_inner(&self, gpa: u64, dsdt: &[u8]) -> BuiltAcpiTables {
        // The RDSP occupies the first page; the remaining tables start one
        // page later.
        let mut b = acpi::builder::Builder::new(gpa + 0x1000, OEM_INFO);

        // `dsdt` is rebound to the value returned by `append_raw`, which is
        // what the FADT's x_dsdt field references below.
        let dsdt = b.append_raw(dsdt);

        if let AcpiArchConfig::X86 {
            pm_base, acpi_irq, ..
        } = self.arch
        {
            use acpi_spec::fadt::AddressSpaceId;
            use acpi_spec::fadt::AddressWidth;
            use acpi_spec::fadt::GenericAddress;

            // x86 FADT: full fixed-hardware ACPI with the PM register blocks
            // exposed as I/O ports at offsets from `pm_base`.
            b.append(&acpi::builder::Table::new(
                6, // FADT revision
                None,
                &acpi_spec::fadt::Fadt {
                    flags: acpi_spec::fadt::FADT_WBINVD
                        | acpi_spec::fadt::FADT_PROC_C1
                        | acpi_spec::fadt::FADT_PWR_BUTTON
                        | acpi_spec::fadt::FADT_SLP_BUTTON
                        | acpi_spec::fadt::FADT_RTC_S4
                        | acpi_spec::fadt::FADT_TMR_VAL_EXT
                        | acpi_spec::fadt::FADT_RESET_REG_SUP
                        | acpi_spec::fadt::FADT_USE_PLATFORM_CLOCK,
                    x_dsdt: dsdt,
                    sci_int: acpi_irq as u16,
                    p_lvl2_lat: 101,  // disable C2
                    p_lvl3_lat: 1001, // disable C3
                    pm1_evt_len: 4,
                    x_pm1a_evt_blk: GenericAddress {
                        addr_space_id: AddressSpaceId::SystemIo,
                        register_bit_width: 32,
                        register_bit_offset: 0,
                        access_size: AddressWidth::Word,
                        address: (pm_base + chipset::pm::DynReg::STATUS.0 as u16).into(),
                    },
                    pm1_cnt_len: 2,
                    x_pm1a_cnt_blk: GenericAddress {
                        addr_space_id: AddressSpaceId::SystemIo,
                        register_bit_width: 16,
                        register_bit_offset: 0,
                        access_size: AddressWidth::Word,
                        address: (pm_base + chipset::pm::DynReg::CONTROL.0 as u16).into(),
                    },
                    gpe0_blk_len: 4,
                    x_gpe0_blk: GenericAddress {
                        addr_space_id: AddressSpaceId::SystemIo,
                        register_bit_width: 32,
                        register_bit_offset: 0,
                        access_size: AddressWidth::Word,
                        address: (pm_base + chipset::pm::DynReg::GEN_PURPOSE_STATUS.0 as u16)
                            .into(),
                    },
                    reset_reg: GenericAddress {
                        addr_space_id: AddressSpaceId::SystemIo,
                        register_bit_width: 8,
                        register_bit_offset: 0,
                        access_size: AddressWidth::Byte,
                        address: (pm_base + chipset::pm::DynReg::RESET.0 as u16).into(),
                    },
                    reset_value: chipset::pm::RESET_VALUE,
                    pm_tmr_len: 4,
                    x_pm_tmr_blk: GenericAddress {
                        addr_space_id: AddressSpaceId::SystemIo,
                        register_bit_width: 32,
                        register_bit_offset: 0,
                        access_size: AddressWidth::Dword,
                        address: (pm_base + chipset::pm::DynReg::TIMER.0 as u16).into(),
                    },
                    ..Default::default()
                },
            ));
        }

        if let AcpiArchConfig::Aarch64 {
            hypervisor_vendor_identity,
            ..
        } = self.arch
        {
            // ARM64 FADT: hardware-reduced ACPI with PSCI via HVC.
            b.append(&acpi::builder::Table::new(
                6, // FADT revision
                None,
                &acpi_spec::fadt::Fadt {
                    flags: acpi_spec::fadt::FADT_HW_REDUCED_ACPI,
                    arm_boot_arch: 0x0003, // PSCI_COMPLIANT | PSCI_USE_HVC
                    minor_version: 3,
                    hypervisor_vendor_identity,
                    x_dsdt: dsdt,
                    ..Default::default()
                },
            ));
        }

        // ASPT: describes the PSP's MMIO register locations (x86 with PSP
        // only).
        if let AcpiArchConfig::X86 { with_psp: true, .. } = self.arch {
            use acpi_spec::aspt;
            use acpi_spec::aspt::Aspt;
            use acpi_spec::aspt::AsptStructHeader;

            b.append(&acpi::builder::Table::new_dyn(
                1,
                None,
                &Aspt { num_structs: 3 },
                &[
                    // AspGlobalRegisters
                    AsptStructHeader::new::<aspt::structs::AspGlobalRegisters>().as_bytes(),
                    aspt::structs::AspGlobalRegisters {
                        _reserved: 0,
                        feature_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::FEATURE,
                        interrupt_enable_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::INT_EN,
                        interrupt_status_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::INT_STS,
                    }
                    .as_bytes(),
                    // SevMailboxRegisters
                    AsptStructHeader::new::<aspt::structs::SevMailboxRegisters>().as_bytes(),
                    aspt::structs::SevMailboxRegisters {
                        mailbox_interrupt_id: 1,
                        _reserved: [0; 3],
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::CMD_RESP,
                        cmd_buf_addr_lo_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_LO,
                        cmd_buf_addr_hi_register_address: psp::PSP_MMIO_ADDRESS
                            + psp::reg::CMD_BUF_ADDR_HI,
                    }
                    .as_bytes(),
                    // AcpiMailboxRegisters
                    AsptStructHeader::new::<aspt::structs::AcpiMailboxRegisters>().as_bytes(),
                    aspt::structs::AcpiMailboxRegisters {
                        _reserved1: 0,
                        cmd_resp_register_address: psp::PSP_MMIO_ADDRESS + psp::reg::ACPI_CMD_RESP,
                        _reserved2: [0; 2],
                    }
                    .as_bytes(),
                ],
            ));
        }

        self.with_madt(|t| b.append(t));
        self.with_srat(|t| b.append(t));
        // PCIe description: MCFG plus an SSDT with one host bridge device
        // per root complex.
        if !self.pcie_host_bridges.is_empty() {
            self.with_mcfg(|t| b.append(t));

            let mut ssdt = Ssdt::new();
            for bridge in self.pcie_host_bridges {
                ssdt.add_pcie(
                    bridge.index,
                    bridge.segment,
                    bridge.start_bus,
                    bridge.end_bus,
                    bridge.ecam_range,
                    bridge.low_mmio,
                    bridge.high_mmio,
                );
            }
            b.append_raw(&ssdt.to_bytes());
        }

        // PPTT is generated if and only if a cache topology was supplied.
        if self.cache_topology.is_some() {
            self.with_pptt(|t| b.append(t));
        }

        // GTDT (generic timer description) only applies to ARM64.
        if matches!(self.arch, AcpiArchConfig::Aarch64 { .. }) {
            self.with_gtdt(|t| b.append(t));
        }

        let (rdsp, tables) = b.build();

        BuiltAcpiTables { rdsp, tables }
    }
688
    /// Helper method to construct an MADT without constructing the rest of
    /// the ACPI tables. Serializes with the module's [`OEM_INFO`].
    pub fn build_madt(&self) -> Vec<u8> {
        self.with_madt(|t| t.to_vec(&OEM_INFO))
    }
694
    /// Helper method to construct an SRAT without constructing the rest of
    /// the ACPI tables. Serializes with the module's [`OEM_INFO`].
    pub fn build_srat(&self) -> Vec<u8> {
        self.with_srat(|t| t.to_vec(&OEM_INFO))
    }
700
    /// Helper method to construct a MCFG without constructing the rest of the
    /// ACPI tables. Serializes with the module's [`OEM_INFO`].
    pub fn build_mcfg(&self) -> Vec<u8> {
        self.with_mcfg(|t| t.to_vec(&OEM_INFO))
    }
706
    /// Helper method to construct a PPTT without constructing the rest of the
    /// ACPI tables. Serializes with the module's [`OEM_INFO`].
    ///
    /// # Panics
    /// Panics if `self.cache_topology` is not set.
    pub fn build_pptt(&self) -> Vec<u8> {
        self.with_pptt(|t| t.to_vec(&OEM_INFO))
    }
715
716    fn with_gtdt<R>(&self, f: impl FnOnce(&acpi::builder::Table<'_>) -> R) -> R {
717        let virt_timer_ppi = if let AcpiArchConfig::Aarch64 { virt_timer_ppi, .. } = self.arch {
718            virt_timer_ppi
719        } else {
720            0
721        };
722        (f)(&acpi::builder::Table::new(
723            3,
724            None,
725            &acpi_spec::gtdt::Gtdt {
726                cnt_control_base: 0xFFFF_FFFF_FFFF_FFFF,
727                virtual_el1_timer_gsiv: virt_timer_ppi,
728                virtual_el1_timer_flags: acpi_spec::gtdt::GTDT_TIMER_ACTIVE_LOW,
729                cnt_read_base: 0xFFFF_FFFF_FFFF_FFFF,
730                ..Default::default()
731            },
732        ))
733    }
734
    /// Helper method to construct a GTDT without constructing the rest of the
    /// ACPI tables. Serializes with the module's [`OEM_INFO`].
    pub fn build_gtdt(&self) -> Vec<u8> {
        self.with_gtdt(|t| t.to_vec(&OEM_INFO))
    }
738}
739
740#[cfg(test)]
741mod test {
742    use super::*;
743    use acpi_spec::madt::MadtParser;
744    use acpi_spec::mcfg::parse_mcfg;
745    use memory_range::MemoryRange;
746    use virt::VpIndex;
747    use virt::VpInfo;
748    use vm_topology::processor::TopologyBuilder;
749    use vm_topology::processor::x86::X86VpInfo;
750
    // Size constants used to build test memory layouts.
    const KB: u64 = 1024;
    const MB: u64 = 1024 * KB;
    const GB: u64 = 1024 * MB;
    const TB: u64 = 1024 * GB;

    // Two fixed MMIO gaps used by every test layout.
    const MMIO: [MemoryRange; 2] = [
        MemoryRange::new(GB..2 * GB),
        MemoryRange::new(3 * GB..4 * GB),
    ];
760
    /// Builds a test memory layout (TB of address space — see
    /// `MemoryLayout::new` — with the fixed MMIO gaps above).
    fn new_mem() -> MemoryLayout {
        MemoryLayout::new(TB, &MMIO, &[], &[], None).unwrap()
    }
764
    /// Constructs an x86 [`AcpiTablesBuilder`] with fixed test settings:
    /// IOAPIC present, no PIC/PIT/PSP, PM base 1234, ACPI IRQ 2, and no
    /// cache topology.
    fn new_builder<'a>(
        mem_layout: &'a MemoryLayout,
        processor_topology: &'a ProcessorTopology<X86Topology>,
        pcie_host_bridges: &'a Vec<PcieHostBridge>,
    ) -> AcpiTablesBuilder<'a, X86Topology> {
        AcpiTablesBuilder {
            processor_topology,
            mem_layout,
            cache_topology: None,
            pcie_host_bridges,
            arch: AcpiArchConfig::X86 {
                with_ioapic: true,
                with_pic: false,
                with_pit: false,
                with_psp: false,
                pm_base: 1234,
                acpi_irq: 2,
            },
        }
    }
785
    // TODO: might be useful to test ioapic, pic, etc
    #[test]
    fn test_basic_madt_cpu() {
        // Contiguous APIC IDs starting at 0 round-trip through the MADT.
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(16).unwrap();
        let pcie = vec![];
        let builder = new_builder(&mem, &topology, &pcie);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (0..16).map(Some).collect::<Vec<_>>());

        // An APIC ID offset shifts the reported IDs accordingly.
        let topology = TopologyBuilder::new_x86()
            .apic_id_offset(13)
            .build(16)
            .unwrap();
        let builder = new_builder(&mem, &topology, &pcie);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(entries, (13..29).map(Some).collect::<Vec<_>>());

        // Arbitrary, non-contiguous APIC IDs (including one above the legacy
        // limit, forcing an x2APIC entry) are preserved in order.
        let apic_ids = [12, 58, 4823, 36];
        let topology = TopologyBuilder::new_x86()
            .build_with_vp_info(apic_ids.iter().enumerate().map(|(uid, apic)| X86VpInfo {
                base: VpInfo {
                    vp_index: VpIndex::new(uid as u32),
                    vnode: 0,
                },
                apic_id: *apic,
            }))
            .unwrap();
        let builder = new_builder(&mem, &topology, &pcie);
        let madt = builder.build_madt();

        let entries = MadtParser::new(&madt).unwrap().parse_apic_ids().unwrap();
        assert_eq!(
            entries,
            apic_ids.iter().map(|e| Some(*e)).collect::<Vec<_>>()
        );
    }
827
    #[test]
    fn test_basic_pcie_topology() {
        let mem = new_mem();
        let topology = TopologyBuilder::new_x86().build(16).unwrap();
        // Two host bridges: one covering the whole bus range starting at bus
        // 0, and one whose bus range starts at 32 (so its reported ECAM base
        // must be adjusted back to where bus 0 would sit).
        let pcie_host_bridges = vec![
            PcieHostBridge {
                index: 0,
                segment: 0,
                start_bus: 0,
                end_bus: 255,
                ecam_range: MemoryRange::new(0..256 * 256 * 4096),
                low_mmio: MemoryRange::new(0..0),
                high_mmio: MemoryRange::new(0..0),
            },
            PcieHostBridge {
                index: 1,
                segment: 1,
                start_bus: 32,
                end_bus: 63,
                ecam_range: MemoryRange::new(5 * GB..5 * GB + 32 * 256 * 4096),
                low_mmio: MemoryRange::new(0..0),
                high_mmio: MemoryRange::new(0..0),
            },
        ];

        let builder = new_builder(&mem, &topology, &pcie_host_bridges);
        let mcfg = builder.build_mcfg();

        // Verify each MCFG entry in order; `i` tracks which entry we expect.
        let mut i = 0;
        let _ = parse_mcfg(&mcfg, |sbr| match i {
            0 => {
                assert_eq!(sbr.ecam_base, 0);
                assert_eq!(sbr.segment, 0);
                assert_eq!(sbr.start_bus, 0);
                assert_eq!(sbr.end_bus, 255);
                i += 1;
            }
            1 => {
                // ECAM base is shifted down by 32 buses' worth of ECAM space.
                assert_eq!(sbr.ecam_base, 5 * GB - 32 * 256 * 4096);
                assert_eq!(sbr.segment, 1);
                assert_eq!(sbr.start_bus, 32);
                assert_eq!(sbr.end_bus, 63);
                i += 1;
            }
            _ => panic!("only expected two MCFG segment bus range entries"),
        })
        .unwrap();
    }
876}