vmm_core/cpuid/topology.rs

use super::CpuidFn;
use thiserror::Error;
use virt::CpuidLeaf;
use vm_topology::processor::ProcessorTopology;
use x86defs::cpuid::CacheParametersEax;
use x86defs::cpuid::CpuidFunction;
use x86defs::cpuid::ExtendedTopologyEax;
use x86defs::cpuid::ExtendedTopologyEbx;
use x86defs::cpuid::ExtendedTopologyEcx;
use x86defs::cpuid::TopologyLevelType;
use x86defs::cpuid::Vendor;
use x86defs::cpuid::VendorAndMaxFunctionEax;
use x86defs::cpuid::VersionAndFeaturesEbx;

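/// Error returned when the host's processor vendor is neither Intel- nor
/// AMD-compatible.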
#[derive(Debug, Error)]
#[error("unknown processor vendor {0}")]
pub struct UnknownVendor(Vendor);

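/// Computes the CPUID topology leaves (leaf 0x1 EBX, leaf 0x4, and leaves
/// 0xB/0x1F) from the VM's processor topology, appending the results to
/// `leaves`.
///
/// Returns an error if the host processor vendor is not recognized.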
pub fn topology_cpuid<'a>(
    topology: &'a ProcessorTopology,
    cpuid: CpuidFn<'a>,
    leaves: &mut Vec<CpuidLeaf>,
) -> Result<(), UnknownVendor> {
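    // Query leaf 0x0 for the maximum standard leaf and the vendor string, and
    // bail out on vendors we don't recognize.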
    let result = cpuid(CpuidFunction::VendorAndMaxFunction.0, 0);
    let max = VendorAndMaxFunctionEax::from(result[0]).max_function();
    let vendor = Vendor::from_ebx_ecx_edx(result[1], result[2], result[3]);
    if !vendor.is_intel_compatible() && !vendor.is_amd_compatible() {
        return Err(UnknownVendor(vendor));
    }

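    // Leaf 0x1 EBX: report the number of logical processors per package. Only
    // the LPs-per-package byte is exposed via the mask.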
    leaves.push(
        CpuidLeaf::new(
            CpuidFunction::VersionAndFeatures.0,
            [
                0,
                VersionAndFeaturesEbx::new()
                    .with_lps_per_package(topology.reserved_vps_per_socket() as u8)
                    .into(),
                0,
                0,
            ],
        )
        .masked([
            0,
            VersionAndFeaturesEbx::new()
                .with_lps_per_package(0xff)
                .into(),
            0,
            0,
        ]),
    );

    if vendor.is_intel_compatible() {
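        // Leaf 0x4 (deterministic cache parameters) is Intel-defined, so it is
        // only populated for Intel-compatible vendors.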
        cache_parameters_cpuid(topology, cpuid, leaves);
    }

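    // Leaf 0xB (extended topology enumeration) is always populated.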
    extended_topology_cpuid(topology, CpuidFunction::ExtendedTopologyEnumeration, leaves);

    if max >= CpuidFunction::V2ExtendedTopologyEnumeration.0 {
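        // Leaf 0x1F (V2 extended topology enumeration) is only populated when
        // the host enumerates it.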
        extended_topology_cpuid(
            topology,
            CpuidFunction::V2ExtendedTopologyEnumeration,
            leaves,
        );
    }

    Ok(())
}

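/// Computes cache topology leaves (leaf 0x4) by walking the host's cache
/// parameter subleaves and overriding the core and cache-sharing counts to
/// match the VM's processor topology.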
fn cache_parameters_cpuid(
    topology: &ProcessorTopology,
    cpuid: CpuidFn<'_>,
    leaves: &mut Vec<CpuidLeaf>,
) {
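    // Walk the host's cache subleaves; an all-zero result marks the end of the
    // list.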
    for i in 0..=255 {
        let result = cpuid(CpuidFunction::CacheParameters.0, i);
        if result == [0; 4] {
            break;
        }
        // Start from the host-reported value so the cache level can be checked below.
        let mut eax = CacheParametersEax::from(result[0]);
        if topology.smt_enabled() {
            eax.set_cores_per_socket_minus_one((topology.reserved_vps_per_socket() / 2) - 1);
            eax.set_threads_sharing_cache_minus_one(1);
        } else {
            eax.set_cores_per_socket_minus_one(topology.reserved_vps_per_socket() - 1);
            eax.set_threads_sharing_cache_minus_one(0);
        }

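        // The L3 cache is shared by all the logical processors in the socket.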
        if eax.cache_level() == 3 {
            eax.set_threads_sharing_cache_minus_one(topology.reserved_vps_per_socket() - 1);
        }

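        // Build a mask so that only the core count and cache-sharing fields of
        // EAX are overridden.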
        let eax_mask = CacheParametersEax::new()
            .with_cores_per_socket_minus_one(0x3f)
            .with_threads_sharing_cache_minus_one(0xfff);

        leaves.push(
            CpuidLeaf::new(CpuidFunction::CacheParameters.0, [eax.into(), 0, 0, 0]).masked([
                eax_mask.into(),
                0,
                0,
                0,
            ]),
        )
    }
}

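/// Computes the extended topology enumeration leaves (0xB or 0x1F), reporting
/// the SMT and core levels of the VM's processor topology.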
fn extended_topology_cpuid(
    topology: &ProcessorTopology,
    function: CpuidFunction,
    leaves: &mut Vec<CpuidLeaf>,
) {
    assert!(
        function == CpuidFunction::ExtendedTopologyEnumeration
            || function == CpuidFunction::V2ExtendedTopologyEnumeration
    );
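    // Report one subleaf per topology level. The x2APIC shift for each level
    // is computed with `trailing_zeros`, which assumes the logical processor
    // count is a power of two.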
    for (index, (level_type, num_lps)) in [
        (
            TopologyLevelType::SMT,
            if topology.smt_enabled() { 2 } else { 1 },
        ),
        (TopologyLevelType::CORE, topology.reserved_vps_per_socket()),
    ]
    .into_iter()
    .enumerate()
    {
        if level_type <= TopologyLevelType::CORE
            || function == CpuidFunction::V2ExtendedTopologyEnumeration
        {
            let eax = ExtendedTopologyEax::new().with_x2_apic_shift(num_lps.trailing_zeros());
            let ebx = ExtendedTopologyEbx::new().with_num_lps(num_lps as u16);
            let ecx = ExtendedTopologyEcx::new()
                .with_level_number(index as u8)
                .with_level_type(level_type.0);

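            // EDX reports the current processor's x2APIC ID, which varies per
            // VP, so it is excluded from the mask here.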
            leaves.push(
                CpuidLeaf::new(function.0, [eax.into(), ebx.into(), ecx.into(), 0])
                    .indexed(index as u32)
                    .masked([!0, !0, !0, 0]),
            );
        }
    }
}