// vmm_core/cpuid/topology.rs
use super::CpuidFn;
7use thiserror::Error;
8use virt::CpuidLeaf;
9use vm_topology::processor::ProcessorTopology;
10use x86defs::cpuid::CacheParametersEax;
11use x86defs::cpuid::CpuidFunction;
12use x86defs::cpuid::ExtendedAddressSpaceSizesEcx;
13use x86defs::cpuid::ExtendedTopologyEax;
14use x86defs::cpuid::ExtendedTopologyEbx;
15use x86defs::cpuid::ExtendedTopologyEcx;
16use x86defs::cpuid::ProcessorTopologyDefinitionEbx;
17use x86defs::cpuid::ProcessorTopologyDefinitionEcx;
18use x86defs::cpuid::TopologyLevelType;
19use x86defs::cpuid::Vendor;
20use x86defs::cpuid::VendorAndMaxFunctionEax;
21use x86defs::cpuid::VersionAndFeaturesEbx;
22
/// Error returned by [`topology_cpuid`] when the host's CPUID vendor string
/// is neither Intel- nor AMD-compatible.
#[derive(Debug, Error)]
#[error("unknown processor vendor {0}")]
pub struct UnknownVendor(Vendor);
26
27pub fn topology_cpuid<'a>(
32 topology: &'a ProcessorTopology,
33 cpuid: CpuidFn<'a>,
34 leaves: &mut Vec<CpuidLeaf>,
35) -> Result<(), UnknownVendor> {
36 let result = cpuid(CpuidFunction::VendorAndMaxFunction.0, 0);
37 let max = VendorAndMaxFunctionEax::from(result[0]).max_function();
38 let vendor = Vendor::from_ebx_ecx_edx(result[1], result[2], result[3]);
39 if !vendor.is_intel_compatible() && !vendor.is_amd_compatible() {
40 return Err(UnknownVendor(vendor));
41 };
42
43 leaves.push(
45 CpuidLeaf::new(
46 CpuidFunction::VersionAndFeatures.0,
47 [
48 0,
49 VersionAndFeaturesEbx::new()
50 .with_lps_per_package(topology.reserved_vps_per_socket() as u8)
51 .into(),
52 0,
53 0,
54 ],
55 )
56 .masked([
57 0,
58 VersionAndFeaturesEbx::new()
59 .with_lps_per_package(0xff)
60 .into(),
61 0,
62 0,
63 ]),
64 );
65
66 if vendor.is_intel_compatible() {
68 cache_parameters_cpuid(topology, cpuid, leaves);
69 }
70
71 extended_topology_cpuid(topology, CpuidFunction::ExtendedTopologyEnumeration, leaves);
73
74 if max >= CpuidFunction::V2ExtendedTopologyEnumeration.0 {
76 extended_topology_cpuid(
77 topology,
78 CpuidFunction::V2ExtendedTopologyEnumeration,
79 leaves,
80 );
81 }
82
83 if vendor.is_amd_compatible() {
84 amd_extended_address_space_sizes_cpuid(topology, leaves);
86 amd_processor_topology_definition_cpuid(topology, leaves);
87 }
88
89 Ok(())
90}
91
92fn cache_parameters_cpuid(
96 topology: &ProcessorTopology,
97 cpuid: CpuidFn<'_>,
98 leaves: &mut Vec<CpuidLeaf>,
99) {
100 for i in 0..=255 {
101 let result = cpuid(CpuidFunction::CacheParameters.0, i);
102 if result == [0; 4] {
103 break;
104 }
105 let mut eax = CacheParametersEax::new();
106 if topology.smt_enabled() {
107 eax.set_cores_per_socket_minus_one((topology.reserved_vps_per_socket() / 2) - 1);
108 eax.set_threads_sharing_cache_minus_one(1);
109 } else {
110 eax.set_cores_per_socket_minus_one(topology.reserved_vps_per_socket() - 1);
111 eax.set_threads_sharing_cache_minus_one(0);
112 }
113
114 if eax.cache_level() == 3 {
116 eax.set_threads_sharing_cache_minus_one(topology.reserved_vps_per_socket() - 1);
117 }
118
119 let eax_mask = CacheParametersEax::new()
120 .with_cores_per_socket_minus_one(0x3f)
121 .with_threads_sharing_cache_minus_one(0xfff);
122
123 leaves.push(
124 CpuidLeaf::new(CpuidFunction::CacheParameters.0, [eax.into(), 0, 0, 0]).masked([
125 eax_mask.into(),
126 0,
127 0,
128 0,
129 ]),
130 )
131 }
132}
133
134fn extended_topology_cpuid(
139 topology: &ProcessorTopology,
140 function: CpuidFunction,
141 leaves: &mut Vec<CpuidLeaf>,
142) {
143 assert!(
144 function == CpuidFunction::ExtendedTopologyEnumeration
145 || function == CpuidFunction::V2ExtendedTopologyEnumeration
146 );
147 for (index, (level_type, num_lps)) in [
148 (
149 TopologyLevelType::SMT,
150 if topology.smt_enabled() { 2 } else { 1 },
151 ),
152 (TopologyLevelType::CORE, topology.reserved_vps_per_socket()),
153 ]
154 .into_iter()
155 .enumerate()
156 {
157 if level_type <= TopologyLevelType::CORE
158 || function == CpuidFunction::V2ExtendedTopologyEnumeration
159 {
160 let eax = ExtendedTopologyEax::new().with_x2_apic_shift(num_lps.trailing_zeros());
161 let ebx = ExtendedTopologyEbx::new().with_num_lps(num_lps as u16);
162 let ecx = ExtendedTopologyEcx::new()
163 .with_level_number(index as u8)
164 .with_level_type(level_type.0);
165
166 leaves.push(
169 CpuidLeaf::new(function.0, [eax.into(), ebx.into(), ecx.into(), 0])
170 .indexed(index as u32)
171 .masked([!0, !0, !0, 0]),
172 );
173 }
174 }
175}
176
177fn amd_extended_address_space_sizes_cpuid(
181 topology: &ProcessorTopology,
182 leaves: &mut Vec<CpuidLeaf>,
183) {
184 let nc = (topology.reserved_vps_per_socket() - 1) as u8;
185 let apic_core_id_size = topology.reserved_vps_per_socket().trailing_zeros() as u8;
186 let ecx = ExtendedAddressSpaceSizesEcx::new()
187 .with_nc(nc)
188 .with_apic_core_id_size(apic_core_id_size);
189
190 let ecx_mask = ExtendedAddressSpaceSizesEcx::new()
191 .with_nc(0xff)
192 .with_apic_core_id_size(0xf);
193
194 leaves.push(
195 CpuidLeaf::new(
196 CpuidFunction::ExtendedAddressSpaceSizes.0,
197 [0, 0, ecx.into(), 0],
198 )
199 .masked([0, 0, ecx_mask.into(), 0]),
200 );
201}
202
203fn amd_processor_topology_definition_cpuid(
205 topology: &ProcessorTopology,
206 leaves: &mut Vec<CpuidLeaf>,
207) {
208 let threads_per_compute_unit = if topology.smt_enabled() { 1 } else { 0 };
210 let ebx = ProcessorTopologyDefinitionEbx::new()
211 .with_threads_per_compute_unit(threads_per_compute_unit);
212
213 let ebx_mask = ProcessorTopologyDefinitionEbx::new().with_threads_per_compute_unit(!0);
214
215 let ecx = ProcessorTopologyDefinitionEcx::new().with_nodes_per_processor(0);
217 let ecx_mask = ProcessorTopologyDefinitionEcx::new().with_nodes_per_processor(0x7);
218
219 leaves.push(
220 CpuidLeaf::new(
221 CpuidFunction::ProcessorTopologyDefinition.0,
222 [0, ebx.into(), ecx.into(), 0],
223 )
224 .masked([0, ebx_mask.into(), ecx_mask.into(), 0]),
225 );
226}