1use crate::CpuidLeaf;
7use thiserror::Error;
8use vm_topology::processor::ProcessorTopology;
9use x86defs::cpuid::CacheParametersEax;
10use x86defs::cpuid::CpuidFunction;
11use x86defs::cpuid::ExtendedAddressSpaceSizesEcx;
12use x86defs::cpuid::ExtendedTopologyEax;
13use x86defs::cpuid::ExtendedTopologyEbx;
14use x86defs::cpuid::ExtendedTopologyEcx;
15use x86defs::cpuid::ProcessorTopologyDefinitionEbx;
16use x86defs::cpuid::ProcessorTopologyDefinitionEcx;
17use x86defs::cpuid::TopologyLevelType;
18use x86defs::cpuid::Vendor;
19use x86defs::cpuid::VendorAndMaxFunctionEax;
20use x86defs::cpuid::VersionAndFeaturesEbx;
21
/// A CPUID query function: takes the leaf (EAX input) and subleaf (ECX input)
/// and returns the raw `[eax, ebx, ecx, edx]` result.
pub type CpuidFn<'a> = &'a dyn Fn(u32, u32) -> [u32; 4];
25
/// Error returned when the CPUID vendor string is neither Intel- nor
/// AMD-compatible, so no topology leaves can be synthesized.
#[derive(Debug, Error)]
#[error("unknown processor vendor {0}")]
pub struct UnknownVendor(Vendor);
29
30pub fn topology_cpuid<'a>(
35 topology: &'a ProcessorTopology,
36 cpuid: CpuidFn<'a>,
37 leaves: &mut Vec<CpuidLeaf>,
38) -> Result<(), UnknownVendor> {
39 let result = cpuid(CpuidFunction::VendorAndMaxFunction.0, 0);
40 let max = VendorAndMaxFunctionEax::from(result[0]).max_function();
41 let vendor = Vendor::from_ebx_ecx_edx(result[1], result[2], result[3]);
42 if !vendor.is_intel_compatible() && !vendor.is_amd_compatible() {
43 return Err(UnknownVendor(vendor));
44 };
45
46 leaves.push(
48 CpuidLeaf::new(
49 CpuidFunction::VersionAndFeatures.0,
50 [
51 0,
52 VersionAndFeaturesEbx::new()
53 .with_lps_per_package(topology.reserved_vps_per_socket() as u8)
54 .into(),
55 0,
56 0,
57 ],
58 )
59 .masked([
60 0,
61 VersionAndFeaturesEbx::new()
62 .with_lps_per_package(0xff)
63 .into(),
64 0,
65 0,
66 ]),
67 );
68
69 if vendor.is_intel_compatible() {
71 cache_parameters_cpuid(topology, cpuid, leaves);
72 }
73
74 extended_topology_cpuid(topology, CpuidFunction::ExtendedTopologyEnumeration, leaves);
76
77 if max >= CpuidFunction::V2ExtendedTopologyEnumeration.0 {
79 extended_topology_cpuid(
80 topology,
81 CpuidFunction::V2ExtendedTopologyEnumeration,
82 leaves,
83 );
84 }
85
86 if vendor.is_amd_compatible() {
87 amd_extended_address_space_sizes_cpuid(topology, leaves);
89 amd_processor_topology_definition_cpuid(topology, leaves);
90 }
91
92 Ok(())
93}
94
95fn cache_parameters_cpuid(
99 topology: &ProcessorTopology,
100 cpuid: CpuidFn<'_>,
101 leaves: &mut Vec<CpuidLeaf>,
102) {
103 for i in 0..=255 {
104 let result = cpuid(CpuidFunction::CacheParameters.0, i);
105 if result == [0; 4] {
106 break;
107 }
108 let mut eax = CacheParametersEax::new();
109 if topology.smt_enabled() {
110 eax.set_cores_per_socket_minus_one((topology.reserved_vps_per_socket() / 2) - 1);
111 eax.set_threads_sharing_cache_minus_one(1);
112 } else {
113 eax.set_cores_per_socket_minus_one(topology.reserved_vps_per_socket() - 1);
114 eax.set_threads_sharing_cache_minus_one(0);
115 }
116
117 if eax.cache_level() == 3 {
119 eax.set_threads_sharing_cache_minus_one(topology.reserved_vps_per_socket() - 1);
120 }
121
122 let eax_mask = CacheParametersEax::new()
123 .with_cores_per_socket_minus_one(0x3f)
124 .with_threads_sharing_cache_minus_one(0xfff);
125
126 leaves.push(
127 CpuidLeaf::new(CpuidFunction::CacheParameters.0, [eax.into(), 0, 0, 0]).masked([
128 eax_mask.into(),
129 0,
130 0,
131 0,
132 ]),
133 )
134 }
135}
136
137fn extended_topology_cpuid(
142 topology: &ProcessorTopology,
143 function: CpuidFunction,
144 leaves: &mut Vec<CpuidLeaf>,
145) {
146 assert!(
147 function == CpuidFunction::ExtendedTopologyEnumeration
148 || function == CpuidFunction::V2ExtendedTopologyEnumeration
149 );
150 for (index, (level_type, num_lps)) in [
151 (
152 TopologyLevelType::SMT,
153 if topology.smt_enabled() { 2 } else { 1 },
154 ),
155 (TopologyLevelType::CORE, topology.reserved_vps_per_socket()),
156 ]
157 .into_iter()
158 .enumerate()
159 {
160 if level_type <= TopologyLevelType::CORE
161 || function == CpuidFunction::V2ExtendedTopologyEnumeration
162 {
163 let eax = ExtendedTopologyEax::new().with_x2_apic_shift(num_lps.trailing_zeros());
164 let ebx = ExtendedTopologyEbx::new().with_num_lps(num_lps as u16);
165 let ecx = ExtendedTopologyEcx::new()
166 .with_level_number(index as u8)
167 .with_level_type(level_type.0);
168
169 leaves.push(
172 CpuidLeaf::new(function.0, [eax.into(), ebx.into(), ecx.into(), 0])
173 .indexed(index as u32)
174 .masked([!0, !0, !0, 0]),
175 );
176 }
177 }
178}
179
180fn amd_extended_address_space_sizes_cpuid(
184 topology: &ProcessorTopology,
185 leaves: &mut Vec<CpuidLeaf>,
186) {
187 let nc = (topology.reserved_vps_per_socket() - 1) as u8;
188 let apic_core_id_size = topology.reserved_vps_per_socket().trailing_zeros() as u8;
189 let ecx = ExtendedAddressSpaceSizesEcx::new()
190 .with_nc(nc)
191 .with_apic_core_id_size(apic_core_id_size);
192
193 let ecx_mask = ExtendedAddressSpaceSizesEcx::new()
194 .with_nc(0xff)
195 .with_apic_core_id_size(0xf);
196
197 leaves.push(
198 CpuidLeaf::new(
199 CpuidFunction::ExtendedAddressSpaceSizes.0,
200 [0, 0, ecx.into(), 0],
201 )
202 .masked([0, 0, ecx_mask.into(), 0]),
203 );
204}
205
206fn amd_processor_topology_definition_cpuid(
208 topology: &ProcessorTopology,
209 leaves: &mut Vec<CpuidLeaf>,
210) {
211 let threads_per_compute_unit = if topology.smt_enabled() { 1 } else { 0 };
213 let ebx = ProcessorTopologyDefinitionEbx::new()
214 .with_threads_per_compute_unit(threads_per_compute_unit);
215
216 let ebx_mask = ProcessorTopologyDefinitionEbx::new().with_threads_per_compute_unit(!0);
217
218 let ecx = ProcessorTopologyDefinitionEcx::new().with_nodes_per_processor(0);
220 let ecx_mask = ProcessorTopologyDefinitionEcx::new().with_nodes_per_processor(0x7);
221
222 leaves.push(
223 CpuidLeaf::new(
224 CpuidFunction::ProcessorTopologyDefinition.0,
225 [0, ebx.into(), ecx.into(), 0],
226 )
227 .masked([0, ebx_mask.into(), ecx_mask.into(), 0]),
228 );
229}