2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
--- a/kernel/arch/arm/kernel/topology.c
+++ b/kernel/arch/arm/kernel/topology.c
@@ -94,14 +94,8 @@
 	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
 				 GFP_NOWAIT);
 
-	cn = of_find_node_by_path("/cpus");
-	if (!cn) {
-		pr_err("No CPU information found in DT\n");
-		return;
-	}
-
 	for_each_possible_cpu(cpu) {
-		const u32 *rate;
+		const __be32 *rate;
 		int len;
 
 		/* too early to use cpu->of_node */
@@ -175,59 +169,13 @@
 	topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
 
 	pr_info("CPU%u: update cpu_capacity %lu\n",
-		cpu, topology_get_cpu_scale(NULL, cpu));
+		cpu, topology_get_cpu_scale(cpu));
 }
 
 #else
 static inline void parse_dt_topology(void) {}
 static inline void update_cpu_capacity(unsigned int cpuid) {}
 #endif
-
- /*
- * cpu topology table
- */
-struct cputopo_arm cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
-
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
-	return &cpu_topology[cpu].core_sibling;
-}
-
-/*
- * The current assumption is that we can power gate each core independently.
- * This will be superseded by DT binding once available.
- */
-const struct cpumask *cpu_corepower_mask(int cpu)
-{
-	return &cpu_topology[cpu].thread_sibling;
-}
-
-static void update_siblings_masks(unsigned int cpuid)
-{
-	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
-	int cpu;
-
-	/* update core and thread sibling masks */
-	for_each_possible_cpu(cpu) {
-		cpu_topo = &cpu_topology[cpu];
-
-		if (cpuid_topo->socket_id != cpu_topo->socket_id)
-			continue;
-
-		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-		if (cpu != cpuid)
-			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
-		if (cpuid_topo->core_id != cpu_topo->core_id)
-			continue;
-
-		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
-		if (cpu != cpuid)
-			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
-	}
-	smp_wmb();
-}
 
 /*
  * store_cpu_topology is called at boot when only one cpu is running
@@ -236,12 +184,11 @@
  */
 void store_cpu_topology(unsigned int cpuid)
 {
-	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
 	unsigned int mpidr;
 
-	/* If the cpu topology has been already set, just return */
-	if (cpuid_topo->core_id != -1)
-		return;
+	if (cpuid_topo->package_id != -1)
+		goto topology_populated;
 
 	mpidr = read_cpuid_mpidr();
 
@@ -256,12 +203,12 @@
 			/* core performance interdependency */
 			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
 		} else {
 			/* largely independent cores */
 			cpuid_topo->thread_id = -1;
 			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+			cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 		}
 	} else {
 		/*
@@ -271,32 +218,19 @@
 		 */
 		cpuid_topo->thread_id = -1;
 		cpuid_topo->core_id = 0;
-		cpuid_topo->socket_id = -1;
+		cpuid_topo->package_id = -1;
 	}
-
-	update_siblings_masks(cpuid);
 
 	update_cpu_capacity(cpuid);
 
 	pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
 		cpu_topology[cpuid].core_id,
-		cpu_topology[cpuid].socket_id, mpidr);
-}
+		cpu_topology[cpuid].package_id, mpidr);
 
-static inline int cpu_corepower_flags(void)
-{
-	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+topology_populated:
+	update_siblings_masks(cpuid);
 }
-
-static struct sched_domain_topology_level arm_topology[] = {
-#ifdef CONFIG_SCHED_MC
-	{ cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
-	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-#endif
-	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
-	{ NULL, },
-};
 
 /*
  * init_cpu_topology is called at boot when only one cpu is running
@@ -304,22 +238,8 @@
  */
 void __init init_cpu_topology(void)
 {
-	unsigned int cpu;
-
-	/* init core mask and capacity */
-	for_each_possible_cpu(cpu) {
-		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
-
-		cpu_topo->thread_id = -1;
-		cpu_topo->core_id = -1;
-		cpu_topo->socket_id = -1;
-		cpumask_clear(&cpu_topo->core_sibling);
-		cpumask_clear(&cpu_topo->thread_sibling);
-	}
+	reset_cpu_topology();
 	smp_wmb();
 
 	parse_dt_topology();
-
-	/* Set scheduler topology descriptor */
-	set_sched_topology(arm_topology);
 }
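
Note (context, not part of the patch itself): the calls to update_siblings_masks() and reset_cpu_topology() remain here even though their arm-local definitions are deleted, so the patch assumes equivalent helpers, along with the renamed struct cpu_topology and its package_id field, are provided by shared topology code elsewhere in the tree. As a rough, hypothetical illustration of the bookkeeping the removed arm-specific update_siblings_masks() performed, the following is a minimal self-contained userspace C sketch; NR_CPUS, the plain bitmask sibling representation, and the example topology are invented for the sketch and are not kernel API.

/*
 * Userspace model of the removed sibling-mask update: CPUs sharing a
 * package_id become core siblings, and CPUs that also share a core_id
 * become thread siblings. Bitmasks stand in for the kernel's cpumask_t.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 8	/* illustrative only */

struct topo {
	int package_id;			/* was socket_id before the rename */
	int core_id;
	int thread_id;
	uint32_t core_sibling;		/* one bit per CPU in the same package */
	uint32_t thread_sibling;	/* one bit per CPU in the same core */
};

static struct topo cpu_topology[NR_CPUS];

static void update_siblings_masks(unsigned int cpuid, int nr_cpus)
{
	struct topo *cpuid_topo = &cpu_topology[cpuid];

	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		struct topo *cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		/* same package: mark the two CPUs as core siblings */
		cpu_topo->core_sibling |= 1u << cpuid;
		cpuid_topo->core_sibling |= 1u << cpu;

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		/* same core as well: mark them as thread siblings */
		cpu_topo->thread_sibling |= 1u << cpuid;
		cpuid_topo->thread_sibling |= 1u << cpu;
	}
}

int main(void)
{
	/* example: two packages, each with two single-thread cores */
	int pkg[4] = { 0, 0, 1, 1 }, core[4] = { 0, 1, 0, 1 };

	for (int cpu = 0; cpu < 4; cpu++) {
		cpu_topology[cpu].package_id = pkg[cpu];
		cpu_topology[cpu].core_id = core[cpu];
		cpu_topology[cpu].thread_id = -1;
	}
	for (int cpu = 0; cpu < 4; cpu++)
		update_siblings_masks(cpu, 4);

	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d core_sibling=0x%x thread_sibling=0x%x\n",
		       cpu, cpu_topology[cpu].core_sibling,
		       cpu_topology[cpu].thread_sibling);
	return 0;
}

For this example topology the program prints core_sibling 0x3 for cpu0/cpu1 and 0xc for cpu2/cpu3, with thread_sibling holding only each CPU's own bit, which matches the same-package / same-core grouping the kernel computes one CPU at a time as each CPU comes online.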