@@ -94,14 +94,8 @@
         __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
                                  GFP_NOWAIT);
 
-        cn = of_find_node_by_path("/cpus");
-        if (!cn) {
-                pr_err("No CPU information found in DT\n");
-                return;
-        }
-
         for_each_possible_cpu(cpu) {
-                const u32 *rate;
+                const __be32 *rate;
                 int len;
 
                 /* too early to use cpu->of_node */
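The `u32` to `__be32` change in this hunk reflects that device-tree cells are stored big-endian, so a raw property pointer should be typed `__be32` and converted with `be32_to_cpup()` before use. A minimal sketch of that pattern; the helper name and the choice of property are purely illustrative:

    #include <linux/of.h>

    /* Illustrative sketch: read one big-endian DT cell and convert it. */
    static u32 read_one_cell(const struct device_node *cn, const char *prop)
    {
            const __be32 *val;        /* DT cells are big-endian in memory */
            int len;

            val = of_get_property(cn, prop, &len);
            if (!val || len != 4)
                    return 0;         /* property absent or not a single cell */

            return be32_to_cpup(val); /* convert to native byte order */
    }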
@@ -175,59 +169,13 @@
         topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
 
         pr_info("CPU%u: update cpu_capacity %lu\n",
-                cpu, topology_get_cpu_scale(NULL, cpu));
+                cpu, topology_get_cpu_scale(cpu));
 }
 
 #else
 static inline void parse_dt_topology(void) {}
 static inline void update_cpu_capacity(unsigned int cpuid) {}
 #endif
-
-/*
- * cpu topology table
- */
-struct cputopo_arm cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
-
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
-        return &cpu_topology[cpu].core_sibling;
-}
-
-/*
- * The current assumption is that we can power gate each core independently.
- * This will be superseded by DT binding once available.
- */
-const struct cpumask *cpu_corepower_mask(int cpu)
-{
-        return &cpu_topology[cpu].thread_sibling;
-}
-
-static void update_siblings_masks(unsigned int cpuid)
-{
-        struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
-        int cpu;
-
-        /* update core and thread sibling masks */
-        for_each_possible_cpu(cpu) {
-                cpu_topo = &cpu_topology[cpu];
-
-                if (cpuid_topo->socket_id != cpu_topo->socket_id)
-                        continue;
-
-                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-                if (cpu != cpuid)
-                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
-                if (cpuid_topo->core_id != cpu_topo->core_id)
-                        continue;
-
-                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
-                if (cpu != cpuid)
-                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
-        }
-        smp_wmb();
-}
 
 /*
  * store_cpu_topology is called at boot when only one cpu is running
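The call site sheds its first argument because the generic accessor in `include/linux/arch_topology.h` takes only a CPU number. Roughly, the generic code keeps per-CPU capacity in a per-CPU variable behind a getter/setter pair along these lines (a sketch of the arch_topology pattern, not a verbatim copy; details differ between kernel versions):

    /* Sketch: per-CPU capacity storage behind the generic accessors. */
    DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

    static inline unsigned long topology_get_cpu_scale(int cpu)
    {
            return per_cpu(cpu_scale, cpu);
    }

    void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
    {
            per_cpu(cpu_scale, cpu) = capacity;
    }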
@@ -236,12 +184,11 @@
  */
 void store_cpu_topology(unsigned int cpuid)
 {
-        struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+        struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
         unsigned int mpidr;
 
-        /* If the cpu topology has been already set, just return */
-        if (cpuid_topo->core_id != -1)
-                return;
+        if (cpuid_topo->package_id != -1)
+                goto topology_populated;
 
         mpidr = read_cpuid_mpidr();
 
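`struct cputopo_arm` and its `socket_id` field give way to the generic `struct cpu_topology`, where `package_id` plays the same role; the early return becomes a `goto` to the `topology_populated:` label added near the end of the function, so the sibling masks are refreshed either way. As a rough sketch, the generic structure this code now fills looks something like the following (the exact field set varies between kernel versions):

    /* Approximate shape of the generic type from <linux/arch_topology.h>. */
    struct cpu_topology {
            int thread_id;
            int core_id;
            int package_id;
            cpumask_t thread_sibling;  /* threads of the same core */
            cpumask_t core_sibling;    /* cores in the same package */
    };

    extern struct cpu_topology cpu_topology[NR_CPUS];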
@@ -256,12 +203,12 @@
                         /* core performance interdependency */
                         cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                         cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+                        cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
                 } else {
                         /* largely independent cores */
                         cpuid_topo->thread_id = -1;
                         cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+                        cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                 }
         } else {
                 /*
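Whether `package_id` comes from affinity level 2 or level 1 depends on the MT bit, but the field extraction is the same either way: each MPIDR affinity level is an 8-bit field. A small worked example with an invented MPIDR value (the macro shown is roughly how 32-bit ARM defines it):

    #define MPIDR_LEVEL_BITS        8
    #define MPIDR_LEVEL_MASK        ((1 << MPIDR_LEVEL_BITS) - 1)
    #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
            (((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)

    /*
     * Example: mpidr = 0x80000102 (invented value)
     *   MPIDR_AFFINITY_LEVEL(mpidr, 0) == 0x02
     *   MPIDR_AFFINITY_LEVEL(mpidr, 1) == 0x01
     *   MPIDR_AFFINITY_LEVEL(mpidr, 2) == 0x00
     * With the MT bit clear, that yields core_id = 2 and package_id = 1.
     */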
@@ -271,32 +218,19 @@
                  */
                 cpuid_topo->thread_id = -1;
                 cpuid_topo->core_id = 0;
-                cpuid_topo->socket_id = -1;
+                cpuid_topo->package_id = -1;
         }
-
-        update_siblings_masks(cpuid);
 
         update_cpu_capacity(cpuid);
 
         pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
                 cpuid, cpu_topology[cpuid].thread_id,
                 cpu_topology[cpuid].core_id,
-                cpu_topology[cpuid].socket_id, mpidr);
-}
+                cpu_topology[cpuid].package_id, mpidr);
 
-static inline int cpu_corepower_flags(void)
-{
-        return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+topology_populated:
+        update_siblings_masks(cpuid);
 }
-
-static struct sched_domain_topology_level arm_topology[] = {
-#ifdef CONFIG_SCHED_MC
-        { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
-        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-#endif
-        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
-        { NULL, },
-};
 
 /*
  * init_cpu_topology is called at boot when only one cpu is running
@@ -304,22 +238,8 @@
  */
 void __init init_cpu_topology(void)
 {
-        unsigned int cpu;
-
-        /* init core mask and capacity */
-        for_each_possible_cpu(cpu) {
-                struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
-
-                cpu_topo->thread_id = -1;
-                cpu_topo->core_id = -1;
-                cpu_topo->socket_id = -1;
-                cpumask_clear(&cpu_topo->core_sibling);
-                cpumask_clear(&cpu_topo->thread_sibling);
-        }
+        reset_cpu_topology();
         smp_wmb();
 
         parse_dt_topology();
-
-        /* Set scheduler topology descriptor */
-        set_sched_topology(arm_topology);
 }
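The per-CPU initialisation loop deleted from `init_cpu_topology()` is what the generic `reset_cpu_topology()` now provides. A hedged approximation of that helper, modelled directly on the removed loop (the generic version clears its own sibling masks and may reset additional fields):

    /* Approximation of the generic helper, mirroring the removed loop. */
    void __init reset_cpu_topology(void)
    {
            unsigned int cpu;

            for_each_possible_cpu(cpu) {
                    struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                    cpu_topo->thread_id = -1;
                    cpu_topo->core_id = -1;
                    cpu_topo->package_id = -1;
                    cpumask_clear(&cpu_topo->core_sibling);
                    cpumask_clear(&cpu_topo->thread_sibling);
            }
    }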