+// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
...
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
#include <linux/kexec.h>
+#include <linux/kvm_host.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
...
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
+#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
#include <asm/processor.h>
-#include <asm/scs.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
...

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/debug.h>
+
+#if IS_ENABLED(CONFIG_ROCKCHIP_MINIDUMP)
+#include <soc/rockchip/rk_minidump.h>
+#endif

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);

/*
 * as from 2.5, kernels no longer have an init_tasks structure
...
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
-int cpus_stuck_in_kernel;
+static int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
...
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
-	IPI_WAKEUP
+	IPI_WAKEUP,
+	NR_IPI
};

+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu);
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
...
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
-	if (cpu_ops[cpu]->cpu_boot)
-		return cpu_ops[cpu]->cpu_boot(cpu);
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+	if (ops->cpu_boot)
+		return ops->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);
-bool va52mismatch __ro_after_init;

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
...
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

-	/*
-	 * Now bring the CPU into our world.
-	 */
+	/* Now bring the CPU into our world */
	ret = boot_secondary(cpu, idle);
-	if (ret == 0) {
-		/*
-		 * CPU was successfully started, wait for it to come online or
-		 * time out.
-		 */
-		wait_for_completion_timeout(&cpu_running,
-					    msecs_to_jiffies(1000));
-
-		if (!cpu_online(cpu)) {
-			pr_crit("CPU%u: failed to come online\n", cpu);
-
-			if (IS_ENABLED(CONFIG_ARM64_52BIT_VA) && va52mismatch)
-				pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
-
-			ret = -EIO;
-		}
-	} else {
+	if (ret) {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

+	/*
+	 * CPU was successfully started, wait for it to come online or
+	 * time out.
+	 */
+	wait_for_completion_timeout(&cpu_running,
+				    msecs_to_jiffies(5000));
+	if (cpu_online(cpu))
+		return 0;
+
+	pr_crit("CPU%u: failed to come online\n", cpu);
	secondary_data.task = NULL;
	secondary_data.stack = NULL;
+	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
	status = READ_ONCE(secondary_data.status);
-	if (ret && status) {
+	if (status == CPU_MMU_OFF)
+		status = READ_ONCE(__early_cpu_boot_status);

-		if (status == CPU_MMU_OFF)
-			status = READ_ONCE(__early_cpu_boot_status);
-
-		switch (status) {
-		default:
-			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
-			       cpu, status);
+	switch (status & CPU_BOOT_STATUS_MASK) {
+	default:
+		pr_err("CPU%u: failed in unknown state : 0x%lx\n",
+		       cpu, status);
+		cpus_stuck_in_kernel++;
+		break;
+	case CPU_KILL_ME:
+		if (!op_cpu_kill(cpu)) {
+			pr_crit("CPU%u: died during early boot\n", cpu);
			break;
-		case CPU_KILL_ME:
-			if (!op_cpu_kill(cpu)) {
-				pr_crit("CPU%u: died during early boot\n", cpu);
-				break;
-			}
-			/* Fall through */
-			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
-		case CPU_STUCK_IN_KERNEL:
-			pr_crit("CPU%u: is stuck in kernel\n", cpu);
-			cpus_stuck_in_kernel++;
-			break;
-		case CPU_PANIC_KERNEL:
-			panic("CPU%u detected unsupported configuration\n", cpu);
		}
+		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+		fallthrough;
+	case CPU_STUCK_IN_KERNEL:
+		pr_crit("CPU%u: is stuck in kernel\n", cpu);
+		if (status & CPU_STUCK_REASON_52_BIT_VA)
+			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+		if (status & CPU_STUCK_REASON_NO_GRAN) {
+			pr_crit("CPU%u: does not support %luK granule\n",
+				cpu, PAGE_SIZE / SZ_1K);
+		}
+		cpus_stuck_in_kernel++;
+		break;
+	case CPU_PANIC_KERNEL:
+		panic("CPU%u detected unsupported configuration\n", cpu);
	}

-	return ret;
+	return -EIO;
+}
+
+static void init_gic_priority_masking(void)
+{
+	u32 cpuflags;
+
+	if (WARN_ON(!gic_enable_sre()))
+		return;
+
+	cpuflags = read_sysreg(daif);
+
+	WARN_ON(!(cpuflags & PSR_I_BIT));
+
+	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}

/*
...
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
+	const struct cpu_operations *ops;
	unsigned int cpu;

	cpu = task_cpu(current);
...
	 */
	cpu_uninstall_idmap();

-	preempt_disable();
+	if (system_uses_irq_prio_masking())
+		init_gic_priority_masking();
+
+	rcu_cpu_starting(cpu);
	trace_hardirqs_off();

	/*
...
	 */
	check_local_cpu_capabilities();

-	if (cpu_ops[cpu]->cpu_postboot)
-		cpu_ops[cpu]->cpu_postboot();
+	ops = get_cpu_ops(cpu);
+	if (ops->cpu_postboot)
+		ops->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
...
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);
+
+	ipi_setup(cpu);

	store_cpu_topology(cpu);
	numa_add_cpu(cpu);
...
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have an cpu_ops, so test for it.
	 */
-	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+	if (!ops || !ops->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
-	if (cpu_ops[cpu]->cpu_disable)
-		return cpu_ops[cpu]->cpu_disable(cpu);
+	if (ops->cpu_disable)
+		return ops->cpu_disable(cpu);

	return 0;
}
...
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);

	/*
	 * OK - migrate IRQs away from this CPU
...

static int op_cpu_kill(unsigned int cpu)
{
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
-	if (!cpu_ops[cpu]->cpu_kill)
+	if (!ops->cpu_kill)
		return 0;

-	return cpu_ops[cpu]->cpu_kill(cpu);
+	return ops->cpu_kill(cpu);
}

/*
...
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
-	pr_notice("CPU%u: shutdown\n", cpu);
+	pr_debug("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
...
	 */
	err = op_cpu_kill(cpu);
	if (err)
-		pr_warn("CPU%d may not have shut down cleanly: %d\n",
-			cpu, err);
+		pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
}

/*
...
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
-
-	/* Save the shadow stack pointer before exiting the idle task */
-	scs_save(current);
+	const struct cpu_operations *ops = get_cpu_ops(cpu);

	idle_task_exit();

...
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
-	cpu_ops[cpu]->cpu_die(cpu);
+	ops->cpu_die(cpu);

	BUG();
}
#endif
+
+static void __cpu_try_die(int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+	if (ops && ops->cpu_die)
+		ops->cpu_die(cpu);
+#endif
+}

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
...

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);
+	rcu_report_dead(cpu);

-#ifdef CONFIG_HOTPLUG_CPU
-	update_cpu_boot_status(CPU_KILL_ME);
-	/* Check if we can park ourselves */
-	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
-		cpu_ops[cpu]->cpu_die(cpu);
-#endif
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+		update_cpu_boot_status(CPU_KILL_ME);
+		__cpu_try_die(cpu);
+	}
+
	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
...
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
+	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
+		kvm_compute_layout();
+		kvm_apply_hyp_relocations();
+	}
}

void __init smp_cpus_done(unsigned int max_cpus)
...
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();
+
+	/*
+	 * We now know enough about the boot CPU to apply the
+	 * alternatives that cannot wait until interrupt handling
+	 * and/or scheduling is enabled.
+	 */
+	apply_boot_alternatives();
+
+	/* Conditionally switch to GIC PMR for interrupt masking */
+	if (system_uses_irq_prio_masking())
+		init_gic_priority_masking();
+
+	kasan_init_hw_tags();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
...
 */
static int __init smp_cpu_setup(int cpu)
{
-	if (cpu_read_ops(cpu))
+	const struct cpu_operations *ops;
+
+	if (init_cpu_ops(cpu))
		return -ENODEV;

-	if (cpu_ops[cpu]->cpu_init(cpu))
+	ops = get_cpu_ops(cpu);
+	if (ops->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);
...
		return;

	/* map the logical cpu id to cpu MPIDR */
-	cpu_logical_map(cpu_count) = hwid;
+	set_cpu_logical_map(cpu_count, hwid);

	cpu_madt_gicc[cpu_count] = *processor;

...
}

static int __init
-acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
...
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

-	acpi_table_print_madt_entry(header);
+	acpi_table_print_madt_entry(&header->common);

	acpi_map_gic_cpu_interface(processor);

...
#else
#define acpi_parse_and_init_cpus(...) do { } while (0)
#endif
-/* Dummy vendor field */
-DEFINE_PER_CPU(bool, pending_ipi);
-EXPORT_SYMBOL_GPL(pending_ipi);

-static void (*__smp_update_ipi_history_cb)(int cpu);
/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
...
{
	struct device_node *dn;

-	for_each_node_by_type(dn, "cpu") {
+	for_each_of_cpu_node(dn) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
...
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
-		cpu_logical_map(cpu_count) = hwid;
+		set_cpu_logical_map(cpu_count, hwid);

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
...
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
-				cpu_logical_map(i) = INVALID_HWID;
+				set_cpu_logical_map(i, INVALID_HWID);
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
+	const struct cpu_operations *ops;
	int err;
	unsigned int cpu;
	unsigned int this_cpu;
...
		if (cpu == smp_processor_id())
			continue;

-		if (!cpu_ops[cpu])
+		ops = get_cpu_ops(cpu);
+		if (!ops)
			continue;

-		err = cpu_ops[cpu]->cpu_prepare(cpu);
+		err = ops->cpu_prepare(cpu);
		if (err)
			continue;

...
		numa_store_cpu_info(cpu);
	}
}
-
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
-	__smp_cross_call = fn;
-}
-
-void set_update_ipi_history_callback(void (*fn)(int))
-{
-	__smp_update_ipi_history_cb = fn;
-}
-EXPORT_SYMBOL_GPL(set_update_ipi_history_callback);

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
...
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

-void show_ipi_list(struct seq_file *p, int prec)
+unsigned long irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
-}

-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
-
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
+	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+	return 0;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
...
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
-	if (__smp_cross_call)
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+	smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

-/*
- * ipi_cpu_stop - handle IPI from smp_send_stop()
- */
-static void ipi_cpu_stop(unsigned int cpu)
+static void local_cpu_stop(void)
{
	if (system_state <= SYSTEM_RUNNING) {
-		pr_crit("CPU%u: stopping\n", cpu);
+		pr_crit("CPU%u: stopping\n", smp_processor_id());
		dump_stack();
	}
-	set_cpu_online(cpu, false);
+	set_cpu_online(smp_processor_id(), false);

	local_daif_mask();
	sdei_mask_local_cpu();
+	cpu_park_loop();
+}

-	while (1)
-		cpu_relax();
+/*
+ * We need to implement panic_smp_self_stop() for parallel panic() calls, so
+ * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
+ * CPUs that have already stopped themselves.
+ */
+void panic_smp_self_stop(void)
+{
+	local_cpu_stop();
}

#ifdef CONFIG_KEXEC_CORE
...
	local_irq_disable();
	sdei_mask_local_cpu();

-#ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_ops[cpu]->cpu_die)
-		cpu_ops[cpu]->cpu_die(cpu);
-#endif
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		__cpu_try_die(cpu);

	/* just in case */
	cpu_park_loop();
...
/*
 * Main handler for inter-processor interrupts
 */
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
{
	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);

-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
...
		break;

	case IPI_CALL_FUNC:
-		irq_enter();
		generic_smp_call_function_interrupt();
-		irq_exit();
		break;

	case IPI_CPU_STOP:
-		irq_enter();
-		ipi_cpu_stop(cpu);
-		irq_exit();
+		trace_android_vh_ipi_stop_rcuidle(get_irq_regs());
+#if IS_ENABLED(CONFIG_ROCKCHIP_MINIDUMP)
+		rk_minidump_update_cpu_regs(get_irq_regs());
+#endif
+		local_cpu_stop();
		break;

	case IPI_CPU_CRASH_STOP:
		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
-			irq_enter();
-			ipi_cpu_crash_stop(cpu, regs);
+			ipi_cpu_crash_stop(cpu, get_irq_regs());

			unreachable();
		}
...

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
-		irq_enter();
		tick_receive_broadcast();
-		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
-		irq_enter();
		irq_work_run();
-		irq_exit();
		break;
#endif

...

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
-	set_irq_regs(old_regs);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+#endif
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < NR_IPI);
+	nr_ipi = min(n, NR_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &cpu_number);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+
+		/* The recheduling IPI is special... */
+		if (i == IPI_RESCHEDULE)
+			__irq_modify_status(ipi_base + i, 0, IRQ_RAW, ~0);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
}

void smp_send_reschedule(int cpu)
{
-	if (__smp_update_ipi_history_cb)
-		__smp_update_ipi_history_cb(cpu);
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

...
		udelay(1);

	if (num_other_online_cpus())
-		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
-			   cpumask_pr_args(cpu_online_mask));
+		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+			cpumask_pr_args(cpu_online_mask));

	sdei_mask_local_cpu();
}
...
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
-	if (num_other_online_cpus() == 0) {
-		sdei_mask_local_cpu();
-		return;
-	}
+	if (num_other_online_cpus() == 0)
+		goto skip_ipi;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
...
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
-		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
-			   cpumask_pr_args(&mask));
+		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+			cpumask_pr_args(&mask));

+skip_ipi:
	sdei_mask_local_cpu();
+	sdei_handler_abort();
}

bool smp_crash_stop_failed(void)
...
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();
+	const struct cpu_operations *ops = get_cpu_ops(any_cpu);

-	if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
+	if (ops && ops->cpu_die)
		return true;
#endif
	return false;
...

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}
+
+int nr_ipi_get(void)
+{
+	return nr_ipi;
+}
+EXPORT_SYMBOL_GPL(nr_ipi_get);
+
+struct irq_desc **ipi_desc_get(void)
+{
+	return ipi_desc;
+}
+EXPORT_SYMBOL_GPL(ipi_desc_get);
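
For context on how the IPI rework above gets wired up at boot: set_smp_ipi_range() is called by the interrupt controller driver once it has allocated a block of per-CPU SGIs as ordinary Linux interrupts; the arch code then requests one percpu IRQ per IPI message and enables them on each CPU through ipi_setup(). The sketch below illustrates one plausible caller; the domain handle, the fwspec contents and the count of 8 SGIs are illustrative assumptions, not code from this patch.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/numa.h>
#include <linux/smp.h>

/* Hypothetical irqchip probe helper -- a sketch, not code from this patch. */
static void example_irqchip_smp_init(struct irq_domain *d,
				     struct fwnode_handle *fwnode)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= fwnode,
		.param_count	= 1,
		.param		= { 0 },	/* first SGI */
	};
	int base_sgi;

	/* Allocate a block of 8 SGIs as ordinary per-CPU Linux IRQs */
	base_sgi = __irq_domain_alloc_irqs(d, -1, 8, NUMA_NO_NODE,
					   &sgi_fwspec, false, NULL);
	if (base_sgi <= 0)
		return;

	/*
	 * Hand the range to the arch code: it request_percpu_irq()s each
	 * one (see set_smp_ipi_range() above), enables them on the boot
	 * CPU via ipi_setup(), and secondaries do the same from
	 * secondary_start_kernel().
	 */
	set_smp_ipi_range(base_sgi, 8);
}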