hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/arm64/kernel/smp.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * SMP initialisation and IPI support
  * Based on arch/arm/kernel/smp.c
  *
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
 #include <linux/acpi.h>
@@ -35,12 +24,15 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/irqchip/arm-gic-v3.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
 #include <linux/of.h>
 #include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
 #include <linux/kexec.h>
+#include <linux/kvm_host.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -49,12 +41,10 @@
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/daifflags.h>
+#include <asm/kvm_mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
 #include <asm/processor.h>
-#include <asm/scs.h>
 #include <asm/smp_plat.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -63,9 +53,18 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/debug.h>
+
+#if IS_ENABLED(CONFIG_ROCKCHIP_MINIDUMP)
+#include <soc/rockchip/rk_minidump.h>
+#endif
 
 DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -74,7 +73,7 @@
  */
 struct secondary_data secondary_data;
 /* Number of CPUs which aren't online, but looping in kernel text. */
-int cpus_stuck_in_kernel;
+static int cpus_stuck_in_kernel;
 
 enum ipi_msg_type {
 	IPI_RESCHEDULE,
@@ -83,10 +82,18 @@
 	IPI_CPU_CRASH_STOP,
 	IPI_TIMER,
 	IPI_IRQ_WORK,
-	IPI_WAKEUP
+	IPI_WAKEUP,
+	NR_IPI
 };
 
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
 #ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu);
 static int op_cpu_kill(unsigned int cpu);
 #else
 static inline int op_cpu_kill(unsigned int cpu)
@@ -102,14 +109,15 @@
  */
 static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	if (cpu_ops[cpu]->cpu_boot)
-		return cpu_ops[cpu]->cpu_boot(cpu);
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+	if (ops->cpu_boot)
+		return ops->cpu_boot(cpu);
 
 	return -EOPNOTSUPP;
 }
 
 static DECLARE_COMPLETION(cpu_running);
-bool va52mismatch __ro_after_init;
 
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
@@ -125,61 +133,72 @@
 	update_cpu_boot_status(CPU_MMU_OFF);
 	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
 
-	/*
-	 * Now bring the CPU into our world.
-	 */
+	/* Now bring the CPU into our world */
 	ret = boot_secondary(cpu, idle);
-	if (ret == 0) {
-		/*
-		 * CPU was successfully started, wait for it to come online or
-		 * time out.
-		 */
-		wait_for_completion_timeout(&cpu_running,
-					    msecs_to_jiffies(1000));
-
-		if (!cpu_online(cpu)) {
-			pr_crit("CPU%u: failed to come online\n", cpu);
-
-			if (IS_ENABLED(CONFIG_ARM64_52BIT_VA) && va52mismatch)
-				pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
-
-			ret = -EIO;
-		}
-	} else {
+	if (ret) {
 		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 		return ret;
 	}
 
+	/*
+	 * CPU was successfully started, wait for it to come online or
+	 * time out.
+	 */
+	wait_for_completion_timeout(&cpu_running,
+				    msecs_to_jiffies(5000));
+	if (cpu_online(cpu))
+		return 0;
+
+	pr_crit("CPU%u: failed to come online\n", cpu);
 	secondary_data.task = NULL;
 	secondary_data.stack = NULL;
+	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
 	status = READ_ONCE(secondary_data.status);
-	if (ret && status) {
+	if (status == CPU_MMU_OFF)
+		status = READ_ONCE(__early_cpu_boot_status);
 
-		if (status == CPU_MMU_OFF)
-			status = READ_ONCE(__early_cpu_boot_status);
-
-		switch (status) {
-		default:
-			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
-			       cpu, status);
+	switch (status & CPU_BOOT_STATUS_MASK) {
+	default:
+		pr_err("CPU%u: failed in unknown state : 0x%lx\n",
+		       cpu, status);
+		cpus_stuck_in_kernel++;
+		break;
+	case CPU_KILL_ME:
+		if (!op_cpu_kill(cpu)) {
+			pr_crit("CPU%u: died during early boot\n", cpu);
 			break;
-		case CPU_KILL_ME:
-			if (!op_cpu_kill(cpu)) {
-				pr_crit("CPU%u: died during early boot\n", cpu);
-				break;
-			}
-			/* Fall through */
-			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
-		case CPU_STUCK_IN_KERNEL:
-			pr_crit("CPU%u: is stuck in kernel\n", cpu);
-			cpus_stuck_in_kernel++;
-			break;
-		case CPU_PANIC_KERNEL:
-			panic("CPU%u detected unsupported configuration\n", cpu);
 		}
+		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+		fallthrough;
+	case CPU_STUCK_IN_KERNEL:
+		pr_crit("CPU%u: is stuck in kernel\n", cpu);
+		if (status & CPU_STUCK_REASON_52_BIT_VA)
+			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+		if (status & CPU_STUCK_REASON_NO_GRAN) {
+			pr_crit("CPU%u: does not support %luK granule\n",
+				cpu, PAGE_SIZE / SZ_1K);
+		}
+		cpus_stuck_in_kernel++;
+		break;
+	case CPU_PANIC_KERNEL:
+		panic("CPU%u detected unsupported configuration\n", cpu);
 	}
 
-	return ret;
+	return -EIO;
+}
+
+static void init_gic_priority_masking(void)
+{
+	u32 cpuflags;
+
+	if (WARN_ON(!gic_enable_sre()))
+		return;
+
+	cpuflags = read_sysreg(daif);
+
+	WARN_ON(!(cpuflags & PSR_I_BIT));
+
+	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 }
 
 /*
@@ -190,6 +209,7 @@
 {
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	struct mm_struct *mm = &init_mm;
+	const struct cpu_operations *ops;
 	unsigned int cpu;
 
 	cpu = task_cpu(current);
@@ -208,7 +228,10 @@
 	 */
 	cpu_uninstall_idmap();
 
-	preempt_disable();
+	if (system_uses_irq_prio_masking())
+		init_gic_priority_masking();
+
+	rcu_cpu_starting(cpu);
 	trace_hardirqs_off();
 
 	/*
@@ -218,8 +241,9 @@
 	 */
 	check_local_cpu_capabilities();
 
-	if (cpu_ops[cpu]->cpu_postboot)
-		cpu_ops[cpu]->cpu_postboot();
+	ops = get_cpu_ops(cpu);
+	if (ops->cpu_postboot)
+		ops->cpu_postboot();
 
 	/*
 	 * Log the CPU info before it is marked online and might get read.
@@ -230,6 +254,8 @@
 	 * Enable GIC and timers.
 	 */
 	notify_cpu_starting(cpu);
+
+	ipi_setup(cpu);
 
 	store_cpu_topology(cpu);
 	numa_add_cpu(cpu);
@@ -257,19 +283,21 @@
 #ifdef CONFIG_HOTPLUG_CPU
 static int op_cpu_disable(unsigned int cpu)
 {
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
 	/*
 	 * If we don't have a cpu_die method, abort before we reach the point
 	 * of no return. CPU0 may not have an cpu_ops, so test for it.
 	 */
-	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+	if (!ops || !ops->cpu_die)
 		return -EOPNOTSUPP;
 
 	/*
 	 * We may need to abort a hot unplug for some other mechanism-specific
 	 * reason.
	 */
-	if (cpu_ops[cpu]->cpu_disable)
-		return cpu_ops[cpu]->cpu_disable(cpu);
+	if (ops->cpu_disable)
+		return ops->cpu_disable(cpu);
 
 	return 0;
 }
@@ -294,6 +322,7 @@
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
 	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);
 
 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -305,15 +334,17 @@
 
 static int op_cpu_kill(unsigned int cpu)
 {
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
 	/*
 	 * If we have no means of synchronising with the dying CPU, then assume
 	 * that it is really dead. We can only wait for an arbitrary length of
 	 * time and hope that it's dead, so let's skip the wait and just hope.
 	 */
-	if (!cpu_ops[cpu]->cpu_kill)
+	if (!ops->cpu_kill)
 		return 0;
 
-	return cpu_ops[cpu]->cpu_kill(cpu);
+	return ops->cpu_kill(cpu);
 }
 
 /*
@@ -328,7 +359,7 @@
 		pr_crit("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
-	pr_notice("CPU%u: shutdown\n", cpu);
+	pr_debug("CPU%u: shutdown\n", cpu);
 
 	/*
 	 * Now that the dying CPU is beyond the point of no return w.r.t.
@@ -338,8 +369,7 @@
 	 */
 	err = op_cpu_kill(cpu);
 	if (err)
-		pr_warn("CPU%d may not have shut down cleanly: %d\n",
-			cpu, err);
+		pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
 }
 
 /*
@@ -349,9 +379,7 @@
 void cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
-
-	/* Save the shadow stack pointer before exiting the idle task */
-	scs_save(current);
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
 
 	idle_task_exit();
 
@@ -365,11 +393,21 @@
 	 * mechanism must perform all required cache maintenance to ensure that
 	 * no dirty lines are lost in the process of shutting down the CPU.
 	 */
-	cpu_ops[cpu]->cpu_die(cpu);
+	ops->cpu_die(cpu);
 
 	BUG();
 }
 #endif
+
+static void __cpu_try_die(int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+	if (ops && ops->cpu_die)
+		ops->cpu_die(cpu);
+#endif
+}
 
 /*
  * Kill the calling secondary CPU, early in bringup before it is turned
@@ -383,13 +421,13 @@
 
 	/* Mark this CPU absent */
 	set_cpu_present(cpu, 0);
+	rcu_report_dead(cpu);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	update_cpu_boot_status(CPU_KILL_ME);
-	/* Check if we can park ourselves */
-	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
-		cpu_ops[cpu]->cpu_die(cpu);
-#endif
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+		update_cpu_boot_status(CPU_KILL_ME);
+		__cpu_try_die(cpu);
+	}
+
 	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
 
 	cpu_park_loop();
@@ -404,6 +442,10 @@
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
+	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
+		kvm_compute_layout();
+		kvm_apply_hyp_relocations();
+	}
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -419,6 +461,19 @@
 {
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 	cpuinfo_store_boot_cpu();
+
+	/*
+	 * We now know enough about the boot CPU to apply the
+	 * alternatives that cannot wait until interrupt handling
+	 * and/or scheduling is enabled.
+	 */
+	apply_boot_alternatives();
+
+	/* Conditionally switch to GIC PMR for interrupt masking */
+	if (system_uses_irq_prio_masking())
+		init_gic_priority_masking();
+
+	kasan_init_hw_tags();
 }
 
 static u64 __init of_get_cpu_mpidr(struct device_node *dn)
@@ -470,10 +525,13 @@
  */
 static int __init smp_cpu_setup(int cpu)
 {
-	if (cpu_read_ops(cpu))
+	const struct cpu_operations *ops;
+
+	if (init_cpu_ops(cpu))
 		return -ENODEV;
 
-	if (cpu_ops[cpu]->cpu_init(cpu))
+	ops = get_cpu_ops(cpu);
+	if (ops->cpu_init(cpu))
 		return -ENODEV;
 
 	set_cpu_possible(cpu, true);
@@ -534,7 +592,7 @@
 		return;
 
 	/* map the logical cpu id to cpu MPIDR */
-	cpu_logical_map(cpu_count) = hwid;
+	set_cpu_logical_map(cpu_count, hwid);
 
 	cpu_madt_gicc[cpu_count] = *processor;
 
 
@@ -553,7 +611,7 @@
 }
 
 static int __init
-acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
 			     const unsigned long end)
 {
 	struct acpi_madt_generic_interrupt *processor;
@@ -562,7 +620,7 @@
 	if (BAD_MADT_GICC_ENTRY(processor, end))
 		return -EINVAL;
 
-	acpi_table_print_madt_entry(header);
+	acpi_table_print_madt_entry(&header->common);
 
 	acpi_map_gic_cpu_interface(processor);
 
@@ -597,11 +655,7 @@
 #else
 #define acpi_parse_and_init_cpus(...) do { } while (0)
 #endif
-/* Dummy vendor field */
-DEFINE_PER_CPU(bool, pending_ipi);
-EXPORT_SYMBOL_GPL(pending_ipi);
 
-static void (*__smp_update_ipi_history_cb)(int cpu);
 /*
  * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
@@ -611,7 +665,7 @@
 {
 	struct device_node *dn;
 
-	for_each_node_by_type(dn, "cpu") {
+	for_each_of_cpu_node(dn) {
 		u64 hwid = of_get_cpu_mpidr(dn);
 
 		if (hwid == INVALID_HWID)
@@ -652,7 +706,7 @@
 			goto next;
 
 		pr_debug("cpu logical map 0x%llx\n", hwid);
-		cpu_logical_map(cpu_count) = hwid;
+		set_cpu_logical_map(cpu_count, hwid);
 
 		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
 next:
@@ -693,13 +747,14 @@
 	for (i = 1; i < nr_cpu_ids; i++) {
 		if (cpu_logical_map(i) != INVALID_HWID) {
 			if (smp_cpu_setup(i))
-				cpu_logical_map(i) = INVALID_HWID;
+				set_cpu_logical_map(i, INVALID_HWID);
 		}
 	}
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+	const struct cpu_operations *ops;
 	int err;
 	unsigned int cpu;
 	unsigned int this_cpu;
@@ -730,10 +785,11 @@
 		if (cpu == smp_processor_id())
 			continue;
 
-		if (!cpu_ops[cpu])
+		ops = get_cpu_ops(cpu);
+		if (!ops)
 			continue;
 
-		err = cpu_ops[cpu]->cpu_prepare(cpu);
+		err = ops->cpu_prepare(cpu);
 		if (err)
 			continue;
 
@@ -741,19 +797,6 @@
 		numa_store_cpu_info(cpu);
 	}
 }
-
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
-	__smp_cross_call = fn;
-}
-
-void set_update_ipi_history_callback(void (*fn)(int))
-{
-	__smp_update_ipi_history_cb = fn;
-}
-EXPORT_SYMBOL_GPL(set_update_ipi_history_callback);
 
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s) [x] = s
@@ -766,35 +809,25 @@
 	S(IPI_WAKEUP, "CPU wake-up interrupts"),
 };
 
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
-void show_ipi_list(struct seq_file *p, int prec)
+unsigned long irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
 
 	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
-}
 
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
-
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
+	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+	return 0;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -817,27 +850,31 @@
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
-	if (__smp_cross_call)
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+	smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
 }
 #endif
 
-/*
- * ipi_cpu_stop - handle IPI from smp_send_stop()
- */
-static void ipi_cpu_stop(unsigned int cpu)
+static void local_cpu_stop(void)
 {
 	if (system_state <= SYSTEM_RUNNING) {
-		pr_crit("CPU%u: stopping\n", cpu);
+		pr_crit("CPU%u: stopping\n", smp_processor_id());
 		dump_stack();
 	}
-	set_cpu_online(cpu, false);
+	set_cpu_online(smp_processor_id(), false);
 
 	local_daif_mask();
 	sdei_mask_local_cpu();
+	cpu_park_loop();
+}
 
-	while (1)
-		cpu_relax();
+/*
+ * We need to implement panic_smp_self_stop() for parallel panic() calls, so
+ * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
+ * CPUs that have already stopped themselves.
+ */
+void panic_smp_self_stop(void)
+{
+	local_cpu_stop();
 }
 
 #ifdef CONFIG_KEXEC_CORE
@@ -854,10 +891,8 @@
 	local_irq_disable();
 	sdei_mask_local_cpu();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_ops[cpu]->cpu_die)
-		cpu_ops[cpu]->cpu_die(cpu);
-#endif
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		__cpu_try_die(cpu);
 
 	/* just in case */
 	cpu_park_loop();
@@ -867,15 +902,12 @@
 /*
 * Main handler for inter-processor interrupts
 */
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
 	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}
 
 	switch (ipinr) {
 	case IPI_RESCHEDULE:
@@ -883,21 +915,20 @@
 		break;
 
 	case IPI_CALL_FUNC:
-		irq_enter();
 		generic_smp_call_function_interrupt();
-		irq_exit();
 		break;
 
 	case IPI_CPU_STOP:
-		irq_enter();
-		ipi_cpu_stop(cpu);
-		irq_exit();
+		trace_android_vh_ipi_stop_rcuidle(get_irq_regs());
+#if IS_ENABLED(CONFIG_ROCKCHIP_MINIDUMP)
+		rk_minidump_update_cpu_regs(get_irq_regs());
+#endif
+		local_cpu_stop();
 		break;
 
 	case IPI_CPU_CRASH_STOP:
 		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
-			irq_enter();
-			ipi_cpu_crash_stop(cpu, regs);
+			ipi_cpu_crash_stop(cpu, get_irq_regs());
 
 			unreachable();
 		}
@@ -905,17 +936,13 @@
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
-		irq_enter();
 		tick_receive_broadcast();
-		irq_exit();
 		break;
 #endif
 
 #ifdef CONFIG_IRQ_WORK
 	case IPI_IRQ_WORK:
-		irq_enter();
 		irq_work_run();
-		irq_exit();
 		break;
 #endif
 
@@ -934,13 +961,74 @@
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
-	set_irq_regs(old_regs);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+#endif
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < NR_IPI);
+	nr_ipi = min(n, NR_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &cpu_number);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+
+		/* The recheduling IPI is special... */
+		if (i == IPI_RESCHEDULE)
+			__irq_modify_status(ipi_base + i, 0, IRQ_RAW, ~0);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
 }
 
 void smp_send_reschedule(int cpu)
 {
-	if (__smp_update_ipi_history_cb)
-		__smp_update_ipi_history_cb(cpu);
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
@@ -983,8 +1071,8 @@
 		udelay(1);
 
 	if (num_other_online_cpus())
-		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
-			   cpumask_pr_args(cpu_online_mask));
+		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+			cpumask_pr_args(cpu_online_mask));
 
 	sdei_mask_local_cpu();
 }
@@ -1009,10 +1097,8 @@
 	 * If this cpu is the only one alive at this point in time, online or
 	 * not, there are no stop messages to be sent around, so just back out.
 	 */
-	if (num_other_online_cpus() == 0) {
-		sdei_mask_local_cpu();
-		return;
-	}
+	if (num_other_online_cpus() == 0)
+		goto skip_ipi;
 
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
@@ -1028,10 +1114,12 @@
 		udelay(1);
 
 	if (atomic_read(&waiting_for_crash_ipi) > 0)
-		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
-			   cpumask_pr_args(&mask));
+		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+			cpumask_pr_args(&mask));
 
+skip_ipi:
 	sdei_mask_local_cpu();
+	sdei_handler_abort();
 }
 
 bool smp_crash_stop_failed(void)
@@ -1052,8 +1140,9 @@
 {
 #ifdef CONFIG_HOTPLUG_CPU
 	int any_cpu = raw_smp_processor_id();
+	const struct cpu_operations *ops = get_cpu_ops(any_cpu);
 
-	if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
+	if (ops && ops->cpu_die)
 		return true;
 #endif
 	return false;
@@ -1065,3 +1154,15 @@
 
 	return !!cpus_stuck_in_kernel || smp_spin_tables;
 }
+
+int nr_ipi_get(void)
+{
+	return nr_ipi;
+}
+EXPORT_SYMBOL_GPL(nr_ipi_get);
+
+struct irq_desc **ipi_desc_get(void)
+{
+	return ipi_desc;
+}
+EXPORT_SYMBOL_GPL(ipi_desc_get);
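
Usage note (not part of the patch): with IPIs now backed by ordinary per-CPU interrupts, per-IPI counts live in the irq_desc objects rather than in the old ipi_irqs[] array, which is why the diff exports nr_ipi_get() and ipi_desc_get() at the end. A minimal sketch of a hypothetical consumer module is shown below; the module name and the extern prototypes are illustrative assumptions (the real declarations would come from the tree's headers), while nr_ipi_get()/ipi_desc_get() are defined in this diff and kstat_irqs_cpu()/irq_desc_get_irq() are standard kernel APIs also used in the diff.

// Hypothetical example module: prints the total count of each IPI type
// by walking the irq_desc array exposed by the helpers added in this patch.
#include <linux/cpumask.h>
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Assumed to be declared in a shared header in the real tree. */
extern int nr_ipi_get(void);
extern struct irq_desc **ipi_desc_get(void);

static int __init ipi_stats_demo_init(void)
{
	struct irq_desc **desc = ipi_desc_get();
	int nr = nr_ipi_get();
	int i, cpu;

	for (i = 0; i < nr; i++) {
		unsigned int irq = irq_desc_get_irq(desc[i]);
		unsigned long total = 0;

		/* Sum the per-CPU statistics kept by the generic IRQ core. */
		for_each_online_cpu(cpu)
			total += kstat_irqs_cpu(irq, cpu);

		pr_info("IPI%d (irq %u): %lu\n", i, irq, total);
	}

	return 0;
}
module_init(ipi_stats_demo_init);

MODULE_LICENSE("GPL");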