2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
--- a/kernel/arch/arm/kernel/smp.c
+++ b/kernel/arch/arm/kernel/smp.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/arch/arm/kernel/smp.c
  *
  * Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/module.h>
 #include <linux/delay.h>
@@ -29,6 +26,7 @@
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
 #include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
 
 #include <linux/atomic.h>
 #include <asm/bugs.h>
@@ -40,8 +38,6 @@
 #include <asm/idmap.h>
 #include <asm/topology.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
 #include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
@@ -55,18 +51,16 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
  * where to place its SVC stack
  */
 struct secondary_data secondary_data;
-
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int pen_release = -1;
 
 enum ipi_msg_type {
 	IPI_WAKEUP,
@@ -76,17 +70,25 @@
 	IPI_CPU_STOP,
 	IPI_IRQ_WORK,
 	IPI_COMPLETION,
+	NR_IPI,
 	/*
 	 * CPU_BACKTRACE is special and not included in NR_IPI
 	 * or tracable with trace_ipi_*
 	 */
-	IPI_CPU_BACKTRACE,
+	IPI_CPU_BACKTRACE = NR_IPI,
 	/*
 	 * SGI8-15 can be reserved by secure firmware, and thus may
 	 * not be usable by the kernel. Please keep the above limited
 	 * to at most 8 entries.
 	 */
+	MAX_IPI
 };
+
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
 
 static DECLARE_COMPLETION(cpu_running);
 
@@ -237,6 +239,17 @@
 	return cpu != 0;
 }
 
+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -249,11 +262,16 @@
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+	remove_cpu_topology(cpu);
+#endif
+
 	/*
 	 * Take this CPU offline. Once we clear this, we can't return,
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
 	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);
 
 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -273,15 +291,13 @@
 	return 0;
 }
 
-static DECLARE_COMPLETION(cpu_died);
-
 /*
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
 void __cpu_die(unsigned int cpu)
 {
-	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+	if (!cpu_wait_death(cpu, 5)) {
 		pr_err("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
@@ -328,7 +344,7 @@
 	 * this returns, power and/or clocks can be removed at any point
 	 * from this CPU and its cache by platform_cpu_kill().
 	 */
-	complete(&cpu_died);
+	(void)cpu_report_death();
 
 	/*
 	 * Ensure that the cache lines associated with that completion are
@@ -381,6 +397,7 @@
 	cpu_info->cpuid = read_cpuid_id();
 
 	store_cpu_topology(cpuid);
+	check_cpu_icache_size(cpuid);
 }
 
 /*
@@ -419,7 +436,6 @@
 #endif
 	pr_debug("CPU%u: Booted secondary processor\n", cpu);
 
-	preempt_disable();
 	trace_hardirqs_off();
 
 	/*
@@ -429,6 +445,8 @@
 	smp_ops.smp_secondary_init(cpu);
 
 	notify_cpu_starting(cpu);
+
+	ipi_setup(cpu);
 
 	calibrate_delay();
 
@@ -508,22 +526,6 @@
 	}
 }
 
-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
-	if (!__smp_cross_call)
-		__smp_cross_call = fn;
-}
-
-static void (*__smp_update_ipi_history_cb)(int cpu);
-
-void set_update_ipi_history_callback(void (*fn)(int))
-{
-	__smp_update_ipi_history_cb = fn;
-}
-EXPORT_SYMBOL_GPL(set_update_ipi_history_callback);
-
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s) [x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -535,36 +537,26 @@
 	S(IPI_COMPLETION, "completion interrupts"),
 };
 
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
 
 	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq;
+
+		if (!ipi_desc[i])
+			continue;
+
+		irq = irq_desc_get_irq(ipi_desc[i]);
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
-}
-
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
-
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -643,15 +635,12 @@
 	handle_IPI(ipinr, regs);
 }
 
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
 	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}
 
 	switch (ipinr) {
 	case IPI_WAKEUP:
@@ -659,9 +648,7 @@
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
-		irq_enter();
 		tick_receive_broadcast();
-		irq_exit();
 		break;
 #endif
 
@@ -670,37 +657,25 @@
 		break;
 
 	case IPI_CALL_FUNC:
-		irq_enter();
 		generic_smp_call_function_interrupt();
-		irq_exit();
 		break;
 
 	case IPI_CPU_STOP:
-		irq_enter();
 		ipi_cpu_stop(cpu);
-		irq_exit();
 		break;
 
 #ifdef CONFIG_IRQ_WORK
 	case IPI_IRQ_WORK:
-		irq_enter();
 		irq_work_run();
-		irq_exit();
 		break;
 #endif
 
 	case IPI_COMPLETION:
-		irq_enter();
 		ipi_complete(cpu);
-		irq_exit();
 		break;
 
 	case IPI_CPU_BACKTRACE:
-		printk_nmi_enter();
-		irq_enter();
-		nmi_cpu_backtrace(regs);
-		irq_exit();
-		printk_nmi_exit();
+		nmi_cpu_backtrace(get_irq_regs());
 		break;
 
 	default:
@@ -711,13 +686,73 @@
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	do_handle_IPI(ipinr);
+	irq_exit();
+
 	set_irq_regs(old_regs);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < MAX_IPI);
+	nr_ipi = min(n, MAX_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &irq_stat);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+
+		/* The recheduling IPI is special... */
+		if (i == IPI_RESCHEDULE)
+			__irq_modify_status(ipi_base + i, 0, IRQ_RAW, ~0);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
 }
 
 void smp_send_reschedule(int cpu)
 {
-	if (__smp_update_ipi_history_cb)
-		__smp_update_ipi_history_cb(cpu);
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
@@ -774,15 +809,20 @@
 				       unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	int cpu = freq->cpu;
+	struct cpumask *cpus = freq->policy->cpus;
+	int cpu, first = cpumask_first(cpus);
+	unsigned int lpj;
 
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
-	if (!per_cpu(l_p_j_ref, cpu)) {
-		per_cpu(l_p_j_ref, cpu) =
-			per_cpu(cpu_data, cpu).loops_per_jiffy;
-		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+	if (!per_cpu(l_p_j_ref, first)) {
+		for_each_cpu(cpu, cpus) {
+			per_cpu(l_p_j_ref, cpu) =
+				per_cpu(cpu_data, cpu).loops_per_jiffy;
+			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+		}
+
 		if (!global_l_p_j_ref) {
 			global_l_p_j_ref = loops_per_jiffy;
 			global_l_p_j_ref_freq = freq->old;
@@ -794,10 +834,11 @@
 		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
 						global_l_p_j_ref_freq,
 						freq->new);
-		per_cpu(cpu_data, cpu).loops_per_jiffy =
-			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
-				      per_cpu(l_p_j_ref_freq, cpu),
-				      freq->new);
+
+		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
+				    per_cpu(l_p_j_ref_freq, first), freq->new);
+		for_each_cpu(cpu, cpus)
+			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
 	}
 	return NOTIFY_OK;
 }
@@ -817,7 +858,7 @@
 
 static void raise_nmi(cpumask_t *mask)
 {
-	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
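
With set_smp_cross_call() and set_update_ipi_history_callback() removed, IPIs are handled as ordinary per-CPU interrupts: the interrupt controller driver allocates a block of SGIs from its IRQ domain and hands the resulting Linux IRQ range to the architecture code through set_smp_ipi_range(), which requests the per-CPU IRQs and enables them on each CPU via ipi_setup(). A rough sketch of a caller follows; the function name, domain and fwnode parameters are illustrative only, modeled loosely on the GICv3 driver rather than taken from this patch:

	static void __init example_smp_init(struct irq_domain *domain,
					    struct fwnode_handle *fwnode)
	{
		/* fwspec describing the SGI range in the controller's IRQ domain */
		struct irq_fwspec sgi_fwspec = {
			.fwnode		= fwnode,
			.param_count	= 1,
		};
		int base_sgi;

		/* Allocate eight per-CPU Linux IRQs backing SGI0-SGI7 */
		base_sgi = __irq_domain_alloc_irqs(domain, -1, 8, NUMA_NO_NODE,
						   &sgi_fwspec, false, NULL);
		if (base_sgi <= 0)
			return;

		/* Hand the range over; the ARM SMP code requests and enables the IPIs */
		set_smp_ipi_range(base_sgi, 8);
	}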