--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/arch/arm/kernel/smp.c
  *
  * Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/module.h>
 #include <linux/delay.h>
@@ -29,6 +26,7 @@
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
 #include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
 
 #include <linux/atomic.h>
 #include <asm/bugs.h>
@@ -40,8 +38,6 @@
 #include <asm/idmap.h>
 #include <asm/topology.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
 #include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
@@ -55,18 +51,16 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
  * where to place its SVC stack
  */
 struct secondary_data secondary_data;
-
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int pen_release = -1;
 
 enum ipi_msg_type {
 	IPI_WAKEUP,
@@ -76,17 +70,25 @@
 	IPI_CPU_STOP,
 	IPI_IRQ_WORK,
 	IPI_COMPLETION,
+	NR_IPI,
 	/*
 	 * CPU_BACKTRACE is special and not included in NR_IPI
 	 * or tracable with trace_ipi_*
 	 */
-	IPI_CPU_BACKTRACE,
+	IPI_CPU_BACKTRACE = NR_IPI,
 	/*
 	 * SGI8-15 can be reserved by secure firmware, and thus may
 	 * not be usable by the kernel. Please keep the above limited
 	 * to at most 8 entries.
 	 */
+	MAX_IPI
 };
+
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
 
 static DECLARE_COMPLETION(cpu_running);
 
@@ -237,6 +239,17 @@
 	return cpu != 0;
 }
 
+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -249,11 +262,16 @@
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+	remove_cpu_topology(cpu);
+#endif
+
 	/*
 	 * Take this CPU offline. Once we clear this, we can't return,
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
 	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);
 
 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -273,15 +291,13 @@
 	return 0;
 }
 
-static DECLARE_COMPLETION(cpu_died);
-
 /*
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
 void __cpu_die(unsigned int cpu)
 {
-	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+	if (!cpu_wait_death(cpu, 5)) {
 		pr_err("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
@@ -328,7 +344,7 @@
 	 * this returns, power and/or clocks can be removed at any point
 	 * from this CPU and its cache by platform_cpu_kill().
 	 */
-	complete(&cpu_died);
+	(void)cpu_report_death();
 
 	/*
 	 * Ensure that the cache lines associated with that completion are
@@ -381,6 +397,7 @@
 	cpu_info->cpuid = read_cpuid_id();
 
 	store_cpu_topology(cpuid);
+	check_cpu_icache_size(cpuid);
 }
 
 /*
@@ -419,7 +436,6 @@
 #endif
 	pr_debug("CPU%u: Booted secondary processor\n", cpu);
 
-	preempt_disable();
 	trace_hardirqs_off();
 
 	/*
@@ -429,6 +445,8 @@
 	smp_ops.smp_secondary_init(cpu);
 
 	notify_cpu_starting(cpu);
+
+	ipi_setup(cpu);
 
 	calibrate_delay();
 
@@ -508,22 +526,6 @@
 	}
 }
 
-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
-	if (!__smp_cross_call)
-		__smp_cross_call = fn;
-}
-
-static void (*__smp_update_ipi_history_cb)(int cpu);
-
-void set_update_ipi_history_callback(void (*fn)(int))
-{
-	__smp_update_ipi_history_cb = fn;
-}
-EXPORT_SYMBOL_GPL(set_update_ipi_history_callback);
-
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s) [x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -535,36 +537,26 @@
 	S(IPI_COMPLETION, "completion interrupts"),
 };
 
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
 
 	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq;
+
+		if (!ipi_desc[i])
+			continue;
+
+		irq = irq_desc_get_irq(ipi_desc[i]);
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
-}
-
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
-
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -643,15 +635,12 @@
 	handle_IPI(ipinr, regs);
 }
 
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
 	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}
 
 	switch (ipinr) {
 	case IPI_WAKEUP:
@@ -659,9 +648,7 @@
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
-		irq_enter();
 		tick_receive_broadcast();
-		irq_exit();
 		break;
 #endif
 
@@ -670,36 +657,26 @@
 		break;
 
 	case IPI_CALL_FUNC:
-		irq_enter();
 		generic_smp_call_function_interrupt();
-		irq_exit();
 		break;
 
 	case IPI_CPU_STOP:
-		irq_enter();
 		ipi_cpu_stop(cpu);
-		irq_exit();
 		break;
 
 #ifdef CONFIG_IRQ_WORK
 	case IPI_IRQ_WORK:
-		irq_enter();
 		irq_work_run();
-		irq_exit();
 		break;
 #endif
 
 	case IPI_COMPLETION:
-		irq_enter();
 		ipi_complete(cpu);
-		irq_exit();
 		break;
 
 	case IPI_CPU_BACKTRACE:
 		printk_nmi_enter();
-		irq_enter();
-		nmi_cpu_backtrace(regs);
-		irq_exit();
+		nmi_cpu_backtrace(get_irq_regs());
 		printk_nmi_exit();
 		break;
 
@@ -711,13 +688,73 @@
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	do_handle_IPI(ipinr);
+	irq_exit();
+
 	set_irq_regs(old_regs);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < MAX_IPI);
+	nr_ipi = min(n, MAX_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &irq_stat);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+
+		/* The recheduling IPI is special... */
+		if (i == IPI_RESCHEDULE)
+			__irq_modify_status(ipi_base + i, 0, IRQ_RAW, ~0);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
 }
 
 void smp_send_reschedule(int cpu)
 {
-	if (__smp_update_ipi_history_cb)
-		__smp_update_ipi_history_cb(cpu);
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
@@ -774,15 +811,20 @@
 				  unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	int cpu = freq->cpu;
+	struct cpumask *cpus = freq->policy->cpus;
+	int cpu, first = cpumask_first(cpus);
+	unsigned int lpj;
 
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
-	if (!per_cpu(l_p_j_ref, cpu)) {
-		per_cpu(l_p_j_ref, cpu) =
-			per_cpu(cpu_data, cpu).loops_per_jiffy;
-		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+	if (!per_cpu(l_p_j_ref, first)) {
+		for_each_cpu(cpu, cpus) {
+			per_cpu(l_p_j_ref, cpu) =
+				per_cpu(cpu_data, cpu).loops_per_jiffy;
+			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+		}
+
 		if (!global_l_p_j_ref) {
 			global_l_p_j_ref = loops_per_jiffy;
 			global_l_p_j_ref_freq = freq->old;
@@ -794,10 +836,11 @@
 		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
 						global_l_p_j_ref_freq,
 						freq->new);
-		per_cpu(cpu_data, cpu).loops_per_jiffy =
-			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
-				      per_cpu(l_p_j_ref_freq, cpu),
-				      freq->new);
+
+		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
+				    per_cpu(l_p_j_ref_freq, first), freq->new);
+		for_each_cpu(cpu, cpus)
+			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
 	}
 	return NOTIFY_OK;
 }
@@ -817,7 +860,7 @@
 
 static void raise_nmi(cpumask_t *mask)
 {
-	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)