@@ -86,7 +86,7 @@
 	NR_IPI
 };
 
-static int ipi_irq_base __read_mostly;
+int ipi_irq_base __read_mostly;
 static int nr_ipi __read_mostly = NR_IPI;
 static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
 
@@ -273,6 +273,7 @@
 	complete(&cpu_running);
 
 	local_daif_restore(DAIF_PROCCTX);
+	local_irq_enable_full();
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -811,6 +812,8 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu);
+
 unsigned long irq_err_count;
 
 int arch_show_interrupts(struct seq_file *p, int prec)
@@ -822,7 +825,7 @@
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+			seq_printf(p, "%10u ", get_ipi_count(irq, cpu));
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
 
@@ -888,7 +891,7 @@
 
 	atomic_dec(&waiting_for_crash_ipi);
 
-	local_irq_disable();
+	local_irq_disable_full();
 	sdei_mask_local_cpu();
 
 	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
@@ -900,7 +903,7 @@
 }
 
 /*
- * Main handler for inter-processor interrupts
+ * Main handler for inter-processor interrupts on the in-band stage.
  */
static void do_handle_IPI(int ipinr)
 {
@@ -963,6 +966,73 @@
 	trace_ipi_exit_rcuidle(ipi_types[ipinr]);
 }
 
+static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static DEFINE_PER_CPU(unsigned long, ipi_messages);
+
+static DEFINE_PER_CPU(unsigned int [NR_IPI], ipi_counts);
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	unsigned long *pmsg;
+	unsigned int ipinr;
+
+	/*
+	 * Decode in-band IPIs (0..NR_IPI - 1) multiplexed over
+	 * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own
+	 * individual handler.
+	 */
+	pmsg = raw_cpu_ptr(&ipi_messages);
+	while (*pmsg) {
+		ipinr = ffs(*pmsg) - 1;
+		clear_bit(ipinr, pmsg);
+		__this_cpu_inc(ipi_counts[ipinr]);
+		do_handle_IPI(ipinr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	unsigned int cpu;
+
+	/* regular in-band IPI (multiplexed over SGI0). */
+	for_each_cpu(cpu, target)
+		set_bit(ipinr, &per_cpu(ipi_messages, cpu));
+
+	wmb();
+	__smp_cross_call(target, 0);
+}
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return per_cpu(ipi_counts[irq - ipi_irq_base], cpu);
+}
+
+void irq_send_oob_ipi(unsigned int irq,
+		const struct cpumask *cpumask)
+{
+	unsigned int sgi = irq - ipi_irq_base;
+
+	if (WARN_ON(irq_pipeline_debug() &&
+		    (sgi < OOB_IPI_OFFSET ||
+		     sgi >= OOB_IPI_OFFSET + OOB_NR_IPI)))
+		return;
+
+	/* Out-of-band IPI (SGI1-2). */
+	__smp_cross_call(cpumask, sgi);
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+#else
+
 static irqreturn_t ipi_handler(int irq, void *data)
 {
 	do_handle_IPI(irq - ipi_irq_base);
@@ -971,9 +1041,15 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	__smp_cross_call(target, ipinr);
 }
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return kstat_irqs_cpu(irq, cpu);
+}
+
+#endif /* CONFIG_IRQ_PIPELINE */
 
 static void ipi_setup(int cpu)
 {
@@ -1001,18 +1077,25 @@
 
 void __init set_smp_ipi_range(int ipi_base, int n)
 {
-	int i;
+	int i, inband_nr_ipi;
 
 	WARN_ON(n < NR_IPI);
 	nr_ipi = min(n, NR_IPI);
+	/*
+	 * irq_pipeline: the in-band stage traps SGI0 only,
+	 * over which IPI messages are multiplexed. Other SGIs
+	 * are available for exchanging out-of-band IPIs.
+	 */
+	inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi;
 
 	for (i = 0; i < nr_ipi; i++) {
-		int err;
+		if (i < inband_nr_ipi) {
+			int err;
 
-		err = request_percpu_irq(ipi_base + i, ipi_handler,
-					 "IPI", &cpu_number);
-		WARN_ON(err);
-
+			err = request_percpu_irq(ipi_base + i, ipi_handler,
						 "IPI", &cpu_number);
+			WARN_ON(err);
+		}
 		ipi_desc[i] = irq_to_desc(ipi_base + i);
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
 
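Usage note: the hunks above export irq_send_oob_ipi() so that out-of-band code can raise SGI1/SGI2 directly, bypassing the SGI0 multiplexer used for in-band IPI messages. A minimal caller sketch follows. It assumes Dovetail's IRQF_OOB flag, the generic __request_percpu_irq() helper and an extern declaration of ipi_irq_base, none of which appear in this excerpt; every demo_* identifier is hypothetical.

/*
 * Sketch only: request the first out-of-band SGI and use it to kick a
 * remote CPU. OOB_IPI_OFFSET, OOB_NR_IPI, ipi_irq_base and
 * irq_send_oob_ipi() come from the patch above; IRQF_OOB,
 * __request_percpu_irq() and the demo_* names are assumptions.
 */
#include <linux/interrupt.h>
#include <linux/irq_pipeline.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, demo_dev_id);

/* Runs on the out-of-band stage of the target CPU. */
static irqreturn_t demo_oob_ipi_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_setup_oob_ipi(void)
{
	unsigned int oob_ipi = ipi_irq_base + OOB_IPI_OFFSET;	/* SGI1 */

	/*
	 * IRQF_OOB (assumed) asks for delivery on the oob stage; the
	 * per-CPU enabling of the SGI is assumed to be done elsewhere
	 * (e.g. by ipi_setup() on each CPU).
	 */
	return __request_percpu_irq(oob_ipi, demo_oob_ipi_handler,
				    IRQF_OOB, "demo oob IPI", &demo_dev_id);
}

static void demo_kick_cpu(int cpu)
{
	/* Delivered even while in-band interrupts are stalled. */
	irq_send_oob_ipi(ipi_irq_base + OOB_IPI_OFFSET, cpumask_of(cpu));
}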