2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
diff --git a/kernel/arch/arm/kernel/smp.c b/kernel/arch/arm/kernel/smp.c
--- a/kernel/arch/arm/kernel/smp.c
+++ b/kernel/arch/arm/kernel/smp.c
@@ -84,7 +84,7 @@
 	MAX_IPI
 };
 
-static int ipi_irq_base __read_mostly;
+int ipi_irq_base __read_mostly;
 static int nr_ipi __read_mostly = NR_IPI;
 static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
 
@@ -329,7 +329,7 @@
 
 	idle_task_exit();
 
-	local_irq_disable();
+	local_irq_disable_full();
 
 	/*
	 * Flush the data out of the L1 cache for this CPU. This must be
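A note on the _full variants used in this and the later hunks: once interrupts are pipelined, plain local_irq_disable() only stalls the in-band stage (a virtual mask kept by the pipeline core), so the CPU can still take interrupts for the out-of-band stage. Paths that must truly silence the hardware line, such as this hotplug teardown, need the combined form. A minimal sketch of the intent, assuming Dovetail-style semantics; the sketch_ names are illustrative, not the pipeline core's actual implementation:

static inline void sketch_local_irq_disable_full(void)
{
	local_irq_disable();		/* stall the in-band stage (virtual mask) */
	hard_local_irq_disable();	/* really mask IRQs in the CPU (CPSR.I) */
}

static inline void sketch_local_irq_enable_full(void)
{
	hard_local_irq_enable();	/* unmask the hardware line */
	local_irq_enable();		/* unstall in-band, replay any logged IRQs */
}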
@@ -421,6 +421,13 @@
 	local_flush_tlb_all();
 
 	/*
+	 * irq_pipeline: debug_smp_processor_id() accesses percpu
+	 * data.
+	 */
+	if (irqs_pipelined())
+		set_my_cpu_offset(per_cpu_offset(raw_smp_processor_id()));
+
+	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
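Why the early set_my_cpu_offset() call above matters, roughly: with CONFIG_DEBUG_PREEMPT, debug_smp_processor_id() dereferences per-CPU data, and on ARM every per-CPU access adds the offset cached in the TPIDRPRW register to the variable's link-time address. Pipelined entry code can reach such instrumented helpers before the regular secondary-boot path would have set that register. A reading sketch of the offset, using the ARMv7 encoding for illustration:

static inline unsigned long sketch_my_cpu_offset(void)
{
	unsigned long off;

	/* TPIDRPRW: per-CPU offset cached by set_my_cpu_offset() */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off));
	return off;
}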
@@ -463,7 +470,7 @@
 
 	complete(&cpu_running);
 
-	local_irq_enable();
+	local_irq_enable_full();
 	local_fiq_enable();
 	local_abt_enable();
 
@@ -539,6 +546,8 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu);
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@@ -553,7 +562,7 @@
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+			seq_printf(p, "%10u ", get_ipi_count(irq, cpu));
 
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
@@ -606,7 +615,7 @@
 	set_cpu_online(cpu, false);
 
 	local_fiq_disable();
-	local_irq_disable();
+	local_irq_disable_full();
 
 	while (1) {
 		cpu_relax();
@@ -695,12 +704,85 @@
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
+	/*
+	 * We don't support legacy IPI delivery when pipelining
+	 * interrupts.
+	 */
+	WARN_ON_ONCE(irqs_pipelined());
+
 	irq_enter();
 	do_handle_IPI(ipinr);
 	irq_exit();
 
 	set_irq_regs(old_regs);
 }
+
+static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static DEFINE_PER_CPU(unsigned long, ipi_messages);
+
+static DEFINE_PER_CPU(unsigned int [MAX_IPI], ipi_counts);
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	unsigned long *pmsg;
+	unsigned int ipinr;
+
+	/*
+	 * Decode in-band IPIs (0..MAX_IPI - 1) multiplexed over
+	 * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own
+	 * individual handler.
+	 */
+	pmsg = raw_cpu_ptr(&ipi_messages);
+	while (*pmsg) {
+		ipinr = ffs(*pmsg) - 1;
+		clear_bit(ipinr, pmsg);
+		__this_cpu_inc(ipi_counts[ipinr]);
+		do_handle_IPI(ipinr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	unsigned int cpu;
+
+	/* regular in-band IPI (multiplexed over SGI0). */
+	for_each_cpu(cpu, target)
+		set_bit(ipinr, &per_cpu(ipi_messages, cpu));
+
+	wmb();
+	__smp_cross_call(target, 0);
+}
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return per_cpu(ipi_counts[irq - ipi_irq_base], cpu);
+}
+
+void irq_send_oob_ipi(unsigned int irq,
+		const struct cpumask *cpumask)
+{
+	unsigned int sgi = irq - ipi_irq_base;
+
+	if (WARN_ON(irq_pipeline_debug() &&
+		    (sgi < OOB_IPI_OFFSET ||
+		     sgi >= OOB_IPI_OFFSET + OOB_NR_IPI)))
+		return;
+
+	/* Out-of-band IPI (SGI1-2). */
+	__smp_cross_call(cpumask, sgi);
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+#else
 
 static irqreturn_t ipi_handler(int irq, void *data)
 {
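The pipelined IPI scheme above is a classic bitmask mailbox: a sender sets a message bit in the target CPU's ipi_messages word and kicks SGI0; the receiver drains the word bit by bit from its SGI0 handler, bumping a per-message count as it goes. A minimal userspace sketch of the same pattern, with C11 atomics standing in for the kernel's set_bit()/clear_bit() and a direct call standing in for the SGI0 trigger (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define MAX_MSG 8

static _Atomic unsigned long mailbox;	/* stands in for ipi_messages */
static unsigned int counts[MAX_MSG];	/* stands in for ipi_counts  */

static void post_message(unsigned int msg)
{
	atomic_fetch_or(&mailbox, 1UL << msg);	/* sender: set_bit() */
	/* here the kernel would raise SGI0 on the target CPU */
}

static void drain_mailbox(void)			/* receiver side */
{
	unsigned long pending;

	while ((pending = atomic_load(&mailbox)) != 0) {
		unsigned int msg = __builtin_ctzl(pending);	/* ffs() - 1 */
		atomic_fetch_and(&mailbox, ~(1UL << msg));	/* clear_bit() */
		counts[msg]++;
		printf("handling IPI message %u\n", msg);	/* do_handle_IPI() */
	}
}

int main(void)
{
	post_message(2);
	post_message(5);
	drain_mailbox();	/* handles message 2, then 5 */
	return 0;
}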
@@ -710,9 +792,15 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	__smp_cross_call(target, ipinr);
 }
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return kstat_irqs_cpu(irq, cpu);
+}
+
+#endif /* CONFIG_IRQ_PIPELINE */
 
 static void ipi_setup(int cpu)
 {
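Turning ipi_irq_base into a global in the first hunk is presumably what lets out-of-band users compute the SGI numbers irq_send_oob_ipi() expects. A hypothetical caller, assuming OOB_IPI_OFFSET == 1 as the "SGI1-2" comment suggests; the wrapper name is made up for illustration:

static void sketch_kick_oob(unsigned int cpu)
{
	/* first out-of-band SGI, i.e. SGI1 when OOB_IPI_OFFSET == 1 */
	irq_send_oob_ipi(ipi_irq_base + OOB_IPI_OFFSET, cpumask_of(cpu));
}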
@@ -727,18 +815,25 @@
 
 void __init set_smp_ipi_range(int ipi_base, int n)
 {
-	int i;
+	int i, inband_nr_ipi;
 
 	WARN_ON(n < MAX_IPI);
 	nr_ipi = min(n, MAX_IPI);
+	/*
+	 * irq_pipeline: the in-band stage traps SGI0 only,
+	 * over which IPI messages are multiplexed. Other SGIs
+	 * are available for exchanging out-of-band IPIs.
+	 */
+	inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi;
 
 	for (i = 0; i < nr_ipi; i++) {
-		int err;
+		if (i < inband_nr_ipi) {
+			int err;
 
-		err = request_percpu_irq(ipi_base + i, ipi_handler,
-					 "IPI", &irq_stat);
-		WARN_ON(err);
-
+			err = request_percpu_irq(ipi_base + i, ipi_handler,
+						 "IPI", &irq_stat);
+			WARN_ON(err);
+		}
 		ipi_desc[i] = irq_to_desc(ipi_base + i);
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
 
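Net effect of the set_smp_ipi_range() change when pipelining is enabled: only SGI0 is requested with the in-band ipi_handler, while the remaining SGIs still get their irq_desc and the IRQ_HIDDEN flag so they can later be claimed for out-of-band use. A standalone sketch of the resulting layout, with example base/count values:

#include <stdio.h>

int main(void)
{
	int base = 8, n = 7, pipelined = 1;	/* example values */
	int inband_nr_ipi = pipelined ? 1 : n;

	for (int i = 0; i < n; i++)
		printf("irq %d (SGI%d): %s\n", base + i, i,
		       i < inband_nr_ipi ? "in-band ipi_handler (muxed IPIs)"
					 : "descriptor only, oob-capable");
	return 0;
}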