commit 9370bb92b2d16684ee45cf24e879c93c509162da (2024-12-19)
--- a/kernel/arch/x86/kernel/apic/vector.c
+++ b/kernel/arch/x86/kernel/apic/vector.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Local APIC related interfaces to support IOAPIC, MSI, etc.
  *
@@ -5,10 +6,6 @@
  * Moved from arch/x86/kernel/apic/io_apic.c.
  * Jiang Liu <jiang.liu@linux.intel.com>
  * Enable support of hierarchical irqdomains
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/interrupt.h>
 #include <linux/irq.h>
@@ -18,6 +15,7 @@
 #include <linux/slab.h>
 #include <asm/irqdomain.h>
 #include <asm/hw_irq.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
 #include <asm/desc.h>
@@ -163,6 +161,7 @@
 		apicd->move_in_progress = true;
 		apicd->prev_vector = apicd->vector;
 		apicd->prev_cpu = apicd->cpu;
+		WARN_ON_ONCE(apicd->cpu == newcpu);
 	} else {
 		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
 				managed);
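The WARN_ON_ONCE added above documents an invariant of the move bookkeeping: once a move is in flight, prev_vector/prev_cpu must describe a different CPU than the new target, otherwise the later cleanup would free the vector that is still live. A simplified sketch of the per-interrupt state involved (abridged from this file's struct apic_chip_data; the real struct has more fields):

    struct apic_chip_data {
            unsigned int vector;            /* current vector */
            unsigned int cpu;               /* CPU the vector lives on */
            unsigned int prev_vector;       /* old vector awaiting cleanup */
            unsigned int prev_cpu;          /* CPU owning prev_vector */
            unsigned int move_in_progress : 1;
            /* ... */
    };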
@@ -560,6 +559,12 @@
 		irqd->chip_data = apicd;
 		irqd->hwirq = virq + i;
 		irqd_set_single_target(irqd);
+		/*
+		 * Prevent that any of these interrupts is invoked in
+		 * non interrupt context via e.g. generic_handle_irq()
+		 * as that can corrupt the affinity move state.
+		 */
+		irqd_set_handle_enforce_irqctx(irqd);

 		/* Don't invoke affinity setter on deactivated interrupts */
 		irqd_set_affinity_on_activate(irqd);
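irqd_set_handle_enforce_irqctx() sets a per-irqdata flag which the generic interrupt core consults before invoking the flow handler. A rough sketch of the core-side enforcement (simplified from kernel/irq; helper names are close to, but not guaranteed to match, the in-tree ones):

    int handle_irq_desc(struct irq_desc *desc)
    {
            struct irq_data *data = irq_desc_get_irq_data(desc);

            /*
             * Refuse e.g. generic_handle_irq() from task context for
             * interrupts carrying the enforce-irqctx flag, as that
             * could race with an affinity move.
             */
            if (WARN_ON_ONCE(!in_hardirq() && irqd_is_handle_enforce_irqctx(data)))
                    return -EPERM;

            generic_handle_irq_desc(desc);
            return 0;
    }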
@@ -733,8 +738,6 @@
 	BUG_ON(x86_vector_domain == NULL);
 	irq_set_default_host(x86_vector_domain);

-	arch_init_msi_domain(x86_vector_domain);
-
 	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

 	/*
@@ -843,6 +846,7 @@
 	.name			= "APIC",
 	.irq_ack		= apic_ack_edge,
 	.irq_set_affinity	= apic_set_affinity,
+	.irq_compose_msi_msg	= x86_vector_msi_compose_msg,
 	.irq_retrigger		= apic_retrigger_irq,
 };

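Wiring .irq_compose_msi_msg into the vector chip lets MSI child domains delegate message composition to the vector domain. Purely as an illustration of what such a message encodes, a sketch using the classic x86 MSI layout (constants from the old asm/msidef.h; this is not the tree's actual helper):

    static void example_compose_msi_msg(struct irq_data *irqd, struct msi_msg *msg)
    {
            struct irq_cfg *cfg = irqd_cfg(irqd);

            /* The target APIC ID is encoded in the address... */
            msg->address_hi = MSI_ADDR_BASE_HI;
            msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(cfg->dest_apicid);
            /* ...and the vector in the data payload. */
            msg->data = cfg->vector;
    }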
@@ -855,13 +859,15 @@
 	bool managed = apicd->is_managed;

 	/*
-	 * This should never happen. Managed interrupts are not
-	 * migrated except on CPU down, which does not involve the
-	 * cleanup vector. But try to keep the accounting correct
-	 * nevertheless.
+	 * Managed interrupts are usually not migrated away
+	 * from an online CPU, but CPU isolation 'managed_irq'
+	 * can make that happen.
+	 * 1) Activation does not take the isolation into account
+	 *    to keep the code simple
+	 * 2) Migration away from an isolated CPU can happen when
+	 *    a non-isolated CPU which is in the calculated
+	 *    affinity mask comes online.
 	 */
-	WARN_ON_ONCE(managed);
-
 	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
 	irq_matrix_free(vector_matrix, cpu, vector, managed);
 	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
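The rewritten comment refers to the 'managed_irq' flag of the isolcpus= boot option (e.g. isolcpus=managed_irq,2-5), which keeps managed interrupts off isolated CPUs as long as a housekeeping CPU from the affinity mask is online. A hedged sketch of such a housekeeping test (simplified; the real decision is spread across the irq matrix and CPU hotplug code, and the flag name follows the 5.x-era API):

    #include <linux/sched/isolation.h>

    static bool can_migrate_off_isolated_cpu(const struct cpumask *affinity)
    {
            const struct cpumask *hk = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

            /*
             * True when the affinity mask contains a non-isolated
             * (housekeeping) CPU the interrupt could move to.
             */
            return cpumask_intersects(affinity, hk);
    }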
@@ -870,13 +876,13 @@
 	apicd->move_in_progress = 0;
 }

-asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
+DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
 {
 	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
 	struct apic_chip_data *apicd;
 	struct hlist_node *tmp;

-	entering_ack_irq();
+	ack_APIC_irq();
 	/* Prevent vectors vanishing under us */
 	raw_spin_lock(&vector_lock);

@@ -901,7 +907,6 @@
 	}

 	raw_spin_unlock(&vector_lock);
-	exiting_irq();
 }

 static void __send_cleanup_vector(struct apic_chip_data *apicd)
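The two hunks above convert the cleanup handler to the idtentry machinery: DEFINE_IDTENTRY_SYSVEC supplies the entry/exit bookkeeping that entering_ack_irq() and exiting_irq() used to provide, so only the APIC acknowledgement stays open-coded in the body. Roughly what the macro expands to (heavily simplified from arch/x86/include/asm/idtentry.h; the real expansion also handles instrumentation and the irq stack):

    __visible noinstr void sysvec_irq_move_cleanup(struct pt_regs *regs)
    {
            irqentry_state_t state = irqentry_enter(regs);

            irq_enter_rcu();                  /* replaces entering_ack_irq() */
            __sysvec_irq_move_cleanup(regs);  /* the body written in this file */
            irq_exit_rcu();                   /* replaces exiting_irq() */
            irqentry_exit(regs, state);
    }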
@@ -929,7 +934,7 @@
 	__send_cleanup_vector(apicd);
 }

-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+void irq_complete_move(struct irq_cfg *cfg)
 {
 	struct apic_chip_data *apicd;
935940
....@@ -937,13 +942,14 @@
937942 if (likely(!apicd->move_in_progress))
938943 return;
939944
940
- if (vector == apicd->vector && apicd->cpu == smp_processor_id())
945
+ /*
946
+ * If the interrupt arrived on the new target CPU, cleanup the
947
+ * vector on the old target CPU. A vector check is not required
948
+ * because an interrupt can never move from one vector to another
949
+ * on the same CPU.
950
+ */
951
+ if (apicd->cpu == smp_processor_id())
941952 __send_cleanup_vector(apicd);
942
-}
943
-
944
-void irq_complete_move(struct irq_cfg *cfg)
945
-{
946
- __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
947953 }
948954
949955 /*
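For comparison, the removed helper recovered the vector from the interrupted frame (the x86 entry code stores the negated vector number in orig_ax) and compared it against the current one; the rework keeps only the CPU check, since an interrupt cannot move from one vector to another on the same CPU:

    /* Old: vector recovered via ~get_irq_regs()->orig_ax, then compared. */
    if (vector == apicd->vector && apicd->cpu == smp_processor_id())
            __send_cleanup_vector(apicd);

    /* New: the CPU check alone suffices. */
    if (apicd->cpu == smp_processor_id())
            __send_cleanup_vector(apicd);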