2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/arch/x86/kernel/apic/vector.c
@@ -39,7 +39,7 @@
 
 struct irq_domain *x86_vector_domain;
 EXPORT_SYMBOL_GPL(x86_vector_domain);
-static DEFINE_RAW_SPINLOCK(vector_lock);
+static DEFINE_HARD_SPINLOCK(vector_lock);
 static cpumask_var_t vector_searchmask;
 static struct irq_chip lapic_controller;
 static struct irq_matrix *vector_matrix;
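Context note (not part of the diff): DEFINE_HARD_SPINLOCK() comes from the IRQ pipeline (Dovetail) support; with CONFIG_IRQ_PIPELINE enabled, a hard lock is also taken with CPU interrupts hard-disabled. That is why the remaining hunks convert every plain raw_spin_lock()/raw_spin_unlock() pair on vector_lock to the _irqsave/_irqrestore variants, so each section explicitly saves and restores the interrupt state it runs under. A minimal sketch of that conversion pattern, using a made-up helper name for illustration only:

/* Hypothetical helper, not part of the patch: shows the locking pattern
 * applied throughout this file after vector_lock becomes a hard lock. */
static void example_vector_lock_section(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/* ... touch vector_matrix or other vector bookkeeping ... */
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}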
@@ -757,6 +757,10 @@
 {
 	int isairq = vector - ISA_IRQ_VECTOR(0);
 
+	/* Copy the cleanup vector if irqs are pipelined. */
+	if (IS_ENABLED(CONFIG_IRQ_PIPELINE) &&
+	    vector == IRQ_MOVE_CLEANUP_VECTOR)
+		return irq_to_desc(IRQ_MOVE_CLEANUP_VECTOR); /* 1:1 mapping */
 	/* Check whether the irq is in the legacy space */
 	if (isairq < 0 || isairq >= nr_legacy_irqs())
 		return VECTOR_UNUSED;
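Context note (not part of the diff): the helper patched above (its name is not visible in this hunk; in mainline it is the static vector-to-descriptor lookup used when a CPU comes online) feeds the per-CPU vector_irq[] translation table, so with pipelining the cleanup IPI vector needs a descriptor entry of its own, hence the 1:1 mapping. A rough sketch of how the caller uses it, approximated from mainline for context and not taken from this patch:

/* Approximation of the online path: every vector, now including the
 * pipelined cleanup vector, gets its descriptor installed per CPU.
 * vector_lock is expected to be held by the caller. */
void lapic_online_sketch(void)
{
	unsigned int vector;

	irq_matrix_online(vector_matrix);
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}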
@@ -791,15 +795,19 @@
 
 void lapic_offline(void)
 {
-	lock_vector_lock();
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	irq_matrix_offline(vector_matrix);
-	unlock_vector_lock();
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static int apic_set_affinity(struct irq_data *irqd,
 			     const struct cpumask *dest, bool force)
 {
 	int err;
+
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
 	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
 		return -EIO;
@@ -830,10 +838,44 @@
 	return 1;
 }
 
-void apic_ack_irq(struct irq_data *irqd)
+#if defined(CONFIG_IRQ_PIPELINE) && \
+	defined(CONFIG_GENERIC_PENDING_IRQ)
+
+static void apic_deferred_irq_move(struct irq_work *work)
+{
+	struct irq_data *irqd;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	irqd = container_of(work, struct irq_data, move_work);
+	desc = irq_data_to_desc(irqd);
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	__irq_move_irq(irqd);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+static inline void apic_move_irq(struct irq_data *irqd)
+{
+	if (irqd_is_setaffinity_pending(irqd) &&
+	    !irqd_is_setaffinity_blocked(irqd)) {
+		init_irq_work(&irqd->move_work, apic_deferred_irq_move);
+		irq_work_queue(&irqd->move_work);
+	}
+}
+
+#else
+
+static inline void apic_move_irq(struct irq_data *irqd)
 {
 	irq_move_irq(irqd);
-	ack_APIC_irq();
+}
+
+#endif
+
+void apic_ack_irq(struct irq_data *irqd)
+{
+	apic_move_irq(irqd);
+	__ack_APIC_irq();
 }
 
 void apic_ack_edge(struct irq_data *irqd)
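Context note (not part of the diff): with interrupts pipelined, the acknowledge path can run from a context where carrying out a pending affinity move directly is not safe, so the hunk above defers the move through an irq_work item that fires later from regular in-band context. The move_work field embedded in struct irq_data and irqd_is_setaffinity_blocked() presumably come with the pipeline patches rather than mainline. The generic deferral pattern it builds on looks like the following stand-alone sketch with made-up names:

#include <linux/irq_work.h>

/* Deferred handler: runs later, outside the constrained context that
 * queued it, where taking desc->lock with interrupts disabled is fine. */
static void demo_deferred_fn(struct irq_work *work)
{
	/* ... do the work that was too risky in the original context ... */
}

static struct irq_work demo_work;

static void demo_setup(void)
{
	init_irq_work(&demo_work, demo_deferred_fn);
}

static void demo_trigger_from_constrained_context(void)
{
	/* Queue and return immediately; the handler runs asynchronously. */
	irq_work_queue(&demo_work);
}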
@@ -876,15 +918,17 @@
 	apicd->move_in_progress = 0;
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(IRQ_MOVE_CLEANUP_VECTOR,
+				 sysvec_irq_move_cleanup)
 {
 	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
 	struct apic_chip_data *apicd;
 	struct hlist_node *tmp;
+	unsigned long flags;
 
 	ack_APIC_irq();
 	/* Prevent vectors vanishing under us */
-	raw_spin_lock(&vector_lock);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 
 	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
 		unsigned int irr, vector = apicd->prev_vector;
@@ -906,14 +950,15 @@
 		free_moved_vector(apicd);
 	}
 
-	raw_spin_unlock(&vector_lock);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static void __send_cleanup_vector(struct apic_chip_data *apicd)
 {
+	unsigned long flags;
 	unsigned int cpu;
 
-	raw_spin_lock(&vector_lock);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	apicd->move_in_progress = 0;
 	cpu = apicd->prev_cpu;
 	if (cpu_online(cpu)) {
@@ -922,7 +967,7 @@
 	} else {
 		apicd->prev_vector = 0;
 	}
-	raw_spin_unlock(&vector_lock);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)
@@ -960,6 +1005,8 @@
 	struct apic_chip_data *apicd;
 	struct irq_data *irqd;
 	unsigned int vector;
+
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
 	/*
 	 * The function is called for all descriptors regardless of which
@@ -1051,9 +1098,10 @@
 int lapic_can_unplug_cpu(void)
 {
 	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
+	unsigned long flags;
 	int ret = 0;
 
-	raw_spin_lock(&vector_lock);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	tomove = irq_matrix_allocated(vector_matrix);
 	avl = irq_matrix_available(vector_matrix, true);
 	if (avl < tomove) {
@@ -1068,7 +1116,7 @@
 			rsvd, avl);
 	}
 out:
-	raw_spin_unlock(&vector_lock);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return ret;
 }
 #endif /* HOTPLUG_CPU */