@@ -39,7 +39,7 @@
 
 struct irq_domain *x86_vector_domain;
 EXPORT_SYMBOL_GPL(x86_vector_domain);
-static DEFINE_RAW_SPINLOCK(vector_lock);
+static DEFINE_HARD_SPINLOCK(vector_lock);
 static cpumask_var_t vector_searchmask;
 static struct irq_chip lapic_controller;
 static struct irq_matrix *vector_matrix;
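
Note: DEFINE_HARD_SPINLOCK comes with the IRQ pipeline (Dovetail) support code. A hard spinlock spins with interrupts disabled in the CPU rather than just virtually masked for the in-band stage, so holding it also excludes out-of-band handlers. As a hedged sketch (not the actual Dovetail definition), a build without pipelining could reduce it to a plain raw spinlock:

        /* Sketch only: with no out-of-band stage there is nothing
         * extra to exclude, so a hard lock may alias a raw one. */
        #ifndef CONFIG_IRQ_PIPELINE
        #define DEFINE_HARD_SPINLOCK(name)      DEFINE_RAW_SPINLOCK(name)
        #endif
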
@@ -757,6 +757,10 @@
 {
         int isairq = vector - ISA_IRQ_VECTOR(0);
 
+        /* Copy the cleanup vector if irqs are pipelined. */
+        if (IS_ENABLED(CONFIG_IRQ_PIPELINE) &&
+            vector == IRQ_MOVE_CLEANUP_VECTOR)
+                return irq_to_desc(IRQ_MOVE_CLEANUP_VECTOR); /* 1:1 mapping */
         /* Check whether the irq is in the legacy space */
         if (isairq < 0 || isairq >= nr_legacy_irqs())
                 return VECTOR_UNUSED;
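
Note: with pipelining enabled, the cleanup IPI is pushed through the generic IRQ layer like any other interrupt, so this vector-to-descriptor lookup must yield a real descriptor for it; system vectors map to IRQ numbers 1:1, hence irq_to_desc(IRQ_MOVE_CLEANUP_VECTOR). A hedged reconstruction of the whole helper after this hunk (the function name and the trailing IOAPIC check are assumed from mainline vector.c, which the excerpt does not show):

        static struct irq_desc *__setup_vector_irq(int vector)
        {
                int isairq = vector - ISA_IRQ_VECTOR(0);

                /* Pipelined cleanup IPI: IRQ number == vector number. */
                if (IS_ENABLED(CONFIG_IRQ_PIPELINE) &&
                    vector == IRQ_MOVE_CLEANUP_VECTOR)
                        return irq_to_desc(IRQ_MOVE_CLEANUP_VECTOR);
                /* Check whether the irq is in the legacy space */
                if (isairq < 0 || isairq >= nr_legacy_irqs())
                        return VECTOR_UNUSED;
                /* Check whether the irq is handled by the IOAPIC */
                if (test_bit(isairq, &io_apic_irqs))
                        return VECTOR_UNUSED;
                return irq_to_desc(isairq);
        }
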
@@ -791,15 +795,19 @@
 
 void lapic_offline(void)
 {
-        lock_vector_lock();
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&vector_lock, flags);
         irq_matrix_offline(vector_matrix);
-        unlock_vector_lock();
+        raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static int apic_set_affinity(struct irq_data *irqd,
                              const struct cpumask *dest, bool force)
 {
         int err;
+
+        WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
         if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
                 return -EIO;
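
Note: two related conversions happen here. lock_vector_lock()/unlock_vector_lock() give way to the irqsave forms because vector_lock is now a hard lock: under pipelining, a plain local_irq_save() only flips the in-band stage's virtual mask, so the lock acquisition itself must really turn off CPU interrupts around the matrix update. The WARN_ON_ONCE then documents the invariant the patch relies on: on a pipelined kernel, affinity changes must be entered with hard interrupts off. The recurring pattern, assuming vector_lock is a hard lock:

        unsigned long flags;

        /* The irqsave form hard-disables interrupts, so neither
         * in-band nor out-of-band handlers can preempt the section. */
        raw_spin_lock_irqsave(&vector_lock, flags);
        /* ... mutate vector_matrix / apic_chip_data ... */
        raw_spin_unlock_irqrestore(&vector_lock, flags);
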
@@ -830,10 +838,44 @@
                 return 1;
 }
 
-void apic_ack_irq(struct irq_data *irqd)
+#if defined(CONFIG_IRQ_PIPELINE) && \
+    defined(CONFIG_GENERIC_PENDING_IRQ)
+
+static void apic_deferred_irq_move(struct irq_work *work)
+{
+        struct irq_data *irqd;
+        struct irq_desc *desc;
+        unsigned long flags;
+
+        irqd = container_of(work, struct irq_data, move_work);
+        desc = irq_data_to_desc(irqd);
+        raw_spin_lock_irqsave(&desc->lock, flags);
+        __irq_move_irq(irqd);
+        raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+static inline void apic_move_irq(struct irq_data *irqd)
+{
+        if (irqd_is_setaffinity_pending(irqd) &&
+            !irqd_is_setaffinity_blocked(irqd)) {
+                init_irq_work(&irqd->move_work, apic_deferred_irq_move);
+                irq_work_queue(&irqd->move_work);
+        }
+}
+
+#else
+
+static inline void apic_move_irq(struct irq_data *irqd)
 {
         irq_move_irq(irqd);
-        ack_APIC_irq();
+}
+
+#endif
+
+void apic_ack_irq(struct irq_data *irqd)
+{
+        apic_move_irq(irqd);
+        __ack_APIC_irq();
 }
 
 void apic_ack_edge(struct irq_data *irqd)
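
Note: when both CONFIG_IRQ_PIPELINE and CONFIG_GENERIC_PENDING_IRQ are set, a pending affinity change can no longer be applied straight from the ack path, which may run from the out-of-band stage; the hunk above bounces it to an irq_work that the in-band stage executes with desc->lock held. This assumes struct irq_data gains a move_work member in the patched tree (see the container_of() call). For reference, the minimal, self-contained irq_work pattern the code builds on (the demo_* names are hypothetical):

        #include <linux/irq_work.h>

        static void demo_deferred(struct irq_work *work)
        {
                /* Runs later from the in-band stage, where taking raw
                 * spinlocks such as desc->lock is legitimate. */
        }

        static DEFINE_IRQ_WORK(demo_work, demo_deferred);

        static void demo_trigger(void)
        {
                /* Callable from contexts that must not take in-band
                 * locks themselves, e.g. an out-of-band ack path. */
                irq_work_queue(&demo_work);
        }

apic_ack_irq() itself now ends in __ack_APIC_irq(), presumably the variant that writes the EOI unconditionally, since plain ack_APIC_irq() gains pipeline-related handling in this tree.
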
@@ -876,15 +918,17 @@
         apicd->move_in_progress = 0;
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(IRQ_MOVE_CLEANUP_VECTOR,
+                                 sysvec_irq_move_cleanup)
 {
         struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
         struct apic_chip_data *apicd;
         struct hlist_node *tmp;
+        unsigned long flags;
 
         ack_APIC_irq();
         /* Prevent vectors vanishing under us */
-        raw_spin_lock(&vector_lock);
+        raw_spin_lock_irqsave(&vector_lock, flags);
 
         hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
                 unsigned int irr, vector = apicd->prev_vector;
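
Note: DEFINE_IDTENTRY_SYSVEC_PIPELINED builds the IDT entry so the IPI first passes through the pipeline core, which needs the vector number to log the event and replay it for the in-band stage (matching the 1:1 vector-to-IRQ mapping added earlier). A toy userspace model of that defer-and-replay behavior (plain C, hypothetical names, not kernel code):

        #include <stdbool.h>
        #include <stdio.h>

        static bool inband_masked;      /* the "virtual" interrupt mask */
        static bool pending;            /* event logged for replay */

        static void handler(void)
        {
                puts("cleanup IPI handled in-band");
        }

        static void vector_entry(void)  /* hardware entry: always runs */
        {
                if (inband_masked)
                        pending = true; /* log now, replay later */
                else
                        handler();      /* dispatch immediately */
        }

        static void inband_unmask(void) /* the "virtual sti" */
        {
                inband_masked = false;
                if (pending) {
                        pending = false;
                        handler();      /* replay the logged IRQ */
                }
        }

        int main(void)
        {
                inband_masked = true;
                vector_entry();         /* arrives while masked */
                inband_unmask();        /* handled here instead */
                return 0;
        }
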
@@ -906,14 +950,15 @@
                 free_moved_vector(apicd);
         }
 
-        raw_spin_unlock(&vector_lock);
+        raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static void __send_cleanup_vector(struct apic_chip_data *apicd)
 {
+        unsigned long flags;
         unsigned int cpu;
 
-        raw_spin_lock(&vector_lock);
+        raw_spin_lock_irqsave(&vector_lock, flags);
         apicd->move_in_progress = 0;
         cpu = apicd->prev_cpu;
         if (cpu_online(cpu)) {
@@ -922,7 +967,7 @@
         } else {
                 apicd->prev_vector = 0;
         }
-        raw_spin_unlock(&vector_lock);
+        raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)
@@ -960,6 +1005,8 @@
         struct apic_chip_data *apicd;
         struct irq_data *irqd;
         unsigned int vector;
+
+        WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
         /*
          * The function is called for all descriptors regardless of which
@@ -1051,9 +1098,10 @@
 int lapic_can_unplug_cpu(void)
 {
         unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
+        unsigned long flags;
         int ret = 0;
 
-        raw_spin_lock(&vector_lock);
+        raw_spin_lock_irqsave(&vector_lock, flags);
         tomove = irq_matrix_allocated(vector_matrix);
         avl = irq_matrix_available(vector_matrix, true);
         if (avl < tomove) {
@@ -1068,7 +1116,7 @@
                         rsvd, avl);
         }
 out:
-        raw_spin_unlock(&vector_lock);
+        raw_spin_unlock_irqrestore(&vector_lock, flags);
         return ret;
 }
 #endif /* HOTPLUG_CPU */