@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Local APIC related interfaces to support IOAPIC, MSI, etc.
  *
@@ -5,10 +6,6 @@
  * Moved from arch/x86/kernel/apic/io_apic.c.
  * Jiang Liu <jiang.liu@linux.intel.com>
  * Enable support of hierarchical irqdomains
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/interrupt.h>
 #include <linux/irq.h>
@@ -18,6 +15,7 @@
 #include <linux/slab.h>
 #include <asm/irqdomain.h>
 #include <asm/hw_irq.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
 #include <asm/desc.h>
@@ -163,6 +161,7 @@
 		apicd->move_in_progress = true;
 		apicd->prev_vector = apicd->vector;
 		apicd->prev_cpu = apicd->cpu;
+		WARN_ON_ONCE(apicd->cpu == newcpu);
 	} else {
 		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
 				managed);
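The new WARN_ON_ONCE() catches a bogus "move" to the CPU the vector is already on: in that case the old and new targets would alias and the deferred cleanup would free a vector that is still in use. For reference, a trimmed sketch of the per-interrupt state this path maintains (the real struct apic_chip_data in this file carries a few more fields and flags):

    struct apic_chip_data {
            unsigned int    vector;                 /* currently assigned vector */
            unsigned int    prev_vector;            /* vector still programmed on the old CPU */
            unsigned int    cpu;                    /* current target CPU */
            unsigned int    prev_cpu;               /* old target CPU awaiting cleanup */
            unsigned int    move_in_progress : 1;   /* cleanup of prev_vector pending */
            /* ... */
    };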
@@ -560,6 +559,12 @@
 		irqd->chip_data = apicd;
 		irqd->hwirq = virq + i;
 		irqd_set_single_target(irqd);
+		/*
+		 * Prevent any of these interrupts from being invoked in
+		 * non-interrupt context via e.g. generic_handle_irq()
+		 * as that can corrupt the affinity move state.
+		 */
+		irqd_set_handle_enforce_irqctx(irqd);
 
 		/* Don't invoke affinity setter on deactivated interrupts */
 		irqd_set_affinity_on_activate(irqd);
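irqd_set_handle_enforce_irqctx() sets a flag that the generic handling path tests before delivering the interrupt. A simplified sketch of that check, modeled on generic_handle_irq() in kernel/irq/irqdesc.c (helper naming approximated here):

    int handle_checked_irq(struct irq_desc *desc)
    {
            struct irq_data *data = irq_desc_get_irq_data(desc);

            /*
             * Refuse delivery outside hard interrupt context: running the
             * flow handler from task context could race with an affinity
             * move and corrupt the move state sketched above.
             */
            if (WARN_ON_ONCE(!in_irq() && irqd_is_handle_enforce_irqctx(data)))
                    return -EPERM;

            generic_handle_irq_desc(desc);
            return 0;
    }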
@@ -733,8 +738,6 @@
 	BUG_ON(x86_vector_domain == NULL);
 	irq_set_default_host(x86_vector_domain);
 
-	arch_init_msi_domain(x86_vector_domain);
-
 	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
 
 	/*
@@ -843,6 +846,7 @@
 	.name			= "APIC",
 	.irq_ack		= apic_ack_edge,
 	.irq_set_affinity	= apic_set_affinity,
+	.irq_compose_msi_msg	= x86_vector_msi_compose_msg,
 	.irq_retrigger		= apic_retrigger_irq,
 };
 
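With .irq_compose_msi_msg wired up on the vector chip, MSI irq domains stacked on top of x86_vector_domain can delegate message composition to the layer that actually owns the vector and APIC destination. The real composition lives in __irq_msi_compose_msg(); an illustrative sketch only, loosely following the old <asm/msidef.h> encoding:

    static void example_compose_msi_msg(struct irq_data *irqd, struct msi_msg *msg)
    {
            struct irq_cfg *cfg = irqd_cfg(irqd);

            msg->address_hi = MSI_ADDR_BASE_HI;
            msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(cfg->dest_apicid);
            /* Fixed delivery mode, edge triggered, target vector */
            msg->data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_DELIVERY_FIXED |
                        MSI_DATA_VECTOR(cfg->vector);
    }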
@@ -855,13 +859,15 @@
 	bool managed = apicd->is_managed;
 
 	/*
-	 * This should never happen. Managed interrupts are not
-	 * migrated except on CPU down, which does not involve the
-	 * cleanup vector. But try to keep the accounting correct
-	 * nevertheless.
+	 * Managed interrupts are usually not migrated away
+	 * from an online CPU, but CPU isolation 'managed_irq'
+	 * can make that happen.
+	 * 1) Activation does not take the isolation into account
+	 *    to keep the code simple
+	 * 2) Migration away from an isolated CPU can happen when
+	 *    a non-isolated CPU which is in the calculated
+	 *    affinity mask comes online.
 	 */
-	WARN_ON_ONCE(managed);
-
 	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
 	irq_matrix_free(vector_matrix, cpu, vector, managed);
 	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
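The 'managed_irq' isolation referenced in the reworded comment comes from the isolcpus=managed_irq,... boot option: affinity updates then prefer housekeeping CPUs and only fall back to isolated ones when nothing else in the mask is online. A condensed fragment of that filtering, modeled on irq_do_set_affinity() in kernel/irq/manage.c (surrounding declarations, locking and error handling omitted):

    if (irqd_affinity_is_managed(data) &&
        housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
            const struct cpumask *hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

            cpumask_and(&tmp_mask, mask, hk_mask);
            /* Fall back to the full mask if no housekeeping CPU is online */
            if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
                    prog_mask = mask;
            else
                    prog_mask = &tmp_mask;
    } else {
            prog_mask = mask;
    }
    ret = chip->irq_set_affinity(data, prog_mask, force);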
@@ -870,13 +876,13 @@
 	apicd->move_in_progress = 0;
 }
 
-asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
+DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
 {
 	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
 	struct apic_chip_data *apicd;
 	struct hlist_node *tmp;
 
-	entering_ack_irq();
+	ack_APIC_irq();
 	/* Prevent vectors vanishing under us */
 	raw_spin_lock(&vector_lock);
 
@@ -901,7 +907,6 @@
 	}
 
 	raw_spin_unlock(&vector_lock);
-	exiting_irq();
 }
 
 static void __send_cleanup_vector(struct apic_chip_data *apicd)
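The DEFINE_IDTENTRY_SYSVEC() conversion explains both deletions above: entering_ack_irq() collapses to a bare ack_APIC_irq() and exiting_irq() disappears, because entry/exit work now comes from the generated wrapper. A simplified sketch of what the macro in <asm/idtentry.h> expands to (instrumentation and KVM details dropped; exact helper names vary between kernel versions):

    __visible noinstr void sysvec_irq_move_cleanup(struct pt_regs *regs)
    {
            irqentry_state_t state = irqentry_enter(regs);  /* RCU/context tracking */

            instrumentation_begin();
            irq_enter_rcu();                        /* was part of entering_ack_irq() */
            run_sysvec_on_irqstack_cond(__sysvec_irq_move_cleanup, regs);
            irq_exit_rcu();                         /* was exiting_irq() */
            instrumentation_end();
            irqentry_exit(regs, state);
    }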
@@ -929,6 +934,6 @@
 		__send_cleanup_vector(apicd);
 }
 
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+void irq_complete_move(struct irq_cfg *cfg)
 {
 	struct apic_chip_data *apicd;
@@ -937,13 +942,14 @@
 	if (likely(!apicd->move_in_progress))
 		return;
 
-	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
+	/*
+	 * If the interrupt arrived on the new target CPU, clean up the
+	 * vector on the old target CPU. A vector check is not required
+	 * because an interrupt can never move from one vector to another
+	 * on the same CPU.
+	 */
+	if (apicd->cpu == smp_processor_id())
 		__send_cleanup_vector(apicd);
-}
-
-void irq_complete_move(struct irq_cfg *cfg)
-{
-	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
 }
 
 /*
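Folding __irq_complete_move() into irq_complete_move() also removes the last use of the vector reconstructed from the exception frame (~regs->orig_ax); per the new comment, the CPU check alone suffices. For context, the caller side in this file looks roughly like:

    static void apic_ack_edge(struct irq_data *irqd)
    {
            irq_complete_move(irqd_cfg(irqd));
            apic_ack_irq(irqd);
    }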