.. | .. |
---|
15 | 15 | #include <linux/kernel_stat.h> |
---|
16 | 16 | #include <linux/irqdomain.h> |
---|
17 | 17 | #include <linux/wakeup_reason.h> |
---|
| 18 | +#include <linux/irq_pipeline.h> |
---|
18 | 19 | |
---|
19 | 20 | #include <trace/events/irq.h> |
---|
20 | 21 | |
---|
.. | .. |
---|
49 | 50 | |
---|
50 | 51 | if (!chip) |
---|
51 | 52 | chip = &no_irq_chip; |
---|
| 53 | + else |
---|
| 54 | + WARN_ONCE(irqs_pipelined() && |
---|
| 55 | + (chip->flags & IRQCHIP_PIPELINE_SAFE) == 0, |
---|
| 56 | + "irqchip %s is not pipeline-safe!", chip->name); |
---|
52 | 57 | |
---|
53 | 58 | desc->irq_data.chip = chip; |
---|
54 | 59 | irq_put_desc_unlock(desc, flags); |
---|
.. | .. |
---|
155 | 160 | return 0; |
---|
156 | 161 | } |
---|
157 | 162 | EXPORT_SYMBOL(irq_set_chip_data); |
---|
158 | | - |
---|
159 | | -struct irq_data *irq_get_irq_data(unsigned int irq) |
---|
160 | | -{ |
---|
161 | | - struct irq_desc *desc = irq_to_desc(irq); |
---|
162 | | - |
---|
163 | | - return desc ? &desc->irq_data : NULL; |
---|
164 | | -} |
---|
165 | | -EXPORT_SYMBOL_GPL(irq_get_irq_data); |
---|
166 | 163 | |
---|
167 | 164 | static void irq_state_clr_disabled(struct irq_desc *desc) |
---|
168 | 165 | { |
---|
.. | .. |
---|
386 | 383 | */ |
---|
387 | 384 | void irq_disable(struct irq_desc *desc) |
---|
388 | 385 | { |
---|
389 | | - __irq_disable(desc, irq_settings_disable_unlazy(desc)); |
---|
| 386 | + __irq_disable(desc, |
---|
| 387 | + irq_settings_disable_unlazy(desc) || irqs_pipelined()); |
---|
390 | 388 | } |
---|
391 | 389 | |
---|
392 | 390 | void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) |
---|
.. | .. |
---|
532 | 530 | * If the interrupt is an armed wakeup source, mark it pending |
---|
533 | 531 | * and suspended, disable it and notify the pm core about the |
---|
534 | 532 | * event. |
---|
| 533 | + * |
---|
| 534 | + * When pipelining, the logic is as follows: |
---|
| 535 | + * |
---|
| 536 | + * - from a pipeline entry context, we might have preempted |
---|
| 537 | + * the oob stage, or irqs might be [virtually] off, so we may |
---|
| 538 | + * not run the in-band PM code. Just make sure any wakeup |
---|
| 539 | + * interrupt is detected later on when the flow handler |
---|
| 540 | + * re-runs from the in-band stage. |
---|
| 541 | + * |
---|
| 542 | + * - from the in-band context, run the PM wakeup check. |
---|
535 | 543 | */ |
---|
536 | | - if (irq_pm_check_wakeup(desc)) |
---|
| 544 | + if (irqs_pipelined()) { |
---|
| 545 | + WARN_ON_ONCE(irq_pipeline_debug() && !in_pipeline()); |
---|
| 546 | + if (irqd_is_wakeup_armed(&desc->irq_data)) |
---|
| 547 | + return true; |
---|
| 548 | + } else if (irq_pm_check_wakeup(desc)) |
---|
537 | 549 | return false; |
---|
538 | 550 | |
---|
539 | 551 | /* |
---|
.. | .. |
---|
557 | 569 | { |
---|
558 | 570 | raw_spin_lock(&desc->lock); |
---|
559 | 571 | |
---|
560 | | - if (!irq_may_run(desc)) |
---|
| 572 | + if (start_irq_flow() && !irq_may_run(desc)) |
---|
561 | 573 | goto out_unlock; |
---|
| 574 | + |
---|
| 575 | + if (on_pipeline_entry()) { |
---|
| 576 | + handle_oob_irq(desc); |
---|
| 577 | + goto out_unlock; |
---|
| 578 | + } |
---|
562 | 579 | |
---|
563 | 580 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
564 | 581 | |
---|
.. | .. |
---|
594 | 611 | |
---|
595 | 612 | raw_spin_lock(&desc->lock); |
---|
596 | 613 | |
---|
597 | | - if (!irq_may_run(desc)) |
---|
| 614 | + if (start_irq_flow() && !irq_may_run(desc)) |
---|
598 | 615 | goto out_unlock; |
---|
| 616 | + |
---|
| 617 | + if (on_pipeline_entry()) { |
---|
| 618 | + handle_oob_irq(desc); |
---|
| 619 | + goto out_unlock; |
---|
| 620 | + } |
---|
599 | 621 | |
---|
600 | 622 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
601 | 623 | |
---|
.. | .. |
---|
617 | 639 | raw_spin_unlock(&desc->lock); |
---|
618 | 640 | } |
---|
619 | 641 | EXPORT_SYMBOL_GPL(handle_untracked_irq); |
---|
| 642 | + |
---|
| 643 | +static inline void cond_eoi_irq(struct irq_desc *desc) |
---|
| 644 | +{ |
---|
| 645 | + struct irq_chip *chip = desc->irq_data.chip; |
---|
| 646 | + |
---|
| 647 | + if (!(chip->flags & IRQCHIP_EOI_THREADED)) |
---|
| 648 | + chip->irq_eoi(&desc->irq_data); |
---|
| 649 | +} |
---|
| 650 | + |
---|
/*
 * Mask the line at chip level, then conditionally EOI it (see
 * cond_eoi_irq()). Called from pipeline entry paths when
 * handle_oob_irq() did not consume the event, so the line stays
 * masked until the flow handler re-runs from the in-band stage.
 */
static inline void mask_cond_eoi_irq(struct irq_desc *desc)
{
	mask_irq(desc);
	cond_eoi_irq(desc);
}
---|
620 | 656 | |
---|
621 | 657 | /* |
---|
622 | 658 | * Called unconditionally from handle_level_irq() and only for oneshot |
---|
.. | .. |
---|
648 | 684 | void handle_level_irq(struct irq_desc *desc) |
---|
649 | 685 | { |
---|
650 | 686 | raw_spin_lock(&desc->lock); |
---|
651 | | - mask_ack_irq(desc); |
---|
652 | 687 | |
---|
653 | | - if (!irq_may_run(desc)) |
---|
| 688 | + if (start_irq_flow()) { |
---|
| 689 | + mask_ack_irq(desc); |
---|
| 690 | + |
---|
| 691 | + if (!irq_may_run(desc)) |
---|
| 692 | + goto out_unlock; |
---|
| 693 | + } |
---|
| 694 | + |
---|
| 695 | + if (on_pipeline_entry()) { |
---|
| 696 | + if (handle_oob_irq(desc)) |
---|
| 697 | + goto out_unmask; |
---|
654 | 698 | goto out_unlock; |
---|
| 699 | + } |
---|
655 | 700 | |
---|
656 | 701 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
657 | 702 | |
---|
.. | .. |
---|
666 | 711 | |
---|
667 | 712 | kstat_incr_irqs_this_cpu(desc); |
---|
668 | 713 | handle_irq_event(desc); |
---|
669 | | - |
---|
| 714 | +out_unmask: |
---|
670 | 715 | cond_unmask_irq(desc); |
---|
671 | 716 | |
---|
672 | 717 | out_unlock: |
---|
.. | .. |
---|
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		/*
		 * Non-oneshot: in the regular (non-pipelined) case,
		 * simply EOI the line. When pipelined, the EOI was
		 * already sent from the pipeline entry leg, so only
		 * unmask here, and only if the line is not disabled.
		 */
		if (!irqs_pipelined())
			chip->irq_eoi(&desc->irq_data);
		else if (!irqd_irq_disabled(&desc->irq_data))
			unmask_irq(desc);
		return;
	}
	/*
	 * Oneshot: unmask when the line is not disabled, currently
	 * masked, and no threaded handler still holds the oneshot
	 * mask (the thread would unmask on completion otherwise).
	 * The EOI is only sent here in the non-pipelined case; when
	 * pipelined it was already issued on pipeline entry.
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		if (!irqs_pipelined())
			chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!irqs_pipelined() &&
		   !(chip->flags & IRQCHIP_EOI_THREADED)) {
		/* Not unmasking: still EOI unless the thread does it. */
		chip->irq_eoi(&desc->irq_data);
	}
}
---|
.. | .. |
---|
710 | 760 | |
---|
711 | 761 | raw_spin_lock(&desc->lock); |
---|
712 | 762 | |
---|
713 | | - if (!irq_may_run(desc)) |
---|
| 763 | + if (start_irq_flow() && !irq_may_run(desc)) |
---|
714 | 764 | goto out; |
---|
| 765 | + |
---|
| 766 | + if (on_pipeline_entry()) { |
---|
| 767 | + if (handle_oob_irq(desc)) |
---|
| 768 | + chip->irq_eoi(&desc->irq_data); |
---|
| 769 | + else |
---|
| 770 | + mask_cond_eoi_irq(desc); |
---|
| 771 | + goto out_unlock; |
---|
| 772 | + } |
---|
715 | 773 | |
---|
716 | 774 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
717 | 775 | |
---|
.. | .. |
---|
726 | 784 | } |
---|
727 | 785 | |
---|
728 | 786 | kstat_incr_irqs_this_cpu(desc); |
---|
729 | | - if (desc->istate & IRQS_ONESHOT) |
---|
| 787 | + if (!irqs_pipelined() && (desc->istate & IRQS_ONESHOT)) |
---|
730 | 788 | mask_irq(desc); |
---|
731 | 789 | |
---|
732 | 790 | handle_irq_event(desc); |
---|
733 | 791 | |
---|
734 | 792 | cond_unmask_eoi_irq(desc, chip); |
---|
735 | | - |
---|
| 793 | +out_unlock: |
---|
736 | 794 | raw_spin_unlock(&desc->lock); |
---|
737 | 795 | return; |
---|
738 | 796 | out: |
---|
.. | .. |
---|
792 | 850 | */ |
---|
793 | 851 | void handle_edge_irq(struct irq_desc *desc) |
---|
794 | 852 | { |
---|
| 853 | + struct irq_chip *chip = irq_desc_get_chip(desc); |
---|
| 854 | + |
---|
795 | 855 | raw_spin_lock(&desc->lock); |
---|
796 | 856 | |
---|
797 | | - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
| 857 | + if (start_irq_flow()) { |
---|
| 858 | + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
798 | 859 | |
---|
799 | | - if (!irq_may_run(desc)) { |
---|
800 | | - desc->istate |= IRQS_PENDING; |
---|
801 | | - mask_ack_irq(desc); |
---|
802 | | - goto out_unlock; |
---|
| 860 | + if (!irq_may_run(desc)) { |
---|
| 861 | + desc->istate |= IRQS_PENDING; |
---|
| 862 | + mask_ack_irq(desc); |
---|
| 863 | + goto out_unlock; |
---|
| 864 | + } |
---|
| 865 | + |
---|
| 866 | + /* |
---|
| 867 | + * If its disabled or no action available then mask it |
---|
| 868 | + * and get out of here. |
---|
| 869 | + */ |
---|
| 870 | + if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { |
---|
| 871 | + desc->istate |= IRQS_PENDING; |
---|
| 872 | + mask_ack_irq(desc); |
---|
| 873 | + goto out_unlock; |
---|
| 874 | + } |
---|
803 | 875 | } |
---|
804 | 876 | |
---|
805 | | - /* |
---|
806 | | - * If its disabled or no action available then mask it and get |
---|
807 | | - * out of here. |
---|
808 | | - */ |
---|
809 | | - if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { |
---|
810 | | - desc->istate |= IRQS_PENDING; |
---|
811 | | - mask_ack_irq(desc); |
---|
| 877 | + if (on_pipeline_entry()) { |
---|
| 878 | + chip->irq_ack(&desc->irq_data); |
---|
| 879 | + desc->istate |= IRQS_EDGE; |
---|
| 880 | + handle_oob_irq(desc); |
---|
812 | 881 | goto out_unlock; |
---|
813 | 882 | } |
---|
814 | 883 | |
---|
815 | 884 | kstat_incr_irqs_this_cpu(desc); |
---|
816 | 885 | |
---|
817 | 886 | /* Start handling the irq */ |
---|
818 | | - desc->irq_data.chip->irq_ack(&desc->irq_data); |
---|
| 887 | + if (!irqs_pipelined()) |
---|
| 888 | + chip->irq_ack(&desc->irq_data); |
---|
819 | 889 | |
---|
820 | 890 | do { |
---|
821 | 891 | if (unlikely(!desc->action)) { |
---|
.. | .. |
---|
840 | 910 | !irqd_irq_disabled(&desc->irq_data)); |
---|
841 | 911 | |
---|
842 | 912 | out_unlock: |
---|
| 913 | + if (on_pipeline_entry()) |
---|
| 914 | + desc->istate &= ~IRQS_EDGE; |
---|
843 | 915 | raw_spin_unlock(&desc->lock); |
---|
844 | 916 | } |
---|
845 | 917 | EXPORT_SYMBOL(handle_edge_irq); |
---|
.. | .. |
---|
858 | 930 | |
---|
859 | 931 | raw_spin_lock(&desc->lock); |
---|
860 | 932 | |
---|
861 | | - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
| 933 | + if (start_irq_flow()) { |
---|
| 934 | + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
862 | 935 | |
---|
863 | | - if (!irq_may_run(desc)) { |
---|
864 | | - desc->istate |= IRQS_PENDING; |
---|
865 | | - goto out_eoi; |
---|
| 936 | + if (!irq_may_run(desc)) { |
---|
| 937 | + desc->istate |= IRQS_PENDING; |
---|
| 938 | + goto out_eoi; |
---|
| 939 | + } |
---|
| 940 | + } |
---|
| 941 | + |
---|
| 942 | + if (on_pipeline_entry()) { |
---|
| 943 | + desc->istate |= IRQS_EDGE; |
---|
| 944 | + if (handle_oob_irq(desc)) |
---|
| 945 | + goto out_eoi; |
---|
| 946 | + goto out; |
---|
866 | 947 | } |
---|
867 | 948 | |
---|
868 | 949 | /* |
---|
.. | .. |
---|
887 | 968 | |
---|
888 | 969 | out_eoi: |
---|
889 | 970 | chip->irq_eoi(&desc->irq_data); |
---|
| 971 | +out: |
---|
| 972 | + if (on_pipeline_entry()) |
---|
| 973 | + desc->istate &= ~IRQS_EDGE; |
---|
890 | 974 | raw_spin_unlock(&desc->lock); |
---|
891 | 975 | } |
---|
892 | 976 | #endif |
---|
.. | .. |
---|
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	bool handled;

	/*
	 * Pipeline entry: ack, let the oob stage look at the event,
	 * then EOI. If no oob handler consumed it, mask the line so
	 * it stays quiet until the in-band stage replays it.
	 */
	if (on_pipeline_entry()) {
		if (chip->irq_ack)
			chip->irq_ack(&desc->irq_data);
		handled = handle_oob_irq(desc);
		if (chip->irq_eoi)
			chip->irq_eoi(&desc->irq_data);
		if (!handled && chip->irq_mask)
			chip->irq_mask(&desc->irq_data);
		return;
	}

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (irqs_pipelined()) {
		/*
		 * In-band leg of a pipelined delivery: ack/eoi were
		 * done on pipeline entry; run the handlers then undo
		 * the mask applied there.
		 */
		handle_irq_event_percpu(desc);
		if (chip->irq_unmask)
			chip->irq_unmask(&desc->irq_data);
	} else {
		/* Regular flow: ack, handle, eoi. */
		if (chip->irq_ack)
			chip->irq_ack(&desc->irq_data);
		handle_irq_event_percpu(desc);
		if (chip->irq_eoi)
			chip->irq_eoi(&desc->irq_data);
	}
}
---|
918 | 1018 | |
---|
919 | 1019 | /** |
---|
.. | .. |
---|
933 | 1033 | struct irqaction *action = desc->action; |
---|
934 | 1034 | unsigned int irq = irq_desc_get_irq(desc); |
---|
935 | 1035 | irqreturn_t res; |
---|
| 1036 | + bool handled; |
---|
| 1037 | + |
---|
| 1038 | + if (on_pipeline_entry()) { |
---|
| 1039 | + if (chip->irq_ack) |
---|
| 1040 | + chip->irq_ack(&desc->irq_data); |
---|
| 1041 | + handled = handle_oob_irq(desc); |
---|
| 1042 | + if (chip->irq_eoi) |
---|
| 1043 | + chip->irq_eoi(&desc->irq_data); |
---|
| 1044 | + if (!handled && chip->irq_mask) |
---|
| 1045 | + chip->irq_mask(&desc->irq_data); |
---|
| 1046 | + return; |
---|
| 1047 | + } |
---|
936 | 1048 | |
---|
937 | 1049 | /* |
---|
938 | 1050 | * PER CPU interrupts are not serialized. Do not touch |
---|
.. | .. |
---|
940 | 1052 | */ |
---|
941 | 1053 | __kstat_incr_irqs_this_cpu(desc); |
---|
942 | 1054 | |
---|
943 | | - if (chip->irq_ack) |
---|
| 1055 | + if (!irqs_pipelined() && chip->irq_ack) |
---|
944 | 1056 | chip->irq_ack(&desc->irq_data); |
---|
945 | 1057 | |
---|
946 | 1058 | if (likely(action)) { |
---|
.. | .. |
---|
958 | 1070 | enabled ? " and unmasked" : "", irq, cpu); |
---|
959 | 1071 | } |
---|
960 | 1072 | |
---|
961 | | - if (chip->irq_eoi) |
---|
962 | | - chip->irq_eoi(&desc->irq_data); |
---|
| 1073 | + if (irqs_pipelined()) { |
---|
| 1074 | + if (chip->irq_unmask) |
---|
| 1075 | + chip->irq_unmask(&desc->irq_data); |
---|
| 1076 | + } else if (chip->irq_eoi) |
---|
| 1077 | + chip->irq_eoi(&desc->irq_data); |
---|
963 | 1078 | } |
---|
964 | 1079 | |
---|
965 | 1080 | /** |
---|
.. | .. |
---|
979 | 1094 | unsigned int irq = irq_desc_get_irq(desc); |
---|
980 | 1095 | irqreturn_t res; |
---|
981 | 1096 | |
---|
982 | | - __kstat_incr_irqs_this_cpu(desc); |
---|
983 | | - |
---|
984 | 1097 | if (chip->irq_eoi) |
---|
985 | 1098 | chip->irq_eoi(&desc->irq_data); |
---|
| 1099 | + |
---|
| 1100 | + if (on_pipeline_entry()) { |
---|
| 1101 | + handle_oob_irq(desc); |
---|
| 1102 | + return; |
---|
| 1103 | + } |
---|
| 1104 | + |
---|
| 1105 | + /* Trap spurious IPIs if pipelined. */ |
---|
| 1106 | + if (irqs_pipelined() && !action) { |
---|
| 1107 | + print_irq_desc(irq, desc); |
---|
| 1108 | + return; |
---|
| 1109 | + } |
---|
| 1110 | + |
---|
| 1111 | + __kstat_incr_irqs_this_cpu(desc); |
---|
986 | 1112 | |
---|
987 | 1113 | trace_irq_handler_entry(irq, action); |
---|
988 | 1114 | res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); |
---|
.. | .. |
---|
1076 | 1202 | desc->handle_irq = handle; |
---|
1077 | 1203 | } |
---|
1078 | 1204 | |
---|
| 1205 | + irq_settings_set_chained(desc); |
---|
1079 | 1206 | irq_settings_set_noprobe(desc); |
---|
1080 | 1207 | irq_settings_set_norequest(desc); |
---|
1081 | 1208 | irq_settings_set_nothread(desc); |
---|
.. | .. |
---|
1251 | 1378 | |
---|
1252 | 1379 | raw_spin_lock(&desc->lock); |
---|
1253 | 1380 | |
---|
1254 | | - if (!irq_may_run(desc)) |
---|
| 1381 | + if (start_irq_flow() && !irq_may_run(desc)) |
---|
1255 | 1382 | goto out; |
---|
| 1383 | + |
---|
| 1384 | + if (on_pipeline_entry()) { |
---|
| 1385 | + chip->irq_ack(&desc->irq_data); |
---|
| 1386 | + if (handle_oob_irq(desc)) |
---|
| 1387 | + chip->irq_eoi(&desc->irq_data); |
---|
| 1388 | + else |
---|
| 1389 | + mask_cond_eoi_irq(desc); |
---|
| 1390 | + goto out_unlock; |
---|
| 1391 | + } |
---|
1256 | 1392 | |
---|
1257 | 1393 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
1258 | 1394 | |
---|
.. | .. |
---|
1267 | 1403 | } |
---|
1268 | 1404 | |
---|
1269 | 1405 | kstat_incr_irqs_this_cpu(desc); |
---|
1270 | | - if (desc->istate & IRQS_ONESHOT) |
---|
1271 | | - mask_irq(desc); |
---|
| 1406 | + if (!irqs_pipelined()) { |
---|
| 1407 | + if (desc->istate & IRQS_ONESHOT) |
---|
| 1408 | + mask_irq(desc); |
---|
1272 | 1409 | |
---|
1273 | | - /* Start handling the irq */ |
---|
1274 | | - desc->irq_data.chip->irq_ack(&desc->irq_data); |
---|
| 1410 | + /* Start handling the irq */ |
---|
| 1411 | + chip->irq_ack(&desc->irq_data); |
---|
| 1412 | + } |
---|
1275 | 1413 | |
---|
1276 | 1414 | handle_irq_event(desc); |
---|
1277 | 1415 | |
---|
.. | .. |
---|
1282 | 1420 | out: |
---|
1283 | 1421 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
---|
1284 | 1422 | chip->irq_eoi(&desc->irq_data); |
---|
| 1423 | +out_unlock: |
---|
1285 | 1424 | raw_spin_unlock(&desc->lock); |
---|
1286 | 1425 | } |
---|
1287 | 1426 | EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); |
---|
.. | .. |
---|
1301 | 1440 | struct irq_chip *chip = desc->irq_data.chip; |
---|
1302 | 1441 | |
---|
1303 | 1442 | raw_spin_lock(&desc->lock); |
---|
1304 | | - mask_ack_irq(desc); |
---|
1305 | 1443 | |
---|
1306 | | - if (!irq_may_run(desc)) |
---|
1307 | | - goto out; |
---|
| 1444 | + if (start_irq_flow()) { |
---|
| 1445 | + mask_ack_irq(desc); |
---|
| 1446 | + |
---|
| 1447 | + if (!irq_may_run(desc)) |
---|
| 1448 | + goto out; |
---|
| 1449 | + } |
---|
| 1450 | + |
---|
| 1451 | + if (on_pipeline_entry()) { |
---|
| 1452 | + if (handle_oob_irq(desc)) |
---|
| 1453 | + chip->irq_eoi(&desc->irq_data); |
---|
| 1454 | + else |
---|
| 1455 | + cond_eoi_irq(desc); |
---|
| 1456 | + goto out_unlock; |
---|
| 1457 | + } |
---|
1308 | 1458 | |
---|
1309 | 1459 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
---|
1310 | 1460 | |
---|
.. | .. |
---|
1319 | 1469 | } |
---|
1320 | 1470 | |
---|
1321 | 1471 | kstat_incr_irqs_this_cpu(desc); |
---|
1322 | | - if (desc->istate & IRQS_ONESHOT) |
---|
| 1472 | + if (!irqs_pipelined() && (desc->istate & IRQS_ONESHOT)) |
---|
1323 | 1473 | mask_irq(desc); |
---|
1324 | 1474 | |
---|
1325 | 1475 | handle_irq_event(desc); |
---|
.. | .. |
---|
1331 | 1481 | out: |
---|
1332 | 1482 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
---|
1333 | 1483 | chip->irq_eoi(&desc->irq_data); |
---|
| 1484 | +out_unlock: |
---|
1334 | 1485 | raw_spin_unlock(&desc->lock); |
---|
1335 | 1486 | } |
---|
1336 | 1487 | EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); |
---|