@@ -84,7 +84,7 @@
 	MAX_IPI
 };

-static int ipi_irq_base __read_mostly;
+int ipi_irq_base __read_mostly;
 static int nr_ipi __read_mostly = NR_IPI;
 static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;

---|
@@ -329,7 +329,7 @@

 	idle_task_exit();

-	local_irq_disable();
+	local_irq_disable_full();

 	/*
 	 * Flush the data out of the L1 cache for this CPU. This must be
---|
@@ -421,6 +421,13 @@
 	local_flush_tlb_all();

 	/*
+	 * irq_pipeline: debug_smp_processor_id() accesses percpu
+	 * data.
+	 */
+	if (irqs_pipelined())
+		set_my_cpu_offset(per_cpu_offset(raw_smp_processor_id()));
+
+	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
---|
@@ -463,7 +470,7 @@

 	complete(&cpu_running);

-	local_irq_enable();
+	local_irq_enable_full();
 	local_fiq_enable();
 	local_abt_enable();

---|
@@ -539,6 +546,8 @@

 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu);
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
---|
@@ -553,7 +562,7 @@
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+			seq_printf(p, "%10u ", get_ipi_count(irq, cpu));

 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
---|
@@ -606,7 +615,7 @@
 	set_cpu_online(cpu, false);

 	local_fiq_disable();
-	local_irq_disable();
+	local_irq_disable_full();

 	while (1) {
 		cpu_relax();
---|
@@ -695,12 +704,85 @@
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);

+	/*
+	 * We don't support legacy IPI delivery when pipelining
+	 * interrupts.
+	 */
+	WARN_ON_ONCE(irqs_pipelined());
+
 	irq_enter();
 	do_handle_IPI(ipinr);
 	irq_exit();

 	set_irq_regs(old_regs);
 }
+
+static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static DEFINE_PER_CPU(unsigned long, ipi_messages);
+
+static DEFINE_PER_CPU(unsigned int [MAX_IPI], ipi_counts);
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	unsigned long *pmsg;
+	unsigned int ipinr;
+
+	/*
+	 * Decode in-band IPIs (0..MAX_IPI - 1) multiplexed over
+	 * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own
+	 * individual handler.
+	 */
+	pmsg = raw_cpu_ptr(&ipi_messages);
+	while (*pmsg) {
+		ipinr = ffs(*pmsg) - 1;
+		clear_bit(ipinr, pmsg);
+		__this_cpu_inc(ipi_counts[ipinr]);
+		do_handle_IPI(ipinr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	unsigned int cpu;

+	/* regular in-band IPI (multiplexed over SGI0). */
+	for_each_cpu(cpu, target)
+		set_bit(ipinr, &per_cpu(ipi_messages, cpu));
+
+	wmb();
+	__smp_cross_call(target, 0);
+}
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return per_cpu(ipi_counts[irq - ipi_irq_base], cpu);
+}
+
+void irq_send_oob_ipi(unsigned int irq,
+		      const struct cpumask *cpumask)
+{
+	unsigned int sgi = irq - ipi_irq_base;
+
+	if (WARN_ON(irq_pipeline_debug() &&
+		    (sgi < OOB_IPI_OFFSET ||
+		     sgi >= OOB_IPI_OFFSET + OOB_NR_IPI)))
+		return;
+
+	/* Out-of-band IPI (SGI1-2). */
+	__smp_cross_call(cpumask, sgi);
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+#else

 static irqreturn_t ipi_handler(int irq, void *data)
 {
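
The pipelined path above funnels every in-band IPI through SGI0: smp_cross_call() posts the message bit in the target CPU's ipi_messages word, issues a barrier, then raises the single SGI, and ipi_handler() drains the word lowest-bit-first with ffs(). The standalone C sketch below reproduces the same post/drain pattern so it can be compiled and run in isolation; pending, post_message() and drain_messages() are hypothetical names, not part of the patch, and the release/acquire pairing stands in for the wmb() the kernel code issues between set_bit() and the SGI.

#include <stdatomic.h>
#include <stdio.h>
#include <strings.h>		/* ffs() */

/* Stand-in for one CPU's ipi_messages word. */
static _Atomic unsigned long pending;

/* Sender side: publish the message bit; in the kernel, SGI0 would
 * then be raised on the target CPU. */
static void post_message(int msg)
{
	atomic_fetch_or_explicit(&pending, 1UL << msg,
				 memory_order_release);
}

/* Receiver side: mirrors ipi_handler(), draining lowest bit first. */
static void drain_messages(void)
{
	unsigned long msgs;
	int msg;

	while ((msgs = atomic_load_explicit(&pending,
					    memory_order_acquire)) != 0) {
		msg = ffs((int)msgs) - 1;
		atomic_fetch_and_explicit(&pending, ~(1UL << msg),
					  memory_order_acq_rel);
		printf("handling IPI message %d\n", msg);
	}
}

int main(void)
{
	post_message(2);
	post_message(0);
	drain_messages();	/* handles message 0, then 2 */
	return 0;
}
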
---|
@@ -710,9 +792,15 @@

 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	__smp_cross_call(target, ipinr);
 }
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return kstat_irqs_cpu(irq, cpu);
+}
+
+#endif /* CONFIG_IRQ_PIPELINE */

 static void ipi_setup(int cpu)
 {
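
The two get_ipi_count() flavors exist because of the multiplexing: with pipelining enabled, the IRQ core only ever accounts arrivals on the shared SGI0 line, so kstat_irqs_cpu() would read zero for every other IPI type; the pipelined handler therefore bumps its own per-CPU ipi_counts[] as it demultiplexes, and show_ipi_list() reads those instead. A runnable toy model of that split, using hypothetical hw_stats/sw_counts arrays:

#include <stdio.h>

#define NMSG 3

static unsigned int hw_stats[NMSG];	/* what kstat would see */
static unsigned int sw_counts[NMSG];	/* mirrors ipi_counts[] */

static void deliver_multiplexed(int msg)
{
	hw_stats[0]++;		/* the core only sees the shared line */
	sw_counts[msg]++;	/* the demultiplexer attributes the type */
}

int main(void)
{
	deliver_multiplexed(2);
	deliver_multiplexed(2);
	deliver_multiplexed(1);

	for (int msg = 0; msg < NMSG; msg++)
		printf("msg %d: hw=%u sw=%u\n", msg,
		       hw_stats[msg], sw_counts[msg]);
	return 0;
}
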
---|
@@ -727,18 +815,25 @@

 void __init set_smp_ipi_range(int ipi_base, int n)
 {
-	int i;
+	int i, inband_nr_ipi;

 	WARN_ON(n < MAX_IPI);
 	nr_ipi = min(n, MAX_IPI);
+	/*
+	 * irq_pipeline: the in-band stage traps SGI0 only,
+	 * over which IPI messages are multiplexed. Other SGIs
+	 * are available for exchanging out-of-band IPIs.
+	 */
+	inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi;

 	for (i = 0; i < nr_ipi; i++) {
-		int err;
+		if (i < inband_nr_ipi) {
+			int err;

-		err = request_percpu_irq(ipi_base + i, ipi_handler,
-					 "IPI", &irq_stat);
-		WARN_ON(err);
-
+			err = request_percpu_irq(ipi_base + i, ipi_handler,
+						 "IPI", &irq_stat);
+			WARN_ON(err);
+		}
 		ipi_desc[i] = irq_to_desc(ipi_base + i);
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);

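
With this gating, request_percpu_irq() only claims the in-band SGIs (just SGI0 when pipelining), leaving the SGIs from OOB_IPI_OFFSET onward free for out-of-band delivery through irq_send_oob_ipi(). A hedged caller-side sketch follows, assuming an out-of-band companion (Dovetail/Xenomai style) has already installed its own handler for that SGI; kick_remote() is a hypothetical helper, while ipi_irq_base, OOB_IPI_OFFSET and irq_send_oob_ipi() are the symbols this patch introduces.

/* Hypothetical: poke one remote CPU over the first reserved
 * out-of-band SGI. The range check in irq_send_oob_ipi() rejects
 * anything outside [OOB_IPI_OFFSET, OOB_IPI_OFFSET + OOB_NR_IPI). */
static void kick_remote(unsigned int cpu)
{
	unsigned int irq = ipi_irq_base + OOB_IPI_OFFSET;

	irq_send_oob_ipi(irq, cpumask_of(cpu));
}
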
---|