| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 |  #undef DEBUG
2 | 3 |
3 | 4 |  /*
.. | .. |
25 | 26 |
26 | 27 |  #include <asm/irq_regs.h>
27 | 28 |
| 29 | +static int armpmu_count_irq_users(const int irq);
| 30 | +
| 31 | +struct pmu_irq_ops {
| 32 | +        void (*enable_pmuirq)(unsigned int irq);
| 33 | +        void (*disable_pmuirq)(unsigned int irq);
| 34 | +        void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
| 35 | +};
| 36 | +
| 37 | +static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
| 38 | +{
| 39 | +        free_irq(irq, per_cpu_ptr(devid, cpu));
| 40 | +}
| 41 | +
| 42 | +static const struct pmu_irq_ops pmuirq_ops = {
| 43 | +        .enable_pmuirq = enable_irq,
| 44 | +        .disable_pmuirq = disable_irq_nosync,
| 45 | +        .free_pmuirq = armpmu_free_pmuirq
| 46 | +};
| 47 | +
| 48 | +static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
| 49 | +{
| 50 | +        free_nmi(irq, per_cpu_ptr(devid, cpu));
| 51 | +}
| 52 | +
| 53 | +static const struct pmu_irq_ops pmunmi_ops = {
| 54 | +        .enable_pmuirq = enable_nmi,
| 55 | +        .disable_pmuirq = disable_nmi_nosync,
| 56 | +        .free_pmuirq = armpmu_free_pmunmi
| 57 | +};
| 58 | +
| 59 | +static void armpmu_enable_percpu_pmuirq(unsigned int irq)
| 60 | +{
| 61 | +        enable_percpu_irq(irq, IRQ_TYPE_NONE);
| 62 | +}
| 63 | +
| 64 | +static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
| 65 | +                                      void __percpu *devid)
| 66 | +{
| 67 | +        if (armpmu_count_irq_users(irq) == 1)
| 68 | +                free_percpu_irq(irq, devid);
| 69 | +}
| 70 | +
| 71 | +static const struct pmu_irq_ops percpu_pmuirq_ops = {
| 72 | +        .enable_pmuirq = armpmu_enable_percpu_pmuirq,
| 73 | +        .disable_pmuirq = disable_percpu_irq,
| 74 | +        .free_pmuirq = armpmu_free_percpu_pmuirq
| 75 | +};
| 76 | +
| 77 | +static void armpmu_enable_percpu_pmunmi(unsigned int irq)
| 78 | +{
| 79 | +        if (!prepare_percpu_nmi(irq))
| 80 | +                enable_percpu_nmi(irq, IRQ_TYPE_NONE);
| 81 | +}
| 82 | +
| 83 | +static void armpmu_disable_percpu_pmunmi(unsigned int irq)
| 84 | +{
| 85 | +        disable_percpu_nmi(irq);
| 86 | +        teardown_percpu_nmi(irq);
| 87 | +}
| 88 | +
| 89 | +static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
| 90 | +                                      void __percpu *devid)
| 91 | +{
| 92 | +        if (armpmu_count_irq_users(irq) == 1)
| 93 | +                free_percpu_nmi(irq, devid);
| 94 | +}
| 95 | +
| 96 | +static const struct pmu_irq_ops percpu_pmunmi_ops = {
| 97 | +        .enable_pmuirq = armpmu_enable_percpu_pmunmi,
| 98 | +        .disable_pmuirq = armpmu_disable_percpu_pmunmi,
| 99 | +        .free_pmuirq = armpmu_free_percpu_pmunmi
| 100 | +};
| 101 | +
28 | 102 |  static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
29 | 103 |  static DEFINE_PER_CPU(int, cpu_irq);
| 104 | +static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
| 105 | +
| 106 | +static bool has_nmi;
30 | 107 |
31 | 108 |  static inline u64 arm_pmu_event_max_period(struct perf_event *event)
32 | 109 |  {
.. | .. |
321 | 398 |          if (!validate_event(event->pmu, &fake_pmu, leader))
322 | 399 |                  return -EINVAL;
323 | 400 |
| 401 | +        if (event == leader)
| 402 | +                return 0;
| 403 | +
324 | 404 |          for_each_sibling_event(sibling, leader) {
325 | 405 |                  if (!validate_event(event->pmu, &fake_pmu, sibling))
326 | 406 |                          return -EINVAL;
.. | .. |
357 | 437 |  }
358 | 438 |
359 | 439 |  static int
360 | | -event_requires_mode_exclusion(struct perf_event_attr *attr)
361 | | -{
362 | | -        return attr->exclude_idle || attr->exclude_user ||
363 | | -               attr->exclude_kernel || attr->exclude_hv;
364 | | -}
365 | | -
366 | | -static int
367 | 440 |  __hw_perf_event_init(struct perf_event *event)
368 | 441 |  {
369 | 442 |          struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
.. | .. |
393 | 466 |          /*
394 | 467 |           * Check whether we need to exclude the counter from certain modes.
395 | 468 |           */
396 | | -        if ((!armpmu->set_event_filter ||
397 | | -             armpmu->set_event_filter(hwc, &event->attr)) &&
398 | | -            event_requires_mode_exclusion(&event->attr)) {
| 469 | +        if (armpmu->set_event_filter &&
| 470 | +            armpmu->set_event_filter(hwc, &event->attr)) {
399 | 471 |                  pr_debug("ARM performance counters do not support "
400 | 472 |                           "mode exclusion\n");
401 | 473 |                  return -EOPNOTSUPP;
.. | .. |
418 | 490 |                  local64_set(&hwc->period_left, hwc->sample_period);
419 | 491 |          }
420 | 492 |
421 | | -        if (event->group_leader != event) {
422 | | -                if (validate_group(event) != 0)
423 | | -                        return -EINVAL;
424 | | -        }
425 | | -
426 | | -        return 0;
| 493 | +        return validate_group(event);
427 | 494 |  }
428 | 495 |
429 | 496 |  static int armpmu_event_init(struct perf_event *event)
.. | .. |
551 | 618 |          return count;
552 | 619 |  }
553 | 620 |
| 621 | +static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
| 622 | +{
| 623 | +        const struct pmu_irq_ops *ops = NULL;
| 624 | +        int cpu;
| 625 | +
| 626 | +        for_each_possible_cpu(cpu) {
| 627 | +                if (per_cpu(cpu_irq, cpu) != irq)
| 628 | +                        continue;
| 629 | +
| 630 | +                ops = per_cpu(cpu_irq_ops, cpu);
| 631 | +                if (ops)
| 632 | +                        break;
| 633 | +        }
| 634 | +
| 635 | +        return ops;
| 636 | +}
| 637 | +
554 | 638 |  void armpmu_free_irq(int irq, int cpu)
555 | 639 |  {
556 | 640 |          if (per_cpu(cpu_irq, cpu) == 0)
.. | .. |
558 | 642 |          if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
559 | 643 |                  return;
560 | 644 |
561 | | -        if (!irq_is_percpu_devid(irq))
562 | | -                free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
563 | | -        else if (armpmu_count_irq_users(irq) == 1)
564 | | -                free_percpu_irq(irq, &cpu_armpmu);
| 645 | +        per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
565 | 646 |
566 | 647 |          per_cpu(cpu_irq, cpu) = 0;
| 648 | +        per_cpu(cpu_irq_ops, cpu) = NULL;
567 | 649 |  }
568 | 650 |
569 | 651 |  int armpmu_request_irq(int irq, int cpu)
570 | 652 |  {
571 | 653 |          int err = 0;
572 | 654 |          const irq_handler_t handler = armpmu_dispatch_irq;
| 655 | +        const struct pmu_irq_ops *irq_ops;
| 656 | +
573 | 657 |          if (!irq)
574 | 658 |                  return 0;
.. | .. |
589 | 673 |                              IRQF_NO_THREAD;
590 | 674 |
591 | 675 |                  irq_set_status_flags(irq, IRQ_NOAUTOEN);
592 | | -                err = request_irq(irq, handler, irq_flags, "arm-pmu",
| 676 | +
| 677 | +                err = request_nmi(irq, handler, irq_flags, "arm-pmu",
593 | 678 |                                    per_cpu_ptr(&cpu_armpmu, cpu));
| 679 | +
| 680 | +                /* If cannot get an NMI, get a normal interrupt */
| 681 | +                if (err) {
| 682 | +                        err = request_irq(irq, handler, irq_flags, "arm-pmu",
| 683 | +                                          per_cpu_ptr(&cpu_armpmu, cpu));
| 684 | +                        irq_ops = &pmuirq_ops;
| 685 | +                } else {
| 686 | +                        has_nmi = true;
| 687 | +                        irq_ops = &pmunmi_ops;
| 688 | +                }
594 | 689 |          } else if (armpmu_count_irq_users(irq) == 0) {
595 | | -                err = request_percpu_irq(irq, handler, "arm-pmu",
596 | | -                                         &cpu_armpmu);
| 690 | +                err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);
| 691 | +
| 692 | +                /* If cannot get an NMI, get a normal interrupt */
| 693 | +                if (err) {
| 694 | +                        err = request_percpu_irq(irq, handler, "arm-pmu",
| 695 | +                                                 &cpu_armpmu);
| 696 | +                        irq_ops = &percpu_pmuirq_ops;
| 697 | +                } else {
| 698 | +                        has_nmi = true;
| 699 | +                        irq_ops = &percpu_pmunmi_ops;
| 700 | +                }
| 701 | +        } else {
| 702 | +                /* Per cpudevid irq was already requested by another CPU */
| 703 | +                irq_ops = armpmu_find_irq_ops(irq);
| 704 | +
| 705 | +                if (WARN_ON(!irq_ops))
| 706 | +                        err = -EINVAL;
597 | 707 |          }
598 | 708 |
599 | 709 |          if (err)
600 | 710 |                  goto err_out;
601 | 711 |
602 | 712 |          per_cpu(cpu_irq, cpu) = irq;
| 713 | +        per_cpu(cpu_irq_ops, cpu) = irq_ops;
603 | 714 |          return 0;
604 | 715 |
605 | 716 |  err_out:
.. | .. |
632 | 743 |          per_cpu(cpu_armpmu, cpu) = pmu;
633 | 744 |
634 | 745 |          irq = armpmu_get_cpu_irq(pmu, cpu);
635 | | -        if (irq) {
636 | | -                if (irq_is_percpu_devid(irq))
637 | | -                        enable_percpu_irq(irq, IRQ_TYPE_NONE);
638 | | -                else
639 | | -                        enable_irq(irq);
640 | | -        }
| 746 | +        if (irq)
| 747 | +                per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
641 | 748 |
642 | 749 |          return 0;
643 | 750 |  }
.. | .. |
651 | 758 |                  return 0;
652 | 759 |
653 | 760 |          irq = armpmu_get_cpu_irq(pmu, cpu);
654 | | -        if (irq) {
655 | | -                if (irq_is_percpu_devid(irq))
656 | | -                        disable_percpu_irq(irq);
657 | | -                else
658 | | -                        disable_irq_nosync(irq);
659 | | -        }
| 761 | +        if (irq)
| 762 | +                per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
660 | 763 |
661 | 764 |          per_cpu(cpu_armpmu, cpu) = NULL;
662 | 765 |
.. | .. |
867 | 970 |          if (ret)
868 | 971 |                  return ret;
869 | 972 |
| 973 | +        if (!pmu->set_event_filter)
| 974 | +                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
| 975 | +
870 | 976 |          ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
871 | 977 |          if (ret)
872 | 978 |                  goto out_destroy;
.. | .. |
874 | 980 |          if (!__oprofile_cpu_pmu)
875 | 981 |                  __oprofile_cpu_pmu = pmu;
876 | 982 |
877 | | -        pr_info("enabled with %s PMU driver, %d counters available\n",
878 | | -                pmu->name, pmu->num_events);
| 983 | +        pr_info("enabled with %s PMU driver, %d counters available%s\n",
| 984 | +                pmu->name, pmu->num_events,
| 985 | +                has_nmi ? ", using NMIs" : "");
879 | 986 |
880 | 987 |          return 0;
881 | 988 |