| Old | New | Code |
|---|---|---|
| .. | .. | .. |
| 645 | 645 | struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); |
| 646 | 646 | struct perf_event *event; |
| 647 | 647 | unsigned long val; |
| 648 | | - int found = 0; |
| 649 | 648 | |
| 650 | 649 | for (i = 0; i < ppmu->n_counter; ++i) { |
| 651 | 650 | event = cpuhw->event[i]; |
| .. | .. | .. |
| 654 | 653 | if ((int)val < 0) { |
| 655 | 654 | if (event) { |
| 656 | 655 | /* event has overflowed */ |
| 657 | | - found = 1; |
| 658 | 656 | record_and_restart(event, val, regs); |
| 659 | 657 | } else { |
| 660 | 658 | /* |
| .. | .. | .. |
| 672 | 670 | isync(); |
| 673 | 671 | } |
| 674 | 672 | |
| 675 | | - void hw_perf_event_setup(int cpu) |
| | 673 | + static int fsl_emb_pmu_prepare_cpu(unsigned int cpu) |
| 676 | 674 | { |
| 677 | 675 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); |
| 678 | 676 | |
| 679 | 677 | memset(cpuhw, 0, sizeof(*cpuhw)); |
| | 678 | + |
| | 679 | + return 0; |
| 680 | 680 | } |
| 681 | 681 | |
| 682 | 682 | int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) |
| .. | .. | .. |
| 689 | 689 | pmu->name); |
| 690 | 690 | |
| 691 | 691 | perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW); |
| | 692 | + cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare", |
| | 693 | + fsl_emb_pmu_prepare_cpu, NULL); |
| 692 | 694 | |
| 693 | 695 | return 0; |
| 694 | 696 | } |
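
The hunk above replaces the open-coded `hw_perf_event_setup()` hook with a CPU hotplug state machine callback: `cpuhp_setup_state()` registers `fsl_emb_pmu_prepare_cpu()` as the startup handler for `CPUHP_PERF_POWER`, so the per-CPU `cpu_hw_events` structure is cleared for every CPU that has already reached that state at registration time and again whenever a CPU is brought online later. The sketch below shows the same registration pattern in isolation; it is not part of the patch, and the `example_*` identifiers are placeholders.

```c
/*
 * Minimal sketch of the cpuhp_setup_state() pattern adopted above.
 * The example_* names are hypothetical stand-ins for a driver's own
 * per-CPU state and callback.
 */
#include <linux/cpuhotplug.h>
#include <linux/percpu.h>
#include <linux/string.h>

struct example_cpu_state {
	int counter;			/* placeholder per-CPU bookkeeping */
};

static DEFINE_PER_CPU(struct example_cpu_state, example_cpu_state);

/* Startup callback: runs once per CPU; must return 0 on success. */
static int example_prepare_cpu(unsigned int cpu)
{
	struct example_cpu_state *st = &per_cpu(example_cpu_state, cpu);

	memset(st, 0, sizeof(*st));
	return 0;
}

static int example_register(void)
{
	/*
	 * Install example_prepare_cpu() for the CPUHP_PERF_POWER state.
	 * It is invoked immediately for CPUs that have already passed the
	 * state and later for each CPU coming online; no teardown handler
	 * is needed, so NULL is passed.
	 */
	return cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
				 example_prepare_cpu, NULL);
}
```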