```
  ..    ..
         1  +// SPDX-License-Identifier: GPL-2.0-or-later
   1     2   /*
   2     3    * Performance event support - Freescale Embedded Performance Monitor
   3     4    *
   4     5    * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
   5     6    * Copyright 2010 Freescale Semiconductor, Inc.
   6         - *
   7         - * This program is free software; you can redistribute it and/or
   8         - * modify it under the terms of the GNU General Public License
   9         - * as published by the Free Software Foundation; either version
  10         - * 2 of the License, or (at your option) any later version.
  11     7   */
  12     8  #include <linux/kernel.h>
  13     9  #include <linux/sched.h>
```
```
  ..    ..
  34    30  static atomic_t num_events;
  35    31  /* Used to avoid races in calling reserve/release_pmc_hardware */
  36    32  static DEFINE_MUTEX(pmc_reserve_mutex);
  37        -
  38        -/*
  39        - * If interrupts were soft-disabled when a PMU interrupt occurs, treat
  40        - * it as an NMI.
  41        - */
  42        -static inline int perf_intr_is_nmi(struct pt_regs *regs)
  43        -{
  44        -#ifdef __powerpc64__
  45        -	return (regs->softe & IRQS_DISABLED);
  46        -#else
  47        -	return 0;
  48        -#endif
  49        -}
  50    33
  51    34  static void perf_event_interrupt(struct pt_regs *regs);
  52    35
```
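For context: the helper deleted above, together with the nmi_enter()/irq_enter() and nmi_exit()/irq_exit() calls dropped from perf_event_interrupt() in the next hunk, implemented the pattern sketched below, where the handler itself decided from the saved soft-interrupt state whether the PMU interrupt should be accounted as an NMI. The sketch only restates that removed logic in one place (the function name is illustrative, not part of the file); after this change the handler presumably relies on the surrounding powerpc interrupt-entry code to establish NMI or IRQ context before it runs.

```c
#include <linux/hardirq.h>
#include <linux/ptrace.h>

/*
 * Sketch of the logic this diff removes; illustrative only, not new code.
 * On 64-bit, a PMU interrupt taken while interrupts were soft-disabled
 * was treated as an NMI; otherwise it ran as an ordinary interrupt.
 */
static void fsl_emb_pmi_old_style(struct pt_regs *regs)
{
	int nmi;

#ifdef __powerpc64__
	nmi = regs->softe & IRQS_DISABLED;
#else
	nmi = 0;
#endif

	if (nmi)
		nmi_enter();
	else
		irq_enter();

	/* ... scan counters, record overflows, restart events ... */

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}
```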
```
  ..    ..
 662   645   	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 663   646   	struct perf_event *event;
 664   647   	unsigned long val;
 665         -	int found = 0;
 666         -	int nmi;
 667         -
 668         -	nmi = perf_intr_is_nmi(regs);
 669         -	if (nmi)
 670         -		nmi_enter();
 671         -	else
 672         -		irq_enter();
 673   648
 674   649   	for (i = 0; i < ppmu->n_counter; ++i) {
 675   650   		event = cpuhw->event[i];
  ..    ..
 678   653   		if ((int)val < 0) {
 679   654   			if (event) {
 680   655   				/* event has overflowed */
 681         -				found = 1;
 682   656   				record_and_restart(event, val, regs);
 683   657   			} else {
 684   658   				/*
  ..    ..
 694   668   	mtmsr(mfmsr() | MSR_PMM);
 695   669   	mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
 696   670   	isync();
 697         -
 698         -	if (nmi)
 699         -		nmi_exit();
 700         -	else
 701         -		irq_exit();
 702   671   }
 703   672
 704         -void hw_perf_event_setup(int cpu)
       673   +static int fsl_emb_pmu_prepare_cpu(unsigned int cpu)
 705   674   {
 706   675   	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 707   676
 708   677   	memset(cpuhw, 0, sizeof(*cpuhw));
       678   +
       679   +	return 0;
 709   680   }
 710   681
 711   682   int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
  ..    ..
 718   689   		pmu->name);
 719   690
 720   691   	perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
       692   +	cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
       693   +			  fsl_emb_pmu_prepare_cpu, NULL);
 721   694
 722   695   	return 0;
 723   696   }
```
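The hotplug hook added at the bottom follows the standard cpuhp_setup_state() pattern: the third argument is a per-CPU "startup" callback invoked for each CPU reaching that hotplug state (here it just clears the CPU's cpu_hw_events), and the NULL fourth argument means there is nothing to tear down when a CPU goes offline. Below is a minimal sketch of that usage, with illustrative names and an error check that the diff above does not perform.

```c
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Illustrative prepare-stage callback, mirroring fsl_emb_pmu_prepare_cpu(). */
static int example_pmu_prepare_cpu(unsigned int cpu)
{
	/* reset this CPU's PMU bookkeeping; return 0 on success */
	return 0;
}

static int __init example_pmu_init(void)
{
	int ret;

	/*
	 * Register the callback for the CPUHP_PERF_POWER prepare stage.
	 * cpuhp_setup_state() also invokes it immediately for CPUs that
	 * are already online, so no CPU is missed.
	 */
	ret = cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
				example_pmu_prepare_cpu, NULL);
	if (ret)
		pr_err("cpuhp_setup_state() failed: %d\n", ret);

	return ret;
}
early_initcall(example_pmu_init);
```

Presumably a prepare-stage state is used so the per-CPU counter state is cleared before an incoming CPU starts running, rather than being cleaned up after the fact.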