.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. |
---|
2 | | - * |
---|
3 | | - * This program is free software; you can redistribute it and/or modify |
---|
4 | | - * it under the terms of the GNU General Public License version 2 and |
---|
5 | | - * only version 2 as published by the Free Software Foundation. |
---|
6 | | - * |
---|
7 | | - * This program is distributed in the hope that it will be useful, |
---|
8 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
9 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
10 | | - * GNU General Public License for more details. |
---|
11 | 3 | */ |
---|
12 | 4 | #include <linux/acpi.h> |
---|
13 | 5 | #include <linux/bitops.h> |
---|
.. | .. |
---|
31 | 23 | #include <asm/barrier.h> |
---|
32 | 24 | #include <asm/local64.h> |
---|
33 | 25 | #include <asm/sysreg.h> |
---|
| 26 | +#include <soc/qcom/kryo-l2-accessors.h> |
---|
34 | 27 | |
---|
35 | 28 | #define MAX_L2_CTRS 9 |
---|
36 | 29 | |
---|
.. | .. |
---|
87 | 80 | #define L2_COUNTER_RELOAD BIT_ULL(31) |
---|
88 | 81 | #define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63) |
---|
89 | 82 | |
---|
90 | | -#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6) |
---|
91 | | -#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7) |
---|
92 | 83 | |
---|
93 | 84 | #define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE) |
---|
94 | 85 | |
---|
.. | .. |
---|
107 | 98 | #define L2_EVENT_STREX 0x421 |
---|
108 | 99 | #define L2_EVENT_CLREX 0x422 |
---|
109 | 100 | |
---|
110 | | -static DEFINE_RAW_SPINLOCK(l2_access_lock); |
---|
111 | 101 | |
---|
112 | | -/** |
---|
113 | | - * set_l2_indirect_reg: write value to an L2 register |
---|
114 | | - * @reg: Address of L2 register. |
---|
115 | | - * @value: Value to be written to register. |
---|
116 | | - * |
---|
117 | | - * Use architecturally required barriers for ordering between system register |
---|
118 | | - * accesses |
---|
119 | | - */ |
---|
120 | | -static void set_l2_indirect_reg(u64 reg, u64 val) |
---|
121 | | -{ |
---|
122 | | - unsigned long flags; |
---|
123 | | - |
---|
124 | | - raw_spin_lock_irqsave(&l2_access_lock, flags); |
---|
125 | | - write_sysreg_s(reg, L2CPUSRSELR_EL1); |
---|
126 | | - isb(); |
---|
127 | | - write_sysreg_s(val, L2CPUSRDR_EL1); |
---|
128 | | - isb(); |
---|
129 | | - raw_spin_unlock_irqrestore(&l2_access_lock, flags); |
---|
130 | | -} |
---|
131 | | - |
---|
132 | | -/** |
---|
133 | | - * get_l2_indirect_reg: read an L2 register value |
---|
134 | | - * @reg: Address of L2 register. |
---|
135 | | - * |
---|
136 | | - * Use architecturally required barriers for ordering between system register |
---|
137 | | - * accesses |
---|
138 | | - */ |
---|
139 | | -static u64 get_l2_indirect_reg(u64 reg) |
---|
140 | | -{ |
---|
141 | | - u64 val; |
---|
142 | | - unsigned long flags; |
---|
143 | | - |
---|
144 | | - raw_spin_lock_irqsave(&l2_access_lock, flags); |
---|
145 | | - write_sysreg_s(reg, L2CPUSRSELR_EL1); |
---|
146 | | - isb(); |
---|
147 | | - val = read_sysreg_s(L2CPUSRDR_EL1); |
---|
148 | | - raw_spin_unlock_irqrestore(&l2_access_lock, flags); |
---|
149 | | - |
---|
150 | | - return val; |
---|
151 | | -} |
---|
152 | 102 | |
---|
153 | 103 | struct cluster_pmu; |
---|
154 | 104 | |
---|
.. | .. |
---|
/*
 * Put the cluster PMU into a known-clean state: reset every counter and
 * clear all enable, interrupt-enable and overflow bits for the counters
 * present on this cluster.
 */
static void cluster_pmu_reset(void)
{
	/* Reset all counters */
	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
	/* Disable, mask interrupts for, and clear overflow on every counter */
	kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
	kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
	kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
}
---|
227 | 177 | |
---|
/* Globally enable the cluster PMU counters via the L2PMCR control register. */
static inline void cluster_pmu_enable(void)
{
	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
}
---|
232 | 182 | |
---|
/* Globally disable the cluster PMU counters via the L2PMCR control register. */
static inline void cluster_pmu_disable(void)
{
	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
}
---|
237 | 187 | |
---|
/*
 * Write @value into counter @idx: the dedicated cycle counter (L2PMCCNTR)
 * when @idx is the cycle-counter index, otherwise the per-counter event
 * count register selected through reg_idx().
 */
static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
{
	if (idx == l2_cycle_ctr_idx)
		kryo_l2_set_indirect_reg(L2PMCCNTR, value);
	else
		kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
}
---|
245 | 195 | |
---|
/*
 * Read the current value of counter @idx: the cycle counter (L2PMCCNTR)
 * when @idx is the cycle-counter index, otherwise the per-counter event
 * count register selected through reg_idx().
 */
static inline u64 cluster_pmu_counter_get_value(u32 idx)
{
	u64 value;

	if (idx == l2_cycle_ctr_idx)
		value = kryo_l2_get_indirect_reg(L2PMCCNTR);
	else
		value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));

	return value;
}
---|
257 | 207 | |
---|
/* Enable counter @idx by setting its bit in L2PMCNTENSET. */
static inline void cluster_pmu_counter_enable(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
}
---|
262 | 212 | |
---|
/* Disable counter @idx by setting its bit in L2PMCNTENCLR. */
static inline void cluster_pmu_counter_disable(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
}
---|
267 | 217 | |
---|
/* Unmask the overflow interrupt for counter @idx via L2PMINTENSET. */
static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
}
---|
272 | 222 | |
---|
/* Mask the overflow interrupt for counter @idx via L2PMINTENCLR. */
static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
{
	kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
}
---|
277 | 227 | |
---|
/* Program the cycle-counter control register (L2PMCCNTCR) with @val. */
static inline void cluster_pmu_set_evccntcr(u32 val)
{
	kryo_l2_set_indirect_reg(L2PMCCNTCR, val);
}
---|
282 | 232 | |
---|
/* Program the per-counter control register of counter @ctr with @val. */
static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
{
	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
}
---|
287 | 237 | |
---|
/* Program the event-type register of counter @ctr with @val. */
static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
{
	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
}
---|
292 | 242 | |
---|
293 | 243 | static void cluster_pmu_set_resr(struct cluster_pmu *cluster, |
---|
.. | .. |
---|
303 | 253 | |
---|
304 | 254 | spin_lock_irqsave(&cluster->pmu_lock, flags); |
---|
305 | 255 | |
---|
306 | | - resr_val = get_l2_indirect_reg(L2PMRESR); |
---|
| 256 | + resr_val = kryo_l2_get_indirect_reg(L2PMRESR); |
---|
307 | 257 | resr_val &= ~(L2PMRESR_GROUP_MASK << shift); |
---|
308 | 258 | resr_val |= field; |
---|
309 | 259 | resr_val |= L2PMRESR_EN; |
---|
310 | | - set_l2_indirect_reg(L2PMRESR, resr_val); |
---|
| 260 | + kryo_l2_set_indirect_reg(L2PMRESR, resr_val); |
---|
311 | 261 | |
---|
312 | 262 | spin_unlock_irqrestore(&cluster->pmu_lock, flags); |
---|
313 | 263 | } |
---|
.. | .. |
---|
323 | 273 | L2PMXEVFILTER_ORGFILTER_IDINDEP | |
---|
324 | 274 | L2PMXEVFILTER_ORGFILTER_ALL; |
---|
325 | 275 | |
---|
326 | | - set_l2_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val); |
---|
| 276 | + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val); |
---|
327 | 277 | } |
---|
328 | 278 | |
---|
/*
 * Read the counter-overflow status bits (L2PMOVSSET) and acknowledge
 * exactly the bits read by writing them back to L2PMOVSCLR.  Returns the
 * overflow bitmap observed.
 */
static inline u32 cluster_pmu_getreset_ovsr(void)
{
	u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET);

	kryo_l2_set_indirect_reg(L2PMOVSCLR, result);
	return result;
}
---|
336 | 286 | |
---|
.. | .. |
---|
506 | 456 | if (event->cpu < 0) { |
---|
507 | 457 | dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, |
---|
508 | 458 | "Per-task mode not supported\n"); |
---|
509 | | - return -EOPNOTSUPP; |
---|
510 | | - } |
---|
511 | | - |
---|
512 | | - /* We cannot filter accurately so we just don't allow it. */ |
---|
513 | | - if (event->attr.exclude_user || event->attr.exclude_kernel || |
---|
514 | | - event->attr.exclude_hv || event->attr.exclude_idle) { |
---|
515 | | - dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, |
---|
516 | | - "Can't exclude execution levels\n"); |
---|
517 | 459 | return -EOPNOTSUPP; |
---|
518 | 460 | } |
---|
519 | 461 | |
---|
.. | .. |
---|
783 | 725 | { |
---|
784 | 726 | int val; |
---|
785 | 727 | |
---|
786 | | - val = get_l2_indirect_reg(L2PMCR); |
---|
| 728 | + val = kryo_l2_get_indirect_reg(L2PMCR); |
---|
787 | 729 | |
---|
788 | 730 | /* |
---|
789 | 731 | * Read number of counters from L2PMCR and add 1 |
---|
.. | .. |
---|
797 | 739 | { |
---|
798 | 740 | u64 mpidr; |
---|
799 | 741 | int cpu_cluster_id; |
---|
800 | | - struct cluster_pmu *cluster = NULL; |
---|
| 742 | + struct cluster_pmu *cluster; |
---|
801 | 743 | |
---|
802 | 744 | /* |
---|
803 | 745 | * This assumes that the cluster_id is in MPIDR[aff1] for |
---|
.. | .. |
---|
819 | 761 | cluster->cluster_id); |
---|
820 | 762 | cpumask_set_cpu(cpu, &cluster->cluster_cpus); |
---|
821 | 763 | *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; |
---|
822 | | - break; |
---|
| 764 | + return cluster; |
---|
823 | 765 | } |
---|
824 | 766 | |
---|
825 | | - return cluster; |
---|
| 767 | + return NULL; |
---|
826 | 768 | } |
---|
827 | 769 | |
---|
828 | 770 | static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) |
---|
.. | .. |
---|
925 | 867 | cluster->cluster_id = fw_cluster_id; |
---|
926 | 868 | |
---|
927 | 869 | irq = platform_get_irq(sdev, 0); |
---|
928 | | - if (irq < 0) { |
---|
929 | | - dev_err(&pdev->dev, |
---|
930 | | - "Failed to get valid irq for cluster %ld\n", |
---|
931 | | - fw_cluster_id); |
---|
| 870 | + if (irq < 0) |
---|
932 | 871 | return irq; |
---|
933 | | - } |
---|
934 | 872 | irq_set_status_flags(irq, IRQ_NOAUTOEN); |
---|
935 | 873 | cluster->irq = irq; |
---|
936 | 874 | |
---|
.. | .. |
---|
982 | 920 | .stop = l2_cache_event_stop, |
---|
983 | 921 | .read = l2_cache_event_read, |
---|
984 | 922 | .attr_groups = l2_cache_pmu_attr_grps, |
---|
| 923 | + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, |
---|
985 | 924 | }; |
---|
986 | 925 | |
---|
987 | 926 | l2cache_pmu->num_counters = get_num_counters(); |
---|
.. | .. |
---|
1047 | 986 | .driver = { |
---|
1048 | 987 | .name = "qcom-l2cache-pmu", |
---|
1049 | 988 | .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match), |
---|
| 989 | + .suppress_bind_attrs = true, |
---|
1050 | 990 | }, |
---|
1051 | 991 | .probe = l2_cache_pmu_probe, |
---|
1052 | 992 | .remove = l2_cache_pmu_remove, |
---|