2024-02-20 e636c8d336489bf3eed5878299e6cc045bbad077
kernel/drivers/perf/qcom_l2_pmu.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 #include <linux/acpi.h>
 #include <linux/bitops.h>
@@ -31,6 +23,7 @@
 #include <asm/barrier.h>
 #include <asm/local64.h>
 #include <asm/sysreg.h>
+#include <soc/qcom/kryo-l2-accessors.h>
 
 #define MAX_L2_CTRS 9
 
@@ -87,8 +80,6 @@
 #define L2_COUNTER_RELOAD BIT_ULL(31)
 #define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63)
 
-#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6)
-#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7)
 
 #define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE)
 
@@ -107,48 +98,7 @@
 #define L2_EVENT_STREX 0x421
 #define L2_EVENT_CLREX 0x422
 
-static DEFINE_RAW_SPINLOCK(l2_access_lock);
 
-/**
- * set_l2_indirect_reg: write value to an L2 register
- * @reg: Address of L2 register.
- * @value: Value to be written to register.
- *
- * Use architecturally required barriers for ordering between system register
- * accesses
- */
-static void set_l2_indirect_reg(u64 reg, u64 val)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&l2_access_lock, flags);
-	write_sysreg_s(reg, L2CPUSRSELR_EL1);
-	isb();
-	write_sysreg_s(val, L2CPUSRDR_EL1);
-	isb();
-	raw_spin_unlock_irqrestore(&l2_access_lock, flags);
-}
-
-/**
- * get_l2_indirect_reg: read an L2 register value
- * @reg: Address of L2 register.
- *
- * Use architecturally required barriers for ordering between system register
- * accesses
- */
-static u64 get_l2_indirect_reg(u64 reg)
-{
-	u64 val;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&l2_access_lock, flags);
-	write_sysreg_s(reg, L2CPUSRSELR_EL1);
-	isb();
-	val = read_sysreg_s(L2CPUSRDR_EL1);
-	raw_spin_unlock_irqrestore(&l2_access_lock, flags);
-
-	return val;
-}
 
 struct cluster_pmu;
 
@@ -219,28 +169,28 @@
 static void cluster_pmu_reset(void)
 {
 	/* Reset all counters */
-	set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
-	set_l2_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
-	set_l2_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
-	set_l2_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
+	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
+	kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
+	kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
+	kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
 }
 
 static inline void cluster_pmu_enable(void)
 {
-	set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
+	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
 }
 
 static inline void cluster_pmu_disable(void)
 {
-	set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
+	kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
 }
 
 static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
 {
 	if (idx == l2_cycle_ctr_idx)
-		set_l2_indirect_reg(L2PMCCNTR, value);
+		kryo_l2_set_indirect_reg(L2PMCCNTR, value);
 	else
-		set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
+		kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
 }
 
 static inline u64 cluster_pmu_counter_get_value(u32 idx)
@@ -248,46 +198,46 @@
 	u64 value;
 
 	if (idx == l2_cycle_ctr_idx)
-		value = get_l2_indirect_reg(L2PMCCNTR);
+		value = kryo_l2_get_indirect_reg(L2PMCCNTR);
 	else
-		value = get_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));
+		value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));
 
 	return value;
 }
 
 static inline void cluster_pmu_counter_enable(u32 idx)
 {
-	set_l2_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
+	kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
 }
 
 static inline void cluster_pmu_counter_disable(u32 idx)
 {
-	set_l2_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
+	kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
 }
 
 static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
 {
-	set_l2_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
+	kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
 }
 
 static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
 {
-	set_l2_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
+	kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
 }
 
 static inline void cluster_pmu_set_evccntcr(u32 val)
 {
-	set_l2_indirect_reg(L2PMCCNTCR, val);
+	kryo_l2_set_indirect_reg(L2PMCCNTCR, val);
 }
 
 static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
 {
-	set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
+	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
 }
 
 static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
 {
-	set_l2_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
+	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
 }
 
 static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
@@ -303,11 +253,11 @@
 
 	spin_lock_irqsave(&cluster->pmu_lock, flags);
 
-	resr_val = get_l2_indirect_reg(L2PMRESR);
+	resr_val = kryo_l2_get_indirect_reg(L2PMRESR);
 	resr_val &= ~(L2PMRESR_GROUP_MASK << shift);
 	resr_val |= field;
 	resr_val |= L2PMRESR_EN;
-	set_l2_indirect_reg(L2PMRESR, resr_val);
+	kryo_l2_set_indirect_reg(L2PMRESR, resr_val);
 
 	spin_unlock_irqrestore(&cluster->pmu_lock, flags);
 }
@@ -323,14 +273,14 @@
 		  L2PMXEVFILTER_ORGFILTER_IDINDEP |
 		  L2PMXEVFILTER_ORGFILTER_ALL;
 
-	set_l2_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
+	kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
 }
 
 static inline u32 cluster_pmu_getreset_ovsr(void)
 {
-	u32 result = get_l2_indirect_reg(L2PMOVSSET);
+	u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET);
 
-	set_l2_indirect_reg(L2PMOVSCLR, result);
+	kryo_l2_set_indirect_reg(L2PMOVSCLR, result);
 	return result;
 }
 
@@ -506,14 +456,6 @@
 	if (event->cpu < 0) {
 		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
 				    "Per-task mode not supported\n");
-		return -EOPNOTSUPP;
-	}
-
-	/* We cannot filter accurately so we just don't allow it. */
-	if (event->attr.exclude_user || event->attr.exclude_kernel ||
-	    event->attr.exclude_hv || event->attr.exclude_idle) {
-		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
-				    "Can't exclude execution levels\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -783,7 +725,7 @@
 {
 	int val;
 
-	val = get_l2_indirect_reg(L2PMCR);
+	val = kryo_l2_get_indirect_reg(L2PMCR);
 
 	/*
 	 * Read number of counters from L2PMCR and add 1
@@ -797,7 +739,7 @@
 {
 	u64 mpidr;
 	int cpu_cluster_id;
-	struct cluster_pmu *cluster = NULL;
+	struct cluster_pmu *cluster;
 
 	/*
 	 * This assumes that the cluster_id is in MPIDR[aff1] for
@@ -819,10 +761,10 @@
 			 cluster->cluster_id);
 		cpumask_set_cpu(cpu, &cluster->cluster_cpus);
 		*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
-		break;
+		return cluster;
 	}
 
-	return cluster;
+	return NULL;
 }
 
 static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
@@ -925,12 +867,8 @@
 	cluster->cluster_id = fw_cluster_id;
 
 	irq = platform_get_irq(sdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev,
-			"Failed to get valid irq for cluster %ld\n",
-			fw_cluster_id);
+	if (irq < 0)
 		return irq;
-	}
 	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 	cluster->irq = irq;
 
@@ -982,6 +920,7 @@
 		.stop = l2_cache_event_stop,
 		.read = l2_cache_event_read,
 		.attr_groups = l2_cache_pmu_attr_grps,
+		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
 	};
 
 	l2cache_pmu->num_counters = get_num_counters();
@@ -1047,6 +986,7 @@
 	.driver = {
 		.name = "qcom-l2cache-pmu",
 		.acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
+		.suppress_bind_attrs = true,
 	},
 	.probe = l2_cache_pmu_probe,
 	.remove = l2_cache_pmu_remove,