forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * HiSilicon SoC Hardware event counters support
34 *
@@ -6,10 +7,6 @@
67 * Shaokun Zhang <zhangshaokun@hisilicon.com>
78 *
89 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
9
- *
10
- * This program is free software; you can redistribute it and/or modify
11
- * it under the terms of the GNU General Public License version 2 as
12
- * published by the Free Software Foundation.
1310 */
1411 #include <linux/bitmap.h>
1512 #include <linux/bitops.h>
@@ -18,6 +15,7 @@
1815 #include <linux/errno.h>
1916 #include <linux/interrupt.h>
2017
18
+#include <asm/cputype.h>
2119 #include <asm/local64.h>
2220
2321 #include "hisi_uncore_pmu.h"
@@ -37,6 +35,7 @@
3735
3836 return sprintf(buf, "%s\n", (char *)eattr->var);
3937 }
38
+EXPORT_SYMBOL_GPL(hisi_format_sysfs_show);
4039
4140 /*
4241 * PMU event attributes
@@ -50,6 +49,7 @@
5049
5150 return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
5251 }
52
+EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
5353
5454 /*
5555 * sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show
@@ -61,6 +61,7 @@
6161
6262 return sprintf(buf, "%d\n", hisi_pmu->on_cpu);
6363 }
64
+EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
6465
6566 static bool hisi_validate_event_group(struct perf_event *event)
6667 {
@@ -99,6 +100,7 @@
99100 {
100101 return idx >= 0 && idx < hisi_pmu->num_counters;
101102 }
103
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_counter_valid);
102104
103105 int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
104106 {
@@ -115,6 +117,7 @@
115117
116118 return idx;
117119 }
120
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);
118121
119122 static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
120123 {
@@ -141,15 +144,6 @@
141144 */
142145 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
143146 return -EOPNOTSUPP;
144
-
145
- /* counters do not have these bits */
146
- if (event->attr.exclude_user ||
147
- event->attr.exclude_kernel ||
148
- event->attr.exclude_host ||
149
- event->attr.exclude_guest ||
150
- event->attr.exclude_hv ||
151
- event->attr.exclude_idle)
152
- return -EINVAL;
153147
154148 /*
155149 * The uncore counters not specific to any CPU, so cannot
@@ -184,6 +178,7 @@
184178
185179 return 0;
186180 }
181
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init);
187182
188183 /*
189184 * Set the counter to count the event that we're interested in,
@@ -231,6 +226,7 @@
231226 /* Write start value to the hardware event counter */
232227 hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
233228 }
229
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);
234230
235231 void hisi_uncore_pmu_event_update(struct perf_event *event)
236232 {
@@ -251,6 +247,7 @@
251247 HISI_MAX_PERIOD(hisi_pmu->counter_bits);
252248 local64_add(delta, &event->count);
253249 }
250
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update);
254251
255252 void hisi_uncore_pmu_start(struct perf_event *event, int flags)
256253 {
@@ -273,6 +270,7 @@
273270 hisi_uncore_pmu_enable_event(event);
274271 perf_event_update_userpage(event);
275272 }
273
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start);
276274
277275 void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
278276 {
@@ -289,6 +287,7 @@
289287 hisi_uncore_pmu_event_update(event);
290288 hwc->state |= PERF_HES_UPTODATE;
291289 }
290
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop);
292291
293292 int hisi_uncore_pmu_add(struct perf_event *event, int flags)
294293 {
@@ -311,6 +310,7 @@
311310
312311 return 0;
313312 }
313
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add);
314314
315315 void hisi_uncore_pmu_del(struct perf_event *event, int flags)
316316 {
@@ -322,12 +322,14 @@
322322 perf_event_update_userpage(event);
323323 hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
324324 }
325
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del);
325326
326327 void hisi_uncore_pmu_read(struct perf_event *event)
327328 {
328329 /* Read hardware counter and update the perf counter statistics */
329330 hisi_uncore_pmu_event_update(event);
330331 }
332
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
331333
332334 void hisi_uncore_pmu_enable(struct pmu *pmu)
333335 {
@@ -340,6 +342,7 @@
340342
341343 hisi_pmu->ops->start_counters(hisi_pmu);
342344 }
345
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable);
343346
344347 void hisi_uncore_pmu_disable(struct pmu *pmu)
345348 {
@@ -347,30 +350,46 @@
347350
348351 hisi_pmu->ops->stop_counters(hisi_pmu);
349352 }
353
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable);
354
+
350355
351356 /*
352
- * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
353
- * If multi-threading is supported, CCL_ID is the low 3-bits in MPIDR[Aff2]
354
- * and SCCL_ID is the upper 5-bits of Aff2 field; if not, SCCL_ID
355
- * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
357
+ * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
358
+ * determined from the MPIDR_EL1, but the encoding varies by CPU:
359
+ *
360
+ * - For MT variants of TSV110:
361
+ * SCCL is Aff2[7:3], CCL is Aff2[2:0]
362
+ *
363
+ * - For other MT parts:
364
+ * SCCL is Aff3[7:0], CCL is Aff2[7:0]
365
+ *
366
+ * - For non-MT parts:
367
+ * SCCL is Aff2[7:0], CCL is Aff1[7:0]
356368 */
357
-static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
369
+static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
358370 {
359371 u64 mpidr = read_cpuid_mpidr();
372
+ int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
373
+ int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
374
+ int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
375
+ bool mt = mpidr & MPIDR_MT_BITMASK;
376
+ int sccl, ccl;
360377
361
- if (mpidr & MPIDR_MT_BITMASK) {
362
- int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
363
-
364
- if (sccl_id)
365
- *sccl_id = aff2 >> 3;
366
- if (ccl_id)
367
- *ccl_id = aff2 & 0x7;
378
+ if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
379
+ sccl = aff2 >> 3;
380
+ ccl = aff2 & 0x7;
381
+ } else if (mt) {
382
+ sccl = aff3;
383
+ ccl = aff2;
368384 } else {
369
- if (sccl_id)
370
- *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
371
- if (ccl_id)
372
- *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
385
+ sccl = aff2;
386
+ ccl = aff1;
373387 }
388
+
389
+ if (scclp)
390
+ *scclp = sccl;
391
+ if (cclp)
392
+ *cclp = ccl;
374393 }
375394
376395 /*
@@ -410,10 +429,11 @@
410429 hisi_pmu->on_cpu = cpu;
411430
412431 /* Overflow interrupt also should use the same CPU */
413
- WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
432
+ WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(cpu)));
414433
415434 return 0;
416435 }
436
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu);
417437
418438 int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
419439 {
@@ -442,7 +462,10 @@
442462 perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
443463 /* Use this CPU for event counting */
444464 hisi_pmu->on_cpu = target;
445
- WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
465
+ WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(target)));
446466
447467 return 0;
448468 }
469
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
470
+
471
+MODULE_LICENSE("GPL v2");