2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
--- a/kernel/arch/powerpc/perf/isa207-common.c
+++ b/kernel/arch/powerpc/perf/isa207-common.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Common Performance counter support functions for PowerISA v2.07 processors.
  *
  * Copyright 2009 Paul Mackerras, IBM Corporation.
  * Copyright 2013 Michael Ellerman, IBM Corporation.
  * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 #include "isa207-common.h"
 
@@ -59,7 +55,9 @@
 {
 	u64 valid_mask = EVENT_VALID_MASK;
 
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		valid_mask = p10_EVENT_VALID_MASK;
+	else if (cpu_has_feature(CPU_FTR_ARCH_300))
 		valid_mask = p9_EVENT_VALID_MASK;
 
 	return !(event & ~valid_mask);
@@ -73,6 +71,14 @@
 	return false;
 }
 
+static unsigned long sdar_mod_val(u64 event)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		return p10_SDAR_MODE(event);
+
+	return p9_SDAR_MODE(event);
+}
+
 static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
 {
 	/*
@@ -83,7 +89,7 @@
 	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling
 	 * mode and will be un-changed when setting MMCRA[63] (Marked events).
 	 *
-	 * Incase of Power9:
+	 * Incase of Power9/power10:
 	 * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
 	 * or if group already have any marked events.
 	 * For rest
@@ -94,8 +100,8 @@
 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
 		if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
 			*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
-		else if (p9_SDAR_MODE(event))
-			*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+		else if (sdar_mod_val(event))
+			*mmcra |= sdar_mod_val(event) << MMCRA_SDAR_MODE_SHIFT;
 		else
 			*mmcra |= MMCRA_SDAR_MODE_DCACHE;
 	} else
@@ -138,7 +144,11 @@
 	/*
 	 * Check the mantissa upper two bits are not zero, unless the
 	 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
+	 * Power10: thresh_cmp is replaced by l2_l3 event select.
 	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		return false;
+
 	cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
 	exp = cmp >> 7;
 
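The rule this hunk now bypasses on Power10 is easy to misread: thresh_cmp is a 10-bit field whose top 3 bits are the exponent (cmp >> 7) and whose low 7 bits are the mantissa, and the check rejects a non-zero exponent paired with a mantissa whose upper two bits are both clear. A standalone toy restatement of that rule, illustrative only and assuming the same field widths as the code above:

#include <stdbool.h>
#include <stdint.h>

/* Toy model of the THRESH_CMP_MANTISSA rule quoted in the comment:
 * with a non-zero exponent, the mantissa's upper two bits (mask 0x60)
 * must not both be zero. */
static bool toy_thresh_cmp_valid(uint64_t cmp)
{
	uint64_t exp = cmp >> 7;

	if (exp && (cmp & 0x60) == 0)
		return false;	/* e.g. cmp = 0x09f: exp = 1, mantissa = 0x1f */

	return true;
}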
@@ -237,6 +247,9 @@
 	u64 sier = mfspr(SPRN_SIER);
 	u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
 
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		mantissa = P10_MMCRA_THR_CTR_MANT(mmcra);
+
 	if (val == 0 || val == 7)
 		*weight = 0;
 	else
@@ -255,7 +268,12 @@
 
 	pmc   = (event >> EVENT_PMC_SHIFT)        & EVENT_PMC_MASK;
 	unit  = (event >> EVENT_UNIT_SHIFT)       & EVENT_UNIT_MASK;
-	cache = (event >> EVENT_CACHE_SEL_SHIFT)  & EVENT_CACHE_SEL_MASK;
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		cache = (event >> EVENT_CACHE_SEL_SHIFT) &
+			p10_EVENT_CACHE_SEL_MASK;
+	else
+		cache = (event >> EVENT_CACHE_SEL_SHIFT) &
+			EVENT_CACHE_SEL_MASK;
 	ebb   = (event >> EVENT_EBB_SHIFT)        & EVENT_EBB_MASK;
 
 	if (pmc) {
@@ -296,21 +314,39 @@
 	}
 
 	if (unit >= 6 && unit <= 9) {
-		/*
-		 * L2/L3 events contain a cache selector field, which is
-		 * supposed to be programmed into MMCRC. However MMCRC is only
-		 * HV writable, and there is no API for guest kernels to modify
-		 * it. The solution is for the hypervisor to initialise the
-		 * field to zeroes, and for us to only ever allow events that
-		 * have a cache selector of zero. The bank selector (bit 3) is
-		 * irrelevant, as long as the rest of the value is 0.
-		 */
-		if (!cpu_has_feature(CPU_FTR_ARCH_300) && (cache & 0x7))
+		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+			if (unit == 6) {
+				mask |= CNST_L2L3_GROUP_MASK;
+				value |= CNST_L2L3_GROUP_VAL(event >> p10_L2L3_EVENT_SHIFT);
+			}
+		} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+			mask  |= CNST_CACHE_GROUP_MASK;
+			value |= CNST_CACHE_GROUP_VAL(event & 0xff);
+
+			mask |= CNST_CACHE_PMC4_MASK;
+			if (pmc == 4)
+				value |= CNST_CACHE_PMC4_VAL;
+		} else if (cache & 0x7) {
+			/*
+			 * L2/L3 events contain a cache selector field, which is
+			 * supposed to be programmed into MMCRC. However MMCRC is only
+			 * HV writable, and there is no API for guest kernels to modify
+			 * it. The solution is for the hypervisor to initialise the
+			 * field to zeroes, and for us to only ever allow events that
+			 * have a cache selector of zero. The bank selector (bit 3) is
+			 * irrelevant, as long as the rest of the value is 0.
+			 */
 			return -1;
+		}
 
 	} else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
 		mask  |= CNST_L1_QUAL_MASK;
 		value |= CNST_L1_QUAL_VAL(cache);
+	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+		mask  |= CNST_RADIX_SCOPE_GROUP_MASK;
+		value |= CNST_RADIX_SCOPE_GROUP_VAL(event >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT);
 	}
 
 	if (is_event_marked(event)) {
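The mask/value pairs accumulated above feed the group scheduler in core-book3s.c: each event declares which constraint fields it cares about (mask) and what it needs in them (value), and a group is schedulable only when those demands are mutually consistent. A deliberately simplified sketch of the "shared fields must agree" half of that test (toy code, not the kernel's checker, which also handles additive counter fields):

#include <stdbool.h>
#include <stdint.h>

/* Two events can coexist under the value/mask scheme only if every
 * constraint field that both of them constrain holds the same value. */
static bool toy_constraints_agree(uint64_t mask_a, uint64_t val_a,
				  uint64_t mask_b, uint64_t val_b)
{
	uint64_t shared = mask_a & mask_b;

	return (val_a & shared) == (val_b & shared);
}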
@@ -318,11 +354,17 @@
 		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
 	}
 
-	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+		if (event_is_threshold(event)) {
+			mask  |= CNST_THRESH_CTL_SEL_MASK;
+			value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT);
+		}
+	} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
 		if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
 			mask  |= CNST_THRESH_MASK;
 			value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
-		}
+		} else if (event_is_threshold(event))
+			return -1;
 	} else {
 		/*
 		 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
@@ -369,10 +411,11 @@
 }
 
 int isa207_compute_mmcr(u64 event[], int n_ev,
-			       unsigned int hwc[], unsigned long mmcr[],
+			       unsigned int hwc[], struct mmcr_regs *mmcr,
 			       struct perf_event *pevents[])
 {
 	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
+	unsigned long mmcr3;
 	unsigned int pmc, pmc_inuse;
 	int i;
 
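Context for the new parameter type: struct mmcr_regs replaces the old unsigned long mmcr[] array, whose indices did not match register names (mmcr[2] held MMCRA and mmcr[3] held MMCR2), and it adds a slot for Power10's MMCR3. The definition below is quoted from memory of arch/powerpc/include/asm/perf_event_server.h as introduced earlier in this series; verify field order against the tree:

struct mmcr_regs {
	unsigned long mmcr0;
	unsigned long mmcr1;
	unsigned long mmcr2;
	unsigned long mmcra;
	unsigned long mmcr3;
};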
@@ -385,7 +428,14 @@
 		pmc_inuse |= 1 << pmc;
 	}
 
-	mmcra = mmcr1 = mmcr2 = 0;
+	mmcra = mmcr1 = mmcr2 = mmcr3 = 0;
+
+	/*
+	 * Disable bhrb unless explicitly requested
+	 * by setting MMCRA (BHRBRD) bit.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		mmcra |= MMCRA_BHRB_DISABLE;
 
 	/* Second pass: assign PMCs, set all MMCR1 fields */
 	for (i = 0; i < n_ev; ++i) {
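The MMCRA_BHRB_DISABLE default set here is cleared again later in this function when user space actually asks for branch sampling: has_branch_stack() is true when the event's attr.sample_type includes PERF_SAMPLE_BRANCH_STACK. A minimal user-space sketch of an event that exercises that path (illustrative; error handling and ring-buffer setup omitted):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Open a cycles event that requests branch-stack (BHRB) sampling, so
 * isa207_compute_mmcr() must clear MMCRA_BHRB_DISABLE for it. */
static int open_cycles_with_branch_stack(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;	/* makes has_branch_stack() true */
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;

	/* pid 0 (self), any CPU, no group leader, no flags */
	return (int)syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}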
@@ -422,6 +472,13 @@
 			}
 		}
 
+		/* Set RADIX_SCOPE_QUAL bit */
+		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+			val = (event[i] >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT) &
+				p10_EVENT_RADIX_SCOPE_QUAL_MASK;
+			mmcr1 |= val << p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT;
+		}
+
 		if (is_event_marked(event[i])) {
 			mmcra |= MMCRA_SAMPLE_ENABLE;
 
@@ -444,14 +501,28 @@
 			mmcra |= val << MMCRA_THR_CTL_SHIFT;
 			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
 			mmcra |= val << MMCRA_THR_SEL_SHIFT;
-			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
-			mmcra |= thresh_cmp_val(val);
+			if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+				val = (event[i] >> EVENT_THR_CMP_SHIFT) &
+					EVENT_THR_CMP_MASK;
+				mmcra |= thresh_cmp_val(val);
+			}
+		}
+
+		if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) {
+			val = (event[i] >> p10_L2L3_EVENT_SHIFT) &
+				p10_EVENT_L2L3_SEL_MASK;
+			mmcr2 |= val << p10_L2L3_SEL_SHIFT;
 		}
 
 		if (event[i] & EVENT_WANTS_BHRB) {
 			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
 			mmcra |= val << MMCRA_IFM_SHIFT;
 		}
+
+		/* set MMCRA (BHRBRD) to 0 if there is user request for BHRB */
+		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
+				(has_branch_stack(pevents[i]) || (event[i] & EVENT_WANTS_BHRB)))
+			mmcra &= ~MMCRA_BHRB_DISABLE;
 
 		if (pevents[i]->attr.exclude_user)
 			mmcr2 |= MMCR2_FCP(pmc);
@@ -466,34 +537,51 @@
 				mmcr2 |= MMCR2_FCS(pmc);
 		}
 
+		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+			if (pmc <= 4) {
+				val = (event[i] >> p10_EVENT_MMCR3_SHIFT) &
+					p10_EVENT_MMCR3_MASK;
+				mmcr3 |= val << MMCR3_SHIFT(pmc);
+			}
+		}
+
 		hwc[i] = pmc - 1;
 	}
 
 	/* Return MMCRx values */
-	mmcr[0] = 0;
+	mmcr->mmcr0 = 0;
 
 	/* pmc_inuse is 1-based */
 	if (pmc_inuse & 2)
-		mmcr[0] = MMCR0_PMC1CE;
+		mmcr->mmcr0 = MMCR0_PMC1CE;
 
 	if (pmc_inuse & 0x7c)
-		mmcr[0] |= MMCR0_PMCjCE;
+		mmcr->mmcr0 |= MMCR0_PMCjCE;
 
 	/* If we're not using PMC 5 or 6, freeze them */
 	if (!(pmc_inuse & 0x60))
-		mmcr[0] |= MMCR0_FC56;
+		mmcr->mmcr0 |= MMCR0_FC56;
 
-	mmcr[1] = mmcr1;
-	mmcr[2] = mmcra;
-	mmcr[3] = mmcr2;
+	/*
+	 * Set mmcr0 (PMCCEXT) for p10 which
+	 * will restrict access to group B registers
+	 * when MMCR0 PMCC=0b00.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		mmcr->mmcr0 |= MMCR0_PMCCEXT;
+
+	mmcr->mmcr1 = mmcr1;
+	mmcr->mmcra = mmcra;
+	mmcr->mmcr2 = mmcr2;
+	mmcr->mmcr3 = mmcr3;
 
 	return 0;
 }
 
-void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
 {
 	if (pmc <= 3)
-		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
+		mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
 }
 
 static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size)