forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/x86/events/amd/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/perf_event.h>
 #include <linux/export.h>
 #include <linux/types.h>
@@ -12,6 +13,10 @@
 
 static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
 static unsigned long perf_nmi_window;
+
+/* AMD Event 0xFFF: Merge. Used with Large Increment per Cycle events */
+#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
+#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)
 
 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
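Note: the Merge event select 0xFFF does not fit in the low eight event-select bits of an AMD PerfEvtSel register, which is why the constant above splits it across bits [7:0] and [35:32]. The following standalone sketch is not part of the patch and only re-derives the value for illustration (it assumes ARCH_PERFMON_EVENTSEL_ENABLE is bit 22, as in the kernel's perf_event.h):

#include <stdio.h>
#include <stdint.h>

/* Re-derived locally for illustration; mirrors the defines added above. */
#define AMD_MERGE_EVENT		((0xFULL << 32) | 0xFFULL)
#define EVENTSEL_ENABLE		(1ULL << 22)	/* ARCH_PERFMON_EVENTSEL_ENABLE */
#define AMD_MERGE_EVENT_ENABLE	(AMD_MERGE_EVENT | EVENTSEL_ENABLE)

int main(void)
{
	uint64_t v = AMD_MERGE_EVENT_ENABLE;
	/* Event select: low byte in bits [7:0], high nibble in bits [35:32]. */
	unsigned int evsel = (unsigned int)((((v >> 32) & 0xf) << 8) | (v & 0xff));

	printf("raw value    : %#llx\n", (unsigned long long)v);	/* 0xf004000ff */
	printf("event select : %#x\n", evsel);				/* 0xfff */
	printf("enable bit   : %d\n", (int)((v >> 22) & 1));		/* 1 */
	return 0;
}

Per the upstream Large Increment per Cycle patches, this enabled Merge value is intended to be programmed into the odd counter of a pair while the even counter carries the actual event; the diff below only wires it up through perf_ctr_pair_en.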
@@ -301,6 +306,25 @@
 		return offset;
 }
 
+/*
+ * AMD64 events are detected based on their event codes.
+ */
+static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
+{
+	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
+}
+
+static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
+{
+	if (!(x86_pmu.flags & PMU_FL_PAIR))
+		return false;
+
+	switch (amd_get_event_code(hwc)) {
+	case 0x003:	return true;	/* Retired SSE/AVX FLOPs */
+	default:	return false;
+	}
+}
+
 static int amd_core_hw_config(struct perf_event *event)
 {
 	if (event->attr.exclude_host && event->attr.exclude_guest)
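As a sanity check of the extraction above, here is a small hedged sketch (standalone, not part of the patch) that reassembles the 12-bit AMD event select from a raw config value the same way amd_get_event_code() does; the example configs and the 0xff unit mask are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Mirrors amd_get_event_code(): the event-select low byte sits in config[7:0],
 * the high nibble in config[35:32] and lands in bits [11:8] of the code. */
static unsigned int get_event_code(uint64_t config)
{
	return (unsigned int)(((config >> 24) & 0x0f00) | (config & 0x00ff));
}

int main(void)
{
	/* PMCx003 (Retired SSE/AVX FLOPs); the 0xff umask is illustrative. */
	uint64_t flops = 0xff03;
	/* Arbitrary example of an extended event code that needs bits [35:32]. */
	uint64_t ext = (0x2ULL << 32) | 0x03;

	printf("%#x\n", get_event_code(flops));	/* 0x3: pairing candidate */
	printf("%#x\n", get_event_code(ext));	/* 0x203: not a pair event */
	return 0;
}

With PMU_FL_PAIR set, only code 0x003 makes amd_is_pair_event_code() return true, so pairing stays limited to the Retired SSE/AVX FLOPs event.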
@@ -652,15 +671,7 @@
  */
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
-	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-	int active, handled;
-
-	/*
-	 * Obtain the active count before calling x86_pmu_handle_irq() since
-	 * it is possible that x86_pmu_handle_irq() may make a counter
-	 * inactive (through x86_pmu_stop).
-	 */
-	active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
+	int handled;
 
 	/* Process any counter overflows */
 	handled = x86_pmu_handle_irq(regs);
@@ -670,8 +681,7 @@
 	 * NMIs will be claimed if arriving within that window.
 	 */
 	if (handled) {
-		this_cpu_write(perf_nmi_tstamp,
-			       jiffies + perf_nmi_window);
+		this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);
 
 		return handled;
 	}
@@ -864,6 +874,29 @@
 	}
 }
 
+static struct event_constraint pair_constraint;
+
+static struct event_constraint *
+amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
+			       struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (amd_is_pair_event_code(hwc))
+		return &pair_constraint;
+
+	return &unconstrained;
+}
+
+static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
+					   struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (is_counter_pair(hwc))
+		--cpuc->n_pair;
+}
+
 static ssize_t amd_event_sysfs_show(char *page, u64 config)
 {
 	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
@@ -907,28 +940,14 @@
 
 static int __init amd_core_pmu_init(void)
 {
+	u64 even_ctr_mask = 0ULL;
+	int i;
+
 	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
 		return 0;
 
-	/* Avoid calulating the value each time in the NMI handler */
+	/* Avoid calculating the value each time in the NMI handler */
 	perf_nmi_window = msecs_to_jiffies(100);
-
-	switch (boot_cpu_data.x86) {
-	case 0x15:
-		pr_cont("Fam15h ");
-		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
-		break;
-	case 0x17:
-		pr_cont("Fam17h ");
-		/*
-		 * In family 17h, there are no event constraints in the PMC hardware.
-		 * We fallback to using default amd_get_event_constraints.
-		 */
-		break;
-	default:
-		pr_err("core perfctr but no constraints; unknown hardware!\n");
-		return -ENODEV;
-	}
 
 	/*
 	 * If core performance counter extensions exists, we must use
@@ -944,6 +963,32 @@
 	 */
 	x86_pmu.amd_nb_constraints = 0;
 
+	if (boot_cpu_data.x86 == 0x15) {
+		pr_cont("Fam15h ");
+		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
+	}
+	if (boot_cpu_data.x86 >= 0x17) {
+		pr_cont("Fam17h+ ");
+		/*
+		 * Family 17h and compatibles have constraints for Large
+		 * Increment per Cycle events: they may only be assigned an
+		 * even numbered counter that has a consecutive adjacent odd
+		 * numbered counter following it.
+		 */
+		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+			even_ctr_mask |= 1 << i;
+
+		pair_constraint = (struct event_constraint)
+				  __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
+						     x86_pmu.num_counters / 2, 0,
+						     PERF_X86_EVENT_PAIR);
+
+		x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
+		x86_pmu.put_event_constraints = amd_put_event_constraints_f17h;
+		x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE;
+		x86_pmu.flags |= PMU_FL_PAIR;
+	}
+
 	pr_cont("core perfctr, ");
 	return 0;
 }
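To make the constraint numbers above concrete, the following hedged sketch (standalone, not part of the patch) repeats the even-counter mask computation; the six-counter value is an assumption for illustration, matching what Fam17h exposes with PERFCTR_CORE:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed for illustration: six core counters, as on Fam17h. */
	int num_counters = 6;
	uint64_t even_ctr_mask = 0;
	int i;

	/* Same loop as amd_core_pmu_init(): mark counters 0, 2 and 4 as the
	 * only valid starting slots, so the adjacent odd counter (1, 3, 5)
	 * stays free for the Merge event. */
	for (i = 0; i < num_counters - 1; i += 2)
		even_ctr_mask |= 1ULL << i;

	printf("idxmsk = %#llx\n", (unsigned long long)even_ctr_mask);	/* 0x15 */
	printf("weight = %d\n", num_counters / 2);			/* 3 */
	return 0;
}

The pair constraint therefore only offers the even slots, and its weight of num_counters / 2 reflects that a paired event consumes two counters at once.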