From 08f87f769b595151be1afeff53e144f543faa614 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 06 Dec 2023 09:51:13 +0000
Subject: [PATCH] perf/x86/amd: add support for Large Increment per Cycle events
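
Core PMCs on AMD Family 17h and later can increment more than once per
cycle for a small set of "Large Increment per Cycle" events, e.g.
PMCx003 (Retired SSE/AVX FLOPs). Counting such an event correctly
requires an even/odd pair of counters: the even counter counts the
event, and the adjacent odd counter must be programmed with the Merge
event (PMCxFFF).

Detect these events by event code, tag them with PERF_X86_EVENT_PAIR,
and constrain them to even numbered counters through the new
pair_constraint so the scheduler keeps the adjacent odd counter free
for the Merge event. While at it, drop the active counter pre-count in
amd_pmu_handle_irq() that the jiffies-based NMI window no longer
needs, and turn the family switch in amd_core_pmu_init() into if
blocks so Family 17h and newer share the pairing setup.

With this in place, a Large Increment event should be countable as
usual, for example via its raw code (umask 0xff selects all FLOP
types; consult the PPR for the target model):

  perf stat -e rff03 -- <workload>

Note: this file-only change assumes the companion definitions
(PERF_X86_EVENT_PAIR, PMU_FL_PAIR, perf_ctr_pair_en and
is_counter_pair()) already exist in arch/x86/events/perf_event.h.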
---
kernel/arch/x86/events/amd/core.c | 119 +++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 82 insertions(+), 37 deletions(-)
diff --git a/kernel/arch/x86/events/amd/core.c b/kernel/arch/x86/events/amd/core.c
index c3ec535..39eb276 100644
--- a/kernel/arch/x86/events/amd/core.c
+++ b/kernel/arch/x86/events/amd/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
@@ -12,6 +13,10 @@
static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;
+
+/* AMD Event 0xFFF: Merge. Used with Large Increment per Cycle events */
+#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
+#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)
static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
@@ -301,6 +306,25 @@
return offset;
}
+/*
+ * AMD64 events are detected based on their event codes.
+ */
+static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
+{
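+	/*
+	 * The 12-bit event code is split in the event select encoding:
+	 * event[11:8] sits in config[35:32] and event[7:0] in config[7:0],
+	 * so e.g. the Merge event 0xFFF is ((0xFULL << 32) | 0xFFULL).
+	 */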
+ return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
+}
+
+static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
+{
+ if (!(x86_pmu.flags & PMU_FL_PAIR))
+ return false;
+
+ switch (amd_get_event_code(hwc)) {
+ case 0x003: return true; /* Retired SSE/AVX FLOPs */
+ default: return false;
+ }
+}
+
static int amd_core_hw_config(struct perf_event *event)
{
if (event->attr.exclude_host && event->attr.exclude_guest)
@@ -316,15 +340,10 @@
else if (event->attr.exclude_guest)
event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
- return 0;
-}
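+	/*
+	 * Tag Large Increment per Cycle events: the scheduler must then
+	 * reserve the adjacent odd counter for the Merge event.
+	 */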
+ if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))
+ event->hw.flags |= PERF_X86_EVENT_PAIR;
-/*
- * AMD64 events are detected based on their event codes.
- */
-static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
-{
- return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
+ return 0;
}
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
@@ -652,15 +671,7 @@
*/
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int active, handled;
-
- /*
- * Obtain the active count before calling x86_pmu_handle_irq() since
- * it is possible that x86_pmu_handle_irq() may make a counter
- * inactive (through x86_pmu_stop).
- */
- active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
+ int handled;
/* Process any counter overflows */
handled = x86_pmu_handle_irq(regs);
@@ -670,8 +681,7 @@
* NMIs will be claimed if arriving within that window.
*/
if (handled) {
- this_cpu_write(perf_nmi_tstamp,
- jiffies + perf_nmi_window);
+ this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);
return handled;
}
@@ -864,6 +874,29 @@
}
}
+static struct event_constraint pair_constraint;
+
+static struct event_constraint *
+amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (amd_is_pair_event_code(hwc))
+ return &pair_constraint;
+
+ return &unconstrained;
+}
+
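+/*
+ * A paired event occupies two counters, so undo the pairing
+ * bookkeeping when its counters are released.
+ */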
+static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (is_counter_pair(hwc))
+ --cpuc->n_pair;
+}
+
static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
@@ -907,28 +940,14 @@
static int __init amd_core_pmu_init(void)
{
+ u64 even_ctr_mask = 0ULL;
+ int i;
+
if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
return 0;
- /* Avoid calulating the value each time in the NMI handler */
+ /* Avoid calculating the value each time in the NMI handler */
perf_nmi_window = msecs_to_jiffies(100);
-
- switch (boot_cpu_data.x86) {
- case 0x15:
- pr_cont("Fam15h ");
- x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
- break;
- case 0x17:
- pr_cont("Fam17h ");
- /*
- * In family 17h, there are no event constraints in the PMC hardware.
- * We fallback to using default amd_get_event_constraints.
- */
- break;
- default:
- pr_err("core perfctr but no constraints; unknown hardware!\n");
- return -ENODEV;
- }
/*
 * If core performance counter extensions exist, we must use
@@ -944,6 +963,32 @@
*/
x86_pmu.amd_nb_constraints = 0;
+ if (boot_cpu_data.x86 == 0x15) {
+ pr_cont("Fam15h ");
+ x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
+ }
+ if (boot_cpu_data.x86 >= 0x17) {
+ pr_cont("Fam17h+ ");
+ /*
+ * Family 17h and compatibles have constraints for Large
+ * Increment per Cycle events: they may only be assigned an
+ * even numbered counter that has a consecutive adjacent odd
+ * numbered counter following it.
+ */
+ for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+			even_ctr_mask |= 1ULL << i;
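+		/* e.g. with 6 core counters: even_ctr_mask = 0x15 (PMCs 0, 2, 4) */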
+
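+		/*
+		 * cmask and event code are 0: the constraint is handed out
+		 * directly by amd_get_event_constraints_f17h(). The weight
+		 * equals the number of eligible even counters,
+		 * num_counters / 2.
+		 */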
+ pair_constraint = (struct event_constraint)
+ __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
+ x86_pmu.num_counters / 2, 0,
+ PERF_X86_EVENT_PAIR);
+
+ x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
+ x86_pmu.put_event_constraints = amd_put_event_constraints_f17h;
+ x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE;
+ x86_pmu.flags |= PMU_FL_PAIR;
+ }
+
pr_cont("core perfctr, ");
return 0;
}
--
Gitblit v1.6.2