From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt
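
This appears to backport the upstream cpupower mperf_monitor changes
into the 5.10 tree:

- Replace the GPL license boilerplate in mperf_monitor.c with an
  SPDX-License-Identifier line.
- Factor the APERF/MPERF reads out of mperf_init_stats() and
  mperf_measure_stats() into a shared get_aperf_mperf() helper.
- Read APERF/MPERF with the RDPRU instruction when the CPU advertises
  CPUPOWER_CAP_AMD_RDPRU, avoiding the read_msr() path.
- Honor the per_cpu_schedule flag: bind to the measured CPU before
  reading the registers so IPI latency from read_msr() cannot let
  APERF and MPERF drift apart; the flag is enabled for AMD CPUs in
  mperf_register().
- Treat Hygon CPUs like AMD when checking MSR_AMD_HWCR to decide
  whether the TSC runs at P0/mperf frequency.
- Move needs_root into the monitor's flags sub-struct
  (.flags.needs_root).

No command-line changes are intended; the monitor should behave as
before (presumably still selectable via "cpupower monitor -m Mperf").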
---
kernel/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c | 72 ++++++++++++++++++++++++++++-------
1 file changed, 57 insertions(+), 15 deletions(-)
diff --git a/kernel/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/kernel/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index d7c2a6d..e7d48cb 100644
--- a/kernel/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/kernel/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
#if defined(__i386__) || defined(__x86_64__)
@@ -19,6 +18,10 @@
#define MSR_APERF 0xE8
#define MSR_MPERF 0xE7
+
+#define RDPRU ".byte 0x0f, 0x01, 0xfd"
+#define RDPRU_ECX_MPERF 0
+#define RDPRU_ECX_APERF 1
#define MSR_TSC 0x10
@@ -87,15 +90,51 @@
return ret;
}
-static int mperf_init_stats(unsigned int cpu)
+static int get_aperf_mperf(int cpu, unsigned long long *aval,
+ unsigned long long *mval)
{
- unsigned long long val;
+ unsigned long low_a, high_a;
+ unsigned long low_m, high_m;
int ret;
- ret = read_msr(cpu, MSR_APERF, &val);
- aperf_previous_count[cpu] = val;
- ret |= read_msr(cpu, MSR_MPERF, &val);
- mperf_previous_count[cpu] = val;
+ /*
+ * Running on the cpu from which we read the registers will
+ * prevent APERF/MPERF from going out of sync because of IPI
+ * latency introduced by read_msr()s.
+ */
+ if (mperf_monitor.flags.per_cpu_schedule) {
+ if (bind_cpu(cpu))
+ return 1;
+ }
+
+ if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_RDPRU) {
+ asm volatile(RDPRU
+ : "=a" (low_a), "=d" (high_a)
+ : "c" (RDPRU_ECX_APERF));
+ asm volatile(RDPRU
+ : "=a" (low_m), "=d" (high_m)
+ : "c" (RDPRU_ECX_MPERF));
+
+ *aval = ((low_a) | (high_a) << 32);
+ *mval = ((low_m) | (high_m) << 32);
+
+ return 0;
+ }
+
+ ret = read_msr(cpu, MSR_APERF, aval);
+ ret |= read_msr(cpu, MSR_MPERF, mval);
+
+ return ret;
+}
+
+static int mperf_init_stats(unsigned int cpu)
+{
+ unsigned long long aval, mval;
+ int ret;
+
+ ret = get_aperf_mperf(cpu, &aval, &mval);
+ aperf_previous_count[cpu] = aval;
+ mperf_previous_count[cpu] = mval;
is_valid[cpu] = !ret;
return 0;
@@ -103,13 +142,12 @@
static int mperf_measure_stats(unsigned int cpu)
{
- unsigned long long val;
+ unsigned long long aval, mval;
int ret;
- ret = read_msr(cpu, MSR_APERF, &val);
- aperf_current_count[cpu] = val;
- ret |= read_msr(cpu, MSR_MPERF, &val);
- mperf_current_count[cpu] = val;
+ ret = get_aperf_mperf(cpu, &aval, &mval);
+ aperf_current_count[cpu] = aval;
+ mperf_current_count[cpu] = mval;
is_valid[cpu] = !ret;
return 0;
@@ -241,7 +279,8 @@
if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
goto use_sysfs;
- if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) {
+ if (cpupower_cpu_info.vendor == X86_VENDOR_AMD ||
+ cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
/* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf
* freq.
* A test whether hwcr is accessable/available would be:
@@ -305,6 +344,9 @@
if (init_maxfreq_mode())
return NULL;
+ if (cpupower_cpu_info.vendor == X86_VENDOR_AMD)
+ mperf_monitor.flags.per_cpu_schedule = 1;
+
/* Free this at program termination */
is_valid = calloc(cpu_count, sizeof(int));
mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
@@ -333,7 +375,7 @@
.stop = mperf_stop,
.do_register = mperf_register,
.unregister = mperf_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
--
Gitblit v1.6.2