From f70575805708cabdedea7498aaa3f710fde4d920 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 31 Jan 2024 03:29:01 +0000
Subject: [PATCH] perf/x86/amd/ibs: forward precise events to IBS and support extended OpMaxCnt
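
Replace perf_ibs_precise_event() with a core_pmu_ibs_config() lookup and a
new forward_event_to_ibs() entry point, so precise events (precise_ip 1 or 2)
opened on the core PMU can be redirected to the IBS Op PMU. Drop the
ibs_notsupp exclude-attribute check in favor of PERF_PMU_CAP_NO_EXCLUDE on
both IBS PMUs.

When the CPU advertises IBS_CAPS_OPCNTEXT, accept the extended OpMaxCnt bits
(IBS_OP_MAX_CNT_EXT_MASK) in the config, period and count masks and program
and read them back alongside the legacy MaxCnt field. Because the rip
recorded in IbsOpRip is not consistent with the rsp/rbp saved in the
interrupt regs, unwind callchains from the interrupt regs instead
(__PERF_SAMPLE_CALLCHAIN_EARLY, perf_callchain()).

Make perf_event_ibs_init() return an error and unwind the fetch/op PMU
registration when registering a PMU or the NMI handler fails.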
---
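Note: the least obvious part of the diff below is how the Op sampling period
is carried in the IbsOpCtl config once IBS_CAPS_OPCNTEXT is present: the
extended MaxCnt bits stay in place, the remaining period is still programmed
divided by 16, and the count readback reverses both steps. The stand-alone
sketch below walks through that arithmetic; the two mask values are
assumptions that mirror the kernel's IBS_OP_MAX_CNT and
IBS_OP_MAX_CNT_EXT_MASK definitions and are not part of this patch.

/*
 * Stand-alone sketch (assumed mask values; builds with any C compiler):
 * split a period into the IbsOpCtl image and reconstruct it, mirroring
 * the period-programming and count-readback hunks in the diff below.
 */
#include <stdint.h>
#include <stdio.h>

#define IBS_OP_MAX_CNT          0x0000FFFFULL   /* assumed: legacy MaxCnt field  */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL << 20) /* assumed: extended MaxCnt bits */

int main(void)
{
	uint64_t period = 0x123450;  /* example period, a multiple of 16 */
	uint64_t config = 0, count;

	/* start path: keep the extended bits in place, program the rest / 16 */
	config |= period & IBS_OP_MAX_CNT_EXT_MASK;
	config |= (period & ~IBS_OP_MAX_CNT_EXT_MASK) >> 4;

	/* overflow path: rebuild the period from the register image */
	count  = (config & IBS_OP_MAX_CNT) << 4;
	count += config & IBS_OP_MAX_CNT_EXT_MASK;

	printf("period=%#llx config=%#llx count=%#llx\n",
	       (unsigned long long)period,
	       (unsigned long long)config,
	       (unsigned long long)count);
	return count == period ? 0 : 1;
}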
kernel/arch/x86/events/amd/ibs.c | 162 +++++++++++++++++++++++++++++++++++-------------------
1 file changed, 105 insertions(+), 57 deletions(-)
diff --git a/kernel/arch/x86/events/amd/ibs.c b/kernel/arch/x86/events/amd/ibs.c
index 2d9e137..354d52e 100644
--- a/kernel/arch/x86/events/amd/ibs.c
+++ b/kernel/arch/x86/events/amd/ibs.c
@@ -202,7 +202,7 @@
}
/*
- * Use IBS for precise event sampling:
+ * core pmu config -> IBS config
*
* perf record -a -e cpu-cycles:p ... # use ibs op counting cycle count
* perf record -a -e r076:p ... # same as -e cpu-cycles:p
@@ -211,25 +211,9 @@
* IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
* MSRC001_1033) is used to select either cycle or micro-ops counting
* mode.
- *
- * The rip of IBS samples has skid 0. Thus, IBS supports precise
- * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
- * rip is invalid when IBS was not able to record the rip correctly.
- * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
- *
*/
-static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
+static int core_pmu_ibs_config(struct perf_event *event, u64 *config)
{
- switch (event->attr.precise_ip) {
- case 0:
- return -ENOENT;
- case 1:
- case 2:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
switch (event->attr.config) {
@@ -255,37 +239,40 @@
return -EOPNOTSUPP;
}
-static const struct perf_event_attr ibs_notsupp = {
- .exclude_user = 1,
- .exclude_kernel = 1,
- .exclude_hv = 1,
- .exclude_idle = 1,
- .exclude_host = 1,
- .exclude_guest = 1,
-};
+/*
+ * The rip of IBS samples has skid 0. Thus, IBS supports precise
+ * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
+ * rip is invalid when IBS was not able to record the rip correctly.
+ * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
+ */
+int forward_event_to_ibs(struct perf_event *event)
+{
+ u64 config = 0;
+
+ if (!event->attr.precise_ip || event->attr.precise_ip > 2)
+ return -EOPNOTSUPP;
+
+ if (!core_pmu_ibs_config(event, &config)) {
+ event->attr.type = perf_ibs_op.pmu.type;
+ event->attr.config = config;
+ }
+ return -ENOENT;
+}
static int perf_ibs_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs;
u64 max_cnt, config;
- int ret;
perf_ibs = get_ibs_pmu(event->attr.type);
- if (perf_ibs) {
- config = event->attr.config;
- } else {
- perf_ibs = &perf_ibs_op;
- ret = perf_ibs_precise_event(event, &config);
- if (ret)
- return ret;
- }
+ if (!perf_ibs)
+ return -ENOENT;
+
+ config = event->attr.config;
if (event->pmu != &perf_ibs->pmu)
return -ENOENT;
-
- if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
- return -EINVAL;
if (config & ~perf_ibs->config_mask)
return -EINVAL;
@@ -324,6 +311,16 @@
hwc->config_base = perf_ibs->msr;
hwc->config = config;
+ /*
+ * rip recorded by IbsOpRip will not be consistent with rsp and rbp
+ * recorded as part of interrupt regs. Thus we need to use rip from
+ * interrupt regs while unwinding call stack. Setting _EARLY flag
+ * makes sure we unwind call-stack before perf sample rip is set to
+ * IbsOpRip.
+ */
+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+ event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
+
return 0;
}
@@ -353,10 +350,13 @@
* and the lower 7 bits of CurCnt are randomized.
* Otherwise CurCnt has the full 27-bit current counter value.
*/
- if (config & IBS_OP_VAL)
+ if (config & IBS_OP_VAL) {
count = (config & IBS_OP_MAX_CNT) << 4;
- else if (ibs_caps & IBS_CAPS_RDWROPCNT)
+ if (ibs_caps & IBS_CAPS_OPCNTEXT)
+ count += config & IBS_OP_MAX_CNT_EXT_MASK;
+ } else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
count = (config & IBS_OP_CUR_CNT) >> 32;
+ }
return count;
}
@@ -417,7 +417,7 @@
struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
- u64 period;
+ u64 period, config = 0;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
@@ -426,13 +426,19 @@
hwc->state = 0;
perf_ibs_set_period(perf_ibs, hwc, &period);
+ if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
+ config |= period & IBS_OP_MAX_CNT_EXT_MASK;
+ period &= ~IBS_OP_MAX_CNT_EXT_MASK;
+ }
+ config |= period >> 4;
+
/*
* Set STARTED before enabling the hardware, such that a subsequent NMI
* must observe it.
*/
set_bit(IBS_STARTED, pcpu->state);
clear_bit(IBS_STOPPING, pcpu->state);
- perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+ perf_ibs_enable_event(perf_ibs, hwc, config);
perf_event_update_userpage(event);
}
@@ -549,6 +555,7 @@
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
},
.msr = MSR_AMD64_IBSFETCHCTL,
.config_mask = IBS_FETCH_CONFIG_MASK,
@@ -573,6 +580,7 @@
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
},
.msr = MSR_AMD64_IBSOPCTL,
.config_mask = IBS_OP_CONFIG_MASK,
@@ -599,7 +607,7 @@
struct perf_ibs_data ibs_data;
int offset, size, check_rip, offset_max, throttle = 0;
unsigned int msr;
- u64 *buf, *config, period;
+ u64 *buf, *config, period, new_config = 0;
if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
@@ -693,18 +701,30 @@
data.raw = &raw;
}
+ /*
+ * rip recorded by IbsOpRip will not be consistent with rsp and rbp
+ * recorded as part of interrupt regs. Thus we need to use rip from
+ * interrupt regs while unwinding call stack.
+ */
+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+ data.callchain = perf_callchain(event, iregs);
+
	throttle = perf_event_overflow(event, &data, &regs);
out:
if (throttle) {
perf_ibs_stop(event, 0);
} else {
- period >>= 4;
+ if (perf_ibs == &perf_ibs_op) {
+ if (ibs_caps & IBS_CAPS_OPCNTEXT) {
+ new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
+ period &= ~IBS_OP_MAX_CNT_EXT_MASK;
+ }
+ if ((ibs_caps & IBS_CAPS_RDWROPCNT) && (*config & IBS_OP_CNT_CTL))
+ new_config |= *config & IBS_OP_CUR_CNT_RAND;
+ }
+ new_config |= period >> 4;
- if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
- (*config & IBS_OP_CNT_CTL))
- period |= *config & IBS_OP_CUR_CNT_RAND;
-
- perf_ibs_enable_event(perf_ibs, hwc, period);
+ perf_ibs_enable_event(perf_ibs, hwc, new_config);
}
perf_event_update_userpage(event);
@@ -761,9 +781,10 @@
return ret;
}
-static __init void perf_event_ibs_init(void)
+static __init int perf_event_ibs_init(void)
{
struct attribute **attr = ibs_op_format_attrs;
+ int ret;
/*
* Some chips fail to reset the fetch count when it is written; instead
@@ -775,21 +796,50 @@
if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
- perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
+ ret = perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
+ if (ret)
+ return ret;
if (ibs_caps & IBS_CAPS_OPCNT) {
perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
*attr++ = &format_attr_cnt_ctl.attr;
}
- perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
- register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
+ if (ibs_caps & IBS_CAPS_OPCNTEXT) {
+ perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
+ perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
+ perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
+ }
+
+ ret = perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
+ if (ret)
+ goto err_op;
+
+ ret = register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
+ if (ret)
+ goto err_nmi;
+
pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
+ return 0;
+
+err_nmi:
+ perf_pmu_unregister(&perf_ibs_op.pmu);
+ free_percpu(perf_ibs_op.pcpu);
+ perf_ibs_op.pcpu = NULL;
+err_op:
+ perf_pmu_unregister(&perf_ibs_fetch.pmu);
+ free_percpu(perf_ibs_fetch.pcpu);
+ perf_ibs_fetch.pcpu = NULL;
+
+ return ret;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
-static __init void perf_event_ibs_init(void) { }
+static __init int perf_event_ibs_init(void)
+{
+ return 0;
+}
#endif
@@ -1059,9 +1109,7 @@
x86_pmu_amd_ibs_starting_cpu,
x86_pmu_amd_ibs_dying_cpu);
- perf_event_ibs_init();
-
- return 0;
+ return perf_event_ibs_init();
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
--
Gitblit v1.6.2