From 08f87f769b595151be1afeff53e144f543faa614 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 06 Dec 2023 09:51:13 +0000
Subject: [PATCH] perf/x86/intel/pt: split ToPA metadata, add single-range output and snapshot AUX support
---
kernel/arch/x86/events/intel/pt.c | 675 +++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 471 insertions(+), 204 deletions(-)
diff --git a/kernel/arch/x86/events/intel/pt.c b/kernel/arch/x86/events/intel/pt.c
index 849f0ba..d87421a 100644
--- a/kernel/arch/x86/events/intel/pt.c
+++ b/kernel/arch/x86/events/intel/pt.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel(R) Processor Trace PMU driver for perf
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*
* Intel PT is specified in the Intel Architecture Instruction Set Extensions
* Programming Reference:
@@ -21,6 +13,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/limits.h>
#include <linux/slab.h>
#include <linux/device.h>
@@ -68,6 +62,7 @@
PT_CAP(topa_output, 0, CPUID_ECX, BIT(0)),
PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)),
PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)),
+ PT_CAP(output_subsys, 0, CPUID_ECX, BIT(3)),
PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)),
PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x7),
PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000),
@@ -75,14 +70,21 @@
PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
};
-static u32 pt_cap_get(enum pt_capabilities cap)
+u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability)
{
- struct pt_cap_desc *cd = &pt_caps[cap];
- u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
+ struct pt_cap_desc *cd = &pt_caps[capability];
+ u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
unsigned int shift = __ffs(cd->mask);
return (c & cd->mask) >> shift;
}
+EXPORT_SYMBOL_GPL(intel_pt_validate_cap);
+
+u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
+{
+ return intel_pt_validate_cap(pt_pmu.caps, cap);
+}
+EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);
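
Note on the new exported helpers: intel_pt_validate_cap() is a pure mask-and-shift over the cached CPUID leaf 0x14 registers. A minimal user-space sketch of that extraction (the sample EAX value is made up; the masks mirror the PT_CAP table above):

#include <stdint.h>
#include <stdio.h>

/* Extract a bit-field capability from a raw CPUID register value the
 * way intel_pt_validate_cap() does: mask, then shift down by the
 * position of the mask's lowest set bit (the kernel uses __ffs()). */
static uint32_t cap_extract(uint32_t reg, uint32_t mask)
{
	unsigned int shift = __builtin_ctz(mask);

	return (reg & mask) >> shift;
}

int main(void)
{
	/* Hypothetical CPUID.(EAX=14H, ECX=1):EAX value. */
	uint32_t eax = 0x02490002;

	/* num_address_ranges lives in bits 2:0 (mask 0x7). */
	printf("address ranges: %u\n", (unsigned)cap_extract(eax, 0x7));
	/* mtc_periods lives in bits 31:16 (mask 0xffff0000). */
	printf("mtc period bitmap: %#x\n", (unsigned)cap_extract(eax, 0xffff0000));
	return 0;
}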
static ssize_t pt_cap_show(struct device *cdev,
struct device_attribute *attr,
@@ -92,10 +94,10 @@
container_of(attr, struct dev_ext_attribute, attr);
enum pt_capabilities cap = (long)ea->var;
- return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
}
-static struct attribute_group pt_cap_group = {
+static struct attribute_group pt_cap_group __ro_after_init = {
.name = "caps",
};
@@ -204,9 +206,9 @@
/* model-specific quirks */
switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_BROADWELL_CORE:
- case INTEL_FAM6_BROADWELL_XEON_D:
- case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_BROADWELL:
+ case INTEL_FAM6_BROADWELL_D:
+ case INTEL_FAM6_BROADWELL_G:
case INTEL_FAM6_BROADWELL_X:
/* not setting BRANCH_EN will #GP, erratum BDM106 */
pt_pmu.branch_en_always_on = true;
@@ -225,8 +227,6 @@
if (reg & BIT(14))
pt_pmu.vmx = true;
}
-
- attrs = NULL;
for (i = 0; i < PT_CPUID_LEAVES; i++) {
cpuid_count(20, i,
@@ -310,16 +310,16 @@
return false;
if (config & RTIT_CTL_CYC_PSB) {
- if (!pt_cap_get(PT_CAP_psb_cyc))
+ if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
return false;
- allowed = pt_cap_get(PT_CAP_psb_periods);
+ allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
requested = (config & RTIT_CTL_PSB_FREQ) >>
RTIT_CTL_PSB_FREQ_OFFSET;
if (requested && (!(allowed & BIT(requested))))
return false;
- allowed = pt_cap_get(PT_CAP_cycle_thresholds);
+ allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
requested = (config & RTIT_CTL_CYC_THRESH) >>
RTIT_CTL_CYC_THRESH_OFFSET;
if (requested && (!(allowed & BIT(requested))))
@@ -334,10 +334,10 @@
* Spec says that setting mtc period bits while mtc bit in
* CPUID is 0 will #GP, so better safe than sorry.
*/
- if (!pt_cap_get(PT_CAP_mtc))
+ if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
return false;
- allowed = pt_cap_get(PT_CAP_mtc_periods);
+ allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
if (!allowed)
return false;
@@ -349,11 +349,11 @@
}
if (config & RTIT_CTL_PWR_EVT_EN &&
- !pt_cap_get(PT_CAP_power_event_trace))
+ !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
return false;
if (config & RTIT_CTL_PTW) {
- if (!pt_cap_get(PT_CAP_ptwrite))
+ if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
return false;
/* FUPonPTW without PTW doesn't make sense */
@@ -396,6 +396,20 @@
* PT configuration helpers
* These all are cpu affine and operate on a local PT
*/
+
+static void pt_config_start(struct perf_event *event)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ u64 ctl = event->hw.config;
+
+ ctl |= RTIT_CTL_TRACEEN;
+ if (READ_ONCE(pt->vmx_on))
+ perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
+ else
+ wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+
+ WRITE_ONCE(event->hw.config, ctl);
+}
/* Address ranges and their corresponding msr configuration registers */
static const struct pt_address_range {
@@ -460,7 +474,7 @@
pt->filters.filter[range].msr_b = filter->msr_b;
}
- rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
+ rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off;
}
return rtit_ctl;
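
The (u64) cast above is the functional part of this hunk: the ADDRn_CFG fields sit in the upper half of RTIT_CTL (bit offsets 32, 36, 40, 44 per the SDM), and shifting a 32-bit value by 32 or more is undefined behavior in C, so the unwidened shift could never reliably set those bits. A small stand-alone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t config = 0x1;		/* filter action for one range */
	unsigned int reg_off = 32;	/* ADDR0_CFG offset in RTIT_CTL */

	/* Wrong: a 32-bit shift by 32 is undefined behavior; on x86 it
	 * typically wraps and yields 0x1 instead of bit 32:
	 *	uint64_t bad = config << reg_off;
	 */

	/* Right: widen first, then shift. */
	uint64_t good = (uint64_t)config << reg_off;

	printf("%#llx\n", (unsigned long long)good);	/* 0x100000000 */
	return 0;
}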
@@ -469,6 +483,7 @@
static void pt_config(struct perf_event *event)
{
struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
u64 reg;
/* First round: clear STATUS, in particular the PSB byte counter. */
@@ -478,7 +493,9 @@
}
reg = pt_config_filters(event);
- reg |= RTIT_CTL_TOPA | RTIT_CTL_TRACEEN;
+ reg |= RTIT_CTL_TRACEEN;
+ if (!buf->single)
+ reg |= RTIT_CTL_TOPA;
/*
* Previously, we had BRANCH_EN on by default, but now that PT has
@@ -501,10 +518,7 @@
reg |= (event->attr.config & PT_CONFIG_MASK);
event->hw.config = reg;
- if (READ_ONCE(pt->vmx_on))
- perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
- else
- wrmsrl(MSR_IA32_RTIT_CTL, reg);
+ pt_config_start(event);
}
static void pt_config_stop(struct perf_event *event)
@@ -533,45 +547,89 @@
wmb();
}
-static void pt_config_buffer(void *buf, unsigned int topa_idx,
- unsigned int output_off)
-{
- u64 reg;
-
- wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));
-
- reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);
-
- wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
-}
+/**
+ * struct topa - ToPA metadata
+ * @list: linkage to struct pt_buffer's list of tables
+ * @offset: offset of the first entry in this table in the buffer
+ * @size: total size of all entries in this table
+ * @last: index of the last initialized entry in this table
+ * @z_count: how many times the first entry repeats
+ */
+struct topa {
+ struct list_head list;
+ u64 offset;
+ size_t size;
+ int last;
+ unsigned int z_count;
+};
/*
* Keep ToPA table-related metadata on the same page as the actual table,
* taking up a few words from the top
*/
-#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
+#define TENTS_PER_PAGE \
+ ((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))
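
A quick sanity check of the new define, assuming 4 KiB pages, an 8-byte struct topa_entry and a 40-byte struct topa (typical 64-bit sizes; the exact values depend on the build):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long entry_size = 8;	/* sizeof(struct topa_entry) */
	unsigned long meta_size = 40;	/* sizeof(struct topa): list_head(16)
					 * + u64 + size_t + int + uint */

	/* (4096 - 40) / 8 = 507 hardware-visible entries per table page,
	 * with the metadata sharing the same page. */
	printf("TENTS_PER_PAGE = %lu\n", (page_size - meta_size) / entry_size);
	return 0;
}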
/**
- * struct topa - page-sized ToPA table with metadata at the top
+ * struct topa_page - page-sized ToPA table with metadata at the top
* @table: actual ToPA table entries, as understood by PT hardware
- * @list: linkage to struct pt_buffer's list of tables
- * @phys: physical address of this page
- * @offset: offset of the first entry in this table in the buffer
- * @size: total size of all entries in this table
- * @last: index of the last initialized entry in this table
+ * @topa: metadata
*/
-struct topa {
+struct topa_page {
struct topa_entry table[TENTS_PER_PAGE];
- struct list_head list;
- u64 phys;
- u64 offset;
- size_t size;
- int last;
+ struct topa topa;
};
+static inline struct topa_page *topa_to_page(struct topa *topa)
+{
+ return container_of(topa, struct topa_page, topa);
+}
+
+static inline struct topa_page *topa_entry_to_page(struct topa_entry *te)
+{
+ return (struct topa_page *)((unsigned long)te & PAGE_MASK);
+}
+
+static inline phys_addr_t topa_pfn(struct topa *topa)
+{
+ return PFN_DOWN(virt_to_phys(topa_to_page(topa)));
+}
+
/* make -1 stand for the last table entry */
-#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
+#define TOPA_ENTRY(t, i) \
+ ((i) == -1 \
+ ? &topa_to_page(t)->table[(t)->last] \
+ : &topa_to_page(t)->table[(i)])
+#define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
+#define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)
+
+static void pt_config_buffer(struct pt_buffer *buf)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ u64 reg, mask;
+ void *base;
+
+ if (buf->single) {
+ base = buf->data_pages[0];
+ mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7;
+ } else {
+ base = topa_to_page(buf->cur)->table;
+ mask = (u64)buf->cur_idx;
+ }
+
+ reg = virt_to_phys(base);
+ if (pt->output_base != reg) {
+ pt->output_base = reg;
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg);
+ }
+
+ reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32);
+ if (pt->output_mask != reg) {
+ pt->output_mask = reg;
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
+ }
+}
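
For reference when reading pt_config_buffer() and pt_read_offset(): per the SDM, MSR_IA32_RTIT_OUTPUT_MASK keeps bits 6:0 set (0x7f), carries the ToPA table index (or the single-range size mask) in bits 31:7, and the offset within the current output region in bits 63:32. A sketch decoding a made-up value composed the same way as above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical OUTPUT_MASK as composed by pt_config_buffer():
	 * 0x7f | (mask << 7) | (output_off << 32). */
	uint64_t reg = 0x7f | (3ULL << 7) | (0x1000ULL << 32);

	printf("table idx / size mask: %llu\n",
	       (unsigned long long)((reg & 0xffffff80) >> 7));	/* 3 */
	printf("output offset:         %#llx\n",
	       (unsigned long long)(reg >> 32));		/* 0x1000 */
	return 0;
}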
/**
* topa_alloc() - allocate page-sized ToPA table
@@ -583,27 +641,26 @@
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
int node = cpu_to_node(cpu);
- struct topa *topa;
+ struct topa_page *tp;
struct page *p;
p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
if (!p)
return NULL;
- topa = page_address(p);
- topa->last = 0;
- topa->phys = page_to_phys(p);
+ tp = page_address(p);
+ tp->topa.last = 0;
/*
	 * In case of single-entry ToPA, always put the self-referencing END
* link as the 2nd entry in the table
*/
- if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
- TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
- TOPA_ENTRY(topa, 1)->end = 1;
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
+ TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
+ TOPA_ENTRY(&tp->topa, 1)->end = 1;
}
- return topa;
+ return &tp->topa;
}
/**
@@ -638,12 +695,12 @@
topa->offset = last->offset + last->size;
buf->last = topa;
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return;
BUG_ON(last->last != TENTS_PER_PAGE - 1);
- TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
+ TOPA_ENTRY(last, -1)->base = topa_pfn(topa);
TOPA_ENTRY(last, -1)->end = 1;
}
@@ -654,7 +711,7 @@
static bool topa_table_full(struct topa *topa)
{
/* single-entry ToPA is a special case */
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return !!topa->last;
return topa->last == TENTS_PER_PAGE - 1;
@@ -670,7 +727,7 @@
*
* Return: 0 on success or error code.
*/
-static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
+static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
{
struct topa *topa = buf->last;
int order = 0;
@@ -681,16 +738,22 @@
order = page_private(p);
if (topa_table_full(topa)) {
- topa = topa_alloc(buf->cpu, gfp);
+ topa = topa_alloc(cpu, gfp);
if (!topa)
return -ENOMEM;
topa_insert_table(buf, topa);
}
+ if (topa->z_count == topa->last - 1) {
+ if (order == TOPA_ENTRY(topa, topa->last - 1)->size)
+ topa->z_count++;
+ }
+
TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
TOPA_ENTRY(topa, -1)->size = order;
- if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ if (!buf->snapshot &&
+ !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(topa, -1)->intr = 1;
TOPA_ENTRY(topa, -1)->stop = 1;
}
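
The z_count bookkeeping added here is a run-length shortcut: z_count grows for as long as every inserted entry has the same size as entry 0, so a later page-to-entry lookup can resolve the whole run with one division instead of walking it. A toy model of the insertion-time invariant, using a plain array in place of the ToPA table:

#include <stdio.h>

int main(void)
{
	/* Entry sizes in pages; the first three form a run of equal sizes. */
	unsigned int entry_pages[] = { 4, 4, 4, 2, 1 };
	unsigned int z_count = 0, last;

	/* Mirror of the insertion-time check: extend the run only while
	 * it is unbroken and the new entry matches entry 0's size. */
	for (last = 1; last < 5; last++)
		if (z_count == last - 1 && entry_pages[last] == entry_pages[0])
			z_count++;

	printf("z_count = %u\n", z_count);	/* 2: entries 1 and 2 repeat */
	return 0;
}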
@@ -712,23 +775,26 @@
struct topa *topa;
list_for_each_entry(topa, &buf->tables, list) {
+ struct topa_page *tp = topa_to_page(topa);
int i;
- pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
- topa->phys, topa->offset, topa->size);
+ pr_debug("# table @%p, off %llx size %zx\n", tp->table,
+ topa->offset, topa->size);
for (i = 0; i < TENTS_PER_PAGE; i++) {
pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
- &topa->table[i],
- (unsigned long)topa->table[i].base << TOPA_SHIFT,
- sizes(topa->table[i].size),
- topa->table[i].end ? 'E' : ' ',
- topa->table[i].intr ? 'I' : ' ',
- topa->table[i].stop ? 'S' : ' ',
- *(u64 *)&topa->table[i]);
- if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
- topa->table[i].stop) ||
- topa->table[i].end)
+ &tp->table[i],
+ (unsigned long)tp->table[i].base << TOPA_SHIFT,
+ sizes(tp->table[i].size),
+ tp->table[i].end ? 'E' : ' ',
+ tp->table[i].intr ? 'I' : ' ',
+ tp->table[i].stop ? 'S' : ' ',
+ *(u64 *)&tp->table[i]);
+ if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
+ tp->table[i].stop) ||
+ tp->table[i].end)
break;
+ if (!i && topa->z_count)
+ i += topa->z_count;
}
}
}
@@ -765,12 +831,17 @@
struct pt_buffer *buf = perf_get_aux(&pt->handle);
u64 topa_idx, base, old;
+ if (buf->single) {
+ local_set(&buf->data_size, buf->output_off);
+ return;
+ }
+
/* offset of the first region in this table from the beginning of buf */
base = buf->cur->offset + buf->output_off;
/* offset of the current output region within this table */
for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
- base += sizes(buf->cur->table[topa_idx].size);
+ base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);
if (buf->snapshot) {
local_set(&buf->data_size, base);
@@ -790,7 +861,7 @@
*/
static void *pt_buffer_region(struct pt_buffer *buf)
{
- return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
+ return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
}
/**
@@ -799,7 +870,7 @@
*/
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
- return sizes(buf->cur->table[buf->cur_idx].size);
+ return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
}
/**
@@ -828,8 +899,9 @@
* means we are already losing data; need to let the decoder
* know.
*/
- if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
- buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
+ if (!buf->single &&
+ (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
+ buf->output_off == pt_buffer_region_size(buf))) {
perf_aux_output_flag(&pt->handle,
PERF_AUX_FLAG_TRUNCATED);
advance++;
@@ -840,7 +912,8 @@
* Also on single-entry ToPA implementations, interrupt will come
* before the output reaches its output region's boundary.
*/
- if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
+ !buf->snapshot &&
pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
void *head = pt_buffer_region(buf);
@@ -865,41 +938,114 @@
*/
static void pt_read_offset(struct pt_buffer *buf)
{
- u64 offset, base_topa;
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct topa_page *tp;
- rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
- buf->cur = phys_to_virt(base_topa);
+ if (!buf->single) {
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
+ tp = phys_to_virt(pt->output_base);
+ buf->cur = &tp->topa;
+ }
- rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
/* offset within current output region */
- buf->output_off = offset >> 32;
+ buf->output_off = pt->output_mask >> 32;
/* index of current output region within this table */
- buf->cur_idx = (offset & 0xffffff80) >> 7;
+ if (!buf->single)
+ buf->cur_idx = (pt->output_mask & 0xffffff80) >> 7;
}
-/**
- * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
- * @buf: PT buffer.
- * @pg: Page offset in the buffer.
- *
- * When advancing to the next output region (ToPA entry), given a page offset
- * into the buffer, we need to find the offset of the first page in the next
- * region.
- */
-static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
+static struct topa_entry *
+pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
{
- struct topa_entry *te = buf->topa_index[pg];
+ struct topa_page *tp;
+ struct topa *topa;
+ unsigned int idx, cur_pg = 0, z_pg = 0, start_idx = 0;
- /* one region */
- if (buf->first == buf->last && buf->first->last == 1)
- return pg;
+ /*
+ * Indicates a bug in the caller.
+ */
+ if (WARN_ON_ONCE(pg >= buf->nr_pages))
+ return NULL;
- do {
- pg++;
- pg &= buf->nr_pages - 1;
- } while (buf->topa_index[pg] == te);
+ /*
+ * First, find the ToPA table where @pg fits. With high
+ * order allocations, there shouldn't be many of these.
+ */
+ list_for_each_entry(topa, &buf->tables, list) {
+ if (topa->offset + topa->size > pg << PAGE_SHIFT)
+ goto found;
+ }
- return pg;
+ /*
+ * Hitting this means we have a problem in the ToPA
+ * allocation code.
+ */
+ WARN_ON_ONCE(1);
+
+ return NULL;
+
+found:
+ /*
+ * Indicates a problem in the ToPA allocation code.
+ */
+ if (WARN_ON_ONCE(topa->last == -1))
+ return NULL;
+
+ tp = topa_to_page(topa);
+ cur_pg = PFN_DOWN(topa->offset);
+ if (topa->z_count) {
+ z_pg = TOPA_ENTRY_PAGES(topa, 0) * (topa->z_count + 1);
+ start_idx = topa->z_count + 1;
+ }
+
+ /*
+ * Multiple entries at the beginning of the table have the same size,
+ * ideally all of them; if @pg falls there, the search is done.
+ */
+ if (pg >= cur_pg && pg < cur_pg + z_pg) {
+ idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0);
+ return &tp->table[idx];
+ }
+
+ /*
+ * Otherwise, slow path: iterate through the remaining entries.
+ */
+ for (idx = start_idx, cur_pg += z_pg; idx < topa->last; idx++) {
+ if (cur_pg + TOPA_ENTRY_PAGES(topa, idx) > pg)
+ return &tp->table[idx];
+
+ cur_pg += TOPA_ENTRY_PAGES(topa, idx);
+ }
+
+ /*
+ * Means we couldn't find a ToPA entry in the table that does match.
+ */
+ WARN_ON_ONCE(1);
+
+ return NULL;
+}
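
Putting the fast path of pt_topa_entry_for_page() in numbers: assume a table at buffer offset 0 whose first four entries are 16 KiB each (z_count == 3, so the run covers 16 pages), followed by smaller entries. A sketch under those assumptions:

#include <stdio.h>

int main(void)
{
	unsigned int pg = 9;		/* page offset to look up */
	unsigned int cur_pg = 0;	/* PFN_DOWN(topa->offset) */
	unsigned int z_count = 3;	/* entries 1..3 repeat entry 0 */
	unsigned int entry0_pages = 4;	/* 16 KiB entries = 4 pages each */
	unsigned int z_pg = entry0_pages * (z_count + 1);	/* 16 pages */

	if (pg >= cur_pg && pg < cur_pg + z_pg) {
		/* Fast path: one division resolves the whole run. */
		printf("entry index %u\n", (pg - cur_pg) / entry0_pages); /* 2 */
	} else {
		/* Slow path: linear walk from entry z_count + 1 onward. */
		printf("fall through to linear walk\n");
	}
	return 0;
}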
+
+static struct topa_entry *
+pt_topa_prev_entry(struct pt_buffer *buf, struct topa_entry *te)
+{
+ unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1);
+ struct topa_page *tp;
+ struct topa *topa;
+
+ tp = (struct topa_page *)table;
+ if (tp->table != te)
+ return --te;
+
+ topa = &tp->topa;
+ if (topa == buf->first)
+ topa = buf->last;
+ else
+ topa = list_prev_entry(topa, list);
+
+ tp = topa_to_page(topa);
+
+ return &tp->table[topa->last - 1];
}
/**
@@ -922,22 +1068,28 @@
unsigned long head = local64_read(&buf->head);
unsigned long idx, npages, wakeup;
+ if (buf->single)
+ return 0;
+
/* can't stop in the middle of an output region */
- if (buf->output_off + handle->size + 1 <
- sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
+ if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
return -EINVAL;
}
/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return 0;
/* clear STOP and INT from current entry */
- buf->topa_index[buf->stop_pos]->stop = 0;
- buf->topa_index[buf->stop_pos]->intr = 0;
- buf->topa_index[buf->intr_pos]->intr = 0;
+ if (buf->stop_te) {
+ buf->stop_te->stop = 0;
+ buf->stop_te->intr = 0;
+ }
+
+ if (buf->intr_te)
+ buf->intr_te->intr = 0;
/* how many pages till the STOP marker */
npages = handle->size >> PAGE_SHIFT;
@@ -948,7 +1100,12 @@
idx = (head >> PAGE_SHIFT) + npages;
idx &= buf->nr_pages - 1;
- buf->stop_pos = idx;
+
+ if (idx != buf->stop_pos) {
+ buf->stop_pos = idx;
+ buf->stop_te = pt_topa_entry_for_page(buf, idx);
+ buf->stop_te = pt_topa_prev_entry(buf, buf->stop_te);
+ }
wakeup = handle->wakeup >> PAGE_SHIFT;
@@ -958,48 +1115,17 @@
idx = wakeup;
idx &= buf->nr_pages - 1;
- buf->intr_pos = idx;
-
- buf->topa_index[buf->stop_pos]->stop = 1;
- buf->topa_index[buf->stop_pos]->intr = 1;
- buf->topa_index[buf->intr_pos]->intr = 1;
-
- return 0;
-}
-
-/**
- * pt_buffer_setup_topa_index() - build topa_index[] table of regions
- * @buf: PT buffer.
- *
- * topa_index[] references output regions indexed by offset into the
- * buffer for purposes of quick reverse lookup.
- */
-static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
-{
- struct topa *cur = buf->first, *prev = buf->last;
- struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
- *te_prev = TOPA_ENTRY(prev, prev->last - 1);
- int pg = 0, idx = 0;
-
- while (pg < buf->nr_pages) {
- int tidx;
-
- /* pages within one topa entry */
- for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
- buf->topa_index[pg] = te_prev;
-
- te_prev = te_cur;
-
- if (idx == cur->last - 1) {
- /* advance to next topa table */
- idx = 0;
- cur = list_entry(cur->list.next, struct topa, list);
- } else {
- idx++;
- }
- te_cur = TOPA_ENTRY(cur, idx);
+ if (idx != buf->intr_pos) {
+ buf->intr_pos = idx;
+ buf->intr_te = pt_topa_entry_for_page(buf, idx);
+ buf->intr_te = pt_topa_prev_entry(buf, buf->intr_te);
}
+ buf->stop_te->stop = 1;
+ buf->stop_te->intr = 1;
+ buf->intr_te->intr = 1;
+
+ return 0;
}
/**
@@ -1019,18 +1145,24 @@
*/
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
+ struct topa_page *cur_tp;
+ struct topa_entry *te;
int pg;
if (buf->snapshot)
head &= (buf->nr_pages << PAGE_SHIFT) - 1;
- pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
- pg = pt_topa_next_entry(buf, pg);
+ if (!buf->single) {
+ pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
+ te = pt_topa_entry_for_page(buf, pg);
- buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
- buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
- (unsigned long)buf->cur) / sizeof(struct topa_entry);
- buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);
+ cur_tp = topa_entry_to_page(te);
+ buf->cur = &cur_tp->topa;
+ buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
+ buf->output_off = head & (pt_buffer_region_size(buf) - 1);
+ } else {
+ buf->output_off = head;
+ }
local64_set(&buf->head, head);
local_set(&buf->data_size, 0);
@@ -1043,6 +1175,9 @@
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
struct topa *topa, *iter;
+
+ if (buf->single)
+ return;
list_for_each_entry_safe(topa, iter, &buf->tables, list) {
/*
@@ -1059,36 +1194,73 @@
* @size: Total size of all regions within this ToPA.
* @gfp: Allocation flags.
*/
-static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
- gfp_t gfp)
+static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
+ unsigned long nr_pages, gfp_t gfp)
{
struct topa *topa;
int err;
- topa = topa_alloc(buf->cpu, gfp);
+ topa = topa_alloc(cpu, gfp);
if (!topa)
return -ENOMEM;
topa_insert_table(buf, topa);
while (buf->nr_pages < nr_pages) {
- err = topa_insert_pages(buf, gfp);
+ err = topa_insert_pages(buf, cpu, gfp);
if (err) {
pt_buffer_fini_topa(buf);
return -ENOMEM;
}
}
- pt_buffer_setup_topa_index(buf);
-
/* link last table to the first one, unless we're double buffering */
- if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
- TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
+ if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
+ TOPA_ENTRY(buf->last, -1)->base = topa_pfn(buf->first);
TOPA_ENTRY(buf->last, -1)->end = 1;
}
pt_topa_dump(buf);
return 0;
+}
+
+static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
+{
+ struct page *p = virt_to_page(buf->data_pages[0]);
+ int ret = -ENOTSUPP, order = 0;
+
+ /*
+ * We can use single range output mode
+ * + in snapshot mode, where we don't need interrupts;
+ * + if the hardware supports it;
+ * + if the entire buffer is one contiguous allocation.
+ */
+ if (!buf->snapshot)
+ goto out;
+
+ if (!intel_pt_validate_hw_cap(PT_CAP_single_range_output))
+ goto out;
+
+ if (PagePrivate(p))
+ order = page_private(p);
+
+ if (1 << order != nr_pages)
+ goto out;
+
+ /*
+ * Some processors cannot always support single range for more than
+ * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might
+ * also be affected, so for now rather than trying to keep track of
+ * which ones, just disable it for all.
+ */
+ if (nr_pages > 1)
+ goto out;
+
+ buf->single = true;
+ buf->nr_pages = nr_pages;
+ ret = 0;
+out:
+ return ret;
}
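
The conditions in pt_buffer_try_single() condense to: snapshot mode (single-range output raises no PT interrupts), hardware support, one physically contiguous allocation, and at most one page because of the quoted errata. A stand-alone restatement with hypothetical stand-in flags instead of the real perf structures:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the checks in pt_buffer_try_single(). */
static bool can_use_single_range(bool snapshot, bool hw_single_range,
				 int alloc_order, int nr_pages)
{
	if (!snapshot)				/* needs no PT interrupts */
		return false;
	if (!hw_single_range)			/* PT_CAP_single_range_output */
		return false;
	if ((1 << alloc_order) != nr_pages)	/* one contiguous allocation */
		return false;
	if (nr_pages > 1)			/* TGL052/ADL037/RPL017 workaround */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", can_use_single_range(true, true, 0, 1));	/* 1 */
	printf("%d\n", can_use_single_range(true, true, 3, 8));	/* 0: >4KB */
	return 0;
}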
/**
@@ -1113,22 +1285,33 @@
if (!nr_pages)
return NULL;
+ /*
+ * Only support AUX sampling in snapshot mode, where we don't
+ * generate NMIs.
+ */
+ if (event->attr.aux_sample_size && !snapshot)
+ return NULL;
+
if (cpu == -1)
cpu = raw_smp_processor_id();
node = cpu_to_node(cpu);
- buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
- GFP_KERNEL, node);
+ buf = kzalloc_node(sizeof(struct pt_buffer), GFP_KERNEL, node);
if (!buf)
return NULL;
- buf->cpu = cpu;
buf->snapshot = snapshot;
buf->data_pages = pages;
+ buf->stop_pos = -1;
+ buf->intr_pos = -1;
INIT_LIST_HEAD(&buf->tables);
- ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
+ ret = pt_buffer_try_single(buf, nr_pages);
+ if (!ret)
+ return buf;
+
+ ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
if (ret) {
kfree(buf);
return NULL;
@@ -1154,7 +1337,7 @@
struct pt_filters *filters;
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
- if (!pt_cap_get(PT_CAP_num_address_ranges))
+ if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return 0;
filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
@@ -1176,10 +1359,36 @@
event->hw.addr_filters = NULL;
}
-static inline bool valid_kernel_ip(unsigned long ip)
+#ifdef CONFIG_X86_64
+static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
{
- return virt_addr_valid(ip) && kernel_ip(ip);
+ return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}
+
+static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+ return canonical_address(vaddr, vaddr_bits) == vaddr;
+}
+
+/* Clamp to a canonical address greater-than-or-equal-to the address given */
+static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
+{
+ return is_canonical_address(vaddr, vaddr_bits) ?
+ vaddr :
+ -BIT_ULL(vaddr_bits - 1);
+}
+
+/* Clamp to a canonical address less-than-or-equal-to the address given */
+static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
+{
+ return is_canonical_address(vaddr, vaddr_bits) ?
+ vaddr :
+ BIT_ULL(vaddr_bits - 1) - 1;
+}
+#else
+#define clamp_to_ge_canonical_addr(x, y) (x)
+#define clamp_to_le_canonical_addr(x, y) (x)
+#endif
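
A worked example of the clamping, assuming 48-bit virtual addresses: an address is canonical when bits 63:47 sign-extend bit 47, so the non-canonical hole spans 0x0000800000000000..0xffff7fffffffffff, and a filter endpoint inside the hole snaps to the nearest canonical boundary:

#include <stdint.h>
#include <stdio.h>

/* Same sign-extension trick as canonical_address() above. */
static uint64_t canonical(uint64_t vaddr, unsigned int bits)
{
	return (uint64_t)(((int64_t)vaddr << (64 - bits)) >> (64 - bits));
}

int main(void)
{
	unsigned int bits = 48;
	uint64_t in_hole = 0x0000900000000000ULL;	/* non-canonical */

	if (canonical(in_hole, bits) != in_hole) {
		/* ge-clamp: lowest canonical address above the hole. */
		printf("ge: %#llx\n",
		       (unsigned long long)-(1ULL << (bits - 1)));
		/* le-clamp: highest canonical address below the hole. */
		printf("le: %#llx\n",
		       (unsigned long long)((1ULL << (bits - 1)) - 1));
	}
	return 0;	/* prints 0xffff800000000000 and 0x7fffffffffff */
}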
static int pt_event_addr_filters_validate(struct list_head *filters)
{
@@ -1195,15 +1404,7 @@
filter->action == PERF_ADDR_FILTER_ACTION_START)
return -EOPNOTSUPP;
- if (!filter->path.dentry) {
- if (!valid_kernel_ip(filter->offset))
- return -EINVAL;
-
- if (!valid_kernel_ip(filter->offset + filter->size))
- return -EINVAL;
- }
-
- if (++range > pt_cap_get(PT_CAP_num_address_ranges))
+ if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return -EOPNOTSUPP;
}
@@ -1226,9 +1427,26 @@
if (filter->path.dentry && !fr[range].start) {
msr_a = msr_b = 0;
} else {
- /* apply the offset */
- msr_a = fr[range].start;
- msr_b = msr_a + fr[range].size - 1;
+ unsigned long n = fr[range].size - 1;
+ unsigned long a = fr[range].start;
+ unsigned long b;
+
+ if (a > ULONG_MAX - n)
+ b = ULONG_MAX;
+ else
+ b = a + n;
+ /*
+ * Apply the offset. 64-bit addresses written to the
+ * MSRs must be canonical, but the range can encompass
+ * non-canonical addresses. Since software cannot
+ * execute at non-canonical addresses, adjusting to
+ * canonical addresses does not affect the result of the
+ * address filter.
+ */
+ msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
+ msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
+ if (msr_b < msr_a)
+ msr_a = msr_b = 0;
}
filters->filter[range].msr_a = msr_a;
@@ -1294,9 +1512,8 @@
return;
}
- pt_config_buffer(buf->cur->table, buf->cur_idx,
- buf->output_off);
- pt_config(event);
+ pt_config_buffer(buf);
+ pt_config_start(event);
}
}
@@ -1359,8 +1576,7 @@
WRITE_ONCE(pt->handle_nmi, 1);
hwc->state = 0;
- pt_config_buffer(buf->cur->table, buf->cur_idx,
- buf->output_off);
+ pt_config_buffer(buf);
pt_config(event);
return;
@@ -1409,6 +1625,52 @@
buf->nr_pages << PAGE_SHIFT);
perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
}
+}
+
+static long pt_event_snapshot_aux(struct perf_event *event,
+ struct perf_output_handle *handle,
+ unsigned long size)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct pt_buffer *buf = perf_get_aux(&pt->handle);
+ unsigned long from = 0, to;
+ long ret;
+
+ if (WARN_ON_ONCE(!buf))
+ return 0;
+
+ /*
+ * Sampling is only allowed on snapshot events;
+ * see pt_buffer_setup_aux().
+ */
+ if (WARN_ON_ONCE(!buf->snapshot))
+ return 0;
+
+ /*
+ * Here, handle_nmi tells us if the tracing is on
+ */
+ if (READ_ONCE(pt->handle_nmi))
+ pt_config_stop(event);
+
+ pt_read_offset(buf);
+ pt_update_head(pt);
+
+ to = local_read(&buf->data_size);
+ if (to < size)
+ from = buf->nr_pages << PAGE_SHIFT;
+ from += to - size;
+
+ ret = perf_output_copy_aux(&pt->handle, handle, from, to);
+
+ /*
+ * If the tracing was on when we turned up, restart it.
+ * Compiler barrier not needed as we couldn't have been
+ * preempted by anything that touches pt->handle_nmi.
+ */
+ if (pt->handle_nmi)
+ pt_config_start(event);
+
+ return ret;
}
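
The from/to arithmetic in pt_event_snapshot_aux() treats the AUX area as a ring: to is the current write head within the buffer and from is to - size, wrapped by adding the buffer length when it would go negative, so perf_output_copy_aux() copies the newest size bytes across the wrap. With made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long buf_bytes = 16 * 4096;	/* nr_pages << PAGE_SHIFT */
	unsigned long to = 0x1000;		/* current data_size */
	unsigned long size = 0x3000;		/* sample size requested */
	unsigned long from = 0;

	/* Head is too close to the start: wrap backwards over the end. */
	if (to < size)
		from = buf_bytes;
	from += to - size;

	/* The copy then spans [from, buf_bytes) followed by [0, to). */
	printf("from = %#lx, to = %#lx\n", from, to);	/* 0xe000, 0x1000 */
	return 0;
}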
static void pt_event_del(struct perf_event *event, int mode)
@@ -1479,6 +1741,11 @@
pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}
+int is_intel_pt_event(struct perf_event *event)
+{
+ return event->pmu == &pt_pmu.pmu;
+}
+
static __init int pt_init(void)
{
int ret, cpu, prior_warn = 0;
@@ -1509,14 +1776,13 @@
if (ret)
return ret;
- if (!pt_cap_get(PT_CAP_topa_output)) {
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
pr_warn("ToPA output is not supported on this CPU\n");
return -ENODEV;
}
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
- pt_pmu.pmu.capabilities =
- PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
+ pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
pt_pmu.pmu.attr_groups = pt_attr_groups;
@@ -1526,13 +1792,14 @@
pt_pmu.pmu.del = pt_event_del;
pt_pmu.pmu.start = pt_event_start;
pt_pmu.pmu.stop = pt_event_stop;
+ pt_pmu.pmu.snapshot_aux = pt_event_snapshot_aux;
pt_pmu.pmu.read = pt_event_read;
pt_pmu.pmu.setup_aux = pt_buffer_setup_aux;
pt_pmu.pmu.free_aux = pt_buffer_free_aux;
pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync;
pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
pt_pmu.pmu.nr_addr_filters =
- pt_cap_get(PT_CAP_num_address_ranges);
+ intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);
ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);
--
Gitblit v1.6.2