From 01573e231f18eb2d99162747186f59511f56b64d Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Fri, 08 Dec 2023 10:40:48 +0000 Subject: [PATCH] 移去rt --- kernel/tools/perf/util/header.c | 1846 ++++++++++++++++++++++++++++++++-------------------------- 1 files changed, 1,013 insertions(+), 833 deletions(-) diff --git a/kernel/tools/perf/util/header.c b/kernel/tools/perf/util/header.c index 3c0d74f..be850e9 100644 --- a/kernel/tools/perf/util/header.c +++ b/kernel/tools/perf/util/header.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <errno.h> #include <inttypes.h> -#include "util.h" #include "string2.h" #include <sys/param.h> #include <sys/types.h> @@ -13,17 +12,22 @@ #include <linux/list.h> #include <linux/kernel.h> #include <linux/bitops.h> +#include <linux/string.h> #include <linux/stringify.h> +#include <linux/zalloc.h> #include <sys/stat.h> #include <sys/utsname.h> #include <linux/time64.h> #include <dirent.h> +#include <bpf/libbpf.h> +#include <perf/cpumap.h> +#include "dso.h" #include "evlist.h" #include "evsel.h" +#include "util/evsel_fprintf.h" #include "header.h" #include "memswap.h" -#include "../perf.h" #include "trace-event.h" #include "session.h" #include "symbol.h" @@ -39,8 +43,13 @@ #include "tool.h" #include "time-utils.h" #include "units.h" +#include "util/util.h" // perf_exe() +#include "cputopo.h" +#include "bpf-event.h" +#include "clockid.h" -#include "sane_ctype.h" +#include <linux/ctype.h> +#include <internal/lib.h> /* * magic2 = "PERFILE2" @@ -62,15 +71,6 @@ struct perf_file_attr { struct perf_event_attr attr; struct perf_file_section ids; -}; - -struct feat_fd { - struct perf_header *ph; - int fd; - void *buf; /* Either buf != NULL or fd >= 0 */ - ssize_t offset; - size_t size; - struct perf_evsel *events; }; void perf_header__set_feat(struct perf_header *header, int feat) @@ -295,16 +295,16 @@ } static int write_tracing_data(struct feat_fd *ff, - struct perf_evlist *evlist) + struct evlist *evlist) { if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) return -1; - return read_tracing_data(ff->fd, &evlist->entries); + return read_tracing_data(ff->fd, &evlist->core.entries); } static int write_build_id(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { struct perf_session *session; int err; @@ -328,7 +328,7 @@ } static int write_hostname(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { struct utsname uts; int ret; @@ -341,7 +341,7 @@ } static int write_osrelease(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { struct utsname uts; int ret; @@ -354,7 +354,7 @@ } static int write_arch(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { struct utsname uts; int ret; @@ -367,7 +367,7 @@ } static int write_version(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { return do_write_string(ff, perf_version_string); } @@ -413,10 +413,8 @@ while (*p) { if (isspace(*p)) { char *r = p + 1; - char *q = r; + char *q = skip_spaces(r); *p = ' '; - while (*q && isspace(*q)) - q++; if (q != (p+1)) while ((*r++ = *q++)); } @@ -430,9 +428,27 @@ } static int write_cpudesc(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { +#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__) +#define CPUINFO_PROC { "cpu", } 
+#elif defined(__s390__) +#define CPUINFO_PROC { "vendor_id", } +#elif defined(__sh__) +#define CPUINFO_PROC { "cpu type", } +#elif defined(__alpha__) || defined(__mips__) +#define CPUINFO_PROC { "cpu model", } +#elif defined(__arm__) +#define CPUINFO_PROC { "model name", "Processor", } +#elif defined(__arc__) +#define CPUINFO_PROC { "Processor", } +#elif defined(__xtensa__) +#define CPUINFO_PROC { "core ID", } +#else +#define CPUINFO_PROC { "model name", } +#endif const char *cpuinfo_procs[] = CPUINFO_PROC; +#undef CPUINFO_PROC unsigned int i; for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) { @@ -446,7 +462,7 @@ static int write_nrcpus(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { long nr; u32 nrc, nra; @@ -468,13 +484,13 @@ } static int write_event_desc(struct feat_fd *ff, - struct perf_evlist *evlist) + struct evlist *evlist) { - struct perf_evsel *evsel; + struct evsel *evsel; u32 nre, nri, sz; int ret; - nre = evlist->nr_entries; + nre = evlist->core.nr_entries; /* * write number of events @@ -486,13 +502,13 @@ /* * size of perf_event_attr struct */ - sz = (u32)sizeof(evsel->attr); + sz = (u32)sizeof(evsel->core.attr); ret = do_write(ff, &sz, sizeof(sz)); if (ret < 0) return ret; evlist__for_each_entry(evlist, evsel) { - ret = do_write(ff, &evsel->attr, sz); + ret = do_write(ff, &evsel->core.attr, sz); if (ret < 0) return ret; /* @@ -502,7 +518,7 @@ * copy into an nri to be independent of the * type of ids, */ - nri = evsel->ids; + nri = evsel->core.ids; ret = do_write(ff, &nri, sizeof(nri)); if (ret < 0) return ret; @@ -510,13 +526,13 @@ /* * write event string as passed on cmdline */ - ret = do_write_string(ff, perf_evsel__name(evsel)); + ret = do_write_string(ff, evsel__name(evsel)); if (ret < 0) return ret; /* * write unique ids for this event */ - ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64)); + ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64)); if (ret < 0) return ret; } @@ -524,19 +540,13 @@ } static int write_cmdline(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { - char buf[MAXPATHLEN]; - u32 n; - int i, ret; + char pbuf[MAXPATHLEN], *buf; + int i, ret, n; /* actual path to perf binary */ - ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1); - if (ret <= 0) - return -1; - - /* readlink() does not add null termination */ - buf[ret] = '\0'; + buf = perf_exe(pbuf, MAXPATHLEN); /* account for binary path */ n = perf_env.nr_cmdline + 1; @@ -557,160 +567,15 @@ return 0; } -#define CORE_SIB_FMT \ - "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list" -#define THRD_SIB_FMT \ - "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list" - -struct cpu_topo { - u32 cpu_nr; - u32 core_sib; - u32 thread_sib; - char **core_siblings; - char **thread_siblings; -}; - -static int build_cpu_topo(struct cpu_topo *tp, int cpu) -{ - FILE *fp; - char filename[MAXPATHLEN]; - char *buf = NULL, *p; - size_t len = 0; - ssize_t sret; - u32 i = 0; - int ret = -1; - - sprintf(filename, CORE_SIB_FMT, cpu); - fp = fopen(filename, "r"); - if (!fp) - goto try_threads; - - sret = getline(&buf, &len, fp); - fclose(fp); - if (sret <= 0) - goto try_threads; - - p = strchr(buf, '\n'); - if (p) - *p = '\0'; - - for (i = 0; i < tp->core_sib; i++) { - if (!strcmp(buf, tp->core_siblings[i])) - break; - } - if (i == tp->core_sib) { - tp->core_siblings[i] = buf; - tp->core_sib++; - buf = NULL; - len = 0; - } - ret = 0; - -try_threads: - sprintf(filename, 
THRD_SIB_FMT, cpu); - fp = fopen(filename, "r"); - if (!fp) - goto done; - - if (getline(&buf, &len, fp) <= 0) - goto done; - - p = strchr(buf, '\n'); - if (p) - *p = '\0'; - - for (i = 0; i < tp->thread_sib; i++) { - if (!strcmp(buf, tp->thread_siblings[i])) - break; - } - if (i == tp->thread_sib) { - tp->thread_siblings[i] = buf; - tp->thread_sib++; - buf = NULL; - } - ret = 0; -done: - if(fp) - fclose(fp); - free(buf); - return ret; -} - -static void free_cpu_topo(struct cpu_topo *tp) -{ - u32 i; - - if (!tp) - return; - - for (i = 0 ; i < tp->core_sib; i++) - zfree(&tp->core_siblings[i]); - - for (i = 0 ; i < tp->thread_sib; i++) - zfree(&tp->thread_siblings[i]); - - free(tp); -} - -static struct cpu_topo *build_cpu_topology(void) -{ - struct cpu_topo *tp = NULL; - void *addr; - u32 nr, i; - size_t sz; - long ncpus; - int ret = -1; - struct cpu_map *map; - - ncpus = cpu__max_present_cpu(); - - /* build online CPU map */ - map = cpu_map__new(NULL); - if (map == NULL) { - pr_debug("failed to get system cpumap\n"); - return NULL; - } - - nr = (u32)(ncpus & UINT_MAX); - - sz = nr * sizeof(char *); - addr = calloc(1, sizeof(*tp) + 2 * sz); - if (!addr) - goto out_free; - - tp = addr; - tp->cpu_nr = nr; - addr += sizeof(*tp); - tp->core_siblings = addr; - addr += sz; - tp->thread_siblings = addr; - - for (i = 0; i < nr; i++) { - if (!cpu_map__has(map, i)) - continue; - - ret = build_cpu_topo(tp, i); - if (ret < 0) - break; - } - -out_free: - cpu_map__put(map); - if (ret) { - free_cpu_topo(tp); - tp = NULL; - } - return tp; -} static int write_cpu_topology(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { - struct cpu_topo *tp; + struct cpu_topology *tp; u32 i; int ret, j; - tp = build_cpu_topology(); + tp = cpu_topology__new(); if (!tp) return -1; @@ -747,15 +612,36 @@ if (ret < 0) return ret; } + + if (!tp->die_sib) + goto done; + + ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib)); + if (ret < 0) + goto done; + + for (i = 0; i < tp->die_sib; i++) { + ret = do_write_string(ff, tp->die_siblings[i]); + if (ret < 0) + goto done; + } + + for (j = 0; j < perf_env.nr_cpus_avail; j++) { + ret = do_write(ff, &perf_env.cpu[j].die_id, + sizeof(perf_env.cpu[j].die_id)); + if (ret < 0) + return ret; + } + done: - free_cpu_topo(tp); + cpu_topology__delete(tp); return ret; } static int write_total_mem(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { char *buf = NULL; FILE *fp; @@ -783,112 +669,45 @@ return ret; } -static int write_topo_node(struct feat_fd *ff, int node) -{ - char str[MAXPATHLEN]; - char field[32]; - char *buf = NULL, *p; - size_t len = 0; - FILE *fp; - u64 mem_total, mem_free, mem; - int ret = -1; - - sprintf(str, "/sys/devices/system/node/node%d/meminfo", node); - fp = fopen(str, "r"); - if (!fp) - return -1; - - while (getline(&buf, &len, fp) > 0) { - /* skip over invalid lines */ - if (!strchr(buf, ':')) - continue; - if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2) - goto done; - if (!strcmp(field, "MemTotal:")) - mem_total = mem; - if (!strcmp(field, "MemFree:")) - mem_free = mem; - } - - fclose(fp); - fp = NULL; - - ret = do_write(ff, &mem_total, sizeof(u64)); - if (ret) - goto done; - - ret = do_write(ff, &mem_free, sizeof(u64)); - if (ret) - goto done; - - ret = -1; - sprintf(str, "/sys/devices/system/node/node%d/cpulist", node); - - fp = fopen(str, "r"); - if (!fp) - goto done; - - if (getline(&buf, &len, fp) <= 0) - goto done; - - p = strchr(buf, 
'\n'); - if (p) - *p = '\0'; - - ret = do_write_string(ff, buf); -done: - free(buf); - if (fp) - fclose(fp); - return ret; -} - static int write_numa_topology(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { - char *buf = NULL; - size_t len = 0; - FILE *fp; - struct cpu_map *node_map = NULL; - char *c; - u32 nr, i, j; + struct numa_topology *tp; int ret = -1; + u32 i; - fp = fopen("/sys/devices/system/node/online", "r"); - if (!fp) - return -1; + tp = numa_topology__new(); + if (!tp) + return -ENOMEM; - if (getline(&buf, &len, fp) <= 0) - goto done; - - c = strchr(buf, '\n'); - if (c) - *c = '\0'; - - node_map = cpu_map__new(buf); - if (!node_map) - goto done; - - nr = (u32)node_map->nr; - - ret = do_write(ff, &nr, sizeof(nr)); + ret = do_write(ff, &tp->nr, sizeof(u32)); if (ret < 0) - goto done; + goto err; - for (i = 0; i < nr; i++) { - j = (u32)node_map->map[i]; - ret = do_write(ff, &j, sizeof(j)); - if (ret < 0) - break; + for (i = 0; i < tp->nr; i++) { + struct numa_topology_node *n = &tp->nodes[i]; - ret = write_topo_node(ff, i); + ret = do_write(ff, &n->node, sizeof(u32)); if (ret < 0) - break; + goto err; + + ret = do_write(ff, &n->mem_total, sizeof(u64)); + if (ret) + goto err; + + ret = do_write(ff, &n->mem_free, sizeof(u64)); + if (ret) + goto err; + + ret = do_write_string(ff, n->cpus); + if (ret < 0) + goto err; } -done: - free(buf); - fclose(fp); - cpu_map__put(node_map); + + ret = 0; + +err: + numa_topology__delete(tp); return ret; } @@ -905,7 +724,7 @@ */ static int write_pmu_mappings(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { struct perf_pmu *pmu = NULL; u32 pmu_num = 0; @@ -954,10 +773,10 @@ * }; */ static int write_group_desc(struct feat_fd *ff, - struct perf_evlist *evlist) + struct evlist *evlist) { u32 nr_groups = evlist->nr_groups; - struct perf_evsel *evsel; + struct evsel *evsel; int ret; ret = do_write(ff, &nr_groups, sizeof(nr_groups)); @@ -965,11 +784,10 @@ return ret; evlist__for_each_entry(evlist, evsel) { - if (perf_evsel__is_group_leader(evsel) && - evsel->nr_members > 1) { + if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) { const char *name = evsel->group_name ?: "{anon_group}"; u32 leader_idx = evsel->idx; - u32 nr_members = evsel->nr_members; + u32 nr_members = evsel->core.nr_members; ret = do_write_string(ff, name); if (ret < 0) @@ -988,37 +806,74 @@ } /* + * Return the CPU id as a raw string. + * + * Each architecture should provide a more precise id string that + * can be use to match the architecture's "mapfile". + */ +char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused) +{ + return NULL; +} + +/* Return zero when the cpuid from the mapfile.csv matches the + * cpuid string generated on this platform. + * Otherwise return non-zero. + */ +int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) +{ + regex_t re; + regmatch_t pmatch[1]; + int match; + + if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) { + /* Warn unable to generate match particular string. */ + pr_info("Invalid regular expression %s\n", mapcpuid); + return 1; + } + + match = !regexec(&re, cpuid, 1, pmatch, 0); + regfree(&re); + if (match) { + size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); + + /* Verify the entire string matched. 
*/ + if (match_len == strlen(cpuid)) + return 0; + } + return 1; +} + +/* * default get_cpuid(): nothing gets recorded * actual implementation must be in arch/$(SRCARCH)/util/header.c */ int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) { - return -1; + return ENOSYS; /* Not implemented */ } static int write_cpuid(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { char buffer[64]; int ret; ret = get_cpuid(buffer, sizeof(buffer)); - if (!ret) - goto write_it; + if (ret) + return -1; - return -1; -write_it: return do_write_string(ff, buffer); } static int write_branch_stack(struct feat_fd *ff __maybe_unused, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { return 0; } static int write_auxtrace(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { struct perf_session *session; int err; @@ -1032,6 +887,145 @@ if (err < 0) pr_err("Failed to write auxtrace index\n"); return err; +} + +static int write_clockid(struct feat_fd *ff, + struct evlist *evlist __maybe_unused) +{ + return do_write(ff, &ff->ph->env.clock.clockid_res_ns, + sizeof(ff->ph->env.clock.clockid_res_ns)); +} + +static int write_clock_data(struct feat_fd *ff, + struct evlist *evlist __maybe_unused) +{ + u64 *data64; + u32 data32; + int ret; + + /* version */ + data32 = 1; + + ret = do_write(ff, &data32, sizeof(data32)); + if (ret < 0) + return ret; + + /* clockid */ + data32 = ff->ph->env.clock.clockid; + + ret = do_write(ff, &data32, sizeof(data32)); + if (ret < 0) + return ret; + + /* TOD ref time */ + data64 = &ff->ph->env.clock.tod_ns; + + ret = do_write(ff, data64, sizeof(*data64)); + if (ret < 0) + return ret; + + /* clockid ref time */ + data64 = &ff->ph->env.clock.clockid_ns; + + return do_write(ff, data64, sizeof(*data64)); +} + +static int write_dir_format(struct feat_fd *ff, + struct evlist *evlist __maybe_unused) +{ + struct perf_session *session; + struct perf_data *data; + + session = container_of(ff->ph, struct perf_session, header); + data = session->data; + + if (WARN_ON(!perf_data__is_dir(data))) + return -1; + + return do_write(ff, &data->dir.version, sizeof(data->dir.version)); +} + +#ifdef HAVE_LIBBPF_SUPPORT +static int write_bpf_prog_info(struct feat_fd *ff, + struct evlist *evlist __maybe_unused) +{ + struct perf_env *env = &ff->ph->env; + struct rb_root *root; + struct rb_node *next; + int ret; + + down_read(&env->bpf_progs.lock); + + ret = do_write(ff, &env->bpf_progs.infos_cnt, + sizeof(env->bpf_progs.infos_cnt)); + if (ret < 0) + goto out; + + root = &env->bpf_progs.infos; + next = rb_first(root); + while (next) { + struct bpf_prog_info_node *node; + size_t len; + + node = rb_entry(next, struct bpf_prog_info_node, rb_node); + next = rb_next(&node->rb_node); + len = sizeof(struct bpf_prog_info_linear) + + node->info_linear->data_len; + + /* before writing to file, translate address to offset */ + bpf_program__bpil_addr_to_offs(node->info_linear); + ret = do_write(ff, node->info_linear, len); + /* + * translate back to address even when do_write() fails, + * so that this function never changes the data. 
+ */ + bpf_program__bpil_offs_to_addr(node->info_linear); + if (ret < 0) + goto out; + } +out: + up_read(&env->bpf_progs.lock); + return ret; +} +#else // HAVE_LIBBPF_SUPPORT +static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused, + struct evlist *evlist __maybe_unused) +{ + return 0; +} +#endif // HAVE_LIBBPF_SUPPORT + +static int write_bpf_btf(struct feat_fd *ff, + struct evlist *evlist __maybe_unused) +{ + struct perf_env *env = &ff->ph->env; + struct rb_root *root; + struct rb_node *next; + int ret; + + down_read(&env->bpf_progs.lock); + + ret = do_write(ff, &env->bpf_progs.btfs_cnt, + sizeof(env->bpf_progs.btfs_cnt)); + + if (ret < 0) + goto out; + + root = &env->bpf_progs.btfs; + next = rb_first(root); + while (next) { + struct btf_node *node; + + node = rb_entry(next, struct btf_node, rb_node); + next = rb_next(&node->rb_node); + ret = do_write(ff, &node->id, + sizeof(u32) * 2 + node->data_size); + if (ret < 0) + goto out; + } +out: + up_read(&env->bpf_progs.lock); + return ret; } static int cpu_cache_level__sort(const void *a, const void *b) @@ -1101,26 +1095,26 @@ return -1; cache->type[len] = 0; - cache->type = rtrim(cache->type); + cache->type = strim(cache->type); scnprintf(file, PATH_MAX, "%s/size", path); if (sysfs__read_str(file, &cache->size, &len)) { - free(cache->type); + zfree(&cache->type); return -1; } cache->size[len] = 0; - cache->size = rtrim(cache->size); + cache->size = strim(cache->size); scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path); if (sysfs__read_str(file, &cache->map, &len)) { - free(cache->size); - free(cache->type); + zfree(&cache->size); + zfree(&cache->type); return -1; } cache->map[len] = 0; - cache->map = rtrim(cache->map); + cache->map = strim(cache->map); return 0; } @@ -1129,21 +1123,18 @@ fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); } -static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) +#define MAX_CACHE_LVL 4 + +static int build_caches(struct cpu_cache_level caches[], u32 *cntp) { u32 i, cnt = 0; - long ncpus; u32 nr, cpu; u16 level; - ncpus = sysconf(_SC_NPROCESSORS_CONF); - if (ncpus < 0) - return -1; - - nr = (u32)(ncpus & UINT_MAX); + nr = cpu__max_cpu(); for (cpu = 0; cpu < nr; cpu++) { - for (level = 0; level < 10; level++) { + for (level = 0; level < MAX_CACHE_LVL; level++) { struct cpu_cache_level c; int err; @@ -1163,26 +1154,21 @@ caches[cnt++] = c; else cpu_cache_level__free(&c); - - if (WARN_ONCE(cnt == size, "way too many cpu caches..")) - goto out; } } - out: *cntp = cnt; return 0; } -#define MAX_CACHES (MAX_NR_CPUS * 4) - static int write_cache(struct feat_fd *ff, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { - struct cpu_cache_level caches[MAX_CACHES]; + u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL; + struct cpu_cache_level caches[max_caches]; u32 cnt = 0, i, version = 1; int ret; - ret = build_caches(caches, MAX_CACHES, &cnt); + ret = build_caches(caches, &cnt); if (ret) goto out; @@ -1228,13 +1214,13 @@ } static int write_stat(struct feat_fd *ff __maybe_unused, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { return 0; } static int write_sample_time(struct feat_fd *ff, - struct perf_evlist *evlist) + struct evlist *evlist) { int ret; @@ -1335,8 +1321,10 @@ continue; if (WARN_ONCE(cnt >= size, - "failed to write MEM_TOPOLOGY, way too many nodes\n")) + "failed to write MEM_TOPOLOGY, way too many nodes\n")) { + closedir(dir); return -1; + } ret = memory_node__read(&nodes[cnt++], 
idx); } @@ -1368,7 +1356,7 @@ * 48 - bitmap | bitmap of memory indexes that belongs to node */ static int write_mem_topology(struct feat_fd *ff __maybe_unused, - struct perf_evlist *evlist __maybe_unused) + struct evlist *evlist __maybe_unused) { static struct memory_node nodes[MAX_MEMORY_NODES]; u64 bsize, version = 1, i, nr; @@ -1414,6 +1402,62 @@ } out: + return ret; +} + +static int write_compressed(struct feat_fd *ff __maybe_unused, + struct evlist *evlist __maybe_unused) +{ + int ret; + + ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver)); + if (ret) + return ret; + + ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type)); + if (ret) + return ret; + + ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level)); + if (ret) + return ret; + + ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio)); + if (ret) + return ret; + + return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len)); +} + +static int write_cpu_pmu_caps(struct feat_fd *ff, + struct evlist *evlist __maybe_unused) +{ + struct perf_pmu *cpu_pmu = perf_pmu__find("cpu"); + struct perf_pmu_caps *caps = NULL; + int nr_caps; + int ret; + + if (!cpu_pmu) + return -ENOENT; + + nr_caps = perf_pmu__caps_parse(cpu_pmu); + if (nr_caps < 0) + return nr_caps; + + ret = do_write(ff, &nr_caps, sizeof(nr_caps)); + if (ret < 0) + return ret; + + list_for_each_entry(caps, &cpu_pmu->caps, list) { + ret = do_write_string(ff, caps->name); + if (ret < 0) + return ret; + + ret = do_write_string(ff, caps->value); + if (ret < 0) + return ret; + } + return ret; } @@ -1488,8 +1532,18 @@ str = ph->env.sibling_cores; for (i = 0; i < nr; i++) { - fprintf(fp, "# sibling cores : %s\n", str); + fprintf(fp, "# sibling sockets : %s\n", str); str += strlen(str) + 1; + } + + if (ph->env.nr_sibling_dies) { + nr = ph->env.nr_sibling_dies; + str = ph->env.sibling_dies; + + for (i = 0; i < nr; i++) { + fprintf(fp, "# sibling dies : %s\n", str); + str += strlen(str) + 1; + } } nr = ph->env.nr_sibling_threads; @@ -1500,32 +1554,188 @@ str += strlen(str) + 1; } - if (ph->env.cpu != NULL) { - for (i = 0; i < cpu_nr; i++) - fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i, - ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id); - } else - fprintf(fp, "# Core ID and Socket ID information is not available\n"); + if (ph->env.nr_sibling_dies) { + if (ph->env.cpu != NULL) { + for (i = 0; i < cpu_nr; i++) + fprintf(fp, "# CPU %d: Core ID %d, " + "Die ID %d, Socket ID %d\n", + i, ph->env.cpu[i].core_id, + ph->env.cpu[i].die_id, + ph->env.cpu[i].socket_id); + } else + fprintf(fp, "# Core ID, Die ID and Socket ID " + "information is not available\n"); + } else { + if (ph->env.cpu != NULL) { + for (i = 0; i < cpu_nr; i++) + fprintf(fp, "# CPU %d: Core ID %d, " + "Socket ID %d\n", + i, ph->env.cpu[i].core_id, + ph->env.cpu[i].socket_id); + } else + fprintf(fp, "# Core ID and Socket ID " + "information is not available\n"); + } } -static void free_event_desc(struct perf_evsel *events) +static void print_clockid(struct feat_fd *ff, FILE *fp) { - struct perf_evsel *evsel; + fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n", + ff->ph->env.clock.clockid_res_ns * 1000); +} + +static void print_clock_data(struct feat_fd *ff, FILE *fp) +{ + struct timespec clockid_ns; + char tstr[64], date[64]; + struct timeval tod_ns; + clockid_t clockid; + struct tm ltime; + u64 ref; + + if (!ff->ph->env.clock.enabled) { + fprintf(fp, "# reference time disabled\n"); + return; + } + + /* 
Compute TOD time. */ + ref = ff->ph->env.clock.tod_ns; + tod_ns.tv_sec = ref / NSEC_PER_SEC; + ref -= tod_ns.tv_sec * NSEC_PER_SEC; + tod_ns.tv_usec = ref / NSEC_PER_USEC; + + /* Compute clockid time. */ + ref = ff->ph->env.clock.clockid_ns; + clockid_ns.tv_sec = ref / NSEC_PER_SEC; + ref -= clockid_ns.tv_sec * NSEC_PER_SEC; + clockid_ns.tv_nsec = ref; + + clockid = ff->ph->env.clock.clockid; + + if (localtime_r(&tod_ns.tv_sec, <ime) == NULL) + snprintf(tstr, sizeof(tstr), "<error>"); + else { + strftime(date, sizeof(date), "%F %T", <ime); + scnprintf(tstr, sizeof(tstr), "%s.%06d", + date, (int) tod_ns.tv_usec); + } + + fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid); + fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n", + tstr, tod_ns.tv_sec, (int) tod_ns.tv_usec, + clockid_ns.tv_sec, clockid_ns.tv_nsec, + clockid_name(clockid)); +} + +static void print_dir_format(struct feat_fd *ff, FILE *fp) +{ + struct perf_session *session; + struct perf_data *data; + + session = container_of(ff->ph, struct perf_session, header); + data = session->data; + + fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version); +} + +static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp) +{ + struct perf_env *env = &ff->ph->env; + struct rb_root *root; + struct rb_node *next; + + down_read(&env->bpf_progs.lock); + + root = &env->bpf_progs.infos; + next = rb_first(root); + + while (next) { + struct bpf_prog_info_node *node; + + node = rb_entry(next, struct bpf_prog_info_node, rb_node); + next = rb_next(&node->rb_node); + + bpf_event__print_bpf_prog_info(&node->info_linear->info, + env, fp); + } + + up_read(&env->bpf_progs.lock); +} + +static void print_bpf_btf(struct feat_fd *ff, FILE *fp) +{ + struct perf_env *env = &ff->ph->env; + struct rb_root *root; + struct rb_node *next; + + down_read(&env->bpf_progs.lock); + + root = &env->bpf_progs.btfs; + next = rb_first(root); + + while (next) { + struct btf_node *node; + + node = rb_entry(next, struct btf_node, rb_node); + next = rb_next(&node->rb_node); + fprintf(fp, "# btf info of id %u\n", node->id); + } + + up_read(&env->bpf_progs.lock); +} + +static void free_event_desc(struct evsel *events) +{ + struct evsel *evsel; if (!events) return; - for (evsel = events; evsel->attr.size; evsel++) { + for (evsel = events; evsel->core.attr.size; evsel++) { zfree(&evsel->name); - zfree(&evsel->id); + zfree(&evsel->core.id); } free(events); } -static struct perf_evsel *read_event_desc(struct feat_fd *ff) +static bool perf_attr_check(struct perf_event_attr *attr) { - struct perf_evsel *evsel, *events = NULL; + if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) { + pr_warning("Reserved bits are set unexpectedly. " + "Please update perf tool.\n"); + return false; + } + + if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) { + pr_warning("Unknown sample type (0x%llx) is detected. " + "Please update perf tool.\n", + attr->sample_type); + return false; + } + + if (attr->read_format & ~(PERF_FORMAT_MAX-1)) { + pr_warning("Unknown read format (0x%llx) is detected. " + "Please update perf tool.\n", + attr->read_format); + return false; + } + + if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) && + (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) { + pr_warning("Unknown branch sample type (0x%llx) is detected. 
" + "Please update perf tool.\n", + attr->branch_sample_type); + + return false; + } + + return true; +} + +static struct evsel *read_event_desc(struct feat_fd *ff) +{ + struct evsel *evsel, *events = NULL; u64 *id; void *buf = NULL; u32 nre, sz, nr, i, j; @@ -1543,12 +1753,12 @@ if (!buf) goto error; - /* the last event terminates with evsel->attr.size == 0: */ + /* the last event terminates with evsel->core.attr.size == 0: */ events = calloc(nre + 1, sizeof(*events)); if (!events) goto error; - msz = sizeof(evsel->attr); + msz = sizeof(evsel->core.attr); if (sz < msz) msz = sz; @@ -1565,7 +1775,10 @@ if (ff->ph->needs_swap) perf_event__attr_swap(buf); - memcpy(&evsel->attr, buf, msz); + memcpy(&evsel->core.attr, buf, msz); + + if (!perf_attr_check(&evsel->core.attr)) + goto error; if (do_read_u32(ff, &nr)) goto error; @@ -1583,8 +1796,8 @@ id = calloc(nr, sizeof(*id)); if (!id) goto error; - evsel->ids = nr; - evsel->id = id; + evsel->core.ids = nr; + evsel->core.id = id; for (j = 0 ; j < nr; j++) { if (do_read_u64(ff, id)) @@ -1609,7 +1822,7 @@ static void print_event_desc(struct feat_fd *ff, FILE *fp) { - struct perf_evsel *evsel, *events; + struct evsel *evsel, *events; u32 j; u64 *id; @@ -1623,12 +1836,12 @@ return; } - for (evsel = events; evsel->attr.size; evsel++) { + for (evsel = events; evsel->core.attr.size; evsel++) { fprintf(fp, "# event : name = %s, ", evsel->name); - if (evsel->ids) { + if (evsel->core.ids) { fprintf(fp, ", id = {"); - for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { + for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) { if (j) fputc(',', fp); fprintf(fp, " %"PRIu64, *id); @@ -1636,7 +1849,7 @@ fprintf(fp, " }"); } - perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL); + perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL); fputc('\n', fp); } @@ -1698,6 +1911,34 @@ } } +static void print_compressed(struct feat_fd *ff, FILE *fp) +{ + fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n", + ff->ph->env.comp_type == PERF_COMP_ZSTD ? 
"Zstd" : "Unknown", + ff->ph->env.comp_level, ff->ph->env.comp_ratio); +} + +static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp) +{ + const char *delimiter = "# cpu pmu capabilities: "; + u32 nr_caps = ff->ph->env.nr_cpu_pmu_caps; + char *str; + + if (!nr_caps) { + fprintf(fp, "# cpu pmu capabilities: not available\n"); + return; + } + + str = ff->ph->env.cpu_pmu_caps; + while (nr_caps--) { + fprintf(fp, "%s%s", delimiter, str); + delimiter = ", "; + str += strlen(str) + 1; + } + + fprintf(fp, "\n"); +} + static void print_pmu_mappings(struct feat_fd *ff, FILE *fp) { const char *delimiter = "# pmu mappings: "; @@ -1737,20 +1978,18 @@ static void print_group_desc(struct feat_fd *ff, FILE *fp) { struct perf_session *session; - struct perf_evsel *evsel; + struct evsel *evsel; u32 nr = 0; session = container_of(ff->ph, struct perf_session, header); evlist__for_each_entry(session->evlist, evsel) { - if (perf_evsel__is_group_leader(evsel) && - evsel->nr_members > 1) { - fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", - perf_evsel__name(evsel)); + if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) { + fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel)); - nr = evsel->nr_members - 1; + nr = evsel->core.nr_members - 1; } else if (nr) { - fprintf(fp, ",%s", perf_evsel__name(evsel)); + fprintf(fp, ",%s", evsel__name(evsel)); if (--nr == 0) fprintf(fp, "}\n"); @@ -1809,7 +2048,7 @@ } } -static int __event_process_build_id(struct build_id_event *bev, +static int __event_process_build_id(struct perf_record_header_build_id *bev, char *filename, struct perf_session *session) { @@ -1817,7 +2056,7 @@ struct machine *machine; u16 cpumode; struct dso *dso; - enum dso_kernel_type dso_type; + enum dso_space_type dso_space; machine = perf_session__findnew_machine(session, bev->pid); if (!machine) @@ -1827,14 +2066,14 @@ switch (cpumode) { case PERF_RECORD_MISC_KERNEL: - dso_type = DSO_TYPE_KERNEL; + dso_space = DSO_SPACE__KERNEL; break; case PERF_RECORD_MISC_GUEST_KERNEL: - dso_type = DSO_TYPE_GUEST_KERNEL; + dso_space = DSO_SPACE__KERNEL_GUEST; break; case PERF_RECORD_MISC_USER: case PERF_RECORD_MISC_GUEST_USER: - dso_type = DSO_TYPE_USER; + dso_space = DSO_SPACE__USER; break; default: goto out; @@ -1843,24 +2082,28 @@ dso = machine__findnew_dso(machine, filename); if (dso != NULL) { char sbuild_id[SBUILD_ID_SIZE]; + struct build_id bid; + size_t size = BUILD_ID_SIZE; - dso__set_build_id(dso, &bev->build_id); + if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE) + size = bev->size; - if (dso_type != DSO_TYPE_USER) { + build_id__init(&bid, bev->data, size); + dso__set_build_id(dso, &bid); + + if (dso_space != DSO_SPACE__USER) { struct kmod_path m = { .name = NULL, }; if (!kmod_path__parse_name(&m, filename) && m.kmod) dso__set_module_info(dso, &m, machine); - else - dso->kernel = dso_type; + dso->kernel = dso_space; free(m.name); } - build_id__sprintf(dso->build_id, sizeof(dso->build_id), - sbuild_id); - pr_debug("build id event received for %s: %s\n", - dso->long_name, sbuild_id); + build_id__sprintf(&dso->bid, sbuild_id); + pr_debug("build id event received for %s: %s [%zu]\n", + dso->long_name, sbuild_id, size); dso__put(dso); } @@ -1878,7 +2121,7 @@ u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; char filename[0]; } old_bev; - struct build_id_event bev; + struct perf_record_header_build_id bev; char filename[PATH_MAX]; u64 limit = offset + size; @@ -1919,7 +2162,7 @@ int input, u64 offset, u64 size) { struct perf_session *session = 
container_of(header, struct perf_session, header); - struct build_id_event bev; + struct perf_record_header_build_id bev; char filename[PATH_MAX]; u64 limit = offset + size, orig_offset = offset; int err = -1; @@ -1941,7 +2184,7 @@ * * "perf: 'perf kvm' tool for monitoring guest performance from host" * - * Added a field to struct build_id_event that broke the file + * Added a field to struct perf_record_header_build_id that broke the file * format. * * Since the kernel build-id is the first entry, process the @@ -2022,10 +2265,10 @@ return 0; } -static struct perf_evsel * -perf_evlist__find_by_index(struct perf_evlist *evlist, int idx) +static struct evsel * +perf_evlist__find_by_index(struct evlist *evlist, int idx) { - struct perf_evsel *evsel; + struct evsel *evsel; evlist__for_each_entry(evlist, evsel) { if (evsel->idx == idx) @@ -2036,10 +2279,10 @@ } static void -perf_evlist__set_event_name(struct perf_evlist *evlist, - struct perf_evsel *event) +perf_evlist__set_event_name(struct evlist *evlist, + struct evsel *event) { - struct perf_evsel *evsel; + struct evsel *evsel; if (!event->name) return; @@ -2058,7 +2301,7 @@ process_event_desc(struct feat_fd *ff, void *data __maybe_unused) { struct perf_session *session; - struct perf_evsel *evsel, *events = read_event_desc(ff); + struct evsel *evsel, *events = read_event_desc(ff); if (!events) return 0; @@ -2071,7 +2314,7 @@ ff->events = events; } - for (evsel = events; evsel->attr.size; evsel++) + for (evsel = events; evsel->core.attr.size; evsel++) perf_evlist__set_event_name(session->evlist, evsel); if (!session->data->is_pipe) @@ -2195,6 +2438,7 @@ goto free_cpu; ph->env.cpu[i].core_id = nr; + size += sizeof(u32); if (do_read_u32(ff, &nr)) goto free_cpu; @@ -2206,6 +2450,40 @@ } ph->env.cpu[i].socket_id = nr; + size += sizeof(u32); + } + + /* + * The header may be from old perf, + * which doesn't include die information. 
+ */ + if (ff->size <= size) + return 0; + + if (do_read_u32(ff, &nr)) + return -1; + + ph->env.nr_sibling_dies = nr; + size += sizeof(u32); + + for (i = 0; i < nr; i++) { + str = do_read_string(ff); + if (!str) + goto error; + + /* include a NULL character at the end */ + if (strbuf_add(&sb, str, strlen(str) + 1) < 0) + goto error; + size += string_size(str); + free(str); + } + ph->env.sibling_dies = strbuf_detach(&sb, NULL); + + for (i = 0; i < (u32)cpu_nr; i++) { + if (do_read_u32(ff, &nr)) + goto free_cpu; + + ph->env.cpu[i].die_id = nr; } return 0; @@ -2248,7 +2526,7 @@ if (!str) goto error; - n->map = cpu_map__new(str); + n->map = perf_cpu_map__new(str); if (!n->map) goto error; @@ -2315,7 +2593,7 @@ size_t ret = -1; u32 i, nr, nr_groups; struct perf_session *session; - struct perf_evsel *evsel, *leader = NULL; + struct evsel *evsel, *leader = NULL; struct group_desc { char *name; u32 leader_idx; @@ -2362,7 +2640,7 @@ evsel->group_name = desc[i].name; desc[i].name = NULL; } - evsel->nr_members = desc[i].nr_members; + evsel->core.nr_members = desc[i].nr_members; if (i >= nr_groups || nr > 0) { pr_debug("invalid group desc\n"); @@ -2370,7 +2648,7 @@ } leader = evsel; - nr = evsel->nr_members - 1; + nr = evsel->core.nr_members - 1; i++; } else if (nr) { /* This is a group member */ @@ -2533,14 +2811,260 @@ return ret; } -struct feature_ops { - int (*write)(struct feat_fd *ff, struct perf_evlist *evlist); - void (*print)(struct feat_fd *ff, FILE *fp); - int (*process)(struct feat_fd *ff, void *data); - const char *name; - bool full_only; - bool synthesize; -}; +static int process_clockid(struct feat_fd *ff, + void *data __maybe_unused) +{ + if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns)) + return -1; + + return 0; +} + +static int process_clock_data(struct feat_fd *ff, + void *_data __maybe_unused) +{ + u32 data32; + u64 data64; + + /* version */ + if (do_read_u32(ff, &data32)) + return -1; + + if (data32 != 1) + return -1; + + /* clockid */ + if (do_read_u32(ff, &data32)) + return -1; + + ff->ph->env.clock.clockid = data32; + + /* TOD ref time */ + if (do_read_u64(ff, &data64)) + return -1; + + ff->ph->env.clock.tod_ns = data64; + + /* clockid ref time */ + if (do_read_u64(ff, &data64)) + return -1; + + ff->ph->env.clock.clockid_ns = data64; + ff->ph->env.clock.enabled = true; + return 0; +} + +static int process_dir_format(struct feat_fd *ff, + void *_data __maybe_unused) +{ + struct perf_session *session; + struct perf_data *data; + + session = container_of(ff->ph, struct perf_session, header); + data = session->data; + + if (WARN_ON(!perf_data__is_dir(data))) + return -1; + + return do_read_u64(ff, &data->dir.version); +} + +#ifdef HAVE_LIBBPF_SUPPORT +static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) +{ + struct bpf_prog_info_linear *info_linear; + struct bpf_prog_info_node *info_node; + struct perf_env *env = &ff->ph->env; + u32 count, i; + int err = -1; + + if (ff->ph->needs_swap) { + pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n"); + return 0; + } + + if (do_read_u32(ff, &count)) + return -1; + + down_write(&env->bpf_progs.lock); + + for (i = 0; i < count; ++i) { + u32 info_len, data_len; + + info_linear = NULL; + info_node = NULL; + if (do_read_u32(ff, &info_len)) + goto out; + if (do_read_u32(ff, &data_len)) + goto out; + + if (info_len > sizeof(struct bpf_prog_info)) { + pr_warning("detected invalid bpf_prog_info\n"); + goto out; + } + + info_linear = malloc(sizeof(struct bpf_prog_info_linear) + 
+ data_len); + if (!info_linear) + goto out; + info_linear->info_len = sizeof(struct bpf_prog_info); + info_linear->data_len = data_len; + if (do_read_u64(ff, (u64 *)(&info_linear->arrays))) + goto out; + if (__do_read(ff, &info_linear->info, info_len)) + goto out; + if (info_len < sizeof(struct bpf_prog_info)) + memset(((void *)(&info_linear->info)) + info_len, 0, + sizeof(struct bpf_prog_info) - info_len); + + if (__do_read(ff, info_linear->data, data_len)) + goto out; + + info_node = malloc(sizeof(struct bpf_prog_info_node)); + if (!info_node) + goto out; + + /* after reading from file, translate offset to address */ + bpf_program__bpil_offs_to_addr(info_linear); + info_node->info_linear = info_linear; + perf_env__insert_bpf_prog_info(env, info_node); + } + + up_write(&env->bpf_progs.lock); + return 0; +out: + free(info_linear); + free(info_node); + up_write(&env->bpf_progs.lock); + return err; +} +#else // HAVE_LIBBPF_SUPPORT +static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused) +{ + return 0; +} +#endif // HAVE_LIBBPF_SUPPORT + +static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused) +{ + struct perf_env *env = &ff->ph->env; + struct btf_node *node = NULL; + u32 count, i; + int err = -1; + + if (ff->ph->needs_swap) { + pr_warning("interpreting btf from systems with endianity is not yet supported\n"); + return 0; + } + + if (do_read_u32(ff, &count)) + return -1; + + down_write(&env->bpf_progs.lock); + + for (i = 0; i < count; ++i) { + u32 id, data_size; + + if (do_read_u32(ff, &id)) + goto out; + if (do_read_u32(ff, &data_size)) + goto out; + + node = malloc(sizeof(struct btf_node) + data_size); + if (!node) + goto out; + + node->id = id; + node->data_size = data_size; + + if (__do_read(ff, node->data, data_size)) + goto out; + + perf_env__insert_btf(env, node); + node = NULL; + } + + err = 0; +out: + up_write(&env->bpf_progs.lock); + free(node); + return err; +} + +static int process_compressed(struct feat_fd *ff, + void *data __maybe_unused) +{ + if (do_read_u32(ff, &(ff->ph->env.comp_ver))) + return -1; + + if (do_read_u32(ff, &(ff->ph->env.comp_type))) + return -1; + + if (do_read_u32(ff, &(ff->ph->env.comp_level))) + return -1; + + if (do_read_u32(ff, &(ff->ph->env.comp_ratio))) + return -1; + + if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len))) + return -1; + + return 0; +} + +static int process_cpu_pmu_caps(struct feat_fd *ff, + void *data __maybe_unused) +{ + char *name, *value; + struct strbuf sb; + u32 nr_caps; + + if (do_read_u32(ff, &nr_caps)) + return -1; + + if (!nr_caps) { + pr_debug("cpu pmu capabilities not available\n"); + return 0; + } + + ff->ph->env.nr_cpu_pmu_caps = nr_caps; + + if (strbuf_init(&sb, 128) < 0) + return -1; + + while (nr_caps--) { + name = do_read_string(ff); + if (!name) + goto error; + + value = do_read_string(ff); + if (!value) + goto free_name; + + if (strbuf_addf(&sb, "%s=%s", name, value) < 0) + goto free_value; + + /* include a NULL character at the end */ + if (strbuf_add(&sb, "", 1) < 0) + goto free_value; + + if (!strcmp(name, "branches")) + ff->ph->env.max_branches = atoi(value); + + free(value); + free(name); + } + ff->ph->env.cpu_pmu_caps = strbuf_detach(&sb, NULL); + return 0; + +free_value: + free(value); +free_name: + free(name); +error: + strbuf_release(&sb); + return -1; +} #define FEAT_OPR(n, func, __full_only) \ [HEADER_##n] = { \ @@ -2568,8 +3092,10 @@ #define process_branch_stack NULL #define process_stat NULL +// Only used in util/synthetic-events.c +const struct 
perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE]; -static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { +const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPN(TRACING_DATA, tracing_data, false), FEAT_OPN(BUILD_ID, build_id, false), FEAT_OPR(HOSTNAME, hostname, false), @@ -2592,6 +3118,13 @@ FEAT_OPN(CACHE, cache, true), FEAT_OPR(SAMPLE_TIME, sample_time, false), FEAT_OPR(MEM_TOPOLOGY, mem_topology, true), + FEAT_OPR(CLOCKID, clockid, false), + FEAT_OPN(DIR_FORMAT, dir_format, false), + FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false), + FEAT_OPR(BPF_BTF, bpf_btf, false), + FEAT_OPR(COMPRESSED, compressed, false), + FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false), + FEAT_OPR(CLOCK_DATA, clock_data, false), }; struct header_print_data { @@ -2648,7 +3181,7 @@ if (ret == -1) return -1; - stctime = st.st_ctime; + stctime = st.st_mtime; fprintf(fp, "# captured on : %s", ctime(&stctime)); fprintf(fp, "# header version : %u\n", header->version); @@ -2674,7 +3207,7 @@ static int do_write_feat(struct feat_fd *ff, int type, struct perf_file_section **p, - struct perf_evlist *evlist) + struct evlist *evlist) { int err; int ret = 0; @@ -2704,7 +3237,7 @@ } static int perf_header__adds_write(struct perf_header *header, - struct perf_evlist *evlist, int fd) + struct evlist *evlist, int fd) { int nr_sections; struct feat_fd ff; @@ -2740,7 +3273,7 @@ lseek(fd, sec_start, SEEK_SET); /* * may write more than needed due to dropped feature, but - * this is okay, reader will skip the mising entries + * this is okay, reader will skip the missing entries */ err = do_write(&ff, feat_sec, sec_size); if (err < 0) @@ -2772,13 +3305,13 @@ } int perf_session__write_header(struct perf_session *session, - struct perf_evlist *evlist, + struct evlist *evlist, int fd, bool at_exit) { struct perf_file_header f_header; struct perf_file_attr f_attr; struct perf_header *header = &session->header; - struct perf_evsel *evsel; + struct evsel *evsel; struct feat_fd ff; u64 attr_offset; int err; @@ -2788,7 +3321,7 @@ evlist__for_each_entry(session->evlist, evsel) { evsel->id_offset = lseek(fd, 0, SEEK_CUR); - err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64)); + err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64)); if (err < 0) { pr_debug("failed to write perf header\n"); return err; @@ -2799,10 +3332,10 @@ evlist__for_each_entry(evlist, evsel) { f_attr = (struct perf_file_attr){ - .attr = evsel->attr, + .attr = evsel->core.attr, .ids = { .offset = evsel->id_offset, - .size = evsel->ids * sizeof(u64), + .size = evsel->core.ids * sizeof(u64), } }; err = do_write(&ff, &f_attr, sizeof(f_attr)); @@ -2828,7 +3361,7 @@ .attr_size = sizeof(f_attr), .attrs = { .offset = attr_offset, - .size = evlist->nr_entries * sizeof(f_attr), + .size = evlist->core.nr_entries * sizeof(f_attr), }, .data = { .offset = header->data_offset, @@ -3158,7 +3691,7 @@ return -EINVAL; } - return 0; + return f_header.size == sizeof(f_header) ? 0 : -1; } static int read_attr(int fd, struct perf_header *ph, @@ -3207,10 +3740,10 @@ return ret <= 0 ? 
-1 : 0; } -static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, +static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel, struct tep_handle *pevent) { - struct event_format *event; + struct tep_event *event; char bf[128]; /* already prepared */ @@ -3222,9 +3755,9 @@ return -1; } - event = tep_find_event(pevent, evsel->attr.config); + event = tep_find_event(pevent, evsel->core.attr.config); if (event == NULL) { - pr_debug("cannot find event format for %d\n", (int)evsel->attr.config); + pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config); return -1; } @@ -3239,13 +3772,13 @@ return 0; } -static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, +static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist, struct tep_handle *pevent) { - struct perf_evsel *pos; + struct evsel *pos; evlist__for_each_entry(evlist, pos) { - if (pos->attr.type == PERF_TYPE_TRACEPOINT && + if (pos->core.attr.type == PERF_TYPE_TRACEPOINT && perf_evsel__prepare_tracepoint_event(pos, pevent)) return -1; } @@ -3260,17 +3793,25 @@ struct perf_file_header f_header; struct perf_file_attr f_attr; u64 f_id; - int nr_attrs, nr_ids, i, j; + int nr_attrs, nr_ids, i, j, err; int fd = perf_data__fd(data); - session->evlist = perf_evlist__new(); + session->evlist = evlist__new(); if (session->evlist == NULL) return -ENOMEM; session->evlist->env = &header->env; session->machines.host.env = &header->env; - if (perf_data__is_pipe(data)) - return perf_header__read_pipe(session); + + /* + * We can read 'pipe' data event from regular file, + * check for the pipe header regardless of source. + */ + err = perf_header__read_pipe(session); + if (!err || (err && perf_data__is_pipe(data))) { + data->is_pipe = true; + return err; + } if (perf_file_header__read(&f_header, header, fd) < 0) return -EINVAL; @@ -3298,7 +3839,7 @@ lseek(fd, f_header.attrs.offset, SEEK_SET); for (i = 0; i < nr_attrs; i++) { - struct perf_evsel *evsel; + struct evsel *evsel; off_t tmp; if (read_attr(fd, header, &f_attr) < 0) @@ -3311,7 +3852,7 @@ } tmp = lseek(fd, 0, SEEK_CUR); - evsel = perf_evsel__new(&f_attr.attr); + evsel = evsel__new(&f_attr.attr); if (evsel == NULL) goto out_delete_evlist; @@ -3319,9 +3860,9 @@ evsel->needs_swap = header->needs_swap; /* * Do it before so that if perf_evsel__alloc_id fails, this - * entry gets purged too at perf_evlist__delete(). + * entry gets purged too at evlist__delete(). */ - perf_evlist__add(session->evlist, evsel); + evlist__add(session->evlist, evsel); nr_ids = f_attr.ids.size / sizeof(u64); /* @@ -3329,7 +3870,7 @@ * for allocating the perf_sample_id table we fake 1 cpu and * hattr->ids threads. 
*/ - if (perf_evsel__alloc_id(evsel, 1, nr_ids)) + if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids)) goto out_delete_evlist; lseek(fd, f_attr.ids.offset, SEEK_SET); @@ -3338,7 +3879,7 @@ if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) goto out_errno; - perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); + perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id); } lseek(fd, tmp, SEEK_SET); @@ -3356,115 +3897,17 @@ return -errno; out_delete_evlist: - perf_evlist__delete(session->evlist); + evlist__delete(session->evlist); session->evlist = NULL; return -ENOMEM; } -int perf_event__synthesize_attr(struct perf_tool *tool, - struct perf_event_attr *attr, u32 ids, u64 *id, - perf_event__handler_t process) +int perf_event__process_feature(struct perf_session *session, + union perf_event *event) { - union perf_event *ev; - size_t size; - int err; - - size = sizeof(struct perf_event_attr); - size = PERF_ALIGN(size, sizeof(u64)); - size += sizeof(struct perf_event_header); - size += ids * sizeof(u64); - - ev = zalloc(size); - - if (ev == NULL) - return -ENOMEM; - - ev->attr.attr = *attr; - memcpy(ev->attr.id, id, ids * sizeof(u64)); - - ev->attr.header.type = PERF_RECORD_HEADER_ATTR; - ev->attr.header.size = (u16)size; - - if (ev->attr.header.size == size) - err = process(tool, ev, NULL, NULL); - else - err = -E2BIG; - - free(ev); - - return err; -} - -int perf_event__synthesize_features(struct perf_tool *tool, - struct perf_session *session, - struct perf_evlist *evlist, - perf_event__handler_t process) -{ - struct perf_header *header = &session->header; - struct feat_fd ff; - struct feature_event *fe; - size_t sz, sz_hdr; - int feat, ret; - - sz_hdr = sizeof(fe->header); - sz = sizeof(union perf_event); - /* get a nice alignment */ - sz = PERF_ALIGN(sz, page_size); - - memset(&ff, 0, sizeof(ff)); - - ff.buf = malloc(sz); - if (!ff.buf) - return -ENOMEM; - - ff.size = sz - sz_hdr; - - for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { - if (!feat_ops[feat].synthesize) { - pr_debug("No record header feature for header :%d\n", feat); - continue; - } - - ff.offset = sizeof(*fe); - - ret = feat_ops[feat].write(&ff, evlist); - if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { - pr_debug("Error writing feature\n"); - continue; - } - /* ff.buf may have changed due to realloc in do_write() */ - fe = ff.buf; - memset(fe, 0, sizeof(*fe)); - - fe->feat_id = feat; - fe->header.type = PERF_RECORD_HEADER_FEATURE; - fe->header.size = ff.offset; - - ret = process(tool, ff.buf, NULL, NULL); - if (ret) { - free(ff.buf); - return ret; - } - } - - /* Send HEADER_LAST_FEATURE mark. 
*/ - fe = ff.buf; - fe->feat_id = HEADER_LAST_FEATURE; - fe->header.type = PERF_RECORD_HEADER_FEATURE; - fe->header.size = sizeof(*fe); - - ret = process(tool, ff.buf, NULL, NULL); - - free(ff.buf); - return ret; -} - -int perf_event__process_feature(struct perf_tool *tool, - union perf_event *event, - struct perf_session *session __maybe_unused) -{ + struct perf_tool *tool = session->tool; struct feat_fd ff = { .fd = 0 }; - struct feature_event *fe = (struct feature_event *)event; + struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event; int type = fe->header.type; u64 feat = fe->feat_id; @@ -3501,126 +3944,19 @@ return 0; } -static struct event_update_event * -event_update_event__new(size_t size, u64 type, u64 id) -{ - struct event_update_event *ev; - - size += sizeof(*ev); - size = PERF_ALIGN(size, sizeof(u64)); - - ev = zalloc(size); - if (ev) { - ev->header.type = PERF_RECORD_EVENT_UPDATE; - ev->header.size = (u16)size; - ev->type = type; - ev->id = id; - } - return ev; -} - -int -perf_event__synthesize_event_update_unit(struct perf_tool *tool, - struct perf_evsel *evsel, - perf_event__handler_t process) -{ - struct event_update_event *ev; - size_t size = strlen(evsel->unit); - int err; - - ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]); - if (ev == NULL) - return -ENOMEM; - - strlcpy(ev->data, evsel->unit, size + 1); - err = process(tool, (union perf_event *)ev, NULL, NULL); - free(ev); - return err; -} - -int -perf_event__synthesize_event_update_scale(struct perf_tool *tool, - struct perf_evsel *evsel, - perf_event__handler_t process) -{ - struct event_update_event *ev; - struct event_update_event_scale *ev_data; - int err; - - ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]); - if (ev == NULL) - return -ENOMEM; - - ev_data = (struct event_update_event_scale *) ev->data; - ev_data->scale = evsel->scale; - err = process(tool, (union perf_event*) ev, NULL, NULL); - free(ev); - return err; -} - -int -perf_event__synthesize_event_update_name(struct perf_tool *tool, - struct perf_evsel *evsel, - perf_event__handler_t process) -{ - struct event_update_event *ev; - size_t len = strlen(evsel->name); - int err; - - ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]); - if (ev == NULL) - return -ENOMEM; - - strlcpy(ev->data, evsel->name, len + 1); - err = process(tool, (union perf_event*) ev, NULL, NULL); - free(ev); - return err; -} - -int -perf_event__synthesize_event_update_cpus(struct perf_tool *tool, - struct perf_evsel *evsel, - perf_event__handler_t process) -{ - size_t size = sizeof(struct event_update_event); - struct event_update_event *ev; - int max, err; - u16 type; - - if (!evsel->own_cpus) - return 0; - - ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max); - if (!ev) - return -ENOMEM; - - ev->header.type = PERF_RECORD_EVENT_UPDATE; - ev->header.size = (u16)size; - ev->type = PERF_EVENT_UPDATE__CPUS; - ev->id = evsel->id[0]; - - cpu_map_data__synthesize((struct cpu_map_data *) ev->data, - evsel->own_cpus, - type, max); - - err = process(tool, (union perf_event*) ev, NULL, NULL); - free(ev); - return err; -} - size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) { - struct event_update_event *ev = &event->event_update; - struct event_update_event_scale *ev_scale; - struct event_update_event_cpus *ev_cpus; - struct cpu_map *map; + struct perf_record_event_update *ev = &event->event_update; + struct perf_record_event_update_scale 
*ev_scale; + struct perf_record_event_update_cpus *ev_cpus; + struct perf_cpu_map *map; size_t ret; - ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id); + ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id); switch (ev->type) { case PERF_EVENT_UPDATE__SCALE: - ev_scale = (struct event_update_event_scale *) ev->data; + ev_scale = (struct perf_record_event_update_scale *)ev->data; ret += fprintf(fp, "... scale: %f\n", ev_scale->scale); break; case PERF_EVENT_UPDATE__UNIT: @@ -3630,7 +3966,7 @@ ret += fprintf(fp, "... name: %s\n", ev->data); break; case PERF_EVENT_UPDATE__CPUS: - ev_cpus = (struct event_update_event_cpus *) ev->data; + ev_cpus = (struct perf_record_event_update_cpus *)ev->data; ret += fprintf(fp, "... "); map = cpu_map__new_data(&ev_cpus->cpus); @@ -3647,112 +3983,25 @@ return ret; } -int perf_event__synthesize_attrs(struct perf_tool *tool, - struct perf_session *session, - perf_event__handler_t process) -{ - struct perf_evsel *evsel; - int err = 0; - - evlist__for_each_entry(session->evlist, evsel) { - err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, - evsel->id, process); - if (err) { - pr_debug("failed to create perf header attribute\n"); - return err; - } - } - - return err; -} - -static bool has_unit(struct perf_evsel *counter) -{ - return counter->unit && *counter->unit; -} - -static bool has_scale(struct perf_evsel *counter) -{ - return counter->scale != 1; -} - -int perf_event__synthesize_extra_attr(struct perf_tool *tool, - struct perf_evlist *evsel_list, - perf_event__handler_t process, - bool is_pipe) -{ - struct perf_evsel *counter; - int err; - - /* - * Synthesize other events stuff not carried within - * attr event - unit, scale, name - */ - evlist__for_each_entry(evsel_list, counter) { - if (!counter->supported) - continue; - - /* - * Synthesize unit and scale only if it's defined. - */ - if (has_unit(counter)) { - err = perf_event__synthesize_event_update_unit(tool, counter, process); - if (err < 0) { - pr_err("Couldn't synthesize evsel unit.\n"); - return err; - } - } - - if (has_scale(counter)) { - err = perf_event__synthesize_event_update_scale(tool, counter, process); - if (err < 0) { - pr_err("Couldn't synthesize evsel counter.\n"); - return err; - } - } - - if (counter->own_cpus) { - err = perf_event__synthesize_event_update_cpus(tool, counter, process); - if (err < 0) { - pr_err("Couldn't synthesize evsel cpus.\n"); - return err; - } - } - - /* - * Name is needed only for pipe output, - * perf.data carries event names. - */ - if (is_pipe) { - err = perf_event__synthesize_event_update_name(tool, counter, process); - if (err < 0) { - pr_err("Couldn't synthesize evsel name.\n"); - return err; - } - } - } - return 0; -} - int perf_event__process_attr(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_evlist **pevlist) + struct evlist **pevlist) { u32 i, ids, n_ids; - struct perf_evsel *evsel; - struct perf_evlist *evlist = *pevlist; + struct evsel *evsel; + struct evlist *evlist = *pevlist; if (evlist == NULL) { - *pevlist = evlist = perf_evlist__new(); + *pevlist = evlist = evlist__new(); if (evlist == NULL) return -ENOMEM; } - evsel = perf_evsel__new(&event->attr.attr); + evsel = evsel__new(&event->attr.attr); if (evsel == NULL) return -ENOMEM; - perf_evlist__add(evlist, evsel); + evlist__add(evlist, evsel); ids = event->header.size; ids -= (void *)&event->attr.id - (void *)event; @@ -3762,11 +4011,11 @@ * for allocating the perf_sample_id table we fake 1 cpu and * hattr->ids threads. 
*/ - if (perf_evsel__alloc_id(evsel, 1, n_ids)) + if (perf_evsel__alloc_id(&evsel->core, 1, n_ids)) return -ENOMEM; for (i = 0; i < n_ids; i++) { - perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); + perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]); } return 0; @@ -3774,14 +4023,14 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_evlist **pevlist) + struct evlist **pevlist) { - struct event_update_event *ev = &event->event_update; - struct event_update_event_scale *ev_scale; - struct event_update_event_cpus *ev_cpus; - struct perf_evlist *evlist; - struct perf_evsel *evsel; - struct cpu_map *map; + struct perf_record_event_update *ev = &event->event_update; + struct perf_record_event_update_scale *ev_scale; + struct perf_record_event_update_cpus *ev_cpus; + struct evlist *evlist; + struct evsel *evsel; + struct perf_cpu_map *map; if (!pevlist || *pevlist == NULL) return -EINVAL; @@ -3800,15 +4049,15 @@ evsel->name = strdup(ev->data); break; case PERF_EVENT_UPDATE__SCALE: - ev_scale = (struct event_update_event_scale *) ev->data; + ev_scale = (struct perf_record_event_update_scale *)ev->data; evsel->scale = ev_scale->scale; break; case PERF_EVENT_UPDATE__CPUS: - ev_cpus = (struct event_update_event_cpus *) ev->data; + ev_cpus = (struct perf_record_event_update_cpus *)ev->data; map = cpu_map__new_data(&ev_cpus->cpus); if (map) - evsel->own_cpus = map; + evsel->core.own_cpus = map; else pr_err("failed to get event_update cpus\n"); default: @@ -3818,67 +4067,27 @@ return 0; } -int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, - struct perf_evlist *evlist, - perf_event__handler_t process) -{ - union perf_event ev; - struct tracing_data *tdata; - ssize_t size = 0, aligned_size = 0, padding; - struct feat_fd ff; - int err __maybe_unused = 0; - - /* - * We are going to store the size of the data followed - * by the data contents. Since the fd descriptor is a pipe, - * we cannot seek back to store the size of the data once - * we know it. Instead we: - * - * - write the tracing data to the temp file - * - get/write the data size to pipe - * - write the tracing data from the temp file - * to the pipe - */ - tdata = tracing_data_get(&evlist->entries, fd, true); - if (!tdata) - return -1; - - memset(&ev, 0, sizeof(ev)); - - ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; - size = tdata->size; - aligned_size = PERF_ALIGN(size, sizeof(u64)); - padding = aligned_size - size; - ev.tracing_data.header.size = sizeof(ev.tracing_data); - ev.tracing_data.size = aligned_size; - - process(tool, &ev, NULL, NULL); - - /* - * The put function will copy all the tracing data - * stored in temp file to the pipe. 
- */ - tracing_data_put(tdata); - - ff = (struct feat_fd){ .fd = fd }; - if (write_padded(&ff, NULL, 0, padding)) - return -1; - - return aligned_size; -} - -int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused, - union perf_event *event, - struct perf_session *session) +int perf_event__process_tracing_data(struct perf_session *session, + union perf_event *event) { ssize_t size_read, padding, size = event->tracing_data.size; int fd = perf_data__fd(session->data); - off_t offset = lseek(fd, 0, SEEK_CUR); char buf[BUFSIZ]; - /* setup for reading amidst mmap */ - lseek(fd, offset + sizeof(struct tracing_data_event), - SEEK_SET); + /* + * The pipe fd is already in proper place and in any case + * we can't move it, and we'd screw the case where we read + * 'pipe' data from regular file. The trace_report reads + * data from 'fd' so we need to set it directly behind the + * event, where the tracing data starts. + */ + if (!perf_data__is_pipe(session->data)) { + off_t offset = lseek(fd, 0, SEEK_CUR); + + /* setup for reading amidst mmap */ + lseek(fd, offset + sizeof(struct perf_record_header_tracing_data), + SEEK_SET); + } size_read = trace_report(fd, &session->tevent, session->repipe); @@ -3907,37 +4116,8 @@ return size_read + padding; } -int perf_event__synthesize_build_id(struct perf_tool *tool, - struct dso *pos, u16 misc, - perf_event__handler_t process, - struct machine *machine) -{ - union perf_event ev; - size_t len; - int err = 0; - - if (!pos->hit) - return err; - - memset(&ev, 0, sizeof(ev)); - - len = pos->long_name_len + 1; - len = PERF_ALIGN(len, NAME_ALIGN); - memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); - ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; - ev.build_id.header.misc = misc; - ev.build_id.pid = machine->pid; - ev.build_id.header.size = sizeof(ev.build_id) + len; - memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); - - err = process(tool, &ev, NULL, machine); - - return err; -} - -int perf_event__process_build_id(struct perf_tool *tool __maybe_unused, - union perf_event *event, - struct perf_session *session) +int perf_event__process_build_id(struct perf_session *session, + union perf_event *event) { __event_process_build_id(&event->build_id, event->build_id.filename, -- Gitblit v1.6.2