From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] perf record: sync builtin-record.c with newer upstream (AIO trace writing, Zstd compression, affinity, control fd)
---
kernel/tools/perf/builtin-record.c | 1418 +++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 1189 insertions(+), 229 deletions(-)
diff --git a/kernel/tools/perf/builtin-record.c b/kernel/tools/perf/builtin-record.c
index 22ebeb9..e5c938d 100644
--- a/kernel/tools/perf/builtin-record.c
+++ b/kernel/tools/perf/builtin-record.c
@@ -8,10 +8,7 @@
*/
#include "builtin.h"
-#include "perf.h"
-
#include "util/build-id.h"
-#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"
@@ -23,10 +20,12 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
-#include "util/drv_configs.h"
+#include "util/mmap.h"
+#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
+#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
@@ -35,24 +34,44 @@
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
+#include "util/perf_api_probe.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
+#include "util/cpu-set-sched.h"
+#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
+#include "util/bpf-event.h"
+#include "util/util.h"
+#include "util/pfm.h"
+#include "util/clockid.h"
#include "asm/bug.h"
+#include "perf.h"
#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
+#include <pthread.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
+#ifdef HAVE_EVENTFD_SUPPORT
+#include <sys/eventfd.h>
+#endif
#include <sys/mman.h>
#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <linux/err.h>
+#include <linux/string.h>
#include <linux/time64.h>
+#include <linux/zalloc.h>
+#include <linux/bitmap.h>
+#include <sys/time.h>
struct switch_output {
bool enabled;
@@ -61,6 +80,9 @@
unsigned long time;
const char *str;
bool set;
+ char **filenames;
+ int num_files;
+ int cur_file;
};
struct record {
@@ -69,9 +91,12 @@
u64 bytes_written;
struct perf_data data;
struct auxtrace_record *itr;
- struct perf_evlist *evlist;
+ struct evlist *evlist;
struct perf_session *session;
+ struct evlist *sb_evlist;
+ pthread_t thread_id;
int realtime_prio;
+ bool switch_output_event_set;
bool no_buildid;
bool no_buildid_set;
bool no_buildid_cache;
@@ -81,11 +106,19 @@
bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
+ struct mmap_cpu_mask affinity_mask;
+ unsigned long output_max_size; /* = 0: unlimited */
};
+
+static volatile int done;
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
+
+static const char *affinity_tags[PERF_AFFINITY_MAX] = {
+ "SYS", "NODE", "CPU"
+};
static bool switch_output_signal(struct record *rec)
{
@@ -106,19 +139,371 @@
trigger_is_ready(&switch_output_trigger);
}
-static int record__write(struct record *rec, void *bf, size_t size)
+static bool record__output_max_size_exceeded(struct record *rec)
{
- if (perf_data__write(rec->session->data, bf, size) < 0) {
+ return rec->output_max_size &&
+ (rec->bytes_written >= rec->output_max_size);
+}
+
+static int record__write(struct record *rec, struct mmap *map __maybe_unused,
+ void *bf, size_t size)
+{
+ struct perf_data_file *file = &rec->session->data->file;
+
+ if (perf_data_file__write(file, bf, size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return -1;
}
rec->bytes_written += size;
+ if (record__output_max_size_exceeded(rec) && !done) {
+ fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
+ " stopping session ]\n",
+ rec->bytes_written >> 10);
+ done = 1;
+ }
+
if (switch_output_size(rec))
trigger_hit(&switch_output_trigger);
return 0;
+}
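record__write() now charges every write against rec->bytes_written and ends the session once the limit set with the new --max-size option (registered in record_options further down) is crossed. For example, `perf record --max-size=100M -a -- sleep 60` would stop a system-wide session at roughly 100 MB; the B/K/M/G suffixes come from the parse_tag table in parse_output_max_size().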
+
+static int record__aio_enabled(struct record *rec);
+static int record__comp_enabled(struct record *rec);
+static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
+ void *src, size_t src_size);
+
+#ifdef HAVE_AIO_SUPPORT
+static int record__aio_write(struct aiocb *cblock, int trace_fd,
+ void *buf, size_t size, off_t off)
+{
+ int rc;
+
+ cblock->aio_fildes = trace_fd;
+ cblock->aio_buf = buf;
+ cblock->aio_nbytes = size;
+ cblock->aio_offset = off;
+ cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
+
+ do {
+ rc = aio_write(cblock);
+ if (rc == 0) {
+ break;
+ } else if (errno != EAGAIN) {
+ cblock->aio_fildes = -1;
+ pr_err("failed to queue perf data, error: %m\n");
+ break;
+ }
+ } while (1);
+
+ return rc;
+}
+
+static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
+{
+ void *rem_buf;
+ off_t rem_off;
+ size_t rem_size;
+ int rc, aio_errno;
+ ssize_t aio_ret, written;
+
+ aio_errno = aio_error(cblock);
+ if (aio_errno == EINPROGRESS)
+ return 0;
+
+ written = aio_ret = aio_return(cblock);
+ if (aio_ret < 0) {
+ if (aio_errno != EINTR)
+ pr_err("failed to write perf data, error: %m\n");
+ written = 0;
+ }
+
+ rem_size = cblock->aio_nbytes - written;
+
+ if (rem_size == 0) {
+ cblock->aio_fildes = -1;
+ /*
+ * md->refcount is incremented in record__aio_pushfn() for
+ * every aio write request started in record__aio_push() so
+ * decrement it because the request is now complete.
+ */
+ perf_mmap__put(&md->core);
+ rc = 1;
+ } else {
+ /*
+ * The aio write request may require a restart with the
+ * remainder if the kernel didn't write the whole
+ * chunk at once.
+ */
+ rem_off = cblock->aio_offset + written;
+ rem_buf = (void *)(cblock->aio_buf + written);
+ record__aio_write(cblock, cblock->aio_fildes,
+ rem_buf, rem_size, rem_off);
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int record__aio_sync(struct mmap *md, bool sync_all)
+{
+ struct aiocb **aiocb = md->aio.aiocb;
+ struct aiocb *cblocks = md->aio.cblocks;
+ struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
+ int i, do_suspend;
+
+ do {
+ do_suspend = 0;
+ for (i = 0; i < md->aio.nr_cblocks; ++i) {
+ if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
+ if (sync_all)
+ aiocb[i] = NULL;
+ else
+ return i;
+ } else {
+ /*
+ * The started aio write is not complete yet,
+ * so it has to be waited for before the
+ * next allocation.
+ */
+ aiocb[i] = &cblocks[i];
+ do_suspend = 1;
+ }
+ }
+ if (!do_suspend)
+ return -1;
+
+ while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
+ if (!(errno == EAGAIN || errno == EINTR))
+ pr_err("failed to sync perf data, error: %m\n");
+ }
+ } while (1);
+}
+
+struct record_aio {
+ struct record *rec;
+ void *data;
+ size_t size;
+};
+
+static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
+{
+ struct record_aio *aio = to;
+
+ /*
+ * map->core.base data pointed to by buf is copied into a free map->aio.data[]
+ * buffer to release space in the kernel buffer as fast as possible, by calling
+ * perf_mmap__consume() from perf_mmap__push().
+ *
+ * That lets the kernel proceed with storing more profiling data into
+ * the kernel buffer earlier than other per-cpu kernel buffers are handled.
+ *
+ * Copying can be done in two steps in case the chunk of profiling data
+ * crosses the upper bound of the kernel buffer. In this case we first move
+ * part of the data from map->start till the upper bound and then the remainder
+ * from the beginning of the kernel buffer till the end of the data chunk.
+ */
+
+ if (record__comp_enabled(aio->rec)) {
+ size = zstd_compress(aio->rec->session, aio->data + aio->size,
+ mmap__mmap_len(map) - aio->size,
+ buf, size);
+ } else {
+ memcpy(aio->data + aio->size, buf, size);
+ }
+
+ if (!aio->size) {
+ /*
+ * Increment map->refcount to guard map->aio.data[] buffer
+ * from premature deallocation because the map object can be
+ * released earlier than the aio write request started on the
+ * map->aio.data[] buffer is complete.
+ *
+ * perf_mmap__put() is done at record__aio_complete()
+ * after started aio request completion or at record__aio_push()
+ * if the request failed to start.
+ */
+ perf_mmap__get(&map->core);
+ }
+
+ aio->size += size;
+
+ return size;
+}
+
+static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
+{
+ int ret, idx;
+ int trace_fd = rec->session->data->file.fd;
+ struct record_aio aio = { .rec = rec, .size = 0 };
+
+ /*
+ * Call record__aio_sync() to wait till map->aio.data[] buffer
+ * becomes available after previous aio write operation.
+ */
+
+ idx = record__aio_sync(map, false);
+ aio.data = map->aio.data[idx];
+ ret = perf_mmap__push(map, &aio, record__aio_pushfn);
+ if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
+ return ret;
+
+ rec->samples++;
+ ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
+ if (!ret) {
+ *off += aio.size;
+ rec->bytes_written += aio.size;
+ if (switch_output_size(rec))
+ trigger_hit(&switch_output_trigger);
+ } else {
+ /*
+ * Decrement map->refcount incremented in record__aio_pushfn()
+ * back if record__aio_write() operation failed to start, otherwise
+ * map->refcount is decremented in record__aio_complete() after
+ * aio write operation finishes successfully.
+ */
+ perf_mmap__put(&map->core);
+ }
+
+ return ret;
+}
+
+static off_t record__aio_get_pos(int trace_fd)
+{
+ return lseek(trace_fd, 0, SEEK_CUR);
+}
+
+static void record__aio_set_pos(int trace_fd, off_t pos)
+{
+ lseek(trace_fd, pos, SEEK_SET);
+}
+
+static void record__aio_mmap_read_sync(struct record *rec)
+{
+ int i;
+ struct evlist *evlist = rec->evlist;
+ struct mmap *maps = evlist->mmap;
+
+ if (!record__aio_enabled(rec))
+ return;
+
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &maps[i];
+
+ if (map->core.base)
+ record__aio_sync(map, true);
+ }
+}
+
+static int nr_cblocks_default = 1;
+static int nr_cblocks_max = 4;
+
+static int record__aio_parse(const struct option *opt,
+ const char *str,
+ int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+
+ if (unset) {
+ opts->nr_cblocks = 0;
+ } else {
+ if (str)
+ opts->nr_cblocks = strtol(str, NULL, 0);
+ if (!opts->nr_cblocks)
+ opts->nr_cblocks = nr_cblocks_default;
+ }
+
+ return 0;
+}
+#else /* HAVE_AIO_SUPPORT */
+static int nr_cblocks_max = 0;
+
+static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
+ off_t *off __maybe_unused)
+{
+ return -1;
+}
+
+static off_t record__aio_get_pos(int trace_fd __maybe_unused)
+{
+ return -1;
+}
+
+static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
+{
+}
+
+static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
+{
+}
+#endif
+
+static int record__aio_enabled(struct record *rec)
+{
+ return rec->opts.nr_cblocks > 0;
+}
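With HAVE_AIO_SUPPORT, trace data is written through up to nr_cblocks in-flight aio_write() requests per mmap buffer, and record__aio_sync() waits for a free control block before a data buffer is reused. The mode is enabled with --aio[=n] (default 1, capped at nr_cblocks_max = 4) or the record.aio config key handled in the config callback below, e.g. `perf record --aio=4 ...`.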
+
+#define MMAP_FLUSH_DEFAULT 1
+static int record__mmap_flush_parse(const struct option *opt,
+ const char *str,
+ int unset)
+{
+ int flush_max;
+ struct record_opts *opts = (struct record_opts *)opt->value;
+ static struct parse_tag tags[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+
+ if (unset)
+ return 0;
+
+ if (str) {
+ opts->mmap_flush = parse_tag_value(str, tags);
+ if (opts->mmap_flush == (int)-1)
+ opts->mmap_flush = strtol(str, NULL, 0);
+ }
+
+ if (!opts->mmap_flush)
+ opts->mmap_flush = MMAP_FLUSH_DEFAULT;
+
+ flush_max = evlist__mmap_size(opts->mmap_pages);
+ flush_max /= 4;
+ if (opts->mmap_flush > flush_max)
+ opts->mmap_flush = flush_max;
+
+ return 0;
+}
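--mmap-flush sets the minimal number of bytes that must accumulate in a mmap data page before it is pushed to the output; values take B/K/M/G suffixes and are capped at a quarter of the mmap buffer size (flush_max). For instance `perf record --mmap-flush=16M ...` batches writes into chunks of up to 16 MB, subject to that cap.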
+
+#ifdef HAVE_ZSTD_SUPPORT
+static unsigned int comp_level_default = 1;
+
+static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = opt->value;
+
+ if (unset) {
+ opts->comp_level = 0;
+ } else {
+ if (str)
+ opts->comp_level = strtol(str, NULL, 0);
+ if (!opts->comp_level)
+ opts->comp_level = comp_level_default;
+ }
+
+ return 0;
+}
+#endif
+static unsigned int comp_level_max = 22;
+
+static int record__comp_enabled(struct record *rec)
+{
+ return rec->opts.comp_level > 0;
}
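record__comp_enabled() gates Zstd compression of the trace stream. With HAVE_ZSTD_SUPPORT, a bare -z selects comp_level_default (1, fastest) and -z <n> any level up to comp_level_max (22); a small illustration of the underlying libzstd call follows zstd_compress() further down.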
static int process_synthesized_event(struct perf_tool *tool,
@@ -127,20 +512,41 @@
struct machine *machine __maybe_unused)
{
struct record *rec = container_of(tool, struct record, tool);
- return record__write(rec, event, event->header.size);
+ return record__write(rec, NULL, event, event->header.size);
}
-static int record__pushfn(void *to, void *bf, size_t size)
+static int process_locked_synthesized_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ static pthread_mutex_t synth_lock = PTHREAD_MUTEX_INITIALIZER;
+ int ret;
+
+ pthread_mutex_lock(&synth_lock);
+ ret = process_synthesized_event(tool, event, sample, machine);
+ pthread_mutex_unlock(&synth_lock);
+ return ret;
+}
+
+static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
struct record *rec = to;
+ if (record__comp_enabled(rec)) {
+ size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
+ bf = map->data;
+ }
+
rec->samples++;
- return record__write(rec, bf, size);
+ return record__write(rec, map, bf, size);
}
-static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
+#ifdef HAVE_EVENTFD_SUPPORT
+static int done_fd = -1;
+#endif
static void sig_handler(int sig)
{
@@ -150,6 +556,21 @@
signr = sig;
done = 1;
+#ifdef HAVE_EVENTFD_SUPPORT
+{
+ u64 tmp = 1;
+ /*
+ * It is possible for this signal handler to run after done is checked
+ * in the main loop, but before the perf counter fds are polled. If this
+ * happens, the poll() will continue to wait even though done is set,
+ * and will only break out if either another signal is received, or the
+ * counters are ready for read. To ensure the poll() doesn't sleep when
+ * done is set, use an eventfd (done_fd) to wake up the poll().
+ */
+ if (write(done_fd, &tmp, sizeof(tmp)) < 0)
+ pr_err("failed to signal wakeup fd, error: %m\n");
+}
+#endif // HAVE_EVENTFD_SUPPORT
}
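The race described in the handler's comment is a classic lost wakeup. Below is a minimal standalone sketch of the same pattern (plain POSIX, not perf code): the handler's write() to the eventfd makes poll() return even when the signal lands between the done check and the poll() call.

    /* build: cc -o wakeup wakeup.c ; run and press Ctrl-C */
    #include <poll.h>
    #include <signal.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    static volatile sig_atomic_t done;
    static int done_fd = -1;

    static void sig_handler(int sig)
    {
            uint64_t tmp = 1;

            (void)sig;
            done = 1;
            /* wake up poll() even if the signal raced with the done check */
            if (write(done_fd, &tmp, sizeof(tmp)) < 0)
                    ; /* nothing async-signal-safe to report here */
    }

    int main(void)
    {
            struct pollfd pfd = { .events = POLLIN };

            done_fd = eventfd(0, EFD_NONBLOCK);
            if (done_fd < 0)
                    return 1;
            pfd.fd = done_fd;
            signal(SIGINT, sig_handler);

            while (!done) {
                    /* without done_fd in the poll set, this could sleep
                     * forever if SIGINT arrived right after the !done check */
                    poll(&pfd, 1, -1);
            }
            puts("woken, exiting");
            return 0;
    }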
static void sigsegv_handler(int sig)
@@ -170,6 +591,7 @@
#ifdef HAVE_AUXTRACE_SUPPORT
static int record__process_auxtrace(struct perf_tool *tool,
+ struct mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2)
{
@@ -178,7 +600,7 @@
size_t padding;
u8 pad[8] = {0};
- if (!perf_data__is_pipe(data)) {
+ if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
off_t file_offset;
int fd = perf_data__fd(data);
int err;
@@ -197,21 +619,21 @@
if (padding)
padding = 8 - padding;
- record__write(rec, event, event->header.size);
- record__write(rec, data1, len1);
+ record__write(rec, map, event, event->header.size);
+ record__write(rec, map, data1, len1);
if (len2)
- record__write(rec, data2, len2);
- record__write(rec, &pad, padding);
+ record__write(rec, map, data2, len2);
+ record__write(rec, map, &pad, padding);
return 0;
}
static int record__auxtrace_mmap_read(struct record *rec,
- struct auxtrace_mmap *mm)
+ struct mmap *map)
{
int ret;
- ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
+ ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
record__process_auxtrace);
if (ret < 0)
return ret;
@@ -223,11 +645,11 @@
}
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
- struct auxtrace_mmap *mm)
+ struct mmap *map)
{
int ret;
- ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
+ ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
record__process_auxtrace,
rec->opts.auxtrace_snapshot_size);
if (ret < 0)
@@ -244,14 +666,13 @@
int i;
int rc = 0;
- for (i = 0; i < rec->evlist->nr_mmaps; i++) {
- struct auxtrace_mmap *mm =
- &rec->evlist->mmap[i].auxtrace_mmap;
+ for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &rec->evlist->mmap[i];
- if (!mm->base)
+ if (!map->auxtrace_mmap.base)
continue;
- if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
+ if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
rc = -1;
goto out;
}
@@ -260,17 +681,33 @@
return rc;
}
-static void record__read_auxtrace_snapshot(struct record *rec)
+static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
pr_debug("Recording AUX area tracing snapshot\n");
if (record__auxtrace_read_snapshot_all(rec) < 0) {
trigger_error(&auxtrace_snapshot_trigger);
} else {
- if (auxtrace_record__snapshot_finish(rec->itr))
+ if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
trigger_error(&auxtrace_snapshot_trigger);
else
trigger_ready(&auxtrace_snapshot_trigger);
}
+}
+
+static int record__auxtrace_snapshot_exit(struct record *rec)
+{
+ if (trigger_is_error(&auxtrace_snapshot_trigger))
+ return 0;
+
+ if (!auxtrace_record__snapshot_started &&
+ auxtrace_record__snapshot_start(rec->itr))
+ return -1;
+
+ record__read_auxtrace_snapshot(rec, true);
+ if (trigger_is_error(&auxtrace_snapshot_trigger))
+ return -1;
+
+ return 0;
}
static int record__auxtrace_init(struct record *rec)
@@ -288,6 +725,11 @@
if (err)
return err;
+ err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
+ rec->opts.auxtrace_sample_opts);
+ if (err)
+ return err;
+
return auxtrace_parse_filters(rec->evlist);
}
@@ -295,18 +737,25 @@
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
- struct auxtrace_mmap *mm __maybe_unused)
+ struct mmap *map __maybe_unused)
{
return 0;
}
static inline
-void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
+void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
+ bool on_exit __maybe_unused)
{
}
static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
+{
+ return 0;
+}
+
+static inline
+int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
return 0;
}
@@ -318,15 +767,90 @@
#endif
+static int record__config_text_poke(struct evlist *evlist)
+{
+ struct evsel *evsel;
+ int err;
+
+ /* Nothing to do if text poke is already configured */
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.text_poke)
+ return 0;
+ }
+
+ err = parse_events(evlist, "dummy:u", NULL);
+ if (err)
+ return err;
+
+ evsel = evlist__last(evlist);
+
+ evsel->core.attr.freq = 0;
+ evsel->core.attr.sample_period = 1;
+ evsel->core.attr.text_poke = 1;
+ evsel->core.attr.ksymbol = 1;
+
+ evsel->core.system_wide = true;
+ evsel->no_aux_samples = true;
+ evsel->immediate = true;
+
+ /* Text poke must be collected on all CPUs */
+ perf_cpu_map__put(evsel->core.own_cpus);
+ evsel->core.own_cpus = perf_cpu_map__new(NULL);
+ perf_cpu_map__put(evsel->core.cpus);
+ evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
+
+ evsel__set_sample_bit(evsel, TIME);
+
+ return 0;
+}
+
+static bool record__kcore_readable(struct machine *machine)
+{
+ char kcore[PATH_MAX];
+ int fd;
+
+ scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);
+
+ fd = open(kcore, O_RDONLY);
+ if (fd < 0)
+ return false;
+
+ close(fd);
+
+ return true;
+}
+
+static int record__kcore_copy(struct machine *machine, struct perf_data *data)
+{
+ char from_dir[PATH_MAX];
+ char kcore_dir[PATH_MAX];
+ int ret;
+
+ snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);
+
+ ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
+ if (ret)
+ return ret;
+
+ return kcore_copy(from_dir, kcore_dir);
+}
+
static int record__mmap_evlist(struct record *rec,
- struct perf_evlist *evlist)
+ struct evlist *evlist)
{
struct record_opts *opts = &rec->opts;
+ bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
+ opts->auxtrace_sample_mode;
char msg[512];
- if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
+ if (opts->affinity != PERF_AFFINITY_SYS)
+ cpu__setup_cpunode_map();
+
+ if (evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode) < 0) {
+ auxtrace_overwrite,
+ opts->nr_cblocks, opts->affinity,
+ opts->mmap_flush, opts->comp_level) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -355,43 +879,55 @@
static int record__open(struct record *rec)
{
char msg[BUFSIZ];
- struct perf_evsel *pos;
- struct perf_evlist *evlist = rec->evlist;
+ struct evsel *pos;
+ struct evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
- struct perf_evsel_config_term *err_term;
int rc = 0;
/*
- * For initial_delay we need to add a dummy event so that we can track
- * PERF_RECORD_MMAP while we wait for the initial delay to enable the
- * real events, the ones asked by the user.
+ * For initial_delay or system wide, we need to add a dummy event so
+ * that we can track PERF_RECORD_MMAP to cover the delay of waiting or
+ * event synthesis.
*/
- if (opts->initial_delay) {
- if (perf_evlist__add_dummy(evlist))
- return -ENOMEM;
+ if (opts->initial_delay || target__has_cpu(&opts->target)) {
+ pos = perf_evlist__get_tracking_event(evlist);
+ if (!evsel__is_dummy_event(pos)) {
+ /* Set up dummy event. */
+ if (evlist__add_dummy(evlist))
+ return -ENOMEM;
+ pos = evlist__last(evlist);
+ perf_evlist__set_tracking_event(evlist, pos);
+ }
- pos = perf_evlist__first(evlist);
- pos->tracking = 0;
- pos = perf_evlist__last(evlist);
- pos->tracking = 1;
- pos->attr.enable_on_exec = 1;
+ /*
+ * Enable the dummy event when the process is forked for
+ * initial_delay, immediately for system wide.
+ */
+ if (opts->initial_delay && !pos->immediate)
+ pos->core.attr.enable_on_exec = 1;
+ else
+ pos->immediate = 1;
}
perf_evlist__config(evlist, opts, &callchain_param);
evlist__for_each_entry(evlist, pos) {
try_again:
- if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
- if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
+ if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
+ if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
}
-
+ if ((errno == EINVAL || errno == EBADF) &&
+ pos->leader != pos &&
+ pos->weak_group) {
+ pos = perf_evlist__reset_weak_group(evlist, pos, true);
+ goto try_again;
+ }
rc = -errno;
- perf_evsel__open_strerror(pos, &opts->target,
- errno, msg, sizeof(msg));
+ evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
goto out;
}
@@ -399,18 +935,21 @@
pos->supported = true;
}
- if (perf_evlist__apply_filters(evlist, &pos)) {
- pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
- pos->filter, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
- rc = -1;
- goto out;
+ if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
+ pr_warning(
+"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
+"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
+"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
+"file is not found in the buildid cache or in the vmlinux path.\n\n"
+"Samples in kernel modules won't be resolved at all.\n\n"
+"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
+"even with a suitable vmlinux or kallsyms file.\n\n");
}
- if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
+ if (perf_evlist__apply_filters(evlist, &pos)) {
+ pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
+ pos->filter, evsel__name(pos), errno,
+ str_error_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
}
@@ -428,7 +967,7 @@
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel,
+ struct evsel *evsel,
struct machine *machine)
{
struct record *rec = container_of(tool, struct record, tool);
@@ -447,10 +986,9 @@
static int process_buildids(struct record *rec)
{
- struct perf_data *data = &rec->data;
struct perf_session *session = rec->session;
- if (data->size == 0)
+ if (perf_data__size(&rec->data) == 0)
return 0;
/*
@@ -510,13 +1048,61 @@
.type = PERF_RECORD_FINISHED_ROUND,
};
-static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
- bool overwrite)
+static void record__adjust_affinity(struct record *rec, struct mmap *map)
+{
+ if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+ !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
+ rec->affinity_mask.nbits)) {
+ bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
+ bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
+ map->affinity_mask.bits, rec->affinity_mask.nbits);
+ sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
+ (cpu_set_t *)rec->affinity_mask.bits);
+ if (verbose == 2)
+ mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread");
+ }
+}
+
+static size_t process_comp_header(void *record, size_t increment)
+{
+ struct perf_record_compressed *event = record;
+ size_t size = sizeof(*event);
+
+ if (increment) {
+ event->header.size += increment;
+ return increment;
+ }
+
+ event->header.type = PERF_RECORD_COMPRESSED;
+ event->header.size = size;
+
+ return size;
+}
+
+static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
+ void *src, size_t src_size)
+{
+ size_t compressed;
+ size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
+
+ compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
+ max_record_size, process_comp_header);
+
+ session->bytes_transferred += src_size;
+ session->bytes_compressed += compressed;
+
+ return compressed;
+}
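zstd_compress() above wraps zstd_compress_stream_to_records(), which chops the stream into PERF_RECORD_COMPRESSED records no larger than max_record_size and lets process_comp_header() stamp each record header; bytes_transferred and bytes_compressed feed the ratio printed at exit. As a rough illustration only, here is the plain one-shot libzstd API on a toy buffer (link with -lzstd; this is not the streaming helper perf uses):

    #include <stdio.h>
    #include <string.h>
    #include <zstd.h>

    int main(void)
    {
            char src[4096], dst[8192]; /* dst > ZSTD_compressBound(4096) */
            size_t n;

            memset(src, 'A', sizeof(src)); /* highly compressible input */
            n = ZSTD_compress(dst, sizeof(dst), src, sizeof(src), 1);
            if (ZSTD_isError(n)) {
                    fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(n));
                    return 1;
            }
            /* perf prints the analogous ratio at exit: transferred/compressed */
            printf("original %zu bytes, compressed %zu bytes, ratio %.3f\n",
                   sizeof(src), n, (double)sizeof(src) / (double)n);
            return 0;
    }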
+
+static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
+ bool overwrite, bool synch)
{
u64 bytes_written = rec->bytes_written;
int i;
int rc = 0;
- struct perf_mmap *maps;
+ struct mmap *maps;
+ int trace_fd = rec->data.file.fd;
+ off_t off = 0;
if (!evlist)
return 0;
@@ -528,29 +1114,56 @@
if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
return 0;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
+ if (record__aio_enabled(rec))
+ off = record__aio_get_pos(trace_fd);
- if (maps[i].base) {
- if (perf_mmap__push(&maps[i], rec, record__pushfn) != 0) {
- rc = -1;
- goto out;
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ u64 flush = 0;
+ struct mmap *map = &maps[i];
+
+ if (map->core.base) {
+ record__adjust_affinity(rec, map);
+ if (synch) {
+ flush = map->core.flush;
+ map->core.flush = 1;
}
+ if (!record__aio_enabled(rec)) {
+ if (perf_mmap__push(map, rec, record__pushfn) < 0) {
+ if (synch)
+ map->core.flush = flush;
+ rc = -1;
+ goto out;
+ }
+ } else {
+ if (record__aio_push(rec, map, &off) < 0) {
+ record__aio_set_pos(trace_fd, off);
+ if (synch)
+ map->core.flush = flush;
+ rc = -1;
+ goto out;
+ }
+ }
+ if (synch)
+ map->core.flush = flush;
}
- if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
- record__auxtrace_mmap_read(rec, mm) != 0) {
+ if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
+ !rec->opts.auxtrace_sample_mode &&
+ record__auxtrace_mmap_read(rec, map) != 0) {
rc = -1;
goto out;
}
}
+
+ if (record__aio_enabled(rec))
+ record__aio_set_pos(trace_fd, off);
/*
* Mark the round finished in case we wrote
* at least one event.
*/
if (bytes_written != rec->bytes_written)
- rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
+ rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
if (overwrite)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
@@ -558,15 +1171,15 @@
return rc;
}
-static int record__mmap_read_all(struct record *rec)
+static int record__mmap_read_all(struct record *rec, bool synch)
{
int err;
- err = record__mmap_read_evlist(rec, rec->evlist, false);
+ err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
if (err)
return err;
- return record__mmap_read_evlist(rec, rec->evlist, true);
+ return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}
static void record__init_features(struct record *rec)
@@ -580,7 +1193,7 @@
if (rec->no_buildid)
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
- if (!have_tracepoints(&rec->evlist->entries))
+ if (!have_tracepoints(&rec->evlist->core.entries))
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
if (!rec->opts.branch_stack)
@@ -588,6 +1201,16 @@
if (!rec->opts.full_auxtrace)
perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
+
+ if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
+ perf_header__clear_feat(&session->header, HEADER_CLOCKID);
+
+ if (!rec->opts.use_clockid)
+ perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
+
+ perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
+ if (!record__comp_enabled(rec))
+ perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
perf_header__clear_feat(&session->header, HEADER_STAT);
}
@@ -602,7 +1225,7 @@
return;
rec->session->header.data_size += rec->bytes_written;
- data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
+ data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
if (!rec->no_buildid) {
process_buildids(rec);
@@ -618,7 +1241,7 @@
static int record__synthesize_workload(struct record *rec, bool tail)
{
int err;
- struct thread_map *thread_map;
+ struct perf_thread_map *thread_map;
if (rec->opts.tail_synthesize != tail)
return 0;
@@ -630,9 +1253,8 @@
err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
process_synthesized_event,
&rec->session->machines.host,
- rec->opts.sample_address,
- rec->opts.proc_map_timeout);
- thread_map__put(thread_map);
+ rec->opts.sample_address);
+ perf_thread_map__put(thread_map);
return err;
}
@@ -643,9 +1265,12 @@
{
struct perf_data *data = &rec->data;
int fd, err;
+ char *new_filename;
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
+
+ record__aio_mmap_read_sync(rec);
record__synthesize(rec, true);
if (target__none(&rec->opts.target))
@@ -661,7 +1286,7 @@
fd = perf_data__switch(data, timestamp,
rec->session->header.data_offset,
- at_exit);
+ at_exit, &new_filename);
if (fd >= 0 && !at_exit) {
rec->bytes_written = 0;
rec->session->header.data_size = 0;
@@ -669,7 +1294,22 @@
if (!quiet)
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
- data->file.path, timestamp);
+ data->path, timestamp);
+
+ if (rec->switch_output.num_files) {
+ int n = rec->switch_output.cur_file + 1;
+
+ if (n >= rec->switch_output.num_files)
+ n = 0;
+ rec->switch_output.cur_file = n;
+ if (rec->switch_output.filenames[n]) {
+ remove(rec->switch_output.filenames[n]);
+ zfree(&rec->switch_output.filenames[n]);
+ }
+ rec->switch_output.filenames[n] = new_filename;
+ } else {
+ free(new_filename);
+ }
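With --switch-max-files=N (switch_output.num_files), rotated output names are kept in a ring: before a slot is reused the old file is removed, so at most N dumps remain on disk. For example `perf record --switch-output=1G --switch-max-files=4 ...` keeps only the four most recent ~1 GB chunks.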
/* Output tracking events */
if (!at_exit) {
@@ -709,23 +1349,14 @@
static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);
-int __weak
-perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
- struct perf_tool *tool __maybe_unused,
- perf_event__handler_t process __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- return 0;
-}
-
static const struct perf_event_mmap_page *
-perf_evlist__pick_pc(struct perf_evlist *evlist)
+perf_evlist__pick_pc(struct evlist *evlist)
{
if (evlist) {
- if (evlist->mmap && evlist->mmap[0].base)
- return evlist->mmap[0].base;
- if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
- return evlist->overwrite_mmap[0].base;
+ if (evlist->mmap && evlist->mmap[0].core.base)
+ return evlist->mmap[0].core.base;
+ if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
+ return evlist->overwrite_mmap[0].core.base;
}
return NULL;
}
@@ -749,6 +1380,7 @@
struct perf_tool *tool = &rec->tool;
int fd = perf_data__fd(data);
int err = 0;
+ event_op f = process_synthesized_event;
if (rec->opts.tail_synthesize != tail)
return 0;
@@ -758,7 +1390,7 @@
* We need to synthesize events first, because some
* features works on top of them (on report side).
*/
- err = perf_event__synthesize_attrs(tool, session,
+ err = perf_event__synthesize_attrs(tool, rec->evlist,
process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
@@ -772,7 +1404,7 @@
return err;
}
- if (have_tracepoints(&rec->evlist->entries)) {
+ if (have_tracepoints(&rec->evlist->core.entries)) {
/*
* FIXME err <= 0 here actually means that
* there were no tracepoints so its not really
@@ -795,6 +1427,15 @@
process_synthesized_event, machine);
if (err)
goto out;
+
+ /* Synthesize id_index before auxtrace_info */
+ if (rec->opts.auxtrace_sample_mode) {
+ err = perf_event__synthesize_id_index(tool,
+ process_synthesized_event,
+ session->evlist, machine);
+ if (err)
+ goto out;
+ }
if (rec->opts.full_auxtrace) {
err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
@@ -829,7 +1470,7 @@
if (err)
goto out;
- err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
+ err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
process_synthesized_event,
NULL);
if (err < 0) {
@@ -837,18 +1478,129 @@
return err;
}
- err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
+ err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
process_synthesized_event, NULL);
if (err < 0) {
pr_err("Couldn't synthesize cpu map.\n");
return err;
}
- err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
- process_synthesized_event, opts->sample_address,
- opts->proc_map_timeout, 1);
+ err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
+ machine, opts);
+ if (err < 0)
+ pr_warning("Couldn't synthesize bpf events.\n");
+
+ err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
+ machine);
+ if (err < 0)
+ pr_warning("Couldn't synthesize cgroup events.\n");
+
+ if (rec->opts.nr_threads_synthesize > 1) {
+ perf_set_multithreaded();
+ f = process_locked_synthesized_event;
+ }
+
+ err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
+ f, opts->sample_address,
+ rec->opts.nr_threads_synthesize);
+
+ if (rec->opts.nr_threads_synthesize > 1)
+ perf_set_singlethreaded();
+
out:
return err;
+}
+
+static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
+{
+ struct record *rec = data;
+ pthread_kill(rec->thread_id, SIGUSR2);
+ return 0;
+}
+
+static int record__setup_sb_evlist(struct record *rec)
+{
+ struct record_opts *opts = &rec->opts;
+
+ if (rec->sb_evlist != NULL) {
+ /*
+ * We get here if --switch-output-event populated the
+ * sb_evlist, so associate a callback that will send a SIGUSR2
+ * to the main thread.
+ */
+ evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
+ rec->thread_id = pthread_self();
+ }
+#ifdef HAVE_LIBBPF_SUPPORT
+ if (!opts->no_bpf_event) {
+ if (rec->sb_evlist == NULL) {
+ rec->sb_evlist = evlist__new();
+
+ if (rec->sb_evlist == NULL) {
+ pr_err("Couldn't create side band evlist.\n.");
+ return -1;
+ }
+ }
+
+ if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
+ pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
+ return -1;
+ }
+ }
+#endif
+ if (perf_evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+
+ return 0;
+}
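record__setup_sb_evlist() funnels two users through one side-band thread: events given with --switch-output-event, whose arrival makes record__process_signal_event() send SIGUSR2 to the main thread and thus rotate the output, and (with libbpf) the PERF_RECORD_BPF_EVENT stream used to annotate BPF programs. For example `perf record --switch-output-event=probe:myprobe ...`, with probe:myprobe a hypothetical pre-created probe, switches files whenever that event fires.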
+
+static int record__init_clock(struct record *rec)
+{
+ struct perf_session *session = rec->session;
+ struct timespec ref_clockid;
+ struct timeval ref_tod;
+ u64 ref;
+
+ if (!rec->opts.use_clockid)
+ return 0;
+
+ if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
+ session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
+
+ session->header.env.clock.clockid = rec->opts.clockid;
+
+ if (gettimeofday(&ref_tod, NULL) != 0) {
+ pr_err("gettimeofday failed, cannot set reference time.\n");
+ return -1;
+ }
+
+ if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
+ pr_err("clock_gettime failed, cannot set reference time.\n");
+ return -1;
+ }
+
+ ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
+ (u64) ref_tod.tv_usec * NSEC_PER_USEC;
+
+ session->header.env.clock.tod_ns = ref;
+
+ ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
+ (u64) ref_clockid.tv_nsec;
+
+ session->header.env.clock.clockid_ns = ref;
+ return 0;
+}
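record__init_clock() stores a paired reading of the time of day (gettimeofday()) and of the clock chosen with -k/--clockid, so sample timestamps can later be converted to wall-clock time. For example `perf record -k mono ...` records with CLOCK_MONOTONIC; the "mono" alias lives in the clockid table that this patch moves out to util/clockid.h (included above).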
+
+static void hit_auxtrace_snapshot_trigger(struct record *rec)
+{
+ if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
+ trigger_hit(&auxtrace_snapshot_trigger);
+ auxtrace_record__snapshot_started = 1;
+ if (auxtrace_record__snapshot_start(rec->itr))
+ trigger_error(&auxtrace_snapshot_trigger);
+ }
}
static int __cmd_record(struct record *rec, int argc, const char **argv)
@@ -863,6 +1615,8 @@
struct perf_session *session;
bool disabled = false, draining = false;
int fd;
+ float ratio = 0;
+ enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
atexit(record__sig_exit);
signal(SIGCHLD, sig_handler);
@@ -872,6 +1626,15 @@
if (rec->opts.record_namespaces)
tool->namespace_events = true;
+
+ if (rec->opts.record_cgroup) {
+#ifdef HAVE_FILE_HANDLE
+ tool->cgroup_events = true;
+#else
+ pr_err("cgroup tracking is not supported\n");
+ return -1;
+#endif
+ }
if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
signal(SIGUSR2, snapshot_sig_handler);
@@ -884,13 +1647,44 @@
}
session = perf_session__new(data, false, tool);
- if (session == NULL) {
+ if (IS_ERR(session)) {
pr_err("Perf session creation failed.\n");
- return -1;
+ return PTR_ERR(session);
}
fd = perf_data__fd(data);
rec->session = session;
+
+ if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
+ pr_err("Compression initialization failed.\n");
+ return -1;
+ }
+#ifdef HAVE_EVENTFD_SUPPORT
+ done_fd = eventfd(0, EFD_NONBLOCK);
+ if (done_fd < 0) {
+ pr_err("Failed to create wakeup eventfd, error: %m\n");
+ status = -1;
+ goto out_delete_session;
+ }
+ err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
+ if (err < 0) {
+ pr_err("Failed to add wakeup eventfd to poll list\n");
+ status = err;
+ goto out_delete_session;
+ }
+#endif // HAVE_EVENTFD_SUPPORT
+
+ session->header.env.comp_type = PERF_COMP_ZSTD;
+ session->header.env.comp_level = rec->opts.comp_level;
+
+ if (rec->opts.kcore &&
+ !record__kcore_readable(&session->machines.host)) {
+ pr_err("ERROR: kcore is not readable.\n");
+ return -1;
+ }
+
+ if (record__init_clock(rec))
+ return -1;
record__init_features(rec);
@@ -911,12 +1705,21 @@
* because we synthesize event name through the pipe
* and need the id for that.
*/
- if (data->is_pipe && rec->evlist->nr_entries == 1)
+ if (data->is_pipe && rec->evlist->core.nr_entries == 1)
rec->opts.sample_id = true;
if (record__open(rec) != 0) {
err = -1;
goto out_child;
+ }
+ session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
+
+ if (rec->opts.kcore) {
+ err = record__kcore_copy(&session->machines.host, data);
+ if (err) {
+ pr_err("ERROR: Failed to copy kcore\n");
+ goto out_child;
+ }
}
err = bpf__apply_obj_config();
@@ -933,7 +1736,7 @@
* Normally perf_session__new would do this, but it doesn't have the
* evlist.
*/
- if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
+ if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
rec->tool.ordered_events = false;
}
@@ -951,13 +1754,17 @@
goto out_child;
}
+ err = -1;
if (!rec->no_buildid
&& !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
pr_err("Couldn't generate buildids. "
"Use --no-buildid to profile anyway.\n");
- err = -1;
goto out_child;
}
+
+ err = record__setup_sb_evlist(rec);
+ if (err)
+ goto out_child;
err = record__synthesize(rec, false);
if (err < 0)
@@ -980,7 +1787,7 @@
* so don't spoil it by prematurely enabling them.
*/
if (!target__none(&opts->target) && !opts->initial_delay)
- perf_evlist__enable(rec->evlist);
+ evlist__enable(rec->evlist);
/*
* Let the child rip
@@ -1031,9 +1838,16 @@
perf_evlist__start_workload(rec->evlist);
}
+ if (evlist__initialize_ctlfd(rec->evlist, opts->ctl_fd, opts->ctl_fd_ack))
+ goto out_child;
+
if (opts->initial_delay) {
- usleep(opts->initial_delay * USEC_PER_MSEC);
- perf_evlist__enable(rec->evlist);
+ pr_info(EVLIST_DISABLED_MSG);
+ if (opts->initial_delay > 0) {
+ usleep(opts->initial_delay * USEC_PER_MSEC);
+ evlist__enable(rec->evlist);
+ pr_info(EVLIST_ENABLED_MSG);
+ }
}
trigger_ready(&auxtrace_snapshot_trigger);
@@ -1053,7 +1867,7 @@
if (trigger_is_hit(&switch_output_trigger) || done || draining)
perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
- if (record__mmap_read_all(rec) < 0) {
+ if (record__mmap_read_all(rec, false) < 0) {
trigger_error(&auxtrace_snapshot_trigger);
trigger_error(&switch_output_trigger);
err = -1;
@@ -1063,7 +1877,7 @@
if (auxtrace_record__snapshot_started) {
auxtrace_record__snapshot_started = 0;
if (!trigger_is_error(&auxtrace_snapshot_trigger))
- record__read_auxtrace_snapshot(rec);
+ record__read_auxtrace_snapshot(rec, false);
if (trigger_is_error(&auxtrace_snapshot_trigger)) {
pr_err("AUX area tracing snapshot failed\n");
err = -1;
@@ -1112,7 +1926,7 @@
if (hits == rec->samples) {
if (done || draining)
break;
- err = perf_evlist__poll(rec->evlist, -1);
+ err = evlist__poll(rec->evlist, -1);
/*
* Propagate error, only if there's any. Ignore positive
* number of returned events and interrupt error.
@@ -1121,8 +1935,27 @@
err = 0;
waking++;
- if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
+ if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
draining = true;
+ }
+
+ if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
+ switch (cmd) {
+ case EVLIST_CTL_CMD_ENABLE:
+ pr_info(EVLIST_ENABLED_MSG);
+ break;
+ case EVLIST_CTL_CMD_DISABLE:
+ pr_info(EVLIST_DISABLED_MSG);
+ break;
+ case EVLIST_CTL_CMD_SNAPSHOT:
+ hit_auxtrace_snapshot_trigger(rec);
+ evlist__ctlfd_ack(rec->evlist);
+ break;
+ case EVLIST_CTL_CMD_ACK:
+ case EVLIST_CTL_CMD_UNSUPPORTED:
+ default:
+ break;
+ }
}
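These are the textual commands accepted on the control descriptor set up with the new --control option. A typical (hypothetical) flow from a second shell:

    mkfifo ctl.fifo ack.fifo
    perf record --control=fifo:ctl.fifo,ack.fifo -D -1 -a &
    echo enable  > ctl.fifo    # start counting (events began disabled)
    echo disable > ctl.fifo    # stop counting

Here -D -1 uses the new signed --delay semantics to start with events disabled until an 'enable' command arrives.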
/*
@@ -1132,12 +1965,16 @@
*/
if (done && !disabled && !target__none(&opts->target)) {
trigger_off(&auxtrace_snapshot_trigger);
- perf_evlist__disable(rec->evlist);
+ evlist__disable(rec->evlist);
disabled = true;
}
}
+
trigger_off(&auxtrace_snapshot_trigger);
trigger_off(&switch_output_trigger);
+
+ if (opts->auxtrace_snapshot_on_exit)
+ record__auxtrace_snapshot_exit(rec);
if (forks && workload_exec_errno) {
char msg[STRERR_BUFSIZE];
@@ -1154,6 +1991,15 @@
record__synthesize_workload(rec, true);
out_child:
+ evlist__finalize_ctlfd(rec->evlist);
+ record__mmap_read_all(rec, true);
+ record__aio_mmap_read_sync(rec);
+
+ if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
+ ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
+ session->header.env.comp_ratio = ratio + 0.5;
+ }
+
if (forks) {
int exit_status;
@@ -1200,13 +2046,27 @@
else
samples[0] = '\0';
- fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
+ fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
perf_data__size(data) / 1024.0 / 1024.0,
- data->file.path, postfix, samples);
+ data->path, postfix, samples);
+ if (ratio) {
+ fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
+ rec->session->bytes_transferred / 1024.0 / 1024.0,
+ ratio);
+ }
+ fprintf(stderr, " ]\n");
}
out_delete_session:
+#ifdef HAVE_EVENTFD_SUPPORT
+ if (done_fd >= 0)
+ close(done_fd);
+#endif
+ zstd_fini(&session->zstd_data);
perf_session__delete(session);
+
+ if (!opts->no_bpf_event)
+ perf_evlist__stop_sb_thread(rec->sb_evlist);
return status;
}
@@ -1287,91 +2147,57 @@
var = "call-graph.record-mode";
return perf_default_config(var, value, cb);
}
+#ifdef HAVE_AIO_SUPPORT
+ if (!strcmp(var, "record.aio")) {
+ rec->opts.nr_cblocks = strtol(value, NULL, 0);
+ if (!rec->opts.nr_cblocks)
+ rec->opts.nr_cblocks = nr_cblocks_default;
+ }
+#endif
return 0;
}
-struct clockid_map {
- const char *name;
- int clockid;
-};
-#define CLOCKID_MAP(n, c) \
- { .name = n, .clockid = (c), }
-
-#define CLOCKID_END { .name = NULL, }
-
-
-/*
- * Add the missing ones, we need to build on many distros...
- */
-#ifndef CLOCK_MONOTONIC_RAW
-#define CLOCK_MONOTONIC_RAW 4
-#endif
-#ifndef CLOCK_BOOTTIME
-#define CLOCK_BOOTTIME 7
-#endif
-#ifndef CLOCK_TAI
-#define CLOCK_TAI 11
-#endif
-
-static const struct clockid_map clockids[] = {
- /* available for all events, NMI safe */
- CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
- CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
-
- /* available for some events */
- CLOCKID_MAP("realtime", CLOCK_REALTIME),
- CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
- CLOCKID_MAP("tai", CLOCK_TAI),
-
- /* available for the lazy */
- CLOCKID_MAP("mono", CLOCK_MONOTONIC),
- CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
- CLOCKID_MAP("real", CLOCK_REALTIME),
- CLOCKID_MAP("boot", CLOCK_BOOTTIME),
-
- CLOCKID_END,
-};
-
-static int parse_clockid(const struct option *opt, const char *str, int unset)
+static int record__parse_affinity(const struct option *opt, const char *str, int unset)
{
struct record_opts *opts = (struct record_opts *)opt->value;
- const struct clockid_map *cm;
- const char *ostr = str;
+
+ if (unset || !str)
+ return 0;
+
+ if (!strcasecmp(str, "node"))
+ opts->affinity = PERF_AFFINITY_NODE;
+ else if (!strcasecmp(str, "cpu"))
+ opts->affinity = PERF_AFFINITY_CPU;
+
+ return 0;
+}
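record__parse_affinity() selects how the trace reading thread is pinned while draining buffers: node moves it onto the NUMA node of the mmap buffer being processed, cpu onto that buffer's exact CPU (applied in record__adjust_affinity() above), and the default PERF_AFFINITY_SYS leaves the mask untouched. E.g. `perf record --affinity=node ...` is meant to cut remote-node memory traffic on large machines.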
+
+static int parse_output_max_size(const struct option *opt,
+ const char *str, int unset)
+{
+ unsigned long *s = (unsigned long *)opt->value;
+ static struct parse_tag tags_size[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+ unsigned long val;
if (unset) {
- opts->use_clockid = 0;
+ *s = 0;
return 0;
}
- /* no arg passed */
- if (!str)
+ val = parse_tag_value(str, tags_size);
+ if (val != (unsigned long) -1) {
+ *s = val;
return 0;
-
- /* no setting it twice */
- if (opts->use_clockid)
- return -1;
-
- opts->use_clockid = true;
-
- /* if its a number, we're done */
- if (sscanf(str, "%d", &opts->clockid) == 1)
- return 0;
-
- /* allow a "CLOCK_" prefix to the name */
- if (!strncasecmp(str, "CLOCK_", 6))
- str += 6;
-
- for (cm = clockids; cm->name; cm++) {
- if (!strcasecmp(str, cm->name)) {
- opts->clockid = cm->clockid;
- return 0;
- }
}
- opts->use_clockid = false;
- ui__warning("unknown clockid %s, check man page\n", ostr);
return -1;
}
@@ -1418,9 +2244,18 @@
return ret;
}
+static int parse_control_option(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct record_opts *opts = opt->value;
+
+ return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
+}
+
static void switch_output_size_warn(struct record *rec)
{
- u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
+ u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
struct switch_output *s = &rec->switch_output;
wakeup_size /= 2;
@@ -1454,10 +2289,19 @@
};
unsigned long val;
+ /*
+ * If we're using --switch-output-events, then we imply its
+ * --switch-output=signal, as we'll send a SIGUSR2 from the side band
+ * thread to its parent.
+ */
+ if (rec->switch_output_event_set)
+ goto do_signal;
+
if (!s->set)
return 0;
if (!strcmp(s->str, "signal")) {
+do_signal:
s->signal = true;
pr_debug("switch-output with SIGUSR2 signal\n");
goto enabled;
@@ -1497,6 +2341,31 @@
};
const char * const *record_usage = __record_usage;
+static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine)
+{
+ /*
+ * We already have the kernel maps, put in place via perf_session__create_kernel_maps()
+ * no need to add them twice.
+ */
+ if (!(event->header.misc & PERF_RECORD_MISC_USER))
+ return 0;
+ return perf_event__process_mmap(tool, event, sample, machine);
+}
+
+static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine)
+{
+ /*
+ * We already have the kernel maps, put in place via perf_session__create_kernel_maps()
+ * no need to add them twice.
+ */
+ if (!(event->header.misc & PERF_RECORD_MISC_USER))
+ return 0;
+
+ return perf_event__process_mmap2(tool, event, sample, machine);
+}
+
/*
* XXX Ideally would be local to cmd_record() and passed to a record__new
* because we need to have access to it in record__exit, that is called
@@ -1518,7 +2387,10 @@
.uses_mmap = true,
.default_per_cpu = true,
},
- .proc_map_timeout = 500,
+ .mmap_flush = MMAP_FLUSH_DEFAULT,
+ .nr_threads_synthesize = 1,
+ .ctl_fd = -1,
+ .ctl_fd_ack = -1,
},
.tool = {
.sample = process_sample_event,
@@ -1526,8 +2398,8 @@
.exit = perf_event__process_exit,
.comm = perf_event__process_comm,
.namespaces = perf_event__process_namespaces,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
+ .mmap = build_id__process_mmap,
+ .mmap2 = build_id__process_mmap2,
.ordered_events = true,
},
};
@@ -1568,7 +2440,7 @@
OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
- OPT_STRING('o', "output", &record.data.file.path, "file",
+ OPT_STRING('o', "output", &record.data.path, "file",
"output file name"),
OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
&record.opts.no_inherit_set,
@@ -1576,6 +2448,7 @@
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
+ OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -1584,6 +2457,9 @@
OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
"number of mmap data pages and AUX area tracing mmap pages",
record__parse_mmap_pages),
+ OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
+ "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
+ record__mmap_flush_parse),
OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
@@ -1617,8 +2493,9 @@
OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
"monitor event in cgroup name only",
parse_cgroups),
- OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
- "ms to wait before starting measurement after program start"),
+ OPT_INTEGER('D', "delay", &record.opts.initial_delay,
+ "ms to wait before starting measurement after program start (-1: start with events disabled)"),
+ OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
"user to profile"),
@@ -1637,10 +2514,10 @@
"use per-thread mmaps"),
OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
"sample selected machine registers on interrupt,"
- " use -I ? to list register names", parse_regs),
+ " use '-I?' to list register names", parse_intr_regs),
OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
"sample selected machine registers on interrupt,"
- " use -I ? to list register names", parse_regs),
+ " use '--user-regs=?' to list register names", parse_user_regs),
OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
"Record running/enabled time of read (:S) events"),
OPT_CALLBACK('k', "clockid", &record.opts,
@@ -1648,18 +2525,27 @@
parse_clockid),
OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
"opts", "AUX area tracing Snapshot Mode", ""),
- OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
+ OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
+ "opts", "sample AUX area", ""),
+ OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
"Record namespaces events"),
- OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
- "Record context switch events"),
+ OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
+ "Record cgroup events"),
+ OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
+ &record.opts.record_switch_events_set,
+ "Record context switch events"),
OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
"Configure all used events to run in kernel space.",
PARSE_OPT_EXCLUSIVE),
OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
"Configure all used events to run in user space.",
PARSE_OPT_EXCLUSIVE),
+ OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
+ "collect kernel callchains"),
+ OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
+ "collect user callchains"),
OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
"clang binary to use for compiling BPF scriptlets"),
OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
@@ -1673,11 +2559,45 @@
OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
"Record timestamp boundary (time of first/last samples)"),
OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
- &record.switch_output.set, "signal,size,time",
- "Switch output when receive SIGUSR2 or cross size,time threshold",
+ &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
+ "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
"signal"),
+ OPT_CALLBACK_SET(0, "switch-output-event", &record.sb_evlist, &record.switch_output_event_set, "switch output event",
+ "switch output event selector. use 'perf list' to list available events",
+ parse_events_option_new_evlist),
+ OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
+ "Limit number of switch output generated files"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
"Parse options then exit"),
+#ifdef HAVE_AIO_SUPPORT
+ OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
+ &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
+ record__aio_parse),
+#endif
+ OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
+ "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
+ record__parse_affinity),
+#ifdef HAVE_ZSTD_SUPPORT
+ OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
+ "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
+ record__parse_comp_level),
+#endif
+ OPT_CALLBACK(0, "max-size", &record.output_max_size,
+ "size", "Limit the maximum size of the output file", parse_output_max_size),
+ OPT_UINTEGER(0, "num-thread-synthesize",
+ &record.opts.nr_threads_synthesize,
+ "number of threads to run for event synthesis"),
+#ifdef HAVE_LIBPFM
+ OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
+ "libpfm4 event selector. use 'perf list' to list available events",
+ parse_libpfm_events_option),
+#endif
+ OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
+ "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
+ "\t\t\t 'snapshot': AUX area tracing snapshot).\n"
+ "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
+ "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
+ parse_control_option),
OPT_END()
};
@@ -1712,7 +2632,9 @@
# undef REASON
#endif
- rec->evlist = perf_evlist__new();
+ rec->opts.affinity = PERF_AFFINITY_SYS;
+
+ rec->evlist = evlist__new();
if (rec->evlist == NULL)
return -ENOMEM;
@@ -1734,21 +2656,41 @@
"cgroup monitoring only available in system-wide mode");
}
+
+ if (rec->opts.kcore)
+ rec->data.is_dir = true;
+
+ if (rec->opts.comp_level != 0) {
+ pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
+ rec->no_buildid = true;
+ }
+
if (rec->opts.record_switch_events &&
!perf_can_record_switch_events()) {
ui__error("kernel does not support recording context switch events\n");
parse_options_usage(record_usage, record_options, "switch-events", 0);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_opts;
}
if (switch_output_setup(rec)) {
parse_options_usage(record_usage, record_options, "switch-output", 0);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_opts;
}
if (rec->switch_output.time) {
signal(SIGALRM, alarm_sig_handler);
alarm(rec->switch_output.time);
+ }
+
+ if (rec->switch_output.num_files) {
+ rec->switch_output.filenames = calloc(sizeof(char *),
+ rec->switch_output.num_files);
+ if (!rec->switch_output.filenames) {
+ err = -EINVAL;
+ goto out_opts;
+ }
}
/*
@@ -1758,6 +2700,17 @@
symbol_conf.allow_aliases = true;
symbol__init(NULL);
+
+ if (rec->opts.affinity != PERF_AFFINITY_SYS) {
+ rec->affinity_mask.nbits = cpu__max_cpu();
+ rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
+ if (!rec->affinity_mask.bits) {
+ pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
+ err = -ENOMEM;
+ goto out_opts;
+ }
+ pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits);
+ }
err = record__auxtrace_init(rec);
if (err)
@@ -1775,16 +2728,6 @@
}
err = -ENOMEM;
-
- if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
- pr_warning(
-"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
-"check /proc/sys/kernel/kptr_restrict.\n\n"
-"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
-"file is not found in the buildid cache or in the vmlinux path.\n\n"
-"Samples in kernel modules won't be resolved at all.\n\n"
-"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
-"even with a suitable vmlinux or kallsyms file.\n\n");
if (rec->no_buildid_cache || rec->no_buildid) {
disable_buildid_cache();
@@ -1820,8 +2763,8 @@
if (record.opts.overwrite)
record.opts.tail_synthesize = true;
- if (rec->evlist->nr_entries == 0 &&
- __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
+ if (rec->evlist->core.nr_entries == 0 &&
+ __evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
pr_err("Not enough memory for event selector list\n");
goto out;
}
@@ -1865,16 +2808,38 @@
if (rec->opts.full_auxtrace)
rec->buildid_all = true;
+ if (rec->opts.text_poke) {
+ err = record__config_text_poke(rec->evlist);
+ if (err) {
+ pr_err("record__config_text_poke failed, error %d\n", err);
+ goto out;
+ }
+ }
+
if (record_opts__config(&rec->opts)) {
err = -EINVAL;
goto out;
}
+ if (rec->opts.nr_cblocks > nr_cblocks_max)
+ rec->opts.nr_cblocks = nr_cblocks_max;
+ pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+
+ pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
+ pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
+
+ if (rec->opts.comp_level > comp_level_max)
+ rec->opts.comp_level = comp_level_max;
+ pr_debug("comp level: %d\n", rec->opts.comp_level);
+
err = __cmd_record(&record, argc, argv);
out:
- perf_evlist__delete(rec->evlist);
+ bitmap_free(rec->affinity_mask.bits);
+ evlist__delete(rec->evlist);
symbol__exit();
auxtrace_record__free(rec->itr);
+out_opts:
+ evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
return err;
}
@@ -1882,12 +2847,7 @@
{
struct record *rec = &record;
- if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
- trigger_hit(&auxtrace_snapshot_trigger);
- auxtrace_record__snapshot_started = 1;
- if (auxtrace_record__snapshot_start(record.itr))
- trigger_error(&auxtrace_snapshot_trigger);
- }
+ hit_auxtrace_snapshot_trigger(rec);
if (switch_output_signal(rec))
trigger_hit(&switch_output_trigger);
--
Gitblit v1.6.2