@@ -1,141 +1,98 @@
 // SPDX-License-Identifier: GPL-2.0
+#include "debug.h"
 #include "evlist.h"
 #include "evsel.h"
-#include "cpumap.h"
+#include "evsel_config.h"
 #include "parse-events.h"
 #include <errno.h>
+#include <limits.h>
+#include <stdlib.h>
 #include <api/fs/fs.h>
 #include <subcmd/parse-options.h>
-#include "util.h"
+#include <perf/cpumap.h>
 #include "cloexec.h"
+#include "util/perf_api_probe.h"
+#include "record.h"
+#include "../perf-sys.h"
+#include "topdown.h"
 
-typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
-
-static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
+/*
+ * evsel__config_leader_sampling() uses special rules for leader sampling.
+ * However, if the leader is an AUX area event, then assume the event to sample
+ * is the next event.
+ */
+static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
 {
-	struct perf_evlist *evlist;
-	struct perf_evsel *evsel;
-	unsigned long flags = perf_event_open_cloexec_flag();
-	int err = -EAGAIN, fd;
-	static pid_t pid = -1;
+	struct evsel *leader = evsel->leader;
 
-	evlist = perf_evlist__new();
-	if (!evlist)
-		return -ENOMEM;
-
-	if (parse_events(evlist, str, NULL))
-		goto out_delete;
-
-	evsel = perf_evlist__first(evlist);
-
-	while (1) {
-		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
-		if (fd < 0) {
-			if (pid == -1 && errno == EACCES) {
-				pid = 0;
-				continue;
-			}
-			goto out_delete;
+	if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader)) {
+		evlist__for_each_entry(evlist, evsel) {
+			if (evsel->leader == leader && evsel != evsel->leader)
+				return evsel;
 		}
-		break;
 	}
-	close(fd);
 
-	fn(evsel);
+	return leader;
+}
 
-	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
-	if (fd < 0) {
-		if (errno == EINVAL)
-			err = -EINVAL;
-		goto out_delete;
+static u64 evsel__config_term_mask(struct evsel *evsel)
+{
+	struct evsel_config_term *term;
+	struct list_head *config_terms = &evsel->config_terms;
+	u64 term_types = 0;
+
+	list_for_each_entry(term, config_terms, list) {
+		term_types |= 1 << term->type;
 	}
-	close(fd);
-	err = 0;
-
-out_delete:
-	perf_evlist__delete(evlist);
-	return err;
+	return term_types;
 }
 
-static bool perf_probe_api(setup_probe_fn_t fn)
+static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
 {
-	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
-	struct cpu_map *cpus;
-	int cpu, ret, i = 0;
+	struct perf_event_attr *attr = &evsel->core.attr;
+	struct evsel *leader = evsel->leader;
+	struct evsel *read_sampler;
+	u64 term_types, freq_mask;
 
-	cpus = cpu_map__new(NULL);
-	if (!cpus)
-		return false;
-	cpu = cpus->map[0];
-	cpu_map__put(cpus);
+	if (!leader->sample_read)
+		return;
 
-	do {
-		ret = perf_do_probe_api(fn, cpu, try[i++]);
-		if (!ret)
-			return true;
-	} while (ret == -EAGAIN && try[i]);
+	read_sampler = evsel__read_sampler(evsel, evlist);
 
-	return false;
+	if (evsel == read_sampler)
+		return;
+
+	term_types = evsel__config_term_mask(evsel);
+	/*
+	 * Disable sampling for all group members except those with explicit
+	 * config terms or the leader. In the case of an AUX area event, the 2nd
+	 * event in the group is the one that 'leads' the sampling.
+	 */
+	freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
+	if ((term_types & freq_mask) == 0) {
+		attr->freq = 0;
+		attr->sample_freq = 0;
+		attr->sample_period = 0;
+	}
+	if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
+		attr->write_backward = 0;
+
+	/*
+	 * We don't get a sample for slave events, we make them when delivering
+	 * the group leader sample. Set the slave event to follow the master
+	 * sample_type to ease up reporting.
+	 * An AUX area event also has sample_type requirements, so also include
+	 * the sample type bits from the leader's sample_type to cover that
+	 * case.
+	 */
+	attr->sample_type = read_sampler->core.attr.sample_type |
+			    leader->core.attr.sample_type;
 }
 
-static void perf_probe_sample_identifier(struct perf_evsel *evsel)
-{
-	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
-}
-
-static void perf_probe_comm_exec(struct perf_evsel *evsel)
-{
-	evsel->attr.comm_exec = 1;
-}
-
-static void perf_probe_context_switch(struct perf_evsel *evsel)
-{
-	evsel->attr.context_switch = 1;
-}
-
-bool perf_can_sample_identifier(void)
-{
-	return perf_probe_api(perf_probe_sample_identifier);
-}
-
-static bool perf_can_comm_exec(void)
-{
-	return perf_probe_api(perf_probe_comm_exec);
-}
-
-bool perf_can_record_switch_events(void)
-{
-	return perf_probe_api(perf_probe_context_switch);
-}
-
-bool perf_can_record_cpu_wide(void)
-{
-	struct perf_event_attr attr = {
-		.type = PERF_TYPE_SOFTWARE,
-		.config = PERF_COUNT_SW_CPU_CLOCK,
-		.exclude_kernel = 1,
-	};
-	struct cpu_map *cpus;
-	int cpu, fd;
-
-	cpus = cpu_map__new(NULL);
-	if (!cpus)
-		return false;
-	cpu = cpus->map[0];
-	cpu_map__put(cpus);
-
-	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
-	if (fd < 0)
-		return false;
-	close(fd);
-
-	return true;
-}
-
-void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
 			 struct callchain_param *callchain)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	bool use_sample_identifier = false;
 	bool use_comm_exec;
 	bool sample_id = opts->sample_id;
@@ -147,16 +104,20 @@
 	if (opts->group)
 		perf_evlist__set_leader(evlist);
 
-	if (evlist->cpus->map[0] < 0)
+	if (evlist->core.cpus->map[0] < 0)
 		opts->no_inherit = true;
 
 	use_comm_exec = perf_can_comm_exec();
 
 	evlist__for_each_entry(evlist, evsel) {
-		perf_evsel__config(evsel, opts, callchain);
+		evsel__config(evsel, opts, callchain);
 		if (evsel->tracking && use_comm_exec)
-			evsel->attr.comm_exec = 1;
+			evsel->core.attr.comm_exec = 1;
 	}
+
+	/* Configure leader sampling here now that the sample type is known */
+	evlist__for_each_entry(evlist, evsel)
+		evsel__config_leader_sampling(evsel, evlist);
 
 	if (opts->full_auxtrace) {
 		/*
@@ -166,11 +127,11 @@
 		 */
 		use_sample_identifier = perf_can_sample_identifier();
 		sample_id = true;
-	} else if (evlist->nr_entries > 1) {
-		struct perf_evsel *first = perf_evlist__first(evlist);
+	} else if (evlist->core.nr_entries > 1) {
+		struct evsel *first = evlist__first(evlist);
 
 		evlist__for_each_entry(evlist, evsel) {
-			if (evsel->attr.sample_type == first->attr.sample_type)
+			if (evsel->core.attr.sample_type == first->core.attr.sample_type)
 				continue;
 			use_sample_identifier = perf_can_sample_identifier();
 			break;
@@ -180,7 +141,7 @@
 
 	if (sample_id) {
 		evlist__for_each_entry(evlist, evsel)
-			perf_evsel__set_sample_id(evsel, use_sample_identifier);
+			evsel__set_sample_id(evsel, use_sample_identifier);
 	}
 
 	perf_evlist__set_id_pos(evlist);
@@ -256,15 +217,15 @@
 	return record_opts__config_freq(opts);
 }
 
-bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
+bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
 {
-	struct perf_evlist *temp_evlist;
-	struct perf_evsel *evsel;
+	struct evlist *temp_evlist;
+	struct evsel *evsel;
 	int err, fd, cpu;
 	bool ret = false;
 	pid_t pid = -1;
 
-	temp_evlist = perf_evlist__new();
+	temp_evlist = evlist__new();
 	if (!temp_evlist)
 		return false;
 
@@ -272,19 +233,19 @@
 	if (err)
 		goto out_delete;
 
-	evsel = perf_evlist__last(temp_evlist);
+	evsel = evlist__last(temp_evlist);
 
-	if (!evlist || cpu_map__empty(evlist->cpus)) {
-		struct cpu_map *cpus = cpu_map__new(NULL);
+	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
+		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
 		cpu = cpus ? cpus->map[0] : 0;
-		cpu_map__put(cpus);
+		perf_cpu_map__put(cpus);
 	} else {
-		cpu = evlist->cpus->map[0];
+		cpu = evlist->core.cpus->map[0];
 	}
 
 	while (1) {
-		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
+		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
 					 perf_event_open_cloexec_flag());
 		if (fd < 0) {
 			if (pid == -1 && errno == EACCES) {
@@ -299,7 +260,7 @@
 	ret = true;
 
 out_delete:
-	perf_evlist__delete(temp_evlist);
+	evlist__delete(temp_evlist);
 	return ret;
 }
 