2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/tools/perf/util/record.c
@@ -1,141 +1,98 @@
 // SPDX-License-Identifier: GPL-2.0
+#include "debug.h"
 #include "evlist.h"
 #include "evsel.h"
-#include "cpumap.h"
+#include "evsel_config.h"
 #include "parse-events.h"
 #include <errno.h>
+#include <limits.h>
+#include <stdlib.h>
 #include <api/fs/fs.h>
 #include <subcmd/parse-options.h>
-#include "util.h"
+#include <perf/cpumap.h>
 #include "cloexec.h"
+#include "util/perf_api_probe.h"
+#include "record.h"
+#include "../perf-sys.h"
+#include "topdown.h"
 
-typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
-
-static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
+/*
+ * evsel__config_leader_sampling() uses special rules for leader sampling.
+ * However, if the leader is an AUX area event, then assume the event to sample
+ * is the next event.
+ */
+static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
 {
-	struct perf_evlist *evlist;
-	struct perf_evsel *evsel;
-	unsigned long flags = perf_event_open_cloexec_flag();
-	int err = -EAGAIN, fd;
-	static pid_t pid = -1;
+	struct evsel *leader = evsel->leader;
 
-	evlist = perf_evlist__new();
-	if (!evlist)
-		return -ENOMEM;
-
-	if (parse_events(evlist, str, NULL))
-		goto out_delete;
-
-	evsel = perf_evlist__first(evlist);
-
-	while (1) {
-		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
-		if (fd < 0) {
-			if (pid == -1 && errno == EACCES) {
-				pid = 0;
-				continue;
-			}
-			goto out_delete;
+	if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader)) {
+		evlist__for_each_entry(evlist, evsel) {
+			if (evsel->leader == leader && evsel != evsel->leader)
+				return evsel;
 		}
-		break;
 	}
-	close(fd);
 
-	fn(evsel);
+	return leader;
+}
 
-	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
-	if (fd < 0) {
-		if (errno == EINVAL)
-			err = -EINVAL;
-		goto out_delete;
+static u64 evsel__config_term_mask(struct evsel *evsel)
+{
+	struct evsel_config_term *term;
+	struct list_head *config_terms = &evsel->config_terms;
+	u64 term_types = 0;
+
+	list_for_each_entry(term, config_terms, list) {
+		term_types |= 1 << term->type;
 	}
-	close(fd);
-	err = 0;
-
-out_delete:
-	perf_evlist__delete(evlist);
-	return err;
+	return term_types;
 }
 
-static bool perf_probe_api(setup_probe_fn_t fn)
+static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
 {
-	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
-	struct cpu_map *cpus;
-	int cpu, ret, i = 0;
+	struct perf_event_attr *attr = &evsel->core.attr;
+	struct evsel *leader = evsel->leader;
+	struct evsel *read_sampler;
+	u64 term_types, freq_mask;
 
-	cpus = cpu_map__new(NULL);
-	if (!cpus)
-		return false;
-	cpu = cpus->map[0];
-	cpu_map__put(cpus);
+	if (!leader->sample_read)
+		return;
 
-	do {
-		ret = perf_do_probe_api(fn, cpu, try[i++]);
-		if (!ret)
-			return true;
-	} while (ret == -EAGAIN && try[i]);
+	read_sampler = evsel__read_sampler(evsel, evlist);
 
-	return false;
+	if (evsel == read_sampler)
+		return;
+
+	term_types = evsel__config_term_mask(evsel);
+	/*
+	 * Disable sampling for all group members except those with explicit
+	 * config terms or the leader. In the case of an AUX area event, the 2nd
+	 * event in the group is the one that 'leads' the sampling.
+	 */
+	freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
+	if ((term_types & freq_mask) == 0) {
+		attr->freq = 0;
+		attr->sample_freq = 0;
+		attr->sample_period = 0;
+	}
+	if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
+		attr->write_backward = 0;
+
+	/*
+	 * We don't get a sample for slave events, we make them when delivering
+	 * the group leader sample. Set the slave event to follow the master
+	 * sample_type to ease up reporting.
+	 * An AUX area event also has sample_type requirements, so also include
+	 * the sample type bits from the leader's sample_type to cover that
+	 * case.
+	 */
+	attr->sample_type = read_sampler->core.attr.sample_type |
+			    leader->core.attr.sample_type;
 }
 
-static void perf_probe_sample_identifier(struct perf_evsel *evsel)
-{
-	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
-}
-
-static void perf_probe_comm_exec(struct perf_evsel *evsel)
-{
-	evsel->attr.comm_exec = 1;
-}
-
-static void perf_probe_context_switch(struct perf_evsel *evsel)
-{
-	evsel->attr.context_switch = 1;
-}
-
-bool perf_can_sample_identifier(void)
-{
-	return perf_probe_api(perf_probe_sample_identifier);
-}
-
-static bool perf_can_comm_exec(void)
-{
-	return perf_probe_api(perf_probe_comm_exec);
-}
-
-bool perf_can_record_switch_events(void)
-{
-	return perf_probe_api(perf_probe_context_switch);
-}
-
-bool perf_can_record_cpu_wide(void)
-{
-	struct perf_event_attr attr = {
-		.type = PERF_TYPE_SOFTWARE,
-		.config = PERF_COUNT_SW_CPU_CLOCK,
-		.exclude_kernel = 1,
-	};
-	struct cpu_map *cpus;
-	int cpu, fd;
-
-	cpus = cpu_map__new(NULL);
-	if (!cpus)
-		return false;
-	cpu = cpus->map[0];
-	cpu_map__put(cpus);
-
-	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
-	if (fd < 0)
-		return false;
-	close(fd);
-
-	return true;
-}
-
-void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
 			 struct callchain_param *callchain)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	bool use_sample_identifier = false;
 	bool use_comm_exec;
 	bool sample_id = opts->sample_id;
@@ -147,16 +104,20 @@
 	if (opts->group)
 		perf_evlist__set_leader(evlist);
 
-	if (evlist->cpus->map[0] < 0)
+	if (evlist->core.cpus->map[0] < 0)
 		opts->no_inherit = true;
 
 	use_comm_exec = perf_can_comm_exec();
 
 	evlist__for_each_entry(evlist, evsel) {
-		perf_evsel__config(evsel, opts, callchain);
+		evsel__config(evsel, opts, callchain);
 		if (evsel->tracking && use_comm_exec)
-			evsel->attr.comm_exec = 1;
+			evsel->core.attr.comm_exec = 1;
 	}
+
+	/* Configure leader sampling here now that the sample type is known */
+	evlist__for_each_entry(evlist, evsel)
+		evsel__config_leader_sampling(evsel, evlist);
 
 	if (opts->full_auxtrace) {
 		/*
@@ -166,11 +127,11 @@
 		 */
 		use_sample_identifier = perf_can_sample_identifier();
 		sample_id = true;
-	} else if (evlist->nr_entries > 1) {
-		struct perf_evsel *first = perf_evlist__first(evlist);
+	} else if (evlist->core.nr_entries > 1) {
+		struct evsel *first = evlist__first(evlist);
 
 		evlist__for_each_entry(evlist, evsel) {
-			if (evsel->attr.sample_type == first->attr.sample_type)
+			if (evsel->core.attr.sample_type == first->core.attr.sample_type)
 				continue;
 			use_sample_identifier = perf_can_sample_identifier();
 			break;
@@ -180,7 +141,7 @@
 
 	if (sample_id) {
 		evlist__for_each_entry(evlist, evsel)
-			perf_evsel__set_sample_id(evsel, use_sample_identifier);
+			evsel__set_sample_id(evsel, use_sample_identifier);
 	}
 
 	perf_evlist__set_id_pos(evlist);
@@ -256,15 +217,15 @@
 	return record_opts__config_freq(opts);
 }
 
-bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
+bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
 {
-	struct perf_evlist *temp_evlist;
-	struct perf_evsel *evsel;
+	struct evlist *temp_evlist;
+	struct evsel *evsel;
 	int err, fd, cpu;
 	bool ret = false;
 	pid_t pid = -1;
 
-	temp_evlist = perf_evlist__new();
+	temp_evlist = evlist__new();
 	if (!temp_evlist)
 		return false;
 
@@ -272,19 +233,19 @@
 	if (err)
 		goto out_delete;
 
-	evsel = perf_evlist__last(temp_evlist);
+	evsel = evlist__last(temp_evlist);
 
-	if (!evlist || cpu_map__empty(evlist->cpus)) {
-		struct cpu_map *cpus = cpu_map__new(NULL);
+	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
+		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
 		cpu = cpus ? cpus->map[0] : 0;
-		cpu_map__put(cpus);
+		perf_cpu_map__put(cpus);
 	} else {
-		cpu = evlist->cpus->map[0];
+		cpu = evlist->core.cpus->map[0];
 	}
 
 	while (1) {
-		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
+		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
 					 perf_event_open_cloexec_flag());
 		if (fd < 0) {
 			if (pid == -1 && errno == EACCES) {
@@ -299,7 +260,7 @@
 	ret = true;
 
 out_delete:
-	perf_evlist__delete(temp_evlist);
+	evlist__delete(temp_evlist);
 	return ret;
 }
 
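Note on the AUX-leader rule in the diff above: evsel__read_sampler() decides which group member "leads" the sampling. Normally that is the group leader, but when the leader is an AUX area event (intel_pt, for example), the next event in the group takes that role. Below is a minimal standalone sketch of just that selection rule; struct fake_evsel and its is_aux flag are simplified stand-ins, not perf's real evsel/evlist API.

/*
 * Sketch of the read-sampler choice: the leader drives sampling unless
 * it is an AUX area event, in which case the first other group member
 * that names it as leader takes over.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_evsel {
	const char *name;
	bool is_aux;
	struct fake_evsel *leader;
};

static struct fake_evsel *read_sampler(struct fake_evsel *group, size_t n)
{
	struct fake_evsel *leader = group[0].leader;
	size_t i;

	if (leader->is_aux) {
		/* First member after the leader "leads" the sampling. */
		for (i = 0; i < n; i++)
			if (group[i].leader == leader && &group[i] != leader)
				return &group[i];
	}
	return leader;
}

int main(void)
{
	struct fake_evsel group[2] = {
		{ .name = "intel_pt//", .is_aux = true  },
		{ .name = "cycles",     .is_aux = false },
	};

	group[0].leader = &group[0];
	group[1].leader = &group[0];

	/* Prints "cycles": the AUX leader defers to the next event. */
	printf("sampling is led by: %s\n", read_sampler(group, 2)->name);
	return 0;
}

In terms of the perf tool itself, leader sampling is what the ':S' group modifier (PERF_SAMPLE_READ) requests; with a group such as '{intel_pt//,cycles}:S', 'cycles' rather than the AUX leader would end up driving the samples.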
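The bitmask trick in evsel__config_term_mask() is worth spelling out: each config-term type occupies one bit of term_types, so a single AND against freq_mask tells evsel__config_leader_sampling() whether the user attached an explicit freq/period term to the member (and likewise for overwrite). A compilable sketch of the same bit arithmetic, with locally defined enum values standing in for perf's EVSEL__CONFIG_TERM_* constants:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for perf's EVSEL__CONFIG_TERM_* enum values. */
enum {
	TERM_PERIOD,
	TERM_FREQ,
	TERM_OVERWRITE,
};

int main(void)
{
	/* Pretend the user attached an explicit period term to the event. */
	int terms[] = { TERM_PERIOD };
	uint64_t term_types = 0;
	uint64_t freq_mask;
	size_t i;

	/* One bit per term type, as in evsel__config_term_mask(). */
	for (i = 0; i < sizeof(terms) / sizeof(terms[0]); i++)
		term_types |= 1ULL << terms[i];

	freq_mask = (1ULL << TERM_FREQ) | (1ULL << TERM_PERIOD);

	/* The same test the patch uses to decide whether to zero freq/period. */
	if ((term_types & freq_mask) == 0)
		printf("no explicit freq/period term: member sampling disabled\n");
	else
		printf("explicit freq/period term: member keeps sampling\n");
	return 0;
}

The design choice the patch comments describe: group members without explicit terms stop self-sampling entirely (freq, sample_freq and sample_period all zeroed), and their values are instead synthesized when the leader's sample is delivered, with sample_type widened to match so reporting stays consistent.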