2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/tools/perf/util/evlist.c
@@ -1,26 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  *
  * Parts came from builtin-{top,stat,record}.c, see those files for further
  * copyright notes.
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
-#include "util.h"
 #include <api/fs/fs.h>
 #include <errno.h>
 #include <inttypes.h>
 #include <poll.h>
 #include "cpumap.h"
+#include "util/mmap.h"
 #include "thread_map.h"
 #include "target.h"
 #include "evlist.h"
 #include "evsel.h"
 #include "debug.h"
 #include "units.h"
+#include <internal/lib.h> // page_size
+#include "affinity.h"
+#include "../perf.h"
 #include "asm/bug.h"
+#include "bpf-event.h"
+#include "util/string2.h"
+#include "util/perf_api_probe.h"
 #include <signal.h>
 #include <unistd.h>
+#include <sched.h>
+#include <stdlib.h>
 
 #include "parse-events.h"
 #include <subcmd/parse-options.h>
@@ -33,56 +40,62 @@
 #include <linux/hash.h>
 #include <linux/log2.h>
 #include <linux/err.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <perf/evlist.h>
+#include <perf/evsel.h>
+#include <perf/cpumap.h>
+#include <perf/mmap.h>
+
+#include <internal/xyarray.h>
 
 #ifdef LACKS_SIGQUEUE_PROTOTYPE
 int sigqueue(pid_t pid, int sig, const union sigval value);
 #endif
 
-#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
 
-void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
-		       struct thread_map *threads)
+void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
+		  struct perf_thread_map *threads)
 {
-	int i;
-
-	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
-		INIT_HLIST_HEAD(&evlist->heads[i]);
-	INIT_LIST_HEAD(&evlist->entries);
-	perf_evlist__set_maps(evlist, cpus, threads);
-	fdarray__init(&evlist->pollfd, 64);
+	perf_evlist__init(&evlist->core);
+	perf_evlist__set_maps(&evlist->core, cpus, threads);
 	evlist->workload.pid = -1;
 	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
+	evlist->ctl_fd.fd = -1;
+	evlist->ctl_fd.ack = -1;
+	evlist->ctl_fd.pos = -1;
 }
 
-struct perf_evlist *perf_evlist__new(void)
+struct evlist *evlist__new(void)
 {
-	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
+	struct evlist *evlist = zalloc(sizeof(*evlist));
 
 	if (evlist != NULL)
-		perf_evlist__init(evlist, NULL, NULL);
+		evlist__init(evlist, NULL, NULL);
 
 	return evlist;
 }
 
-struct perf_evlist *perf_evlist__new_default(void)
+struct evlist *perf_evlist__new_default(void)
 {
-	struct perf_evlist *evlist = perf_evlist__new();
+	struct evlist *evlist = evlist__new();
 
-	if (evlist && perf_evlist__add_default(evlist)) {
-		perf_evlist__delete(evlist);
+	if (evlist && evlist__add_default(evlist)) {
+		evlist__delete(evlist);
 		evlist = NULL;
 	}
 
 	return evlist;
 }
 
-struct perf_evlist *perf_evlist__new_dummy(void)
+struct evlist *perf_evlist__new_dummy(void)
 {
-	struct perf_evlist *evlist = perf_evlist__new();
+	struct evlist *evlist = evlist__new();
 
-	if (evlist && perf_evlist__add_dummy(evlist)) {
-		perf_evlist__delete(evlist);
+	if (evlist && evlist__add_dummy(evlist)) {
+		evlist__delete(evlist);
 		evlist = NULL;
 	}
 
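/*
 * Example (editor's sketch, not part of the patch): the reworked lifecycle,
 * assuming this tree's util/evlist.h. The tool-level struct evlist now
 * embeds a libperf perf_evlist as ->core, and the constructor/destructor
 * pair funnels through the libperf init/exit calls seen in the hunk above.
 */
#include <errno.h>
#include "util/evlist.h"

static int example__evlist_lifecycle(void)
{
	struct evlist *evlist = evlist__new();	/* zalloc() + evlist__init() */

	if (evlist == NULL)
		return -ENOMEM;

	/* ... evlist__add() evsels, evlist__open(), consume events ... */

	evlist__delete(evlist);	/* evlist__munmap/close/purge/exit + free */
	return 0;
}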
@@ -96,193 +109,169 @@
  * Events with compatible sample types all have the same id_pos
  * and is_pos. For convenience, put a copy on evlist.
  */
-void perf_evlist__set_id_pos(struct perf_evlist *evlist)
+void perf_evlist__set_id_pos(struct evlist *evlist)
 {
-	struct perf_evsel *first = perf_evlist__first(evlist);
+	struct evsel *first = evlist__first(evlist);
 
 	evlist->id_pos = first->id_pos;
 	evlist->is_pos = first->is_pos;
 }
 
-static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
+static void perf_evlist__update_id_pos(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel)
-		perf_evsel__calc_id_pos(evsel);
+		evsel__calc_id_pos(evsel);
 
 	perf_evlist__set_id_pos(evlist);
 }
 
-static void perf_evlist__purge(struct perf_evlist *evlist)
+static void evlist__purge(struct evlist *evlist)
 {
-	struct perf_evsel *pos, *n;
+	struct evsel *pos, *n;
 
 	evlist__for_each_entry_safe(evlist, n, pos) {
-		list_del_init(&pos->node);
+		list_del_init(&pos->core.node);
 		pos->evlist = NULL;
-		perf_evsel__delete(pos);
+		evsel__delete(pos);
 	}
 
-	evlist->nr_entries = 0;
+	evlist->core.nr_entries = 0;
 }
 
-void perf_evlist__exit(struct perf_evlist *evlist)
+void evlist__exit(struct evlist *evlist)
 {
 	zfree(&evlist->mmap);
 	zfree(&evlist->overwrite_mmap);
-	fdarray__exit(&evlist->pollfd);
+	perf_evlist__exit(&evlist->core);
 }
 
-void perf_evlist__delete(struct perf_evlist *evlist)
+void evlist__delete(struct evlist *evlist)
 {
 	if (evlist == NULL)
 		return;
 
-	perf_evlist__munmap(evlist);
-	perf_evlist__close(evlist);
-	cpu_map__put(evlist->cpus);
-	thread_map__put(evlist->threads);
-	evlist->cpus = NULL;
-	evlist->threads = NULL;
-	perf_evlist__purge(evlist);
-	perf_evlist__exit(evlist);
+	evlist__munmap(evlist);
+	evlist__close(evlist);
+	evlist__purge(evlist);
+	evlist__exit(evlist);
 	free(evlist);
 }
 
-static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
-					  struct perf_evsel *evsel)
-{
-	/*
-	 * We already have cpus for evsel (via PMU sysfs) so
-	 * keep it, if there's no target cpu list defined.
-	 */
-	if (!evsel->own_cpus || evlist->has_user_cpus) {
-		cpu_map__put(evsel->cpus);
-		evsel->cpus = cpu_map__get(evlist->cpus);
-	} else if (evsel->cpus != evsel->own_cpus) {
-		cpu_map__put(evsel->cpus);
-		evsel->cpus = cpu_map__get(evsel->own_cpus);
-	}
-
-	thread_map__put(evsel->threads);
-	evsel->threads = thread_map__get(evlist->threads);
-}
-
-static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
-{
-	struct perf_evsel *evsel;
-
-	evlist__for_each_entry(evlist, evsel)
-		__perf_evlist__propagate_maps(evlist, evsel);
-}
-
-void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
+void evlist__add(struct evlist *evlist, struct evsel *entry)
 {
 	entry->evlist = evlist;
-	list_add_tail(&entry->node, &evlist->entries);
-	entry->idx = evlist->nr_entries;
+	entry->idx = evlist->core.nr_entries;
 	entry->tracking = !entry->idx;
 
-	if (!evlist->nr_entries++)
-		perf_evlist__set_id_pos(evlist);
+	perf_evlist__add(&evlist->core, &entry->core);
 
-	__perf_evlist__propagate_maps(evlist, entry);
+	if (evlist->core.nr_entries == 1)
+		perf_evlist__set_id_pos(evlist);
 }
 
-void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
+void evlist__remove(struct evlist *evlist, struct evsel *evsel)
 {
 	evsel->evlist = NULL;
-	list_del_init(&evsel->node);
-	evlist->nr_entries -= 1;
+	perf_evlist__remove(&evlist->core, &evsel->core);
 }
 
-void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+void perf_evlist__splice_list_tail(struct evlist *evlist,
 				   struct list_head *list)
 {
-	struct perf_evsel *evsel, *temp;
+	struct evsel *evsel, *temp;
 
 	__evlist__for_each_entry_safe(list, temp, evsel) {
-		list_del_init(&evsel->node);
-		perf_evlist__add(evlist, evsel);
+		list_del_init(&evsel->core.node);
+		evlist__add(evlist, evsel);
 	}
+}
+
+int __evlist__set_tracepoints_handlers(struct evlist *evlist,
+				       const struct evsel_str_handler *assocs, size_t nr_assocs)
+{
+	struct evsel *evsel;
+	size_t i;
+	int err;
+
+	for (i = 0; i < nr_assocs; i++) {
+		// Adding a handler for an event not in this evlist, just ignore it.
+		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
+		if (evsel == NULL)
+			continue;
+
+		err = -EEXIST;
+		if (evsel->handler != NULL)
+			goto out;
+		evsel->handler = assocs[i].handler;
+	}
+
+	err = 0;
+out:
+	return err;
 }
 
 void __perf_evlist__set_leader(struct list_head *list)
 {
-	struct perf_evsel *evsel, *leader;
+	struct evsel *evsel, *leader;
 
-	leader = list_entry(list->next, struct perf_evsel, node);
-	evsel = list_entry(list->prev, struct perf_evsel, node);
+	leader = list_entry(list->next, struct evsel, core.node);
+	evsel = list_entry(list->prev, struct evsel, core.node);
 
-	leader->nr_members = evsel->idx - leader->idx + 1;
+	leader->core.nr_members = evsel->idx - leader->idx + 1;
 
 	__evlist__for_each_entry(list, evsel) {
 		evsel->leader = leader;
 	}
 }
 
-void perf_evlist__set_leader(struct perf_evlist *evlist)
+void perf_evlist__set_leader(struct evlist *evlist)
 {
-	if (evlist->nr_entries) {
-		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
-		__perf_evlist__set_leader(&evlist->entries);
+	if (evlist->core.nr_entries) {
+		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
+		__perf_evlist__set_leader(&evlist->core.entries);
 	}
 }
 
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
+int __evlist__add_default(struct evlist *evlist, bool precise)
 {
-	attr->precise_ip = 3;
-
-	while (attr->precise_ip != 0) {
-		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
-		if (fd != -1) {
-			close(fd);
-			break;
-		}
-		--attr->precise_ip;
-	}
-}
-
-int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
-{
-	struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
+	struct evsel *evsel = evsel__new_cycles(precise);
 
 	if (evsel == NULL)
 		return -ENOMEM;
 
-	perf_evlist__add(evlist, evsel);
+	evlist__add(evlist, evsel);
 	return 0;
 }
 
-int perf_evlist__add_dummy(struct perf_evlist *evlist)
+int evlist__add_dummy(struct evlist *evlist)
 {
 	struct perf_event_attr attr = {
 		.type	= PERF_TYPE_SOFTWARE,
 		.config = PERF_COUNT_SW_DUMMY,
 		.size	= sizeof(attr), /* to capture ABI version */
 	};
-	struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
+	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
 
 	if (evsel == NULL)
 		return -ENOMEM;
 
-	perf_evlist__add(evlist, evsel);
+	evlist__add(evlist, evsel);
 	return 0;
 }
 
-static int perf_evlist__add_attrs(struct perf_evlist *evlist,
-				  struct perf_event_attr *attrs, size_t nr_attrs)
+static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
 {
-	struct perf_evsel *evsel, *n;
+	struct evsel *evsel, *n;
 	LIST_HEAD(head);
 	size_t i;
 
 	for (i = 0; i < nr_attrs; i++) {
-		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
+		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
 		if (evsel == NULL)
 			goto out_delete_partial_list;
-		list_add_tail(&evsel->node, &head);
+		list_add_tail(&evsel->core.node, &head);
 	}
 
 	perf_evlist__splice_list_tail(evlist, &head);
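/*
 * Example (editor's sketch, not part of the patch): using the
 * __evlist__set_tracepoints_handlers() helper introduced above. The
 * tracepoint name and handler below are hypothetical; evsel->handler is
 * an opaque void *, so the handler signature is whatever the caller's
 * dispatch code expects.
 */
static int example__process_switch(void *sample __maybe_unused)
{
	return 0;	/* hypothetical per-sample callback */
}

static int example__attach_handlers(struct evlist *evlist)
{
	const struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch", example__process_switch, },
	};

	/* Fails with -EEXIST if a matching evsel already has a handler. */
	return __evlist__set_tracepoints_handlers(evlist, handlers,
						  ARRAY_SIZE(handlers));
}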
@@ -291,43 +280,42 @@
 
 out_delete_partial_list:
 	__evlist__for_each_entry_safe(&head, n, evsel)
-		perf_evsel__delete(evsel);
+		evsel__delete(evsel);
 	return -1;
 }
 
-int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
-				     struct perf_event_attr *attrs, size_t nr_attrs)
+int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
 {
 	size_t i;
 
 	for (i = 0; i < nr_attrs; i++)
 		event_attr_init(attrs + i);
 
-	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
+	return evlist__add_attrs(evlist, attrs, nr_attrs);
 }
 
-struct perf_evsel *
-perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+struct evsel *
+perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
-		    (int)evsel->attr.config == id)
+		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
+		    (int)evsel->core.attr.config == id)
			return evsel;
	}
 
	return NULL;
 }
 
-struct perf_evsel *
-perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+struct evsel *
+perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
				     const char *name)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
	evlist__for_each_entry(evlist, evsel) {
-		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
+		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}
@@ -335,66 +323,142 @@
 	return NULL;
 }
 
-int perf_evlist__add_newtp(struct perf_evlist *evlist,
-			   const char *sys, const char *name, void *handler)
+int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
 {
-	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
+	struct evsel *evsel = evsel__newtp(sys, name);
 
 	if (IS_ERR(evsel))
 		return -1;
 
 	evsel->handler = handler;
-	perf_evlist__add(evlist, evsel);
+	evlist__add(evlist, evsel);
 	return 0;
 }
 
-static int perf_evlist__nr_threads(struct perf_evlist *evlist,
-				   struct perf_evsel *evsel)
+static int perf_evlist__nr_threads(struct evlist *evlist,
+				   struct evsel *evsel)
 {
-	if (evsel->system_wide)
+	if (evsel->core.system_wide)
 		return 1;
 	else
-		return thread_map__nr(evlist->threads);
+		return perf_thread_map__nr(evlist->core.threads);
 }
 
-void perf_evlist__disable(struct perf_evlist *evlist)
+void evlist__cpu_iter_start(struct evlist *evlist)
 {
-	struct perf_evsel *pos;
+	struct evsel *pos;
 
+	/*
+	 * Reset the per evsel cpu_iter. This is needed because
+	 * each evsel's cpumap may have a different index space,
+	 * and some operations need the index to modify
+	 * the FD xyarray (e.g. open, close)
+	 */
+	evlist__for_each_entry(evlist, pos)
+		pos->cpu_iter = 0;
+}
+
+bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
+{
+	if (ev->cpu_iter >= ev->core.cpus->nr)
+		return true;
+	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
+		return true;
+	return false;
+}
+
+bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
+{
+	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
+		ev->cpu_iter++;
+		return false;
+	}
+	return true;
+}
+
+void evlist__disable(struct evlist *evlist)
+{
+	struct evsel *pos;
+	struct affinity affinity;
+	int cpu, i, imm = 0;
+	bool has_imm = false;
+
+	if (affinity__setup(&affinity) < 0)
+		return;
+
+	/* Disable 'immediate' events last */
+	for (imm = 0; imm <= 1; imm++) {
+		evlist__for_each_cpu(evlist, i, cpu) {
+			affinity__set(&affinity, cpu);
+
+			evlist__for_each_entry(evlist, pos) {
+				if (evsel__cpu_iter_skip(pos, cpu))
+					continue;
+				if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
+					continue;
+				if (pos->immediate)
+					has_imm = true;
+				if (pos->immediate != imm)
+					continue;
+				evsel__disable_cpu(pos, pos->cpu_iter - 1);
+			}
+		}
+		if (!has_imm)
+			break;
+	}
+
+	affinity__cleanup(&affinity);
 	evlist__for_each_entry(evlist, pos) {
-		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+		if (!evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
-		perf_evsel__disable(pos);
+		pos->disabled = true;
 	}
 
 	evlist->enabled = false;
 }
 
-void perf_evlist__enable(struct perf_evlist *evlist)
+void evlist__enable(struct evlist *evlist)
 {
-	struct perf_evsel *pos;
+	struct evsel *pos;
+	struct affinity affinity;
+	int cpu, i;
 
+	if (affinity__setup(&affinity) < 0)
+		return;
+
+	evlist__for_each_cpu(evlist, i, cpu) {
+		affinity__set(&affinity, cpu);
+
+		evlist__for_each_entry(evlist, pos) {
+			if (evsel__cpu_iter_skip(pos, cpu))
+				continue;
+			if (!evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
+			evsel__enable_cpu(pos, pos->cpu_iter - 1);
+		}
+	}
+	affinity__cleanup(&affinity);
 	evlist__for_each_entry(evlist, pos) {
-		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+		if (!evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
-		perf_evsel__enable(pos);
+		pos->disabled = false;
 	}
 
 	evlist->enabled = true;
 }
 
-void perf_evlist__toggle_enable(struct perf_evlist *evlist)
+void perf_evlist__toggle_enable(struct evlist *evlist)
 {
-	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
+	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
 }
 
-static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
-					 struct perf_evsel *evsel, int cpu)
+static int perf_evlist__enable_event_cpu(struct evlist *evlist,
					 struct evsel *evsel, int cpu)
 {
 	int thread;
 	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
-	if (!evsel->fd)
+	if (!evsel->core.fd)
 		return -EINVAL;
 
 	for (thread = 0; thread < nr_threads; thread++) {
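/*
 * Example (editor's sketch, not part of the patch): the per-CPU iteration
 * idiom the new evlist__disable()/evlist__enable() are built on, under the
 * assumption (per this tree) that evlist__for_each_cpu() resets each
 * pos->cpu_iter via evlist__cpu_iter_start() before walking the CPUs.
 */
static void example__walk_per_cpu(struct evlist *evlist)
{
	struct affinity affinity;
	struct evsel *pos;
	int i, cpu;

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);	/* migrate once per CPU... */
		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
				continue;	/* evsel has no such CPU */
			/*
			 * ...so any ioctl issued here hits a CPU-local perf
			 * event context; pos->cpu_iter - 1 is the
			 * evsel-relative CPU index into its FD xyarray.
			 */
		}
	}
	affinity__cleanup(&affinity);
}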
@@ -405,14 +469,14 @@
 	return 0;
 }
 
-static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
-					    struct perf_evsel *evsel,
+static int perf_evlist__enable_event_thread(struct evlist *evlist,
+					    struct evsel *evsel,
					    int thread)
 {
 	int cpu;
-	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
 
-	if (!evsel->fd)
+	if (!evsel->core.fd)
 		return -EINVAL;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -423,10 +487,10 @@
 	return 0;
 }
 
-int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
-				  struct perf_evsel *evsel, int idx)
+int perf_evlist__enable_event_idx(struct evlist *evlist,
+				  struct evsel *evsel, int idx)
 {
-	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
+	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
 
 	if (per_cpu_mmaps)
 		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
@@ -434,154 +498,37 @@
 	return perf_evlist__enable_event_thread(evlist, evsel, idx);
 }
 
-int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+int evlist__add_pollfd(struct evlist *evlist, int fd)
 {
-	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
-	int nfds = 0;
-	struct perf_evsel *evsel;
-
-	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->system_wide)
-			nfds += nr_cpus;
-		else
-			nfds += nr_cpus * nr_threads;
-	}
-
-	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
-	    fdarray__grow(&evlist->pollfd, nfds) < 0)
-		return -ENOMEM;
-
-	return 0;
+	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
 }
 
-static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
-				     struct perf_mmap *map, short revent)
+int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
 {
-	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
-	/*
-	 * Save the idx so that when we filter out fds POLLHUP'ed we can
-	 * close the associated evlist->mmap[] entry.
-	 */
-	if (pos >= 0) {
-		evlist->pollfd.priv[pos].ptr = map;
-
-		fcntl(fd, F_SETFL, O_NONBLOCK);
-	}
-
-	return pos;
+	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
 }
 
-int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+#ifdef HAVE_EVENTFD_SUPPORT
+int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
 {
-	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
+	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
				       fdarray_flag__nonfilterable);
+}
+#endif
+
+int evlist__poll(struct evlist *evlist, int timeout)
+{
+	return perf_evlist__poll(&evlist->core, timeout);
 }
 
-static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
-					 void *arg __maybe_unused)
-{
-	struct perf_mmap *map = fda->priv[fd].ptr;
-
-	if (map)
-		perf_mmap__put(map);
-}
-
-int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
-{
-	return fdarray__filter(&evlist->pollfd, revents_and_mask,
-			       perf_evlist__munmap_filtered, NULL);
-}
-
-int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
-{
-	return fdarray__poll(&evlist->pollfd, timeout);
-}
-
-static void perf_evlist__id_hash(struct perf_evlist *evlist,
-				 struct perf_evsel *evsel,
-				 int cpu, int thread, u64 id)
-{
-	int hash;
-	struct perf_sample_id *sid = SID(evsel, cpu, thread);
-
-	sid->id = id;
-	sid->evsel = evsel;
-	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
-	hlist_add_head(&sid->node, &evlist->heads[hash]);
-}
-
-void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
-			 int cpu, int thread, u64 id)
-{
-	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
-	evsel->id[evsel->ids++] = id;
-}
-
-int perf_evlist__id_add_fd(struct perf_evlist *evlist,
-			   struct perf_evsel *evsel,
-			   int cpu, int thread, int fd)
-{
-	u64 read_data[4] = { 0, };
-	int id_idx = 1; /* The first entry is the counter value */
-	u64 id;
-	int ret;
-
-	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
-	if (!ret)
-		goto add;
-
-	if (errno != ENOTTY)
-		return -1;
-
-	/* Legacy way to get event id.. All hail to old kernels! */
-
-	/*
-	 * This way does not work with group format read, so bail
-	 * out in that case.
-	 */
-	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
-		return -1;
-
-	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
-	    read(fd, &read_data, sizeof(read_data)) == -1)
-		return -1;
-
-	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		++id_idx;
-	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		++id_idx;
-
-	id = read_data[id_idx];
-
-add:
-	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
-	return 0;
-}
-
-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
-				     struct perf_evsel *evsel, int idx, int cpu,
-				     int thread)
-{
-	struct perf_sample_id *sid = SID(evsel, cpu, thread);
-	sid->idx = idx;
-	if (evlist->cpus && cpu >= 0)
-		sid->cpu = evlist->cpus->map[cpu];
-	else
-		sid->cpu = -1;
-	if (!evsel->system_wide && evlist->threads && thread >= 0)
-		sid->tid = thread_map__pid(evlist->threads, thread);
-	else
-		sid->tid = -1;
-}
-
-struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
+struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
 {
 	struct hlist_head *head;
 	struct perf_sample_id *sid;
 	int hash;
 
 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
-	head = &evlist->heads[hash];
+	head = &evlist->core.heads[hash];
 
 	hlist_for_each_entry(sid, head, node)
 		if (sid->id == id)
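/*
 * Example (editor's sketch, not part of the patch): the tool-side poll flow
 * now that the fdarray lives in libperf's perf_evlist. A hypothetical
 * consumer loop over an already opened and mmapped evlist.
 */
static void example__poll_loop(struct evlist *evlist, volatile int *done)
{
	while (!*done) {
		/* ... drain the ring buffers ... */

		/* Block until data or hangup; libperf owns the pollfd set. */
		if (evlist__poll(evlist, /*timeout_ms=*/-1) < 0)
			break;

		/* Prune pollfd entries whose fds saw POLLERR/POLLHUP. */
		evlist__filter_pollfd(evlist, POLLERR | POLLHUP);
	}
}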
@@ -590,24 +537,24 @@
 	return NULL;
 }
 
-struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
 {
 	struct perf_sample_id *sid;
 
-	if (evlist->nr_entries == 1 || !id)
-		return perf_evlist__first(evlist);
+	if (evlist->core.nr_entries == 1 || !id)
+		return evlist__first(evlist);
 
 	sid = perf_evlist__id2sid(evlist, id);
 	if (sid)
-		return sid->evsel;
+		return container_of(sid->evsel, struct evsel, core);
 
-	if (!perf_evlist__sample_id_all(evlist))
-		return perf_evlist__first(evlist);
+	if (!evlist__sample_id_all(evlist))
+		return evlist__first(evlist);
 
 	return NULL;
 }
 
-struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
+struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
					    u64 id)
 {
 	struct perf_sample_id *sid;
@@ -617,15 +564,15 @@
 
 	sid = perf_evlist__id2sid(evlist, id);
 	if (sid)
-		return sid->evsel;
+		return container_of(sid->evsel, struct evsel, core);
 
 	return NULL;
 }
 
-static int perf_evlist__event2id(struct perf_evlist *evlist,
+static int perf_evlist__event2id(struct evlist *evlist,
				 union perf_event *event, u64 *id)
 {
-	const u64 *array = event->sample.array;
+	const __u64 *array = event->sample.array;
 	ssize_t n;
 
 	n = (event->header.size - sizeof(event->header)) >> 3;
@@ -643,19 +590,19 @@
 	return 0;
 }
 
-struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
					    union perf_event *event)
 {
-	struct perf_evsel *first = perf_evlist__first(evlist);
+	struct evsel *first = evlist__first(evlist);
 	struct hlist_head *head;
 	struct perf_sample_id *sid;
 	int hash;
 	u64 id;
 
-	if (evlist->nr_entries == 1)
+	if (evlist->core.nr_entries == 1)
 		return first;
 
-	if (!first->attr.sample_id_all &&
+	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
 		return first;
 
@@ -667,24 +614,24 @@
 		return first;
 
 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
-	head = &evlist->heads[hash];
+	head = &evlist->core.heads[hash];
 
 	hlist_for_each_entry(sid, head, node) {
 		if (sid->id == id)
-			return sid->evsel;
+			return container_of(sid->evsel, struct evsel, core);
 	}
 	return NULL;
 }
 
-static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
+static int perf_evlist__set_paused(struct evlist *evlist, bool value)
 {
 	int i;
 
 	if (!evlist->overwrite_mmap)
 		return 0;
 
-	for (i = 0; i < evlist->nr_mmaps; i++) {
-		int fd = evlist->overwrite_mmap[i].fd;
+	for (i = 0; i < evlist->core.nr_mmaps; i++) {
+		int fd = evlist->overwrite_mmap[i].core.fd;
 		int err;
 
 		if (fd < 0)
@@ -696,52 +643,56 @@
 	return 0;
 }
 
-static int perf_evlist__pause(struct perf_evlist *evlist)
+static int perf_evlist__pause(struct evlist *evlist)
 {
 	return perf_evlist__set_paused(evlist, true);
 }
 
-static int perf_evlist__resume(struct perf_evlist *evlist)
+static int perf_evlist__resume(struct evlist *evlist)
 {
 	return perf_evlist__set_paused(evlist, false);
 }
 
-static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
+static void evlist__munmap_nofree(struct evlist *evlist)
 {
 	int i;
 
 	if (evlist->mmap)
-		for (i = 0; i < evlist->nr_mmaps; i++)
-			perf_mmap__munmap(&evlist->mmap[i]);
+		for (i = 0; i < evlist->core.nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->mmap[i].core);
 
 	if (evlist->overwrite_mmap)
-		for (i = 0; i < evlist->nr_mmaps; i++)
-			perf_mmap__munmap(&evlist->overwrite_mmap[i]);
+		for (i = 0; i < evlist->core.nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
 }
 
-void perf_evlist__munmap(struct perf_evlist *evlist)
+void evlist__munmap(struct evlist *evlist)
 {
-	perf_evlist__munmap_nofree(evlist);
+	evlist__munmap_nofree(evlist);
 	zfree(&evlist->mmap);
 	zfree(&evlist->overwrite_mmap);
 }
 
-static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist,
-						 bool overwrite)
+static void perf_mmap__unmap_cb(struct perf_mmap *map)
+{
+	struct mmap *m = container_of(map, struct mmap, core);
+
+	mmap__munmap(m);
+}
+
+static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
 {
 	int i;
-	struct perf_mmap *map;
+	struct mmap *map;
 
-	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
-	if (cpu_map__empty(evlist->cpus))
-		evlist->nr_mmaps = thread_map__nr(evlist->threads);
-	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
 	if (!map)
 		return NULL;
 
-	for (i = 0; i < evlist->nr_mmaps; i++) {
-		map[i].fd = -1;
-		map[i].overwrite = overwrite;
+	for (i = 0; i < evlist->core.nr_mmaps; i++) {
+		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;
+
 		/*
 		 * When the perf_mmap() call is made we grab one refcount, plus
 		 * one extra to let perf_mmap__consume() get the last
@@ -751,151 +702,56 @@
 		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
 		 * thus does perf_mmap__get() on it.
 		 */
-		refcount_set(&map[i].refcnt, 0);
+		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
 	}
+
 	return map;
 }
 
-static bool
-perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
-			 struct perf_evsel *evsel)
+static void
+perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
+			 struct perf_mmap_param *_mp,
+			 int idx, bool per_cpu)
 {
-	if (evsel->attr.write_backward)
-		return false;
-	return true;
+	struct evlist *evlist = container_of(_evlist, struct evlist, core);
+	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
+
+	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
 }
 
-static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
-				       struct mmap_params *mp, int cpu_idx,
-				       int thread, int *_output, int *_output_overwrite)
+static struct perf_mmap*
+perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
 {
-	struct perf_evsel *evsel;
-	int revent;
-	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);
+	struct evlist *evlist = container_of(_evlist, struct evlist, core);
+	struct mmap *maps;
 
-	evlist__for_each_entry(evlist, evsel) {
-		struct perf_mmap *maps = evlist->mmap;
-		int *output = _output;
-		int fd;
-		int cpu;
+	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
 
-		mp->prot = PROT_READ | PROT_WRITE;
-		if (evsel->attr.write_backward) {
-			output = _output_overwrite;
-			maps = evlist->overwrite_mmap;
+	if (!maps) {
+		maps = evlist__alloc_mmap(evlist, overwrite);
+		if (!maps)
+			return NULL;
 
-			if (!maps) {
-				maps = perf_evlist__alloc_mmap(evlist, true);
-				if (!maps)
-					return -1;
-				evlist->overwrite_mmap = maps;
-				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
-					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
-			}
-			mp->prot &= ~PROT_WRITE;
-		}
-
-		if (evsel->system_wide && thread)
-			continue;
-
-		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
-		if (cpu == -1)
-			continue;
-
-		fd = FD(evsel, cpu, thread);
-
-		if (*output == -1) {
-			*output = fd;
-
-			if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
-				return -1;
+		if (overwrite) {
+			evlist->overwrite_mmap = maps;
+			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
+				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 		} else {
-			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
-				return -1;
-
-			perf_mmap__get(&maps[idx]);
-		}
-
-		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
-
-		/*
-		 * The system_wide flag causes a selected event to be opened
-		 * always without a pid. Consequently it will never get a
-		 * POLLHUP, but it is used for tracking in combination with
-		 * other events, so it should not need to be polled anyway.
-		 * Therefore don't add it for polling.
-		 */
-		if (!evsel->system_wide &&
-		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
-			perf_mmap__put(&maps[idx]);
-			return -1;
-		}
-
-		if (evsel->attr.read_format & PERF_FORMAT_ID) {
-			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
-						   fd) < 0)
-				return -1;
-			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
-						 thread);
+			evlist->mmap = maps;
 		}
 	}
 
-	return 0;
+	return &maps[idx].core;
 }
 
-static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
-				     struct mmap_params *mp)
+static int
+perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, int cpu)
 {
-	int cpu, thread;
-	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	struct mmap *map = container_of(_map, struct mmap, core);
+	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
 
-	pr_debug2("perf event ring buffer mmapped per cpu\n");
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		int output = -1;
-		int output_overwrite = -1;
-
-		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
-					      true);
-
-		for (thread = 0; thread < nr_threads; thread++) {
-			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-							thread, &output, &output_overwrite))
-				goto out_unmap;
-		}
-	}
-
-	return 0;
-
-out_unmap:
-	perf_evlist__munmap_nofree(evlist);
-	return -1;
-}
-
-static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
-					struct mmap_params *mp)
-{
-	int thread;
-	int nr_threads = thread_map__nr(evlist->threads);
-
-	pr_debug2("perf event ring buffer mmapped per thread\n");
-	for (thread = 0; thread < nr_threads; thread++) {
-		int output = -1;
-		int output_overwrite = -1;
-
-		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
-					      false);
-
-		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-						&output, &output_overwrite))
-			goto out_unmap;
-	}
-
-	return 0;
-
-out_unmap:
-	perf_evlist__munmap_nofree(evlist);
-	return -1;
+	return mmap__mmap(map, mp, output, cpu);
 }
 
 unsigned long perf_event_mlock_kb_in_pages(void)
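/*
 * Editor's note with an example (not part of the patch): the three static
 * callbacks above are handed to libperf, which drives the mmap walk and
 * calls back into the tool:
 *
 *   .idx  -> perf_evlist__mmap_cb_idx()   auxtrace params per mmap index
 *   .get  -> perf_evlist__mmap_cb_get()   lazily allocate evlist->mmap or
 *                                         ->overwrite_mmap, return slot idx
 *   .mmap -> perf_evlist__mmap_cb_mmap()  perform the actual mmap__mmap()
 *
 * evlist__mmap_ex(), later in this patch, wires them into a
 * perf_evlist_mmap_ops. A sketch of the simplest entry point:
 */
static int example__mmap_default(struct evlist *evlist)
{
	/* UINT_MAX makes evlist__mmap_size() fall back to the
	 * perf_event_mlock_kb_in_pages() default. */
	return evlist__mmap(evlist, UINT_MAX);
}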
@@ -921,7 +777,7 @@
 	return pages;
 }
 
-size_t perf_evlist__mmap_size(unsigned long pages)
+size_t evlist__mmap_size(unsigned long pages)
 {
 	if (pages == UINT_MAX)
 		pages = perf_event_mlock_kb_in_pages();
@@ -1004,7 +860,7 @@
 }
 
 /**
- * perf_evlist__mmap_ex - Create mmaps to receive events.
+ * evlist__mmap_ex - Create mmaps to receive events.
  * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
@@ -1012,7 +868,7 @@
 * @auxtrace_overwrite - overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
- * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
+ * perf_mmap__write_tail(). Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
@@ -1020,58 +876,47 @@
 *
 * Return: %0 on success, negative error code otherwise.
 */
-int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
+int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
-			 bool auxtrace_overwrite)
+			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
+			 int comp_level)
 {
-	struct perf_evsel *evsel;
-	const struct cpu_map *cpus = evlist->cpus;
-	const struct thread_map *threads = evlist->threads;
 	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
-	struct mmap_params mp;
+	struct mmap_params mp = {
+		.nr_cblocks	= nr_cblocks,
+		.affinity	= affinity,
+		.flush		= flush,
+		.comp_level	= comp_level
+	};
+	struct perf_evlist_mmap_ops ops = {
+		.idx  = perf_evlist__mmap_cb_idx,
+		.get  = perf_evlist__mmap_cb_get,
+		.mmap = perf_evlist__mmap_cb_mmap,
+	};
 
-	if (!evlist->mmap)
-		evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
-	if (!evlist->mmap)
-		return -ENOMEM;
+	evlist->core.mmap_len = evlist__mmap_size(pages);
+	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
 
-	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
-		return -ENOMEM;
-
-	evlist->mmap_len = perf_evlist__mmap_size(pages);
-	pr_debug("mmap size %zuB\n", evlist->mmap_len);
-	mp.mask = evlist->mmap_len - page_size - 1;
-
-	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
+	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);
 
-	evlist__for_each_entry(evlist, evsel) {
-		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-		    evsel->sample_id == NULL &&
-		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
-			return -ENOMEM;
-	}
-
-	if (cpu_map__empty(cpus))
-		return perf_evlist__mmap_per_thread(evlist, &mp);
-
-	return perf_evlist__mmap_per_cpu(evlist, &mp);
+	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
 }
 
-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
+int evlist__mmap(struct evlist *evlist, unsigned int pages)
 {
-	return perf_evlist__mmap_ex(evlist, pages, 0, false);
+	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
 }
 
-int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
+int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
 {
 	bool all_threads = (target->per_thread && target->system_wide);
-	struct cpu_map *cpus;
-	struct thread_map *threads;
+	struct perf_cpu_map *cpus;
+	struct perf_thread_map *threads;
 
 	/*
	 * If specify '-a' and '--per-thread' to perf record, perf record
@@ -1098,68 +943,49 @@
 		return -1;
 
 	if (target__uses_dummy_map(target))
-		cpus = cpu_map__dummy_new();
+		cpus = perf_cpu_map__dummy_new();
 	else
-		cpus = cpu_map__new(target->cpu_list);
+		cpus = perf_cpu_map__new(target->cpu_list);
 
 	if (!cpus)
 		goto out_delete_threads;
 
-	evlist->has_user_cpus = !!target->cpu_list;
+	evlist->core.has_user_cpus = !!target->cpu_list;
 
-	perf_evlist__set_maps(evlist, cpus, threads);
+	perf_evlist__set_maps(&evlist->core, cpus, threads);
+
+	/* as evlist now has references, put count here */
+	perf_cpu_map__put(cpus);
+	perf_thread_map__put(threads);
 
 	return 0;
 
 out_delete_threads:
-	thread_map__put(threads);
+	perf_thread_map__put(threads);
 	return -1;
 }
 
-void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
-			   struct thread_map *threads)
-{
-	/*
-	 * Allow for the possibility that one or another of the maps isn't being
-	 * changed i.e. don't put it. Note we are assuming the maps that are
-	 * being applied are brand new and evlist is taking ownership of the
-	 * original reference count of 1. If that is not the case it is up to
-	 * the caller to increase the reference count.
-	 */
-	if (cpus != evlist->cpus) {
-		cpu_map__put(evlist->cpus);
-		evlist->cpus = cpu_map__get(cpus);
-	}
-
-	if (threads != evlist->threads) {
-		thread_map__put(evlist->threads);
-		evlist->threads = thread_map__get(threads);
-	}
-
-	perf_evlist__propagate_maps(evlist);
-}
-
-void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
+void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel)
-		__perf_evsel__set_sample_bit(evsel, bit);
+		__evsel__set_sample_bit(evsel, bit);
 }
 
-void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
+void __perf_evlist__reset_sample_bit(struct evlist *evlist,
				     enum perf_event_sample_format bit)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel)
-		__perf_evsel__reset_sample_bit(evsel, bit);
+		__evsel__reset_sample_bit(evsel, bit);
 }
 
-int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
+int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	int err = 0;
 
 	evlist__for_each_entry(evlist, evsel) {
@@ -1170,7 +996,7 @@
		 * filters only work for tracepoint event, which doesn't have cpu limit.
		 * So evlist and evsel should always be same.
		 */
-		err = perf_evsel__apply_filter(evsel, evsel->filter);
+		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
@@ -1180,16 +1006,19 @@
 	return err;
 }
 
-int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
+int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	int err = 0;
 
+	if (filter == NULL)
+		return -1;
+
 	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;
 
-		err = perf_evsel__set_filter(evsel, filter);
+		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}
@@ -1197,16 +1026,35 @@
 	return err;
 }
 
-int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
+int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
+{
+	struct evsel *evsel;
+	int err = 0;
+
+	if (filter == NULL)
+		return -1;
+
+	evlist__for_each_entry(evlist, evsel) {
+		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
+			continue;
+
+		err = evsel__append_tp_filter(evsel, filter);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
 {
 	char *filter;
-	int ret = -1;
 	size_t i;
 
 	for (i = 0; i < npids; ++i) {
 		if (i == 0) {
 			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
-				return -1;
+				return NULL;
 		} else {
 			char *tmp;
 
@@ -1218,22 +1066,45 @@
 		}
 	}
 
-	ret = perf_evlist__set_filter(evlist, filter);
+	return filter;
 out_free:
+	free(filter);
+	return NULL;
+}
+
+int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
+{
+	char *filter = asprintf__tp_filter_pids(npids, pids);
+	int ret = perf_evlist__set_tp_filter(evlist, filter);
+
 	free(filter);
 	return ret;
 }
 
-int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
+int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
 {
-	return perf_evlist__set_filter_pids(evlist, 1, &pid);
+	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
 }
 
-bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
+int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
 {
-	struct perf_evsel *pos;
+	char *filter = asprintf__tp_filter_pids(npids, pids);
+	int ret = perf_evlist__append_tp_filter(evlist, filter);
 
-	if (evlist->nr_entries == 1)
+	free(filter);
+	return ret;
+}
+
+int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
+{
+	return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
+}
+
+bool evlist__valid_sample_type(struct evlist *evlist)
+{
+	struct evsel *pos;
+
+	if (evlist->core.nr_entries == 1)
 		return true;
 
 	if (evlist->id_pos < 0 || evlist->is_pos < 0)
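/*
 * Example (editor's sketch, not part of the patch): excluding perf's own
 * pid from tracepoint events using the helpers above, the way perf
 * record/trace do. asprintf__tp_filter_pids() builds a string along the
 * lines of "common_pid != 1234 && common_pid != 1235", and the append
 * variant merges it into any filter already set on each tracepoint evsel.
 */
#include <unistd.h>

static int example__exclude_self(struct evlist *evlist)
{
	pid_t self = getpid();

	/* Only PERF_TYPE_TRACEPOINT evsels are touched; others are skipped. */
	return perf_evlist__append_tp_filter_pids(evlist, 1, &self);
}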
@@ -1248,44 +1119,46 @@
 	return true;
 }
 
-u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+u64 __evlist__combined_sample_type(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	if (evlist->combined_sample_type)
 		return evlist->combined_sample_type;
 
 	evlist__for_each_entry(evlist, evsel)
-		evlist->combined_sample_type |= evsel->attr.sample_type;
+		evlist->combined_sample_type |= evsel->core.attr.sample_type;
 
 	return evlist->combined_sample_type;
 }
 
-u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+u64 evlist__combined_sample_type(struct evlist *evlist)
 {
 	evlist->combined_sample_type = 0;
-	return __perf_evlist__combined_sample_type(evlist);
+	return __evlist__combined_sample_type(evlist);
 }
 
-u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
+u64 evlist__combined_branch_type(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	u64 branch_type = 0;
 
 	evlist__for_each_entry(evlist, evsel)
-		branch_type |= evsel->attr.branch_sample_type;
+		branch_type |= evsel->core.attr.branch_sample_type;
 	return branch_type;
 }
 
-bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
+bool perf_evlist__valid_read_format(struct evlist *evlist)
 {
-	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
-	u64 read_format = first->attr.read_format;
-	u64 sample_type = first->attr.sample_type;
+	struct evsel *first = evlist__first(evlist), *pos = first;
+	u64 read_format = first->core.attr.read_format;
+	u64 sample_type = first->core.attr.sample_type;
 
 	evlist__for_each_entry(evlist, pos) {
-		if (read_format != pos->attr.read_format)
-			return false;
+		if (read_format != pos->core.attr.read_format) {
+			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
+				 read_format, (u64)pos->core.attr.read_format);
+		}
	}
 
	/* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */
@@ -1297,23 +1170,17 @@
 	return true;
 }
 
-u64 perf_evlist__read_format(struct perf_evlist *evlist)
-{
-	struct perf_evsel *first = perf_evlist__first(evlist);
-	return first->attr.read_format;
-}
-
-u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
+u16 perf_evlist__id_hdr_size(struct evlist *evlist)
 {
-	struct perf_evsel *first = perf_evlist__first(evlist);
+	struct evsel *first = evlist__first(evlist);
 	struct perf_sample *data;
 	u64 sample_type;
 	u16 size = 0;
 
-	if (!first->attr.sample_id_all)
+	if (!first->core.attr.sample_id_all)
 		goto out;
 
-	sample_type = first->attr.sample_type;
+	sample_type = first->core.attr.sample_type;
 
 	if (sample_type & PERF_SAMPLE_TID)
 		size += sizeof(data->tid) * 2;
@@ -1336,42 +1203,68 @@
 	return size;
 }
 
-bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
+bool evlist__valid_sample_id_all(struct evlist *evlist)
 {
-	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+	struct evsel *first = evlist__first(evlist), *pos = first;
 
 	evlist__for_each_entry_continue(evlist, pos) {
-		if (first->attr.sample_id_all != pos->attr.sample_id_all)
+		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}
 
	return true;
 }
 
-bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
+bool evlist__sample_id_all(struct evlist *evlist)
 {
-	struct perf_evsel *first = perf_evlist__first(evlist);
-	return first->attr.sample_id_all;
+	struct evsel *first = evlist__first(evlist);
+	return first->core.attr.sample_id_all;
 }
 
-void perf_evlist__set_selected(struct perf_evlist *evlist,
-			       struct perf_evsel *evsel)
+void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
 {
 	evlist->selected = evsel;
 }
 
-void perf_evlist__close(struct perf_evlist *evlist)
+void evlist__close(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
+	struct affinity affinity;
+	int cpu, i;
 
-	evlist__for_each_entry_reverse(evlist, evsel)
-		perf_evsel__close(evsel);
+	/*
+	 * With perf record core.cpus is usually NULL.
+	 * Use the old method to handle this for now.
+	 */
+	if (!evlist->core.cpus) {
+		evlist__for_each_entry_reverse(evlist, evsel)
+			evsel__close(evsel);
+		return;
+	}
+
+	if (affinity__setup(&affinity) < 0)
+		return;
+	evlist__for_each_cpu(evlist, i, cpu) {
+		affinity__set(&affinity, cpu);
+
+		evlist__for_each_entry_reverse(evlist, evsel) {
+			if (evsel__cpu_iter_skip(evsel, cpu))
+				continue;
+			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
+		}
+	}
+	affinity__cleanup(&affinity);
+	evlist__for_each_entry_reverse(evlist, evsel) {
+		perf_evsel__free_fd(&evsel->core);
+		perf_evsel__free_id(&evsel->core);
+	}
 }
 
-static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
+static int perf_evlist__create_syswide_maps(struct evlist *evlist)
 {
-	struct cpu_map *cpus;
-	struct thread_map *threads;
+	struct perf_cpu_map *cpus;
+	struct perf_thread_map *threads;
 	int err = -ENOMEM;
 
 	/*
@@ -1383,32 +1276,33 @@
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
-	cpus = cpu_map__new(NULL);
+	cpus = perf_cpu_map__new(NULL);
 	if (!cpus)
 		goto out;
 
-	threads = thread_map__new_dummy();
+	threads = perf_thread_map__new_dummy();
 	if (!threads)
 		goto out_put;
 
-	perf_evlist__set_maps(evlist, cpus, threads);
+	perf_evlist__set_maps(&evlist->core, cpus, threads);
+
+	perf_thread_map__put(threads);
+out_put:
+	perf_cpu_map__put(cpus);
 out:
 	return err;
-out_put:
-	cpu_map__put(cpus);
-	goto out;
 }
 
-int perf_evlist__open(struct perf_evlist *evlist)
+int evlist__open(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	int err;
 
 	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
-	if (evlist->threads == NULL && evlist->cpus == NULL) {
+	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
@@ -1417,19 +1311,19 @@
 	perf_evlist__update_id_pos(evlist);
 
 	evlist__for_each_entry(evlist, evsel) {
-		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
+		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}
 
	return 0;
 out_err:
-	perf_evlist__close(evlist);
+	evlist__close(evlist);
	errno = -err;
	return err;
 }
 
-int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
+int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
 {
@@ -1511,12 +1405,12 @@
 	}
 
 	if (target__none(target)) {
-		if (evlist->threads == NULL) {
+		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
-		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
+		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}
 
	close(child_ready_pipe[1]);
@@ -1543,7 +1437,7 @@
 	return -1;
 }
 
-int perf_evlist__start_workload(struct perf_evlist *evlist)
+int perf_evlist__start_workload(struct evlist *evlist)
 {
 	if (evlist->workload.cork_fd > 0) {
 		char bf = 0;
@@ -1562,42 +1456,28 @@
 	return 0;
 }
 
-int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
 {
-	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
 
 	if (!evsel)
 		return -EFAULT;
-	return perf_evsel__parse_sample(evsel, event, sample);
+	return evsel__parse_sample(evsel, event, sample);
 }
 
-int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
+int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
					union perf_event *event,
					u64 *timestamp)
 {
-	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
 
 	if (!evsel)
 		return -EFAULT;
-	return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
+	return evsel__parse_sample_timestamp(evsel, event, timestamp);
 }
 
-size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
-{
-	struct perf_evsel *evsel;
-	size_t printed = 0;
-
-	evlist__for_each_entry(evlist, evsel) {
-		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
-				   perf_evsel__name(evsel));
-	}
-
-	return printed + fprintf(fp, "\n");
-}
-
-int perf_evlist__strerror_open(struct perf_evlist *evlist,
-			       int err, char *buf, size_t size)
+int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
 {
 	int printed, value;
 	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
....@@ -1625,20 +1505,20 @@
16251505 "Hint:\tThe current value is %d.", value);
16261506 break;
16271507 case EINVAL: {
1628
- struct perf_evsel *first = perf_evlist__first(evlist);
1508
+ struct evsel *first = evlist__first(evlist);
16291509 int max_freq;
16301510
16311511 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
16321512 goto out_default;
16331513
1634
- if (first->attr.sample_freq < (u64)max_freq)
1514
+ if (first->core.attr.sample_freq < (u64)max_freq)
16351515 goto out_default;
16361516
16371517 printed = scnprintf(buf, size,
16381518 "Error:\t%s.\n"
16391519 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
16401520 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1641
- emsg, max_freq, first->attr.sample_freq);
1521
+ emsg, max_freq, first->core.attr.sample_freq);
16421522 break;
16431523 }
16441524 default:
....@@ -1650,10 +1530,10 @@
16501530 return 0;
16511531 }
16521532
1653
-int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1533
+int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
16541534 {
16551535 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1656
- int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
1536
+ int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
16571537
16581538 switch (err) {
16591539 case EPERM:
....@@ -1681,27 +1561,39 @@
16811561 return 0;
16821562 }
16831563
1684
-void perf_evlist__to_front(struct perf_evlist *evlist,
1685
- struct perf_evsel *move_evsel)
1564
+void perf_evlist__to_front(struct evlist *evlist,
1565
+ struct evsel *move_evsel)
16861566 {
1687
- struct perf_evsel *evsel, *n;
1567
+ struct evsel *evsel, *n;
16881568 LIST_HEAD(move);
16891569
1690
- if (move_evsel == perf_evlist__first(evlist))
1570
+ if (move_evsel == evlist__first(evlist))
16911571 return;
16921572
16931573 evlist__for_each_entry_safe(evlist, n, evsel) {
16941574 if (evsel->leader == move_evsel->leader)
1695
- list_move_tail(&evsel->node, &move);
1575
+ list_move_tail(&evsel->core.node, &move);
16961576 }
16971577
1698
- list_splice(&move, &evlist->entries);
1578
+ list_splice(&move, &evlist->core.entries);
16991579 }
17001580
1701
-void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1702
- struct perf_evsel *tracking_evsel)
1581
+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
17031582 {
1704
- struct perf_evsel *evsel;
1583
+ struct evsel *evsel;
1584
+
1585
+ evlist__for_each_entry(evlist, evsel) {
1586
+ if (evsel->tracking)
1587
+ return evsel;
1588
+ }
1589
+
1590
+ return evlist__first(evlist);
1591
+}
1592
+
1593
+void perf_evlist__set_tracking_event(struct evlist *evlist,
1594
+ struct evsel *tracking_evsel)
1595
+{
1596
+ struct evsel *evsel;
17051597
17061598 if (tracking_evsel->tracking)
17071599 return;
....@@ -1714,11 +1606,11 @@
17141606 tracking_evsel->tracking = true;
17151607 }
17161608
1717
-struct perf_evsel *
1718
-perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1609
+struct evsel *
1610
+perf_evlist__find_evsel_by_str(struct evlist *evlist,
17191611 const char *str)
17201612 {
1721
- struct perf_evsel *evsel;
1613
+ struct evsel *evsel;
17221614
17231615 evlist__for_each_entry(evlist, evsel) {
17241616 if (!evsel->name)
....@@ -1730,7 +1622,7 @@
17301622 return NULL;
17311623 }
17321624
1733
-void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1625
+void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
17341626 enum bkw_mmap_state state)
17351627 {
17361628 enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
....@@ -1788,12 +1680,12 @@
17881680 return;
17891681 }
17901682
1791
-bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
1683
+bool perf_evlist__exclude_kernel(struct evlist *evlist)
17921684 {
1793
- struct perf_evsel *evsel;
1685
+ struct evsel *evsel;
17941686
17951687 evlist__for_each_entry(evlist, evsel) {
1796
- if (!evsel->attr.exclude_kernel)
1688
+ if (!evsel->core.attr.exclude_kernel)
17971689 return false;
17981690 }
17991691
....@@ -1805,12 +1697,294 @@
18051697 * the group display. Set the artificial group and set the leader's
18061698 * forced_leader flag to notify the display code.
18071699 */
1808
-void perf_evlist__force_leader(struct perf_evlist *evlist)
1700
+void perf_evlist__force_leader(struct evlist *evlist)
18091701 {
18101702 if (!evlist->nr_groups) {
1811
- struct perf_evsel *leader = perf_evlist__first(evlist);
1703
+ struct evsel *leader = evlist__first(evlist);
18121704
18131705 perf_evlist__set_leader(evlist);
18141706 leader->forced_leader = true;
18151707 }
18161708 }
1709
+
1710
+struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
1711
+ struct evsel *evsel,
1712
+ bool close)
1713
+{
1714
+ struct evsel *c2, *leader;
1715
+ bool is_open = true;
1716
+
1717
+ leader = evsel->leader;
1718
+ pr_debug("Weak group for %s/%d failed\n",
1719
+ leader->name, leader->core.nr_members);
1720
+
1721
+ /*
1722
+ * for_each_group_member doesn't work here because it doesn't
1723
+ * include the first entry.
1724
+ */
1725
+ evlist__for_each_entry(evsel_list, c2) {
1726
+ if (c2 == evsel)
1727
+ is_open = false;
1728
+ if (c2->leader == leader) {
1729
+ if (is_open && close)
1730
+ perf_evsel__close(&c2->core);
1731
+ c2->leader = c2;
1732
+ c2->core.nr_members = 0;
1733
+ /*
1734
+ * Set this for all former members of the group
1735
+ * to indicate they get reopened.
1736
+ */
1737
+ c2->reset_group = true;
1738
+ }
1739
+ }
1740
+ return leader;
1741
+}
1742
+
1743
+static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
1744
+{
1745
+ char *s, *p;
1746
+ int ret = 0, fd;
1747
+
1748
+ if (strncmp(str, "fifo:", 5))
1749
+ return -EINVAL;
1750
+
1751
+ str += 5;
1752
+ if (!*str || *str == ',')
1753
+ return -EINVAL;
1754
+
1755
+ s = strdup(str);
1756
+ if (!s)
1757
+ return -ENOMEM;
1758
+
1759
+ p = strchr(s, ',');
1760
+ if (p)
1761
+ *p = '\0';
1762
+
1763
+ /*
1764
+	 * O_RDWR avoids POLLHUPs, which is necessary to allow the other
1765
+ * end of a FIFO to be repeatedly opened and closed.
1766
+ */
1767
+ fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
1768
+ if (fd < 0) {
1769
+ pr_err("Failed to open '%s'\n", s);
1770
+ ret = -errno;
1771
+ goto out_free;
1772
+ }
1773
+ *ctl_fd = fd;
1774
+ *ctl_fd_close = true;
1775
+
1776
+ if (p && *++p) {
1777
+ /* O_RDWR | O_NONBLOCK means the other end need not be open */
1778
+ fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
1779
+ if (fd < 0) {
1780
+ pr_err("Failed to open '%s'\n", p);
1781
+ ret = -errno;
1782
+ goto out_free;
1783
+ }
1784
+ *ctl_fd_ack = fd;
1785
+ }
1786
+
1787
+out_free:
1788
+ free(s);
1789
+ return ret;
1790
+}
1791
+
1792
+int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
1793
+{
1794
+ char *comma = NULL, *endptr = NULL;
1795
+
1796
+ *ctl_fd_close = false;
1797
+
1798
+ if (strncmp(str, "fd:", 3))
1799
+ return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);
1800
+
1801
+ *ctl_fd = strtoul(&str[3], &endptr, 0);
1802
+ if (endptr == &str[3])
1803
+ return -EINVAL;
1804
+
1805
+ comma = strchr(str, ',');
1806
+ if (comma) {
1807
+ if (endptr != comma)
1808
+ return -EINVAL;
1809
+
1810
+ *ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
1811
+ if (endptr == comma + 1 || *endptr != '\0')
1812
+ return -EINVAL;
1813
+ }
1814
+
1815
+ return 0;
1816
+}
1817
+
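The control specifications accepted by the parser above, as a sketch; the descriptor numbers and paths are examples only:

	int ctl_fd = -1, ctl_fd_ack = -1;
	bool ctl_fd_close = false;

	/*
	 *   "fd:10"                  - pre-opened control fd
	 *   "fd:10,11"               - control fd plus ack fd
	 *   "fifo:/tmp/ctl"          - control FIFO, opened here
	 *   "fifo:/tmp/ctl,/tmp/ack" - control FIFO plus ack FIFO
	 */
	if (evlist__parse_control("fd:10,11", &ctl_fd, &ctl_fd_ack, &ctl_fd_close))
		return -EINVAL;
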
1818
+void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
1819
+{
1820
+ if (*ctl_fd_close) {
1821
+ *ctl_fd_close = false;
1822
+ close(ctl_fd);
1823
+ if (ctl_fd_ack >= 0)
1824
+ close(ctl_fd_ack);
1825
+ }
1826
+}
1827
+
1828
+int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
1829
+{
1830
+ if (fd == -1) {
1831
+ pr_debug("Control descriptor is not initialized\n");
1832
+ return 0;
1833
+ }
1834
+
1835
+ evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
1836
+ fdarray_flag__nonfilterable);
1837
+ if (evlist->ctl_fd.pos < 0) {
1838
+ evlist->ctl_fd.pos = -1;
1839
+ pr_err("Failed to add ctl fd entry: %m\n");
1840
+ return -1;
1841
+ }
1842
+
1843
+ evlist->ctl_fd.fd = fd;
1844
+ evlist->ctl_fd.ack = ack;
1845
+
1846
+ return 0;
1847
+}
1848
+
1849
+bool evlist__ctlfd_initialized(struct evlist *evlist)
1850
+{
1851
+ return evlist->ctl_fd.pos >= 0;
1852
+}
1853
+
1854
+int evlist__finalize_ctlfd(struct evlist *evlist)
1855
+{
1856
+ struct pollfd *entries = evlist->core.pollfd.entries;
1857
+
1858
+ if (!evlist__ctlfd_initialized(evlist))
1859
+ return 0;
1860
+
1861
+ entries[evlist->ctl_fd.pos].fd = -1;
1862
+ entries[evlist->ctl_fd.pos].events = 0;
1863
+ entries[evlist->ctl_fd.pos].revents = 0;
1864
+
1865
+ evlist->ctl_fd.pos = -1;
1866
+ evlist->ctl_fd.ack = -1;
1867
+ evlist->ctl_fd.fd = -1;
1868
+
1869
+ return 0;
1870
+}
1871
+
1872
+static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
1873
+ char *cmd_data, size_t data_size)
1874
+{
1875
+ int err;
1876
+	char c = 0;	/* initialized: the pr_debug() below reads c even when no byte arrived */
1877
+ size_t bytes_read = 0;
1878
+
1879
+ *cmd = EVLIST_CTL_CMD_UNSUPPORTED;
1880
+ memset(cmd_data, 0, data_size);
1881
+ data_size--;
1882
+
1883
+ do {
1884
+ err = read(evlist->ctl_fd.fd, &c, 1);
1885
+ if (err > 0) {
1886
+ if (c == '\n' || c == '\0')
1887
+ break;
1888
+ cmd_data[bytes_read++] = c;
1889
+ if (bytes_read == data_size)
1890
+ break;
1891
+ continue;
1892
+ } else if (err == -1) {
1893
+ if (errno == EINTR)
1894
+ continue;
1895
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
1896
+ err = 0;
1897
+ else
1898
+ pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
1899
+ }
1900
+ break;
1901
+ } while (1);
1902
+
1903
+ pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
1904
+ bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");
1905
+
1906
+ if (bytes_read > 0) {
1907
+ if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
1908
+ (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
1909
+ *cmd = EVLIST_CTL_CMD_ENABLE;
1910
+ } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
1911
+ (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
1912
+ *cmd = EVLIST_CTL_CMD_DISABLE;
1913
+ } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
1914
+ (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
1915
+ *cmd = EVLIST_CTL_CMD_SNAPSHOT;
1916
+ pr_debug("is snapshot\n");
1917
+ }
1918
+ }
1919
+
1920
+ return bytes_read ? (int)bytes_read : err;
1921
+}
1922
+
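For the other end of the FIFO, a minimal sketch of a controlling process; the path is an example, and the command string assumes EVLIST_CTL_CMD_ENABLE_TAG expands to "enable" (the trailing '\n' matches the delimiter handled above):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int fd = open("/tmp/perf-ctl", O_WRONLY);	/* example path */

	if (fd >= 0) {
		if (write(fd, "enable\n", 7) != 7)
			perror("write");
		close(fd);
	}
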
1923
+int evlist__ctlfd_ack(struct evlist *evlist)
1924
+{
1925
+ int err;
1926
+
1927
+ if (evlist->ctl_fd.ack == -1)
1928
+ return 0;
1929
+
1930
+ err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
1931
+ sizeof(EVLIST_CTL_CMD_ACK_TAG));
1932
+ if (err == -1)
1933
+ pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);
1934
+
1935
+ return err;
1936
+}
1937
+
1938
+int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
1939
+{
1940
+ int err = 0;
1941
+ char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
1942
+ int ctlfd_pos = evlist->ctl_fd.pos;
1943
+ struct pollfd *entries = evlist->core.pollfd.entries;
1944
+
1945
+ if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
1946
+ return 0;
1947
+
1948
+ if (entries[ctlfd_pos].revents & POLLIN) {
1949
+ err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
1950
+ EVLIST_CTL_CMD_MAX_LEN);
1951
+ if (err > 0) {
1952
+ switch (*cmd) {
1953
+ case EVLIST_CTL_CMD_ENABLE:
1954
+ evlist__enable(evlist);
1955
+ break;
1956
+ case EVLIST_CTL_CMD_DISABLE:
1957
+ evlist__disable(evlist);
1958
+ break;
1959
+ case EVLIST_CTL_CMD_SNAPSHOT:
1960
+ break;
1961
+ case EVLIST_CTL_CMD_ACK:
1962
+ case EVLIST_CTL_CMD_UNSUPPORTED:
1963
+ default:
1964
+ pr_debug("ctlfd: unsupported %d\n", *cmd);
1965
+ break;
1966
+ }
1967
+ if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
1968
+ *cmd == EVLIST_CTL_CMD_SNAPSHOT))
1969
+ evlist__ctlfd_ack(evlist);
1970
+ }
1971
+ }
1972
+
1973
+ if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
1974
+ evlist__finalize_ctlfd(evlist);
1975
+ else
1976
+ entries[ctlfd_pos].revents = 0;
1977
+
1978
+ return err;
1979
+}
1980
+
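Putting the control-fd pieces together, a condensed event-loop sketch; done is a hypothetical termination flag (e.g. set by a SIGINT handler), and evlist__poll() is assumed to be the usual wrapper around polling evlist->core.pollfd:

	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	volatile int done = 0;			/* hypothetical termination flag */

	if (evlist__initialize_ctlfd(evlist, ctl_fd, ctl_fd_ack) < 0)
		return -1;
	while (!done) {
		if (evlist__poll(evlist, -1) > 0)	/* assumed wrapper */
			evlist__ctlfd_process(evlist, &cmd);
	}
	evlist__finalize_ctlfd(evlist);
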
1981
+struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
1982
+{
1983
+ struct evsel *evsel;
1984
+
1985
+ evlist__for_each_entry(evlist, evsel) {
1986
+ if (evsel->idx == idx)
1987
+ return evsel;
1988
+ }
1989
+ return NULL;
1990
+}