@@ -1 +1 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  *
  * Parts came from builtin-{top,stat,record}.c, see those files for further
  * copyright notes.
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
-#include "util.h"
 #include <api/fs/fs.h>
 #include <errno.h>
 #include <inttypes.h>
 #include <poll.h>
 #include "cpumap.h"
+#include "util/mmap.h"
 #include "thread_map.h"
 #include "target.h"
 #include "evlist.h"
 #include "evsel.h"
 #include "debug.h"
 #include "units.h"
+#include <internal/lib.h> // page_size
+#include "affinity.h"
+#include "../perf.h"
 #include "asm/bug.h"
+#include "bpf-event.h"
+#include "util/string2.h"
+#include "util/perf_api_probe.h"
 #include <signal.h>
 #include <unistd.h>
+#include <sched.h>
+#include <stdlib.h>

 #include "parse-events.h"
 #include <subcmd/parse-options.h>
@@ -33 +40 @@
 #include <linux/hash.h>
 #include <linux/log2.h>
 #include <linux/err.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <perf/evlist.h>
+#include <perf/evsel.h>
+#include <perf/cpumap.h>
+#include <perf/mmap.h>
+
+#include <internal/xyarray.h>

 #ifdef LACKS_SIGQUEUE_PROTOTYPE
 int sigqueue(pid_t pid, int sig, const union sigval value);
 #endif

-#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

-void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
-		       struct thread_map *threads)
+void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
+		  struct perf_thread_map *threads)
 {
-	int i;
-
-	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
-		INIT_HLIST_HEAD(&evlist->heads[i]);
-	INIT_LIST_HEAD(&evlist->entries);
-	perf_evlist__set_maps(evlist, cpus, threads);
-	fdarray__init(&evlist->pollfd, 64);
+	perf_evlist__init(&evlist->core);
+	perf_evlist__set_maps(&evlist->core, cpus, threads);
 	evlist->workload.pid = -1;
 	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
+	evlist->ctl_fd.fd = -1;
+	evlist->ctl_fd.ack = -1;
+	evlist->ctl_fd.pos = -1;
 }

-struct perf_evlist *perf_evlist__new(void)
+struct evlist *evlist__new(void)
 {
-	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
+	struct evlist *evlist = zalloc(sizeof(*evlist));

 	if (evlist != NULL)
-		perf_evlist__init(evlist, NULL, NULL);
+		evlist__init(evlist, NULL, NULL);

 	return evlist;
 }

-struct perf_evlist *perf_evlist__new_default(void)
+struct evlist *perf_evlist__new_default(void)
 {
-	struct perf_evlist *evlist = perf_evlist__new();
+	struct evlist *evlist = evlist__new();

-	if (evlist && perf_evlist__add_default(evlist)) {
-		perf_evlist__delete(evlist);
+	if (evlist && evlist__add_default(evlist)) {
+		evlist__delete(evlist);
 		evlist = NULL;
 	}

 	return evlist;
 }

-struct perf_evlist *perf_evlist__new_dummy(void)
+struct evlist *perf_evlist__new_dummy(void)
 {
-	struct perf_evlist *evlist = perf_evlist__new();
+	struct evlist *evlist = evlist__new();

-	if (evlist && perf_evlist__add_dummy(evlist)) {
-		perf_evlist__delete(evlist);
+	if (evlist && evlist__add_dummy(evlist)) {
+		evlist__delete(evlist);
 		evlist = NULL;
 	}

@@ -96 +109 @@
  * Events with compatible sample types all have the same id_pos
  * and is_pos. For convenience, put a copy on evlist.
  */
-void perf_evlist__set_id_pos(struct perf_evlist *evlist)
+void perf_evlist__set_id_pos(struct evlist *evlist)
 {
-	struct perf_evsel *first = perf_evlist__first(evlist);
+	struct evsel *first = evlist__first(evlist);

 	evlist->id_pos = first->id_pos;
 	evlist->is_pos = first->is_pos;
 }

-static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
+static void perf_evlist__update_id_pos(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;

 	evlist__for_each_entry(evlist, evsel)
-		perf_evsel__calc_id_pos(evsel);
+		evsel__calc_id_pos(evsel);

 	perf_evlist__set_id_pos(evlist);
 }

-static void perf_evlist__purge(struct perf_evlist *evlist)
+static void evlist__purge(struct evlist *evlist)
 {
-	struct perf_evsel *pos, *n;
+	struct evsel *pos, *n;

 	evlist__for_each_entry_safe(evlist, n, pos) {
-		list_del_init(&pos->node);
+		list_del_init(&pos->core.node);
 		pos->evlist = NULL;
-		perf_evsel__delete(pos);
+		evsel__delete(pos);
 	}

-	evlist->nr_entries = 0;
+	evlist->core.nr_entries = 0;
 }

-void perf_evlist__exit(struct perf_evlist *evlist)
+void evlist__exit(struct evlist *evlist)
 {
 	zfree(&evlist->mmap);
 	zfree(&evlist->overwrite_mmap);
-	fdarray__exit(&evlist->pollfd);
+	perf_evlist__exit(&evlist->core);
 }

-void perf_evlist__delete(struct perf_evlist *evlist)
+void evlist__delete(struct evlist *evlist)
 {
 	if (evlist == NULL)
 		return;

-	perf_evlist__munmap(evlist);
-	perf_evlist__close(evlist);
-	cpu_map__put(evlist->cpus);
-	thread_map__put(evlist->threads);
-	evlist->cpus = NULL;
-	evlist->threads = NULL;
-	perf_evlist__purge(evlist);
-	perf_evlist__exit(evlist);
+	evlist__munmap(evlist);
+	evlist__close(evlist);
+	evlist__purge(evlist);
+	evlist__exit(evlist);
 	free(evlist);
 }

-static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
-					  struct perf_evsel *evsel)
-{
-	/*
-	 * We already have cpus for evsel (via PMU sysfs) so
-	 * keep it, if there's no target cpu list defined.
-	 */
-	if (!evsel->own_cpus || evlist->has_user_cpus) {
-		cpu_map__put(evsel->cpus);
-		evsel->cpus = cpu_map__get(evlist->cpus);
-	} else if (evsel->cpus != evsel->own_cpus) {
-		cpu_map__put(evsel->cpus);
-		evsel->cpus = cpu_map__get(evsel->own_cpus);
-	}
-
-	thread_map__put(evsel->threads);
-	evsel->threads = thread_map__get(evlist->threads);
-}
-
-static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
-{
-	struct perf_evsel *evsel;
-
-	evlist__for_each_entry(evlist, evsel)
-		__perf_evlist__propagate_maps(evlist, evsel);
-}
-
-void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
+void evlist__add(struct evlist *evlist, struct evsel *entry)
 {
 	entry->evlist = evlist;
-	list_add_tail(&entry->node, &evlist->entries);
-	entry->idx = evlist->nr_entries;
+	entry->idx = evlist->core.nr_entries;
 	entry->tracking = !entry->idx;

-	if (!evlist->nr_entries++)
-		perf_evlist__set_id_pos(evlist);
+	perf_evlist__add(&evlist->core, &entry->core);

-	__perf_evlist__propagate_maps(evlist, entry);
+	if (evlist->core.nr_entries == 1)
+		perf_evlist__set_id_pos(evlist);
 }

-void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
+void evlist__remove(struct evlist *evlist, struct evsel *evsel)
 {
 	evsel->evlist = NULL;
-	list_del_init(&evsel->node);
-	evlist->nr_entries -= 1;
+	perf_evlist__remove(&evlist->core, &evsel->core);
 }

-void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+void perf_evlist__splice_list_tail(struct evlist *evlist,
 				   struct list_head *list)
 {
-	struct perf_evsel *evsel, *temp;
+	struct evsel *evsel, *temp;

 	__evlist__for_each_entry_safe(list, temp, evsel) {
-		list_del_init(&evsel->node);
-		perf_evlist__add(evlist, evsel);
+		list_del_init(&evsel->core.node);
+		evlist__add(evlist, evsel);
 	}
+}
+
+int __evlist__set_tracepoints_handlers(struct evlist *evlist,
+				       const struct evsel_str_handler *assocs, size_t nr_assocs)
+{
+	struct evsel *evsel;
+	size_t i;
+	int err;
+
+	for (i = 0; i < nr_assocs; i++) {
+		// Adding a handler for an event not in this evlist, just ignore it.
+		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
+		if (evsel == NULL)
+			continue;
+
+		err = -EEXIST;
+		if (evsel->handler != NULL)
+			goto out;
+		evsel->handler = assocs[i].handler;
+	}
+
+	err = 0;
+out:
+	return err;
 }

 void __perf_evlist__set_leader(struct list_head *list)
 {
-	struct perf_evsel *evsel, *leader;
+	struct evsel *evsel, *leader;

-	leader = list_entry(list->next, struct perf_evsel, node);
-	evsel = list_entry(list->prev, struct perf_evsel, node);
+	leader = list_entry(list->next, struct evsel, core.node);
+	evsel = list_entry(list->prev, struct evsel, core.node);

-	leader->nr_members = evsel->idx - leader->idx + 1;
+	leader->core.nr_members = evsel->idx - leader->idx + 1;

 	__evlist__for_each_entry(list, evsel) {
 		evsel->leader = leader;
 	}
 }

-void perf_evlist__set_leader(struct perf_evlist *evlist)
+void perf_evlist__set_leader(struct evlist *evlist)
 {
-	if (evlist->nr_entries) {
-		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
-		__perf_evlist__set_leader(&evlist->entries);
+	if (evlist->core.nr_entries) {
+		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
+		__perf_evlist__set_leader(&evlist->core.entries);
 	}
 }

-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
+int __evlist__add_default(struct evlist *evlist, bool precise)
 {
-	attr->precise_ip = 3;
-
-	while (attr->precise_ip != 0) {
-		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
-		if (fd != -1) {
-			close(fd);
-			break;
-		}
-		--attr->precise_ip;
-	}
-}
-
-int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
-{
-	struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
+	struct evsel *evsel = evsel__new_cycles(precise);

 	if (evsel == NULL)
 		return -ENOMEM;

-	perf_evlist__add(evlist, evsel);
+	evlist__add(evlist, evsel);
 	return 0;
 }

-int perf_evlist__add_dummy(struct perf_evlist *evlist)
+int evlist__add_dummy(struct evlist *evlist)
 {
 	struct perf_event_attr attr = {
 		.type	= PERF_TYPE_SOFTWARE,
 		.config = PERF_COUNT_SW_DUMMY,
 		.size	= sizeof(attr), /* to capture ABI version */
 	};
-	struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
+	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);

 	if (evsel == NULL)
 		return -ENOMEM;

-	perf_evlist__add(evlist, evsel);
+	evlist__add(evlist, evsel);
 	return 0;
 }

-static int perf_evlist__add_attrs(struct perf_evlist *evlist,
-				  struct perf_event_attr *attrs, size_t nr_attrs)
+static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
 {
-	struct perf_evsel *evsel, *n;
+	struct evsel *evsel, *n;
 	LIST_HEAD(head);
 	size_t i;

 	for (i = 0; i < nr_attrs; i++) {
-		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
+		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
 		if (evsel == NULL)
 			goto out_delete_partial_list;
-		list_add_tail(&evsel->node, &head);
+		list_add_tail(&evsel->core.node, &head);
 	}

 	perf_evlist__splice_list_tail(evlist, &head);
@@ -291 +280 @@

 out_delete_partial_list:
 	__evlist__for_each_entry_safe(&head, n, evsel)
-		perf_evsel__delete(evsel);
+		evsel__delete(evsel);
 	return -1;
 }

-int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
-				     struct perf_event_attr *attrs, size_t nr_attrs)
+int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
 {
 	size_t i;

 	for (i = 0; i < nr_attrs; i++)
 		event_attr_init(attrs + i);

-	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
+	return evlist__add_attrs(evlist, attrs, nr_attrs);
 }

-struct perf_evsel *
-perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+struct evsel *
+perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;

 	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
-		    (int)evsel->attr.config == id)
+		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
+		    (int)evsel->core.attr.config == id)
 			return evsel;
 	}

 	return NULL;
 }

-struct perf_evsel *
-perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+struct evsel *
+perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
 				     const char *name)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;

 	evlist__for_each_entry(evlist, evsel) {
-		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
+		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
 		    (strcmp(evsel->name, name) == 0))
 			return evsel;
 	}
@@ -335 +323 @@
 	return NULL;
 }

-int perf_evlist__add_newtp(struct perf_evlist *evlist,
-			   const char *sys, const char *name, void *handler)
+int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
 {
-	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
+	struct evsel *evsel = evsel__newtp(sys, name);

 	if (IS_ERR(evsel))
 		return -1;

 	evsel->handler = handler;
-	perf_evlist__add(evlist, evsel);
+	evlist__add(evlist, evsel);
 	return 0;
 }

-static int perf_evlist__nr_threads(struct perf_evlist *evlist,
-				   struct perf_evsel *evsel)
+static int perf_evlist__nr_threads(struct evlist *evlist,
+				   struct evsel *evsel)
 {
-	if (evsel->system_wide)
+	if (evsel->core.system_wide)
 		return 1;
 	else
-		return thread_map__nr(evlist->threads);
+		return perf_thread_map__nr(evlist->core.threads);
 }

-void perf_evlist__disable(struct perf_evlist *evlist)
+void evlist__cpu_iter_start(struct evlist *evlist)
 {
-	struct perf_evsel *pos;
+	struct evsel *pos;

+	/*
+	 * Reset the per evsel cpu_iter. This is needed because
+	 * each evsel's cpumap may have a different index space,
+	 * and some operations need the index to modify
+	 * the FD xyarray (e.g. open, close)
+	 */
+	evlist__for_each_entry(evlist, pos)
+		pos->cpu_iter = 0;
+}
+
+bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
+{
+	if (ev->cpu_iter >= ev->core.cpus->nr)
+		return true;
+	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
+		return true;
+	return false;
+}
+
+bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
+{
+	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
+		ev->cpu_iter++;
+		return false;
+	}
+	return true;
+}
+
+void evlist__disable(struct evlist *evlist)
+{
+	struct evsel *pos;
+	struct affinity affinity;
+	int cpu, i, imm = 0;
+	bool has_imm = false;
+
+	if (affinity__setup(&affinity) < 0)
+		return;
+
+	/* Disable 'immediate' events last */
+	for (imm = 0; imm <= 1; imm++) {
+		evlist__for_each_cpu(evlist, i, cpu) {
+			affinity__set(&affinity, cpu);
+
+			evlist__for_each_entry(evlist, pos) {
+				if (evsel__cpu_iter_skip(pos, cpu))
+					continue;
+				if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
+					continue;
+				if (pos->immediate)
+					has_imm = true;
+				if (pos->immediate != imm)
+					continue;
+				evsel__disable_cpu(pos, pos->cpu_iter - 1);
+			}
+		}
+		if (!has_imm)
+			break;
+	}
+
+	affinity__cleanup(&affinity);
 	evlist__for_each_entry(evlist, pos) {
-		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+		if (!evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
-		perf_evsel__disable(pos);
+		pos->disabled = true;
 	}

 	evlist->enabled = false;
 }

-void perf_evlist__enable(struct perf_evlist *evlist)
+void evlist__enable(struct evlist *evlist)
 {
-	struct perf_evsel *pos;
+	struct evsel *pos;
+	struct affinity affinity;
+	int cpu, i;

+	if (affinity__setup(&affinity) < 0)
+		return;
+
+	evlist__for_each_cpu(evlist, i, cpu) {
+		affinity__set(&affinity, cpu);
+
+		evlist__for_each_entry(evlist, pos) {
+			if (evsel__cpu_iter_skip(pos, cpu))
+				continue;
+			if (!evsel__is_group_leader(pos) || !pos->core.fd)
+				continue;
+			evsel__enable_cpu(pos, pos->cpu_iter - 1);
+		}
+	}
+	affinity__cleanup(&affinity);
 	evlist__for_each_entry(evlist, pos) {
-		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+		if (!evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
-		perf_evsel__enable(pos);
+		pos->disabled = false;
 	}

 	evlist->enabled = true;
 }

-void perf_evlist__toggle_enable(struct perf_evlist *evlist)
+void perf_evlist__toggle_enable(struct evlist *evlist)
 {
-	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
+	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
 }

-static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
-					 struct perf_evsel *evsel, int cpu)
+static int perf_evlist__enable_event_cpu(struct evlist *evlist,
+					 struct evsel *evsel, int cpu)
 {
 	int thread;
 	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

-	if (!evsel->fd)
+	if (!evsel->core.fd)
 		return -EINVAL;

 	for (thread = 0; thread < nr_threads; thread++) {
@@ -405 +469 @@
 	return 0;
 }

-static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
-					    struct perf_evsel *evsel,
+static int perf_evlist__enable_event_thread(struct evlist *evlist,
+					    struct evsel *evsel,
 					    int thread)
 {
 	int cpu;
-	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

-	if (!evsel->fd)
+	if (!evsel->core.fd)
 		return -EINVAL;

 	for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -423 +487 @@
 	return 0;
 }

-int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
-				  struct perf_evsel *evsel, int idx)
+int perf_evlist__enable_event_idx(struct evlist *evlist,
+				  struct evsel *evsel, int idx)
 {
-	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
+	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

 	if (per_cpu_mmaps)
 		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
@@ -434 +498 @@
 	return perf_evlist__enable_event_thread(evlist, evsel, idx);
 }

-int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+int evlist__add_pollfd(struct evlist *evlist, int fd)
 {
-	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
-	int nfds = 0;
-	struct perf_evsel *evsel;
-
-	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->system_wide)
-			nfds += nr_cpus;
-		else
-			nfds += nr_cpus * nr_threads;
-	}
-
-	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
-	    fdarray__grow(&evlist->pollfd, nfds) < 0)
-		return -ENOMEM;
-
-	return 0;
+	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
 }

-static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
-				     struct perf_mmap *map, short revent)
+int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
 {
-	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
-	/*
-	 * Save the idx so that when we filter out fds POLLHUP'ed we can
-	 * close the associated evlist->mmap[] entry.
-	 */
-	if (pos >= 0) {
-		evlist->pollfd.priv[pos].ptr = map;
-
-		fcntl(fd, F_SETFL, O_NONBLOCK);
-	}
-
-	return pos;
+	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
 }

-int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+#ifdef HAVE_EVENTFD_SUPPORT
+int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
 {
-	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
+	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
+				       fdarray_flag__nonfilterable);
+}
+#endif
+
+int evlist__poll(struct evlist *evlist, int timeout)
+{
+	return perf_evlist__poll(&evlist->core, timeout);
 }

-static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
-					 void *arg __maybe_unused)
-{
-	struct perf_mmap *map = fda->priv[fd].ptr;
-
-	if (map)
-		perf_mmap__put(map);
-}
-
-int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
-{
-	return fdarray__filter(&evlist->pollfd, revents_and_mask,
-			       perf_evlist__munmap_filtered, NULL);
-}
-
-int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
-{
-	return fdarray__poll(&evlist->pollfd, timeout);
-}
-
-static void perf_evlist__id_hash(struct perf_evlist *evlist,
-				 struct perf_evsel *evsel,
-				 int cpu, int thread, u64 id)
-{
-	int hash;
-	struct perf_sample_id *sid = SID(evsel, cpu, thread);
-
-	sid->id = id;
-	sid->evsel = evsel;
-	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
-	hlist_add_head(&sid->node, &evlist->heads[hash]);
-}
-
-void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
-			 int cpu, int thread, u64 id)
-{
-	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
-	evsel->id[evsel->ids++] = id;
-}
-
-int perf_evlist__id_add_fd(struct perf_evlist *evlist,
-			   struct perf_evsel *evsel,
-			   int cpu, int thread, int fd)
-{
-	u64 read_data[4] = { 0, };
-	int id_idx = 1; /* The first entry is the counter value */
-	u64 id;
-	int ret;
-
-	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
-	if (!ret)
-		goto add;
-
-	if (errno != ENOTTY)
-		return -1;
-
-	/* Legacy way to get event id.. All hail to old kernels! */
-
-	/*
-	 * This way does not work with group format read, so bail
-	 * out in that case.
-	 */
-	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
-		return -1;
-
-	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
-	    read(fd, &read_data, sizeof(read_data)) == -1)
-		return -1;
-
-	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		++id_idx;
-	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		++id_idx;
-
-	id = read_data[id_idx];
-
- add:
-	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
-	return 0;
-}
-
-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
-				     struct perf_evsel *evsel, int idx, int cpu,
-				     int thread)
-{
-	struct perf_sample_id *sid = SID(evsel, cpu, thread);
-	sid->idx = idx;
-	if (evlist->cpus && cpu >= 0)
-		sid->cpu = evlist->cpus->map[cpu];
-	else
-		sid->cpu = -1;
-	if (!evsel->system_wide && evlist->threads && thread >= 0)
-		sid->tid = thread_map__pid(evlist->threads, thread);
-	else
-		sid->tid = -1;
-}
-
-struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
+struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
 {
 	struct hlist_head *head;
 	struct perf_sample_id *sid;
 	int hash;

 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
-	head = &evlist->heads[hash];
+	head = &evlist->core.heads[hash];

 	hlist_for_each_entry(sid, head, node)
 		if (sid->id == id)
@@ -590 +537 @@
 	return NULL;
 }

-struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
 {
 	struct perf_sample_id *sid;

-	if (evlist->nr_entries == 1 || !id)
-		return perf_evlist__first(evlist);
+	if (evlist->core.nr_entries == 1 || !id)
+		return evlist__first(evlist);

 	sid = perf_evlist__id2sid(evlist, id);
 	if (sid)
-		return sid->evsel;
+		return container_of(sid->evsel, struct evsel, core);

-	if (!perf_evlist__sample_id_all(evlist))
-		return perf_evlist__first(evlist);
+	if (!evlist__sample_id_all(evlist))
+		return evlist__first(evlist);

 	return NULL;
 }

-struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
+struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
 						u64 id)
 {
 	struct perf_sample_id *sid;
@@ -617 +564 @@

 	sid = perf_evlist__id2sid(evlist, id);
 	if (sid)
-		return sid->evsel;
+		return container_of(sid->evsel, struct evsel, core);

 	return NULL;
 }

-static int perf_evlist__event2id(struct perf_evlist *evlist,
+static int perf_evlist__event2id(struct evlist *evlist,
 				 union perf_event *event, u64 *id)
 {
-	const u64 *array = event->sample.array;
+	const __u64 *array = event->sample.array;
 	ssize_t n;

 	n = (event->header.size - sizeof(event->header)) >> 3;
@@ -643 +590 @@
 	return 0;
 }

-struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
 					    union perf_event *event)
 {
-	struct perf_evsel *first = perf_evlist__first(evlist);
+	struct evsel *first = evlist__first(evlist);
 	struct hlist_head *head;
 	struct perf_sample_id *sid;
 	int hash;
 	u64 id;

-	if (evlist->nr_entries == 1)
+	if (evlist->core.nr_entries == 1)
 		return first;

-	if (!first->attr.sample_id_all &&
+	if (!first->core.attr.sample_id_all &&
 	    event->header.type != PERF_RECORD_SAMPLE)
 		return first;
@@ -667 +614 @@
 		return first;

 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
-	head = &evlist->heads[hash];
+	head = &evlist->core.heads[hash];

 	hlist_for_each_entry(sid, head, node) {
 		if (sid->id == id)
-			return sid->evsel;
+			return container_of(sid->evsel, struct evsel, core);
 	}
 	return NULL;
 }

-static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
+static int perf_evlist__set_paused(struct evlist *evlist, bool value)
 {
 	int i;

 	if (!evlist->overwrite_mmap)
 		return 0;

-	for (i = 0; i < evlist->nr_mmaps; i++) {
-		int fd = evlist->overwrite_mmap[i].fd;
+	for (i = 0; i < evlist->core.nr_mmaps; i++) {
+		int fd = evlist->overwrite_mmap[i].core.fd;
 		int err;

 		if (fd < 0)
@@ -696 +643 @@
 	return 0;
 }

-static int perf_evlist__pause(struct perf_evlist *evlist)
+static int perf_evlist__pause(struct evlist *evlist)
 {
 	return perf_evlist__set_paused(evlist, true);
 }

-static int perf_evlist__resume(struct perf_evlist *evlist)
+static int perf_evlist__resume(struct evlist *evlist)
 {
 	return perf_evlist__set_paused(evlist, false);
 }

-static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
+static void evlist__munmap_nofree(struct evlist *evlist)
 {
 	int i;

 	if (evlist->mmap)
-		for (i = 0; i < evlist->nr_mmaps; i++)
-			perf_mmap__munmap(&evlist->mmap[i]);
+		for (i = 0; i < evlist->core.nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->mmap[i].core);

 	if (evlist->overwrite_mmap)
-		for (i = 0; i < evlist->nr_mmaps; i++)
-			perf_mmap__munmap(&evlist->overwrite_mmap[i]);
+		for (i = 0; i < evlist->core.nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
 }

-void perf_evlist__munmap(struct perf_evlist *evlist)
+void evlist__munmap(struct evlist *evlist)
 {
-	perf_evlist__munmap_nofree(evlist);
+	evlist__munmap_nofree(evlist);
 	zfree(&evlist->mmap);
 	zfree(&evlist->overwrite_mmap);
 }

-static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist,
-						 bool overwrite)
+static void perf_mmap__unmap_cb(struct perf_mmap *map)
+{
+	struct mmap *m = container_of(map, struct mmap, core);
+
+	mmap__munmap(m);
+}
+
+static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
+				       bool overwrite)
 {
 	int i;
-	struct perf_mmap *map;
+	struct mmap *map;

-	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
-	if (cpu_map__empty(evlist->cpus))
-		evlist->nr_mmaps = thread_map__nr(evlist->threads);
-	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
 	if (!map)
 		return NULL;

-	for (i = 0; i < evlist->nr_mmaps; i++) {
-		map[i].fd = -1;
-		map[i].overwrite = overwrite;
+	for (i = 0; i < evlist->core.nr_mmaps; i++) {
+		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;
+
 		/*
 		 * When the perf_mmap() call is made we grab one refcount, plus
 		 * one extra to let perf_mmap__consume() get the last
@@ -751 +702 @@
 		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
 		 * thus does perf_mmap__get() on it.
 		 */
-		refcount_set(&map[i].refcnt, 0);
+		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
 	}
+
 	return map;
 }

-static bool
-perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
-			 struct perf_evsel *evsel)
+static void
+perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
+			 struct perf_mmap_param *_mp,
+			 int idx, bool per_cpu)
 {
-	if (evsel->attr.write_backward)
-		return false;
-	return true;
+	struct evlist *evlist = container_of(_evlist, struct evlist, core);
+	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
+
+	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
 }

-static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
-				       struct mmap_params *mp, int cpu_idx,
-				       int thread, int *_output, int *_output_overwrite)
+static struct perf_mmap*
+perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
 {
-	struct perf_evsel *evsel;
-	int revent;
-	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);
+	struct evlist *evlist = container_of(_evlist, struct evlist, core);
+	struct mmap *maps;

-	evlist__for_each_entry(evlist, evsel) {
-		struct perf_mmap *maps = evlist->mmap;
-		int *output = _output;
-		int fd;
-		int cpu;
+	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

-		mp->prot = PROT_READ | PROT_WRITE;
-		if (evsel->attr.write_backward) {
-			output = _output_overwrite;
-			maps = evlist->overwrite_mmap;
+	if (!maps) {
+		maps = evlist__alloc_mmap(evlist, overwrite);
+		if (!maps)
+			return NULL;

-			if (!maps) {
-				maps = perf_evlist__alloc_mmap(evlist, true);
-				if (!maps)
-					return -1;
-				evlist->overwrite_mmap = maps;
-				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
-					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
-			}
-			mp->prot &= ~PROT_WRITE;
-		}
-
-		if (evsel->system_wide && thread)
-			continue;
-
-		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
-		if (cpu == -1)
-			continue;
-
-		fd = FD(evsel, cpu, thread);
-
-		if (*output == -1) {
-			*output = fd;
-
-			if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
-				return -1;
+		if (overwrite) {
+			evlist->overwrite_mmap = maps;
+			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
+				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 		} else {
-			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
-				return -1;
-
-			perf_mmap__get(&maps[idx]);
-		}
-
-		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
-
-		/*
-		 * The system_wide flag causes a selected event to be opened
-		 * always without a pid. Consequently it will never get a
-		 * POLLHUP, but it is used for tracking in combination with
-		 * other events, so it should not need to be polled anyway.
-		 * Therefore don't add it for polling.
-		 */
-		if (!evsel->system_wide &&
-		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
-			perf_mmap__put(&maps[idx]);
-			return -1;
-		}
-
-		if (evsel->attr.read_format & PERF_FORMAT_ID) {
-			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
-						   fd) < 0)
-				return -1;
-			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
-						 thread);
+			evlist->mmap = maps;
 		}
 	}

-	return 0;
+	return &maps[idx].core;
 }

-static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
-				     struct mmap_params *mp)
+static int
+perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
+			  int output, int cpu)
 {
-	int cpu, thread;
-	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	struct mmap *map = container_of(_map, struct mmap, core);
+	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

-	pr_debug2("perf event ring buffer mmapped per cpu\n");
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		int output = -1;
-		int output_overwrite = -1;
-
-		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
-					      true);
-
-		for (thread = 0; thread < nr_threads; thread++) {
-			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-							thread, &output, &output_overwrite))
-				goto out_unmap;
-		}
-	}
-
-	return 0;
-
-out_unmap:
-	perf_evlist__munmap_nofree(evlist);
-	return -1;
-}
-
-static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
-					struct mmap_params *mp)
-{
-	int thread;
-	int nr_threads = thread_map__nr(evlist->threads);
-
-	pr_debug2("perf event ring buffer mmapped per thread\n");
-	for (thread = 0; thread < nr_threads; thread++) {
-		int output = -1;
-		int output_overwrite = -1;
-
-		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
-					      false);
-
-		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-						&output, &output_overwrite))
-			goto out_unmap;
-	}
-
-	return 0;
-
-out_unmap:
-	perf_evlist__munmap_nofree(evlist);
-	return -1;
+	return mmap__mmap(map, mp, output, cpu);
 }

 unsigned long perf_event_mlock_kb_in_pages(void)
@@ -921 +777 @@
 	return pages;
 }

-size_t perf_evlist__mmap_size(unsigned long pages)
+size_t evlist__mmap_size(unsigned long pages)
 {
 	if (pages == UINT_MAX)
 		pages = perf_event_mlock_kb_in_pages();
1004 | 860 | } |
---|
1005 | 861 | |
---|
1006 | 862 | /** |
---|
1007 | | - * perf_evlist__mmap_ex - Create mmaps to receive events. |
---|
| 863 | + * evlist__mmap_ex - Create mmaps to receive events. |
---|
1008 | 864 | * @evlist: list of events |
---|
1009 | 865 | * @pages: map length in pages |
---|
1010 | 866 | * @overwrite: overwrite older events? |
---|
@@ -1012 +868 @@
 * @auxtrace_overwrite - overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
- * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
+ * perf_mmap__write_tail(). Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
@@ -1020 +876 @@
 *
 * Return: %0 on success, negative error code otherwise.
 */
-int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
+int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
-			 bool auxtrace_overwrite)
+			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
+			 int comp_level)
 {
-	struct perf_evsel *evsel;
-	const struct cpu_map *cpus = evlist->cpus;
-	const struct thread_map *threads = evlist->threads;
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
-	struct mmap_params mp;
+	struct mmap_params mp = {
+		.nr_cblocks	= nr_cblocks,
+		.affinity	= affinity,
+		.flush		= flush,
+		.comp_level	= comp_level
+	};
+	struct perf_evlist_mmap_ops ops = {
+		.idx  = perf_evlist__mmap_cb_idx,
+		.get  = perf_evlist__mmap_cb_get,
+		.mmap = perf_evlist__mmap_cb_mmap,
+	};

-	if (!evlist->mmap)
-		evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
-	if (!evlist->mmap)
-		return -ENOMEM;
+	evlist->core.mmap_len = evlist__mmap_size(pages);
+	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

-	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
-		return -ENOMEM;
-
-	evlist->mmap_len = perf_evlist__mmap_size(pages);
-	pr_debug("mmap size %zuB\n", evlist->mmap_len);
-	mp.mask = evlist->mmap_len - page_size - 1;
-
-	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
+	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

-	evlist__for_each_entry(evlist, evsel) {
-		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-		    evsel->sample_id == NULL &&
-		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
-			return -ENOMEM;
-	}
-
-	if (cpu_map__empty(cpus))
-		return perf_evlist__mmap_per_thread(evlist, &mp);
-
-	return perf_evlist__mmap_per_cpu(evlist, &mp);
+	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
 }

-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
+int evlist__mmap(struct evlist *evlist, unsigned int pages)
 {
-	return perf_evlist__mmap_ex(evlist, pages, 0, false);
+	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
 }

-int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
+int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
 {
	bool all_threads = (target->per_thread && target->system_wide);
-	struct cpu_map *cpus;
-	struct thread_map *threads;
+	struct perf_cpu_map *cpus;
+	struct perf_thread_map *threads;

	/*
	 * If specify '-a' and '--per-thread' to perf record, perf record
.. | .. |
---|
1098 | 943 | return -1; |
---|
1099 | 944 | |
---|
1100 | 945 | if (target__uses_dummy_map(target)) |
---|
1101 | | - cpus = cpu_map__dummy_new(); |
---|
| 946 | + cpus = perf_cpu_map__dummy_new(); |
---|
1102 | 947 | else |
---|
1103 | | - cpus = cpu_map__new(target->cpu_list); |
---|
| 948 | + cpus = perf_cpu_map__new(target->cpu_list); |
---|
1104 | 949 | |
---|
1105 | 950 | if (!cpus) |
---|
1106 | 951 | goto out_delete_threads; |
---|
1107 | 952 | |
---|
1108 | | - evlist->has_user_cpus = !!target->cpu_list; |
---|
| 953 | + evlist->core.has_user_cpus = !!target->cpu_list; |
---|
1109 | 954 | |
---|
1110 | | - perf_evlist__set_maps(evlist, cpus, threads); |
---|
| 955 | + perf_evlist__set_maps(&evlist->core, cpus, threads); |
---|
| 956 | + |
---|
| 957 | + /* as evlist now has references, put count here */ |
---|
| 958 | + perf_cpu_map__put(cpus); |
---|
| 959 | + perf_thread_map__put(threads); |
---|
1111 | 960 | |
---|
1112 | 961 | return 0; |
---|
1113 | 962 | |
---|
1114 | 963 | out_delete_threads: |
---|
1115 | | - thread_map__put(threads); |
---|
| 964 | + perf_thread_map__put(threads); |
---|
1116 | 965 | return -1; |
---|
1117 | 966 | } |
---|
1118 | 967 | |
---|
1119 | | -void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, |
---|
1120 | | - struct thread_map *threads) |
---|
1121 | | -{ |
---|
1122 | | - /* |
---|
1123 | | - * Allow for the possibility that one or another of the maps isn't being |
---|
1124 | | - * changed i.e. don't put it. Note we are assuming the maps that are |
---|
1125 | | - * being applied are brand new and evlist is taking ownership of the |
---|
1126 | | - * original reference count of 1. If that is not the case it is up to |
---|
1127 | | - * the caller to increase the reference count. |
---|
1128 | | - */ |
---|
1129 | | - if (cpus != evlist->cpus) { |
---|
1130 | | - cpu_map__put(evlist->cpus); |
---|
1131 | | - evlist->cpus = cpu_map__get(cpus); |
---|
1132 | | - } |
---|
1133 | | - |
---|
1134 | | - if (threads != evlist->threads) { |
---|
1135 | | - thread_map__put(evlist->threads); |
---|
1136 | | - evlist->threads = thread_map__get(threads); |
---|
1137 | | - } |
---|
1138 | | - |
---|
1139 | | - perf_evlist__propagate_maps(evlist); |
---|
1140 | | -} |
---|
1141 | | - |
---|
1142 | | -void __perf_evlist__set_sample_bit(struct perf_evlist *evlist, |
---|
| 968 | +void __perf_evlist__set_sample_bit(struct evlist *evlist, |
---|
1143 | 969 | enum perf_event_sample_format bit) |
---|
1144 | 970 | { |
---|
1145 | | - struct perf_evsel *evsel; |
---|
| 971 | + struct evsel *evsel; |
---|
1146 | 972 | |
---|
1147 | 973 | evlist__for_each_entry(evlist, evsel) |
---|
1148 | | - __perf_evsel__set_sample_bit(evsel, bit); |
---|
| 974 | + __evsel__set_sample_bit(evsel, bit); |
---|
1149 | 975 | } |
---|
1150 | 976 | |
---|
1151 | | -void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist, |
---|
| 977 | +void __perf_evlist__reset_sample_bit(struct evlist *evlist, |
---|
1152 | 978 | enum perf_event_sample_format bit) |
---|
1153 | 979 | { |
---|
1154 | | - struct perf_evsel *evsel; |
---|
| 980 | + struct evsel *evsel; |
---|
1155 | 981 | |
---|
1156 | 982 | evlist__for_each_entry(evlist, evsel) |
---|
1157 | | - __perf_evsel__reset_sample_bit(evsel, bit); |
---|
| 983 | + __evsel__reset_sample_bit(evsel, bit); |
---|
1158 | 984 | } |
---|
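Both helpers just walk every evsel and flip a single PERF_SAMPLE_* flag. Callers normally go through wrapper macros in evlist.h that paste the `PERF_SAMPLE_` prefix onto the flag name (an assumption about this tree's headers):

```c
/* Sketch: request timestamps and the CPU number in every event's
 * samples, assuming the usual token-pasting wrapper macros exist. */
perf_evlist__set_sample_bit(evlist, TIME);	/* PERF_SAMPLE_TIME */
perf_evlist__set_sample_bit(evlist, CPU);	/* PERF_SAMPLE_CPU */
```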
1159 | 985 | |
---|
1160 | | -int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) |
---|
| 986 | +int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel) |
---|
1161 | 987 | { |
---|
1162 | | - struct perf_evsel *evsel; |
---|
| 988 | + struct evsel *evsel; |
---|
1163 | 989 | int err = 0; |
---|
1164 | 990 | |
---|
1165 | 991 | evlist__for_each_entry(evlist, evsel) { |
---|
.. | .. |
---|
1170 | 996 | * filters only work for tracepoint events, which don't have a cpu limit.
---|
1171 | 997 | * So evlist and evsel should always be the same.
---|
1172 | 998 | */ |
---|
1173 | | - err = perf_evsel__apply_filter(evsel, evsel->filter); |
---|
| 999 | + err = perf_evsel__apply_filter(&evsel->core, evsel->filter); |
---|
1174 | 1000 | if (err) { |
---|
1175 | 1001 | *err_evsel = evsel; |
---|
1176 | 1002 | break; |
---|
.. | .. |
---|
1180 | 1006 | return err; |
---|
1181 | 1007 | } |
---|
1182 | 1008 | |
---|
1183 | | -int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) |
---|
| 1009 | +int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter) |
---|
1184 | 1010 | { |
---|
1185 | | - struct perf_evsel *evsel; |
---|
| 1011 | + struct evsel *evsel; |
---|
1186 | 1012 | int err = 0; |
---|
1187 | 1013 | |
---|
| 1014 | + if (filter == NULL) |
---|
| 1015 | + return -1; |
---|
| 1016 | + |
---|
1188 | 1017 | evlist__for_each_entry(evlist, evsel) { |
---|
1189 | | - if (evsel->attr.type != PERF_TYPE_TRACEPOINT) |
---|
| 1018 | + if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) |
---|
1190 | 1019 | continue; |
---|
1191 | 1020 | |
---|
1192 | | - err = perf_evsel__set_filter(evsel, filter); |
---|
| 1021 | + err = evsel__set_filter(evsel, filter); |
---|
1193 | 1022 | if (err) |
---|
1194 | 1023 | break; |
---|
1195 | 1024 | } |
---|
.. | .. |
---|
1197 | 1026 | return err; |
---|
1198 | 1027 | } |
---|
1199 | 1028 | |
---|
1200 | | -int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids) |
---|
| 1029 | +int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter) |
---|
| 1030 | +{ |
---|
| 1031 | + struct evsel *evsel; |
---|
| 1032 | + int err = 0; |
---|
| 1033 | + |
---|
| 1034 | + if (filter == NULL) |
---|
| 1035 | + return -1; |
---|
| 1036 | + |
---|
| 1037 | + evlist__for_each_entry(evlist, evsel) { |
---|
| 1038 | + if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) |
---|
| 1039 | + continue; |
---|
| 1040 | + |
---|
| 1041 | + err = evsel__append_tp_filter(evsel, filter); |
---|
| 1042 | + if (err) |
---|
| 1043 | + break; |
---|
| 1044 | + } |
---|
| 1045 | + |
---|
| 1046 | + return err; |
---|
| 1047 | +} |
---|
| 1048 | + |
---|
| 1049 | +char *asprintf__tp_filter_pids(size_t npids, pid_t *pids) |
---|
1201 | 1050 | { |
---|
1202 | 1051 | char *filter; |
---|
1203 | | - int ret = -1; |
---|
1204 | 1052 | size_t i; |
---|
1205 | 1053 | |
---|
1206 | 1054 | for (i = 0; i < npids; ++i) { |
---|
1207 | 1055 | if (i == 0) { |
---|
1208 | 1056 | if (asprintf(&filter, "common_pid != %d", pids[i]) < 0) |
---|
1209 | | - return -1; |
---|
| 1057 | + return NULL; |
---|
1210 | 1058 | } else { |
---|
1211 | 1059 | char *tmp; |
---|
1212 | 1060 | |
---|
.. | .. |
---|
1218 | 1066 | } |
---|
1219 | 1067 | } |
---|
1220 | 1068 | |
---|
1221 | | - ret = perf_evlist__set_filter(evlist, filter); |
---|
| 1069 | + return filter; |
---|
1222 | 1070 | out_free: |
---|
| 1071 | + free(filter); |
---|
| 1072 | + return NULL; |
---|
| 1073 | +} |
---|
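The new helper centralizes building the tracepoint filter string; the elided middle of the loop presumably appends one more `common_pid != N` clause per pid. Note that it assumes `npids >= 1`: with zero pids the loop never runs and `filter` would be returned uninitialized. A sketch of the expected result:

```c
/* Sketch: what the builder is expected to produce for two pids. */
pid_t pids[] = { 1234, 5678 };
char *filter = asprintf__tp_filter_pids(2, pids);

/* filter now reads something like:
 *   "common_pid != 1234 && common_pid != 5678"
 */
free(filter);
```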
| 1074 | + |
---|
| 1075 | +int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids) |
---|
| 1076 | +{ |
---|
| 1077 | + char *filter = asprintf__tp_filter_pids(npids, pids); |
---|
| 1078 | + int ret = perf_evlist__set_tp_filter(evlist, filter); |
---|
| 1079 | + |
---|
1223 | 1080 | free(filter); |
---|
1224 | 1081 | return ret; |
---|
1225 | 1082 | } |
---|
1226 | 1083 | |
---|
1227 | | -int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid) |
---|
| 1084 | +int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid) |
---|
1228 | 1085 | { |
---|
1229 | | - return perf_evlist__set_filter_pids(evlist, 1, &pid); |
---|
| 1086 | + return perf_evlist__set_tp_filter_pids(evlist, 1, &pid); |
---|
1230 | 1087 | } |
---|
1231 | 1088 | |
---|
1232 | | -bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) |
---|
| 1089 | +int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids) |
---|
1233 | 1090 | { |
---|
1234 | | - struct perf_evsel *pos; |
---|
| 1091 | + char *filter = asprintf__tp_filter_pids(npids, pids); |
---|
| 1092 | + int ret = perf_evlist__append_tp_filter(evlist, filter); |
---|
1235 | 1093 | |
---|
1236 | | - if (evlist->nr_entries == 1) |
---|
| 1094 | + free(filter); |
---|
| 1095 | + return ret; |
---|
| 1096 | +} |
---|
| 1097 | + |
---|
| 1098 | +int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid) |
---|
| 1099 | +{ |
---|
| 1100 | + return perf_evlist__append_tp_filter_pids(evlist, 1, &pid); |
---|
| 1101 | +} |
---|
| 1102 | + |
---|
| 1103 | +bool evlist__valid_sample_type(struct evlist *evlist) |
---|
| 1104 | +{ |
---|
| 1105 | + struct evsel *pos; |
---|
| 1106 | + |
---|
| 1107 | + if (evlist->core.nr_entries == 1) |
---|
1237 | 1108 | return true; |
---|
1238 | 1109 | |
---|
1239 | 1110 | if (evlist->id_pos < 0 || evlist->is_pos < 0) |
---|
.. | .. |
---|
1248 | 1119 | return true; |
---|
1249 | 1120 | } |
---|
1250 | 1121 | |
---|
1251 | | -u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist) |
---|
| 1122 | +u64 __evlist__combined_sample_type(struct evlist *evlist) |
---|
1252 | 1123 | { |
---|
1253 | | - struct perf_evsel *evsel; |
---|
| 1124 | + struct evsel *evsel; |
---|
1254 | 1125 | |
---|
1255 | 1126 | if (evlist->combined_sample_type) |
---|
1256 | 1127 | return evlist->combined_sample_type; |
---|
1257 | 1128 | |
---|
1258 | 1129 | evlist__for_each_entry(evlist, evsel) |
---|
1259 | | - evlist->combined_sample_type |= evsel->attr.sample_type; |
---|
| 1130 | + evlist->combined_sample_type |= evsel->core.attr.sample_type; |
---|
1260 | 1131 | |
---|
1261 | 1132 | return evlist->combined_sample_type; |
---|
1262 | 1133 | } |
---|
1263 | 1134 | |
---|
1264 | | -u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist) |
---|
| 1135 | +u64 evlist__combined_sample_type(struct evlist *evlist) |
---|
1265 | 1136 | { |
---|
1266 | 1137 | evlist->combined_sample_type = 0; |
---|
1267 | | - return __perf_evlist__combined_sample_type(evlist); |
---|
| 1138 | + return __evlist__combined_sample_type(evlist); |
---|
1268 | 1139 | } |
---|
1269 | 1140 | |
---|
1270 | | -u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist) |
---|
| 1141 | +u64 evlist__combined_branch_type(struct evlist *evlist) |
---|
1271 | 1142 | { |
---|
1272 | | - struct perf_evsel *evsel; |
---|
| 1143 | + struct evsel *evsel; |
---|
1273 | 1144 | u64 branch_type = 0; |
---|
1274 | 1145 | |
---|
1275 | 1146 | evlist__for_each_entry(evlist, evsel) |
---|
1276 | | - branch_type |= evsel->attr.branch_sample_type; |
---|
| 1147 | + branch_type |= evsel->core.attr.branch_sample_type; |
---|
1277 | 1148 | return branch_type; |
---|
1278 | 1149 | } |
---|
1279 | 1150 | |
---|
1280 | | -bool perf_evlist__valid_read_format(struct perf_evlist *evlist) |
---|
| 1151 | +bool perf_evlist__valid_read_format(struct evlist *evlist) |
---|
1281 | 1152 | { |
---|
1282 | | - struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; |
---|
1283 | | - u64 read_format = first->attr.read_format; |
---|
1284 | | - u64 sample_type = first->attr.sample_type; |
---|
| 1153 | + struct evsel *first = evlist__first(evlist), *pos = first; |
---|
| 1154 | + u64 read_format = first->core.attr.read_format; |
---|
| 1155 | + u64 sample_type = first->core.attr.sample_type; |
---|
1285 | 1156 | |
---|
1286 | 1157 | evlist__for_each_entry(evlist, pos) { |
---|
1287 | | - if (read_format != pos->attr.read_format) |
---|
1288 | | - return false; |
---|
| 1158 | + if (read_format != pos->core.attr.read_format) { |
---|
| 1159 | + pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n", |
---|
| 1160 | + read_format, (u64)pos->core.attr.read_format); |
---|
| 1161 | + } |
---|
1289 | 1162 | } |
---|
1290 | 1163 | |
---|
1291 | 1164 | /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
---|
.. | .. |
---|
1297 | 1170 | return true; |
---|
1298 | 1171 | } |
---|
1299 | 1172 | |
---|
1300 | | -u64 perf_evlist__read_format(struct perf_evlist *evlist) |
---|
| 1173 | +u16 perf_evlist__id_hdr_size(struct evlist *evlist) |
---|
1301 | 1174 | { |
---|
1302 | | - struct perf_evsel *first = perf_evlist__first(evlist); |
---|
1303 | | - return first->attr.read_format; |
---|
1304 | | -} |
---|
1305 | | - |
---|
1306 | | -u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) |
---|
1307 | | -{ |
---|
1308 | | - struct perf_evsel *first = perf_evlist__first(evlist); |
---|
| 1175 | + struct evsel *first = evlist__first(evlist); |
---|
1309 | 1176 | struct perf_sample *data; |
---|
1310 | 1177 | u64 sample_type; |
---|
1311 | 1178 | u16 size = 0; |
---|
1312 | 1179 | |
---|
1313 | | - if (!first->attr.sample_id_all) |
---|
| 1180 | + if (!first->core.attr.sample_id_all) |
---|
1314 | 1181 | goto out; |
---|
1315 | 1182 | |
---|
1316 | | - sample_type = first->attr.sample_type; |
---|
| 1183 | + sample_type = first->core.attr.sample_type; |
---|
1317 | 1184 | |
---|
1318 | 1185 | if (sample_type & PERF_SAMPLE_TID) |
---|
1319 | 1186 | size += sizeof(data->tid) * 2; |
---|
.. | .. |
---|
1336 | 1203 | return size; |
---|
1337 | 1204 | } |
---|
1338 | 1205 | |
---|
1339 | | -bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) |
---|
| 1206 | +bool evlist__valid_sample_id_all(struct evlist *evlist) |
---|
1340 | 1207 | { |
---|
1341 | | - struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; |
---|
| 1208 | + struct evsel *first = evlist__first(evlist), *pos = first; |
---|
1342 | 1209 | |
---|
1343 | 1210 | evlist__for_each_entry_continue(evlist, pos) { |
---|
1344 | | - if (first->attr.sample_id_all != pos->attr.sample_id_all) |
---|
| 1211 | + if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all) |
---|
1345 | 1212 | return false; |
---|
1346 | 1213 | } |
---|
1347 | 1214 | |
---|
1348 | 1215 | return true; |
---|
1349 | 1216 | } |
---|
1350 | 1217 | |
---|
1351 | | -bool perf_evlist__sample_id_all(struct perf_evlist *evlist) |
---|
| 1218 | +bool evlist__sample_id_all(struct evlist *evlist) |
---|
1352 | 1219 | { |
---|
1353 | | - struct perf_evsel *first = perf_evlist__first(evlist); |
---|
1354 | | - return first->attr.sample_id_all; |
---|
| 1220 | + struct evsel *first = evlist__first(evlist); |
---|
| 1221 | + return first->core.attr.sample_id_all; |
---|
1355 | 1222 | } |
---|
1356 | 1223 | |
---|
1357 | | -void perf_evlist__set_selected(struct perf_evlist *evlist, |
---|
1358 | | - struct perf_evsel *evsel) |
---|
| 1224 | +void perf_evlist__set_selected(struct evlist *evlist, |
---|
| 1225 | + struct evsel *evsel) |
---|
1359 | 1226 | { |
---|
1360 | 1227 | evlist->selected = evsel; |
---|
1361 | 1228 | } |
---|
1362 | 1229 | |
---|
1363 | | -void perf_evlist__close(struct perf_evlist *evlist) |
---|
| 1230 | +void evlist__close(struct evlist *evlist) |
---|
1364 | 1231 | { |
---|
1365 | | - struct perf_evsel *evsel; |
---|
| 1232 | + struct evsel *evsel; |
---|
| 1233 | + struct affinity affinity; |
---|
| 1234 | + int cpu, i; |
---|
1366 | 1235 | |
---|
1367 | | - evlist__for_each_entry_reverse(evlist, evsel) |
---|
1368 | | - perf_evsel__close(evsel); |
---|
| 1236 | + /* |
---|
| 1237 | + * With perf record, core.cpus is usually NULL.
---|
| 1238 | + * Use the old method to handle this for now. |
---|
| 1239 | + */ |
---|
| 1240 | + if (!evlist->core.cpus) { |
---|
| 1241 | + evlist__for_each_entry_reverse(evlist, evsel) |
---|
| 1242 | + evsel__close(evsel); |
---|
| 1243 | + return; |
---|
| 1244 | + } |
---|
| 1245 | + |
---|
| 1246 | + if (affinity__setup(&affinity) < 0) |
---|
| 1247 | + return; |
---|
| 1248 | + evlist__for_each_cpu(evlist, i, cpu) { |
---|
| 1249 | + affinity__set(&affinity, cpu); |
---|
| 1250 | + |
---|
| 1251 | + evlist__for_each_entry_reverse(evlist, evsel) { |
---|
| 1252 | + if (evsel__cpu_iter_skip(evsel, cpu)) |
---|
| 1253 | + continue; |
---|
| 1254 | + perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1); |
---|
| 1255 | + } |
---|
| 1256 | + } |
---|
| 1257 | + affinity__cleanup(&affinity); |
---|
| 1258 | + evlist__for_each_entry_reverse(evlist, evsel) { |
---|
| 1259 | + perf_evsel__free_fd(&evsel->core); |
---|
| 1260 | + perf_evsel__free_id(&evsel->core); |
---|
| 1261 | + } |
---|
1369 | 1262 | } |
---|
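evlist__close() is now affinity-aware: rather than closing every FD from whichever CPU the tool happens to run on, it migrates to each CPU once and closes all event FDs bound to it, which is meant to avoid a cross-CPU IPI per close() on large machines; the fd and sample-id arrays are then freed in a second pass. The iteration idiom, as a minimal sketch:

```c
/* Sketch of the affinity-aware per-CPU iteration used above: pin to
 * each CPU once, then do the per-CPU work for every matching evsel. */
struct affinity affinity;
struct evsel *evsel;
int i, cpu;

if (affinity__setup(&affinity) < 0)
	return;

evlist__for_each_cpu(evlist, i, cpu) {
	affinity__set(&affinity, cpu);		/* migrate to the target CPU */

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__cpu_iter_skip(evsel, cpu))
			continue;		/* no fd on this CPU */
		/* ... per-CPU work on index evsel->cpu_iter - 1 ... */
	}
}
affinity__cleanup(&affinity);			/* restore the old mask */
```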
1370 | 1263 | |
---|
1371 | | -static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) |
---|
| 1264 | +static int perf_evlist__create_syswide_maps(struct evlist *evlist) |
---|
1372 | 1265 | { |
---|
1373 | | - struct cpu_map *cpus; |
---|
1374 | | - struct thread_map *threads; |
---|
| 1266 | + struct perf_cpu_map *cpus; |
---|
| 1267 | + struct perf_thread_map *threads; |
---|
1375 | 1268 | int err = -ENOMEM; |
---|
1376 | 1269 | |
---|
1377 | 1270 | /* |
---|
.. | .. |
---|
1383 | 1276 | * error, and we may not want to do that fallback to a |
---|
1384 | 1277 | * default cpu identity map :-\ |
---|
1385 | 1278 | */ |
---|
1386 | | - cpus = cpu_map__new(NULL); |
---|
| 1279 | + cpus = perf_cpu_map__new(NULL); |
---|
1387 | 1280 | if (!cpus) |
---|
1388 | 1281 | goto out; |
---|
1389 | 1282 | |
---|
1390 | | - threads = thread_map__new_dummy(); |
---|
| 1283 | + threads = perf_thread_map__new_dummy(); |
---|
1391 | 1284 | if (!threads) |
---|
1392 | 1285 | goto out_put; |
---|
1393 | 1286 | |
---|
1394 | | - perf_evlist__set_maps(evlist, cpus, threads); |
---|
| 1287 | + perf_evlist__set_maps(&evlist->core, cpus, threads); |
---|
| 1288 | + |
---|
| 1289 | + perf_thread_map__put(threads); |
---|
| 1290 | +out_put: |
---|
| 1291 | + perf_cpu_map__put(cpus); |
---|
1395 | 1292 | out: |
---|
1396 | 1293 | return err; |
---|
1397 | | -out_put: |
---|
1398 | | - cpu_map__put(cpus); |
---|
1399 | | - goto out; |
---|
1400 | 1294 | } |
---|
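One caveat in the rewritten cleanup: `err` is initialized to -ENOMEM and, in the lines shown, never cleared on the success path, so the function returns -ENOMEM even after both maps are created and installed. A corrected sketch of the tail, under that reading:

```c
/* Sketch: same flow, but report success once the maps are installed. */
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	err = 0;			/* don't keep the -ENOMEM default */

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return err;
```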
1401 | 1295 | |
---|
1402 | | -int perf_evlist__open(struct perf_evlist *evlist) |
---|
| 1296 | +int evlist__open(struct evlist *evlist) |
---|
1403 | 1297 | { |
---|
1404 | | - struct perf_evsel *evsel; |
---|
| 1298 | + struct evsel *evsel; |
---|
1405 | 1299 | int err; |
---|
1406 | 1300 | |
---|
1407 | 1301 | /* |
---|
1408 | 1302 | * Default: one fd per CPU, all threads, aka systemwide |
---|
1409 | 1303 | * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL |
---|
1410 | 1304 | */ |
---|
1411 | | - if (evlist->threads == NULL && evlist->cpus == NULL) { |
---|
| 1305 | + if (evlist->core.threads == NULL && evlist->core.cpus == NULL) { |
---|
1412 | 1306 | err = perf_evlist__create_syswide_maps(evlist); |
---|
1413 | 1307 | if (err < 0) |
---|
1414 | 1308 | goto out_err; |
---|
.. | .. |
---|
1417 | 1311 | perf_evlist__update_id_pos(evlist); |
---|
1418 | 1312 | |
---|
1419 | 1313 | evlist__for_each_entry(evlist, evsel) { |
---|
1420 | | - err = perf_evsel__open(evsel, evsel->cpus, evsel->threads); |
---|
| 1314 | + err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads); |
---|
1421 | 1315 | if (err < 0) |
---|
1422 | 1316 | goto out_err; |
---|
1423 | 1317 | } |
---|
1424 | 1318 | |
---|
1425 | 1319 | return 0; |
---|
1426 | 1320 | out_err: |
---|
1427 | | - perf_evlist__close(evlist); |
---|
| 1321 | + evlist__close(evlist); |
---|
1428 | 1322 | errno = -err; |
---|
1429 | 1323 | return err; |
---|
1430 | 1324 | } |
---|
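evlist__open() thus defaults to system-wide maps when the caller supplied neither CPUs nor threads, and rolls everything back through evlist__close() on the first failure, with errno set from the negative return. A minimal, hypothetical caller:

```c
/* Sketch: open an evlist and report a readable error on failure.
 * Assumes events were already added, e.g. via parse_events(). */
struct evlist *evlist = evlist__new();

if (!evlist)
	return -ENOMEM;
/* ... parse_events(evlist, "cycles", NULL) or similar ... */
if (evlist__open(evlist) < 0) {
	char buf[STRERR_BUFSIZE];

	evlist__strerror_open(evlist, errno, buf, sizeof(buf));
	pr_err("%s\n", buf);
	evlist__delete(evlist);
	return -1;
}
```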
1431 | 1325 | |
---|
1432 | | -int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target, |
---|
| 1326 | +int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target, |
---|
1433 | 1327 | const char *argv[], bool pipe_output, |
---|
1434 | 1328 | void (*exec_error)(int signo, siginfo_t *info, void *ucontext)) |
---|
1435 | 1329 | { |
---|
.. | .. |
---|
1511 | 1405 | } |
---|
1512 | 1406 | |
---|
1513 | 1407 | if (target__none(target)) { |
---|
1514 | | - if (evlist->threads == NULL) { |
---|
| 1408 | + if (evlist->core.threads == NULL) { |
---|
1515 | 1409 | fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
---|
1516 | 1410 | __func__, __LINE__); |
---|
1517 | 1411 | goto out_close_pipes; |
---|
1518 | 1412 | } |
---|
1519 | | - thread_map__set_pid(evlist->threads, 0, evlist->workload.pid); |
---|
| 1413 | + perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid); |
---|
1520 | 1414 | } |
---|
1521 | 1415 | |
---|
1522 | 1416 | close(child_ready_pipe[1]); |
---|
.. | .. |
---|
1543 | 1437 | return -1; |
---|
1544 | 1438 | } |
---|
1545 | 1439 | |
---|
1546 | | -int perf_evlist__start_workload(struct perf_evlist *evlist) |
---|
| 1440 | +int perf_evlist__start_workload(struct evlist *evlist) |
---|
1547 | 1441 | { |
---|
1548 | 1442 | if (evlist->workload.cork_fd > 0) { |
---|
1549 | 1443 | char bf = 0; |
---|
.. | .. |
---|
1562 | 1456 | return 0; |
---|
1563 | 1457 | } |
---|
1564 | 1458 | |
---|
1565 | | -int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, |
---|
| 1459 | +int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event, |
---|
1566 | 1460 | struct perf_sample *sample) |
---|
1567 | 1461 | { |
---|
1568 | | - struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event); |
---|
| 1462 | + struct evsel *evsel = perf_evlist__event2evsel(evlist, event); |
---|
1569 | 1463 | |
---|
1570 | 1464 | if (!evsel) |
---|
1571 | 1465 | return -EFAULT; |
---|
1572 | | - return perf_evsel__parse_sample(evsel, event, sample); |
---|
| 1466 | + return evsel__parse_sample(evsel, event, sample); |
---|
1573 | 1467 | } |
---|
1574 | 1468 | |
---|
1575 | | -int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist, |
---|
| 1469 | +int perf_evlist__parse_sample_timestamp(struct evlist *evlist, |
---|
1576 | 1470 | union perf_event *event, |
---|
1577 | 1471 | u64 *timestamp) |
---|
1578 | 1472 | { |
---|
1579 | | - struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event); |
---|
| 1473 | + struct evsel *evsel = perf_evlist__event2evsel(evlist, event); |
---|
1580 | 1474 | |
---|
1581 | 1475 | if (!evsel) |
---|
1582 | 1476 | return -EFAULT; |
---|
1583 | | - return perf_evsel__parse_sample_timestamp(evsel, event, timestamp); |
---|
| 1477 | + return evsel__parse_sample_timestamp(evsel, event, timestamp); |
---|
1584 | 1478 | } |
---|
1585 | 1479 | |
---|
1586 | | -size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) |
---|
1587 | | -{ |
---|
1588 | | - struct perf_evsel *evsel; |
---|
1589 | | - size_t printed = 0; |
---|
1590 | | - |
---|
1591 | | - evlist__for_each_entry(evlist, evsel) { |
---|
1592 | | - printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "", |
---|
1593 | | - perf_evsel__name(evsel)); |
---|
1594 | | - } |
---|
1595 | | - |
---|
1596 | | - return printed + fprintf(fp, "\n"); |
---|
1597 | | -} |
---|
1598 | | - |
---|
1599 | | -int perf_evlist__strerror_open(struct perf_evlist *evlist, |
---|
1600 | | - int err, char *buf, size_t size) |
---|
| 1480 | +int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size) |
---|
1601 | 1481 | { |
---|
1602 | 1482 | int printed, value; |
---|
1603 | 1483 | char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); |
---|
.. | .. |
---|
1625 | 1505 | "Hint:\tThe current value is %d.", value); |
---|
1626 | 1506 | break; |
---|
1627 | 1507 | case EINVAL: { |
---|
1628 | | - struct perf_evsel *first = perf_evlist__first(evlist); |
---|
| 1508 | + struct evsel *first = evlist__first(evlist); |
---|
1629 | 1509 | int max_freq; |
---|
1630 | 1510 | |
---|
1631 | 1511 | if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0) |
---|
1632 | 1512 | goto out_default; |
---|
1633 | 1513 | |
---|
1634 | | - if (first->attr.sample_freq < (u64)max_freq) |
---|
| 1514 | + if (first->core.attr.sample_freq < (u64)max_freq) |
---|
1635 | 1515 | goto out_default; |
---|
1636 | 1516 | |
---|
1637 | 1517 | printed = scnprintf(buf, size, |
---|
1638 | 1518 | "Error:\t%s.\n" |
---|
1639 | 1519 | "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n" |
---|
1640 | 1520 | "Hint:\tThe current value is %d and %" PRIu64 " is being requested.", |
---|
1641 | | - emsg, max_freq, first->attr.sample_freq); |
---|
| 1521 | + emsg, max_freq, first->core.attr.sample_freq); |
---|
1642 | 1522 | break; |
---|
1643 | 1523 | } |
---|
1644 | 1524 | default: |
---|
.. | .. |
---|
1650 | 1530 | return 0; |
---|
1651 | 1531 | } |
---|
1652 | 1532 | |
---|
1653 | | -int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size) |
---|
| 1533 | +int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size) |
---|
1654 | 1534 | { |
---|
1655 | 1535 | char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); |
---|
1656 | | - int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0; |
---|
| 1536 | + int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0; |
---|
1657 | 1537 | |
---|
1658 | 1538 | switch (err) { |
---|
1659 | 1539 | case EPERM: |
---|
.. | .. |
---|
1681 | 1561 | return 0; |
---|
1682 | 1562 | } |
---|
1683 | 1563 | |
---|
1684 | | -void perf_evlist__to_front(struct perf_evlist *evlist, |
---|
1685 | | - struct perf_evsel *move_evsel) |
---|
| 1564 | +void perf_evlist__to_front(struct evlist *evlist, |
---|
| 1565 | + struct evsel *move_evsel) |
---|
1686 | 1566 | { |
---|
1687 | | - struct perf_evsel *evsel, *n; |
---|
| 1567 | + struct evsel *evsel, *n; |
---|
1688 | 1568 | LIST_HEAD(move); |
---|
1689 | 1569 | |
---|
1690 | | - if (move_evsel == perf_evlist__first(evlist)) |
---|
| 1570 | + if (move_evsel == evlist__first(evlist)) |
---|
1691 | 1571 | return; |
---|
1692 | 1572 | |
---|
1693 | 1573 | evlist__for_each_entry_safe(evlist, n, evsel) { |
---|
1694 | 1574 | if (evsel->leader == move_evsel->leader) |
---|
1695 | | - list_move_tail(&evsel->node, &move); |
---|
| 1575 | + list_move_tail(&evsel->core.node, &move); |
---|
1696 | 1576 | } |
---|
1697 | 1577 | |
---|
1698 | | - list_splice(&move, &evlist->entries); |
---|
| 1578 | + list_splice(&move, &evlist->core.entries); |
---|
1699 | 1579 | } |
---|
1700 | 1580 | |
---|
1701 | | -void perf_evlist__set_tracking_event(struct perf_evlist *evlist, |
---|
1702 | | - struct perf_evsel *tracking_evsel) |
---|
| 1581 | +struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist) |
---|
1703 | 1582 | { |
---|
1704 | | - struct perf_evsel *evsel; |
---|
| 1583 | + struct evsel *evsel; |
---|
| 1584 | + |
---|
| 1585 | + evlist__for_each_entry(evlist, evsel) { |
---|
| 1586 | + if (evsel->tracking) |
---|
| 1587 | + return evsel; |
---|
| 1588 | + } |
---|
| 1589 | + |
---|
| 1590 | + return evlist__first(evlist); |
---|
| 1591 | +} |
---|
| 1592 | + |
---|
| 1593 | +void perf_evlist__set_tracking_event(struct evlist *evlist, |
---|
| 1594 | + struct evsel *tracking_evsel) |
---|
| 1595 | +{ |
---|
| 1596 | + struct evsel *evsel; |
---|
1705 | 1597 | |
---|
1706 | 1598 | if (tracking_evsel->tracking) |
---|
1707 | 1599 | return; |
---|
.. | .. |
---|
1714 | 1606 | tracking_evsel->tracking = true; |
---|
1715 | 1607 | } |
---|
1716 | 1608 | |
---|
1717 | | -struct perf_evsel * |
---|
1718 | | -perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, |
---|
| 1609 | +struct evsel * |
---|
| 1610 | +perf_evlist__find_evsel_by_str(struct evlist *evlist, |
---|
1719 | 1611 | const char *str) |
---|
1720 | 1612 | { |
---|
1721 | | - struct perf_evsel *evsel; |
---|
| 1613 | + struct evsel *evsel; |
---|
1722 | 1614 | |
---|
1723 | 1615 | evlist__for_each_entry(evlist, evsel) { |
---|
1724 | 1616 | if (!evsel->name) |
---|
.. | .. |
---|
1730 | 1622 | return NULL; |
---|
1731 | 1623 | } |
---|
1732 | 1624 | |
---|
1733 | | -void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, |
---|
| 1625 | +void perf_evlist__toggle_bkw_mmap(struct evlist *evlist, |
---|
1734 | 1626 | enum bkw_mmap_state state) |
---|
1735 | 1627 | { |
---|
1736 | 1628 | enum bkw_mmap_state old_state = evlist->bkw_mmap_state; |
---|
.. | .. |
---|
1788 | 1680 | return; |
---|
1789 | 1681 | } |
---|
1790 | 1682 | |
---|
1791 | | -bool perf_evlist__exclude_kernel(struct perf_evlist *evlist) |
---|
| 1683 | +bool perf_evlist__exclude_kernel(struct evlist *evlist) |
---|
1792 | 1684 | { |
---|
1793 | | - struct perf_evsel *evsel; |
---|
| 1685 | + struct evsel *evsel; |
---|
1794 | 1686 | |
---|
1795 | 1687 | evlist__for_each_entry(evlist, evsel) { |
---|
1796 | | - if (!evsel->attr.exclude_kernel) |
---|
| 1688 | + if (!evsel->core.attr.exclude_kernel) |
---|
1797 | 1689 | return false; |
---|
1798 | 1690 | } |
---|
1799 | 1691 | |
---|
.. | .. |
---|
1805 | 1697 | * the group display. Set the artificial group and set the leader's |
---|
1806 | 1698 | * forced_leader flag to notify the display code. |
---|
1807 | 1699 | */ |
---|
1808 | | -void perf_evlist__force_leader(struct perf_evlist *evlist) |
---|
| 1700 | +void perf_evlist__force_leader(struct evlist *evlist) |
---|
1809 | 1701 | { |
---|
1810 | 1702 | if (!evlist->nr_groups) { |
---|
1811 | | - struct perf_evsel *leader = perf_evlist__first(evlist); |
---|
| 1703 | + struct evsel *leader = evlist__first(evlist); |
---|
1812 | 1704 | |
---|
1813 | 1705 | perf_evlist__set_leader(evlist); |
---|
1814 | 1706 | leader->forced_leader = true; |
---|
1815 | 1707 | } |
---|
1816 | 1708 | } |
---|
| 1709 | + |
---|
| 1710 | +struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list, |
---|
| 1711 | + struct evsel *evsel, |
---|
| 1712 | + bool close) |
---|
| 1713 | +{ |
---|
| 1714 | + struct evsel *c2, *leader; |
---|
| 1715 | + bool is_open = true; |
---|
| 1716 | + |
---|
| 1717 | + leader = evsel->leader; |
---|
| 1718 | + pr_debug("Weak group for %s/%d failed\n", |
---|
| 1719 | + leader->name, leader->core.nr_members); |
---|
| 1720 | + |
---|
| 1721 | + /* |
---|
| 1722 | + * for_each_group_member doesn't work here because it doesn't |
---|
| 1723 | + * include the first entry. |
---|
| 1724 | + */ |
---|
| 1725 | + evlist__for_each_entry(evsel_list, c2) { |
---|
| 1726 | + if (c2 == evsel) |
---|
| 1727 | + is_open = false; |
---|
| 1728 | + if (c2->leader == leader) { |
---|
| 1729 | + if (is_open && close) |
---|
| 1730 | + perf_evsel__close(&c2->core); |
---|
| 1731 | + c2->leader = c2; |
---|
| 1732 | + c2->core.nr_members = 0; |
---|
| 1733 | + /* |
---|
| 1734 | + * Set this for all former members of the group |
---|
| 1735 | + * to indicate they get reopened. |
---|
| 1736 | + */ |
---|
| 1737 | + c2->reset_group = true; |
---|
| 1738 | + } |
---|
| 1739 | + } |
---|
| 1740 | + return leader; |
---|
| 1741 | +} |
---|
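For context: a "weak" group (the `:W` modifier, e.g. `perf stat -e '{cycles,cache-misses}:W'`) may be broken up if the kernel cannot schedule it as a unit. The helper demotes every member to be its own leader, closes the members opened before the failing one, and marks them `reset_group` so the caller reopens them individually. A sketch of the typical caller reaction, loosely modelled on the record/stat open loops:

```c
/* Sketch: when opening a member of a weak group fails, break the
 * group up and retry the open from its (former) leader. */
if (evsel->leader != evsel && evsel->weak_group) {
	evsel = perf_evlist__reset_weak_group(evlist, evsel, true);
	goto try_again;		/* reopen members as independent events */
}
```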
| 1742 | + |
---|
| 1743 | +static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close) |
---|
| 1744 | +{ |
---|
| 1745 | + char *s, *p; |
---|
| 1746 | + int ret = 0, fd; |
---|
| 1747 | + |
---|
| 1748 | + if (strncmp(str, "fifo:", 5)) |
---|
| 1749 | + return -EINVAL; |
---|
| 1750 | + |
---|
| 1751 | + str += 5; |
---|
| 1752 | + if (!*str || *str == ',') |
---|
| 1753 | + return -EINVAL; |
---|
| 1754 | + |
---|
| 1755 | + s = strdup(str); |
---|
| 1756 | + if (!s) |
---|
| 1757 | + return -ENOMEM; |
---|
| 1758 | + |
---|
| 1759 | + p = strchr(s, ','); |
---|
| 1760 | + if (p) |
---|
| 1761 | + *p = '\0'; |
---|
| 1762 | + |
---|
| 1763 | + /* |
---|
| 1764 | + * O_RDWR avoids POLLHUPs, which is necessary to allow the other
---|
| 1765 | + * end of a FIFO to be repeatedly opened and closed. |
---|
| 1766 | + */ |
---|
| 1767 | + fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC); |
---|
| 1768 | + if (fd < 0) { |
---|
| 1769 | + pr_err("Failed to open '%s'\n", s); |
---|
| 1770 | + ret = -errno; |
---|
| 1771 | + goto out_free; |
---|
| 1772 | + } |
---|
| 1773 | + *ctl_fd = fd; |
---|
| 1774 | + *ctl_fd_close = true; |
---|
| 1775 | + |
---|
| 1776 | + if (p && *++p) { |
---|
| 1777 | + /* O_RDWR | O_NONBLOCK means the other end need not be open */ |
---|
| 1778 | + fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC); |
---|
| 1779 | + if (fd < 0) { |
---|
| 1780 | + pr_err("Failed to open '%s'\n", p); |
---|
| 1781 | + ret = -errno; |
---|
| 1782 | + goto out_free; |
---|
| 1783 | + } |
---|
| 1784 | + *ctl_fd_ack = fd; |
---|
| 1785 | + } |
---|
| 1786 | + |
---|
| 1787 | +out_free: |
---|
| 1788 | + free(s); |
---|
| 1789 | + return ret; |
---|
| 1790 | +} |
---|
| 1791 | + |
---|
| 1792 | +int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close) |
---|
| 1793 | +{ |
---|
| 1794 | + char *comma = NULL, *endptr = NULL; |
---|
| 1795 | + |
---|
| 1796 | + *ctl_fd_close = false; |
---|
| 1797 | + |
---|
| 1798 | + if (strncmp(str, "fd:", 3)) |
---|
| 1799 | + return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close); |
---|
| 1800 | + |
---|
| 1801 | + *ctl_fd = strtoul(&str[3], &endptr, 0); |
---|
| 1802 | + if (endptr == &str[3]) |
---|
| 1803 | + return -EINVAL; |
---|
| 1804 | + |
---|
| 1805 | + comma = strchr(str, ','); |
---|
| 1806 | + if (comma) { |
---|
| 1807 | + if (endptr != comma) |
---|
| 1808 | + return -EINVAL; |
---|
| 1809 | + |
---|
| 1810 | + *ctl_fd_ack = strtoul(comma + 1, &endptr, 0); |
---|
| 1811 | + if (endptr == comma + 1 || *endptr != '\0') |
---|
| 1812 | + return -EINVAL; |
---|
| 1813 | + } |
---|
| 1814 | + |
---|
| 1815 | + return 0; |
---|
| 1816 | +} |
---|
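So the accepted spec is `fd:ctl-fd[,ack-fd]` for descriptors inherited from the parent, or `fifo:ctl-path[,ack-path]` for FIFOs that this code opens and therefore owns (hence `*ctl_fd_close`). A quick usage sketch:

```c
/* Sketch: parse a --control style spec and hand it to the evlist. */
int ctl_fd = -1, ctl_fd_ack = -1;
bool ctl_fd_close = false;

if (evlist__parse_control("fd:10,11", &ctl_fd, &ctl_fd_ack, &ctl_fd_close))
	return -EINVAL;				/* malformed spec */

if (evlist__initialize_ctlfd(evlist, ctl_fd, ctl_fd_ack))
	return -1;
/* ... the event loop then calls evlist__ctlfd_process() on wakeups,
 * and evlist__close_control() releases fds this code opened ... */
```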
| 1817 | + |
---|
| 1818 | +void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close) |
---|
| 1819 | +{ |
---|
| 1820 | + if (*ctl_fd_close) { |
---|
| 1821 | + *ctl_fd_close = false; |
---|
| 1822 | + close(ctl_fd); |
---|
| 1823 | + if (ctl_fd_ack >= 0) |
---|
| 1824 | + close(ctl_fd_ack); |
---|
| 1825 | + } |
---|
| 1826 | +} |
---|
| 1827 | + |
---|
| 1828 | +int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack) |
---|
| 1829 | +{ |
---|
| 1830 | + if (fd == -1) { |
---|
| 1831 | + pr_debug("Control descriptor is not initialized\n"); |
---|
| 1832 | + return 0; |
---|
| 1833 | + } |
---|
| 1834 | + |
---|
| 1835 | + evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, |
---|
| 1836 | + fdarray_flag__nonfilterable); |
---|
| 1837 | + if (evlist->ctl_fd.pos < 0) { |
---|
| 1838 | + evlist->ctl_fd.pos = -1; |
---|
| 1839 | + pr_err("Failed to add ctl fd entry: %m\n"); |
---|
| 1840 | + return -1; |
---|
| 1841 | + } |
---|
| 1842 | + |
---|
| 1843 | + evlist->ctl_fd.fd = fd; |
---|
| 1844 | + evlist->ctl_fd.ack = ack; |
---|
| 1845 | + |
---|
| 1846 | + return 0; |
---|
| 1847 | +} |
---|
| 1848 | + |
---|
| 1849 | +bool evlist__ctlfd_initialized(struct evlist *evlist) |
---|
| 1850 | +{ |
---|
| 1851 | + return evlist->ctl_fd.pos >= 0; |
---|
| 1852 | +} |
---|
| 1853 | + |
---|
| 1854 | +int evlist__finalize_ctlfd(struct evlist *evlist) |
---|
| 1855 | +{ |
---|
| 1856 | + struct pollfd *entries = evlist->core.pollfd.entries; |
---|
| 1857 | + |
---|
| 1858 | + if (!evlist__ctlfd_initialized(evlist)) |
---|
| 1859 | + return 0; |
---|
| 1860 | + |
---|
| 1861 | + entries[evlist->ctl_fd.pos].fd = -1; |
---|
| 1862 | + entries[evlist->ctl_fd.pos].events = 0; |
---|
| 1863 | + entries[evlist->ctl_fd.pos].revents = 0; |
---|
| 1864 | + |
---|
| 1865 | + evlist->ctl_fd.pos = -1; |
---|
| 1866 | + evlist->ctl_fd.ack = -1; |
---|
| 1867 | + evlist->ctl_fd.fd = -1; |
---|
| 1868 | + |
---|
| 1869 | + return 0; |
---|
| 1870 | +} |
---|
| 1871 | + |
---|
| 1872 | +static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd, |
---|
| 1873 | + char *cmd_data, size_t data_size) |
---|
| 1874 | +{ |
---|
| 1875 | + int err; |
---|
| 1876 | + char c = '\0';	/* keep the pr_debug below defined if the first read() returns 0 */
---|
| 1877 | + size_t bytes_read = 0; |
---|
| 1878 | + |
---|
| 1879 | + *cmd = EVLIST_CTL_CMD_UNSUPPORTED; |
---|
| 1880 | + memset(cmd_data, 0, data_size); |
---|
| 1881 | + data_size--; |
---|
| 1882 | + |
---|
| 1883 | + do { |
---|
| 1884 | + err = read(evlist->ctl_fd.fd, &c, 1); |
---|
| 1885 | + if (err > 0) { |
---|
| 1886 | + if (c == '\n' || c == '\0') |
---|
| 1887 | + break; |
---|
| 1888 | + cmd_data[bytes_read++] = c; |
---|
| 1889 | + if (bytes_read == data_size) |
---|
| 1890 | + break; |
---|
| 1891 | + continue; |
---|
| 1892 | + } else if (err == -1) { |
---|
| 1893 | + if (errno == EINTR) |
---|
| 1894 | + continue; |
---|
| 1895 | + if (errno == EAGAIN || errno == EWOULDBLOCK) |
---|
| 1896 | + err = 0; |
---|
| 1897 | + else |
---|
| 1898 | + pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd); |
---|
| 1899 | + } |
---|
| 1900 | + break; |
---|
| 1901 | + } while (1); |
---|
| 1902 | + |
---|
| 1903 | + pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data, |
---|
| 1904 | + bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0"); |
---|
| 1905 | + |
---|
| 1906 | + if (bytes_read > 0) { |
---|
| 1907 | + if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG, |
---|
| 1908 | + (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) { |
---|
| 1909 | + *cmd = EVLIST_CTL_CMD_ENABLE; |
---|
| 1910 | + } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG, |
---|
| 1911 | + (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) { |
---|
| 1912 | + *cmd = EVLIST_CTL_CMD_DISABLE; |
---|
| 1913 | + } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG, |
---|
| 1914 | + (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) { |
---|
| 1915 | + *cmd = EVLIST_CTL_CMD_SNAPSHOT; |
---|
| 1916 | + pr_debug("is snapshot\n"); |
---|
| 1917 | + } |
---|
| 1918 | + } |
---|
| 1919 | + |
---|
| 1920 | + return bytes_read ? (int)bytes_read : err; |
---|
| 1921 | +} |
---|
| 1922 | + |
---|
| 1923 | +int evlist__ctlfd_ack(struct evlist *evlist) |
---|
| 1924 | +{ |
---|
| 1925 | + int err; |
---|
| 1926 | + |
---|
| 1927 | + if (evlist->ctl_fd.ack == -1) |
---|
| 1928 | + return 0; |
---|
| 1929 | + |
---|
| 1930 | + err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG, |
---|
| 1931 | + sizeof(EVLIST_CTL_CMD_ACK_TAG)); |
---|
| 1932 | + if (err == -1) |
---|
| 1933 | + pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack); |
---|
| 1934 | + |
---|
| 1935 | + return err; |
---|
| 1936 | +} |
---|
| 1937 | + |
---|
| 1938 | +int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd) |
---|
| 1939 | +{ |
---|
| 1940 | + int err = 0; |
---|
| 1941 | + char cmd_data[EVLIST_CTL_CMD_MAX_LEN]; |
---|
| 1942 | + int ctlfd_pos = evlist->ctl_fd.pos; |
---|
| 1943 | + struct pollfd *entries = evlist->core.pollfd.entries; |
---|
| 1944 | + |
---|
| 1945 | + if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents) |
---|
| 1946 | + return 0; |
---|
| 1947 | + |
---|
| 1948 | + if (entries[ctlfd_pos].revents & POLLIN) { |
---|
| 1949 | + err = evlist__ctlfd_recv(evlist, cmd, cmd_data, |
---|
| 1950 | + EVLIST_CTL_CMD_MAX_LEN); |
---|
| 1951 | + if (err > 0) { |
---|
| 1952 | + switch (*cmd) { |
---|
| 1953 | + case EVLIST_CTL_CMD_ENABLE: |
---|
| 1954 | + evlist__enable(evlist); |
---|
| 1955 | + break; |
---|
| 1956 | + case EVLIST_CTL_CMD_DISABLE: |
---|
| 1957 | + evlist__disable(evlist); |
---|
| 1958 | + break; |
---|
| 1959 | + case EVLIST_CTL_CMD_SNAPSHOT: |
---|
| 1960 | + break; |
---|
| 1961 | + case EVLIST_CTL_CMD_ACK: |
---|
| 1962 | + case EVLIST_CTL_CMD_UNSUPPORTED: |
---|
| 1963 | + default: |
---|
| 1964 | + pr_debug("ctlfd: unsupported %d\n", *cmd); |
---|
| 1965 | + break; |
---|
| 1966 | + } |
---|
| 1967 | + if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED || |
---|
| 1968 | + *cmd == EVLIST_CTL_CMD_SNAPSHOT)) |
---|
| 1969 | + evlist__ctlfd_ack(evlist); |
---|
| 1970 | + } |
---|
| 1971 | + } |
---|
| 1972 | + |
---|
| 1973 | + if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR)) |
---|
| 1974 | + evlist__finalize_ctlfd(evlist); |
---|
| 1975 | + else |
---|
| 1976 | + entries[ctlfd_pos].revents = 0; |
---|
| 1977 | + |
---|
| 1978 | + return err; |
---|
| 1979 | +} |
---|
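Seen from the controlling process, the protocol is line-oriented: write `enable`, `disable` or `snapshot` (newline- or NUL-terminated) into the control fd, and wait for `ack\n` on the ack fd for the commands acknowledged here (snapshot is left for the caller to ack). A controller-side sketch, assuming hypothetical FIFO paths passed as `--control fifo:/tmp/perf.ctl,/tmp/perf.ack`:

```c
#include <fcntl.h>
#include <unistd.h>

/* Sketch: toggle the events of an already-running tool via its
 * control FIFOs (paths are hypothetical examples). */
static void toggle_events(void)
{
	int ctl = open("/tmp/perf.ctl", O_WRONLY);
	int ack = open("/tmp/perf.ack", O_RDONLY);
	char buf[8];

	if (ctl < 0 || ack < 0)
		return;

	write(ctl, "enable\n", 7);	/* start the events */
	read(ack, buf, sizeof(buf));	/* blocks until "ack\n" */
	write(ctl, "disable\n", 8);	/* stop them again */
	read(ack, buf, sizeof(buf));

	close(ctl);
	close(ack);
}
```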
| 1980 | + |
---|
| 1981 | +struct evsel *evlist__find_evsel(struct evlist *evlist, int idx) |
---|
| 1982 | +{ |
---|
| 1983 | + struct evsel *evsel; |
---|
| 1984 | + |
---|
| 1985 | + evlist__for_each_entry(evlist, evsel) { |
---|
| 1986 | + if (evsel->idx == idx) |
---|
| 1987 | + return evsel; |
---|
| 1988 | + } |
---|
| 1989 | + return NULL; |
---|
| 1990 | +} |
---|