@@ -2,10 +2,18 @@
 #include <errno.h>
 #include <inttypes.h>
 #include <math.h>
+#include <string.h>
+#include "counts.h"
+#include "cpumap.h"
+#include "debug.h"
+#include "header.h"
 #include "stat.h"
+#include "session.h"
+#include "target.h"
 #include "evlist.h"
 #include "evsel.h"
 #include "thread_map.h"
+#include <linux/zalloc.h>
 
 void update_stats(struct stats *stats, u64 val)
 {
@@ -67,7 +75,7 @@
 	return pct;
 }
 
-bool __perf_evsel_stat__is(struct perf_evsel *evsel,
+bool __perf_evsel_stat__is(struct evsel *evsel,
 			   enum perf_stat_evsel_id id)
 {
 	struct perf_stat_evsel *ps = evsel->stats;
@@ -87,12 +95,16 @@
 	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
 	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
 	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
+	ID(TOPDOWN_RETIRING, topdown-retiring),
+	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
+	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
+	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
 	ID(SMI_NUM, msr/smi/),
 	ID(APERF, msr/aperf/),
 };
 #undef ID
 
-static void perf_stat_evsel_id_init(struct perf_evsel *evsel)
+static void perf_stat_evsel_id_init(struct evsel *evsel)
 {
 	struct perf_stat_evsel *ps = evsel->stats;
 	int i;
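
The four new IDs are the level-1 top-down (TMA) events: retiring, bad speculation, frontend bound and backend bound each count pipeline slots, and together they account for every slot. They are matched by event name in perf_stat_evsel_id_init() below so the shadow-stat code can report them as percentages. A minimal sketch of that arithmetic, with a hypothetical helper name (not code from this patch):

/* Hypothetical: one level-1 top-down category as a percentage of slots. */
static double td_percent(u64 category_slots, u64 total_slots)
{
	return total_slots ? 100.0 * (double)category_slots / (double)total_slots
			   : 0.0;
}
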
@@ -100,14 +112,14 @@
 	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
 
 	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
-		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
+		if (!strcmp(evsel__name(evsel), id_str[i])) {
 			ps->id = i;
 			break;
 		}
 	}
 }
 
-static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
+static void evsel__reset_stat_priv(struct evsel *evsel)
 {
 	int i;
 	struct perf_stat_evsel *ps = evsel->stats;
@@ -118,26 +130,25 @@
 	perf_stat_evsel_id_init(evsel);
 }
 
-static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
+static int evsel__alloc_stat_priv(struct evsel *evsel)
 {
 	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
 	if (evsel->stats == NULL)
 		return -ENOMEM;
-	perf_evsel__reset_stat_priv(evsel);
+	evsel__reset_stat_priv(evsel);
 	return 0;
 }
 
-static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
+static void evsel__free_stat_priv(struct evsel *evsel)
 {
 	struct perf_stat_evsel *ps = evsel->stats;
 
 	if (ps)
-		free(ps->group_data);
+		zfree(&ps->group_data);
 	zfree(&evsel->stats);
 }
 
-static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
-					     int ncpus, int nthreads)
+static int evsel__alloc_prev_raw_counts(struct evsel *evsel, int ncpus, int nthreads)
 {
 	struct perf_counts *counts;
 
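
Note on the free() -> zfree() change above: free(ps->group_data) left ps->group_data dangling, while zfree() takes the address of the pointer, frees the pointee and NULLs it, which is why <linux/zalloc.h> is now included. A sketch of the idiom, under the assumption that zfree() behaves like the usual tools-tree helper:

/* Sketch of the zfree() idiom (assumed behavior, not this patch's code):
 * free the pointee and clear the caller's pointer so stale reads or a
 * second free are harmless. */
static inline void example_zfree(void **ptr)
{
	free(*ptr);
	*ptr = NULL;
}
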
@@ -148,40 +159,37 @@
 	return counts ? 0 : -ENOMEM;
 }
 
-static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
+static void evsel__free_prev_raw_counts(struct evsel *evsel)
 {
 	perf_counts__delete(evsel->prev_raw_counts);
 	evsel->prev_raw_counts = NULL;
 }
 
-static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
+static void evsel__reset_prev_raw_counts(struct evsel *evsel)
 {
-	if (evsel->prev_raw_counts) {
-		evsel->prev_raw_counts->aggr.val = 0;
-		evsel->prev_raw_counts->aggr.ena = 0;
-		evsel->prev_raw_counts->aggr.run = 0;
-	}
+	if (evsel->prev_raw_counts)
+		perf_counts__reset(evsel->prev_raw_counts);
 }
 
-static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
+static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
 {
-	int ncpus = perf_evsel__nr_cpus(evsel);
-	int nthreads = thread_map__nr(evsel->threads);
+	int ncpus = evsel__nr_cpus(evsel);
+	int nthreads = perf_thread_map__nr(evsel->core.threads);
 
-	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
-	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
-	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
+	if (evsel__alloc_stat_priv(evsel) < 0 ||
+	    evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
+	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
 		return -ENOMEM;
 
 	return 0;
 }
 
-int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
+int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel) {
-		if (perf_evsel__alloc_stats(evsel, alloc_raw))
+		if (evsel__alloc_stats(evsel, alloc_raw))
 			goto out_free;
 	}
 
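
The open-coded zeroing of the aggr triple is replaced by perf_counts__reset() (hence the new counts.h include), which can also clear the per-cpu/per-thread values rather than just the aggregate. A sketch of the behavior this change relies on, inferred from the call site rather than copied from counts.c:

/* Assumed effect of perf_counts__reset(): clear the aggregate triple
 * and every per-cpu/per-thread slot of the counts structure. */
static void example_counts_reset(struct perf_counts_values *aggr,
				 struct perf_counts_values *vals, int nr_vals)
{
	memset(aggr, 0, sizeof(*aggr));		/* val, ena, run */
	memset(vals, 0, nr_vals * sizeof(*vals));
}
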
@@ -192,46 +200,90 @@
 	return -1;
 }
 
-void perf_evlist__free_stats(struct perf_evlist *evlist)
+void perf_evlist__free_stats(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel) {
-		perf_evsel__free_stat_priv(evsel);
-		perf_evsel__free_counts(evsel);
-		perf_evsel__free_prev_raw_counts(evsel);
+		evsel__free_stat_priv(evsel);
+		evsel__free_counts(evsel);
+		evsel__free_prev_raw_counts(evsel);
 	}
 }
 
-void perf_evlist__reset_stats(struct perf_evlist *evlist)
+void perf_evlist__reset_stats(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel) {
-		perf_evsel__reset_stat_priv(evsel);
-		perf_evsel__reset_counts(evsel);
+		evsel__reset_stat_priv(evsel);
+		evsel__reset_counts(evsel);
 	}
 }
 
-void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
+void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel)
-		perf_evsel__reset_prev_raw_counts(evsel);
+		evsel__reset_prev_raw_counts(evsel);
 }
 
-static void zero_per_pkg(struct perf_evsel *counter)
+static void perf_evsel__copy_prev_raw_counts(struct evsel *evsel)
+{
+	int ncpus = evsel__nr_cpus(evsel);
+	int nthreads = perf_thread_map__nr(evsel->core.threads);
+
+	for (int thread = 0; thread < nthreads; thread++) {
+		for (int cpu = 0; cpu < ncpus; cpu++) {
+			*perf_counts(evsel->counts, cpu, thread) =
+				*perf_counts(evsel->prev_raw_counts, cpu,
+					     thread);
+		}
+	}
+
+	evsel->counts->aggr = evsel->prev_raw_counts->aggr;
+}
+
+void perf_evlist__copy_prev_raw_counts(struct evlist *evlist)
+{
+	struct evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel)
+		perf_evsel__copy_prev_raw_counts(evsel);
+}
+
+void perf_evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
+{
+	struct evsel *evsel;
+
+	/*
+	 * To collect the overall statistics for interval mode,
+	 * we copy the counts from evsel->prev_raw_counts to
+	 * evsel->counts. The perf_stat_process_counter creates
+	 * aggr values from per cpu values, but the per cpu values
+	 * are 0 for AGGR_GLOBAL. So we use a trick that saves the
+	 * previous aggr value to the first member of perf_counts,
+	 * then aggr calculation in process_counter_values can work
+	 * correctly.
+	 */
+	evlist__for_each_entry(evlist, evsel) {
+		*perf_counts(evsel->prev_raw_counts, 0, 0) =
+			evsel->prev_raw_counts->aggr;
+	}
+}
+
+static void zero_per_pkg(struct evsel *counter)
 {
 	if (counter->per_pkg_mask)
-		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
+		memset(counter->per_pkg_mask, 0, cpu__max_cpu());
 }
 
-static int check_per_pkg(struct perf_evsel *counter,
+static int check_per_pkg(struct evsel *counter,
 			 struct perf_counts_values *vals, int cpu, bool *skip)
 {
 	unsigned long *mask = counter->per_pkg_mask;
-	struct cpu_map *cpus = perf_evsel__cpus(counter);
+	struct perf_cpu_map *cpus = evsel__cpus(counter);
 	int s;
 
 	*skip = false;
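
perf_evlist__copy_prev_raw_counts() and perf_evlist__save_aggr_prev_raw_counts() exist so interval mode can print an overall summary after the last interval: the running totals live in prev_raw_counts and must be moved back into evsel->counts before perf_stat_process_counter() aggregates them. A plausible end-of-run sequence (hypothetical caller; the exact builtin-stat.c ordering may differ):

/* Hypothetical summary path for 'perf stat -I N --summary'. */
static void example_emit_summary(struct evlist *evlist,
				 struct perf_stat_config *config)
{
	if (config->aggr_mode == AGGR_GLOBAL)
		/* per-cpu slots are 0 here, so stash aggr in slot (0, 0) */
		perf_evlist__save_aggr_prev_raw_counts(evlist);

	perf_evlist__copy_prev_raw_counts(evlist);	/* prev -> counts */
	perf_evlist__reset_prev_raw_counts(evlist);	/* start clean */
}
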
@@ -239,11 +291,11 @@
 	if (!counter->per_pkg)
 		return 0;
 
-	if (cpu_map__empty(cpus))
+	if (perf_cpu_map__empty(cpus))
 		return 0;
 
 	if (!mask) {
-		mask = zalloc(MAX_NR_CPUS);
+		mask = zalloc(cpu__max_cpu());
 		if (!mask)
 			return -ENOMEM;
 
@@ -270,7 +322,7 @@
 }
 
 static int
-process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
+process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
 		       int cpu, int thread,
 		       struct perf_counts_values *count)
 {
@@ -289,14 +341,18 @@
 	switch (config->aggr_mode) {
 	case AGGR_THREAD:
 	case AGGR_CORE:
+	case AGGR_DIE:
 	case AGGR_SOCKET:
+	case AGGR_NODE:
 	case AGGR_NONE:
 		if (!evsel->snapshot)
-			perf_evsel__compute_deltas(evsel, cpu, thread, count);
+			evsel__compute_deltas(evsel, cpu, thread, count);
 		perf_counts_values__scale(count, config->scale, NULL);
-		if (config->aggr_mode == AGGR_NONE)
-			perf_stat__update_shadow_stats(evsel, count->val, cpu,
-						       &rt_stat);
+		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
+			perf_stat__update_shadow_stats(evsel, count->val,
+						       cpu, &rt_stat);
+		}
+
 		if (config->aggr_mode == AGGR_THREAD) {
 			if (config->stats)
 				perf_stat__update_shadow_stats(evsel,
@@ -308,10 +364,8 @@
 		break;
 	case AGGR_GLOBAL:
 		aggr->val += count->val;
-		if (config->scale) {
-			aggr->ena += count->ena;
-			aggr->run += count->run;
-		}
+		aggr->ena += count->ena;
+		aggr->run += count->run;
 	case AGGR_UNSET:
 	default:
 		break;
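
Dropping the config->scale guard means ena and run are now always accumulated for AGGR_GLOBAL, so the enabled/running ratio stays available even when scaling is off. For context, these two fields feed the usual multiplexing extrapolation; a sketch of the idea (perf_counts_values__scale() in libperf is the real implementation, this is not its verbatim body):

/* Sketch of multiplexing scale-up: if the event was scheduled for only
 * part of its enabled time, extrapolate the observed count. */
static u64 example_scale(u64 val, u64 ena, u64 run)
{
	if (run == 0)
		return 0;			/* never scheduled */
	if (run < ena)
		return (u64)((double)val * ena / run);
	return val;				/* counted the whole time */
}
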
@@ -321,13 +375,13 @@
 }
 
 static int process_counter_maps(struct perf_stat_config *config,
-				struct perf_evsel *counter)
+				struct evsel *counter)
 {
-	int nthreads = thread_map__nr(counter->threads);
-	int ncpus = perf_evsel__nr_cpus(counter);
+	int nthreads = perf_thread_map__nr(counter->core.threads);
+	int ncpus = evsel__nr_cpus(counter);
 	int cpu, thread;
 
-	if (counter->system_wide)
+	if (counter->core.system_wide)
 		nthreads = 1;
 
 	for (thread = 0; thread < nthreads; thread++) {
@@ -342,7 +396,7 @@
 }
 
 int perf_stat_process_counter(struct perf_stat_config *config,
-			      struct perf_evsel *counter)
+			      struct evsel *counter)
 {
 	struct perf_counts_values *aggr = &counter->counts->aggr;
 	struct perf_stat_evsel *ps = counter->stats;
@@ -358,7 +412,7 @@
 	 * interval mode, otherwise overall avg running
 	 * averages will be shown for each interval.
 	 */
-	if (config->interval) {
+	if (config->interval || config->summary) {
 		for (i = 0; i < 3; i++)
 			init_stats(&ps->res_stats[i]);
 	}
@@ -374,7 +428,7 @@
 		return 0;
 
 	if (!counter->snapshot)
-		perf_evsel__compute_deltas(counter, -1, -1, aggr);
+		evsel__compute_deltas(counter, -1, -1, aggr);
 	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);
 
 	for (i = 0; i < 3; i++)
@@ -382,7 +436,7 @@
 
 	if (verbose > 0) {
 		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
-			perf_evsel__name(counter), count[0], count[1], count[2]);
+			evsel__name(counter), count[0], count[1], count[2]);
 	}
 
 	/*
@@ -393,13 +447,12 @@
 	return 0;
 }
 
-int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
-				   union perf_event *event,
-				   struct perf_session *session)
+int perf_event__process_stat_event(struct perf_session *session,
+				   union perf_event *event)
 {
 	struct perf_counts_values count;
-	struct stat_event *st = &event->stat;
-	struct perf_evsel *counter;
+	struct perf_record_stat *st = &event->stat;
+	struct evsel *counter;
 
 	count.val = st->val;
 	count.ena = st->ena;
@@ -418,12 +471,12 @@
 
 size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
 {
-	struct stat_event *st = (struct stat_event *) event;
+	struct perf_record_stat *st = (struct perf_record_stat *)event;
 	size_t ret;
 
-	ret = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
+	ret = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
 		      st->id, st->cpu, st->thread);
-	ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
+	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
 		      st->val, st->ena, st->run);
 
 	return ret;
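
The PRIu64 -> PRI_lu64 switch matches the struct perf_record_* fields, which are __u64 (unsigned long long) rather than uint64_t (unsigned long on LP64), so plain PRIu64 triggers printf-format warnings. From memory, the libperf macro is shaped roughly like the sketch below; treat the exact definition as an assumption:

/* Assumed shape of libperf's PRI_lu64: add the extra 'l' that __u64
 * needs on LP64 targets, where PRIu64 expands for unsigned long. */
#ifdef __LP64__
#define EXAMPLE_PRI_lu64 "l" PRIu64
#else
#define EXAMPLE_PRI_lu64 PRIu64
#endif
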
@@ -431,10 +484,10 @@
 
 size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
 {
-	struct stat_round_event *rd = (struct stat_round_event *)event;
+	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
 	size_t ret;
 
-	ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
+	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
 		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");
 
 	return ret;
@@ -454,3 +507,65 @@
 
 	return ret;
 }
+
+int create_perf_stat_counter(struct evsel *evsel,
+			     struct perf_stat_config *config,
+			     struct target *target,
+			     int cpu)
+{
+	struct perf_event_attr *attr = &evsel->core.attr;
+	struct evsel *leader = evsel->leader;
+
+	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+			    PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+	/*
+	 * The event is part of non trivial group, let's enable
+	 * the group read (for leader) and ID retrieval for all
+	 * members.
+	 */
+	if (leader->core.nr_members > 1)
+		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
+
+	attr->inherit = !config->no_inherit;
+
+	/*
+	 * Some events get initialized with sample_(period/type) set,
+	 * like tracepoints. Clear it up for counting.
+	 */
+	attr->sample_period = 0;
+
+	if (config->identifier)
+		attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+
+	if (config->all_user) {
+		attr->exclude_kernel = 1;
+		attr->exclude_user = 0;
+	}
+
+	if (config->all_kernel) {
+		attr->exclude_kernel = 0;
+		attr->exclude_user = 1;
+	}
+
+	/*
+	 * Disabling all counters initially, they will be enabled
+	 * either manually by us or by kernel via enable_on_exec
+	 * set later.
+	 */
+	if (evsel__is_group_leader(evsel)) {
+		attr->disabled = 1;
+
+		/*
+		 * In case of initial_delay we enable tracee
+		 * events manually.
+		 */
+		if (target__none(target) && !config->initial_delay)
+			attr->enable_on_exec = 1;
+	}
+
+	if (target__has_cpu(target) && !target__has_per_thread(target))
+		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);
+
+	return evsel__open_per_thread(evsel, evsel->core.threads);
+}
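
create_perf_stat_counter() centralizes the attr setup (read_format, inherit, disabled/enable_on_exec, user/kernel exclusion) that perf stat needs before opening each event. A minimal usage sketch (hypothetical caller, error handling trimmed; cpu == -1 is assumed to mean every CPU in the evsel's map):

/* Hypothetical caller: open all events on their mapped CPUs. */
static int example_open_counters(struct evlist *evlist,
				 struct perf_stat_config *config,
				 struct target *target)
{
	struct evsel *counter;

	evlist__for_each_entry(evlist, counter) {
		if (create_perf_stat_counter(counter, config, target, -1) < 0)
			return -1;
	}
	return 0;
}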
---|