2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/tools/perf/util/evsel.h
@@ -4,153 +4,129 @@

#include <linux/list.h>
#include <stdbool.h>
-#include <stddef.h>
+#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/types.h>
-#include "xyarray.h"
-#include "symbol.h"
-#include "cpumap.h"
-#include "counts.h"
+#include <internal/evsel.h>
+#include <perf/evsel.h>
+#include "symbol_conf.h"
+#include <internal/cpumap.h>

-struct perf_evsel;
-
-/*
- * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
- * more than one entry in the evlist.
- */
-struct perf_sample_id {
-	struct hlist_node node;
-	u64 id;
-	struct perf_evsel *evsel;
-	int idx;
-	int cpu;
-	pid_t tid;
-
-	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
-	u64 period;
-};
-
+struct bpf_object;
struct cgroup;
-
-/*
- * The 'struct perf_evsel_config_term' is used to pass event
- * specific configuration data to perf_evsel__config routine.
- * It is allocated within event parsing and attached to
- * perf_evsel::config_terms list head.
-*/
-enum term_type {
-	PERF_EVSEL__CONFIG_TERM_PERIOD,
-	PERF_EVSEL__CONFIG_TERM_FREQ,
-	PERF_EVSEL__CONFIG_TERM_TIME,
-	PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
-	PERF_EVSEL__CONFIG_TERM_STACK_USER,
-	PERF_EVSEL__CONFIG_TERM_INHERIT,
-	PERF_EVSEL__CONFIG_TERM_MAX_STACK,
-	PERF_EVSEL__CONFIG_TERM_OVERWRITE,
-	PERF_EVSEL__CONFIG_TERM_DRV_CFG,
-	PERF_EVSEL__CONFIG_TERM_BRANCH,
-};
-
-struct perf_evsel_config_term {
-	struct list_head list;
-	enum term_type type;
-	union {
-		u64 period;
-		u64 freq;
-		bool time;
-		char *callgraph;
-		char *drv_cfg;
-		u64 stack_user;
-		int max_stack;
-		bool inherit;
-		bool overwrite;
-		char *branch;
-	} val;
-	bool weak;
-};
-
+struct perf_counts;
struct perf_stat_evsel;
+union perf_event;

-/** struct perf_evsel - event selector
+typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);
+
+enum perf_tool_event {
+	PERF_TOOL_NONE = 0,
+	PERF_TOOL_DURATION_TIME = 1,
+};
+
+/** struct evsel - event selector
 *
 * @evlist - evlist this evsel is in, if it is in one.
- * @node - To insert it into evlist->entries or in other list_heads, say in
- *         the event parsing routines.
+ * @core - libperf evsel object
 * @name - Can be set to retain the original event name passed by the user,
 *         so that when showing results in tools such as 'perf stat', we
 *         show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
- *          struct sample_event
+ *          struct perf_record_sample
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
 *          is used there is an id sample appended to non-sample events
 * @priv: And what is in its containing unnamed union are tool specific
 */
-struct perf_evsel {
-	struct list_head node;
-	struct perf_evlist *evlist;
-	struct perf_event_attr attr;
-	char *filter;
-	struct xyarray *fd;
-	struct xyarray *sample_id;
-	u64 *id;
+struct evsel {
+	struct perf_evsel core;
+	struct evlist *evlist;
+	off_t id_offset;
+	int idx;
+	int id_pos;
+	int is_pos;
+	unsigned int sample_size;
+
+	/*
+	 * These fields can be set in the parse-events code or similar.
+	 * Please check evsel__clone() to copy them properly so that
+	 * they can be released properly.
+	 */
+	struct {
+		char *name;
+		char *group_name;
+		const char *pmu_name;
+		struct tep_event *tp_format;
+		char *filter;
+		unsigned long max_events;
+		double scale;
+		const char *unit;
+		struct cgroup *cgrp;
+		enum perf_tool_event tool_event;
+		/* parse modifier helper */
+		int exclude_GH;
+		int sample_read;
+		bool snapshot;
+		bool per_pkg;
+		bool percore;
+		bool precise_max;
+		bool use_uncore_alias;
+		bool is_libpfm_event;
+		bool auto_merge_stats;
+		bool collect_stat;
+		bool weak_group;
+		int bpf_fd;
+		struct bpf_object *bpf_obj;
+	};
+
+	/*
+	 * metric fields are similar, but needs more care as they can have
+	 * references to other metric (evsel).
+	 */
+	const char * metric_expr;
+	const char * metric_name;
+	struct evsel **metric_events;
+	struct evsel *metric_leader;
+
+	void *handler;
	struct perf_counts *counts;
	struct perf_counts *prev_raw_counts;
-	int idx;
-	u32 ids;
-	char *name;
-	double scale;
-	const char *unit;
-	struct event_format *tp_format;
-	off_t id_offset;
+	unsigned long nr_events_printed;
	struct perf_stat_evsel *stats;
	void *priv;
	u64 db_id;
-	struct cgroup *cgrp;
-	void *handler;
-	struct cpu_map *cpus;
-	struct cpu_map *own_cpus;
-	struct thread_map *threads;
-	unsigned int sample_size;
-	int id_pos;
-	int is_pos;
	bool uniquified_name;
-	bool snapshot;
	bool supported;
	bool needs_swap;
+	bool disabled;
	bool no_aux_samples;
	bool immediate;
-	bool system_wide;
	bool tracking;
-	bool per_pkg;
-	bool precise_max;
	bool ignore_missing_thread;
	bool forced_leader;
-	bool use_uncore_alias;
-	/* parse modifier helper */
-	int exclude_GH;
-	int nr_members;
-	int sample_read;
-	unsigned long *per_pkg_mask;
-	struct perf_evsel *leader;
-	char *group_name;
	bool cmdline_group_boundary;
-	struct list_head config_terms;
-	int bpf_fd;
-	bool auto_merge_stats;
	bool merged_stat;
-	const char * metric_expr;
-	const char * metric_name;
-	struct perf_evsel **metric_events;
-	bool collect_stat;
-	bool weak_group;
-	const char *pmu_name;
-};
-
-union u64_swap {
-	u64 val64;
-	u32 val32[2];
+	bool reset_group;
+	bool errored;
+	unsigned long *per_pkg_mask;
+	struct evsel *leader;
+	struct list_head config_terms;
+	int err;
+	int cpu_iter;
+	struct {
+		evsel__sb_cb_t *cb;
+		void *data;
+	} side_band;
+	/*
+	 * For reporting purposes, an evsel sample can have a callchain
+	 * synthesized from AUX area data. Keep track of synthesized sample
+	 * types here. Note, the recorded sample_type cannot be changed because
+	 * it is needed to continue to parse events.
+	 * See also evsel__has_callchain().
+	 */
+	__u64 synth_sample_type;
};

struct perf_missing_features {
@@ -163,322 +139,288 @@
	bool lbr_flags;
	bool write_backward;
	bool group_read;
+	bool ksymbol;
+	bool bpf;
+	bool aux_output;
+	bool branch_hw_idx;
+	bool cgroup;
};

extern struct perf_missing_features perf_missing_features;

-struct cpu_map;
+struct perf_cpu_map;
struct target;
struct thread_map;
struct record_opts;

-static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
+static inline struct perf_cpu_map *evsel__cpus(struct evsel *evsel)
{
-	return evsel->cpus;
+	return perf_evsel__cpus(&evsel->core);
}

-static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
+static inline int evsel__nr_cpus(struct evsel *evsel)
{
-	return perf_evsel__cpus(evsel)->nr;
+	return evsel__cpus(evsel)->nr;
}

void perf_counts_values__scale(struct perf_counts_values *count,
	bool scale, s8 *pscaled);

-void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
-	struct perf_counts_values *count);
+void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
+	struct perf_counts_values *count);

-int perf_evsel__object_config(size_t object_size,
-	int (*init)(struct perf_evsel *evsel),
-	void (*fini)(struct perf_evsel *evsel));
+int evsel__object_config(size_t object_size,
+	int (*init)(struct evsel *evsel),
+	void (*fini)(struct evsel *evsel));

-struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);
+struct perf_pmu *evsel__find_pmu(struct evsel *evsel);
+bool evsel__is_aux_event(struct evsel *evsel);

-static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
+struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx);
+
+static inline struct evsel *evsel__new(struct perf_event_attr *attr)
{
-	return perf_evsel__new_idx(attr, 0);
+	return evsel__new_idx(attr, 0);
}

-struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
+struct evsel *evsel__clone(struct evsel *orig);
+struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx);

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
-static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
+static inline struct evsel *evsel__newtp(const char *sys, const char *name)
{
-	return perf_evsel__newtp_idx(sys, name, 0);
+	return evsel__newtp_idx(sys, name, 0);
}

-struct perf_evsel *perf_evsel__new_cycles(bool precise);
+struct evsel *evsel__new_cycles(bool precise);

-struct event_format *event_format__new(const char *sys, const char *name);
+struct tep_event *event_format__new(const char *sys, const char *name);

-void perf_evsel__init(struct perf_evsel *evsel,
-	struct perf_event_attr *attr, int idx);
-void perf_evsel__exit(struct perf_evsel *evsel);
-void perf_evsel__delete(struct perf_evsel *evsel);
+void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
+void evsel__exit(struct evsel *evsel);
+void evsel__delete(struct evsel *evsel);

struct callchain_param;

-void perf_evsel__config(struct perf_evsel *evsel,
-	struct record_opts *opts,
-	struct callchain_param *callchain);
-void perf_evsel__config_callchain(struct perf_evsel *evsel,
-	struct record_opts *opts,
-	struct callchain_param *callchain);
+void evsel__config(struct evsel *evsel, struct record_opts *opts,
+	struct callchain_param *callchain);
+void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
+	struct callchain_param *callchain);

-int __perf_evsel__sample_size(u64 sample_type);
-void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
+int __evsel__sample_size(u64 sample_type);
+void evsel__calc_id_pos(struct evsel *evsel);

-bool perf_evsel__is_cache_op_valid(u8 type, u8 op);
+bool evsel__is_cache_op_valid(u8 type, u8 op);

-#define PERF_EVSEL__MAX_ALIASES 8
+#define EVSEL__MAX_ALIASES 8

-extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
-	[PERF_EVSEL__MAX_ALIASES];
-extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
-	[PERF_EVSEL__MAX_ALIASES];
-extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
-	[PERF_EVSEL__MAX_ALIASES];
-extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
-extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
-int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
-	char *bf, size_t size);
-const char *perf_evsel__name(struct perf_evsel *evsel);
+extern const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
+extern const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES];
+extern const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
+extern const char *evsel__hw_names[PERF_COUNT_HW_MAX];
+extern const char *evsel__sw_names[PERF_COUNT_SW_MAX];
+int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
+const char *evsel__name(struct evsel *evsel);

-const char *perf_evsel__group_name(struct perf_evsel *evsel);
-int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
+const char *evsel__group_name(struct evsel *evsel);
+int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);

-int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
-void perf_evsel__close_fd(struct perf_evsel *evsel);
+void __evsel__set_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);
+void __evsel__reset_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);

-void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
-	enum perf_event_sample_format bit);
-void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
-	enum perf_event_sample_format bit);
+#define evsel__set_sample_bit(evsel, bit) \
+	__evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

-#define perf_evsel__set_sample_bit(evsel, bit) \
-	__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
+#define evsel__reset_sample_bit(evsel, bit) \
+	__evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)

-#define perf_evsel__reset_sample_bit(evsel, bit) \
-	__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
+void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);

-void perf_evsel__set_sample_id(struct perf_evsel *evsel,
-	bool use_sample_identifier);
+int evsel__set_filter(struct evsel *evsel, const char *filter);
+int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
+int evsel__append_addr_filter(struct evsel *evsel, const char *filter);
+int evsel__enable_cpu(struct evsel *evsel, int cpu);
+int evsel__enable(struct evsel *evsel);
+int evsel__disable(struct evsel *evsel);
+int evsel__disable_cpu(struct evsel *evsel, int cpu);

-int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter);
-int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter);
-int perf_evsel__append_addr_filter(struct perf_evsel *evsel,
-	const char *filter);
-int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);
-int perf_evsel__enable(struct perf_evsel *evsel);
-int perf_evsel__disable(struct perf_evsel *evsel);
-
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-	struct cpu_map *cpus);
-int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-	struct thread_map *threads);
-int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-	struct thread_map *threads);
-void perf_evsel__close(struct perf_evsel *evsel);
+int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu);
+int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads);
+int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
+	struct perf_thread_map *threads);
+void evsel__close(struct evsel *evsel);

struct perf_sample;

-void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
-	const char *name);
-u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
-	const char *name);
+void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name);
+u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name);

-static inline char *perf_evsel__strval(struct perf_evsel *evsel,
-	struct perf_sample *sample,
-	const char *name)
+static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
-	return perf_evsel__rawptr(evsel, sample, name);
+	return evsel__rawptr(evsel, sample, name);
}

-struct format_field;
+struct tep_format_field;

-u64 format_field__intval(struct format_field *field, struct perf_sample *sample, bool needs_swap);
+u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);

-struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
+struct tep_format_field *evsel__field(struct evsel *evsel, const char *name);

-#define perf_evsel__match(evsel, t, c) \
-	(evsel->attr.type == PERF_TYPE_##t && \
-	 evsel->attr.config == PERF_COUNT_##c)
+#define evsel__match(evsel, t, c) \
+	(evsel->core.attr.type == PERF_TYPE_##t && \
+	 evsel->core.attr.config == PERF_COUNT_##c)

-static inline bool perf_evsel__match2(struct perf_evsel *e1,
-	struct perf_evsel *e2)
+static inline bool evsel__match2(struct evsel *e1, struct evsel *e2)
{
-	return (e1->attr.type == e2->attr.type) &&
-	       (e1->attr.config == e2->attr.config);
+	return (e1->core.attr.type == e2->core.attr.type) &&
+	       (e1->core.attr.config == e2->core.attr.config);
}

-#define perf_evsel__cmp(a, b) \
-	((a) && \
-	 (b) && \
-	 (a)->attr.type == (b)->attr.type && \
-	 (a)->attr.config == (b)->attr.config)
+int evsel__read_counter(struct evsel *evsel, int cpu, int thread);

-int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
-	struct perf_counts_values *count);
-
-int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread);
-
-int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
-	int cpu, int thread, bool scale);
+int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale);

/**
- * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
+ * evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
-static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
-	int cpu, int thread)
+static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread)
{
-	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
+	return __evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
- * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
+ * evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
-static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
-	int cpu, int thread)
+static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu, int thread)
{
-	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
+	return __evsel__read_on_cpu(evsel, cpu, thread, true);
}

-int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
-	struct perf_sample *sample);
+int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
+	struct perf_sample *sample);

-int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
-	union perf_event *event,
-	u64 *timestamp);
+int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
+	u64 *timestamp);

-static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
+static inline struct evsel *evsel__next(struct evsel *evsel)
{
-	return list_entry(evsel->node.next, struct perf_evsel, node);
+	return list_entry(evsel->core.node.next, struct evsel, core.node);
}

-static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
+static inline struct evsel *evsel__prev(struct evsel *evsel)
{
-	return list_entry(evsel->node.prev, struct perf_evsel, node);
+	return list_entry(evsel->core.node.prev, struct evsel, core.node);
}

/**
- * perf_evsel__is_group_leader - Return whether given evsel is a leader event
+ * evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
-static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
+static inline bool evsel__is_group_leader(const struct evsel *evsel)
{
	return evsel->leader == evsel;
}

/**
- * perf_evsel__is_group_event - Return whether given evsel is a group event
+ * evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true iff event group view is enabled and @evsel is a actual group
 * leader which has other members in the group
 */
-static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
+static inline bool evsel__is_group_event(struct evsel *evsel)
{
	if (!symbol_conf.event_group)
		return false;

-	return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
+	return evsel__is_group_leader(evsel) && evsel->core.nr_members > 1;
}

-bool perf_evsel__is_function_event(struct perf_evsel *evsel);
+bool evsel__is_function_event(struct evsel *evsel);

-static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
+static inline bool evsel__is_bpf_output(struct evsel *evsel)
{
-	return perf_evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
+	return evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
}

-static inline bool perf_evsel__is_clock(struct perf_evsel *evsel)
+static inline bool evsel__is_clock(struct evsel *evsel)
{
-	return perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
-	       perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
+	return evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
+	       evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}

-struct perf_attr_details {
-	bool freq;
-	bool verbose;
-	bool event_group;
-	bool force;
-	bool trace_fields;
-};
+bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize);
+int evsel__open_strerror(struct evsel *evsel, struct target *target,
+	int err, char *msg, size_t size);

-int perf_evsel__fprintf(struct perf_evsel *evsel,
-	struct perf_attr_details *details, FILE *fp);
-
-#define EVSEL__PRINT_IP (1<<0)
-#define EVSEL__PRINT_SYM (1<<1)
-#define EVSEL__PRINT_DSO (1<<2)
-#define EVSEL__PRINT_SYMOFFSET (1<<3)
-#define EVSEL__PRINT_ONELINE (1<<4)
-#define EVSEL__PRINT_SRCLINE (1<<5)
-#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
-#define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7)
-#define EVSEL__PRINT_SKIP_IGNORED (1<<8)
-
-struct callchain_cursor;
-
-int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
-	unsigned int print_opts,
-	struct callchain_cursor *cursor, FILE *fp);
-
-int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
-	int left_alignment, unsigned int print_opts,
-	struct callchain_cursor *cursor, FILE *fp);
-
-bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
-	char *msg, size_t msgsize);
-int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
-	int err, char *msg, size_t size);
-
-static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
+static inline int evsel__group_idx(struct evsel *evsel)
{
	return evsel->idx - evsel->leader->idx;
}

/* Iterates group WITHOUT the leader. */
#define for_each_group_member(_evsel, _leader) \
-for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
+for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
     (_evsel) && (_evsel)->leader == (_leader); \
-     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
+     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))

/* Iterates group WITH the leader. */
#define for_each_group_evsel(_evsel, _leader) \
for ((_evsel) = _leader; \
     (_evsel) && (_evsel)->leader == (_leader); \
-     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
+     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))

-static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evsel)
+static inline bool evsel__has_branch_callstack(const struct evsel *evsel)
{
-	return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
+	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}

-static inline bool evsel__has_callchain(const struct perf_evsel *evsel)
+static inline bool evsel__has_branch_hw_idx(const struct evsel *evsel)
{
-	return (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0;
+	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
}

-typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
+static inline bool evsel__has_callchain(const struct evsel *evsel)
+{
+	/*
+	 * For reporting purposes, an evsel sample can have a recorded callchain
+	 * or a callchain synthesized from AUX area data.
+	 */
+	return evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN ||
+	       evsel->synth_sample_type & PERF_SAMPLE_CALLCHAIN;
+}

-int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
-	attr__fprintf_f attr__fprintf, void *priv);
+static inline bool evsel__has_br_stack(const struct evsel *evsel)
+{
+	/*
+	 * For reporting purposes, an evsel sample can have a recorded branch
+	 * stack or a branch stack synthesized from AUX area data.
+	 */
+	return evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK ||
+	       evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
+}

-struct perf_env *perf_evsel__env(struct perf_evsel *evsel);
+static inline bool evsel__is_dummy_event(struct evsel *evsel)
+{
+	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
+	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
+}

+struct perf_env *evsel__env(struct evsel *evsel);
+
+int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
#endif /* __PERF_EVSEL_H */
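
For orientation, the sketch below is a minimal, hypothetical use of the renamed evsel__*() API declared in this header; it is not part of the patch. It opens a software task-clock counter on the current process and reads it back. The libperf thread-map helpers (perf_thread_map__new_dummy, perf_thread_map__set_pid, perf_thread_map__put), the include paths, and the error handling are illustrative assumptions.

/*
 * Hypothetical usage sketch, not part of the patch above.
 */
#include <unistd.h>
#include <linux/perf_event.h>
#include <perf/threadmap.h>
#include "util/evsel.h"

static int count_task_clock(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_TASK_CLOCK,
	};
	struct perf_thread_map *threads;
	struct evsel *evsel;
	int err = -1;

	/* One-entry thread map pointing at the current process (assumption). */
	threads = perf_thread_map__new_dummy();
	if (!threads)
		return -1;
	perf_thread_map__set_pid(threads, 0, getpid());

	evsel = evsel__new(&attr);	/* wraps evsel__new_idx(attr, 0) */
	if (!evsel)
		goto out_threads;

	if (evsel__open_per_thread(evsel, threads) < 0)
		goto out_evsel;

	/* Read back the count for the first (cpu, thread) slot. */
	err = evsel__read_on_cpu(evsel, 0, 0);

	evsel__close(evsel);
out_evsel:
	evsel__delete(evsel);
out_threads:
	perf_thread_map__put(threads);
	return err;
}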