2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/tools/perf/builtin-top.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * builtin-top.c
  *
@@ -14,38 +15,45 @@
  * Wu Fengguang <fengguang.wu@intel.com>
  * Mike Galbraith <efault@gmx.de>
  * Paul Mackerras <paulus@samba.org>
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
 #include "builtin.h"
 
 #include "perf.h"
 
 #include "util/annotate.h"
+#include "util/bpf-event.h"
 #include "util/config.h"
 #include "util/color.h"
-#include "util/drv_configs.h"
+#include "util/dso.h"
 #include "util/evlist.h"
 #include "util/evsel.h"
+#include "util/evsel_config.h"
 #include "util/event.h"
 #include "util/machine.h"
+#include "util/map.h"
+#include "util/mmap.h"
 #include "util/session.h"
-#include "util/symbol.h"
 #include "util/thread.h"
-#include "util/thread_map.h"
+#include "util/symbol.h"
+#include "util/synthetic-events.h"
 #include "util/top.h"
+#include "util/util.h"
 #include <linux/rbtree.h>
 #include <subcmd/parse-options.h>
 #include "util/parse-events.h"
+#include "util/callchain.h"
 #include "util/cpumap.h"
-#include "util/xyarray.h"
 #include "util/sort.h"
+#include "util/string2.h"
 #include "util/term.h"
 #include "util/intlist.h"
 #include "util/parse-branch-options.h"
 #include "arch/common.h"
+#include "ui/ui.h"
 
 #include "util/debug.h"
+#include "util/ordered-events.h"
+#include "util/pfm.h"
 
 #include <assert.h>
 #include <elf.h>
@@ -73,8 +81,10 @@
 #include <linux/stringify.h>
 #include <linux/time64.h>
 #include <linux/types.h>
+#include <linux/err.h>
 
-#include "sane_ctype.h"
+#include <linux/ctype.h>
+#include <perf/mmap.h>
 
 static volatile int done;
 static volatile int resize;
@@ -99,7 +109,7 @@
 
 static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	struct symbol *sym;
 	struct annotation *notes;
 	struct map *map;
@@ -127,7 +137,7 @@
 	notes = symbol__annotation(sym);
 	pthread_mutex_lock(&notes->lock);
 
-	if (!symbol__hists(sym, top->evlist->nr_entries)) {
+	if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
 		pthread_mutex_unlock(&notes->lock);
 		pr_err("Not enough memory for annotating '%s' symbol!\n",
 		       sym->name);
@@ -135,12 +145,12 @@
 		return err;
 	}
 
-	err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL);
+	err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
 	if (err == 0) {
 		top->sym_filter_entry = he;
 	} else {
 		char msg[BUFSIZ];
-		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
+		symbol__strerror_disassemble(&he->ms, err, msg, sizeof(msg));
 		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
 	}
 
@@ -184,7 +194,7 @@
 static void perf_top__record_precise_ip(struct perf_top *top,
 					struct hist_entry *he,
 					struct perf_sample *sample,
-					struct perf_evsel *evsel, u64 ip)
+					struct evsel *evsel, u64 ip)
 {
 	struct annotation *notes;
 	struct symbol *sym = he->ms.sym;
@@ -226,7 +236,7 @@
 static void perf_top__show_details(struct perf_top *top)
 {
 	struct hist_entry *he = top->sym_filter_entry;
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	struct annotation *notes;
 	struct symbol *symbol;
 	int more;
@@ -246,10 +256,10 @@
 	if (notes->src == NULL)
 		goto out_unlock;
 
-	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
+	printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
 	printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
 
-	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts);
+	more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
 
 	if (top->evlist->enabled) {
 		if (top->zero)
@@ -263,20 +273,57 @@
 	pthread_mutex_unlock(&notes->lock);
 }
 
+static void perf_top__resort_hists(struct perf_top *t)
+{
+	struct evlist *evlist = t->evlist;
+	struct evsel *pos;
+
+	evlist__for_each_entry(evlist, pos) {
+		struct hists *hists = evsel__hists(pos);
+
+		/*
+		 * unlink existing entries so that they can be linked
+		 * in a correct order in hists__match() below.
+		 */
+		hists__unlink(hists);
+
+		if (evlist->enabled) {
+			if (t->zero) {
+				hists__delete_entries(hists);
+			} else {
+				hists__decay_entries(hists, t->hide_user_symbols,
+						     t->hide_kernel_symbols);
+			}
+		}
+
+		hists__collapse_resort(hists, NULL);
+
+		/* Non-group events are considered as leader */
+		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
+			struct hists *leader_hists = evsel__hists(pos->leader);
+
+			hists__match(leader_hists, hists);
+			hists__link(leader_hists, hists);
+		}
+	}
+
+	evlist__for_each_entry(evlist, pos) {
+		evsel__output_resort(pos, NULL);
+	}
+}
+
 static void perf_top__print_sym_table(struct perf_top *top)
 {
 	char bf[160];
 	int printed = 0;
 	const int win_width = top->winsize.ws_col - 1;
-	struct perf_evsel *evsel = top->sym_evsel;
+	struct evsel *evsel = top->sym_evsel;
 	struct hists *hists = evsel__hists(evsel);
 
 	puts(CONSOLE_CLEAR);
 
 	perf_top__header_snprintf(top, bf, sizeof(bf));
 	printf("%s\n", bf);
-
-	perf_top__reset_sample_counters(top);
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
 
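The refresh path above has two modes: with zeroing enabled the histograms are dropped outright, otherwise they are decayed so that symbols which stopped sampling fade out over a few refreshes. A minimal standalone sketch of the decay idea, with toy types and a hypothetical decay_entries() helper (perf's hists__decay_entries() walks its rbtree-backed hists and uses its own decay constant; the 7/8 factor below is illustrative):

#include <stdio.h>

struct toy_entry {
	const char *sym;
	unsigned long period;
};

/* scale every entry down per refresh, compacting away dead ones */
static int decay_entries(struct toy_entry *e, int n)
{
	int kept = 0;

	for (int i = 0; i < n; i++) {
		e[i].period = e[i].period * 7 / 8;	/* exponential decay */
		if (e[i].period)			/* drop fully decayed */
			e[kept++] = e[i];
	}
	return kept;
}

int main(void)
{
	struct toy_entry e[] = { { "hot_func", 1024 }, { "cold_func", 1 } };
	int n = decay_entries(e, 2);

	for (int i = 0; i < n; i++)
		printf("%s: %lu\n", e[i].sym, e[i].period);
	return 0;	/* only hot_func survives the refresh */
}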
@@ -296,17 +343,7 @@
 		return;
 	}
 
-	if (top->evlist->enabled) {
-		if (top->zero) {
-			hists__delete_entries(hists);
-		} else {
-			hists__decay_entries(hists, top->hide_user_symbols,
-					     top->hide_kernel_symbols);
-		}
-	}
-
-	hists__collapse_resort(hists, NULL);
-	perf_evsel__output_resort(evsel, NULL);
+	perf_top__resort_hists(top);
 
 	hists__output_recalc_col_len(hists, top->print_entries - printed);
 	putchar('\n');
@@ -371,7 +408,7 @@
 	if (p)
 		*p = 0;
 
-	next = rb_first(&hists->entries);
+	next = rb_first_cached(&hists->entries);
 	while (next) {
 		n = rb_entry(next, struct hist_entry, rb_node);
 		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
@@ -404,8 +441,8 @@
 	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
 	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
 
-	if (top->evlist->nr_entries > 1)
-		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));
+	if (top->evlist->core.nr_entries > 1)
+		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", evsel__name(top->sym_evsel));
 
 	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
 
@@ -439,7 +476,7 @@
 	case 'S':
 		return 1;
 	case 'E':
-		return top->evlist->nr_entries > 1 ? 1 : 0;
+		return top->evlist->core.nr_entries > 1 ? 1 : 0;
 	default:
 		break;
 	}
@@ -485,20 +522,20 @@
 		}
 		break;
 	case 'E':
-		if (top->evlist->nr_entries > 1) {
+		if (top->evlist->core.nr_entries > 1) {
 			/* Select 0 as the default event: */
 			int counter = 0;
 
 			fprintf(stderr, "\nAvailable events:");
 
 			evlist__for_each_entry(top->evlist, top->sym_evsel)
-				fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
+				fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, evsel__name(top->sym_evsel));
 
 			prompt_integer(&counter, "Enter details event counter");
 
-			if (counter >= top->evlist->nr_entries) {
-				top->sym_evsel = perf_evlist__first(top->evlist);
-				fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
+			if (counter >= top->evlist->core.nr_entries) {
+				top->sym_evsel = evlist__first(top->evlist);
+				fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel));
 				sleep(1);
 				break;
 			}
@@ -506,7 +543,7 @@
 			if (top->sym_evsel->idx == counter)
 				break;
 		} else
-			top->sym_evsel = perf_evlist__first(top->evlist);
+			top->sym_evsel = evlist__first(top->evlist);
 		break;
 	case 'f':
 		prompt_integer(&top->count_filter, "Enter display event count filter");
@@ -554,32 +591,25 @@
 static void perf_top__sort_new_samples(void *arg)
 {
 	struct perf_top *t = arg;
-	struct perf_evsel *evsel = t->sym_evsel;
-	struct hists *hists;
-
-	perf_top__reset_sample_counters(t);
 
 	if (t->evlist->selected != NULL)
 		t->sym_evsel = t->evlist->selected;
 
-	hists = evsel__hists(evsel);
+	perf_top__resort_hists(t);
 
-	if (t->evlist->enabled) {
-		if (t->zero) {
-			hists__delete_entries(hists);
-		} else {
-			hists__decay_entries(hists, t->hide_user_symbols,
-					     t->hide_kernel_symbols);
-		}
-	}
+	if (t->lost || t->drop)
+		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C)\n");
+}
 
-	hists__collapse_resort(hists, NULL);
-	perf_evsel__output_resort(evsel, NULL);
+static void stop_top(void)
+{
+	session_done = 1;
+	done = 1;
 }
 
 static void *display_thread_tui(void *arg)
 {
-	struct perf_evsel *pos;
+	struct evsel *pos;
 	struct perf_top *top = arg;
 	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
 	struct hist_browser_timer hbt = {
@@ -587,6 +617,7 @@
 		.arg = top,
 		.refresh = top->delay_secs,
 	};
+	int ret;
 
 	/* In order to read symbols from other namespaces perf to needs to call
 	 * setns(2). This isn't permitted if the struct_fs has multiple users.
@@ -595,11 +626,14 @@
 	 */
 	unshare(CLONE_FS);
 
+	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
+
+repeat:
 	perf_top__sort_new_samples(top);
 
 	/*
 	 * Initialize the uid_filter_str, in the future the TUI will allow
-	 * Zooming in/out UIDs. For now juse use whatever the user passed
+	 * Zooming in/out UIDs. For now just use whatever the user passed
 	 * via --uid.
 	 */
 	evlist__for_each_entry(top->evlist, pos) {
@@ -607,19 +641,24 @@
 		hists->uid_filter_str = top->record_opts.target.uid_str;
 	}
 
-	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
+	ret = perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
 				      top->min_percent,
 				      &top->session->header.env,
 				      !top->record_opts.overwrite,
 				      &top->annotation_opts);
 
-	done = 1;
+	if (ret == K_RELOAD) {
+		top->zero = true;
+		goto repeat;
+	} else
+		stop_top();
+
 	return NULL;
 }
 
 static void display_sig(int sig __maybe_unused)
 {
-	done = 1;
+	stop_top();
 }
 
 static void display_setup_sig(void)
@@ -644,6 +683,8 @@
 	 * that we're observing.
 	 */
 	unshare(CLONE_FS);
+
+	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
 
 	display_setup_sig();
 	pthread__unblock_sigwinch();
@@ -674,7 +715,7 @@
 
 		if (perf_top__handle_keypress(top, c))
 			goto repeat;
-		done = 1;
+		stop_top();
 	}
 }
 
@@ -688,19 +729,20 @@
 {
 	struct perf_top *top = arg;
 	struct hist_entry *he = iter->he;
-	struct perf_evsel *evsel = iter->evsel;
+	struct evsel *evsel = iter->evsel;
 
 	if (perf_hpp_list.sym && single)
 		perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
 
 	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
-			     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
+			     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
+			     NULL);
 	return 0;
 }
 
 static void perf_event__process_sample(struct perf_tool *tool,
 				       const union perf_event *event,
-				       struct perf_evsel *evsel,
+				       struct evsel *evsel,
 				       struct perf_sample *sample,
 				       struct machine *machine)
 {
@@ -734,13 +776,16 @@
 	if (machine__resolve(machine, &al, sample) < 0)
 		return;
 
+	if (top->stitch_lbr)
+		al.thread->lbr_stitch_enable = true;
+
 	if (!machine->kptr_restrict_warned &&
 	    symbol_conf.kptr_restrict &&
 	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
 		if (!perf_evlist__exclude_kernel(top->session->evlist)) {
 			ui__warning(
 "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
-"Check /proc/sys/kernel/kptr_restrict.\n\n"
+"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
 "Kernel%s samples will not be resolved.\n",
 			al.map && map__has_symbols(al.map) ?
 			" modules" : "");
@@ -806,108 +851,82 @@
 	addr_location__put(&al);
 }
 
+static void
+perf_top__process_lost(struct perf_top *top, union perf_event *event,
+		       struct evsel *evsel)
+{
+	struct hists *hists = evsel__hists(evsel);
+
+	top->lost += event->lost.lost;
+	top->lost_total += event->lost.lost;
+	hists->stats.total_lost += event->lost.lost;
+}
+
+static void
+perf_top__process_lost_samples(struct perf_top *top,
+			       union perf_event *event,
+			       struct evsel *evsel)
+{
+	struct hists *hists = evsel__hists(evsel);
+
+	top->lost += event->lost_samples.lost;
+	top->lost_total += event->lost_samples.lost;
+	hists->stats.total_lost_samples += event->lost_samples.lost;
+}
+
+static u64 last_timestamp;
+
 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 {
 	struct record_opts *opts = &top->record_opts;
-	struct perf_evlist *evlist = top->evlist;
-	struct perf_sample sample;
-	struct perf_evsel *evsel;
-	struct perf_mmap *md;
-	struct perf_session *session = top->session;
+	struct evlist *evlist = top->evlist;
+	struct mmap *md;
 	union perf_event *event;
-	struct machine *machine;
-	int ret;
 
 	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
 		return;
 
-	while ((event = perf_mmap__read_event(md)) != NULL) {
-		ret = perf_evlist__parse_sample(evlist, event, &sample);
-		if (ret) {
-			pr_err("Can't parse sample, err = %d\n", ret);
-			goto next_event;
+	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+		int ret;
+
+		ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
+		if (ret && ret != -1)
+			break;
+
+		ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
+		if (ret)
+			break;
+
+		perf_mmap__consume(&md->core);
+
+		if (top->qe.rotate) {
+			pthread_mutex_lock(&top->qe.mutex);
+			top->qe.rotate = false;
+			pthread_cond_signal(&top->qe.cond);
+			pthread_mutex_unlock(&top->qe.mutex);
 		}
-
-		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
-		assert(evsel != NULL);
-
-		if (event->header.type == PERF_RECORD_SAMPLE)
-			++top->samples;
-
-		switch (sample.cpumode) {
-		case PERF_RECORD_MISC_USER:
-			++top->us_samples;
-			if (top->hide_user_symbols)
-				goto next_event;
-			machine = &session->machines.host;
-			break;
-		case PERF_RECORD_MISC_KERNEL:
-			++top->kernel_samples;
-			if (top->hide_kernel_symbols)
-				goto next_event;
-			machine = &session->machines.host;
-			break;
-		case PERF_RECORD_MISC_GUEST_KERNEL:
-			++top->guest_kernel_samples;
-			machine = perf_session__find_machine(session,
-							     sample.pid);
-			break;
-		case PERF_RECORD_MISC_GUEST_USER:
-			++top->guest_us_samples;
-			/*
-			 * TODO: we don't process guest user from host side
-			 * except simple counting.
-			 */
-			goto next_event;
-		default:
-			if (event->header.type == PERF_RECORD_SAMPLE)
-				goto next_event;
-			machine = &session->machines.host;
-			break;
-		}
-
-
-		if (event->header.type == PERF_RECORD_SAMPLE) {
-			perf_event__process_sample(&top->tool, event, evsel,
-						   &sample, machine);
-		} else if (event->header.type < PERF_RECORD_MAX) {
-			hists__inc_nr_events(evsel__hists(evsel), event->header.type);
-			machine__process_event(machine, event, &sample);
-		} else
-			++session->evlist->stats.nr_unknown_events;
-next_event:
-		perf_mmap__consume(md);
 	}
 
-	perf_mmap__read_done(md);
+	perf_mmap__read_done(&md->core);
 }
 
 static void perf_top__mmap_read(struct perf_top *top)
 {
 	bool overwrite = top->record_opts.overwrite;
-	struct perf_evlist *evlist = top->evlist;
-	unsigned long long start, end;
+	struct evlist *evlist = top->evlist;
 	int i;
 
-	start = rdclock();
 	if (overwrite)
 		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
 
-	for (i = 0; i < top->evlist->nr_mmaps; i++)
+	for (i = 0; i < top->evlist->core.nr_mmaps; i++)
 		perf_top__mmap_read_idx(top, i);
 
 	if (overwrite) {
 		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 	}
-	end = rdclock();
-
-	if ((end - start) > (unsigned long long)top->delay_secs * NSEC_PER_SEC)
-		ui__warning("Too slow to read ring buffer.\n"
-			    "Please try increasing the period (-c) or\n"
-			    "decreasing the freq (-F) or\n"
-			    "limiting the number of CPUs (-C)\n");
 }
 
 /*
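The read loop above no longer parses or resolves samples inline: it extracts just the timestamp, queues the raw event, and leaves delivery to a second thread, so a slow resolver can no longer stall the ring buffer. A standalone sketch of the timestamp-ordered queueing step, with toy types (perf's ordered_events__queue() keeps a sorted list similarly and also copies the event when copy-on-queue is set):

#include <stdint.h>
#include <stdio.h>

struct qevent {
	uint64_t timestamp;
	struct qevent *next;
};

/* insert keeping ascending timestamp order, ready to flush in time order */
static void queue_event(struct qevent **head, struct qevent *ev)
{
	while (*head && (*head)->timestamp <= ev->timestamp)
		head = &(*head)->next;
	ev->next = *head;
	*head = ev;
}

int main(void)
{
	struct qevent a = { 300, NULL }, b = { 100, NULL }, c = { 200, NULL };
	struct qevent *q = NULL;

	queue_event(&q, &a);
	queue_event(&q, &b);
	queue_event(&q, &c);
	for (struct qevent *e = q; e; e = e->next)
		printf("%llu\n", (unsigned long long)e->timestamp);
	return 0;	/* prints 100 200 300 */
}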
@@ -930,17 +949,17 @@
 static int perf_top__overwrite_check(struct perf_top *top)
 {
 	struct record_opts *opts = &top->record_opts;
-	struct perf_evlist *evlist = top->evlist;
-	struct perf_evsel_config_term *term;
+	struct evlist *evlist = top->evlist;
+	struct evsel_config_term *term;
 	struct list_head *config_terms;
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	int set, overwrite = -1;
 
 	evlist__for_each_entry(evlist, evsel) {
 		set = -1;
 		config_terms = &evsel->config_terms;
 		list_for_each_entry(term, config_terms, list) {
-			if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
+			if (term->type == EVSEL__CONFIG_TERM_OVERWRITE)
 				set = term->val.overwrite ? 1 : 0;
 		}
@@ -959,7 +978,7 @@
 		/* has term for current event */
 		if ((overwrite < 0) && (set >= 0)) {
 			/* if it's first event, set overwrite */
-			if (evsel == perf_evlist__first(evlist))
+			if (evsel == evlist__first(evlist))
 				overwrite = set;
 			else
 				return -1;
@@ -973,21 +992,21 @@
 }
 
 static int perf_top_overwrite_fallback(struct perf_top *top,
-				       struct perf_evsel *evsel)
+				       struct evsel *evsel)
 {
 	struct record_opts *opts = &top->record_opts;
-	struct perf_evlist *evlist = top->evlist;
-	struct perf_evsel *counter;
+	struct evlist *evlist = top->evlist;
+	struct evsel *counter;
 
 	if (!opts->overwrite)
 		return 0;
 
 	/* only fall back when first event fails */
-	if (evsel != perf_evlist__first(evlist))
+	if (evsel != evlist__first(evlist))
 		return 0;
 
 	evlist__for_each_entry(evlist, counter)
-		counter->attr.write_backward = false;
+		counter->core.attr.write_backward = false;
 	opts->overwrite = false;
 	pr_debug2("fall back to non-overwrite mode\n");
 	return 1;
@@ -996,8 +1015,8 @@
 static int perf_top__start_counters(struct perf_top *top)
 {
 	char msg[BUFSIZ];
-	struct perf_evsel *counter;
-	struct perf_evlist *evlist = top->evlist;
+	struct evsel *counter;
+	struct evlist *evlist = top->evlist;
 	struct record_opts *opts = &top->record_opts;
 
 	if (perf_top__overwrite_check(top)) {
@@ -1010,8 +1029,8 @@
 
 	evlist__for_each_entry(evlist, counter) {
 try_again:
-		if (perf_evsel__open(counter, top->evlist->cpus,
-				     top->evlist->threads) < 0) {
+		if (evsel__open(counter, top->evlist->core.cpus,
+				top->evlist->core.threads) < 0) {
 
 			/*
 			 * Specially handle overwrite fall back.
@@ -1027,20 +1046,19 @@
 			    perf_top_overwrite_fallback(top, counter))
 				goto try_again;
 
-			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
+			if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
 				if (verbose > 0)
 					ui__warning("%s\n", msg);
 				goto try_again;
 			}
 
-			perf_evsel__open_strerror(counter, &opts->target,
-						  errno, msg, sizeof(msg));
+			evsel__open_strerror(counter, &opts->target, errno, msg, sizeof(msg));
 			ui__error("%s\n", msg);
 			goto out_err;
 		}
 	}
 
-	if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
+	if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
 		ui__error("Failed to mmap with %d (%s)\n",
 			  errno, str_error_r(errno, msg, sizeof(msg)));
 		goto out_err;
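The try_again loop falls back step by step when an event cannot be opened. The same pattern shown as a standalone sketch with the raw perf_event_open(2) syscall: prefer hardware cycles, and if the PMU is unavailable fall back to the software task clock (roughly what evsel__fallback() arranges inside perf; the wrapper name below is conventional, not a libc function):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* self, any CPU */
	if (fd < 0) {
		/* e.g. inside a VM without a PMU: fall back to software */
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_TASK_CLOCK;
		fd = perf_event_open(&attr, 0, -1, -1, 0);
	}
	if (fd >= 0) {
		printf("opened counter fd %ld\n", fd);
		close(fd);
	} else {
		perror("perf_event_open");
	}
	return 0;
}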
@@ -1054,12 +1072,7 @@
 
 static int callchain_param__setup_sample_type(struct callchain_param *callchain)
 {
-	if (!perf_hpp_list.sym) {
-		if (callchain->enabled) {
-			ui__error("Selected -g but \"sym\" not present in --sort/-s.");
-			return -EINVAL;
-		}
-	} else if (callchain->mode != CHAIN_NONE) {
+	if (callchain->mode != CHAIN_NONE) {
 		if (callchain_register_param(callchain) < 0) {
 			ui__error("Can't register callchain params.\n");
 			return -EINVAL;
@@ -1069,40 +1082,202 @@
 	return 0;
 }
 
+static struct ordered_events *rotate_queues(struct perf_top *top)
+{
+	struct ordered_events *in = top->qe.in;
+
+	if (top->qe.in == &top->qe.data[1])
+		top->qe.in = &top->qe.data[0];
+	else
+		top->qe.in = &top->qe.data[1];
+
+	return in;
+}
+
+static void *process_thread(void *arg)
+{
+	struct perf_top *top = arg;
+
+	while (!done) {
+		struct ordered_events *out, *in = top->qe.in;
+
+		if (!in->nr_events) {
+			usleep(100);
+			continue;
+		}
+
+		out = rotate_queues(top);
+
+		pthread_mutex_lock(&top->qe.mutex);
+		top->qe.rotate = true;
+		pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
+		pthread_mutex_unlock(&top->qe.mutex);
+
+		if (ordered_events__flush(out, OE_FLUSH__TOP))
+			pr_err("failed to process events\n");
+	}
+
+	return NULL;
+}
+
+/*
+ * Allow only 'top->delay_secs' seconds behind samples.
+ */
+static int should_drop(struct ordered_event *qevent, struct perf_top *top)
+{
+	union perf_event *event = qevent->event;
+	u64 delay_timestamp;
+
+	if (event->header.type != PERF_RECORD_SAMPLE)
+		return false;
+
+	delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
+	return delay_timestamp < last_timestamp;
+}
+
+static int deliver_event(struct ordered_events *qe,
+			 struct ordered_event *qevent)
+{
+	struct perf_top *top = qe->data;
+	struct evlist *evlist = top->evlist;
+	struct perf_session *session = top->session;
+	union perf_event *event = qevent->event;
+	struct perf_sample sample;
+	struct evsel *evsel;
+	struct machine *machine;
+	int ret = -1;
+
+	if (should_drop(qevent, top)) {
+		top->drop++;
+		top->drop_total++;
+		return 0;
+	}
+
+	ret = perf_evlist__parse_sample(evlist, event, &sample);
+	if (ret) {
+		pr_err("Can't parse sample, err = %d\n", ret);
+		goto next_event;
+	}
+
+	evsel = perf_evlist__id2evsel(session->evlist, sample.id);
+	assert(evsel != NULL);
+
+	if (event->header.type == PERF_RECORD_SAMPLE) {
+		if (evswitch__discard(&top->evswitch, evsel))
+			return 0;
+		++top->samples;
+	}
+
+	switch (sample.cpumode) {
+	case PERF_RECORD_MISC_USER:
+		++top->us_samples;
+		if (top->hide_user_symbols)
+			goto next_event;
+		machine = &session->machines.host;
+		break;
+	case PERF_RECORD_MISC_KERNEL:
+		++top->kernel_samples;
+		if (top->hide_kernel_symbols)
+			goto next_event;
+		machine = &session->machines.host;
+		break;
+	case PERF_RECORD_MISC_GUEST_KERNEL:
+		++top->guest_kernel_samples;
+		machine = perf_session__find_machine(session,
+						     sample.pid);
+		break;
+	case PERF_RECORD_MISC_GUEST_USER:
+		++top->guest_us_samples;
+		/*
+		 * TODO: we don't process guest user from host side
+		 * except simple counting.
+		 */
+		goto next_event;
+	default:
+		if (event->header.type == PERF_RECORD_SAMPLE)
+			goto next_event;
+		machine = &session->machines.host;
+		break;
+	}
+
+	if (event->header.type == PERF_RECORD_SAMPLE) {
+		perf_event__process_sample(&top->tool, event, evsel,
+					   &sample, machine);
+	} else if (event->header.type == PERF_RECORD_LOST) {
+		perf_top__process_lost(top, event, evsel);
+	} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
+		perf_top__process_lost_samples(top, event, evsel);
+	} else if (event->header.type < PERF_RECORD_MAX) {
+		hists__inc_nr_events(evsel__hists(evsel), event->header.type);
+		machine__process_event(machine, event, &sample);
+	} else
+		++session->evlist->stats.nr_unknown_events;
+
+	ret = 0;
+next_event:
+	return ret;
+}
+
+static void init_process_thread(struct perf_top *top)
+{
+	ordered_events__init(&top->qe.data[0], deliver_event, top);
+	ordered_events__init(&top->qe.data[1], deliver_event, top);
+	ordered_events__set_copy_on_queue(&top->qe.data[0], true);
+	ordered_events__set_copy_on_queue(&top->qe.data[1], true);
+	top->qe.in = &top->qe.data[0];
+	pthread_mutex_init(&top->qe.mutex, NULL);
+	pthread_cond_init(&top->qe.cond, NULL);
+}
+
 static int __cmd_top(struct perf_top *top)
 {
-	char msg[512];
-	struct perf_evsel *pos;
-	struct perf_evsel_config_term *err_term;
-	struct perf_evlist *evlist = top->evlist;
 	struct record_opts *opts = &top->record_opts;
-	pthread_t thread;
+	pthread_t thread, thread_process;
 	int ret;
-
-	top->session = perf_session__new(NULL, false, NULL);
-	if (top->session == NULL)
-		return -1;
 
 	if (!top->annotation_opts.objdump_path) {
 		ret = perf_env__lookup_objdump(&top->session->header.env,
 					       &top->annotation_opts.objdump_path);
 		if (ret)
-			goto out_delete;
+			return ret;
 	}
 
 	ret = callchain_param__setup_sample_type(&callchain_param);
 	if (ret)
-		goto out_delete;
+		return ret;
 
 	if (perf_session__register_idle_thread(top->session) < 0)
-		goto out_delete;
+		return ret;
 
 	if (top->nr_threads_synthesize > 1)
 		perf_set_multithreaded();
 
+	init_process_thread(top);
+
+	if (opts->record_namespaces)
+		top->tool.namespace_events = true;
+	if (opts->record_cgroup) {
+#ifdef HAVE_FILE_HANDLE
+		top->tool.cgroup_events = true;
+#else
+		pr_err("cgroup tracking is not supported.\n");
+		return -1;
+#endif
+	}
+
+	ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
+						&top->session->machines.host,
+						&top->record_opts);
+	if (ret < 0)
+		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
+
+	ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process,
+					     &top->session->machines.host);
+	if (ret < 0)
+		pr_debug("Couldn't synthesize cgroup events.\n");
+
 	machine__synthesize_threads(&top->session->machines.host, &opts->target,
-				    top->evlist->threads, false,
-				    opts->proc_map_timeout,
+				    top->evlist->core.threads, false,
 				    top->nr_threads_synthesize);
 
@@ -1110,21 +1285,18 @@
 
 	if (perf_hpp_list.socket) {
 		ret = perf_env__read_cpu_topology_map(&perf_env);
-		if (ret < 0)
-			goto out_err_cpu_topo;
+		if (ret < 0) {
+			char errbuf[BUFSIZ];
+			const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
+
+			ui__error("Could not read the CPU topology map: %s\n", err);
+			return ret;
+		}
 	}
 
 	ret = perf_top__start_counters(top);
 	if (ret)
-		goto out_delete;
-
-	ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
-	if (ret) {
-		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
-		       err_term->val.drv_cfg, perf_evsel__name(pos), errno,
-		       str_error_r(errno, msg, sizeof(msg)));
-		goto out_delete;
-	}
+		return ret;
 
 	top->session->evlist = top->evlist;
 	perf_session__set_id_hdr_size(top->session);
@@ -1138,18 +1310,18 @@
 	 * so leave the check here.
 	 */
 	if (!target__none(&opts->target))
-		perf_evlist__enable(top->evlist);
-
-	/* Wait for a minimal set of events before starting the snapshot */
-	perf_evlist__poll(top->evlist, 100);
-
-	perf_top__mmap_read(top);
+		evlist__enable(top->evlist);
 
 	ret = -1;
+	if (pthread_create(&thread_process, NULL, process_thread, top)) {
+		ui__error("Could not create process thread.\n");
+		return ret;
+	}
+
 	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
 							    display_thread), top)) {
 		ui__error("Could not create display thread.\n");
-		goto out_delete;
+		goto out_join_thread;
 	}
 
 	if (top->realtime_prio) {
@@ -1162,13 +1334,18 @@
 		}
 	}
 
+	/* Wait for a minimal set of events before starting the snapshot */
+	evlist__poll(top->evlist, 100);
+
+	perf_top__mmap_read(top);
+
 	while (!done) {
 		u64 hits = top->samples;
 
 		perf_top__mmap_read(top);
 
 		if (opts->overwrite || (hits == top->samples))
-			ret = perf_evlist__poll(top->evlist, 100);
+			ret = evlist__poll(top->evlist, 100);
 
 		if (resize) {
 			perf_top__resize(top);
@@ -1179,19 +1356,10 @@
 	ret = 0;
 out_join:
 	pthread_join(thread, NULL);
-out_delete:
-	perf_session__delete(top->session);
-	top->session = NULL;
-
+out_join_thread:
+	pthread_cond_signal(&top->qe.cond);
+	pthread_join(thread_process, NULL);
 	return ret;
-
-out_err_cpu_topo: {
-	char errbuf[BUFSIZ];
-	const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
-
-	ui__error("Could not read the CPU topology map: %s\n", err);
-	goto out_delete;
-}
 }
 
 static int
@@ -1262,8 +1430,16 @@
 			.target = {
 				.uses_mmap = true,
 			},
-			.proc_map_timeout = 500,
-			.overwrite = 1,
+			/*
+			 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
+			 * when we pause, fix that and reenable. Probably using a
+			 * separate evlist with a dummy event, i.e. a non-overwrite
+			 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
+			 * stays in overwrite mode. -acme
+			 * */
+			.overwrite = 0,
+			.sample_time = true,
+			.sample_time_set = true,
 		},
 		.max_stack = sysctl__max_stack(),
 		.annotation_opts = annotation__default_options,
@@ -1288,6 +1464,8 @@
 		   "file", "vmlinux pathname"),
 	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
 		    "don't load vmlinux even if found"),
+	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+		   "file", "kallsyms pathname"),
 	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
 		    "hide kernel symbols"),
 	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
@@ -1354,10 +1532,15 @@
 		    "Display raw encoding of assembly instructions (default)"),
 	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
 		    "Enable kernel symbol demangling"),
+	OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
 	OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
 		    "objdump binary to use for disassembly and annotations"),
 	OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
 		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
+	OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
+		    "Add prefix to source file path names in programs (with --prefix-strip)"),
+	OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N",
+		    "Strip first N entries of source file path name in programs (with --prefix)"),
 	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
 	OPT_CALLBACK(0, "percent-limit", &top, "percent",
 		     "Don't show entries under that percent", parse_percent_limit),
@@ -1366,7 +1549,7 @@
 	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
 		   "width[,width...]",
 		   "don't try to adjust column width, use these fixed values"),
-	OPT_UINTEGER(0, "proc-map-timeout", &opts->proc_map_timeout,
+	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
 		     "per thread proc mmap processing timeout in ms"),
 	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
 			   "branch any", "sample any taken branches",
@@ -1378,9 +1561,27 @@
 		    "Show raw trace event output (do not use print fmt or plugins)"),
 	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
 		    "Show entries in a hierarchy"),
+	OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
+		    "Use a backward ring buffer, default: no"),
 	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
 	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
 			"number of thread to run event synthesize"),
+	OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
+		    "Record namespaces events"),
+	OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
+		    "Record cgroup events"),
+	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
+		    "Sort the output by the event at the index n in group. "
+		    "If n is invalid, sort by the first event. "
+		    "WARNING: should be used on grouped events."),
+	OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
+		    "Enable LBR callgraph stitching approach"),
+#ifdef HAVE_LIBPFM
+	OPT_CALLBACK(0, "pfm-events", &top.evlist, "event",
+		     "libpfm4 event selector. use 'perf list' to list available events",
+		     parse_libpfm_events_option),
+#endif
+	OPTS_EVSWITCH(&top.evswitch),
 	OPT_END()
 	};
 	const char * const top_usage[] = {
@@ -1395,23 +1596,45 @@
 	top.annotation_opts.min_pcnt = 5;
 	top.annotation_opts.context = 4;
 
-	top.evlist = perf_evlist__new();
+	top.evlist = evlist__new();
 	if (top.evlist == NULL)
 		return -ENOMEM;
 
 	status = perf_config(perf_top_config, &top);
 	if (status)
 		return status;
+	/*
+	 * Since the per arch annotation init routine may need the cpuid, read
+	 * it here, since we are not getting this from the perf.data header.
+	 */
+	status = perf_env__read_cpuid(&perf_env);
+	if (status) {
+		/*
+		 * Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
+		 * warn the user explicitely.
+		 */
+		eprintf(status == ENOSYS ? 1 : 0, verbose,
+			"Couldn't read the cpuid for this machine: %s\n",
+			str_error_r(errno, errbuf, sizeof(errbuf)));
+	}
+	top.evlist->env = &perf_env;
 
 	argc = parse_options(argc, argv, options, top_usage, 0);
 	if (argc)
 		usage_with_options(top_usage, options);
 
-	if (!top.evlist->nr_entries &&
-	    perf_evlist__add_default(top.evlist) < 0) {
+	if (annotate_check_args(&top.annotation_opts) < 0)
+		goto out_delete_evlist;
+
+	if (!top.evlist->core.nr_entries &&
+	    evlist__add_default(top.evlist) < 0) {
 		pr_err("Not enough memory for event selector list\n");
 		goto out_delete_evlist;
 	}
+
+	status = evswitch__init(&top.evswitch, top.evlist, stderr);
+	if (status)
+		goto out_delete_evlist;
 
 	if (symbol_conf.report_hierarchy) {
 		/* disable incompatible options */
@@ -1425,6 +1648,14 @@
 			goto out_delete_evlist;
 		}
 	}
+
+	if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
+		pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n");
+		goto out_delete_evlist;
+	}
+
+	if (opts->branch_stack && callchain_param.enabled)
+		symbol_conf.show_branchflag_count = true;
 
 	sort__mode = SORT_MODE__TOP;
 	/* display thread wants entries to be collapsed in a different tree */
@@ -1480,7 +1711,7 @@
 		goto out_delete_evlist;
 	}
 
-	top.sym_evsel = perf_evlist__first(top.evlist);
+	top.sym_evsel = evlist__first(top.evlist);
 
 	if (!callchain_param.enabled) {
 		symbol_conf.cumulate_callchain = false;
@@ -1494,7 +1725,7 @@
 	if (status < 0)
 		goto out_delete_evlist;
 
-	annotation_config__init();
+	annotation_config__init(&top.annotation_opts);
 
 	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
 	status = symbol__init(NULL);
@@ -1509,10 +1740,42 @@
 		signal(SIGWINCH, winch_sig);
 	}
 
+	top.session = perf_session__new(NULL, false, NULL);
+	if (IS_ERR(top.session)) {
+		status = PTR_ERR(top.session);
+		top.session = NULL;
+		goto out_delete_evlist;
+	}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+	if (!top.record_opts.no_bpf_event) {
+		top.sb_evlist = evlist__new();
+
+		if (top.sb_evlist == NULL) {
+			pr_err("Couldn't create side band evlist.\n.");
+			goto out_delete_evlist;
+		}
+
+		if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
+			pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
+			goto out_delete_evlist;
+		}
+	}
+#endif
+
+	if (perf_evlist__start_sb_thread(top.sb_evlist, target)) {
+		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+		opts->no_bpf_event = true;
+	}
+
 	status = __cmd_top(&top);
 
+	if (!opts->no_bpf_event)
+		perf_evlist__stop_sb_thread(top.sb_evlist);
+
 out_delete_evlist:
-	perf_evlist__delete(top.evlist);
+	evlist__delete(top.evlist);
+	perf_session__delete(top.session);
 
 	return status;
 }
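perf_session__new() now reports failure through the returned pointer itself, hence the IS_ERR()/PTR_ERR() handling above instead of a NULL check. A standalone sketch of that kernel idiom, reimplementing the three helpers locally (in the perf tree they come from <linux/err.h>, added to the includes earlier in this patch; session_new() below is a toy stand-in): error codes live in the top page of the address space, so a single return value carries either a valid pointer or a negative errno.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;		/* encode -errno as a pointer */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;		/* decode it back */
}

static inline int IS_ERR(const void *ptr)
{
	/* the last 4095 values of the address space are reserved */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *session_new(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* no out-parameter needed */
	return malloc(16);
}

int main(void)
{
	void *s = session_new(1);

	if (IS_ERR(s)) {
		printf("session_new failed: %ld\n", PTR_ERR(s));
		s = NULL;
	} else {
		free(s);
	}
	return 0;
}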