2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/tools/perf/util/unwind-libunwind.c
@@ -1,29 +1,33 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "unwind.h"
+#include "dso.h"
+#include "map.h"
 #include "thread.h"
 #include "session.h"
 #include "debug.h"
 #include "env.h"
+#include "callchain.h"
 
 struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
 struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
 struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
 
-static void unwind__register_ops(struct thread *thread,
-				 struct unwind_libunwind_ops *ops)
+static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
 {
-	thread->unwind_libunwind_ops = ops;
+	maps->unwind_libunwind_ops = ops;
 }
 
-int unwind__prepare_access(struct thread *thread, struct map *map,
-			   bool *initialized)
+int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
 {
 	const char *arch;
 	enum dso_type dso_type;
 	struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
 	int err;
 
-	if (thread->addr_space) {
+	if (!dwarf_callchain_users)
+		return 0;
+
+	if (maps->addr_space) {
 		pr_debug("unwind: thread map already set, dso=%s\n",
 			 map->dso->name);
 		if (initialized)
@@ -32,14 +36,14 @@
 	}
 
 	/* env->arch is NULL for live-mode (i.e. perf top) */
-	if (!thread->mg->machine->env || !thread->mg->machine->env->arch)
+	if (!maps->machine->env || !maps->machine->env->arch)
 		goto out_register;
 
-	dso_type = dso__type(map->dso, thread->mg->machine);
+	dso_type = dso__type(map->dso, maps->machine);
 	if (dso_type == DSO__TYPE_UNKNOWN)
 		return 0;
 
-	arch = perf_env__arch(thread->mg->machine->env);
+	arch = perf_env__arch(maps->machine->env);
 
 	if (!strcmp(arch, "x86")) {
 		if (dso_type != DSO__TYPE_64BIT)
@@ -54,31 +58,31 @@
 		return 0;
 	}
 out_register:
-	unwind__register_ops(thread, ops);
+	unwind__register_ops(maps, ops);
 
-	err = thread->unwind_libunwind_ops->prepare_access(thread);
+	err = maps->unwind_libunwind_ops->prepare_access(maps);
 	if (initialized)
 		*initialized = err ? false : true;
 	return err;
 }
 
-void unwind__flush_access(struct thread *thread)
+void unwind__flush_access(struct maps *maps)
 {
-	if (thread->unwind_libunwind_ops)
-		thread->unwind_libunwind_ops->flush_access(thread);
+	if (maps->unwind_libunwind_ops)
+		maps->unwind_libunwind_ops->flush_access(maps);
 }
 
-void unwind__finish_access(struct thread *thread)
+void unwind__finish_access(struct maps *maps)
 {
-	if (thread->unwind_libunwind_ops)
-		thread->unwind_libunwind_ops->finish_access(thread);
+	if (maps->unwind_libunwind_ops)
+		maps->unwind_libunwind_ops->finish_access(maps);
 }
 
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data, int max_stack)
 {
-	if (thread->unwind_libunwind_ops)
-		return thread->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+	if (thread->maps->unwind_libunwind_ops)
+		return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
 	return 0;
 }
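
For reference, a minimal sketch (not part of this patch) of how a caller might adopt the maps-based API shown above. The function name hypothetical_on_map_insert and the exact call site are assumptions for illustration only; unwind__prepare_access(), thread->maps and the initialized flag follow the signatures in the diff.

/*
 * Illustrative only: hypothetical call site for the maps-based unwind API.
 * Real callers live elsewhere in tools/perf; only the unwind__* signatures
 * here are taken from the patch above.
 */
#include <stdbool.h>

#include "map.h"
#include "thread.h"
#include "unwind.h"

static int hypothetical_on_map_insert(struct thread *thread, struct map *map)
{
	bool initialized = false;
	int err;

	/* The ops pointer now hangs off the shared struct maps, not the thread. */
	err = unwind__prepare_access(thread->maps, map, &initialized);
	if (err)
		return err;

	/* Per-sample unwinding still goes through the thread. */
	return 0;
}

Because the ops pointer and address space now live on struct maps, threads that share an address space end up sharing one unwind state rather than each thread carrying its own, which appears to be the motivation for moving these fields off struct thread. The early return when dwarf_callchain_users is zero also skips the setup entirely when no one requested DWARF callchains.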