2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/tools/perf/util/symbol.c
@@ -4,8 +4,11 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
+#include <linux/capability.h>
 #include <linux/kernel.h>
 #include <linux/mman.h>
+#include <linux/string.h>
+#include <linux/time64.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/param.h>
@@ -14,16 +17,24 @@
 #include <inttypes.h>
 #include "annotate.h"
 #include "build-id.h"
-#include "util.h"
+#include "cap.h"
+#include "dso.h"
+#include "util.h" // lsdir()
 #include "debug.h"
+#include "event.h"
 #include "machine.h"
+#include "map.h"
 #include "symbol.h"
+#include "map_symbol.h"
+#include "mem-events.h"
+#include "symsrc.h"
 #include "strlist.h"
 #include "intlist.h"
 #include "namespaces.h"
 #include "header.h"
 #include "path.h"
-#include "sane_ctype.h"
+#include <linux/ctype.h>
+#include <linux/zalloc.h>

 #include <elf.h>
 #include <limits.h>
@@ -38,15 +49,18 @@
 char **vmlinux_path;

 struct symbol_conf symbol_conf = {
+	.nanosecs		= false,
 	.use_modules		= true,
 	.try_vmlinux_path	= true,
 	.demangle		= true,
 	.demangle_kernel	= false,
 	.cumulate_callchain	= true,
+	.time_quantum		= 100 * NSEC_PER_MSEC, /* 100ms */
 	.show_hist_headers	= true,
 	.symfs			= "",
 	.event_group		= true,
 	.inline_name		= true,
+	.res_sample		= 0,
 };

 static enum dso_binary_type binary_type_symtab[] = {
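
The new .time_quantum default (100 * NSEC_PER_MSEC) is the granularity perf's time-based sorting rounds sample timestamps to. A minimal sketch of that rounding, with an illustrative helper name rather than perf's actual code:

#include <linux/time64.h>	/* NSEC_PER_MSEC */
#include <linux/types.h>

/* Illustrative only: round a sample timestamp down to the configured quantum. */
static u64 time_quantum__bucket(u64 timestamp, u64 quantum)
{
	return quantum ? timestamp - (timestamp % quantum) : timestamp;
}

With the 100ms default above, samples at 12345ms and 12399ms fall into the same bucket.
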
@@ -85,11 +99,6 @@
 	tail++;

 	return tail - str;
-}
-
-void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
-{
-	p->end = c->start;
 }

 const char * __weak arch__normalize_symbol_name(const char *name)
@@ -169,7 +178,7 @@
 	return arch__choose_best_symbol(syma, symb);
 }

-void symbols__fixup_duplicate(struct rb_root *symbols)
+void symbols__fixup_duplicate(struct rb_root_cached *symbols)
 {
 	struct rb_node *nd;
 	struct symbol *curr, *next;
@@ -177,7 +186,7 @@
 	if (symbol_conf.allow_aliases)
 		return;

-	nd = rb_first(symbols);
+	nd = rb_first_cached(symbols);

 	while (nd) {
 		curr = rb_entry(nd, struct symbol, rb_node);
@@ -192,20 +201,21 @@
 			continue;

 		if (choose_best_symbol(curr, next) == SYMBOL_A) {
-			rb_erase(&next->rb_node, symbols);
+			rb_erase_cached(&next->rb_node, symbols);
 			symbol__delete(next);
 			goto again;
 		} else {
 			nd = rb_next(&curr->rb_node);
-			rb_erase(&curr->rb_node, symbols);
+			rb_erase_cached(&curr->rb_node, symbols);
 			symbol__delete(curr);
 		}
 	}
 }

-void symbols__fixup_end(struct rb_root *symbols)
+/* Update zero-sized symbols using the address of the next symbol */
+void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
 {
-	struct rb_node *nd, *prevnd = rb_first(symbols);
+	struct rb_node *nd, *prevnd = rb_first_cached(symbols);
 	struct symbol *curr, *prev;

 	if (prevnd == NULL)
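
The rb_root to rb_root_cached conversion above follows the kernel's cached-rbtree pattern: track at insert time whether the new node went left the whole way down, so rb_first_cached() becomes O(1). A minimal sketch of that pattern with a hypothetical item type (the patch does the same thing inside __symbols__insert() and symbols__insert_by_name()):

#include <linux/rbtree.h>
#include <linux/types.h>
#include <stdbool.h>

struct item {
	struct rb_node rb_node;
	u64 key;
};

static void items__insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct item *it = rb_entry(*p, struct item, rb_node);

		parent = *p;
		if (new->key < it->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* took a right turn, so not the new minimum */
		}
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color_cached(&new->rb_node, root, leftmost);
}
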
@@ -217,8 +227,29 @@
 		prev = curr;
 		curr = rb_entry(nd, struct symbol, rb_node);

-		if (prev->end == prev->start && prev->end != curr->start)
-			arch__symbols__fixup_end(prev, curr);
+		/*
+		 * On some architecture kernel text segment start is located at
+		 * some low memory address, while modules are located at high
+		 * memory addresses (or vice versa). The gap between end of
+		 * kernel text segment and beginning of first module's text
+		 * segment is very big. Therefore do not fill this gap and do
+		 * not assign it to the kernel dso map (kallsyms).
+		 *
+		 * In kallsyms, it determines module symbols using '[' character
+		 * like in:
+		 *   ffffffffc1937000 T hdmi_driver_init     [snd_hda_codec_hdmi]
+		 */
+		if (prev->end == prev->start) {
+			/* Last kernel/module symbol mapped to end of page */
+			if (is_kallsyms && (!strchr(prev->name, '[') !=
+					    !strchr(curr->name, '[')))
+				prev->end = roundup(prev->end + 4096, 4096);
+			else
+				prev->end = curr->start;
+
+			pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
+				  __func__, prev->name, prev->end);
+		}
 	}

 	/* Last entry */
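
The new is_kallsyms branch relies on kallsyms marking module symbols with a bracketed module name (kept after a tab in the parsed symbol name). A small sketch of just that boundary test, with made-up entry strings:

#include <stdbool.h>
#include <string.h>

/*
 * Two adjacent kallsyms entries straddle the kernel/module boundary exactly
 * when one name carries a "[module]" suffix and the other does not, which is
 * what the !strchr(...) != !strchr(...) comparison above checks.
 */
static bool crosses_module_boundary(const char *prev_name, const char *curr_name)
{
	return !strchr(prev_name, '[') != !strchr(curr_name, '[');
}

/* crosses_module_boundary("native_safe_halt", "hdmi_driver_init\t[snd_hda_codec_hdmi]") == true */
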
@@ -226,31 +257,26 @@
 		curr->end = roundup(curr->start, 4096) + 4096;
 }

-void map_groups__fixup_end(struct map_groups *mg)
+void maps__fixup_end(struct maps *maps)
 {
-	struct maps *maps = &mg->maps;
-	struct map *next, *curr;
+	struct map *prev = NULL, *curr;

 	down_write(&maps->lock);

-	curr = maps__first(maps);
-	if (curr == NULL)
-		goto out_unlock;
+	maps__for_each_entry(maps, curr) {
+		if (prev != NULL && !prev->end)
+			prev->end = curr->start;

-	for (next = map__next(curr); next; next = map__next(curr)) {
-		if (!curr->end)
-			curr->end = next->start;
-		curr = next;
+		prev = curr;
 	}

 	/*
 	 * We still haven't the actual symbols, so guess the
 	 * last map final address.
 	 */
-	if (!curr->end)
+	if (curr && !curr->end)
 		curr->end = ~0ULL;

-out_unlock:
 	up_write(&maps->lock);
 }

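
maps__fixup_end() now makes a single pass over the address-sorted maps, closing each open-ended map at the start of its successor. The same idea on a plain array of ranges, as an illustration (struct range is not a perf type):

#include <stddef.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* end == 0 means "not known yet" */

static void ranges__fixup_end(struct range *r, size_t n)
{
	for (size_t i = 1; i < n; i++)
		if (!r[i - 1].end)
			r[i - 1].end = r[i].start;

	/* last entry has no successor, so guess, like the ~0ULL above */
	if (n && !r[n - 1].end)
		r[n - 1].end = UINT64_MAX;
}
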
....@@ -288,25 +314,27 @@
288314 free(((void *)sym) - symbol_conf.priv_size);
289315 }
290316
291
-void symbols__delete(struct rb_root *symbols)
317
+void symbols__delete(struct rb_root_cached *symbols)
292318 {
293319 struct symbol *pos;
294
- struct rb_node *next = rb_first(symbols);
320
+ struct rb_node *next = rb_first_cached(symbols);
295321
296322 while (next) {
297323 pos = rb_entry(next, struct symbol, rb_node);
298324 next = rb_next(&pos->rb_node);
299
- rb_erase(&pos->rb_node, symbols);
325
+ rb_erase_cached(&pos->rb_node, symbols);
300326 symbol__delete(pos);
301327 }
302328 }
303329
304
-void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
330
+void __symbols__insert(struct rb_root_cached *symbols,
331
+ struct symbol *sym, bool kernel)
305332 {
306
- struct rb_node **p = &symbols->rb_node;
333
+ struct rb_node **p = &symbols->rb_root.rb_node;
307334 struct rb_node *parent = NULL;
308335 const u64 ip = sym->start;
309336 struct symbol *s;
337
+ bool leftmost = true;
310338
311339 if (kernel) {
312340 const char *name = sym->name;
....@@ -324,26 +352,28 @@
324352 s = rb_entry(parent, struct symbol, rb_node);
325353 if (ip < s->start)
326354 p = &(*p)->rb_left;
327
- else
355
+ else {
328356 p = &(*p)->rb_right;
357
+ leftmost = false;
358
+ }
329359 }
330360 rb_link_node(&sym->rb_node, parent, p);
331
- rb_insert_color(&sym->rb_node, symbols);
361
+ rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
332362 }
333363
334
-void symbols__insert(struct rb_root *symbols, struct symbol *sym)
364
+void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
335365 {
336366 __symbols__insert(symbols, sym, false);
337367 }
338368
339
-static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
369
+static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
340370 {
341371 struct rb_node *n;
342372
343373 if (symbols == NULL)
344374 return NULL;
345375
346
- n = symbols->rb_node;
376
+ n = symbols->rb_root.rb_node;
347377
348378 while (n) {
349379 struct symbol *s = rb_entry(n, struct symbol, rb_node);
....@@ -359,9 +389,9 @@
359389 return NULL;
360390 }
361391
362
-static struct symbol *symbols__first(struct rb_root *symbols)
392
+static struct symbol *symbols__first(struct rb_root_cached *symbols)
363393 {
364
- struct rb_node *n = rb_first(symbols);
394
+ struct rb_node *n = rb_first_cached(symbols);
365395
366396 if (n)
367397 return rb_entry(n, struct symbol, rb_node);
....@@ -369,9 +399,9 @@
369399 return NULL;
370400 }
371401
372
-static struct symbol *symbols__last(struct rb_root *symbols)
402
+static struct symbol *symbols__last(struct rb_root_cached *symbols)
373403 {
374
- struct rb_node *n = rb_last(symbols);
404
+ struct rb_node *n = rb_last(&symbols->rb_root);
375405
376406 if (n)
377407 return rb_entry(n, struct symbol, rb_node);
....@@ -389,11 +419,12 @@
389419 return NULL;
390420 }
391421
392
-static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
422
+static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
393423 {
394
- struct rb_node **p = &symbols->rb_node;
424
+ struct rb_node **p = &symbols->rb_root.rb_node;
395425 struct rb_node *parent = NULL;
396426 struct symbol_name_rb_node *symn, *s;
427
+ bool leftmost = true;
397428
398429 symn = container_of(sym, struct symbol_name_rb_node, sym);
399430
....@@ -402,19 +433,21 @@
402433 s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
403434 if (strcmp(sym->name, s->sym.name) < 0)
404435 p = &(*p)->rb_left;
405
- else
436
+ else {
406437 p = &(*p)->rb_right;
438
+ leftmost = false;
439
+ }
407440 }
408441 rb_link_node(&symn->rb_node, parent, p);
409
- rb_insert_color(&symn->rb_node, symbols);
442
+ rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
410443 }
411444
412
-static void symbols__sort_by_name(struct rb_root *symbols,
413
- struct rb_root *source)
445
+static void symbols__sort_by_name(struct rb_root_cached *symbols,
446
+ struct rb_root_cached *source)
414447 {
415448 struct rb_node *nd;
416449
417
- for (nd = rb_first(source); nd; nd = rb_next(nd)) {
450
+ for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
418451 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
419452 symbols__insert_by_name(symbols, pos);
420453 }
....@@ -437,7 +470,7 @@
437470 return arch__compare_symbol_names(name, str);
438471 }
439472
440
-static struct symbol *symbols__find_by_name(struct rb_root *symbols,
473
+static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
441474 const char *name,
442475 enum symbol_tag_include includes)
443476 {
....@@ -447,7 +480,7 @@
447480 if (symbols == NULL)
448481 return NULL;
449482
450
- n = symbols->rb_node;
483
+ n = symbols->rb_root.rb_node;
451484
452485 while (n) {
453486 int cmp;
@@ -497,6 +530,13 @@
 	    sym->start == sym->end)) {
 		dso->last_find_result.symbol = sym;
 	}
+}
+
+void dso__delete_symbol(struct dso *dso, struct symbol *sym)
+{
+	rb_erase_cached(&sym->rb_node, &dso->symbols);
+	symbol__delete(sym);
+	dso__reset_find_symbol_cache(dso);
 }

 struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
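
dso__delete_symbol() pairs the rb_erase_cached() with a find-cache reset so dso->last_find_result can never point at a freed symbol. A hedged usage sketch (the wrapper name and lookup are illustrative; the two dso__* calls are the ones added above):

/* Drop the symbol covering addr, if any, from an already-loaded dso. */
static void drop_symbol_at(struct dso *dso, u64 addr)
{
	struct symbol *sym = dso__find_symbol(dso, addr);

	if (sym)
		dso__delete_symbol(dso, sym);	/* erase, free, reset the find cache */
}
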
@@ -550,6 +590,20 @@
 	dso__set_sorted_by_name(dso);
 	return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
 }
+
+/*
+ * While we find nice hex chars, build a long_val.
+ * Return number of chars processed.
+ */
+static int hex2u64(const char *ptr, u64 *long_val)
+{
+	char *p;
+
+	*long_val = strtoull(ptr, &p, 16);
+
+	return p - ptr;
+}
+

 int modules__parse(const char *filename, void *arg,
 		   int (*process_module)(void *arg, const char *name,
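
hex2u64() reports how many characters strtoull() consumed, which lets callers step over the address field of a kallsyms or perf-map line and continue at the separator. For example:

u64 addr;
int consumed = hex2u64("ffffffff81000000 T _stext", &addr);
/* consumed == 16, addr == 0xffffffff81000000; parsing resumes at ptr + consumed */
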
@@ -620,8 +674,12 @@
 static bool symbol__is_idle(const char *name)
 {
 	const char * const idle_symbols[] = {
+		"acpi_idle_do_entry",
+		"acpi_processor_ffh_cstate_enter",
+		"arch_cpu_idle",
 		"cpu_idle",
 		"cpu_startup_entry",
+		"idle_cpu",
 		"intel_idle",
 		"default_idle",
 		"native_safe_halt",
@@ -629,19 +687,26 @@
 		"exit_idle",
 		"mwait_idle",
 		"mwait_idle_with_hints",
+		"mwait_idle_with_hints.constprop.0",
 		"poll_idle",
 		"ppc64_runlatch_off",
 		"pseries_dedicated_idle_sleep",
+		"psw_idle",
+		"psw_idle_exit",
 		NULL
 	};
 	int i;
+	static struct strlist *idle_symbols_list;

-	for (i = 0; idle_symbols[i]; i++) {
-		if (!strcmp(idle_symbols[i], name))
-			return true;
-	}
+	if (idle_symbols_list)
+		return strlist__has_entry(idle_symbols_list, name);

-	return false;
+	idle_symbols_list = strlist__new(NULL, NULL);
+
+	for (i = 0; idle_symbols[i]; i++)
+		strlist__add(idle_symbols_list, idle_symbols[i]);
+
+	return strlist__has_entry(idle_symbols_list, name);
 }

 static int map__process_kallsym_symbol(void *arg, const char *name,
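
symbol__is_idle() now builds its lookup set once and answers later calls from the strlist. The same lazily-initialized pattern in isolation, assuming the strlist API used above (the entries here are arbitrary):

static bool name_in_set(const char *name)
{
	static struct strlist *set;
	static const char * const entries[] = { "poll_idle", "intel_idle", NULL };

	if (!set) {
		/* built on first use; as in the patch, allocation failure is not handled here */
		set = strlist__new(NULL, NULL);
		for (int i = 0; entries[i]; i++)
			strlist__add(set, entries[i]);
	}
	return strlist__has_entry(set, name);
}
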
....@@ -649,7 +714,7 @@
649714 {
650715 struct symbol *sym;
651716 struct dso *dso = arg;
652
- struct rb_root *root = &dso->symbols;
717
+ struct rb_root_cached *root = &dso->symbols;
653718
654719 if (!symbol_type__filter(type))
655720 return 0;
....@@ -681,19 +746,19 @@
681746 return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
682747 }
683748
684
-static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
749
+static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
685750 {
686751 struct map *curr_map;
687752 struct symbol *pos;
688753 int count = 0;
689
- struct rb_root old_root = dso->symbols;
690
- struct rb_root *root = &dso->symbols;
691
- struct rb_node *next = rb_first(root);
754
+ struct rb_root_cached old_root = dso->symbols;
755
+ struct rb_root_cached *root = &dso->symbols;
756
+ struct rb_node *next = rb_first_cached(root);
692757
693758 if (!kmaps)
694759 return -1;
695760
696
- *root = RB_ROOT;
761
+ *root = RB_ROOT_CACHED;
697762
698763 while (next) {
699764 char *module;
....@@ -701,13 +766,13 @@
701766 pos = rb_entry(next, struct symbol, rb_node);
702767 next = rb_next(&pos->rb_node);
703768
704
- rb_erase_init(&pos->rb_node, &old_root);
705
-
769
+ rb_erase_cached(&pos->rb_node, &old_root);
770
+ RB_CLEAR_NODE(&pos->rb_node);
706771 module = strchr(pos->name, '\t');
707772 if (module)
708773 *module = '\0';
709774
710
- curr_map = map_groups__find(kmaps, pos->start);
775
+ curr_map = maps__find(kmaps, pos->start);
711776
712777 if (!curr_map) {
713778 symbol__delete(pos);
....@@ -734,15 +799,15 @@
734799 * kernel range is broken in several maps, named [kernel].N, as we don't have
735800 * the original ELF section names vmlinux have.
736801 */
737
-static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
738
- struct map *initial_map)
802
+static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
803
+ struct map *initial_map)
739804 {
740805 struct machine *machine;
741806 struct map *curr_map = initial_map;
742807 struct symbol *pos;
743808 int count = 0, moved = 0;
744
- struct rb_root *root = &dso->symbols;
745
- struct rb_node *next = rb_first(root);
809
+ struct rb_root_cached *root = &dso->symbols;
810
+ struct rb_node *next = rb_first_cached(root);
746811 int kernel_range = 0;
747812 bool x86_64;
748813
....@@ -768,7 +833,7 @@
768833
769834 if (strcmp(curr_map->dso->short_name, module)) {
770835 if (curr_map != initial_map &&
771
- dso->kernel == DSO_TYPE_GUEST_KERNEL &&
836
+ dso->kernel == DSO_SPACE__KERNEL_GUEST &&
772837 machine__is_default_guest(machine)) {
773838 /*
774839 * We assume all symbols of a module are
....@@ -780,7 +845,7 @@
780845 dso__set_loaded(curr_map->dso);
781846 }
782847
783
- curr_map = map_groups__find_by_name(kmaps, module);
848
+ curr_map = maps__find_by_name(kmaps, module);
784849 if (curr_map == NULL) {
785850 pr_debug("%s/proc/{kallsyms,modules} "
786851 "inconsistency while looking "
....@@ -825,7 +890,7 @@
825890 goto add_symbol;
826891 }
827892
828
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
893
+ if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
829894 snprintf(dso_name, sizeof(dso_name),
830895 "[guest.kernel].%d",
831896 kernel_range++);
....@@ -847,7 +912,7 @@
847912 }
848913
849914 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
850
- map_groups__insert(kmaps, curr_map);
915
+ maps__insert(kmaps, curr_map);
851916 ++kernel_range;
852917 } else if (delta) {
853918 /* Kernel was relocated at boot time */
....@@ -856,7 +921,7 @@
856921 }
857922 add_symbol:
858923 if (curr_map != initial_map) {
859
- rb_erase(&pos->rb_node, root);
924
+ rb_erase_cached(&pos->rb_node, root);
860925 symbols__insert(&curr_map->dso->symbols, pos);
861926 ++moved;
862927 } else
....@@ -864,12 +929,12 @@
864929
865930 continue;
866931 discard_symbol:
867
- rb_erase(&pos->rb_node, root);
932
+ rb_erase_cached(&pos->rb_node, root);
868933 symbol__delete(pos);
869934 }
870935
871936 if (curr_map != initial_map &&
872
- dso->kernel == DSO_TYPE_GUEST_KERNEL &&
937
+ dso->kernel == DSO_SPACE__KERNEL_GUEST &&
873938 machine__is_default_guest(kmaps->machine)) {
874939 dso__set_loaded(curr_map->dso);
875940 }
....@@ -1032,13 +1097,7 @@
10321097 return ret;
10331098 }
10341099
1035
-struct map *map_groups__first(struct map_groups *mg)
1036
-{
1037
- return maps__first(&mg->maps);
1038
-}
1039
-
1040
-static int do_validate_kcore_modules(const char *filename,
1041
- struct map_groups *kmaps)
1100
+static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
10421101 {
10431102 struct rb_root modules = RB_ROOT;
10441103 struct map *old_map;
....@@ -1048,13 +1107,10 @@
10481107 if (err)
10491108 return err;
10501109
1051
- old_map = map_groups__first(kmaps);
1052
- while (old_map) {
1053
- struct map *next = map_groups__next(old_map);
1110
+ maps__for_each_entry(kmaps, old_map) {
10541111 struct module_info *mi;
10551112
10561113 if (!__map__is_kmodule(old_map)) {
1057
- old_map = next;
10581114 continue;
10591115 }
10601116
....@@ -1064,8 +1120,6 @@
10641120 err = -EINVAL;
10651121 goto out;
10661122 }
1067
-
1068
- old_map = next;
10691123 }
10701124 out:
10711125 delete_modules(&modules);
....@@ -1100,7 +1154,7 @@
11001154 static int validate_kcore_modules(const char *kallsyms_filename,
11011155 struct map *map)
11021156 {
1103
- struct map_groups *kmaps = map__kmaps(map);
1157
+ struct maps *kmaps = map__kmaps(map);
11041158 char modules_filename[PATH_MAX];
11051159
11061160 if (!kmaps)
@@ -1159,12 +1213,91 @@
 	return 0;
 }

+/*
+ * Merges map into maps by splitting the new map within the existing map
+ * regions.
+ */
+int maps__merge_in(struct maps *kmaps, struct map *new_map)
+{
+	struct map *old_map;
+	LIST_HEAD(merged);
+
+	maps__for_each_entry(kmaps, old_map) {
+		/* no overload with this one */
+		if (new_map->end < old_map->start ||
+		    new_map->start >= old_map->end)
+			continue;
+
+		if (new_map->start < old_map->start) {
+			/*
+			 * |new......
+			 *       |old....
+			 */
+			if (new_map->end < old_map->end) {
+				/*
+				 * |new......|     -> |new..|
+				 *       |old....| ->       |old....|
+				 */
+				new_map->end = old_map->start;
+			} else {
+				/*
+				 * |new.............| -> |new..|       |new..|
+				 *       |old....|    ->       |old....|
+				 */
+				struct map *m = map__clone(new_map);
+
+				if (!m)
+					return -ENOMEM;
+
+				m->end = old_map->start;
+				list_add_tail(&m->node, &merged);
+				new_map->pgoff += old_map->end - new_map->start;
+				new_map->start = old_map->end;
+			}
+		} else {
+			/*
+			 *      |new......
+			 * |old....
+			 */
+			if (new_map->end < old_map->end) {
+				/*
+				 *      |new..|   -> x
+				 * |old.........| -> |old.........|
+				 */
+				map__put(new_map);
+				new_map = NULL;
+				break;
+			} else {
+				/*
+				 *      |new......| ->         |new...|
+				 * |old....|        -> |old....|
+				 */
+				new_map->pgoff += old_map->end - new_map->start;
+				new_map->start = old_map->end;
+			}
+		}
+	}
+
+	while (!list_empty(&merged)) {
+		old_map = list_entry(merged.next, struct map, node);
+		list_del_init(&old_map->node);
+		maps__insert(kmaps, old_map);
+		map__put(old_map);
+	}
+
+	if (new_map) {
+		maps__insert(kmaps, new_map);
+		map__put(new_map);
+	}
+	return 0;
+}
+
 static int dso__load_kcore(struct dso *dso, struct map *map,
 			   const char *kallsyms_filename)
 {
-	struct map_groups *kmaps = map__kmaps(map);
+	struct maps *kmaps = map__kmaps(map);
 	struct kcore_mapfn_data md;
-	struct map *old_map, *new_map, *replacement_map = NULL;
+	struct map *old_map, *new_map, *replacement_map = NULL, *next;
 	struct machine *machine;
 	bool is_64_bit;
 	int err, fd;
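
Concretely, maps__merge_in() above splits the incoming kcore map around any map it must preserve instead of replacing it. A worked example with made-up addresses:

/*
 * existing (e.g. eBPF) map:  [0x2000, 0x3000)
 * new kcore map:             [0x1000, 0x5000), pgoff 0
 *
 * After maps__merge_in() the old map is untouched and the kcore text is
 * covered by two pieces inserted around it:
 *
 *   [0x1000, 0x2000)  pgoff 0x0000  (clone, end trimmed to old->start)
 *   [0x3000, 0x5000)  pgoff 0x2000  (original, start and pgoff advanced past old->end)
 */
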
....@@ -1211,13 +1344,14 @@
12111344 }
12121345
12131346 /* Remove old maps */
1214
- old_map = map_groups__first(kmaps);
1215
- while (old_map) {
1216
- struct map *next = map_groups__next(old_map);
1217
-
1218
- if (old_map != map)
1219
- map_groups__remove(kmaps, old_map);
1220
- old_map = next;
1347
+ maps__for_each_entry_safe(kmaps, old_map, next) {
1348
+ /*
1349
+ * We need to preserve eBPF maps even if they are
1350
+ * covered by kcore, because we need to access
1351
+ * eBPF dso for source data.
1352
+ */
1353
+ if (old_map != map && !__map__is_bpf_prog(old_map))
1354
+ maps__remove(kmaps, old_map);
12211355 }
12221356 machine->trampolines_mapped = false;
12231357
....@@ -1246,14 +1380,19 @@
12461380 map->unmap_ip = new_map->unmap_ip;
12471381 /* Ensure maps are correctly ordered */
12481382 map__get(map);
1249
- map_groups__remove(kmaps, map);
1250
- map_groups__insert(kmaps, map);
1383
+ maps__remove(kmaps, map);
1384
+ maps__insert(kmaps, map);
12511385 map__put(map);
1386
+ map__put(new_map);
12521387 } else {
1253
- map_groups__insert(kmaps, new_map);
1388
+ /*
1389
+ * Merge kcore map into existing maps,
1390
+ * and ensure that current maps (eBPF)
1391
+ * stay intact.
1392
+ */
1393
+ if (maps__merge_in(kmaps, new_map))
1394
+ goto out_err;
12541395 }
1255
-
1256
- map__put(new_map);
12571396 }
12581397
12591398 if (machine__is(machine, "x86_64")) {
....@@ -1273,7 +1412,7 @@
12731412 * Set the data type and long name so that kcore can be read via
12741413 * dso__data_read_addr().
12751414 */
1276
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1415
+ if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
12771416 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
12781417 else
12791418 dso->binary_type = DSO_BINARY_TYPE__KCORE;
....@@ -1334,18 +1473,18 @@
13341473 if (kallsyms__delta(kmap, filename, &delta))
13351474 return -1;
13361475
1337
- symbols__fixup_end(&dso->symbols);
1476
+ symbols__fixup_end(&dso->symbols, true);
13381477 symbols__fixup_duplicate(&dso->symbols);
13391478
1340
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1479
+ if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
13411480 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
13421481 else
13431482 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
13441483
13451484 if (!no_kcore && !dso__load_kcore(dso, map, filename))
1346
- return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
1485
+ return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
13471486 else
1348
- return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
1487
+ return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
13491488 }
13501489
13511490 int dso__load_kallsyms(struct dso *dso, const char *filename,
....@@ -1411,6 +1550,137 @@
14111550 return -1;
14121551 }
14131552
1553
+#ifdef HAVE_LIBBFD_SUPPORT
1554
+#define PACKAGE 'perf'
1555
+#include <bfd.h>
1556
+
1557
+static int bfd_symbols__cmpvalue(const void *a, const void *b)
1558
+{
1559
+ const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;
1560
+
1561
+ if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
1562
+ return bfd_asymbol_value(as) - bfd_asymbol_value(bs);
1563
+
1564
+ return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
1565
+}
1566
+
1567
+static int bfd2elf_binding(asymbol *symbol)
1568
+{
1569
+ if (symbol->flags & BSF_WEAK)
1570
+ return STB_WEAK;
1571
+ if (symbol->flags & BSF_GLOBAL)
1572
+ return STB_GLOBAL;
1573
+ if (symbol->flags & BSF_LOCAL)
1574
+ return STB_LOCAL;
1575
+ return -1;
1576
+}
1577
+
1578
+int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
1579
+{
1580
+ int err = -1;
1581
+ long symbols_size, symbols_count, i;
1582
+ asection *section;
1583
+ asymbol **symbols, *sym;
1584
+ struct symbol *symbol;
1585
+ bfd *abfd;
1586
+ u64 start, len;
1587
+
1588
+ abfd = bfd_openr(dso->long_name, NULL);
1589
+ if (!abfd)
1590
+ return -1;
1591
+
1592
+ if (!bfd_check_format(abfd, bfd_object)) {
1593
+ pr_debug2("%s: cannot read %s bfd file.\n", __func__,
1594
+ dso->long_name);
1595
+ goto out_close;
1596
+ }
1597
+
1598
+ if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
1599
+ goto out_close;
1600
+
1601
+ section = bfd_get_section_by_name(abfd, ".text");
1602
+ if (section)
1603
+ dso->text_offset = section->vma - section->filepos;
1604
+
1605
+ bfd_close(abfd);
1606
+
1607
+ abfd = bfd_openr(debugfile, NULL);
1608
+ if (!abfd)
1609
+ return -1;
1610
+
1611
+ if (!bfd_check_format(abfd, bfd_object)) {
1612
+ pr_debug2("%s: cannot read %s bfd file.\n", __func__,
1613
+ debugfile);
1614
+ goto out_close;
1615
+ }
1616
+
1617
+ if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
1618
+ goto out_close;
1619
+
1620
+ symbols_size = bfd_get_symtab_upper_bound(abfd);
1621
+ if (symbols_size == 0) {
1622
+ bfd_close(abfd);
1623
+ return 0;
1624
+ }
1625
+
1626
+ if (symbols_size < 0)
1627
+ goto out_close;
1628
+
1629
+ symbols = malloc(symbols_size);
1630
+ if (!symbols)
1631
+ goto out_close;
1632
+
1633
+ symbols_count = bfd_canonicalize_symtab(abfd, symbols);
1634
+ if (symbols_count < 0)
1635
+ goto out_free;
1636
+
1637
+ qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
1638
+
1639
+#ifdef bfd_get_section
1640
+#define bfd_asymbol_section bfd_get_section
1641
+#endif
1642
+ for (i = 0; i < symbols_count; ++i) {
1643
+ sym = symbols[i];
1644
+ section = bfd_asymbol_section(sym);
1645
+ if (bfd2elf_binding(sym) < 0)
1646
+ continue;
1647
+
1648
+ while (i + 1 < symbols_count &&
1649
+ bfd_asymbol_section(symbols[i + 1]) == section &&
1650
+ bfd2elf_binding(symbols[i + 1]) < 0)
1651
+ i++;
1652
+
1653
+ if (i + 1 < symbols_count &&
1654
+ bfd_asymbol_section(symbols[i + 1]) == section)
1655
+ len = symbols[i + 1]->value - sym->value;
1656
+ else
1657
+ len = section->size - sym->value;
1658
+
1659
+ start = bfd_asymbol_value(sym) - dso->text_offset;
1660
+ symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
1661
+ bfd_asymbol_name(sym));
1662
+ if (!symbol)
1663
+ goto out_free;
1664
+
1665
+ symbols__insert(&dso->symbols, symbol);
1666
+ }
1667
+#ifdef bfd_get_section
1668
+#undef bfd_asymbol_section
1669
+#endif
1670
+
1671
+ symbols__fixup_end(&dso->symbols, false);
1672
+ symbols__fixup_duplicate(&dso->symbols);
1673
+ dso->adjust_symbols = 1;
1674
+
1675
+ err = 0;
1676
+out_free:
1677
+ free(symbols);
1678
+out_close:
1679
+ bfd_close(abfd);
1680
+ return err;
1681
+}
1682
+#endif
1683
+
14141684 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
14151685 enum dso_binary_type type)
14161686 {
....@@ -1423,17 +1693,17 @@
14231693 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
14241694 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
14251695 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1426
- return !kmod && dso->kernel == DSO_TYPE_USER;
1696
+ return !kmod && dso->kernel == DSO_SPACE__USER;
14271697
14281698 case DSO_BINARY_TYPE__KALLSYMS:
14291699 case DSO_BINARY_TYPE__VMLINUX:
14301700 case DSO_BINARY_TYPE__KCORE:
1431
- return dso->kernel == DSO_TYPE_KERNEL;
1701
+ return dso->kernel == DSO_SPACE__KERNEL;
14321702
14331703 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
14341704 case DSO_BINARY_TYPE__GUEST_VMLINUX:
14351705 case DSO_BINARY_TYPE__GUEST_KCORE:
1436
- return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1706
+ return dso->kernel == DSO_SPACE__KERNEL_GUEST;
14371707
14381708 case DSO_BINARY_TYPE__GUEST_KMODULE:
14391709 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
....@@ -1441,7 +1711,7 @@
14411711 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
14421712 /*
14431713 * kernel modules know their symtab type - it's set when
1444
- * creating a module dso in machine__findnew_module_map().
1714
+ * creating a module dso in machine__addnew_module_map().
14451715 */
14461716 return kmod && dso->symtab_type == type;
14471717
....@@ -1449,6 +1719,9 @@
14491719 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
14501720 return true;
14511721
1722
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
1723
+ case DSO_BINARY_TYPE__BPF_IMAGE:
1724
+ case DSO_BINARY_TYPE__OOL:
14521725 case DSO_BINARY_TYPE__NOT_FOUND:
14531726 default:
14541727 return false;
....@@ -1498,14 +1771,14 @@
14981771 char *name;
14991772 int ret = -1;
15001773 u_int i;
1501
- struct machine *machine;
1774
+ struct machine *machine = NULL;
15021775 char *root_dir = (char *) "";
15031776 int ss_pos = 0;
15041777 struct symsrc ss_[2];
15051778 struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
15061779 bool kmod;
15071780 bool perfmap;
1508
- unsigned char build_id[BUILD_ID_SIZE];
1781
+ struct build_id bid;
15091782 struct nscookie nsc;
15101783 char newmapname[PATH_MAX];
15111784 const char *map_path = dso->long_name;
....@@ -1527,17 +1800,18 @@
15271800 goto out;
15281801 }
15291802
1530
- if (map->groups && map->groups->machine)
1531
- machine = map->groups->machine;
1532
- else
1533
- machine = NULL;
1803
+ kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1804
+ dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1805
+ dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1806
+ dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
15341807
1535
- if (dso->kernel) {
1536
- if (dso->kernel == DSO_TYPE_KERNEL)
1808
+ if (dso->kernel && !kmod) {
1809
+ if (dso->kernel == DSO_SPACE__KERNEL)
15371810 ret = dso__load_kernel_sym(dso, map);
1538
- else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1811
+ else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
15391812 ret = dso__load_guest_kernel_sym(dso, map);
15401813
1814
+ machine = map__kmaps(map)->machine;
15411815 if (machine__is(machine, "x86_64"))
15421816 machine__map_x86_64_entry_trampolines(machine, dso);
15431817 goto out;
....@@ -1546,17 +1820,6 @@
15461820 dso->adjust_symbols = 0;
15471821
15481822 if (perfmap) {
1549
- struct stat st;
1550
-
1551
- if (lstat(map_path, &st) < 0)
1552
- goto out;
1553
-
1554
- if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
1555
- pr_warning("File %s not owned by current user or root, "
1556
- "ignoring it (use -f to override).\n", map_path);
1557
- goto out;
1558
- }
1559
-
15601823 ret = dso__load_perf_map(map_path, dso);
15611824 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
15621825 DSO_BINARY_TYPE__NOT_FOUND;
....@@ -1570,12 +1833,6 @@
15701833 if (!name)
15711834 goto out;
15721835
1573
- kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1574
- dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1575
- dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1576
- dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1577
-
1578
-
15791836 /*
15801837 * Read the build id if possible. This is required for
15811838 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
....@@ -1583,8 +1840,8 @@
15831840 if (!dso->has_build_id &&
15841841 is_regular_file(dso->long_name)) {
15851842 __symbol__join_symfs(name, PATH_MAX, dso->long_name);
1586
- if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
1587
- dso__set_build_id(dso, build_id);
1843
+ if (filename__read_build_id(name, &bid) > 0)
1844
+ dso__set_build_id(dso, &bid);
15881845 }
15891846
15901847 /*
....@@ -1597,6 +1854,7 @@
15971854 bool next_slot = false;
15981855 bool is_reg;
15991856 bool nsexit;
1857
+ int bfdrc = -1;
16001858 int sirc = -1;
16011859
16021860 enum dso_binary_type symtab_type = binary_type_symtab[i];
....@@ -1615,11 +1873,20 @@
16151873 nsinfo__mountns_exit(&nsc);
16161874
16171875 is_reg = is_regular_file(name);
1876
+#ifdef HAVE_LIBBFD_SUPPORT
16181877 if (is_reg)
1878
+ bfdrc = dso__load_bfd_symbols(dso, name);
1879
+#endif
1880
+ if (is_reg && bfdrc < 0)
16191881 sirc = symsrc__init(ss, dso, name, symtab_type);
16201882
16211883 if (nsexit)
16221884 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1885
+
1886
+ if (bfdrc == 0) {
1887
+ ret = 0;
1888
+ break;
1889
+ }
16231890
16241891 if (!is_reg || sirc < 0)
16251892 continue;
....@@ -1685,17 +1952,81 @@
16851952 return ret;
16861953 }
16871954
1688
-struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1955
+static int map__strcmp(const void *a, const void *b)
16891956 {
1690
- struct maps *maps = &mg->maps;
1957
+ const struct map *ma = *(const struct map **)a, *mb = *(const struct map **)b;
1958
+ return strcmp(ma->dso->short_name, mb->dso->short_name);
1959
+}
1960
+
1961
+static int map__strcmp_name(const void *name, const void *b)
1962
+{
1963
+ const struct map *map = *(const struct map **)b;
1964
+ return strcmp(name, map->dso->short_name);
1965
+}
1966
+
1967
+void __maps__sort_by_name(struct maps *maps)
1968
+{
1969
+ qsort(maps->maps_by_name, maps->nr_maps, sizeof(struct map *), map__strcmp);
1970
+}
1971
+
1972
+static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
1973
+{
1974
+ struct map *map;
1975
+ struct map **maps_by_name = realloc(maps->maps_by_name, maps->nr_maps * sizeof(map));
1976
+ int i = 0;
1977
+
1978
+ if (maps_by_name == NULL)
1979
+ return -1;
1980
+
1981
+ maps->maps_by_name = maps_by_name;
1982
+ maps->nr_maps_allocated = maps->nr_maps;
1983
+
1984
+ maps__for_each_entry(maps, map)
1985
+ maps_by_name[i++] = map;
1986
+
1987
+ __maps__sort_by_name(maps);
1988
+ return 0;
1989
+}
1990
+
1991
+static struct map *__maps__find_by_name(struct maps *maps, const char *name)
1992
+{
1993
+ struct map **mapp;
1994
+
1995
+ if (maps->maps_by_name == NULL &&
1996
+ map__groups__sort_by_name_from_rbtree(maps))
1997
+ return NULL;
1998
+
1999
+ mapp = bsearch(name, maps->maps_by_name, maps->nr_maps, sizeof(*mapp), map__strcmp_name);
2000
+ if (mapp)
2001
+ return *mapp;
2002
+ return NULL;
2003
+}
2004
+
2005
+struct map *maps__find_by_name(struct maps *maps, const char *name)
2006
+{
16912007 struct map *map;
16922008
16932009 down_read(&maps->lock);
16942010
1695
- for (map = maps__first(maps); map; map = map__next(map)) {
1696
- if (map->dso && strcmp(map->dso->short_name, name) == 0)
1697
- goto out_unlock;
2011
+ if (maps->last_search_by_name && strcmp(maps->last_search_by_name->dso->short_name, name) == 0) {
2012
+ map = maps->last_search_by_name;
2013
+ goto out_unlock;
16982014 }
2015
+ /*
2016
+ * If we have maps->maps_by_name, then the name isn't in the rbtree,
2017
+ * as maps->maps_by_name mirrors the rbtree when lookups by name are
2018
+ * made.
2019
+ */
2020
+ map = __maps__find_by_name(maps, name);
2021
+ if (map || maps->maps_by_name != NULL)
2022
+ goto out_unlock;
2023
+
2024
+ /* Fallback to traversing the rbtree... */
2025
+ maps__for_each_entry(maps, map)
2026
+ if (strcmp(map->dso->short_name, name) == 0) {
2027
+ maps->last_search_by_name = map;
2028
+ goto out_unlock;
2029
+ }
16992030
17002031 map = NULL;
17012032
....@@ -1717,7 +2048,7 @@
17172048 else
17182049 symbol__join_symfs(symfs_vmlinux, vmlinux);
17192050
1720
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
2051
+ if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
17212052 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
17222053 else
17232054 symtab_type = DSO_BINARY_TYPE__VMLINUX;
....@@ -1729,7 +2060,7 @@
17292060 symsrc__destroy(&ss);
17302061
17312062 if (err > 0) {
1732
- if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
2063
+ if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
17332064 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
17342065 else
17352066 dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
....@@ -1816,7 +2147,7 @@
18162147
18172148 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
18182149 {
1819
- u8 host_build_id[BUILD_ID_SIZE];
2150
+ struct build_id bid;
18202151 char sbuild_id[SBUILD_ID_SIZE];
18212152 bool is_host = false;
18222153 char path[PATH_MAX];
....@@ -1829,9 +2160,8 @@
18292160 goto proc_kallsyms;
18302161 }
18312162
1832
- if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1833
- sizeof(host_build_id)) == 0)
1834
- is_host = dso__build_id_equal(dso, host_build_id);
2163
+ if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0)
2164
+ is_host = dso__build_id_equal(dso, &bid);
18352165
18362166 /* Try a fast path for /proc/kallsyms if possible */
18372167 if (is_host) {
....@@ -1847,7 +2177,7 @@
18472177 goto proc_kallsyms;
18482178 }
18492179
1850
- build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
2180
+ build_id__sprintf(&dso->bid, sbuild_id);
18512181
18522182 /* Find kallsyms in build-id cache with kcore */
18532183 scnprintf(path, sizeof(path), "%s/%s/%s",
....@@ -1937,14 +2267,8 @@
19372267 {
19382268 int err;
19392269 const char *kallsyms_filename = NULL;
1940
- struct machine *machine;
2270
+ struct machine *machine = map__kmaps(map)->machine;
19412271 char path[PATH_MAX];
1942
-
1943
- if (!map->groups) {
1944
- pr_debug("Guest kernel map hasn't the point to groups\n");
1945
- return -1;
1946
- }
1947
- machine = map->groups->machine;
19482272
19492273 if (machine__is_default_guest(machine)) {
19502274 /*
....@@ -2093,13 +2417,19 @@
20932417 char line[8];
20942418
20952419 if (fgets(line, sizeof(line), fp) != NULL)
2096
- value = ((geteuid() != 0) || (getuid() != 0)) ?
2097
- (atoi(line) != 0) :
2098
- (atoi(line) == 2);
2420
+ value = perf_cap__capable(CAP_SYSLOG) ?
2421
+ (atoi(line) >= 2) :
2422
+ (atoi(line) != 0);
20992423
21002424 fclose(fp);
21012425 }
21022426
2427
+ /* Per kernel/kallsyms.c:
2428
+ * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
2429
+ */
2430
+ if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
2431
+ value = true;
2432
+
21032433 return value;
21042434 }
21052435