 // SPDX-License-Identifier: GPL-2.0
-#include "../perf.h"
 #include <errno.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
 #include <linux/kernel.h>
+#include <linux/zalloc.h>
+#include "dso.h"
 #include "session.h"
 #include "thread.h"
 #include "thread-stack.h"
-#include "util.h"
 #include "debug.h"
 #include "namespaces.h"
 #include "comm.h"
+#include "map.h"
+#include "symbol.h"
 #include "unwind.h"
+#include "callchain.h"

 #include <api/fs/fs.h>

-int thread__init_map_groups(struct thread *thread, struct machine *machine)
+int thread__init_maps(struct thread *thread, struct machine *machine)
 {
 	pid_t pid = thread->pid_;

 	if (pid == thread->tid || pid == -1) {
-		thread->mg = map_groups__new(machine);
+		thread->maps = maps__new(machine);
 	} else {
 		struct thread *leader = __machine__findnew_thread(machine, pid, pid);
 		if (leader) {
-			thread->mg = map_groups__get(leader->mg);
+			thread->maps = maps__get(leader->maps);
 			thread__put(leader);
 		}
 	}

-	return thread->mg ? 0 : -1;
+	return thread->maps ? 0 : -1;
 }
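In the hunk above, only the thread-group leader (pid == tid) allocates a maps object; every sibling thread shares the leader's copy through maps__get()/maps__put() reference counting. A minimal standalone model of that sharing scheme, with a plain-int refcount standing in for perf's atomic refcount_t and every name hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for perf's refcounted maps object (illustrative only):
 * the group leader allocates it, siblings just grab a reference. */
struct maps_model {
	int refcnt;
};

static struct maps_model *maps_model__new(void)
{
	struct maps_model *m = malloc(sizeof(*m));

	if (m)
		m->refcnt = 1;
	return m;
}

static struct maps_model *maps_model__get(struct maps_model *m)
{
	if (m)
		m->refcnt++;
	return m;
}

static void maps_model__put(struct maps_model *m)
{
	if (m && --m->refcnt == 0)
		free(m);
}

int main(void)
{
	struct maps_model *leader_maps = maps_model__new();	    /* pid == tid */
	struct maps_model *thread_maps = maps_model__get(leader_maps); /* sibling */

	printf("shared: %d, refcnt: %d\n",
	       leader_maps == thread_maps, leader_maps->refcnt);

	maps_model__put(thread_maps);	/* thread exit: drops to 1 */
	maps_model__put(leader_maps);	/* leader exit frees the object */
	return 0;
}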

 struct thread *thread__new(pid_t pid, pid_t tid)
...
 		thread->tid = tid;
 		thread->ppid = -1;
 		thread->cpu = -1;
+		thread->lbr_stitch_enable = false;
 		INIT_LIST_HEAD(&thread->namespaces_list);
 		INIT_LIST_HEAD(&thread->comm_list);
 		init_rwsem(&thread->namespaces_lock);
...
 		RB_CLEAR_NODE(&thread->rb_node);
 		/* Thread holds first ref to nsdata. */
 		thread->nsinfo = nsinfo__new(pid);
+		srccode_state_init(&thread->srccode_state);
 	}

 	return thread;
...

 	thread_stack__free(thread);

-	if (thread->mg) {
-		map_groups__put(thread->mg);
-		thread->mg = NULL;
+	if (thread->maps) {
+		maps__put(thread->maps);
+		thread->maps = NULL;
 	}
 	down_write(&thread->namespaces_lock);
 	list_for_each_entry_safe(namespaces, tmp_namespaces,
 				 &thread->namespaces_list, list) {
-		list_del(&namespaces->list);
+		list_del_init(&namespaces->list);
 		namespaces__free(namespaces);
 	}
 	up_write(&thread->namespaces_lock);

 	down_write(&thread->comm_lock);
 	list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
-		list_del(&comm->list);
+		list_del_init(&comm->list);
 		comm__free(comm);
 	}
 	up_write(&thread->comm_lock);

-	unwind__finish_access(thread);
 	nsinfo__zput(thread->nsinfo);
+	srccode_state_free(&thread->srccode_state);

 	exit_rwsem(&thread->namespaces_lock);
 	exit_rwsem(&thread->comm_lock);
+	thread__free_stitch_list(thread);
 	free(thread);
 }
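The list_del() to list_del_init() switch in the teardown loops above matters because list_del_init() leaves the removed node self-linked, so a later list_empty() on that node is well-defined. A self-contained sketch using a simplified re-implementation of <linux/list.h> (the kernel's real list_del() poisons next/prev rather than leaving them dangling):

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of the kernel's intrusive doubly-linked list. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);	/* node now reads as "not on any list" */
}

static bool list_empty(const struct list_head *h)
{
	return h->next == h;
}

int main(void)
{
	struct list_head head, node;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&node);
	list_add(&node, &head);
	list_del_init(&node);
	/* After list_del_init(), checking the node itself is safe: */
	printf("detached: %d\n", list_empty(&node));
	return 0;
}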

...
 {
 	if (thread && refcount_dec_and_test(&thread->refcnt)) {
 		/*
-		 * Remove it from the dead_threads list, as last reference
-		 * is gone.
+		 * Remove it from the dead threads list, as last reference is
+		 * gone, if it is in a dead threads list.
+		 *
+		 * We may not be there anymore if say, the machine where it was
+		 * stored was already deleted, so we already removed it from
+		 * the dead threads and some other piece of code still keeps a
+		 * reference.
+		 *
+		 * This is what 'perf sched' does and finally drops it in
+		 * perf_sched__lat(), where it calls perf_sched__read_events(),
+		 * that processes the events by creating a session and deleting
+		 * it, which ends up destroying the list heads for the dead
+		 * threads, but before it does that it removes all threads from
+		 * it using list_del_init().
+		 *
+		 * So we need to check here if it is in a dead threads list and
+		 * if so, remove it before finally deleting the thread, to avoid
+		 * an use after free situation.
 		 */
-		list_del_init(&thread->node);
+		if (!list_empty(&thread->node))
+			list_del_init(&thread->node);
 		thread__delete(thread);
 	}
 }
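The put path above combines two idioms: refcount_dec_and_test() guarantees that exactly one caller observes the drop to zero, and only that caller may unlink and free; the list_empty() guard then tolerates the node already having been detached elsewhere. A standalone model of the refcount side using C11 atomics (refcount_dec_and_test_model() is a hypothetical stand-in; the kernel's refcount_t additionally saturates on misuse):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Only the caller that drops the last reference tears the object down;
 * the decrement-and-test returns true exactly once even under races. */
struct obj {
	atomic_int refcnt;
};

static bool refcount_dec_and_test_model(atomic_int *r)
{
	return atomic_fetch_sub(r, 1) == 1;
}

static void obj__put(struct obj *o)
{
	if (o && refcount_dec_and_test_model(&o->refcnt)) {
		/* last reference gone: safe to unlink and free */
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcnt, 2);
	obj__put(o);	/* drops to 1, no free */
	obj__put(o);	/* drops to 0, frees   */
	puts("both puts done, freed exactly once");
	return 0;
}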
...
 	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
 }

-struct namespaces *thread__namespaces(const struct thread *thread)
+struct namespaces *thread__namespaces(struct thread *thread)
 {
 	struct namespaces *ns;

-	down_read((struct rw_semaphore *)&thread->namespaces_lock);
+	down_read(&thread->namespaces_lock);
 	ns = __thread__namespaces(thread);
-	up_read((struct rw_semaphore *)&thread->namespaces_lock);
+	up_read(&thread->namespaces_lock);

 	return ns;
 }

 static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
-				    struct namespaces_event *event)
+				    struct perf_record_namespaces *event)
 {
 	struct namespaces *new, *curr = __thread__namespaces(thread);

...
 }

 int thread__set_namespaces(struct thread *thread, u64 timestamp,
-			   struct namespaces_event *event)
+			   struct perf_record_namespaces *event)
 {
 	int ret;

...
 		list_add(&new->list, &thread->comm_list);

 		if (exec)
-			unwind__flush_access(thread);
+			unwind__flush_access(thread->maps);
 	}

 	thread->comm_set = true;
...
 	return comm__str(comm);
 }

-const char *thread__comm_str(const struct thread *thread)
+const char *thread__comm_str(struct thread *thread)
 {
 	const char *str;

-	down_read((struct rw_semaphore *)&thread->comm_lock);
+	down_read(&thread->comm_lock);
 	str = __thread__comm_str(thread);
-	up_read((struct rw_semaphore *)&thread->comm_lock);
+	up_read(&thread->comm_lock);

 	return str;
 }
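Dropping const from thread__namespaces() and thread__comm_str() is what lets the (struct rw_semaphore *) casts on the lock calls go away: taking even a read lock mutates the lock word, so a const receiver forces const-discarding casts. A standalone illustration using a raw pthread rwlock (perf's struct rw_semaphore is a thin userspace wrapper around one; thread_model here is hypothetical):

#include <pthread.h>
#include <stdio.h>

struct thread_model {
	pthread_rwlock_t comm_lock;
	const char *comm;
};

/* With a const pointer, every lock call needs a const-discarding cast: */
static const char *comm_str_const(const struct thread_model *t)
{
	const char *s;

	pthread_rwlock_rdlock((pthread_rwlock_t *)&t->comm_lock);
	s = t->comm;
	pthread_rwlock_unlock((pthread_rwlock_t *)&t->comm_lock);
	return s;
}

/* With a non-const pointer the casts disappear and the compiler can
 * again catch genuinely wrong mutation of the thread: */
static const char *comm_str(struct thread_model *t)
{
	const char *s;

	pthread_rwlock_rdlock(&t->comm_lock);
	s = t->comm;
	pthread_rwlock_unlock(&t->comm_lock);
	return s;
}

int main(void)
{
	struct thread_model t = { .comm = "perf" };

	pthread_rwlock_init(&t.comm_lock, NULL);
	printf("%s %s\n", comm_str_const(&t), comm_str(&t));
	pthread_rwlock_destroy(&t.comm_lock);
	return 0;
}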
...
 size_t thread__fprintf(struct thread *thread, FILE *fp)
 {
 	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
-	       map_groups__fprintf(thread->mg, fp);
+	       maps__fprintf(thread->maps, fp);
 }

 int thread__insert_map(struct thread *thread, struct map *map)
 {
 	int ret;

-	ret = unwind__prepare_access(thread, map, NULL);
+	ret = unwind__prepare_access(thread->maps, map, NULL);
 	if (ret)
 		return ret;

-	map_groups__fixup_overlappings(thread->mg, map, stderr);
-	map_groups__insert(thread->mg, map);
+	maps__fixup_overlappings(thread->maps, map, stderr);
+	maps__insert(thread->maps, map);

 	return 0;
 }
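maps__fixup_overlappings() has to make room for the incoming map before maps__insert() adds it: existing ranges that collide are trimmed or split so the new mapping wins, mirroring mmap semantics. A toy, array-backed sketch of that invariant (perf's real implementation works on a locked, sorted structure and rewrites map objects; everything below is illustrative):

#include <stdio.h>

struct range {
	unsigned long start, end;	/* [start, end) */
};

/* Keep disjoint ranges; trim or split anything the new range overlaps. */
static int fixup_overlappings(struct range *tbl, int n, struct range new)
{
	int i, out = 0;

	for (i = 0; i < n; i++) {
		struct range r = tbl[i];

		if (r.end <= new.start || r.start >= new.end) {
			tbl[out++] = r;			/* disjoint: keep */
		} else {
			if (r.start < new.start)	/* keep left piece  */
				tbl[out++] = (struct range){ r.start, new.start };
			if (r.end > new.end)		/* keep right piece */
				tbl[out++] = (struct range){ new.end, r.end };
		}
	}
	return out;
}

int main(void)
{
	struct range tbl[8] = { { 0x1000, 0x5000 }, { 0x6000, 0x8000 } };
	struct range new = { 0x4000, 0x7000 };
	int i, n = fixup_overlappings(tbl, 2, new);

	tbl[n++] = new;		/* the insert step */
	for (i = 0; i < n; i++)
		printf("%#lx-%#lx\n", tbl[i].start, tbl[i].end);
	return 0;
}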
...
 {
 	bool initialized = false;
 	int err = 0;
-	struct maps *maps = &thread->mg->maps;
+	struct maps *maps = thread->maps;
 	struct map *map;

 	down_read(&maps->lock);

-	for (map = maps__first(maps); map; map = map__next(map)) {
-		err = unwind__prepare_access(thread, map, &initialized);
+	maps__for_each_entry(maps, map) {
+		err = unwind__prepare_access(thread->maps, map, &initialized);
 		if (err || initialized)
 			break;
 	}
...
 {
 	int err = 0;

-	if (symbol_conf.use_callchain)
+	if (dwarf_callchain_users)
 		err = __thread__prepare_access(thread);

 	return err;
 }

-static int thread__clone_map_groups(struct thread *thread,
-				    struct thread *parent)
+static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
 {
 	/* This is new thread, we share map groups for process. */
 	if (thread->pid_ == parent->pid_)
 		return thread__prepare_access(thread);

-	if (thread->mg == parent->mg) {
+	if (thread->maps == parent->maps) {
 		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
 			 thread->pid_, thread->tid, parent->pid_, parent->tid);
 		return 0;
 	}
-
 	/* But this one is new process, copy maps. */
-	if (map_groups__clone(thread, parent->mg) < 0)
-		return -ENOMEM;
-
-	return 0;
+	return do_maps_clone ? maps__clone(thread, parent->maps) : 0;
 }

-int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
+int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
 {
 	if (parent->comm_set) {
 		const char *comm = thread__comm_str(parent);
...
 	}

 	thread->ppid = parent->tid;
-	return thread__clone_map_groups(thread, parent);
+	return thread__clone_maps(thread, parent, do_maps_clone);
 }

 void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
...

 	return machine__find_thread(machine, thread->pid_, thread->pid_);
 }
+
+int thread__memcpy(struct thread *thread, struct machine *machine,
+		   void *buf, u64 ip, int len, bool *is64bit)
+{
+	u8 cpumode = PERF_RECORD_MISC_USER;
+	struct addr_location al;
+	long offset;
+
+	if (machine__kernel_ip(machine, ip))
+		cpumode = PERF_RECORD_MISC_KERNEL;
+
+	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso ||
+	    al.map->dso->data.status == DSO_DATA_STATUS_ERROR ||
+	    map__load(al.map) < 0)
+		return -1;
+
+	offset = al.map->map_ip(al.map, ip);
+	if (is64bit)
+		*is64bit = al.map->dso->is_64_bit;
+
+	return dso__data_read_offset(al.map->dso, machine, offset, buf, len);
+}
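thread__memcpy() above resolves ip to a map, rebases it into the DSO's own address space with map->map_ip(), and then reads bytes at that offset. A minimal model of the rebasing step (field names are illustrative, not perf's exact struct map layout):

#include <stdio.h>

/* A map covers [start, end) of the virtual address space; map_ip()
 * translates an instruction pointer into an offset inside the DSO. */
struct map_model {
	unsigned long start, end;	/* mapped virtual range  */
	unsigned long pgoff;		/* offset within the DSO */
};

static unsigned long map_ip(const struct map_model *m, unsigned long ip)
{
	return ip - m->start + m->pgoff;
}

int main(void)
{
	struct map_model m = { 0x400000, 0x500000, 0x1000 };
	unsigned long ip = 0x401234;

	if (ip >= m.start && ip < m.end)
		printf("ip %#lx -> dso offset %#lx\n", ip, map_ip(&m, ip));
	return 0;
}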
+
+void thread__free_stitch_list(struct thread *thread)
+{
+	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+	struct stitch_list *pos, *tmp;
+
+	if (!lbr_stitch)
+		return;
+
+	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
+		list_del_init(&pos->node);
+		free(pos);
+	}
+
+	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
+		list_del_init(&pos->node);
+		free(pos);
+	}
+
+	zfree(&lbr_stitch->prev_lbr_cursor);
+	zfree(&thread->lbr_stitch);
+}
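zfree(), pulled in via the new <linux/zalloc.h> include, frees an object through a pointer-to-pointer and NULLs the caller's pointer, which keeps teardown paths like thread__free_stitch_list() idempotent. A sketch of the semantics (zfree_model is a hypothetical macro; the real helper funnels through a void ** wrapper function):

#include <stdlib.h>

/* Free the object and clear the caller's pointer in one step, so a
 * repeated teardown cannot double-free. */
#define zfree_model(pp)			\
	do {				\
		free(*(pp));		\
		*(pp) = NULL;		\
	} while (0)

int main(void)
{
	char *p = malloc(16);

	zfree_model(&p);	/* frees and leaves p == NULL         */
	zfree_model(&p);	/* second call is a harmless free(NULL) */
	return p == NULL ? 0 : 1;
}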