.. | .. |
12 | 12 | #define pr_fmt(fmt) fmt
13 | 13 |
14 | 14 | #include <linux/workqueue.h>
| 15 | +#include <linux/security.h>
15 | 16 | #include <linux/spinlock.h>
16 | 17 | #include <linux/kthread.h>
17 | 18 | #include <linux/tracefs.h>
.. | .. |
23 | 24 | #include <linux/delay.h>
24 | 25 |
25 | 26 | #include <trace/events/sched.h>
| 27 | +#include <trace/syscall.h>
26 | 28 |
27 | 29 | #include <asm/setup.h>
28 | 30 |
.. | .. |
36 | 38 | LIST_HEAD(ftrace_events);
37 | 39 | static LIST_HEAD(ftrace_generic_fields);
38 | 40 | static LIST_HEAD(ftrace_common_fields);
| 41 | +static bool eventdir_initialized;
39 | 42 |
40 | 43 | #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
41 | 44 |
.. | .. |
69 | 72 |
70 | 73 | #define while_for_each_event_file()		\
71 | 74 | 	}
72 | | -
73 | | -static struct list_head *
74 | | -trace_get_fields(struct trace_event_call *event_call)
75 | | -{
76 | | -	if (!event_call->class->get_fields)
77 | | -		return &event_call->class->fields;
78 | | -	return event_call->class->get_fields(event_call);
79 | | -}
80 | 75 |
81 | 76 | static struct ftrace_event_field *
82 | 77 | __find_event_field(struct list_head *head, char *name)
.. | .. |
173 | 168 |
174 | 169 | 	__generic_field(int, CPU, FILTER_CPU);
175 | 170 | 	__generic_field(int, cpu, FILTER_CPU);
| 171 | +	__generic_field(int, common_cpu, FILTER_CPU);
176 | 172 | 	__generic_field(char *, COMM, FILTER_COMM);
177 | 173 | 	__generic_field(char *, comm, FILTER_COMM);
178 | 174 |
.. | .. |
188 | 184 | 	__common_field(unsigned char, flags);
189 | 185 | 	__common_field(unsigned char, preempt_count);
190 | 186 | 	__common_field(int, pid);
191 | | -	__common_field(unsigned char, migrate_disable);
192 | | -	__common_field(unsigned char, preempt_lazy_count);
193 | 187 |
194 | 188 | 	return ret;
195 | 189 | }
.. | .. |
240 | 234 | {
241 | 235 | 	struct trace_array *tr = trace_file->tr;
242 | 236 | 	struct trace_array_cpu *data;
| 237 | +	struct trace_pid_list *no_pid_list;
243 | 238 | 	struct trace_pid_list *pid_list;
244 | 239 |
245 | 240 | 	pid_list = rcu_dereference_raw(tr->filtered_pids);
246 | | -	if (!pid_list)
| 241 | +	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);
| 242 | +
| 243 | +	if (!pid_list && !no_pid_list)
247 | 244 | 		return false;
248 | 245 |
249 | | -	data = this_cpu_ptr(tr->trace_buffer.data);
| 246 | +	data = this_cpu_ptr(tr->array_buffer.data);
250 | 247 |
251 | 248 | 	return data->ignore_pid;
252 | 249 | }
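
This hunk extends the per-event PID check to two lists: filtered_pids (an allow list, written through set_event_pid) and filtered_no_pids (a deny list, written through the new set_event_notrace_pid). A minimal userspace sketch of the combined decision; struct pid_set and its lookup helper are illustrative stand-ins for the kernel's trace_pid_list, not the real API:

#include <stdbool.h>
#include <stdio.h>

struct pid_set {	/* stand-in for the kernel's trace_pid_list */
	const int *pids;
	int count;
};

static bool pid_set_contains(const struct pid_set *s, int pid)
{
	for (int i = 0; s && i < s->count; i++)
		if (s->pids[i] == pid)
			return true;
	return false;
}

/* A task is ignored when an allow list exists but does not contain it,
 * or when a deny list exists and does contain it. */
static bool ignore_task(const struct pid_set *pid_list,
			const struct pid_set *no_pid_list, int pid)
{
	if (pid_list && !pid_set_contains(pid_list, pid))
		return true;
	return no_pid_list && pid_set_contains(no_pid_list, pid);
}

int main(void)
{
	const int allowed[] = { 42 };
	const int denied[] = { 7 };
	const struct pid_set allow = { allowed, 1 };
	const struct pid_set deny = { denied, 1 };

	printf("%d %d %d\n",
	       ignore_task(&allow, NULL, 42),	/* 0: on the allow list */
	       ignore_task(&allow, NULL, 7),	/* 1: not on the allow list */
	       ignore_task(NULL, &deny, 7));	/* 1: on the deny list */
	return 0;
}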
.. | .. |
265 | 262 | 	local_save_flags(fbuffer->flags);
266 | 263 | 	fbuffer->pc = preempt_count();
267 | 264 | 	/*
268 | | -	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
| 265 | +	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
269 | 266 | 	 * preemption (adding one to the preempt_count). Since we are
270 | 267 | 	 * interested in the preempt_count at the time the tracepoint was
271 | 268 | 	 * hit, we need to subtract one to offset the increment.
272 | 269 | 	 */
273 | | -	if (IS_ENABLED(CONFIG_PREEMPT))
| 270 | +	if (IS_ENABLED(CONFIG_PREEMPTION))
274 | 271 | 		fbuffer->pc--;
275 | 272 | 	fbuffer->trace_file = trace_file;
276 | 273 |
.. | .. |
281 | 278 | 	if (!fbuffer->event)
282 | 279 | 		return NULL;
283 | 280 |
| 281 | +	fbuffer->regs = NULL;
284 | 282 | 	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
285 | 283 | 	return fbuffer->entry;
286 | 284 | }
.. | .. |
373 | 371 | {
374 | 372 | 	struct trace_event_call *call = file->event_call;
375 | 373 | 	struct trace_array *tr = file->tr;
376 | | -	unsigned long file_flags = file->flags;
377 | 374 | 	int ret = 0;
378 | 375 | 	int disable;
379 | 376 |
.. | .. |
397 | 394 | 				break;
398 | 395 | 			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
399 | 396 | 			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
| 397 | +			/* Disable use of trace_buffered_event */
| 398 | +			trace_buffered_event_disable();
400 | 399 | 		} else
401 | 400 | 			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
402 | 401 |
.. | .. |
435 | 434 | 			if (atomic_inc_return(&file->sm_ref) > 1)
436 | 435 | 				break;
437 | 436 | 			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
| 437 | +			/* Enable use of trace_buffered_event */
| 438 | +			trace_buffered_event_enable();
438 | 439 | 		}
439 | 440 |
440 | 441 | 		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
.. | .. |
474 | 475 | 		break;
475 | 476 | 	}
476 | 477 |
477 | | -	/* Enable or disable use of trace_buffered_event */
478 | | -	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
479 | | -	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
480 | | -		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
481 | | -			trace_buffered_event_enable();
482 | | -		else
483 | | -			trace_buffered_event_disable();
484 | | -	}
485 | | -
486 | 478 | 	return ret;
487 | 479 | }
488 | 480 |
.. | .. |
517 | 509 |
518 | 510 | 	pid_list = rcu_dereference_raw(tr->filtered_pids);
519 | 511 | 	trace_filter_add_remove_task(pid_list, NULL, task);
| 512 | +
| 513 | +	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
| 514 | +	trace_filter_add_remove_task(pid_list, NULL, task);
520 | 515 | }
521 | 516 |
522 | 517 | static void
.. | .. |
528 | 523 | 	struct trace_array *tr = data;
529 | 524 |
530 | 525 | 	pid_list = rcu_dereference_sched(tr->filtered_pids);
| 526 | +	trace_filter_add_remove_task(pid_list, self, task);
| 527 | +
| 528 | +	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
531 | 529 | 	trace_filter_add_remove_task(pid_list, self, task);
532 | 530 | }
533 | 531 |
.. | .. |
551 | 549 | 		    struct task_struct *prev, struct task_struct *next)
552 | 550 | {
553 | 551 | 	struct trace_array *tr = data;
| 552 | +	struct trace_pid_list *no_pid_list;
554 | 553 | 	struct trace_pid_list *pid_list;
| 554 | +	bool ret;
555 | 555 |
556 | 556 | 	pid_list = rcu_dereference_sched(tr->filtered_pids);
| 557 | +	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
557 | 558 |
558 | | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,
559 | | -		       trace_ignore_this_task(pid_list, prev) &&
560 | | -		       trace_ignore_this_task(pid_list, next));
| 559 | +	/*
| 560 | +	 * Sched switch is funny, as we only want to ignore it
| 561 | +	 * in the notrace case if both prev and next should be ignored.
| 562 | +	 */
| 563 | +	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
| 564 | +	      trace_ignore_this_task(NULL, no_pid_list, next);
| 565 | +
| 566 | +	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
| 567 | +		       (trace_ignore_this_task(pid_list, NULL, prev) &&
| 568 | +			trace_ignore_this_task(pid_list, NULL, next)));
561 | 569 | }
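
The comment in the hunk above is terse, so here is the same decision as a pure function. This is a hedged model only; the real trace_ignore_this_task() lives in kernel/trace/trace.c and is not part of this diff:

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of the sched_switch decision: with a deny list the event is
 * ignored only when *both* prev and next are denied; with an allow
 * list, only when *neither* task is allowed.
 */
static bool ignore_sched_switch(bool prev_denied, bool next_denied,
				bool prev_not_allowed, bool next_not_allowed)
{
	return (prev_denied && next_denied) ||
	       (prev_not_allowed && next_not_allowed);
}

int main(void)
{
	/* prev denied but next traced: the switch is still recorded */
	printf("%d\n", ignore_sched_switch(true, false, false, false)); /* 0 */
	/* both sides denied: the switch is ignored */
	printf("%d\n", ignore_sched_switch(true, true, false, false));	 /* 1 */
	return 0;
}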
562 | 570 |
563 | 571 | static void
.. | .. |
565 | 573 | 		    struct task_struct *prev, struct task_struct *next)
566 | 574 | {
567 | 575 | 	struct trace_array *tr = data;
| 576 | +	struct trace_pid_list *no_pid_list;
568 | 577 | 	struct trace_pid_list *pid_list;
569 | 578 |
570 | 579 | 	pid_list = rcu_dereference_sched(tr->filtered_pids);
| 580 | +	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
571 | 581 |
572 | | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,
573 | | -		       trace_ignore_this_task(pid_list, next));
| 582 | +	this_cpu_write(tr->array_buffer.data->ignore_pid,
| 583 | +		       trace_ignore_this_task(pid_list, no_pid_list, next));
574 | 584 | }
575 | 585 |
576 | 586 | static void
577 | 587 | event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
578 | 588 | {
579 | 589 | 	struct trace_array *tr = data;
| 590 | +	struct trace_pid_list *no_pid_list;
580 | 591 | 	struct trace_pid_list *pid_list;
581 | 592 |
582 | 593 | 	/* Nothing to do if we are already tracing */
583 | | -	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
| 594 | +	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
584 | 595 | 		return;
585 | 596 |
586 | 597 | 	pid_list = rcu_dereference_sched(tr->filtered_pids);
| 598 | +	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
587 | 599 |
588 | | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,
589 | | -		       trace_ignore_this_task(pid_list, task));
| 600 | +	this_cpu_write(tr->array_buffer.data->ignore_pid,
| 601 | +		       trace_ignore_this_task(pid_list, no_pid_list, task));
590 | 602 | }
591 | 603 |
592 | 604 | static void
593 | 605 | event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
594 | 606 | {
595 | 607 | 	struct trace_array *tr = data;
| 608 | +	struct trace_pid_list *no_pid_list;
596 | 609 | 	struct trace_pid_list *pid_list;
597 | 610 |
598 | 611 | 	/* Nothing to do if we are not tracing */
599 | | -	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
| 612 | +	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
600 | 613 | 		return;
601 | 614 |
602 | 615 | 	pid_list = rcu_dereference_sched(tr->filtered_pids);
| 616 | +	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
603 | 617 |
604 | 618 | 	/* Set tracing if current is enabled */
605 | | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,
606 | | -		       trace_ignore_this_task(pid_list, current));
| 619 | +	this_cpu_write(tr->array_buffer.data->ignore_pid,
| 620 | +		       trace_ignore_this_task(pid_list, no_pid_list, current));
607 | 621 | }
608 | 622 |
609 | | -static void __ftrace_clear_event_pids(struct trace_array *tr)
| 623 | +static void unregister_pid_events(struct trace_array *tr)
610 | 624 | {
611 | | -	struct trace_pid_list *pid_list;
612 | | -	struct trace_event_file *file;
613 | | -	int cpu;
614 | | -
615 | | -	pid_list = rcu_dereference_protected(tr->filtered_pids,
616 | | -					     lockdep_is_held(&event_mutex));
617 | | -	if (!pid_list)
618 | | -		return;
619 | | -
620 | 625 | 	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
621 | 626 | 	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
622 | 627 |
.. | .. |
628 | 633 |
629 | 634 | 	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
630 | 635 | 	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
| 636 | +}
631 | 637 |
632 | | -	list_for_each_entry(file, &tr->events, list) {
633 | | -		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
| 638 | +static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
| 639 | +{
| 640 | +	struct trace_pid_list *pid_list;
| 641 | +	struct trace_pid_list *no_pid_list;
| 642 | +	struct trace_event_file *file;
| 643 | +	int cpu;
| 644 | +
| 645 | +	pid_list = rcu_dereference_protected(tr->filtered_pids,
| 646 | +					     lockdep_is_held(&event_mutex));
| 647 | +	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
| 648 | +					     lockdep_is_held(&event_mutex));
| 649 | +
| 650 | +	/* Make sure there's something to do */
| 651 | +	if (!pid_type_enabled(type, pid_list, no_pid_list))
| 652 | +		return;
| 653 | +
| 654 | +	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
| 655 | +		unregister_pid_events(tr);
| 656 | +
| 657 | +		list_for_each_entry(file, &tr->events, list) {
| 658 | +			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
| 659 | +		}
| 660 | +
| 661 | +		for_each_possible_cpu(cpu)
| 662 | +			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
634 | 663 | 	}
635 | 664 |
636 | | -	for_each_possible_cpu(cpu)
637 | | -		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
| 665 | +	if (type & TRACE_PIDS)
| 666 | +		rcu_assign_pointer(tr->filtered_pids, NULL);
638 | 667 |
639 | | -	rcu_assign_pointer(tr->filtered_pids, NULL);
| 668 | +	if (type & TRACE_NO_PIDS)
| 669 | +		rcu_assign_pointer(tr->filtered_no_pids, NULL);
640 | 670 |
641 | 671 | 	/* Wait till all users are no longer using pid filtering */
642 | 672 | 	tracepoint_synchronize_unregister();
643 | 673 |
644 | | -	trace_free_pid_list(pid_list);
| 674 | +	if ((type & TRACE_PIDS) && pid_list)
| 675 | +		trace_free_pid_list(pid_list);
| 676 | +
| 677 | +	if ((type & TRACE_NO_PIDS) && no_pid_list)
| 678 | +		trace_free_pid_list(no_pid_list);
645 | 679 | }
646 | 680 |
647 | | -static void ftrace_clear_event_pids(struct trace_array *tr)
| 681 | +static void ftrace_clear_event_pids(struct trace_array *tr, int type)
648 | 682 | {
649 | 683 | 	mutex_lock(&event_mutex);
650 | | -	__ftrace_clear_event_pids(tr);
| 684 | +	__ftrace_clear_event_pids(tr, type);
651 | 685 | 	mutex_unlock(&event_mutex);
652 | 686 | }
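
__ftrace_clear_event_pids() now takes a type mask, so truncating set_event_pid clears only the allow list while truncating set_event_notrace_pid clears only the deny list. TRACE_PIDS, TRACE_NO_PIDS and the two helpers used above are defined in kernel/trace/trace.h; the sketch below reconstructs their intent under assumed bit values and is not a verbatim copy:

#include <stdbool.h>

#define TRACE_PIDS	(1 << 0)	/* assumed values; see kernel/trace/trace.h */
#define TRACE_NO_PIDS	(1 << 1)

struct trace_pid_list;			/* opaque for this sketch */

/* Is there a list of the kind selected by @type to operate on? */
static bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
			     struct trace_pid_list *no_pid_list)
{
	return ((type & TRACE_PIDS) && pid_list) ||
	       ((type & TRACE_NO_PIDS) && no_pid_list);
}

/* After clearing the lists named in @type, does the other kind remain,
 * so the sched probes must stay registered? */
static bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
				  struct trace_pid_list *no_pid_list)
{
	return (!(type & TRACE_PIDS) && pid_list) ||
	       (!(type & TRACE_NO_PIDS) && no_pid_list);
}

int main(void)
{
	struct trace_pid_list *allow = (struct trace_pid_list *)1; /* toy non-NULL */

	/* clearing only the allow list: there is work, and no deny list remains */
	return !(pid_type_enabled(TRACE_PIDS, allow, 0) &&
		 !still_need_pid_events(TRACE_PIDS, allow, 0));
}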
653 | 687 |
.. | .. |
706 | 740 | 		return;
707 | 741 |
708 | 742 | 	if (!--dir->nr_events) {
709 | | -		tracefs_remove_recursive(dir->entry);
| 743 | +		tracefs_remove(dir->entry);
710 | 744 | 		list_del(&dir->list);
711 | 745 | 		__put_system_dir(dir);
712 | 746 | 	}
.. | .. |
725 | 759 | 		}
726 | 760 | 		spin_unlock(&dir->d_lock);
727 | 761 |
728 | | -		tracefs_remove_recursive(dir);
| 762 | +		tracefs_remove(dir);
729 | 763 | 	}
730 | 764 |
731 | 765 | 	list_del(&file->list);
.. | .. |
797 | 831 | 	return ret;
798 | 832 | }
799 | 833 |
800 | | -static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
| 834 | +int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
801 | 835 | {
802 | 836 | 	char *event = NULL, *sub = NULL, *match;
803 | 837 | 	int ret;
.. | .. |
859 | 893 | 	return __ftrace_set_clr_event(tr, NULL, system, event, set);
860 | 894 | }
861 | 895 | EXPORT_SYMBOL_GPL(trace_set_clr_event);
| 896 | +
| 897 | +/**
| 898 | + * trace_array_set_clr_event - enable or disable an event for a trace array.
| 899 | + * @tr: concerned trace array.
| 900 | + * @system: system name to match (NULL for any system)
| 901 | + * @event: event name to match (NULL for all events, within system)
| 902 | + * @enable: true to enable, false to disable
| 903 | + *
| 904 | + * This is a way for other parts of the kernel to enable or disable
| 905 | + * event recording.
| 906 | + *
| 907 | + * Returns 0 on success, -EINVAL if the parameters do not match any
| 908 | + * registered events.
| 909 | + */
| 910 | +int trace_array_set_clr_event(struct trace_array *tr, const char *system,
| 911 | +		const char *event, bool enable)
| 912 | +{
| 913 | +	int set;
| 914 | +
| 915 | +	if (!tr)
| 916 | +		return -ENOENT;
| 917 | +
| 918 | +	set = (enable == true) ? 1 : 0;
| 919 | +	return __ftrace_set_clr_event(tr, NULL, system, event, set);
| 920 | +}
| 921 | +EXPORT_SYMBOL_GPL(trace_array_set_clr_event);
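
With ftrace_set_clr_event() un-staticed and trace_array_set_clr_event() exported, other kernel code can flip events in a given instance. A hedged sketch of a caller; the instance name is hypothetical, and trace_array_get_by_name()/trace_array_put() are assumed to come from <linux/trace.h>:

#include <linux/trace.h>		/* assumed home of trace_array_get_by_name() */
#include <linux/trace_events.h>	/* assumed home of trace_array_set_clr_event() */

static int enable_sched_switch_in_instance(void)
{
	struct trace_array *tr;
	int ret;

	tr = trace_array_get_by_name("my_instance");	/* hypothetical instance */
	if (!tr)
		return -ENOENT;

	/* enable sched:sched_switch for that instance only */
	ret = trace_array_set_clr_event(tr, "sched", "sched_switch", true);

	trace_array_put(tr);	/* drop the reference taken above */
	return ret;
}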
862 | 922 |
863 | 923 | /* 128 should be much more than enough */
864 | 924 | #define EVENT_BUF_SIZE		127
.. | .. |
994 | 1054 | }
995 | 1055 |
996 | 1056 | static void *
997 | | -p_next(struct seq_file *m, void *v, loff_t *pos)
| 1057 | +__next(struct seq_file *m, void *v, loff_t *pos, int type)
998 | 1058 | {
999 | 1059 | 	struct trace_array *tr = m->private;
1000 | | -	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
| 1060 | +	struct trace_pid_list *pid_list;
| 1061 | +
| 1062 | +	if (type == TRACE_PIDS)
| 1063 | +		pid_list = rcu_dereference_sched(tr->filtered_pids);
| 1064 | +	else
| 1065 | +		pid_list = rcu_dereference_sched(tr->filtered_no_pids);
1001 | 1066 |
1002 | 1067 | 	return trace_pid_next(pid_list, v, pos);
1003 | 1068 | }
1004 | 1069 |
1005 | | -static void *p_start(struct seq_file *m, loff_t *pos)
| 1070 | +static void *
| 1071 | +p_next(struct seq_file *m, void *v, loff_t *pos)
| 1072 | +{
| 1073 | +	return __next(m, v, pos, TRACE_PIDS);
| 1074 | +}
| 1075 | +
| 1076 | +static void *
| 1077 | +np_next(struct seq_file *m, void *v, loff_t *pos)
| 1078 | +{
| 1079 | +	return __next(m, v, pos, TRACE_NO_PIDS);
| 1080 | +}
| 1081 | +
| 1082 | +static void *__start(struct seq_file *m, loff_t *pos, int type)
1006 | 1083 | 	__acquires(RCU)
1007 | 1084 | {
1008 | 1085 | 	struct trace_pid_list *pid_list;
.. | .. |
1017 | 1094 | 	mutex_lock(&event_mutex);
1018 | 1095 | 	rcu_read_lock_sched();
1019 | 1096 |
1020 | | -	pid_list = rcu_dereference_sched(tr->filtered_pids);
| 1097 | +	if (type == TRACE_PIDS)
| 1098 | +		pid_list = rcu_dereference_sched(tr->filtered_pids);
| 1099 | +	else
| 1100 | +		pid_list = rcu_dereference_sched(tr->filtered_no_pids);
1021 | 1101 |
1022 | 1102 | 	if (!pid_list)
1023 | 1103 | 		return NULL;
1024 | 1104 |
1025 | 1105 | 	return trace_pid_start(pid_list, pos);
| 1106 | +}
| 1107 | +
| 1108 | +static void *p_start(struct seq_file *m, loff_t *pos)
| 1109 | +	__acquires(RCU)
| 1110 | +{
| 1111 | +	return __start(m, pos, TRACE_PIDS);
| 1112 | +}
| 1113 | +
| 1114 | +static void *np_start(struct seq_file *m, loff_t *pos)
| 1115 | +	__acquires(RCU)
| 1116 | +{
| 1117 | +	return __start(m, pos, TRACE_NO_PIDS);
| 1118 | +}
1026 | 1118 | }
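
The p_*/np_* split above is the usual seq_file pattern for two files sharing one iterator: __start()/__next() take a type selector and thin wrappers bind each file to one list (the matching seq_operations tables appear further down in this diff). A compact userspace analogue of the same shape, with made-up pid values:

#include <stdio.h>

enum pid_seq { SEQ_PIDS, SEQ_NO_PIDS };

static const int allow_pids[] = { 42, 43, 0 };	/* zero-terminated */
static const int deny_pids[]  = { 7, 0 };

/* shared iterator, parameterized by which list to walk */
static const int *seq_start(enum pid_seq type)
{
	return type == SEQ_PIDS ? allow_pids : deny_pids;
}

/* thin wrappers, one per virtual file */
static const int *p_start(void)  { return seq_start(SEQ_PIDS); }
static const int *np_start(void) { return seq_start(SEQ_NO_PIDS); }

int main(void)
{
	for (const int *p = p_start(); *p; p++)
		printf("set_event_pid: %d\n", *p);
	for (const int *p = np_start(); *p; p++)
		printf("set_event_notrace_pid: %d\n", *p);
	return 0;
}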
1027 | 1119 |
1028 | 1120 | static void p_stop(struct seq_file *m, void *p)
.. | .. |
1256 | 1348 | 	 */
1257 | 1349 | 	array_descriptor = strchr(field->type, '[');
1258 | 1350 |
1259 | | -	if (!strncmp(field->type, "__data_loc", 10))
| 1351 | +	if (str_has_prefix(field->type, "__data_loc"))
1260 | 1352 | 		array_descriptor = NULL;
1261 | 1353 |
1262 | 1354 | 	if (!array_descriptor)
.. | .. |
1305 | 1397 | {
1306 | 1398 | 	struct seq_file *m;
1307 | 1399 | 	int ret;
| 1400 | +
| 1401 | +	/* Do we want to hide event format files on tracefs lockdown? */
1308 | 1402 |
1309 | 1403 | 	ret = seq_open(file, &trace_format_seq_ops);
1310 | 1404 | 	if (ret < 0)
.. | .. |
1452 | 1546 | 	struct trace_array *tr = inode->i_private;
1453 | 1547 | 	int ret;
1454 | 1548 |
1455 | | -	if (tracing_is_disabled())
1456 | | -		return -ENODEV;
1457 | | -
1458 | | -	if (trace_array_get(tr) < 0)
1459 | | -		return -ENODEV;
1460 | | -
1461 | 1549 | 	/* Make a temporary dir that has no system but points to tr */
1462 | 1550 | 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1463 | | -	if (!dir) {
1464 | | -		trace_array_put(tr);
| 1551 | +	if (!dir)
1465 | 1552 | 		return -ENOMEM;
1466 | | -	}
1467 | 1553 |
1468 | | -	dir->tr = tr;
1469 | | -
1470 | | -	ret = tracing_open_generic(inode, filp);
| 1554 | +	ret = tracing_open_generic_tr(inode, filp);
1471 | 1555 | 	if (ret < 0) {
1472 | | -		trace_array_put(tr);
1473 | 1556 | 		kfree(dir);
1474 | 1557 | 		return ret;
1475 | 1558 | 	}
1476 | | -
| 1559 | +	dir->tr = tr;
1477 | 1560 | 	filp->private_data = dir;
1478 | 1561 |
1479 | 1562 | 	return 0;
.. | .. |
1579 | 1662 | {
1580 | 1663 | 	struct trace_array *tr = data;
1581 | 1664 | 	struct trace_pid_list *pid_list;
| 1665 | +	struct trace_pid_list *no_pid_list;
1582 | 1666 |
1583 | 1667 | 	/*
1584 | 1668 | 	 * This function is called by on_each_cpu() while the
.. | .. |
1586 | 1670 | 	 */
1587 | 1671 | 	pid_list = rcu_dereference_protected(tr->filtered_pids,
1588 | 1672 | 					     mutex_is_locked(&event_mutex));
| 1673 | +	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
| 1674 | +					     mutex_is_locked(&event_mutex));
1589 | 1675 |
1590 | | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,
1591 | | -		       trace_ignore_this_task(pid_list, current));
| 1676 | +	this_cpu_write(tr->array_buffer.data->ignore_pid,
| 1677 | +		       trace_ignore_this_task(pid_list, no_pid_list, current));
| 1678 | +}
| 1679 | +
| 1680 | +static void register_pid_events(struct trace_array *tr)
| 1681 | +{
| 1682 | +	/*
| 1683 | +	 * Register a probe that is called before all other probes
| 1684 | +	 * to set ignore_pid if next or prev do not match.
| 1685 | +	 * Register a probe this is called after all other probes
| 1686 | +	 * to only keep ignore_pid set if next pid matches.
| 1687 | +	 */
| 1688 | +	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
| 1689 | +					 tr, INT_MAX);
| 1690 | +	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
| 1691 | +					 tr, 0);
| 1692 | +
| 1693 | +	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
| 1694 | +					 tr, INT_MAX);
| 1695 | +	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
| 1696 | +					 tr, 0);
| 1697 | +
| 1698 | +	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
| 1699 | +					     tr, INT_MAX);
| 1700 | +	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
| 1701 | +					     tr, 0);
| 1702 | +
| 1703 | +	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
| 1704 | +					 tr, INT_MAX);
| 1705 | +	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
| 1706 | +					 tr, 0);
1592 | 1707 | }
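
register_pid_events() factors out the existing double-registration trick: the "pre" probe at priority INT_MAX runs before any event probe and the "post" probe at priority 0 runs after them, so ignore_pid is set before events fire and corrected afterwards. A toy model of priority-ordered dispatch (tracepoints invoke probes in descending priority order):

#include <stdio.h>
#include <stddef.h>
#include <limits.h>

struct probe { const char *name; int prio; };

int main(void)
{
	/* already sorted by descending priority, the order the
	 * tracepoint core would use when calling them */
	const struct probe probes[] = {
		{ "pid_filter_pre",  INT_MAX },	/* sets ignore_pid first */
		{ "event_callback",  10 },	/* hypothetical event probe */
		{ "pid_filter_post", 0 },	/* re-checks ignore_pid last */
	};

	for (size_t i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
		printf("%11d  %s\n", probes[i].prio, probes[i].name);
	return 0;
}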
1593 | 1708 |
1594 | 1709 | static ssize_t
1595 | | -ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
1596 | | -		       size_t cnt, loff_t *ppos)
| 1710 | +event_pid_write(struct file *filp, const char __user *ubuf,
| 1711 | +		size_t cnt, loff_t *ppos, int type)
1597 | 1712 | {
1598 | 1713 | 	struct seq_file *m = filp->private_data;
1599 | 1714 | 	struct trace_array *tr = m->private;
1600 | 1715 | 	struct trace_pid_list *filtered_pids = NULL;
| 1716 | +	struct trace_pid_list *other_pids = NULL;
1601 | 1717 | 	struct trace_pid_list *pid_list;
1602 | 1718 | 	struct trace_event_file *file;
1603 | 1719 | 	ssize_t ret;
.. | .. |
1611 | 1727 |
1612 | 1728 | 	mutex_lock(&event_mutex);
1613 | 1729 |
1614 | | -	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
1615 | | -					     lockdep_is_held(&event_mutex));
| 1730 | +	if (type == TRACE_PIDS) {
| 1731 | +		filtered_pids = rcu_dereference_protected(tr->filtered_pids,
| 1732 | +					     lockdep_is_held(&event_mutex));
| 1733 | +		other_pids = rcu_dereference_protected(tr->filtered_no_pids,
| 1734 | +					     lockdep_is_held(&event_mutex));
| 1735 | +	} else {
| 1736 | +		filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
| 1737 | +					     lockdep_is_held(&event_mutex));
| 1738 | +		other_pids = rcu_dereference_protected(tr->filtered_pids,
| 1739 | +					     lockdep_is_held(&event_mutex));
| 1740 | +	}
1616 | 1741 |
1617 | 1742 | 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
1618 | 1743 | 	if (ret < 0)
1619 | 1744 | 		goto out;
1620 | 1745 |
1621 | | -	rcu_assign_pointer(tr->filtered_pids, pid_list);
| 1746 | +	if (type == TRACE_PIDS)
| 1747 | +		rcu_assign_pointer(tr->filtered_pids, pid_list);
| 1748 | +	else
| 1749 | +		rcu_assign_pointer(tr->filtered_no_pids, pid_list);
1622 | 1750 |
1623 | 1751 | 	list_for_each_entry(file, &tr->events, list) {
1624 | 1752 | 		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
.. | .. |
1627 | 1755 | 	if (filtered_pids) {
1628 | 1756 | 		tracepoint_synchronize_unregister();
1629 | 1757 | 		trace_free_pid_list(filtered_pids);
1630 | | -	} else if (pid_list) {
1631 | | -		/*
1632 | | -		 * Register a probe that is called before all other probes
1633 | | -		 * to set ignore_pid if next or prev do not match.
1634 | | -		 * Register a probe this is called after all other probes
1635 | | -		 * to only keep ignore_pid set if next pid matches.
1636 | | -		 */
1637 | | -		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1638 | | -						 tr, INT_MAX);
1639 | | -		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1640 | | -						 tr, 0);
1641 | | -
1642 | | -		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1643 | | -						 tr, INT_MAX);
1644 | | -		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1645 | | -						 tr, 0);
1646 | | -
1647 | | -		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1648 | | -						     tr, INT_MAX);
1649 | | -		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1650 | | -						     tr, 0);
1651 | | -
1652 | | -		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1653 | | -						 tr, INT_MAX);
1654 | | -		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1655 | | -						 tr, 0);
| 1758 | +	} else if (pid_list && !other_pids) {
| 1759 | +		register_pid_events(tr);
1656 | 1760 | 	}
1657 | 1761 |
1658 | 1762 | 	/*
.. | .. |
1671 | 1775 | 	return ret;
1672 | 1776 | }
1673 | 1777 |
| 1778 | +static ssize_t
| 1779 | +ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
| 1780 | +		       size_t cnt, loff_t *ppos)
| 1781 | +{
| 1782 | +	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
| 1783 | +}
| 1784 | +
| 1785 | +static ssize_t
| 1786 | +ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
| 1787 | +			size_t cnt, loff_t *ppos)
| 1788 | +{
| 1789 | +	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
| 1790 | +}
| 1791 | +
1674 | 1792 | static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1675 | 1793 | static int ftrace_event_set_open(struct inode *inode, struct file *file);
1676 | 1794 | static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
| 1795 | +static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
1677 | 1796 | static int ftrace_event_release(struct inode *inode, struct file *file);
1678 | 1797 |
1679 | 1798 | static const struct seq_operations show_event_seq_ops = {
.. | .. |
1693 | 1812 | static const struct seq_operations show_set_pid_seq_ops = {
1694 | 1813 | 	.start = p_start,
1695 | 1814 | 	.next = p_next,
| 1815 | +	.show = trace_pid_show,
| 1816 | +	.stop = p_stop,
| 1817 | +};
| 1818 | +
| 1819 | +static const struct seq_operations show_set_no_pid_seq_ops = {
| 1820 | +	.start = np_start,
| 1821 | +	.next = np_next,
1696 | 1822 | 	.show = trace_pid_show,
1697 | 1823 | 	.stop = p_stop,
1698 | 1824 | };
.. | .. |
1720 | 1846 | 	.release = ftrace_event_release,
1721 | 1847 | };
1722 | 1848 |
| 1849 | +static const struct file_operations ftrace_set_event_notrace_pid_fops = {
| 1850 | +	.open = ftrace_event_set_npid_open,
| 1851 | +	.read = seq_read,
| 1852 | +	.write = ftrace_event_npid_write,
| 1853 | +	.llseek = seq_lseek,
| 1854 | +	.release = ftrace_event_release,
| 1855 | +};
| 1856 | +
1723 | 1857 | static const struct file_operations ftrace_enable_fops = {
1724 | | -	.open = tracing_open_generic,
| 1858 | +	.open = tracing_open_file_tr,
1725 | 1859 | 	.read = event_enable_read,
1726 | 1860 | 	.write = event_enable_write,
| 1861 | +	.release = tracing_release_file_tr,
1727 | 1862 | 	.llseek = default_llseek,
1728 | 1863 | };
1729 | 1864 |
.. | .. |
1740 | 1875 | };
1741 | 1876 |
1742 | 1877 | static const struct file_operations ftrace_event_filter_fops = {
1743 | | -	.open = tracing_open_generic,
| 1878 | +	.open = tracing_open_file_tr,
1744 | 1879 | 	.read = event_filter_read,
1745 | 1880 | 	.write = event_filter_write,
| 1881 | +	.release = tracing_release_file_tr,
1746 | 1882 | 	.llseek = default_llseek,
1747 | 1883 | };
1748 | 1884 |
.. | .. |
1783 | 1919 | 	struct seq_file *m;
1784 | 1920 | 	int ret;
1785 | 1921 |
| 1922 | +	ret = security_locked_down(LOCKDOWN_TRACEFS);
| 1923 | +	if (ret)
| 1924 | +		return ret;
| 1925 | +
1786 | 1926 | 	ret = seq_open(file, seq_ops);
1787 | 1927 | 	if (ret < 0)
1788 | 1928 | 		return ret;
.. | .. |
1807 | 1947 | {
1808 | 1948 | 	const struct seq_operations *seq_ops = &show_event_seq_ops;
1809 | 1949 |
| 1950 | +	/* Checks for tracefs lockdown */
1810 | 1951 | 	return ftrace_event_open(inode, file, seq_ops);
1811 | 1952 | }
1812 | 1953 |
.. | .. |
1817 | 1958 | 	struct trace_array *tr = inode->i_private;
1818 | 1959 | 	int ret;
1819 | 1960 |
1820 | | -	if (trace_array_get(tr) < 0)
1821 | | -		return -ENODEV;
| 1961 | +	ret = tracing_check_open_get_tr(tr);
| 1962 | +	if (ret)
| 1963 | +		return ret;
1822 | 1964 |
1823 | 1965 | 	if ((file->f_mode & FMODE_WRITE) &&
1824 | 1966 | 	    (file->f_flags & O_TRUNC))
.. | .. |
1837 | 1979 | 	struct trace_array *tr = inode->i_private;
1838 | 1980 | 	int ret;
1839 | 1981 |
1840 | | -	if (trace_array_get(tr) < 0)
1841 | | -		return -ENODEV;
| 1982 | +	ret = tracing_check_open_get_tr(tr);
| 1983 | +	if (ret)
| 1984 | +		return ret;
1842 | 1985 |
1843 | 1986 | 	if ((file->f_mode & FMODE_WRITE) &&
1844 | 1987 | 	    (file->f_flags & O_TRUNC))
1845 | | -		ftrace_clear_event_pids(tr);
| 1988 | +		ftrace_clear_event_pids(tr, TRACE_PIDS);
| 1989 | +
| 1990 | +	ret = ftrace_event_open(inode, file, seq_ops);
| 1991 | +	if (ret < 0)
| 1992 | +		trace_array_put(tr);
| 1993 | +	return ret;
| 1994 | +}
| 1995 | +
| 1996 | +static int
| 1997 | +ftrace_event_set_npid_open(struct inode *inode, struct file *file)
| 1998 | +{
| 1999 | +	const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
| 2000 | +	struct trace_array *tr = inode->i_private;
| 2001 | +	int ret;
| 2002 | +
| 2003 | +	ret = tracing_check_open_get_tr(tr);
| 2004 | +	if (ret)
| 2005 | +		return ret;
| 2006 | +
| 2007 | +	if ((file->f_mode & FMODE_WRITE) &&
| 2008 | +	    (file->f_flags & O_TRUNC))
| 2009 | +		ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
1846 | 2010 |
1847 | 2011 | 	ret = ftrace_event_open(inode, file, seq_ops);
1848 | 2012 | 	if (ret < 0)
.. | .. |
1959 | 2123 | }
1960 | 2124 |
1961 | 2125 | static int
| 2126 | +event_define_fields(struct trace_event_call *call)
| 2127 | +{
| 2128 | +	struct list_head *head;
| 2129 | +	int ret = 0;
| 2130 | +
| 2131 | +	/*
| 2132 | +	 * Other events may have the same class. Only update
| 2133 | +	 * the fields if they are not already defined.
| 2134 | +	 */
| 2135 | +	head = trace_get_fields(call);
| 2136 | +	if (list_empty(head)) {
| 2137 | +		struct trace_event_fields *field = call->class->fields_array;
| 2138 | +		unsigned int offset = sizeof(struct trace_entry);
| 2139 | +
| 2140 | +		for (; field->type; field++) {
| 2141 | +			if (field->type == TRACE_FUNCTION_TYPE) {
| 2142 | +				field->define_fields(call);
| 2143 | +				break;
| 2144 | +			}
| 2145 | +
| 2146 | +			offset = ALIGN(offset, field->align);
| 2147 | +			ret = trace_define_field(call, field->type, field->name,
| 2148 | +						 offset, field->size,
| 2149 | +						 field->is_signed, field->filter_type);
| 2150 | +			if (WARN_ON_ONCE(ret)) {
| 2151 | +				pr_err("error code is %d\n", ret);
| 2152 | +				break;
| 2153 | +			}
| 2154 | +
| 2155 | +			offset += field->size;
| 2156 | +		}
| 2157 | +	}
| 2158 | +
| 2159 | +	return ret;
| 2160 | +}
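
event_define_fields() replaces the old per-class define_fields() callback with a walk over class->fields_array, placing each field at its natural alignment after the common trace_entry header. A runnable sketch of that offset computation, using made-up field sizes (the real array is generated by the TRACE_EVENT() macros):

#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct field_desc {
	const char *type, *name;
	size_t size, align;
};

int main(void)
{
	/* hypothetical fields_array entries */
	const struct field_desc fields[] = {
		{ "char",          "flag", 1, 1 },
		{ "unsigned long", "addr", 8, 8 },
		{ NULL, NULL, 0, 0 },		/* array terminator */
	};
	size_t offset = 8;	/* stand-in for sizeof(struct trace_entry) */

	for (const struct field_desc *f = fields; f->type; f++) {
		offset = ALIGN(offset, f->align);	/* natural alignment */
		printf("%-13s %-5s offset=%zu size=%zu\n",
		       f->type, f->name, offset, f->size);
		offset += f->size;
	}
	return 0;
}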
| 2161 | +
| 2162 | +static int
1962 | 2163 | event_create_dir(struct dentry *parent, struct trace_event_file *file)
1963 | 2164 | {
1964 | 2165 | 	struct trace_event_call *call = file->event_call;
1965 | 2166 | 	struct trace_array *tr = file->tr;
1966 | | -	struct list_head *head;
1967 | 2167 | 	struct dentry *d_events;
1968 | 2168 | 	const char *name;
1969 | 2169 | 	int ret;
.. | .. |
1997 | 2197 | 				  &ftrace_event_id_fops);
1998 | 2198 | #endif
1999 | 2199 |
2000 | | -	/*
2001 | | -	 * Other events may have the same class. Only update
2002 | | -	 * the fields if they are not already defined.
2003 | | -	 */
2004 | | -	head = trace_get_fields(call);
2005 | | -	if (list_empty(head)) {
2006 | | -		ret = call->class->define_fields(call);
2007 | | -		if (ret < 0) {
2008 | | -			pr_warn("Could not initialize trace point events/%s\n",
2009 | | -				name);
2010 | | -			return -1;
2011 | | -		}
| 2200 | +	ret = event_define_fields(call);
| 2201 | +	if (ret < 0) {
| 2202 | +		pr_warn("Could not initialize trace point events/%s\n", name);
| 2203 | +		return ret;
2012 | 2204 | 	}
2013 | 2205 |
2014 | 2206 | 	/*
.. | .. |
2027 | 2219 | 	trace_create_file("hist", 0444, file->dir, file,
2028 | 2220 | 			  &event_hist_fops);
2029 | 2221 | #endif
| 2222 | +#ifdef CONFIG_HIST_TRIGGERS_DEBUG
| 2223 | +	trace_create_file("hist_debug", 0444, file->dir, file,
| 2224 | +			  &event_hist_debug_fops);
| 2225 | +#endif
2030 | 2226 | 	trace_create_file("format", 0444, file->dir, call,
2031 | 2227 | 			  &ftrace_event_format_fops);
| 2228 | +
| 2229 | +#ifdef CONFIG_TRACE_EVENT_INJECT
| 2230 | +	if (call->event.type && call->class->reg)
| 2231 | +		trace_create_file("inject", 0200, file->dir, file,
| 2232 | +				  &event_inject_fops);
| 2233 | +#endif
2032 | 2234 |
2033 | 2235 | 	return 0;
2034 | 2236 | }
.. | .. |
2257 | 2459 | trace_create_new_event(struct trace_event_call *call,
2258 | 2460 | 		       struct trace_array *tr)
2259 | 2461 | {
| 2462 | +	struct trace_pid_list *no_pid_list;
2260 | 2463 | 	struct trace_pid_list *pid_list;
2261 | 2464 | 	struct trace_event_file *file;
2262 | 2465 |
.. | .. |
2266 | 2469 |
2267 | 2470 | 	pid_list = rcu_dereference_protected(tr->filtered_pids,
2268 | 2471 | 					     lockdep_is_held(&event_mutex));
| 2472 | +	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
| 2473 | +					     lockdep_is_held(&event_mutex));
2269 | 2474 |
2270 | | -	if (pid_list)
| 2475 | +	if (pid_list || no_pid_list)
2271 | 2476 | 		file->flags |= EVENT_FILE_FL_PID_FILTER;
2272 | 2477 |
2273 | 2478 | 	file->event_call = call;
.. | .. |
2290 | 2495 | 	if (!file)
2291 | 2496 | 		return -ENOMEM;
2292 | 2497 |
2293 | | -	return event_create_dir(tr->event_dir, file);
| 2498 | +	if (eventdir_initialized)
| 2499 | +		return event_create_dir(tr->event_dir, file);
| 2500 | +	else
| 2501 | +		return event_define_fields(call);
2294 | 2502 | }
2295 | 2503 |
2296 | 2504 | /*
.. | .. |
2298 | 2506 |  * for enabling events at boot. We want to enable events before
2299 | 2507 |  * the filesystem is initialized.
2300 | 2508 |  */
2301 | | -static __init int
| 2509 | +static int
2302 | 2510 | __trace_early_add_new_event(struct trace_event_call *call,
2303 | 2511 | 			    struct trace_array *tr)
2304 | 2512 | {
.. | .. |
2308 | 2516 | 	if (!file)
2309 | 2517 | 		return -ENOMEM;
2310 | 2518 |
2311 | | -	return 0;
| 2519 | +	return event_define_fields(call);
2312 | 2520 | }
2313 | 2521 |
2314 | 2522 | struct ftrace_module_file_ops;
2315 | 2523 | static void __add_event_to_tracers(struct trace_event_call *call);
2316 | 2524 |
2317 | | -int trace_add_event_call_nolock(struct trace_event_call *call)
| 2525 | +/* Add an additional event_call dynamically */
| 2526 | +int trace_add_event_call(struct trace_event_call *call)
2318 | 2527 | {
2319 | 2528 | 	int ret;
2320 | 2529 | 	lockdep_assert_held(&event_mutex);
.. | .. |
2326 | 2535 | 		__add_event_to_tracers(call);
2327 | 2536 |
2328 | 2537 | 	mutex_unlock(&trace_types_lock);
2329 | | -	return ret;
2330 | | -}
2331 | | -
2332 | | -/* Add an additional event_call dynamically */
2333 | | -int trace_add_event_call(struct trace_event_call *call)
2334 | | -{
2335 | | -	int ret;
2336 | | -
2337 | | -	mutex_lock(&event_mutex);
2338 | | -	ret = trace_add_event_call_nolock(call);
2339 | | -	mutex_unlock(&event_mutex);
2340 | 2538 | 	return ret;
2341 | 2539 | }
2342 | 2540 |
.. | .. |
2370 | 2568 | 		 * TRACE_REG_UNREGISTER.
2371 | 2569 | 		 */
2372 | 2570 | 		if (file->flags & EVENT_FILE_FL_ENABLED)
2373 | | -			return -EBUSY;
| 2571 | +			goto busy;
| 2572 | +
| 2573 | +		if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
| 2574 | +			tr->clear_trace = true;
2374 | 2575 | 		/*
2375 | 2576 | 		 * The do_for_each_event_file_safe() is
2376 | 2577 | 		 * a double loop. After finding the call for this
.. | .. |
2383 | 2584 | 	__trace_remove_event_call(call);
2384 | 2585 |
2385 | 2586 | 	return 0;
| 2587 | + busy:
| 2588 | +	/* No need to clear the trace now */
| 2589 | +	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
| 2590 | +		tr->clear_trace = false;
| 2591 | +	}
| 2592 | +	return -EBUSY;
2386 | 2593 | }
2387 | 2594 |
2388 | | -/* no event_mutex version */
2389 | | -int trace_remove_event_call_nolock(struct trace_event_call *call)
| 2595 | +/* Remove an event_call */
| 2596 | +int trace_remove_event_call(struct trace_event_call *call)
2390 | 2597 | {
2391 | 2598 | 	int ret;
2392 | 2599 |
.. | .. |
2397 | 2604 | 	ret = probe_remove_event_call(call);
2398 | 2605 | 	up_write(&trace_event_sem);
2399 | 2606 | 	mutex_unlock(&trace_types_lock);
2400 | | -
2401 | | -	return ret;
2402 | | -}
2403 | | -
2404 | | -/* Remove an event_call */
2405 | | -int trace_remove_event_call(struct trace_event_call *call)
2406 | | -{
2407 | | -	int ret;
2408 | | -
2409 | | -	mutex_lock(&event_mutex);
2410 | | -	ret = trace_remove_event_call_nolock(call);
2411 | | -	mutex_unlock(&event_mutex);
2412 | 2607 |
2413 | 2608 | 	return ret;
2414 | 2609 | }
.. | .. |
2462 | 2657 | 	 * over from this module may be passed to the new module events and
2463 | 2658 | 	 * unexpected results may occur.
2464 | 2659 | 	 */
2465 | | -	tracing_reset_all_online_cpus();
| 2660 | +	tracing_reset_all_online_cpus_unlocked();
2466 | 2661 | }
2467 | 2662 |
2468 | 2663 | static int trace_module_notify(struct notifier_block *self,
.. | .. |
2483 | 2678 | 	mutex_unlock(&trace_types_lock);
2484 | 2679 | 	mutex_unlock(&event_mutex);
2485 | 2680 |
2486 | | -	return 0;
| 2681 | +	return NOTIFY_OK;
2487 | 2682 | }
2488 | 2683 |
2489 | 2684 | static struct notifier_block trace_module_nb = {
.. | .. |
2543 | 2738 |
2544 | 2739 | 	return file;
2545 | 2740 | }
| 2741 | +
| 2742 | +/**
| 2743 | + * trace_get_event_file - Find and return a trace event file
| 2744 | + * @instance: The name of the trace instance containing the event
| 2745 | + * @system: The name of the system containing the event
| 2746 | + * @event: The name of the event
| 2747 | + *
| 2748 | + * Return a trace event file given the trace instance name, trace
| 2749 | + * system, and trace event name. If the instance name is NULL, it
| 2750 | + * refers to the top-level trace array.
| 2751 | + *
| 2752 | + * This function will look it up and return it if found, after calling
| 2753 | + * trace_array_get() to prevent the instance from going away, and
| 2754 | + * increment the event's module refcount to prevent it from being
| 2755 | + * removed.
| 2756 | + *
| 2757 | + * To release the file, call trace_put_event_file(), which will call
| 2758 | + * trace_array_put() and decrement the event's module refcount.
| 2759 | + *
| 2760 | + * Return: The trace event on success, ERR_PTR otherwise.
| 2761 | + */
| 2762 | +struct trace_event_file *trace_get_event_file(const char *instance,
| 2763 | +					      const char *system,
| 2764 | +					      const char *event)
| 2765 | +{
| 2766 | +	struct trace_array *tr = top_trace_array();
| 2767 | +	struct trace_event_file *file = NULL;
| 2768 | +	int ret = -EINVAL;
| 2769 | +
| 2770 | +	if (instance) {
| 2771 | +		tr = trace_array_find_get(instance);
| 2772 | +		if (!tr)
| 2773 | +			return ERR_PTR(-ENOENT);
| 2774 | +	} else {
| 2775 | +		ret = trace_array_get(tr);
| 2776 | +		if (ret)
| 2777 | +			return ERR_PTR(ret);
| 2778 | +	}
| 2779 | +
| 2780 | +	mutex_lock(&event_mutex);
| 2781 | +
| 2782 | +	file = find_event_file(tr, system, event);
| 2783 | +	if (!file) {
| 2784 | +		trace_array_put(tr);
| 2785 | +		ret = -EINVAL;
| 2786 | +		goto out;
| 2787 | +	}
| 2788 | +
| 2789 | +	/* Don't let event modules unload while in use */
| 2790 | +	ret = try_module_get(file->event_call->mod);
| 2791 | +	if (!ret) {
| 2792 | +		trace_array_put(tr);
| 2793 | +		ret = -EBUSY;
| 2794 | +		goto out;
| 2795 | +	}
| 2796 | +
| 2797 | +	ret = 0;
| 2798 | + out:
| 2799 | +	mutex_unlock(&event_mutex);
| 2800 | +
| 2801 | +	if (ret)
| 2802 | +		file = ERR_PTR(ret);
| 2803 | +
| 2804 | +	return file;
| 2805 | +}
| 2806 | +EXPORT_SYMBOL_GPL(trace_get_event_file);
| 2807 | +
| 2808 | +/**
| 2809 | + * trace_put_event_file - Release a file from trace_get_event_file()
| 2810 | + * @file: The trace event file
| 2811 | + *
| 2812 | + * If a file was retrieved using trace_get_event_file(), this should
| 2813 | + * be called when it's no longer needed. It will cancel the previous
| 2814 | + * trace_array_get() called by that function, and decrement the
| 2815 | + * event's module refcount.
| 2816 | + */
| 2817 | +void trace_put_event_file(struct trace_event_file *file)
| 2818 | +{
| 2819 | +	mutex_lock(&event_mutex);
| 2820 | +	module_put(file->event_call->mod);
| 2821 | +	mutex_unlock(&event_mutex);
| 2822 | +
| 2823 | +	trace_array_put(file->tr);
| 2824 | +}
| 2825 | +EXPORT_SYMBOL_GPL(trace_put_event_file);
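
The kerneldoc above spells out the get/put pairing. A hedged sketch of a consumer, with placeholder event names and error handling trimmed:

#include <linux/err.h>
#include <linux/trace_events.h>

static int touch_sched_switch_file(void)
{
	struct trace_event_file *file;

	/* a NULL instance name means the top-level trace array */
	file = trace_get_event_file(NULL, "sched", "sched_switch");
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... use the file, e.g. to attach a custom trigger ... */

	trace_put_event_file(file);	/* drops the array and module references */
	return 0;
}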
2546 | 2826 |
2547 | 2827 | #ifdef CONFIG_DYNAMIC_FTRACE
2548 | 2828 |
.. | .. |
2868 | 3148 | #endif /* CONFIG_DYNAMIC_FTRACE */
2869 | 3149 |
2870 | 3150 | /*
2871 | | - * The top level array has already had its trace_event_file
2872 | | - * descriptors created in order to allow for early events to
2873 | | - * be recorded. This function is called after the tracefs has been
2874 | | - * initialized, and we now have to create the files associated
2875 | | - * to the events.
| 3151 | + * The top level array and trace arrays created by boot-time tracing
| 3152 | + * have already had its trace_event_file descriptors created in order
| 3153 | + * to allow for early events to be recorded.
| 3154 | + * This function is called after the tracefs has been initialized,
| 3155 | + * and we now have to create the files associated to the events.
2876 | 3156 |  */
2877 | | -static __init void
2878 | | -__trace_early_add_event_dirs(struct trace_array *tr)
| 3157 | +static void __trace_early_add_event_dirs(struct trace_array *tr)
2879 | 3158 | {
2880 | 3159 | 	struct trace_event_file *file;
2881 | 3160 | 	int ret;
.. | .. |
2890 | 3169 | }
2891 | 3170 |
2892 | 3171 | /*
2893 | | - * For early boot up, the top trace array requires to have
2894 | | - * a list of events that can be enabled. This must be done before
2895 | | - * the filesystem is set up in order to allow events to be traced
2896 | | - * early.
| 3172 | + * For early boot up, the top trace array and the trace arrays created
| 3173 | + * by boot-time tracing require to have a list of events that can be
| 3174 | + * enabled. This must be done before the filesystem is set up in order
| 3175 | + * to allow events to be traced early.
2897 | 3176 |  */
2898 | | -static __init void
2899 | | -__trace_early_add_events(struct trace_array *tr)
| 3177 | +void __trace_early_add_events(struct trace_array *tr)
2900 | 3178 | {
2901 | 3179 | 	struct trace_event_call *call;
2902 | 3180 | 	int ret;
.. | .. |
2940 | 3218 | {
2941 | 3219 | 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2942 | 3220 | 	ring_buffer_expanded = true;
2943 | | -	tracing_selftest_disabled = true;
| 3221 | +	disable_tracing_selftest("running event tracing");
2944 | 3222 |
2945 | 3223 | 	return 1;
2946 | 3224 | }
.. | .. |
2979 | 3257 | 				    tr, &ftrace_set_event_pid_fops);
2980 | 3258 | 	if (!entry)
2981 | 3259 | 		pr_warn("Could not create tracefs 'set_event_pid' entry\n");
| 3260 | +
| 3261 | +	entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,
| 3262 | +				    tr, &ftrace_set_event_notrace_pid_fops);
| 3263 | +	if (!entry)
| 3264 | +		pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");
2982 | 3265 |
2983 | 3266 | 	/* ring buffer internal formats */
2984 | 3267 | 	entry = trace_create_file("header_page", 0444, d_events,
.. | .. |
3022 | 3305 | 		goto out;
3023 | 3306 |
3024 | 3307 | 	down_write(&trace_event_sem);
3025 | | -	__trace_add_event_dirs(tr);
| 3308 | +	/* If tr already has the event list, it is initialized in early boot. */
| 3309 | +	if (unlikely(!list_empty(&tr->events)))
| 3310 | +		__trace_early_add_event_dirs(tr);
| 3311 | +	else
| 3312 | +		__trace_add_event_dirs(tr);
3026 | 3313 | 	up_write(&trace_event_sem);
3027 | 3314 |
3028 | 3315 |  out:
.. | .. |
3063 | 3350 | 	clear_event_triggers(tr);
3064 | 3351 |
3065 | 3352 | 	/* Clear the pid list */
3066 | | -	__ftrace_clear_event_pids(tr);
| 3353 | +	__ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
3067 | 3354 |
3068 | 3355 | 	/* Disable any running events */
3069 | 3356 | 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
.. | .. |
3073 | 3360 |
3074 | 3361 | 	down_write(&trace_event_sem);
3075 | 3362 | 	__trace_remove_event_dirs(tr);
3076 | | -	tracefs_remove_recursive(tr->event_dir);
| 3363 | +	tracefs_remove(tr->event_dir);
3077 | 3364 | 	up_write(&trace_event_sem);
3078 | 3365 |
3079 | 3366 | 	tr->event_dir = NULL;
.. | .. |
3178 | 3465 |
3179 | 3466 | early_initcall(event_trace_enable_again);
3180 | 3467 |
| 3468 | +/* Init fields which doesn't related to the tracefs */
| 3469 | +static __init int event_trace_init_fields(void)
| 3470 | +{
| 3471 | +	if (trace_define_generic_fields())
| 3472 | +		pr_warn("tracing: Failed to allocated generic fields");
| 3473 | +
| 3474 | +	if (trace_define_common_fields())
| 3475 | +		pr_warn("tracing: Failed to allocate common fields");
| 3476 | +
| 3477 | +	return 0;
| 3478 | +}
| 3479 | +
3181 | 3480 | __init int event_trace_init(void)
3182 | 3481 | {
3183 | 3482 | 	struct trace_array *tr;
3184 | | -	struct dentry *d_tracer;
3185 | 3483 | 	struct dentry *entry;
3186 | 3484 | 	int ret;
3187 | 3485 |
.. | .. |
3189 | 3487 | 	if (!tr)
3190 | 3488 | 		return -ENODEV;
3191 | 3489 |
3192 | | -	d_tracer = tracing_init_dentry();
3193 | | -	if (IS_ERR(d_tracer))
3194 | | -		return 0;
3195 | | -
3196 | | -	entry = tracefs_create_file("available_events", 0444, d_tracer,
| 3490 | +	entry = tracefs_create_file("available_events", 0444, NULL,
3197 | 3491 | 				    tr, &ftrace_avail_fops);
3198 | 3492 | 	if (!entry)
3199 | 3493 | 		pr_warn("Could not create tracefs 'available_events' entry\n");
3200 | 3494 |
3201 | | -	if (trace_define_generic_fields())
3202 | | -		pr_warn("tracing: Failed to allocated generic fields");
3203 | | -
3204 | | -	if (trace_define_common_fields())
3205 | | -		pr_warn("tracing: Failed to allocate common fields");
3206 | | -
3207 | | -	ret = early_event_add_tracer(d_tracer, tr);
| 3495 | +	ret = early_event_add_tracer(NULL, tr);
3208 | 3496 | 	if (ret)
3209 | 3497 | 		return ret;
.. | .. |
3213 | 3501 | 	if (ret)
3214 | 3502 | 		pr_warn("Failed to register trace events module notifier\n");
3215 | 3503 | #endif
| 3504 | +
| 3505 | +	eventdir_initialized = true;
| 3506 | +
3216 | 3507 | 	return 0;
3217 | 3508 | }
3218 | 3509 |
.. | .. |
3221 | 3512 | 	event_trace_memsetup();
3222 | 3513 | 	init_ftrace_syscalls();
3223 | 3514 | 	event_trace_enable();
| 3515 | +	event_trace_init_fields();
3224 | 3516 | }
3225 | 3517 |
3226 | | -#ifdef CONFIG_FTRACE_STARTUP_TEST
| 3518 | +#ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
3227 | 3519 |
3228 | 3520 | static DEFINE_SPINLOCK(test_spinlock);
3229 | 3521 | static DEFINE_SPINLOCK(test_spinlock_irq);
.. | .. |
3400 | 3692 | function_test_events_call(unsigned long ip, unsigned long parent_ip,
3401 | 3693 | 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
3402 | 3694 | {
| 3695 | +	struct trace_buffer *buffer;
3403 | 3696 | 	struct ring_buffer_event *event;
3404 | | -	struct ring_buffer *buffer;
3405 | 3697 | 	struct ftrace_entry *entry;
3406 | 3698 | 	unsigned long flags;
3407 | 3699 | 	long disabled;
---|
3407 | 3699 | long disabled; |
---|