hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/trace/trace_events.c
....@@ -12,6 +12,7 @@
1212 #define pr_fmt(fmt) fmt
1313
1414 #include <linux/workqueue.h>
15
+#include <linux/security.h>
1516 #include <linux/spinlock.h>
1617 #include <linux/kthread.h>
1718 #include <linux/tracefs.h>
....@@ -23,6 +24,7 @@
2324 #include <linux/delay.h>
2425
2526 #include <trace/events/sched.h>
27
+#include <trace/syscall.h>
2628
2729 #include <asm/setup.h>
2830
....@@ -36,6 +38,7 @@
3638 LIST_HEAD(ftrace_events);
3739 static LIST_HEAD(ftrace_generic_fields);
3840 static LIST_HEAD(ftrace_common_fields);
41
+static bool eventdir_initialized;
3942
4043 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
4144
....@@ -69,14 +72,6 @@
6972
7073 #define while_for_each_event_file() \
7174 }
72
-
73
-static struct list_head *
74
-trace_get_fields(struct trace_event_call *event_call)
75
-{
76
- if (!event_call->class->get_fields)
77
- return &event_call->class->fields;
78
- return event_call->class->get_fields(event_call);
79
-}
8075
8176 static struct ftrace_event_field *
8277 __find_event_field(struct list_head *head, char *name)
....@@ -173,6 +168,7 @@
173168
174169 __generic_field(int, CPU, FILTER_CPU);
175170 __generic_field(int, cpu, FILTER_CPU);
171
+ __generic_field(int, common_cpu, FILTER_CPU);
176172 __generic_field(char *, COMM, FILTER_COMM);
177173 __generic_field(char *, comm, FILTER_COMM);
178174
....@@ -188,8 +184,6 @@
188184 __common_field(unsigned char, flags);
189185 __common_field(unsigned char, preempt_count);
190186 __common_field(int, pid);
191
- __common_field(unsigned char, migrate_disable);
192
- __common_field(unsigned char, preempt_lazy_count);
193187
194188 return ret;
195189 }
....@@ -240,13 +234,16 @@
240234 {
241235 struct trace_array *tr = trace_file->tr;
242236 struct trace_array_cpu *data;
237
+ struct trace_pid_list *no_pid_list;
243238 struct trace_pid_list *pid_list;
244239
245240 pid_list = rcu_dereference_raw(tr->filtered_pids);
246
- if (!pid_list)
241
+ no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);
242
+
243
+ if (!pid_list && !no_pid_list)
247244 return false;
248245
249
- data = this_cpu_ptr(tr->trace_buffer.data);
246
+ data = this_cpu_ptr(tr->array_buffer.data);
250247
251248 return data->ignore_pid;
252249 }
....@@ -265,12 +262,12 @@
265262 local_save_flags(fbuffer->flags);
266263 fbuffer->pc = preempt_count();
267264 /*
268
- * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
265
+ * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
269266 * preemption (adding one to the preempt_count). Since we are
270267 * interested in the preempt_count at the time the tracepoint was
271268 * hit, we need to subtract one to offset the increment.
272269 */
273
- if (IS_ENABLED(CONFIG_PREEMPT))
270
+ if (IS_ENABLED(CONFIG_PREEMPTION))
274271 fbuffer->pc--;
275272 fbuffer->trace_file = trace_file;
276273
....@@ -281,6 +278,7 @@
281278 if (!fbuffer->event)
282279 return NULL;
283280
281
+ fbuffer->regs = NULL;
284282 fbuffer->entry = ring_buffer_event_data(fbuffer->event);
285283 return fbuffer->entry;
286284 }
....@@ -373,7 +371,6 @@
373371 {
374372 struct trace_event_call *call = file->event_call;
375373 struct trace_array *tr = file->tr;
376
- unsigned long file_flags = file->flags;
377374 int ret = 0;
378375 int disable;
379376
....@@ -397,6 +394,8 @@
397394 break;
398395 disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
399396 clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
397
+ /* Disable use of trace_buffered_event */
398
+ trace_buffered_event_disable();
400399 } else
401400 disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
402401
....@@ -435,6 +434,8 @@
435434 if (atomic_inc_return(&file->sm_ref) > 1)
436435 break;
437436 set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
437
+ /* Enable use of trace_buffered_event */
438
+ trace_buffered_event_enable();
438439 }
439440
440441 if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
....@@ -474,15 +475,6 @@
474475 break;
475476 }
476477
477
- /* Enable or disable use of trace_buffered_event */
478
- if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
479
- (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
480
- if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
481
- trace_buffered_event_enable();
482
- else
483
- trace_buffered_event_disable();
484
- }
485
-
486478 return ret;
487479 }
488480
....@@ -517,6 +509,9 @@
517509
518510 pid_list = rcu_dereference_raw(tr->filtered_pids);
519511 trace_filter_add_remove_task(pid_list, NULL, task);
512
+
513
+ pid_list = rcu_dereference_raw(tr->filtered_no_pids);
514
+ trace_filter_add_remove_task(pid_list, NULL, task);
520515 }
521516
522517 static void
....@@ -528,6 +523,9 @@
528523 struct trace_array *tr = data;
529524
530525 pid_list = rcu_dereference_sched(tr->filtered_pids);
526
+ trace_filter_add_remove_task(pid_list, self, task);
527
+
528
+ pid_list = rcu_dereference_sched(tr->filtered_no_pids);
531529 trace_filter_add_remove_task(pid_list, self, task);
532530 }
533531
....@@ -551,13 +549,23 @@
551549 struct task_struct *prev, struct task_struct *next)
552550 {
553551 struct trace_array *tr = data;
552
+ struct trace_pid_list *no_pid_list;
554553 struct trace_pid_list *pid_list;
554
+ bool ret;
555555
556556 pid_list = rcu_dereference_sched(tr->filtered_pids);
557
+ no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
557558
558
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
559
- trace_ignore_this_task(pid_list, prev) &&
560
- trace_ignore_this_task(pid_list, next));
559
+ /*
560
+ * Sched switch is funny, as we only want to ignore it
561
+ * in the notrace case if both prev and next should be ignored.
562
+ */
563
+ ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
564
+ trace_ignore_this_task(NULL, no_pid_list, next);
565
+
566
+ this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
567
+ (trace_ignore_this_task(pid_list, NULL, prev) &&
568
+ trace_ignore_this_task(pid_list, NULL, next)));
561569 }
562570
563571 static void
....@@ -565,58 +573,55 @@
565573 struct task_struct *prev, struct task_struct *next)
566574 {
567575 struct trace_array *tr = data;
576
+ struct trace_pid_list *no_pid_list;
568577 struct trace_pid_list *pid_list;
569578
570579 pid_list = rcu_dereference_sched(tr->filtered_pids);
580
+ no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
571581
572
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
573
- trace_ignore_this_task(pid_list, next));
582
+ this_cpu_write(tr->array_buffer.data->ignore_pid,
583
+ trace_ignore_this_task(pid_list, no_pid_list, next));
574584 }
575585
576586 static void
577587 event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
578588 {
579589 struct trace_array *tr = data;
590
+ struct trace_pid_list *no_pid_list;
580591 struct trace_pid_list *pid_list;
581592
582593 /* Nothing to do if we are already tracing */
583
- if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
594
+ if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
584595 return;
585596
586597 pid_list = rcu_dereference_sched(tr->filtered_pids);
598
+ no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
587599
588
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
589
- trace_ignore_this_task(pid_list, task));
600
+ this_cpu_write(tr->array_buffer.data->ignore_pid,
601
+ trace_ignore_this_task(pid_list, no_pid_list, task));
590602 }
591603
592604 static void
593605 event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
594606 {
595607 struct trace_array *tr = data;
608
+ struct trace_pid_list *no_pid_list;
596609 struct trace_pid_list *pid_list;
597610
598611 /* Nothing to do if we are not tracing */
599
- if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
612
+ if (this_cpu_read(tr->array_buffer.data->ignore_pid))
600613 return;
601614
602615 pid_list = rcu_dereference_sched(tr->filtered_pids);
616
+ no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
603617
604618 /* Set tracing if current is enabled */
605
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
606
- trace_ignore_this_task(pid_list, current));
619
+ this_cpu_write(tr->array_buffer.data->ignore_pid,
620
+ trace_ignore_this_task(pid_list, no_pid_list, current));
607621 }
608622
609
-static void __ftrace_clear_event_pids(struct trace_array *tr)
623
+static void unregister_pid_events(struct trace_array *tr)
610624 {
611
- struct trace_pid_list *pid_list;
612
- struct trace_event_file *file;
613
- int cpu;
614
-
615
- pid_list = rcu_dereference_protected(tr->filtered_pids,
616
- lockdep_is_held(&event_mutex));
617
- if (!pid_list)
618
- return;
619
-
620625 unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
621626 unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
622627
....@@ -628,26 +633,55 @@
628633
629634 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
630635 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
636
+}
631637
632
- list_for_each_entry(file, &tr->events, list) {
633
- clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
638
+static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
639
+{
640
+ struct trace_pid_list *pid_list;
641
+ struct trace_pid_list *no_pid_list;
642
+ struct trace_event_file *file;
643
+ int cpu;
644
+
645
+ pid_list = rcu_dereference_protected(tr->filtered_pids,
646
+ lockdep_is_held(&event_mutex));
647
+ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
648
+ lockdep_is_held(&event_mutex));
649
+
650
+ /* Make sure there's something to do */
651
+ if (!pid_type_enabled(type, pid_list, no_pid_list))
652
+ return;
653
+
654
+ if (!still_need_pid_events(type, pid_list, no_pid_list)) {
655
+ unregister_pid_events(tr);
656
+
657
+ list_for_each_entry(file, &tr->events, list) {
658
+ clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
659
+ }
660
+
661
+ for_each_possible_cpu(cpu)
662
+ per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
634663 }
635664
636
- for_each_possible_cpu(cpu)
637
- per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
665
+ if (type & TRACE_PIDS)
666
+ rcu_assign_pointer(tr->filtered_pids, NULL);
638667
639
- rcu_assign_pointer(tr->filtered_pids, NULL);
668
+ if (type & TRACE_NO_PIDS)
669
+ rcu_assign_pointer(tr->filtered_no_pids, NULL);
640670
641671 /* Wait till all users are no longer using pid filtering */
642672 tracepoint_synchronize_unregister();
643673
644
- trace_free_pid_list(pid_list);
674
+ if ((type & TRACE_PIDS) && pid_list)
675
+ trace_free_pid_list(pid_list);
676
+
677
+ if ((type & TRACE_NO_PIDS) && no_pid_list)
678
+ trace_free_pid_list(no_pid_list);
645679 }
646680
647
-static void ftrace_clear_event_pids(struct trace_array *tr)
681
+static void ftrace_clear_event_pids(struct trace_array *tr, int type)
648682 {
649683 mutex_lock(&event_mutex);
650
- __ftrace_clear_event_pids(tr);
684
+ __ftrace_clear_event_pids(tr, type);
651685 mutex_unlock(&event_mutex);
652686 }
653687
....@@ -706,7 +740,7 @@
706740 return;
707741
708742 if (!--dir->nr_events) {
709
- tracefs_remove_recursive(dir->entry);
743
+ tracefs_remove(dir->entry);
710744 list_del(&dir->list);
711745 __put_system_dir(dir);
712746 }
....@@ -725,7 +759,7 @@
725759 }
726760 spin_unlock(&dir->d_lock);
727761
728
- tracefs_remove_recursive(dir);
762
+ tracefs_remove(dir);
729763 }
730764
731765 list_del(&file->list);
....@@ -797,7 +831,7 @@
797831 return ret;
798832 }
799833
800
-static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
834
+int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
801835 {
802836 char *event = NULL, *sub = NULL, *match;
803837 int ret;
....@@ -859,6 +893,32 @@
859893 return __ftrace_set_clr_event(tr, NULL, system, event, set);
860894 }
861895 EXPORT_SYMBOL_GPL(trace_set_clr_event);
896
+
897
+/**
898
+ * trace_array_set_clr_event - enable or disable an event for a trace array.
899
+ * @tr: concerned trace array.
900
+ * @system: system name to match (NULL for any system)
901
+ * @event: event name to match (NULL for all events, within system)
902
+ * @enable: true to enable, false to disable
903
+ *
904
+ * This is a way for other parts of the kernel to enable or disable
905
+ * event recording.
906
+ *
907
+ * Returns 0 on success, -EINVAL if the parameters do not match any
908
+ * registered events.
909
+ */
910
+int trace_array_set_clr_event(struct trace_array *tr, const char *system,
911
+ const char *event, bool enable)
912
+{
913
+ int set;
914
+
915
+ if (!tr)
916
+ return -ENOENT;
917
+
918
+ set = (enable == true) ? 1 : 0;
919
+ return __ftrace_set_clr_event(tr, NULL, system, event, set);
920
+}
921
+EXPORT_SYMBOL_GPL(trace_array_set_clr_event);
862922
863923 /* 128 should be much more than enough */
864924 #define EVENT_BUF_SIZE 127
....@@ -994,15 +1054,32 @@
9941054 }
9951055
9961056 static void *
997
-p_next(struct seq_file *m, void *v, loff_t *pos)
1057
+__next(struct seq_file *m, void *v, loff_t *pos, int type)
9981058 {
9991059 struct trace_array *tr = m->private;
1000
- struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
1060
+ struct trace_pid_list *pid_list;
1061
+
1062
+ if (type == TRACE_PIDS)
1063
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
1064
+ else
1065
+ pid_list = rcu_dereference_sched(tr->filtered_no_pids);
10011066
10021067 return trace_pid_next(pid_list, v, pos);
10031068 }
10041069
1005
-static void *p_start(struct seq_file *m, loff_t *pos)
1070
+static void *
1071
+p_next(struct seq_file *m, void *v, loff_t *pos)
1072
+{
1073
+ return __next(m, v, pos, TRACE_PIDS);
1074
+}
1075
+
1076
+static void *
1077
+np_next(struct seq_file *m, void *v, loff_t *pos)
1078
+{
1079
+ return __next(m, v, pos, TRACE_NO_PIDS);
1080
+}
1081
+
1082
+static void *__start(struct seq_file *m, loff_t *pos, int type)
10061083 __acquires(RCU)
10071084 {
10081085 struct trace_pid_list *pid_list;
....@@ -1017,12 +1094,27 @@
10171094 mutex_lock(&event_mutex);
10181095 rcu_read_lock_sched();
10191096
1020
- pid_list = rcu_dereference_sched(tr->filtered_pids);
1097
+ if (type == TRACE_PIDS)
1098
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
1099
+ else
1100
+ pid_list = rcu_dereference_sched(tr->filtered_no_pids);
10211101
10221102 if (!pid_list)
10231103 return NULL;
10241104
10251105 return trace_pid_start(pid_list, pos);
1106
+}
1107
+
1108
+static void *p_start(struct seq_file *m, loff_t *pos)
1109
+ __acquires(RCU)
1110
+{
1111
+ return __start(m, pos, TRACE_PIDS);
1112
+}
1113
+
1114
+static void *np_start(struct seq_file *m, loff_t *pos)
1115
+ __acquires(RCU)
1116
+{
1117
+ return __start(m, pos, TRACE_NO_PIDS);
10261118 }
10271119
10281120 static void p_stop(struct seq_file *m, void *p)
....@@ -1256,7 +1348,7 @@
12561348 */
12571349 array_descriptor = strchr(field->type, '[');
12581350
1259
- if (!strncmp(field->type, "__data_loc", 10))
1351
+ if (str_has_prefix(field->type, "__data_loc"))
12601352 array_descriptor = NULL;
12611353
12621354 if (!array_descriptor)
....@@ -1305,6 +1397,8 @@
13051397 {
13061398 struct seq_file *m;
13071399 int ret;
1400
+
1401
+ /* Do we want to hide event format files on tracefs lockdown? */
13081402
13091403 ret = seq_open(file, &trace_format_seq_ops);
13101404 if (ret < 0)
....@@ -1452,28 +1546,17 @@
14521546 struct trace_array *tr = inode->i_private;
14531547 int ret;
14541548
1455
- if (tracing_is_disabled())
1456
- return -ENODEV;
1457
-
1458
- if (trace_array_get(tr) < 0)
1459
- return -ENODEV;
1460
-
14611549 /* Make a temporary dir that has no system but points to tr */
14621550 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1463
- if (!dir) {
1464
- trace_array_put(tr);
1551
+ if (!dir)
14651552 return -ENOMEM;
1466
- }
14671553
1468
- dir->tr = tr;
1469
-
1470
- ret = tracing_open_generic(inode, filp);
1554
+ ret = tracing_open_generic_tr(inode, filp);
14711555 if (ret < 0) {
1472
- trace_array_put(tr);
14731556 kfree(dir);
14741557 return ret;
14751558 }
1476
-
1559
+ dir->tr = tr;
14771560 filp->private_data = dir;
14781561
14791562 return 0;
....@@ -1579,6 +1662,7 @@
15791662 {
15801663 struct trace_array *tr = data;
15811664 struct trace_pid_list *pid_list;
1665
+ struct trace_pid_list *no_pid_list;
15821666
15831667 /*
15841668 * This function is called by on_each_cpu() while the
....@@ -1586,18 +1670,50 @@
15861670 */
15871671 pid_list = rcu_dereference_protected(tr->filtered_pids,
15881672 mutex_is_locked(&event_mutex));
1673
+ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
1674
+ mutex_is_locked(&event_mutex));
15891675
1590
- this_cpu_write(tr->trace_buffer.data->ignore_pid,
1591
- trace_ignore_this_task(pid_list, current));
1676
+ this_cpu_write(tr->array_buffer.data->ignore_pid,
1677
+ trace_ignore_this_task(pid_list, no_pid_list, current));
1678
+}
1679
+
1680
+static void register_pid_events(struct trace_array *tr)
1681
+{
1682
+ /*
1683
+ * Register a probe that is called before all other probes
1684
+ * to set ignore_pid if next or prev do not match.
1685
+ * Register a probe this is called after all other probes
1686
+ * to only keep ignore_pid set if next pid matches.
1687
+ */
1688
+ register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1689
+ tr, INT_MAX);
1690
+ register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1691
+ tr, 0);
1692
+
1693
+ register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1694
+ tr, INT_MAX);
1695
+ register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1696
+ tr, 0);
1697
+
1698
+ register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1699
+ tr, INT_MAX);
1700
+ register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1701
+ tr, 0);
1702
+
1703
+ register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1704
+ tr, INT_MAX);
1705
+ register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1706
+ tr, 0);
15921707 }
15931708
15941709 static ssize_t
1595
-ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
1596
- size_t cnt, loff_t *ppos)
1710
+event_pid_write(struct file *filp, const char __user *ubuf,
1711
+ size_t cnt, loff_t *ppos, int type)
15971712 {
15981713 struct seq_file *m = filp->private_data;
15991714 struct trace_array *tr = m->private;
16001715 struct trace_pid_list *filtered_pids = NULL;
1716
+ struct trace_pid_list *other_pids = NULL;
16011717 struct trace_pid_list *pid_list;
16021718 struct trace_event_file *file;
16031719 ssize_t ret;
....@@ -1611,14 +1727,26 @@
16111727
16121728 mutex_lock(&event_mutex);
16131729
1614
- filtered_pids = rcu_dereference_protected(tr->filtered_pids,
1615
- lockdep_is_held(&event_mutex));
1730
+ if (type == TRACE_PIDS) {
1731
+ filtered_pids = rcu_dereference_protected(tr->filtered_pids,
1732
+ lockdep_is_held(&event_mutex));
1733
+ other_pids = rcu_dereference_protected(tr->filtered_no_pids,
1734
+ lockdep_is_held(&event_mutex));
1735
+ } else {
1736
+ filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
1737
+ lockdep_is_held(&event_mutex));
1738
+ other_pids = rcu_dereference_protected(tr->filtered_pids,
1739
+ lockdep_is_held(&event_mutex));
1740
+ }
16161741
16171742 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
16181743 if (ret < 0)
16191744 goto out;
16201745
1621
- rcu_assign_pointer(tr->filtered_pids, pid_list);
1746
+ if (type == TRACE_PIDS)
1747
+ rcu_assign_pointer(tr->filtered_pids, pid_list);
1748
+ else
1749
+ rcu_assign_pointer(tr->filtered_no_pids, pid_list);
16221750
16231751 list_for_each_entry(file, &tr->events, list) {
16241752 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
....@@ -1627,32 +1755,8 @@
16271755 if (filtered_pids) {
16281756 tracepoint_synchronize_unregister();
16291757 trace_free_pid_list(filtered_pids);
1630
- } else if (pid_list) {
1631
- /*
1632
- * Register a probe that is called before all other probes
1633
- * to set ignore_pid if next or prev do not match.
1634
- * Register a probe this is called after all other probes
1635
- * to only keep ignore_pid set if next pid matches.
1636
- */
1637
- register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1638
- tr, INT_MAX);
1639
- register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1640
- tr, 0);
1641
-
1642
- register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1643
- tr, INT_MAX);
1644
- register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1645
- tr, 0);
1646
-
1647
- register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1648
- tr, INT_MAX);
1649
- register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1650
- tr, 0);
1651
-
1652
- register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1653
- tr, INT_MAX);
1654
- register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1655
- tr, 0);
1758
+ } else if (pid_list && !other_pids) {
1759
+ register_pid_events(tr);
16561760 }
16571761
16581762 /*
....@@ -1671,9 +1775,24 @@
16711775 return ret;
16721776 }
16731777
1778
+static ssize_t
1779
+ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
1780
+ size_t cnt, loff_t *ppos)
1781
+{
1782
+ return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
1783
+}
1784
+
1785
+static ssize_t
1786
+ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
1787
+ size_t cnt, loff_t *ppos)
1788
+{
1789
+ return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
1790
+}
1791
+
16741792 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
16751793 static int ftrace_event_set_open(struct inode *inode, struct file *file);
16761794 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
1795
+static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
16771796 static int ftrace_event_release(struct inode *inode, struct file *file);
16781797
16791798 static const struct seq_operations show_event_seq_ops = {
....@@ -1693,6 +1812,13 @@
16931812 static const struct seq_operations show_set_pid_seq_ops = {
16941813 .start = p_start,
16951814 .next = p_next,
1815
+ .show = trace_pid_show,
1816
+ .stop = p_stop,
1817
+};
1818
+
1819
+static const struct seq_operations show_set_no_pid_seq_ops = {
1820
+ .start = np_start,
1821
+ .next = np_next,
16961822 .show = trace_pid_show,
16971823 .stop = p_stop,
16981824 };
....@@ -1720,10 +1846,19 @@
17201846 .release = ftrace_event_release,
17211847 };
17221848
1849
+static const struct file_operations ftrace_set_event_notrace_pid_fops = {
1850
+ .open = ftrace_event_set_npid_open,
1851
+ .read = seq_read,
1852
+ .write = ftrace_event_npid_write,
1853
+ .llseek = seq_lseek,
1854
+ .release = ftrace_event_release,
1855
+};
1856
+
17231857 static const struct file_operations ftrace_enable_fops = {
1724
- .open = tracing_open_generic,
1858
+ .open = tracing_open_file_tr,
17251859 .read = event_enable_read,
17261860 .write = event_enable_write,
1861
+ .release = tracing_release_file_tr,
17271862 .llseek = default_llseek,
17281863 };
17291864
....@@ -1740,9 +1875,10 @@
17401875 };
17411876
17421877 static const struct file_operations ftrace_event_filter_fops = {
1743
- .open = tracing_open_generic,
1878
+ .open = tracing_open_file_tr,
17441879 .read = event_filter_read,
17451880 .write = event_filter_write,
1881
+ .release = tracing_release_file_tr,
17461882 .llseek = default_llseek,
17471883 };
17481884
....@@ -1783,6 +1919,10 @@
17831919 struct seq_file *m;
17841920 int ret;
17851921
1922
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
1923
+ if (ret)
1924
+ return ret;
1925
+
17861926 ret = seq_open(file, seq_ops);
17871927 if (ret < 0)
17881928 return ret;
....@@ -1807,6 +1947,7 @@
18071947 {
18081948 const struct seq_operations *seq_ops = &show_event_seq_ops;
18091949
1950
+ /* Checks for tracefs lockdown */
18101951 return ftrace_event_open(inode, file, seq_ops);
18111952 }
18121953
....@@ -1817,8 +1958,9 @@
18171958 struct trace_array *tr = inode->i_private;
18181959 int ret;
18191960
1820
- if (trace_array_get(tr) < 0)
1821
- return -ENODEV;
1961
+ ret = tracing_check_open_get_tr(tr);
1962
+ if (ret)
1963
+ return ret;
18221964
18231965 if ((file->f_mode & FMODE_WRITE) &&
18241966 (file->f_flags & O_TRUNC))
....@@ -1837,12 +1979,34 @@
18371979 struct trace_array *tr = inode->i_private;
18381980 int ret;
18391981
1840
- if (trace_array_get(tr) < 0)
1841
- return -ENODEV;
1982
+ ret = tracing_check_open_get_tr(tr);
1983
+ if (ret)
1984
+ return ret;
18421985
18431986 if ((file->f_mode & FMODE_WRITE) &&
18441987 (file->f_flags & O_TRUNC))
1845
- ftrace_clear_event_pids(tr);
1988
+ ftrace_clear_event_pids(tr, TRACE_PIDS);
1989
+
1990
+ ret = ftrace_event_open(inode, file, seq_ops);
1991
+ if (ret < 0)
1992
+ trace_array_put(tr);
1993
+ return ret;
1994
+}
1995
+
1996
+static int
1997
+ftrace_event_set_npid_open(struct inode *inode, struct file *file)
1998
+{
1999
+ const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
2000
+ struct trace_array *tr = inode->i_private;
2001
+ int ret;
2002
+
2003
+ ret = tracing_check_open_get_tr(tr);
2004
+ if (ret)
2005
+ return ret;
2006
+
2007
+ if ((file->f_mode & FMODE_WRITE) &&
2008
+ (file->f_flags & O_TRUNC))
2009
+ ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
18462010
18472011 ret = ftrace_event_open(inode, file, seq_ops);
18482012 if (ret < 0)
....@@ -1959,11 +2123,47 @@
19592123 }
19602124
19612125 static int
2126
+event_define_fields(struct trace_event_call *call)
2127
+{
2128
+ struct list_head *head;
2129
+ int ret = 0;
2130
+
2131
+ /*
2132
+ * Other events may have the same class. Only update
2133
+ * the fields if they are not already defined.
2134
+ */
2135
+ head = trace_get_fields(call);
2136
+ if (list_empty(head)) {
2137
+ struct trace_event_fields *field = call->class->fields_array;
2138
+ unsigned int offset = sizeof(struct trace_entry);
2139
+
2140
+ for (; field->type; field++) {
2141
+ if (field->type == TRACE_FUNCTION_TYPE) {
2142
+ field->define_fields(call);
2143
+ break;
2144
+ }
2145
+
2146
+ offset = ALIGN(offset, field->align);
2147
+ ret = trace_define_field(call, field->type, field->name,
2148
+ offset, field->size,
2149
+ field->is_signed, field->filter_type);
2150
+ if (WARN_ON_ONCE(ret)) {
2151
+ pr_err("error code is %d\n", ret);
2152
+ break;
2153
+ }
2154
+
2155
+ offset += field->size;
2156
+ }
2157
+ }
2158
+
2159
+ return ret;
2160
+}
2161
+
2162
+static int
19622163 event_create_dir(struct dentry *parent, struct trace_event_file *file)
19632164 {
19642165 struct trace_event_call *call = file->event_call;
19652166 struct trace_array *tr = file->tr;
1966
- struct list_head *head;
19672167 struct dentry *d_events;
19682168 const char *name;
19692169 int ret;
....@@ -1997,18 +2197,10 @@
19972197 &ftrace_event_id_fops);
19982198 #endif
19992199
2000
- /*
2001
- * Other events may have the same class. Only update
2002
- * the fields if they are not already defined.
2003
- */
2004
- head = trace_get_fields(call);
2005
- if (list_empty(head)) {
2006
- ret = call->class->define_fields(call);
2007
- if (ret < 0) {
2008
- pr_warn("Could not initialize trace point events/%s\n",
2009
- name);
2010
- return -1;
2011
- }
2200
+ ret = event_define_fields(call);
2201
+ if (ret < 0) {
2202
+ pr_warn("Could not initialize trace point events/%s\n", name);
2203
+ return ret;
20122204 }
20132205
20142206 /*
....@@ -2027,8 +2219,18 @@
20272219 trace_create_file("hist", 0444, file->dir, file,
20282220 &event_hist_fops);
20292221 #endif
2222
+#ifdef CONFIG_HIST_TRIGGERS_DEBUG
2223
+ trace_create_file("hist_debug", 0444, file->dir, file,
2224
+ &event_hist_debug_fops);
2225
+#endif
20302226 trace_create_file("format", 0444, file->dir, call,
20312227 &ftrace_event_format_fops);
2228
+
2229
+#ifdef CONFIG_TRACE_EVENT_INJECT
2230
+ if (call->event.type && call->class->reg)
2231
+ trace_create_file("inject", 0200, file->dir, file,
2232
+ &event_inject_fops);
2233
+#endif
20322234
20332235 return 0;
20342236 }
....@@ -2257,6 +2459,7 @@
22572459 trace_create_new_event(struct trace_event_call *call,
22582460 struct trace_array *tr)
22592461 {
2462
+ struct trace_pid_list *no_pid_list;
22602463 struct trace_pid_list *pid_list;
22612464 struct trace_event_file *file;
22622465
....@@ -2266,8 +2469,10 @@
22662469
22672470 pid_list = rcu_dereference_protected(tr->filtered_pids,
22682471 lockdep_is_held(&event_mutex));
2472
+ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
2473
+ lockdep_is_held(&event_mutex));
22692474
2270
- if (pid_list)
2475
+ if (pid_list || no_pid_list)
22712476 file->flags |= EVENT_FILE_FL_PID_FILTER;
22722477
22732478 file->event_call = call;
....@@ -2290,7 +2495,10 @@
22902495 if (!file)
22912496 return -ENOMEM;
22922497
2293
- return event_create_dir(tr->event_dir, file);
2498
+ if (eventdir_initialized)
2499
+ return event_create_dir(tr->event_dir, file);
2500
+ else
2501
+ return event_define_fields(call);
22942502 }
22952503
22962504 /*
....@@ -2298,7 +2506,7 @@
22982506 * for enabling events at boot. We want to enable events before
22992507 * the filesystem is initialized.
23002508 */
2301
-static __init int
2509
+static int
23022510 __trace_early_add_new_event(struct trace_event_call *call,
23032511 struct trace_array *tr)
23042512 {
....@@ -2308,13 +2516,14 @@
23082516 if (!file)
23092517 return -ENOMEM;
23102518
2311
- return 0;
2519
+ return event_define_fields(call);
23122520 }
23132521
23142522 struct ftrace_module_file_ops;
23152523 static void __add_event_to_tracers(struct trace_event_call *call);
23162524
2317
-int trace_add_event_call_nolock(struct trace_event_call *call)
2525
+/* Add an additional event_call dynamically */
2526
+int trace_add_event_call(struct trace_event_call *call)
23182527 {
23192528 int ret;
23202529 lockdep_assert_held(&event_mutex);
....@@ -2326,17 +2535,6 @@
23262535 __add_event_to_tracers(call);
23272536
23282537 mutex_unlock(&trace_types_lock);
2329
- return ret;
2330
-}
2331
-
2332
-/* Add an additional event_call dynamically */
2333
-int trace_add_event_call(struct trace_event_call *call)
2334
-{
2335
- int ret;
2336
-
2337
- mutex_lock(&event_mutex);
2338
- ret = trace_add_event_call_nolock(call);
2339
- mutex_unlock(&event_mutex);
23402538 return ret;
23412539 }
23422540
....@@ -2370,7 +2568,10 @@
23702568 * TRACE_REG_UNREGISTER.
23712569 */
23722570 if (file->flags & EVENT_FILE_FL_ENABLED)
2373
- return -EBUSY;
2571
+ goto busy;
2572
+
2573
+ if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2574
+ tr->clear_trace = true;
23742575 /*
23752576 * The do_for_each_event_file_safe() is
23762577 * a double loop. After finding the call for this
....@@ -2383,10 +2584,16 @@
23832584 __trace_remove_event_call(call);
23842585
23852586 return 0;
2587
+ busy:
2588
+ /* No need to clear the trace now */
2589
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2590
+ tr->clear_trace = false;
2591
+ }
2592
+ return -EBUSY;
23862593 }
23872594
2388
-/* no event_mutex version */
2389
-int trace_remove_event_call_nolock(struct trace_event_call *call)
2595
+/* Remove an event_call */
2596
+int trace_remove_event_call(struct trace_event_call *call)
23902597 {
23912598 int ret;
23922599
....@@ -2397,18 +2604,6 @@
23972604 ret = probe_remove_event_call(call);
23982605 up_write(&trace_event_sem);
23992606 mutex_unlock(&trace_types_lock);
2400
-
2401
- return ret;
2402
-}
2403
-
2404
-/* Remove an event_call */
2405
-int trace_remove_event_call(struct trace_event_call *call)
2406
-{
2407
- int ret;
2408
-
2409
- mutex_lock(&event_mutex);
2410
- ret = trace_remove_event_call_nolock(call);
2411
- mutex_unlock(&event_mutex);
24122607
24132608 return ret;
24142609 }
....@@ -2462,7 +2657,7 @@
24622657 * over from this module may be passed to the new module events and
24632658 * unexpected results may occur.
24642659 */
2465
- tracing_reset_all_online_cpus();
2660
+ tracing_reset_all_online_cpus_unlocked();
24662661 }
24672662
24682663 static int trace_module_notify(struct notifier_block *self,
....@@ -2483,7 +2678,7 @@
24832678 mutex_unlock(&trace_types_lock);
24842679 mutex_unlock(&event_mutex);
24852680
2486
- return 0;
2681
+ return NOTIFY_OK;
24872682 }
24882683
24892684 static struct notifier_block trace_module_nb = {
....@@ -2543,6 +2738,91 @@
25432738
25442739 return file;
25452740 }
2741
+
2742
+/**
2743
+ * trace_get_event_file - Find and return a trace event file
2744
+ * @instance: The name of the trace instance containing the event
2745
+ * @system: The name of the system containing the event
2746
+ * @event: The name of the event
2747
+ *
2748
+ * Return a trace event file given the trace instance name, trace
2749
+ * system, and trace event name. If the instance name is NULL, it
2750
+ * refers to the top-level trace array.
2751
+ *
2752
+ * This function will look it up and return it if found, after calling
2753
+ * trace_array_get() to prevent the instance from going away, and
2754
+ * increment the event's module refcount to prevent it from being
2755
+ * removed.
2756
+ *
2757
+ * To release the file, call trace_put_event_file(), which will call
2758
+ * trace_array_put() and decrement the event's module refcount.
2759
+ *
2760
+ * Return: The trace event on success, ERR_PTR otherwise.
2761
+ */
2762
+struct trace_event_file *trace_get_event_file(const char *instance,
2763
+ const char *system,
2764
+ const char *event)
2765
+{
2766
+ struct trace_array *tr = top_trace_array();
2767
+ struct trace_event_file *file = NULL;
2768
+ int ret = -EINVAL;
2769
+
2770
+ if (instance) {
2771
+ tr = trace_array_find_get(instance);
2772
+ if (!tr)
2773
+ return ERR_PTR(-ENOENT);
2774
+ } else {
2775
+ ret = trace_array_get(tr);
2776
+ if (ret)
2777
+ return ERR_PTR(ret);
2778
+ }
2779
+
2780
+ mutex_lock(&event_mutex);
2781
+
2782
+ file = find_event_file(tr, system, event);
2783
+ if (!file) {
2784
+ trace_array_put(tr);
2785
+ ret = -EINVAL;
2786
+ goto out;
2787
+ }
2788
+
2789
+ /* Don't let event modules unload while in use */
2790
+ ret = try_module_get(file->event_call->mod);
2791
+ if (!ret) {
2792
+ trace_array_put(tr);
2793
+ ret = -EBUSY;
2794
+ goto out;
2795
+ }
2796
+
2797
+ ret = 0;
2798
+ out:
2799
+ mutex_unlock(&event_mutex);
2800
+
2801
+ if (ret)
2802
+ file = ERR_PTR(ret);
2803
+
2804
+ return file;
2805
+}
2806
+EXPORT_SYMBOL_GPL(trace_get_event_file);
2807
+
2808
+/**
2809
+ * trace_put_event_file - Release a file from trace_get_event_file()
2810
+ * @file: The trace event file
2811
+ *
2812
+ * If a file was retrieved using trace_get_event_file(), this should
2813
+ * be called when it's no longer needed. It will cancel the previous
2814
+ * trace_array_get() called by that function, and decrement the
2815
+ * event's module refcount.
2816
+ */
2817
+void trace_put_event_file(struct trace_event_file *file)
2818
+{
2819
+ mutex_lock(&event_mutex);
2820
+ module_put(file->event_call->mod);
2821
+ mutex_unlock(&event_mutex);
2822
+
2823
+ trace_array_put(file->tr);
2824
+}
2825
+EXPORT_SYMBOL_GPL(trace_put_event_file);
25462826
25472827 #ifdef CONFIG_DYNAMIC_FTRACE
25482828
....@@ -2868,14 +3148,13 @@
28683148 #endif /* CONFIG_DYNAMIC_FTRACE */
28693149
28703150 /*
2871
- * The top level array has already had its trace_event_file
2872
- * descriptors created in order to allow for early events to
2873
- * be recorded. This function is called after the tracefs has been
2874
- * initialized, and we now have to create the files associated
2875
- * to the events.
3151
+ * The top level array and trace arrays created by boot-time tracing
3152
+ * have already had their trace_event_file descriptors created in order
3153
+ * to allow for early events to be recorded.
3154
+ * This function is called after the tracefs has been initialized,
3155
+ * and we now have to create the files associated with the events.
28763156 */
2877
-static __init void
2878
-__trace_early_add_event_dirs(struct trace_array *tr)
3157
+static void __trace_early_add_event_dirs(struct trace_array *tr)
28793158 {
28803159 struct trace_event_file *file;
28813160 int ret;
....@@ -2890,13 +3169,12 @@
28903169 }
28913170
28923171 /*
2893
- * For early boot up, the top trace array requires to have
2894
- * a list of events that can be enabled. This must be done before
2895
- * the filesystem is set up in order to allow events to be traced
2896
- * early.
3172
+ * For early boot up, the top trace array and the trace arrays created
3173
+ * by boot-time tracing need to have a list of events that can be
3174
+ * enabled. This must be done before the filesystem is set up in order
3175
+ * to allow events to be traced early.
28973176 */
2898
-static __init void
2899
-__trace_early_add_events(struct trace_array *tr)
3177
+void __trace_early_add_events(struct trace_array *tr)
29003178 {
29013179 struct trace_event_call *call;
29023180 int ret;
....@@ -2940,7 +3218,7 @@
29403218 {
29413219 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
29423220 ring_buffer_expanded = true;
2943
- tracing_selftest_disabled = true;
3221
+ disable_tracing_selftest("running event tracing");
29443222
29453223 return 1;
29463224 }
....@@ -2979,6 +3257,11 @@
29793257 tr, &ftrace_set_event_pid_fops);
29803258 if (!entry)
29813259 pr_warn("Could not create tracefs 'set_event_pid' entry\n");
3260
+
3261
+ entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,
3262
+ tr, &ftrace_set_event_notrace_pid_fops);
3263
+ if (!entry)
3264
+ pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");
29823265
29833266 /* ring buffer internal formats */
29843267 entry = trace_create_file("header_page", 0444, d_events,
....@@ -3022,7 +3305,11 @@
30223305 goto out;
30233306
30243307 down_write(&trace_event_sem);
3025
- __trace_add_event_dirs(tr);
3308
+ /* If tr already has the event list, it is initialized in early boot. */
3309
+ if (unlikely(!list_empty(&tr->events)))
3310
+ __trace_early_add_event_dirs(tr);
3311
+ else
3312
+ __trace_add_event_dirs(tr);
30263313 up_write(&trace_event_sem);
30273314
30283315 out:
....@@ -3063,7 +3350,7 @@
30633350 clear_event_triggers(tr);
30643351
30653352 /* Clear the pid list */
3066
- __ftrace_clear_event_pids(tr);
3353
+ __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
30673354
30683355 /* Disable any running events */
30693356 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
....@@ -3073,7 +3360,7 @@
30733360
30743361 down_write(&trace_event_sem);
30753362 __trace_remove_event_dirs(tr);
3076
- tracefs_remove_recursive(tr->event_dir);
3363
+ tracefs_remove(tr->event_dir);
30773364 up_write(&trace_event_sem);
30783365
30793366 tr->event_dir = NULL;
....@@ -3178,10 +3465,21 @@
31783465
31793466 early_initcall(event_trace_enable_again);
31803467
3468
+/* Init fields which are not related to the tracefs */
3469
+static __init int event_trace_init_fields(void)
3470
+{
3471
+ if (trace_define_generic_fields())
3472
+ pr_warn("tracing: Failed to allocated generic fields");
3473
+
3474
+ if (trace_define_common_fields())
3475
+ pr_warn("tracing: Failed to allocate common fields");
3476
+
3477
+ return 0;
3478
+}
3479
+
31813480 __init int event_trace_init(void)
31823481 {
31833482 struct trace_array *tr;
3184
- struct dentry *d_tracer;
31853483 struct dentry *entry;
31863484 int ret;
31873485
....@@ -3189,22 +3487,12 @@
31893487 if (!tr)
31903488 return -ENODEV;
31913489
3192
- d_tracer = tracing_init_dentry();
3193
- if (IS_ERR(d_tracer))
3194
- return 0;
3195
-
3196
- entry = tracefs_create_file("available_events", 0444, d_tracer,
3490
+ entry = tracefs_create_file("available_events", 0444, NULL,
31973491 tr, &ftrace_avail_fops);
31983492 if (!entry)
31993493 pr_warn("Could not create tracefs 'available_events' entry\n");
32003494
3201
- if (trace_define_generic_fields())
3202
- pr_warn("tracing: Failed to allocated generic fields");
3203
-
3204
- if (trace_define_common_fields())
3205
- pr_warn("tracing: Failed to allocate common fields");
3206
-
3207
- ret = early_event_add_tracer(d_tracer, tr);
3495
+ ret = early_event_add_tracer(NULL, tr);
32083496 if (ret)
32093497 return ret;
32103498
....@@ -3213,6 +3501,9 @@
32133501 if (ret)
32143502 pr_warn("Failed to register trace events module notifier\n");
32153503 #endif
3504
+
3505
+ eventdir_initialized = true;
3506
+
32163507 return 0;
32173508 }
32183509
....@@ -3221,9 +3512,10 @@
32213512 event_trace_memsetup();
32223513 init_ftrace_syscalls();
32233514 event_trace_enable();
3515
+ event_trace_init_fields();
32243516 }
32253517
3226
-#ifdef CONFIG_FTRACE_STARTUP_TEST
3518
+#ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
32273519
32283520 static DEFINE_SPINLOCK(test_spinlock);
32293521 static DEFINE_SPINLOCK(test_spinlock_irq);
....@@ -3400,8 +3692,8 @@
34003692 function_test_events_call(unsigned long ip, unsigned long parent_ip,
34013693 struct ftrace_ops *op, struct pt_regs *pt_regs)
34023694 {
3695
+ struct trace_buffer *buffer;
34033696 struct ring_buffer_event *event;
3404
- struct ring_buffer *buffer;
34053697 struct ftrace_entry *entry;
34063698 unsigned long flags;
34073699 long disabled;