hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/trace/trace.h
@@ -11,12 +11,15 @@
 #include <linux/mmiotrace.h>
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
+#include <linux/trace.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/trace_seq.h>
 #include <linux/trace_events.h>
 #include <linux/compiler.h>
-#include <linux/trace_seq.h>
 #include <linux/glob.h>
+#include <linux/irq_work.h>
+#include <linux/workqueue.h>
+#include <linux/ctype.h>
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 #include <asm/unistd.h>		/* For NR_SYSCALLS */
@@ -50,11 +53,17 @@
 #undef __field
 #define __field(type, item)		type item;
 
+#undef __field_fn
+#define __field_fn(type, item)		type item;
+
 #undef __field_struct
 #define __field_struct(type, item)	__field(type, item)
 
 #undef __field_desc
 #define __field_desc(type, container, item)
+
+#undef __field_packed
+#define __field_packed(type, container, item)
 
 #undef __array
 #define __array(type, item, size)	type item[size];
@@ -69,28 +78,36 @@
 #define F_STRUCT(args...)		args
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}
 
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
+#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
 
 #undef FTRACE_ENTRY_REG
-#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
-			 filter, regfn)					\
-	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-		     filter)
+#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)	\
+	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
 #undef FTRACE_ENTRY_PACKED
-#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
-			    filter)					\
-	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-		     filter) __packed
+#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
+	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
 
 #include "trace_entries.h"
 
+/* Use this for memory failure errors */
+#define MEM_FAIL(condition, fmt, ...) ({			\
+	static bool __section(".data.once") __warned;		\
+	int __ret_warn_once = !!(condition);			\
+								\
+	if (unlikely(__ret_warn_once && !__warned)) {		\
+		__warned = true;				\
+		pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
+	}							\
+	unlikely(__ret_warn_once);				\
+})
+
 /*
  * syscalls are special, and need special handling, this is why
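The MEM_FAIL() macro added above reports a memory-allocation failure once per call site (the __warned flag lives in the .data.once section) and evaluates to the truth value of its condition, so a caller can test and report in one expression. A minimal usage sketch, with the buffer name and size illustrative:

	buf = kzalloc(size, GFP_KERNEL);
	if (MEM_FAIL(!buf, "failed to allocate trace buffer of %zu bytes\n", size))
		return -ENOMEM;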
@@ -117,25 +134,6 @@
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
-};
-
-/*
- * trace_flag_type is an enumeration that holds different
- * states when a trace occurs. These are:
- *  IRQS_OFF		- interrupts were disabled
- *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
- *  NEED_RESCHED	- reschedule is requested
- *  HARDIRQ		- inside an interrupt handler
- *  SOFTIRQ		- inside a softirq handler
- */
-enum trace_flag_type {
-	TRACE_FLAG_IRQS_OFF		= 0x01,
-	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
-	TRACE_FLAG_NEED_RESCHED		= 0x04,
-	TRACE_FLAG_HARDIRQ		= 0x08,
-	TRACE_FLAG_SOFTIRQ		= 0x10,
-	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
-	TRACE_FLAG_NMI			= 0x40,
 };
 
 #define TRACE_BUF_SIZE		1024
@@ -165,18 +163,18 @@
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
 
-	bool			ignore_pid;
 #ifdef CONFIG_FUNCTION_TRACER
-	bool			ftrace_ignore_pid;
+	int			ftrace_ignore_pid;
 #endif
+	bool			ignore_pid;
 };
 
 struct tracer;
 struct trace_option_dentry;
 
-struct trace_buffer {
+struct array_buffer {
	struct trace_array		*tr;
-	struct ring_buffer		*buffer;
+	struct trace_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
@@ -194,6 +192,75 @@
	unsigned long			*pids;
 };
 
+enum {
+	TRACE_PIDS		= BIT(0),
+	TRACE_NO_PIDS		= BIT(1),
+};
+
+static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
+				    struct trace_pid_list *no_pid_list)
+{
+	/* Return true if the pid list in type has pids */
+	return ((type & TRACE_PIDS) && pid_list) ||
+		((type & TRACE_NO_PIDS) && no_pid_list);
+}
+
+static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
+					 struct trace_pid_list *no_pid_list)
+{
+	/*
+	 * Turning off what is in @type, return true if the "other"
+	 * pid list still has pids in it.
+	 */
+	return (!(type & TRACE_PIDS) && pid_list) ||
+		(!(type & TRACE_NO_PIDS) && no_pid_list);
+}
+
+typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
+
+/**
+ * struct cond_snapshot - conditional snapshot data and callback
+ *
+ * The cond_snapshot structure encapsulates a callback function and
+ * data associated with the snapshot for a given tracing instance.
+ *
+ * When a snapshot is taken conditionally, by invoking
+ * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
+ * passed in turn to the cond_snapshot.update() function.  That data
+ * can be compared by the update() implementation with the cond_data
+ * contained within the struct cond_snapshot instance associated with
+ * the trace_array.  Because the tr->max_lock is held throughout the
+ * update() call, the update() function can directly retrieve the
+ * cond_snapshot and cond_data associated with the per-instance
+ * snapshot associated with the trace_array.
+ *
+ * The cond_snapshot.update() implementation can save data to be
+ * associated with the snapshot if it decides to, and returns 'true'
+ * in that case, or it returns 'false' if the conditional snapshot
+ * shouldn't be taken.
+ *
+ * The cond_snapshot instance is created and associated with the
+ * user-defined cond_data by tracing_cond_snapshot_enable().
+ * Likewise, the cond_snapshot instance is destroyed and is no longer
+ * associated with the trace instance by
+ * tracing_cond_snapshot_disable().
+ *
+ * The method below is required.
+ *
+ * @update: When a conditional snapshot is invoked, the update()
+ *	callback function is invoked with the tr->max_lock held.  The
+ *	update() implementation signals whether or not to actually
+ *	take the snapshot, by returning 'true' if so, 'false' if no
+ *	snapshot should be taken.  Because the max_lock is held for
+ *	the duration of update(), the implementation is safe to
+ *	directly retrieve and save any implementation data it needs
+ *	to in association with the snapshot.
+ */
+struct cond_snapshot {
+	void				*cond_data;
+	cond_update_fn_t		update;
+};
+
 /*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
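The conditional-snapshot pieces above fit together as follows: a user associates cond_data and an update() callback with an instance via tracing_snapshot_cond_enable(), and later calls tracing_snapshot_cond(); the snapshot is taken only when update() returns true. A sketch under those declarations (my_cond, my_update and the latency helper are hypothetical):

	struct my_cond {
		u64	threshold;	/* hypothetical user data */
	};

	/* Called with tr->max_lock held; return true to take the snapshot. */
	static bool my_update(struct trace_array *tr, void *cond_data)
	{
		struct my_cond *cond = cond_data;

		return my_measured_latency() > cond->threshold;	/* hypothetical helper */
	}

	static struct my_cond data = { .threshold = 500 };

	err = tracing_snapshot_cond_enable(tr, &data, my_update);	/* setup */
	tracing_snapshot_cond(tr, &data);				/* hot path */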
@@ -202,7 +269,7 @@
 struct trace_array {
	struct list_head	list;
	char			*name;
-	struct trace_buffer	trace_buffer;
+	struct array_buffer	array_buffer;
 #ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
@@ -210,18 +277,24 @@
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
-	 * The buffers for the max_buffer are set up the same as the trace_buffer
+	 * The buffers for the max_buffer are set up the same as the array_buffer
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
-	 * with the buffer of the trace_buffer and the buffers are reset for
-	 * the trace_buffer so the tracing can continue.
+	 * with the buffer of the array_buffer and the buffers are reset for
+	 * the array_buffer so the tracing can continue.
	 */
-	struct trace_buffer	max_buffer;
+	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
 #endif
 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
+#ifdef CONFIG_FSNOTIFY
+	struct dentry		*d_max_latency;
+	struct work_struct	fsnotify_work;
+	struct irq_work		fsnotify_irqwork;
+#endif
 #endif
	struct trace_pid_list	__rcu *filtered_pids;
+	struct trace_pid_list	__rcu *filtered_no_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
@@ -247,11 +320,14 @@
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
+	int			buffer_percent;
+	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
+	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
@@ -262,9 +338,11 @@
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
+	int			trace_ref;
 #ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
+	struct trace_pid_list	__rcu *function_no_pids;
 #ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
@@ -276,6 +354,9 @@
 #endif
	int			time_stamp_abs_ref;
	struct list_head	hist_vars;
+#ifdef CONFIG_TRACER_SNAPSHOT
+	struct cond_snapshot	*cond_snapshot;
+#endif
 };
 
 enum {
@@ -287,7 +368,9 @@
 extern struct mutex trace_types_lock;
 
 extern int trace_array_get(struct trace_array *tr);
-extern void trace_array_put(struct trace_array *tr);
+extern int tracing_check_open_get_tr(struct trace_array *tr);
+extern struct trace_array *trace_array_find(const char *instance);
+extern struct trace_array *trace_array_find_get(const char *instance);
 
 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -315,11 +398,11 @@
	__builtin_types_compatible_p(typeof(var), type *)
 
 #undef IF_ASSIGN
-#define IF_ASSIGN(var, entry, etype, id)		\
-	if (FTRACE_CMP_TYPE(var, etype)) {		\
-		var = (typeof(var))(entry);		\
-		WARN_ON(id && (entry)->type != id);	\
-		break;					\
+#define IF_ASSIGN(var, entry, etype, id)			\
+	if (FTRACE_CMP_TYPE(var, etype)) {			\
+		var = (typeof(var))(entry);			\
+		WARN_ON(id != 0 && (entry)->type != id);	\
+		break;						\
	}
 
 /* Will cause compile errors if type is not found. */
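The id != 0 change matters because IF_ASSIGN() is expanded inside trace_assign_type()'s switch on entry->type, where an id of zero means "assign without checking the type"; spelling the comparison out keeps the wildcard arms from tripping constant-expression warnings. A condensed sketch of the expansion pattern (arms abbreviated from trace.h):

	switch (entry->type) {
	IF_ASSIGN(var, entry, struct ftrace_entry, TRACE_FN);
	IF_ASSIGN(var, entry, struct trace_entry, 0);	/* id == 0: any type */
	default:
		__ftrace_bad_type();
	}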
@@ -447,7 +530,6 @@
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
-	int			ref;
	bool			print_max;
	bool			allow_instances;
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -467,16 +549,14 @@
 * When function tracing occurs, the following steps are made:
 *  If arch does not support a ftrace feature:
 *   call internal function (uses INTERNAL bits) which calls...
+ *  If callback is registered to the "global" list, the list
+ *   function is called and recursion checks the GLOBAL bits.
+ *   then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 */
 enum {
-	TRACE_BUFFER_BIT,
-	TRACE_BUFFER_NMI_BIT,
-	TRACE_BUFFER_IRQ_BIT,
-	TRACE_BUFFER_SIRQ_BIT,
-
-	/* Start of function recursion bits */
+	/* Function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
@@ -521,6 +601,13 @@
 
	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,
+
+	/*
+	 * To implement set_graph_notrace, if this bit is set, we ignore
+	 * function graph tracing of called functions, until the return
+	 * function is called to clear it.
+	 */
+	TRACE_GRAPH_NOTRACE_BIT,
 };
 
 #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
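The new TRACE_GRAPH_NOTRACE_BIT is driven through the per-task trace_recursion_set/clear/test helpers defined here. Roughly how the function-graph tracer uses it, per the comment above (ftrace_graph_notrace_addr() is the existing notrace-hash lookup):

	/* On function entry: */
	if (ftrace_graph_notrace_addr(trace->func)) {
		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
		return 1;	/* still return 1 so the return handler runs */
	}

	/* On function return: */
	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);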
@@ -616,11 +703,11 @@
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void tracing_reset(struct trace_buffer *buf, int cpu);
-void tracing_reset_online_cpus(struct trace_buffer *buf);
+void tracing_reset_online_cpus(struct array_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
+int tracing_open_generic_tr(struct inode *inode, struct file *filp);
 bool tracing_is_disabled(void);
 bool tracer_tracing_is_on(struct trace_array *tr);
 void tracer_tracing_on(struct trace_array *tr);
@@ -631,16 +718,15 @@
			  void *data,
			  const struct file_operations *fops);
 
-struct dentry *tracing_init_dentry(void);
+int tracing_init_dentry(void);
 
 struct ring_buffer_event;
 
 struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
-			  unsigned long flags,
-			  int pc);
+			  unsigned int trace_ctx);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
@@ -648,7 +734,7 @@
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					   int *ent_cpu, u64 *ent_ts);
 
-void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
					 struct ring_buffer_event *event);
 
 int trace_empty(struct trace_iterator *iter);
@@ -659,14 +745,17 @@
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
+unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
+unsigned long trace_total_entries(struct trace_array *tr);
+
 void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
-		    unsigned long flags, int pc);
+		    unsigned int trace_ctx);
 void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
-			  unsigned long flags, int pc);
+			  unsigned int trace_ctx);
 void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
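Every flags/pc parameter pair replaced in this file collapses into a single unsigned int trace_ctx that packs the irq flags and preempt count together. Call sites convert along these lines (tracing_gen_ctx() is the packing helper the same series adds to include/linux/trace_events.h; sketch only):

	/* before */
	local_save_flags(flags);
	pc = preempt_count();
	trace_function(tr, ip, parent_ip, flags, pc);

	/* after */
	trace_ctx = tracing_gen_ctx();
	trace_function(tr, ip, parent_ip, trace_ctx);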
@@ -702,6 +791,7 @@
 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
+			    struct trace_pid_list *filtered_no_pids,
			    struct task_struct *task);
 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
@@ -715,27 +805,28 @@
		     const char __user *ubuf, size_t cnt);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
+void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
+		   void *cond_data);
 void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-#ifdef CONFIG_STACKTRACE
-void ftrace_trace_userstack(struct trace_array *tr,
-			    struct ring_buffer *buffer, unsigned long flags,
-			    int pc);
+#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
+	defined(CONFIG_FSNOTIFY)
 
-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-		   int pc);
+void latency_fsnotify(struct trace_array *tr);
+
 #else
-static inline void ftrace_trace_userstack(struct trace_array *tr,
-					  struct ring_buffer *buffer,
-					  unsigned long flags, int pc)
-{
-}
 
-static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
-				 int skip, int pc)
+static inline void latency_fsnotify(struct trace_array *tr) { }
+
+#endif
+
+#ifdef CONFIG_STACKTRACE
+void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
+#else
+static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+				 int skip)
 {
 }
 #endif /* CONFIG_STACKTRACE */
@@ -748,6 +839,8 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
+extern unsigned long ftrace_number_of_pages;
+extern unsigned long ftrace_number_of_groups;
 void ftrace_init_trace_array(struct trace_array *tr);
 #else
 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
@@ -761,6 +854,8 @@
 extern bool tracing_selftest_disabled;
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
+extern void __init disable_tracing_selftest(const char *reason);
+
 extern int trace_selftest_startup_function(struct tracer *trace,
					    struct trace_array *tr);
 extern int trace_selftest_startup_function_graph(struct tracer *trace,
@@ -784,6 +879,9 @@
 */
 #define __tracer_data		__refdata
 #else
+static inline void __init disable_tracing_selftest(const char *reason)
+{
+}
 /* Tracers are seldom changed. Optimize when selftests are disabled. */
 #define __tracer_data		__read_mostly
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
@@ -797,9 +895,7 @@
 extern int
 trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
-int trace_array_printk(struct trace_array *tr,
-		       unsigned long ip, const char *fmt, ...);
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
 void trace_printk_seq(struct trace_seq *s);
 enum print_line_t print_trace_line(struct trace_iterator *iter);
@@ -845,15 +941,21 @@
 #define TRACE_GRAPH_PRINT_PROC		0x8
 #define TRACE_GRAPH_PRINT_DURATION	0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME	0x20
-#define TRACE_GRAPH_PRINT_IRQS		0x40
-#define TRACE_GRAPH_PRINT_TAIL		0x80
-#define TRACE_GRAPH_SLEEP_TIME		0x100
-#define TRACE_GRAPH_GRAPH_TIME		0x200
+#define TRACE_GRAPH_PRINT_REL_TIME	0x40
+#define TRACE_GRAPH_PRINT_IRQS		0x80
+#define TRACE_GRAPH_PRINT_TAIL		0x100
+#define TRACE_GRAPH_SLEEP_TIME		0x200
+#define TRACE_GRAPH_GRAPH_TIME		0x400
 #define TRACE_GRAPH_PRINT_FILL_SHIFT	28
 #define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 
 extern void ftrace_graph_sleep_time_control(bool enable);
+
+#ifdef CONFIG_FUNCTION_PROFILER
 extern void ftrace_graph_graph_time_control(bool enable);
+#else
+static inline void ftrace_graph_graph_time_control(bool enable) { }
+#endif
 
 extern enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
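Note that inserting TRACE_GRAPH_PRINT_REL_TIME at 0x40 renumbers every later flag, so these values only have meaning as a recompiled bitmask, e.g.:

	u32 flags = TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_REL_TIME;

	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		pr_info("graph output uses timestamps relative to trace start\n");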
@@ -864,10 +966,10 @@
 extern void graph_trace_close(struct trace_iterator *iter);
 extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
-			       unsigned long flags, int pc);
+			       unsigned int trace_ctx);
 extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
-				 unsigned long flags, int pc);
+				 unsigned int trace_ctx);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern struct ftrace_hash __rcu *ftrace_graph_hash;
@@ -985,6 +1087,10 @@
 extern struct list_head ftrace_pids;
 
 #ifdef CONFIG_FUNCTION_TRACER
+
+#define FTRACE_PID_IGNORE	-1
+#define FTRACE_PID_TRACE	-2
+
 struct ftrace_func_command {
	struct list_head	list;
	char			*name;
@@ -996,12 +1102,15 @@
 extern bool ftrace_filter_param __initdata;
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
-	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
+	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
+		FTRACE_PID_IGNORE;
 }
 extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
 void ftrace_destroy_function_files(struct trace_array *tr);
+int ftrace_allocate_ftrace_ops(struct trace_array *tr);
+void ftrace_free_ftrace_ops(struct trace_array *tr);
 void ftrace_init_global_array_ops(struct trace_array *tr);
 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
 void ftrace_reset_array_ops(struct trace_array *tr);
@@ -1023,6 +1132,11 @@
 {
	return 0;
 }
+static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
+{
+	return 0;
+}
+static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
 static inline __init void
 ftrace_init_global_array_ops(struct trace_array *tr) { }
@@ -1084,6 +1198,11 @@
 void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+
+extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+			     int len, int reset);
+extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+			      int len, int reset);
 #else
 struct ftrace_func_command;
 
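Exposing ftrace_set_filter()/ftrace_set_notrace() here lets in-kernel users scope an ftrace_ops before registering it. A hedged sketch (my_trace_callback is hypothetical, and its signature varies across kernel versions):

	static struct ftrace_ops my_ops = {
		.func = my_trace_callback,	/* hypothetical callback */
	};

	/* Trace only functions matching the glob, resetting any old filter. */
	ret = ftrace_set_filter(&my_ops, "tcp_*", strlen("tcp_*"), 1);
	if (!ret)
		ret = register_ftrace_function(&my_ops);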
@@ -1209,6 +1328,7 @@
	C(IRQ_INFO,		"irq-info"),		\
	C(MARKERS,		"markers"),		\
	C(EVENT_FORK,		"event-fork"),		\
+	C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
	FUNCTION_FLAGS					\
	FGRAPH_FLAGS					\
	STACK_FLAGS					\
@@ -1306,21 +1426,21 @@
 };
 
 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event);
 
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
-				     unsigned long flags, int pc,
+				     unsigned int trace_ctx,
				     struct pt_regs *regs);
 
 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
-					      struct ring_buffer *buffer,
+					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
-					      unsigned long flags, int pc)
+					      unsigned int trace_ctx)
 {
-	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
+	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
 }
 
 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
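With the renames and the trace_ctx consolidation, the reserve/commit write path reads roughly as below (TRACE_MY_TYPE and struct my_entry are illustrative stand-ins for a real entry type from trace_entries.h, and tracing_gen_ctx() is assumed from the same series):

	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned int trace_ctx = tracing_gen_ctx();

	event = trace_buffer_lock_reserve(buffer, TRACE_MY_TYPE,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);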
@@ -1329,7 +1449,7 @@
 void trace_buffered_event_enable(void);
 
 static inline void
-__trace_event_discard_commit(struct ring_buffer *buffer,
+__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
 {
	if (this_cpu_read(trace_buffered_event) == event) {
@@ -1343,7 +1463,7 @@
 /*
  * Helper function for event_trigger_unlock_commit{_regs}().
  * If there are event triggers attached to this event that requires
- * filtering against its fields, then they wil be called as the
+ * filtering against its fields, then they will be called as the
  * entry already holds the field information of the current event.
  *
  * It also checks if the event should be discarded or not.
@@ -1355,7 +1475,7 @@
  */
 static inline bool
 __event_trigger_test_discard(struct trace_event_file *file,
-			     struct ring_buffer *buffer,
+			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
@@ -1393,8 +1513,7 @@
  * @buffer: The ring buffer that the event is being written to
  * @event: The event meta data in the ring buffer
  * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
  *
  * This is a helper function to handle triggers that require data
  * from the event itself. It also tests the event against filters and
@@ -1402,14 +1521,14 @@
  */
 static inline void
 event_trigger_unlock_commit(struct trace_event_file *file,
-			    struct ring_buffer *buffer,
+			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
-			    void *entry, unsigned long irq_flags, int pc)
+			    void *entry, unsigned int trace_ctx)
 {
	enum event_trigger_type tt = ETT_NONE;
 
	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
 
	if (tt)
		event_triggers_post_call(file, tt);
@@ -1421,8 +1540,7 @@
  * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
@@ -1433,16 +1551,16 @@
 */
 static inline void
 event_trigger_unlock_commit_regs(struct trace_event_file *file,
-				 struct ring_buffer *buffer,
+				 struct trace_buffer *buffer,
				 struct ring_buffer_event *event,
-				 void *entry, unsigned long irq_flags, int pc,
+				 void *entry, unsigned int trace_ctx,
				 struct pt_regs *regs)
 {
	enum event_trigger_type tt = ETT_NONE;
 
	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
-						irq_flags, pc, regs);
+						trace_ctx, regs);
 
	if (tt)
		event_triggers_post_call(file, tt);
@@ -1474,6 +1592,7 @@
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
+	MATCH_INDEX,
 };
 
 struct regex {
@@ -1518,7 +1637,8 @@
 extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
 extern int filter_assign_type(const char *type);
-extern int create_event_filter(struct trace_event_call *call,
+extern int create_event_filter(struct trace_array *tr,
+			       struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
 extern void free_event_filter(struct event_filter *filter);
@@ -1532,6 +1652,7 @@
 extern int event_trace_init(void);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
+extern void __trace_early_add_events(struct trace_array *tr);
 
 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
						   const char *system,
@@ -1550,6 +1671,8 @@
 
 extern const struct file_operations event_trigger_fops;
 extern const struct file_operations event_hist_fops;
+extern const struct file_operations event_hist_debug_fops;
+extern const struct file_operations event_inject_fops;
 
 #ifdef CONFIG_HIST_TRIGGERS
 extern int register_trigger_hist_cmd(void);
@@ -1824,6 +1947,11 @@
 extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
 extern int tracing_alloc_snapshot(void);
+extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
+extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
+
+extern int tracing_snapshot_cond_disable(struct trace_array *tr);
+extern void *tracing_cond_snapshot_data(struct trace_array *tr);
 
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
@@ -1832,10 +1960,18 @@
 extern const char *__stop___tracepoint_str[];
 
 void trace_printk_control(bool enabled);
-void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
+
+/* Used from boot time tracer */
+extern int trace_set_options(struct trace_array *tr, char *option);
+extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
+extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+					  unsigned long size, int cpu_id);
+extern int tracing_set_cpumask(struct trace_array *tr,
+			       cpumask_var_t tracing_cpumask_new);
+
 
 #define MAX_EVENT_NAME_LEN	64
 
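These prototypes exist so the boot-time tracer (kernel/trace/trace_boot.c) can drive an instance without going through tracefs writes. A rough equivalent of what a boot-config fragment triggers:

	char opts[] = "sym-addr";

	trace_set_options(tr, opts);
	tracing_set_tracer(tr, "function_graph");
	tracing_resize_ring_buffer(tr, 1024 * 1024, RING_BUFFER_ALL_CPUS);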
@@ -1843,6 +1979,11 @@
 extern ssize_t trace_parse_run_command(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos,
		int (*createfn)(int, char**));
+
+extern unsigned int err_pos(char *cmd, const char *str);
+extern void tracing_log_err(struct trace_array *tr,
+			    const char *loc, const char *cmd,
+			    const char **errs, u8 type, u8 pos);
 
 /*
 * Normal trace_printk() and friends allocates special buffers
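err_pos() and tracing_log_err() feed the per-instance error log added alongside the err_log list earlier in this patch: type indexes the errs[] message table and pos places a caret under the offending token of cmd. A hedged caller sketch (the message table and cmd buffer are illustrative):

	static const char *err_text[] = { "Variable not found" };

	/* Logs to the instance's tracefs error_log with '^' under "$bad_var". */
	tracing_log_err(tr, "hist:sys/event", cmd, err_text, 0,
			err_pos(cmd, "$bad_var"));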
@@ -1856,17 +1997,15 @@
 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
+#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
-	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-		     filter)
+#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
+	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #undef FTRACE_ENTRY_PACKED
-#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
-	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-		     filter)
+#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print)	\
+	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
 #include "trace_entries.h"
 
@@ -1891,6 +2030,9 @@
 #ifdef CONFIG_EVENT_TRACING
 void trace_event_init(void);
 void trace_event_eval_update(struct trace_eval_map **map, int len);
+/* Used from boot time tracer */
+extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
+extern int trigger_process_regex(struct trace_event_file *file, char *buff);
 #else
 static inline void __init trace_event_init(void) { }
 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
@@ -1942,4 +2084,16 @@
	iter->pos = -1;
 }
 
+/* Check the name is good for event/group/fields */
+static inline bool is_good_name(const char *name)
+{
+	if (!isalpha(*name) && *name != '_')
+		return false;
+	while (*++name != '\0') {
+		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+			return false;
+	}
+	return true;
+}
+
 #endif /* _LINUX_KERNEL_TRACE_H */
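The new is_good_name() helper (the reason for the linux/ctype.h include added at the top of the file) enforces C-identifier rules on event, group and field names. Typical use by a probe-event parser (event_name is illustrative):

	if (!is_good_name(event_name))	/* "my_probe" passes;           */
		return -EINVAL;		/* "1bad" and "bad-name" do not */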