2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/trace/trace.h
@@ -11,12 +11,15 @@
 #include <linux/mmiotrace.h>
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
+#include <linux/trace.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/trace_seq.h>
 #include <linux/trace_events.h>
 #include <linux/compiler.h>
-#include <linux/trace_seq.h>
 #include <linux/glob.h>
+#include <linux/irq_work.h>
+#include <linux/workqueue.h>
+#include <linux/ctype.h>
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 #include <asm/unistd.h>		/* For NR_SYSCALLS */
@@ -50,11 +53,17 @@
 #undef __field
 #define __field(type, item)		type item;
 
+#undef __field_fn
+#define __field_fn(type, item)		type item;
+
 #undef __field_struct
 #define __field_struct(type, item)	__field(type, item)
 
 #undef __field_desc
 #define __field_desc(type, container, item)
+
+#undef __field_packed
+#define __field_packed(type, container, item)
 
 #undef __array
 #define __array(type, item, size)	type item[size];
@@ -69,28 +78,36 @@
 #define F_STRUCT(args...)		args
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
 	struct struct_name {						\
 		struct trace_entry	ent;				\
 		tstruct							\
 	}
 
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
+#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
 
 #undef FTRACE_ENTRY_REG
-#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
-			 filter, regfn) \
-	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-		     filter)
+#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)	\
+	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
 #undef FTRACE_ENTRY_PACKED
-#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
-			    filter)					\
-	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-		     filter) __packed
+#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
+	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
 
 #include "trace_entries.h"
+
+/* Use this for memory failure errors */
+#define MEM_FAIL(condition, fmt, ...) ({			\
+	static bool __section(".data.once") __warned;		\
+	int __ret_warn_once = !!(condition);			\
+								\
+	if (unlikely(__ret_warn_once && !__warned)) {		\
+		__warned = true;				\
+		pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
+	}							\
+	unlikely(__ret_warn_once);				\
+})
 
 /*
 * syscalls are special, and need special handling, this is why
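
The MEM_FAIL() macro added above reports a memory-allocation failure once per call site (a pr_err() guarded by a .data.once flag) and evaluates to the condition itself, so a caller can test and report in one step. A minimal sketch of the intended pattern; the allocation shown is hypothetical:

	struct trace_array_cpu __percpu *data;

	data = alloc_percpu(struct trace_array_cpu);
	if (MEM_FAIL(!data, "Failed to allocate per-CPU trace data\n"))
		return -ENOMEM;	/* warned once; condition passed through */
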
@@ -117,27 +134,6 @@
 	struct trace_entry	ent;
 	unsigned long		func;
 	unsigned long		ret_ip;
-};
-
-/*
- * trace_flag_type is an enumeration that holds different
- * states when a trace occurs. These are:
- *  IRQS_OFF		- interrupts were disabled
- *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
- *  NEED_RESCHED	- reschedule is requested
- *  HARDIRQ		- inside an interrupt handler
- *  SOFTIRQ		- inside a softirq handler
- *  NEED_RESCHED_LAZY	- lazy reschedule is requested
- */
-enum trace_flag_type {
-	TRACE_FLAG_IRQS_OFF		= 0x01,
-	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
-	TRACE_FLAG_NEED_RESCHED		= 0x04,
-	TRACE_FLAG_HARDIRQ		= 0x08,
-	TRACE_FLAG_SOFTIRQ		= 0x10,
-	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
-	TRACE_FLAG_NMI			= 0x40,
-	TRACE_FLAG_NEED_RESCHED_LAZY	= 0x80,
 };
 
 #define TRACE_BUF_SIZE		1024
@@ -167,18 +163,18 @@
 	kuid_t			uid;
 	char			comm[TASK_COMM_LEN];
 
-	bool			ignore_pid;
 #ifdef CONFIG_FUNCTION_TRACER
-	bool			ftrace_ignore_pid;
+	int			ftrace_ignore_pid;
 #endif
+	bool			ignore_pid;
 };
 
 struct tracer;
 struct trace_option_dentry;
 
-struct trace_buffer {
+struct array_buffer {
 	struct trace_array		*tr;
-	struct ring_buffer		*buffer;
+	struct trace_buffer		*buffer;
 	struct trace_array_cpu __percpu	*data;
 	u64				time_start;
 	int				cpu;
@@ -196,6 +192,75 @@
 	unsigned long			*pids;
 };
 
+enum {
+	TRACE_PIDS		= BIT(0),
+	TRACE_NO_PIDS		= BIT(1),
+};
+
+static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
+				    struct trace_pid_list *no_pid_list)
+{
+	/* Return true if the pid list in type has pids */
+	return ((type & TRACE_PIDS) && pid_list) ||
+		((type & TRACE_NO_PIDS) && no_pid_list);
+}
+
+static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
+					 struct trace_pid_list *no_pid_list)
+{
+	/*
+	 * Turning off what is in @type, return true if the "other"
+	 * pid list still has pids in it.
+	 */
+	return (!(type & TRACE_PIDS) && pid_list) ||
+		(!(type & TRACE_NO_PIDS) && no_pid_list);
+}
+
+typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
+
+/**
+ * struct cond_snapshot - conditional snapshot data and callback
+ *
+ * The cond_snapshot structure encapsulates a callback function and
+ * data associated with the snapshot for a given tracing instance.
+ *
+ * When a snapshot is taken conditionally, by invoking
+ * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
+ * passed in turn to the cond_snapshot.update() function.  That data
+ * can be compared by the update() implementation with the cond_data
+ * contained within the struct cond_snapshot instance associated with
+ * the trace_array.  Because the tr->max_lock is held throughout the
+ * update() call, the update() function can directly retrieve the
+ * cond_snapshot and cond_data associated with the per-instance
+ * snapshot associated with the trace_array.
+ *
+ * The cond_snapshot.update() implementation can save data to be
+ * associated with the snapshot if it decides to, and returns 'true'
+ * in that case, or it returns 'false' if the conditional snapshot
+ * shouldn't be taken.
+ *
+ * The cond_snapshot instance is created and associated with the
+ * user-defined cond_data by tracing_snapshot_cond_enable().
+ * Likewise, the cond_snapshot instance is destroyed and is no longer
+ * associated with the trace instance by
+ * tracing_snapshot_cond_disable().
+ *
+ * The method below is required.
+ *
+ * @update: When a conditional snapshot is invoked, the update()
+ *	callback function is invoked with the tr->max_lock held.  The
+ *	update() implementation signals whether or not to actually
+ *	take the snapshot, by returning 'true' if so, 'false' if no
+ *	snapshot should be taken.  Because the max_lock is held for
+ *	the duration of update(), the implementation is safe to
+ *	directly retrieve and save any implementation data it needs
+ *	to in association with the snapshot.
+ */
+struct cond_snapshot {
+	void				*cond_data;
+	cond_update_fn_t		update;
+};
+
 /*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
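
The kernel-doc above defines the conditional-snapshot contract. A minimal sketch of an update() callback wired up through the tracing_snapshot_cond*() declarations that appear near the end of this header; the threshold and measured value are hypothetical:

	/* Snapshot only when the value handed to tracing_snapshot_cond()
	 * exceeds the threshold saved when the snapshot was enabled. */
	static bool threshold_update(struct trace_array *tr, void *cond_data)
	{
		u64 *val = cond_data;				/* trigger-site data */
		u64 *limit = tracing_cond_snapshot_data(tr);	/* enable-time data */

		/* Runs with tr->max_lock held; true means take the snapshot. */
		return val && *val > *limit;
	}

	/* setup: associate the callback and threshold with the instance */
	ret = tracing_snapshot_cond_enable(tr, &my_limit, threshold_update);

	/* hot path: hand the measured value to the update() callback */
	tracing_snapshot_cond(tr, &measured_value);
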
@@ -204,7 +269,7 @@
 struct trace_array {
 	struct list_head	list;
 	char			*name;
-	struct trace_buffer	trace_buffer;
+	struct array_buffer	array_buffer;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	/*
 	 * The max_buffer is used to snapshot the trace when a maximum
@@ -212,18 +277,24 @@
 	 * Some tracers will use this to store a maximum trace while
 	 * it continues examining live traces.
 	 *
-	 * The buffers for the max_buffer are set up the same as the trace_buffer
+	 * The buffers for the max_buffer are set up the same as the array_buffer
 	 * When a snapshot is taken, the buffer of the max_buffer is swapped
-	 * with the buffer of the trace_buffer and the buffers are reset for
-	 * the trace_buffer so the tracing can continue.
+	 * with the buffer of the array_buffer and the buffers are reset for
+	 * the array_buffer so the tracing can continue.
 	 */
-	struct trace_buffer	max_buffer;
+	struct array_buffer	max_buffer;
 	bool			allocated_snapshot;
 #endif
 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
 	unsigned long		max_latency;
+#ifdef CONFIG_FSNOTIFY
+	struct dentry		*d_max_latency;
+	struct work_struct	fsnotify_work;
+	struct irq_work		fsnotify_irqwork;
+#endif
 #endif
 	struct trace_pid_list	__rcu *filtered_pids;
+	struct trace_pid_list	__rcu *filtered_no_pids;
 	/*
 	 * max_lock is used to protect the swapping of buffers
 	 * when taking a max snapshot. The buffers themselves are
@@ -249,11 +320,14 @@
 	int			clock_id;
 	int			nr_topts;
 	bool			clear_trace;
+	int			buffer_percent;
+	unsigned int		n_err_log_entries;
 	struct tracer		*current_trace;
 	unsigned int		trace_flags;
 	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
 	unsigned int		flags;
 	raw_spinlock_t		start_lock;
+	struct list_head	err_log;
 	struct dentry		*dir;
 	struct dentry		*options;
 	struct dentry		*percpu_dir;
@@ -264,9 +338,11 @@
 	struct trace_event_file *trace_marker_file;
 	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 	int			ref;
+	int			trace_ref;
 #ifdef CONFIG_FUNCTION_TRACER
 	struct ftrace_ops	*ops;
 	struct trace_pid_list	__rcu *function_pids;
+	struct trace_pid_list	__rcu *function_no_pids;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	/* All of these are protected by the ftrace_lock */
 	struct list_head	func_probes;
@@ -278,6 +354,9 @@
 #endif
 	int			time_stamp_abs_ref;
 	struct list_head	hist_vars;
+#ifdef CONFIG_TRACER_SNAPSHOT
+	struct cond_snapshot	*cond_snapshot;
+#endif
 };
 
 enum {
@@ -289,7 +368,9 @@
 extern struct mutex trace_types_lock;
 
 extern int trace_array_get(struct trace_array *tr);
-extern void trace_array_put(struct trace_array *tr);
+extern int tracing_check_open_get_tr(struct trace_array *tr);
+extern struct trace_array *trace_array_find(const char *instance);
+extern struct trace_array *trace_array_find_get(const char *instance);
 
 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -317,11 +398,11 @@
 	__builtin_types_compatible_p(typeof(var), type *)
 
 #undef IF_ASSIGN
-#define IF_ASSIGN(var, entry, etype, id)		\
-	if (FTRACE_CMP_TYPE(var, etype)) {		\
-		var = (typeof(var))(entry);		\
-		WARN_ON(id && (entry)->type != id);	\
-		break;					\
+#define IF_ASSIGN(var, entry, etype, id)			\
+	if (FTRACE_CMP_TYPE(var, etype)) {			\
+		var = (typeof(var))(entry);			\
+		WARN_ON(id != 0 && (entry)->type != id);	\
+		break;						\
 	}
 
 /* Will cause compile errors if type is not found. */
@@ -449,7 +530,6 @@
 	struct tracer		*next;
 	struct tracer_flags	*flags;
 	int			enabled;
-	int			ref;
 	bool			print_max;
 	bool			allow_instances;
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -469,16 +549,14 @@
 * When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
+*   If callback is registered to the "global" list, the list
+*    function is called and recursion checks the GLOBAL bits.
+*    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 */
 enum {
-	TRACE_BUFFER_BIT,
-	TRACE_BUFFER_NMI_BIT,
-	TRACE_BUFFER_IRQ_BIT,
-	TRACE_BUFFER_SIRQ_BIT,
-
-	/* Start of function recursion bits */
+	/* Function recursion bits */
 	TRACE_FTRACE_BIT,
 	TRACE_FTRACE_NMI_BIT,
 	TRACE_FTRACE_IRQ_BIT,
@@ -523,6 +601,13 @@
 
 	TRACE_GRAPH_DEPTH_START_BIT,
 	TRACE_GRAPH_DEPTH_END_BIT,
+
+	/*
+	 * To implement set_graph_notrace, if this bit is set, we ignore
+	 * function graph tracing of called functions, until the return
+	 * function is called to clear it.
+	 */
+	TRACE_GRAPH_NOTRACE_BIT,
 };
 
 #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
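
The recursion bits above guard the function-trace callbacks against re-entry. A sketch of the idiom, using trace_recursion_set() from the line above together with its trace_recursion_clear()/trace_recursion_test() companions (defined alongside it in this header, outside the hunks shown here):

	if (trace_recursion_test(TRACE_FTRACE_BIT))
		return;				/* already inside the callback */
	trace_recursion_set(TRACE_FTRACE_BIT);
	/* ... body of the function-trace callback ... */
	trace_recursion_clear(TRACE_FTRACE_BIT);
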
@@ -618,11 +703,11 @@
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void tracing_reset(struct trace_buffer *buf, int cpu);
-void tracing_reset_online_cpus(struct trace_buffer *buf);
+void tracing_reset_online_cpus(struct array_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
+int tracing_open_generic_tr(struct inode *inode, struct file *filp);
 bool tracing_is_disabled(void);
 bool tracer_tracing_is_on(struct trace_array *tr);
 void tracer_tracing_on(struct trace_array *tr);
@@ -633,16 +718,15 @@
 			  void *data,
 			  const struct file_operations *fops);
 
-struct dentry *tracing_init_dentry(void);
+int tracing_init_dentry(void);
 
 struct ring_buffer_event;
 
 struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
 			  int type,
 			  unsigned long len,
-			  unsigned long flags,
-			  int pc);
+			  unsigned int trace_ctx);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 					    struct trace_array_cpu *data);
@@ -650,7 +734,7 @@
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
-void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 					struct ring_buffer_event *event);
 
 int trace_empty(struct trace_iterator *iter);
@@ -661,14 +745,17 @@
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
+unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
+unsigned long trace_total_entries(struct trace_array *tr);
+
 void trace_function(struct trace_array *tr,
 		    unsigned long ip,
 		    unsigned long parent_ip,
-		    unsigned long flags, int pc);
+		    unsigned int trace_ctx);
 void trace_graph_function(struct trace_array *tr,
 			  unsigned long ip,
 			  unsigned long parent_ip,
-			  unsigned long flags, int pc);
+			  unsigned int trace_ctx);
 void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
@@ -704,6 +791,7 @@
 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
 			     pid_t search_pid);
 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
+			    struct trace_pid_list *filtered_no_pids,
 			    struct task_struct *task);
 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
 				  struct task_struct *self,
@@ -717,27 +805,28 @@
 			    const char __user *ubuf, size_t cnt);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
+void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
+		   void *cond_data);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-#ifdef CONFIG_STACKTRACE
-void ftrace_trace_userstack(struct trace_array *tr,
-			    struct ring_buffer *buffer, unsigned long flags,
-			    int pc);
+#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
+	defined(CONFIG_FSNOTIFY)
 
-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-		   int pc);
+void latency_fsnotify(struct trace_array *tr);
+
 #else
-static inline void ftrace_trace_userstack(struct trace_array *tr,
-					  struct ring_buffer *buffer,
-					  unsigned long flags, int pc)
-{
-}
 
-static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
-				 int skip, int pc)
+static inline void latency_fsnotify(struct trace_array *tr) { }
+
+#endif
+
+#ifdef CONFIG_STACKTRACE
+void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
+#else
+static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+				 int skip)
 {
 }
 #endif /* CONFIG_STACKTRACE */
@@ -750,6 +839,8 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
+extern unsigned long ftrace_number_of_pages;
+extern unsigned long ftrace_number_of_groups;
 void ftrace_init_trace_array(struct trace_array *tr);
 #else
 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
@@ -763,6 +854,8 @@
 extern bool tracing_selftest_disabled;
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
+extern void __init disable_tracing_selftest(const char *reason);
+
 extern int trace_selftest_startup_function(struct tracer *trace,
 					   struct trace_array *tr);
 extern int trace_selftest_startup_function_graph(struct tracer *trace,
@@ -786,6 +879,9 @@
 */
 #define __tracer_data __refdata
 #else
+static inline void __init disable_tracing_selftest(const char *reason)
+{
+}
 /* Tracers are seldom changed. Optimize when selftests are disabled. */
 #define __tracer_data __read_mostly
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
@@ -799,9 +895,7 @@
 extern int
 trace_array_vprintk(struct trace_array *tr,
 		    unsigned long ip, const char *fmt, va_list args);
-int trace_array_printk(struct trace_array *tr,
-		       unsigned long ip, const char *fmt, ...);
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
 			   unsigned long ip, const char *fmt, ...);
 void trace_printk_seq(struct trace_seq *s);
 enum print_line_t print_trace_line(struct trace_iterator *iter);
@@ -847,15 +941,21 @@
 #define TRACE_GRAPH_PRINT_PROC		0x8
 #define TRACE_GRAPH_PRINT_DURATION	0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME	0x20
-#define TRACE_GRAPH_PRINT_IRQS		0x40
-#define TRACE_GRAPH_PRINT_TAIL		0x80
-#define TRACE_GRAPH_SLEEP_TIME		0x100
-#define TRACE_GRAPH_GRAPH_TIME		0x200
+#define TRACE_GRAPH_PRINT_REL_TIME	0x40
+#define TRACE_GRAPH_PRINT_IRQS		0x80
+#define TRACE_GRAPH_PRINT_TAIL		0x100
+#define TRACE_GRAPH_SLEEP_TIME		0x200
+#define TRACE_GRAPH_GRAPH_TIME		0x400
 #define TRACE_GRAPH_PRINT_FILL_SHIFT	28
 #define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 
 extern void ftrace_graph_sleep_time_control(bool enable);
+
+#ifdef CONFIG_FUNCTION_PROFILER
 extern void ftrace_graph_graph_time_control(bool enable);
+#else
+static inline void ftrace_graph_graph_time_control(bool enable) { }
+#endif
 
 extern enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
@@ -866,10 +966,10 @@
 extern void graph_trace_close(struct trace_iterator *iter);
 extern int __trace_graph_entry(struct trace_array *tr,
 			       struct ftrace_graph_ent *trace,
-			       unsigned long flags, int pc);
+			       unsigned int trace_ctx);
 extern void __trace_graph_return(struct trace_array *tr,
 				 struct ftrace_graph_ret *trace,
-				 unsigned long flags, int pc);
+				 unsigned int trace_ctx);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern struct ftrace_hash __rcu *ftrace_graph_hash;
@@ -987,6 +1087,10 @@
 extern struct list_head ftrace_pids;
 
 #ifdef CONFIG_FUNCTION_TRACER
+
+#define FTRACE_PID_IGNORE	-1
+#define FTRACE_PID_TRACE	-2
+
 struct ftrace_func_command {
 	struct list_head	list;
 	char			*name;
@@ -998,12 +1102,15 @@
 extern bool ftrace_filter_param __initdata;
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
-	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
+	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
+		FTRACE_PID_IGNORE;
 }
 extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
 				 struct dentry *parent);
 void ftrace_destroy_function_files(struct trace_array *tr);
+int ftrace_allocate_ftrace_ops(struct trace_array *tr);
+void ftrace_free_ftrace_ops(struct trace_array *tr);
 void ftrace_init_global_array_ops(struct trace_array *tr);
 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
 void ftrace_reset_array_ops(struct trace_array *tr);
@@ -1025,6 +1132,11 @@
 {
 	return 0;
 }
+static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
+{
+	return 0;
+}
+static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
 static inline __init void
 ftrace_init_global_array_ops(struct trace_array *tr) { }
@@ -1086,6 +1198,11 @@
 void ftrace_create_filter_files(struct ftrace_ops *ops,
 				struct dentry *parent);
 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+
+extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
			     int len, int reset);
+extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			      int len, int reset);
 #else
 struct ftrace_func_command;
 
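
ftrace_set_filter() and ftrace_set_notrace(), exported above, narrow which functions a registered ftrace_ops callback sees. A minimal ordering sketch; my_ops and its callback are assumed to be defined elsewhere:

	/* reset=1 drops any earlier filter before installing this one */
	ret = ftrace_set_filter(&my_ops, "kfree", strlen("kfree"), 1);
	if (ret)
		return ret;
	/* glob patterns work too; matching functions are never traced */
	ret = ftrace_set_notrace(&my_ops, "*_console*", strlen("*_console*"), 0);
	if (ret)
		return ret;
	return register_ftrace_function(&my_ops);	/* callback goes live */
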
@@ -1211,6 +1328,7 @@
 		C(IRQ_INFO,		"irq-info"),		\
 		C(MARKERS,		"markers"),		\
 		C(EVENT_FORK,		"event-fork"),		\
+		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
 		FUNCTION_FLAGS					\
 		FGRAPH_FLAGS					\
 		STACK_FLAGS					\
@@ -1308,21 +1426,21 @@
 };
 
 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
 				     struct ring_buffer_event *event);
 
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
 				     struct ring_buffer_event *event,
-				     unsigned long flags, int pc,
+				     unsigned int trace_ctx,
 				     struct pt_regs *regs);
 
 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
-					      struct ring_buffer *buffer,
+					      struct trace_buffer *buffer,
 					      struct ring_buffer_event *event,
-					      unsigned long flags, int pc)
+					      unsigned int trace_ctx)
 {
-	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
+	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
 }
 
 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -1331,7 +1449,7 @@
 void trace_buffered_event_enable(void);
 
 static inline void
-__trace_event_discard_commit(struct ring_buffer *buffer,
+__trace_event_discard_commit(struct trace_buffer *buffer,
 			     struct ring_buffer_event *event)
 {
 	if (this_cpu_read(trace_buffered_event) == event) {
@@ -1345,7 +1463,7 @@
 /*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that requires
- * filtering against its fields, then they wil be called as the
+ * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
@@ -1357,7 +1475,7 @@
 */
 static inline bool
 __event_trigger_test_discard(struct trace_event_file *file,
-			     struct ring_buffer *buffer,
+			     struct trace_buffer *buffer,
 			     struct ring_buffer_event *event,
 			     void *entry,
 			     enum event_trigger_type *tt)
@@ -1395,8 +1513,7 @@
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
@@ -1404,14 +1521,14 @@
 */
 static inline void
 event_trigger_unlock_commit(struct trace_event_file *file,
-			    struct ring_buffer *buffer,
+			    struct trace_buffer *buffer,
 			    struct ring_buffer_event *event,
-			    void *entry, unsigned long irq_flags, int pc)
+			    void *entry, unsigned int trace_ctx)
 {
 	enum event_trigger_type tt = ETT_NONE;
 
 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
 
 	if (tt)
 		event_triggers_post_call(file, tt);
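
After this conversion a writer samples a single context word up front instead of threading (irq_flags, pc) through every signature. An illustrative reserve/fill/commit sequence built from the declarations in this header; tracing_gen_ctx() is assumed from the companion trace.c changes, and buffer, trace_file, ip and the ftrace_entry type are assumed locals:

	unsigned int trace_ctx = tracing_gen_ctx();	/* irq/preempt state, once */
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	event_trigger_unlock_commit(trace_file, buffer, event, entry, trace_ctx);
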
@@ -1423,8 +1540,7 @@
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
@@ -1435,16 +1551,16 @@
 */
 static inline void
 event_trigger_unlock_commit_regs(struct trace_event_file *file,
-				 struct ring_buffer *buffer,
+				 struct trace_buffer *buffer,
 				 struct ring_buffer_event *event,
-				 void *entry, unsigned long irq_flags, int pc,
+				 void *entry, unsigned int trace_ctx,
 				 struct pt_regs *regs)
 {
 	enum event_trigger_type tt = ETT_NONE;
 
 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
 		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
-						irq_flags, pc, regs);
+						trace_ctx, regs);
 
 	if (tt)
 		event_triggers_post_call(file, tt);
@@ -1476,6 +1592,7 @@
 	MATCH_MIDDLE_ONLY,
 	MATCH_END_ONLY,
 	MATCH_GLOB,
+	MATCH_INDEX,
 };
 
 struct regex {
@@ -1520,7 +1637,8 @@
 extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
 extern int filter_assign_type(const char *type);
-extern int create_event_filter(struct trace_event_call *call,
+extern int create_event_filter(struct trace_array *tr,
+			       struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
 extern void free_event_filter(struct event_filter *filter);
@@ -1534,6 +1652,7 @@
 extern int event_trace_init(void);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
+extern void __trace_early_add_events(struct trace_array *tr);
 
 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
						  const char *system,
@@ -1552,6 +1671,8 @@
 
 extern const struct file_operations event_trigger_fops;
 extern const struct file_operations event_hist_fops;
+extern const struct file_operations event_hist_debug_fops;
+extern const struct file_operations event_inject_fops;
 
 #ifdef CONFIG_HIST_TRIGGERS
 extern int register_trigger_hist_cmd(void);
@@ -1826,6 +1947,11 @@
 extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
 extern int tracing_alloc_snapshot(void);
+extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
+extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
+
+extern int tracing_snapshot_cond_disable(struct trace_array *tr);
+extern void *tracing_cond_snapshot_data(struct trace_array *tr);
 
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
@@ -1834,10 +1960,18 @@
 extern const char *__stop___tracepoint_str[];
 
 void trace_printk_control(bool enabled);
-void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
+
+/* Used from boot time tracer */
+extern int trace_set_options(struct trace_array *tr, char *option);
+extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
+extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id);
+extern int tracing_set_cpumask(struct trace_array *tr,
			       cpumask_var_t tracing_cpumask_new);
+
 
 #define MAX_EVENT_NAME_LEN	64
 
@@ -1845,6 +1979,11 @@
 extern ssize_t trace_parse_run_command(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos,
		int (*createfn)(int, char**));
+
+extern unsigned int err_pos(char *cmd, const char *str);
+extern void tracing_log_err(struct trace_array *tr,
			    const char *loc, const char *cmd,
			    const char **errs, u8 type, u8 pos);
 
 /*
 * Normal trace_printk() and friends allocates special buffers
@@ -1858,17 +1997,15 @@
 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
+#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
-	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-		     filter)
+#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
+	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #undef FTRACE_ENTRY_PACKED
-#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
-	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
-		     filter)
+#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print)	\
+	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
 #include "trace_entries.h"
 
@@ -1893,6 +2030,9 @@
 #ifdef CONFIG_EVENT_TRACING
 void trace_event_init(void);
 void trace_event_eval_update(struct trace_eval_map **map, int len);
+/* Used from boot time tracer */
+extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
+extern int trigger_process_regex(struct trace_event_file *file, char *buff);
 #else
 static inline void __init trace_event_init(void) { }
 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
@@ -1944,4 +2084,16 @@
 	iter->pos = -1;
 }
 
+/* Check the name is good for event/group/fields */
+static inline bool is_good_name(const char *name)
+{
+	if (!isalpha(*name) && *name != '_')
+		return false;
+	while (*++name != '\0') {
+		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+			return false;
+	}
+	return true;
+}
+
 #endif /* _LINUX_KERNEL_TRACE_H */
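
The is_good_name() helper added in the final hunk accepts C-identifier-style names only (isalpha()/isdigit() come from <linux/ctype.h>, included at the top of the header). A hypothetical caller:

	if (!is_good_name(event_name))	/* rejects e.g. "2foo" and "foo-bar" */
		return -EINVAL;
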