.. | .. |
---|
11 | 11 | #include <linux/mmiotrace.h> |
---|
12 | 12 | #include <linux/tracepoint.h> |
---|
13 | 13 | #include <linux/ftrace.h> |
---|
| 14 | +#include <linux/trace.h> |
---|
14 | 15 | #include <linux/hw_breakpoint.h> |
---|
15 | 16 | #include <linux/trace_seq.h> |
---|
16 | 17 | #include <linux/trace_events.h> |
---|
17 | 18 | #include <linux/compiler.h> |
---|
18 | | -#include <linux/trace_seq.h> |
---|
19 | 19 | #include <linux/glob.h> |
---|
| 20 | +#include <linux/irq_work.h> |
---|
| 21 | +#include <linux/workqueue.h> |
---|
| 22 | +#include <linux/ctype.h> |
---|
20 | 23 | |
---|
21 | 24 | #ifdef CONFIG_FTRACE_SYSCALLS |
---|
22 | 25 | #include <asm/unistd.h> /* For NR_SYSCALLS */ |
---|
.. | .. |
---|
50 | 53 | #undef __field |
---|
51 | 54 | #define __field(type, item) type item; |
---|
52 | 55 | |
---|
| 56 | +#undef __field_fn |
---|
| 57 | +#define __field_fn(type, item) type item; |
---|
| 58 | + |
---|
53 | 59 | #undef __field_struct |
---|
54 | 60 | #define __field_struct(type, item) __field(type, item) |
---|
55 | 61 | |
---|
56 | 62 | #undef __field_desc |
---|
57 | 63 | #define __field_desc(type, container, item) |
---|
| 64 | + |
---|
| 65 | +#undef __field_packed |
---|
| 66 | +#define __field_packed(type, container, item) |
---|
58 | 67 | |
---|
59 | 68 | #undef __array |
---|
60 | 69 | #define __array(type, item, size) type item[size]; |
---|
.. | .. |
---|
69 | 78 | #define F_STRUCT(args...) args |
---|
70 | 79 | |
---|
71 | 80 | #undef FTRACE_ENTRY |
---|
72 | | -#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ |
---|
| 81 | +#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ |
---|
73 | 82 | struct struct_name { \ |
---|
74 | 83 | struct trace_entry ent; \ |
---|
75 | 84 | tstruct \ |
---|
76 | 85 | } |
---|
77 | 86 | |
---|
78 | 87 | #undef FTRACE_ENTRY_DUP |
---|
79 | | -#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter) |
---|
| 88 | +#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk) |
---|
80 | 89 | |
---|
81 | 90 | #undef FTRACE_ENTRY_REG |
---|
82 | | -#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \ |
---|
83 | | - filter, regfn) \ |
---|
84 | | - FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ |
---|
85 | | - filter) |
---|
| 91 | +#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ |
---|
| 92 | + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
---|
86 | 93 | |
---|
87 | 94 | #undef FTRACE_ENTRY_PACKED |
---|
88 | | -#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \ |
---|
89 | | - filter) \ |
---|
90 | | - FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ |
---|
91 | | - filter) __packed |
---|
| 95 | +#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \ |
---|
| 96 | + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed |
---|
92 | 97 | |
---|
93 | 98 | #include "trace_entries.h" |
---|
| 99 | + |
---|
| 100 | +/* Use this for memory failure errors */ |
---|
| 101 | +#define MEM_FAIL(condition, fmt, ...) ({ \ |
---|
| 102 | + static bool __section(".data.once") __warned; \ |
---|
| 103 | + int __ret_warn_once = !!(condition); \ |
---|
| 104 | + \ |
---|
| 105 | + if (unlikely(__ret_warn_once && !__warned)) { \ |
---|
| 106 | + __warned = true; \ |
---|
| 107 | + pr_err("ERROR: " fmt, ##__VA_ARGS__); \ |
---|
| 108 | + } \ |
---|
| 109 | + unlikely(__ret_warn_once); \ |
---|
| 110 | +}) |
---|
94 | 111 | |
---|
95 | 112 | /* |
---|
96 | 113 | * syscalls are special, and need special handling, this is why |
---|
.. | .. |
---|
117 | 134 | struct trace_entry ent; |
---|
118 | 135 | unsigned long func; |
---|
119 | 136 | unsigned long ret_ip; |
---|
120 | | -}; |
---|
121 | | - |
---|
122 | | -/* |
---|
123 | | - * trace_flag_type is an enumeration that holds different |
---|
124 | | - * states when a trace occurs. These are: |
---|
125 | | - * IRQS_OFF - interrupts were disabled |
---|
126 | | - * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags |
---|
127 | | - * NEED_RESCHED - reschedule is requested |
---|
128 | | - * HARDIRQ - inside an interrupt handler |
---|
129 | | - * SOFTIRQ - inside a softirq handler |
---|
130 | | - */ |
---|
131 | | -enum trace_flag_type { |
---|
132 | | - TRACE_FLAG_IRQS_OFF = 0x01, |
---|
133 | | - TRACE_FLAG_IRQS_NOSUPPORT = 0x02, |
---|
134 | | - TRACE_FLAG_NEED_RESCHED = 0x04, |
---|
135 | | - TRACE_FLAG_HARDIRQ = 0x08, |
---|
136 | | - TRACE_FLAG_SOFTIRQ = 0x10, |
---|
137 | | - TRACE_FLAG_PREEMPT_RESCHED = 0x20, |
---|
138 | | - TRACE_FLAG_NMI = 0x40, |
---|
139 | 137 | }; |
---|
140 | 138 | |
---|
141 | 139 | #define TRACE_BUF_SIZE 1024 |
---|
.. | .. |
---|
165 | 163 | kuid_t uid; |
---|
166 | 164 | char comm[TASK_COMM_LEN]; |
---|
167 | 165 | |
---|
168 | | - bool ignore_pid; |
---|
169 | 166 | #ifdef CONFIG_FUNCTION_TRACER |
---|
170 | | - bool ftrace_ignore_pid; |
---|
| 167 | + int ftrace_ignore_pid; |
---|
171 | 168 | #endif |
---|
| 169 | + bool ignore_pid; |
---|
172 | 170 | }; |
---|
173 | 171 | |
---|
174 | 172 | struct tracer; |
---|
175 | 173 | struct trace_option_dentry; |
---|
176 | 174 | |
---|
177 | | -struct trace_buffer { |
---|
| 175 | +struct array_buffer { |
---|
178 | 176 | struct trace_array *tr; |
---|
179 | | - struct ring_buffer *buffer; |
---|
| 177 | + struct trace_buffer *buffer; |
---|
180 | 178 | struct trace_array_cpu __percpu *data; |
---|
181 | 179 | u64 time_start; |
---|
182 | 180 | int cpu; |
---|
.. | .. |
---|
194 | 192 | unsigned long *pids; |
---|
195 | 193 | }; |
---|
196 | 194 | |
---|
| 195 | +enum { |
---|
| 196 | + TRACE_PIDS = BIT(0), |
---|
| 197 | + TRACE_NO_PIDS = BIT(1), |
---|
| 198 | +}; |
---|
| 199 | + |
---|
| 200 | +static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list, |
---|
| 201 | + struct trace_pid_list *no_pid_list) |
---|
| 202 | +{ |
---|
| 203 | + /* Return true if the pid list in type has pids */ |
---|
| 204 | + return ((type & TRACE_PIDS) && pid_list) || |
---|
| 205 | + ((type & TRACE_NO_PIDS) && no_pid_list); |
---|
| 206 | +} |
---|
| 207 | + |
---|
| 208 | +static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list, |
---|
| 209 | + struct trace_pid_list *no_pid_list) |
---|
| 210 | +{ |
---|
| 211 | + /* |
---|
| 212 | + * Turning off what is in @type, return true if the "other" |
---|
| 213 | + * pid list, still has pids in it. |
---|
| 214 | + */ |
---|
| 215 | + return (!(type & TRACE_PIDS) && pid_list) || |
---|
| 216 | + (!(type & TRACE_NO_PIDS) && no_pid_list); |
---|
| 217 | +} |
---|
| 218 | + |
---|
| 219 | +typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data); |
---|
| 220 | + |
---|
| 221 | +/** |
---|
| 222 | + * struct cond_snapshot - conditional snapshot data and callback |
---|
| 223 | + * |
---|
| 224 | + * The cond_snapshot structure encapsulates a callback function and |
---|
| 225 | + * data associated with the snapshot for a given tracing instance. |
---|
| 226 | + * |
---|
| 227 | + * When a snapshot is taken conditionally, by invoking |
---|
| 228 | + * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is |
---|
| 229 | + * passed in turn to the cond_snapshot.update() function. That data |
---|
| 230 | + * can be compared by the update() implementation with the cond_data |
---|
| 231 | + * contained within the struct cond_snapshot instance associated with |
---|
| 232 | + * the trace_array. Because the tr->max_lock is held throughout the |
---|
| 233 | + * update() call, the update() function can directly retrieve the |
---|
| 234 | + * cond_snapshot and cond_data associated with the per-instance |
---|
| 235 | + * snapshot associated with the trace_array. |
---|
| 236 | + * |
---|
| 237 | + * The cond_snapshot.update() implementation can save data to be |
---|
| 238 | + * associated with the snapshot if it decides to, and returns 'true' |
---|
| 239 | + * in that case, or it returns 'false' if the conditional snapshot |
---|
| 240 | + * shouldn't be taken. |
---|
| 241 | + * |
---|
| 242 | + * The cond_snapshot instance is created and associated with the |
---|
| 243 | + * user-defined cond_data by tracing_cond_snapshot_enable(). |
---|
| 244 | + * Likewise, the cond_snapshot instance is destroyed and is no longer |
---|
| 245 | + * associated with the trace instance by |
---|
| 246 | + * tracing_cond_snapshot_disable(). |
---|
| 247 | + * |
---|
| 248 | + * The method below is required. |
---|
| 249 | + * |
---|
| 250 | + * @update: When a conditional snapshot is invoked, the update() |
---|
| 251 | + * callback function is invoked with the tr->max_lock held. The |
---|
| 252 | + * update() implementation signals whether or not to actually |
---|
| 253 | + * take the snapshot, by returning 'true' if so, 'false' if no |
---|
| 254 | + * snapshot should be taken. Because the max_lock is held for |
---|
| 255 | + * the duration of update(), the implementation is safe to |
---|
| 256 | + * directly retrieved and save any implementation data it needs |
---|
| 257 | + * to in association with the snapshot. |
---|
| 258 | + */ |
---|
| 259 | +struct cond_snapshot { |
---|
| 260 | + void *cond_data; |
---|
| 261 | + cond_update_fn_t update; |
---|
| 262 | +}; |
---|
| 263 | + |
---|
197 | 264 | /* |
---|
198 | 265 | * The trace array - an array of per-CPU trace arrays. This is the |
---|
199 | 266 | * highest level data structure that individual tracers deal with. |
---|
.. | .. |
---|
202 | 269 | struct trace_array { |
---|
203 | 270 | struct list_head list; |
---|
204 | 271 | char *name; |
---|
205 | | - struct trace_buffer trace_buffer; |
---|
| 272 | + struct array_buffer array_buffer; |
---|
206 | 273 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
207 | 274 | /* |
---|
208 | 275 | * The max_buffer is used to snapshot the trace when a maximum |
---|
.. | .. |
---|
210 | 277 | * Some tracers will use this to store a maximum trace while |
---|
211 | 278 | * it continues examining live traces. |
---|
212 | 279 | * |
---|
213 | | - * The buffers for the max_buffer are set up the same as the trace_buffer |
---|
| 280 | + * The buffers for the max_buffer are set up the same as the array_buffer |
---|
214 | 281 | * When a snapshot is taken, the buffer of the max_buffer is swapped |
---|
215 | | - * with the buffer of the trace_buffer and the buffers are reset for |
---|
216 | | - * the trace_buffer so the tracing can continue. |
---|
| 282 | + * with the buffer of the array_buffer and the buffers are reset for |
---|
| 283 | + * the array_buffer so the tracing can continue. |
---|
217 | 284 | */ |
---|
218 | | - struct trace_buffer max_buffer; |
---|
| 285 | + struct array_buffer max_buffer; |
---|
219 | 286 | bool allocated_snapshot; |
---|
220 | 287 | #endif |
---|
221 | 288 | #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) |
---|
222 | 289 | unsigned long max_latency; |
---|
| 290 | +#ifdef CONFIG_FSNOTIFY |
---|
| 291 | + struct dentry *d_max_latency; |
---|
| 292 | + struct work_struct fsnotify_work; |
---|
| 293 | + struct irq_work fsnotify_irqwork; |
---|
| 294 | +#endif |
---|
223 | 295 | #endif |
---|
224 | 296 | struct trace_pid_list __rcu *filtered_pids; |
---|
| 297 | + struct trace_pid_list __rcu *filtered_no_pids; |
---|
225 | 298 | /* |
---|
226 | 299 | * max_lock is used to protect the swapping of buffers |
---|
227 | 300 | * when taking a max snapshot. The buffers themselves are |
---|
.. | .. |
---|
247 | 320 | int clock_id; |
---|
248 | 321 | int nr_topts; |
---|
249 | 322 | bool clear_trace; |
---|
| 323 | + int buffer_percent; |
---|
| 324 | + unsigned int n_err_log_entries; |
---|
250 | 325 | struct tracer *current_trace; |
---|
251 | 326 | unsigned int trace_flags; |
---|
252 | 327 | unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE]; |
---|
253 | 328 | unsigned int flags; |
---|
254 | 329 | raw_spinlock_t start_lock; |
---|
| 330 | + struct list_head err_log; |
---|
255 | 331 | struct dentry *dir; |
---|
256 | 332 | struct dentry *options; |
---|
257 | 333 | struct dentry *percpu_dir; |
---|
.. | .. |
---|
262 | 338 | struct trace_event_file *trace_marker_file; |
---|
263 | 339 | cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ |
---|
264 | 340 | int ref; |
---|
| 341 | + int trace_ref; |
---|
265 | 342 | #ifdef CONFIG_FUNCTION_TRACER |
---|
266 | 343 | struct ftrace_ops *ops; |
---|
267 | 344 | struct trace_pid_list __rcu *function_pids; |
---|
| 345 | + struct trace_pid_list __rcu *function_no_pids; |
---|
268 | 346 | #ifdef CONFIG_DYNAMIC_FTRACE |
---|
269 | 347 | /* All of these are protected by the ftrace_lock */ |
---|
270 | 348 | struct list_head func_probes; |
---|
.. | .. |
---|
276 | 354 | #endif |
---|
277 | 355 | int time_stamp_abs_ref; |
---|
278 | 356 | struct list_head hist_vars; |
---|
| 357 | +#ifdef CONFIG_TRACER_SNAPSHOT |
---|
| 358 | + struct cond_snapshot *cond_snapshot; |
---|
| 359 | +#endif |
---|
279 | 360 | }; |
---|
280 | 361 | |
---|
281 | 362 | enum { |
---|
.. | .. |
---|
287 | 368 | extern struct mutex trace_types_lock; |
---|
288 | 369 | |
---|
289 | 370 | extern int trace_array_get(struct trace_array *tr); |
---|
290 | | -extern void trace_array_put(struct trace_array *tr); |
---|
| 371 | +extern int tracing_check_open_get_tr(struct trace_array *tr); |
---|
| 372 | +extern struct trace_array *trace_array_find(const char *instance); |
---|
| 373 | +extern struct trace_array *trace_array_find_get(const char *instance); |
---|
291 | 374 | |
---|
292 | 375 | extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs); |
---|
293 | 376 | extern int tracing_set_clock(struct trace_array *tr, const char *clockstr); |
---|
.. | .. |
---|
315 | 398 | __builtin_types_compatible_p(typeof(var), type *) |
---|
316 | 399 | |
---|
317 | 400 | #undef IF_ASSIGN |
---|
318 | | -#define IF_ASSIGN(var, entry, etype, id) \ |
---|
319 | | - if (FTRACE_CMP_TYPE(var, etype)) { \ |
---|
320 | | - var = (typeof(var))(entry); \ |
---|
321 | | - WARN_ON(id && (entry)->type != id); \ |
---|
322 | | - break; \ |
---|
| 401 | +#define IF_ASSIGN(var, entry, etype, id) \ |
---|
| 402 | + if (FTRACE_CMP_TYPE(var, etype)) { \ |
---|
| 403 | + var = (typeof(var))(entry); \ |
---|
| 404 | + WARN_ON(id != 0 && (entry)->type != id); \ |
---|
| 405 | + break; \ |
---|
323 | 406 | } |
---|
324 | 407 | |
---|
325 | 408 | /* Will cause compile errors if type is not found. */ |
---|
.. | .. |
---|
447 | 530 | struct tracer *next; |
---|
448 | 531 | struct tracer_flags *flags; |
---|
449 | 532 | int enabled; |
---|
450 | | - int ref; |
---|
451 | 533 | bool print_max; |
---|
452 | 534 | bool allow_instances; |
---|
453 | 535 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
.. | .. |
---|
467 | 549 | * When function tracing occurs, the following steps are made: |
---|
468 | 550 | * If arch does not support a ftrace feature: |
---|
469 | 551 | * call internal function (uses INTERNAL bits) which calls... |
---|
| 552 | + * If callback is registered to the "global" list, the list |
---|
| 553 | + * function is called and recursion checks the GLOBAL bits. |
---|
| 554 | + * then this function calls... |
---|
470 | 555 | * The function callback, which can use the FTRACE bits to |
---|
471 | 556 | * check for recursion. |
---|
472 | 557 | */ |
---|
473 | 558 | enum { |
---|
474 | | - TRACE_BUFFER_BIT, |
---|
475 | | - TRACE_BUFFER_NMI_BIT, |
---|
476 | | - TRACE_BUFFER_IRQ_BIT, |
---|
477 | | - TRACE_BUFFER_SIRQ_BIT, |
---|
478 | | - |
---|
479 | | - /* Start of function recursion bits */ |
---|
| 559 | + /* Function recursion bits */ |
---|
480 | 560 | TRACE_FTRACE_BIT, |
---|
481 | 561 | TRACE_FTRACE_NMI_BIT, |
---|
482 | 562 | TRACE_FTRACE_IRQ_BIT, |
---|
.. | .. |
---|
521 | 601 | |
---|
522 | 602 | TRACE_GRAPH_DEPTH_START_BIT, |
---|
523 | 603 | TRACE_GRAPH_DEPTH_END_BIT, |
---|
| 604 | + |
---|
| 605 | + /* |
---|
| 606 | + * To implement set_graph_notrace, if this bit is set, we ignore |
---|
| 607 | + * function graph tracing of called functions, until the return |
---|
| 608 | + * function is called to clear it. |
---|
| 609 | + */ |
---|
| 610 | + TRACE_GRAPH_NOTRACE_BIT, |
---|
524 | 611 | }; |
---|
525 | 612 | |
---|
526 | 613 | #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) |
---|
.. | .. |
---|
616 | 703 | |
---|
617 | 704 | int tracer_init(struct tracer *t, struct trace_array *tr); |
---|
618 | 705 | int tracing_is_enabled(void); |
---|
619 | | -void tracing_reset(struct trace_buffer *buf, int cpu); |
---|
620 | | -void tracing_reset_online_cpus(struct trace_buffer *buf); |
---|
| 706 | +void tracing_reset_online_cpus(struct array_buffer *buf); |
---|
621 | 707 | void tracing_reset_current(int cpu); |
---|
622 | 708 | void tracing_reset_all_online_cpus(void); |
---|
623 | 709 | int tracing_open_generic(struct inode *inode, struct file *filp); |
---|
| 710 | +int tracing_open_generic_tr(struct inode *inode, struct file *filp); |
---|
624 | 711 | bool tracing_is_disabled(void); |
---|
625 | 712 | bool tracer_tracing_is_on(struct trace_array *tr); |
---|
626 | 713 | void tracer_tracing_on(struct trace_array *tr); |
---|
.. | .. |
---|
631 | 718 | void *data, |
---|
632 | 719 | const struct file_operations *fops); |
---|
633 | 720 | |
---|
634 | | -struct dentry *tracing_init_dentry(void); |
---|
| 721 | +int tracing_init_dentry(void); |
---|
635 | 722 | |
---|
636 | 723 | struct ring_buffer_event; |
---|
637 | 724 | |
---|
638 | 725 | struct ring_buffer_event * |
---|
639 | | -trace_buffer_lock_reserve(struct ring_buffer *buffer, |
---|
| 726 | +trace_buffer_lock_reserve(struct trace_buffer *buffer, |
---|
640 | 727 | int type, |
---|
641 | 728 | unsigned long len, |
---|
642 | | - unsigned long flags, |
---|
643 | | - int pc); |
---|
| 729 | + unsigned int trace_ctx); |
---|
644 | 730 | |
---|
645 | 731 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, |
---|
646 | 732 | struct trace_array_cpu *data); |
---|
.. | .. |
---|
648 | 734 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
---|
649 | 735 | int *ent_cpu, u64 *ent_ts); |
---|
650 | 736 | |
---|
651 | | -void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, |
---|
| 737 | +void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, |
---|
652 | 738 | struct ring_buffer_event *event); |
---|
653 | 739 | |
---|
654 | 740 | int trace_empty(struct trace_iterator *iter); |
---|
.. | .. |
---|
659 | 745 | |
---|
660 | 746 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
---|
661 | 747 | |
---|
| 748 | +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu); |
---|
| 749 | +unsigned long trace_total_entries(struct trace_array *tr); |
---|
| 750 | + |
---|
662 | 751 | void trace_function(struct trace_array *tr, |
---|
663 | 752 | unsigned long ip, |
---|
664 | 753 | unsigned long parent_ip, |
---|
665 | | - unsigned long flags, int pc); |
---|
| 754 | + unsigned int trace_ctx); |
---|
666 | 755 | void trace_graph_function(struct trace_array *tr, |
---|
667 | 756 | unsigned long ip, |
---|
668 | 757 | unsigned long parent_ip, |
---|
669 | | - unsigned long flags, int pc); |
---|
| 758 | + unsigned int trace_ctx); |
---|
670 | 759 | void trace_latency_header(struct seq_file *m); |
---|
671 | 760 | void trace_default_header(struct seq_file *m); |
---|
672 | 761 | void print_trace_header(struct seq_file *m, struct trace_iterator *iter); |
---|
.. | .. |
---|
702 | 791 | bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, |
---|
703 | 792 | pid_t search_pid); |
---|
704 | 793 | bool trace_ignore_this_task(struct trace_pid_list *filtered_pids, |
---|
| 794 | + struct trace_pid_list *filtered_no_pids, |
---|
705 | 795 | struct task_struct *task); |
---|
706 | 796 | void trace_filter_add_remove_task(struct trace_pid_list *pid_list, |
---|
707 | 797 | struct task_struct *self, |
---|
.. | .. |
---|
715 | 805 | const char __user *ubuf, size_t cnt); |
---|
716 | 806 | |
---|
717 | 807 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
718 | | -void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); |
---|
| 808 | +void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, |
---|
| 809 | + void *cond_data); |
---|
719 | 810 | void update_max_tr_single(struct trace_array *tr, |
---|
720 | 811 | struct task_struct *tsk, int cpu); |
---|
721 | 812 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
---|
722 | 813 | |
---|
723 | | -#ifdef CONFIG_STACKTRACE |
---|
724 | | -void ftrace_trace_userstack(struct trace_array *tr, |
---|
725 | | - struct ring_buffer *buffer, unsigned long flags, |
---|
726 | | - int pc); |
---|
| 814 | +#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \ |
---|
| 815 | + defined(CONFIG_FSNOTIFY) |
---|
727 | 816 | |
---|
728 | | -void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
---|
729 | | - int pc); |
---|
| 817 | +void latency_fsnotify(struct trace_array *tr); |
---|
| 818 | + |
---|
730 | 819 | #else |
---|
731 | | -static inline void ftrace_trace_userstack(struct trace_array *tr, |
---|
732 | | - struct ring_buffer *buffer, |
---|
733 | | - unsigned long flags, int pc) |
---|
734 | | -{ |
---|
735 | | -} |
---|
736 | 820 | |
---|
737 | | -static inline void __trace_stack(struct trace_array *tr, unsigned long flags, |
---|
738 | | - int skip, int pc) |
---|
| 821 | +static inline void latency_fsnotify(struct trace_array *tr) { } |
---|
| 822 | + |
---|
| 823 | +#endif |
---|
| 824 | + |
---|
| 825 | +#ifdef CONFIG_STACKTRACE |
---|
| 826 | +void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip); |
---|
| 827 | +#else |
---|
| 828 | +static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, |
---|
| 829 | + int skip) |
---|
739 | 830 | { |
---|
740 | 831 | } |
---|
741 | 832 | #endif /* CONFIG_STACKTRACE */ |
---|
.. | .. |
---|
748 | 839 | |
---|
749 | 840 | #ifdef CONFIG_DYNAMIC_FTRACE |
---|
750 | 841 | extern unsigned long ftrace_update_tot_cnt; |
---|
| 842 | +extern unsigned long ftrace_number_of_pages; |
---|
| 843 | +extern unsigned long ftrace_number_of_groups; |
---|
751 | 844 | void ftrace_init_trace_array(struct trace_array *tr); |
---|
752 | 845 | #else |
---|
753 | 846 | static inline void ftrace_init_trace_array(struct trace_array *tr) { } |
---|
.. | .. |
---|
761 | 854 | extern bool tracing_selftest_disabled; |
---|
762 | 855 | |
---|
763 | 856 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
---|
| 857 | +extern void __init disable_tracing_selftest(const char *reason); |
---|
| 858 | + |
---|
764 | 859 | extern int trace_selftest_startup_function(struct tracer *trace, |
---|
765 | 860 | struct trace_array *tr); |
---|
766 | 861 | extern int trace_selftest_startup_function_graph(struct tracer *trace, |
---|
.. | .. |
---|
784 | 879 | */ |
---|
785 | 880 | #define __tracer_data __refdata |
---|
786 | 881 | #else |
---|
| 882 | +static inline void __init disable_tracing_selftest(const char *reason) |
---|
| 883 | +{ |
---|
| 884 | +} |
---|
787 | 885 | /* Tracers are seldom changed. Optimize when selftests are disabled. */ |
---|
788 | 886 | #define __tracer_data __read_mostly |
---|
789 | 887 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
---|
.. | .. |
---|
797 | 895 | extern int |
---|
798 | 896 | trace_array_vprintk(struct trace_array *tr, |
---|
799 | 897 | unsigned long ip, const char *fmt, va_list args); |
---|
800 | | -int trace_array_printk(struct trace_array *tr, |
---|
801 | | - unsigned long ip, const char *fmt, ...); |
---|
802 | | -int trace_array_printk_buf(struct ring_buffer *buffer, |
---|
| 898 | +int trace_array_printk_buf(struct trace_buffer *buffer, |
---|
803 | 899 | unsigned long ip, const char *fmt, ...); |
---|
804 | 900 | void trace_printk_seq(struct trace_seq *s); |
---|
805 | 901 | enum print_line_t print_trace_line(struct trace_iterator *iter); |
---|
.. | .. |
---|
845 | 941 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
---|
846 | 942 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
---|
847 | 943 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
---|
848 | | -#define TRACE_GRAPH_PRINT_IRQS 0x40 |
---|
849 | | -#define TRACE_GRAPH_PRINT_TAIL 0x80 |
---|
850 | | -#define TRACE_GRAPH_SLEEP_TIME 0x100 |
---|
851 | | -#define TRACE_GRAPH_GRAPH_TIME 0x200 |
---|
| 944 | +#define TRACE_GRAPH_PRINT_REL_TIME 0x40 |
---|
| 945 | +#define TRACE_GRAPH_PRINT_IRQS 0x80 |
---|
| 946 | +#define TRACE_GRAPH_PRINT_TAIL 0x100 |
---|
| 947 | +#define TRACE_GRAPH_SLEEP_TIME 0x200 |
---|
| 948 | +#define TRACE_GRAPH_GRAPH_TIME 0x400 |
---|
852 | 949 | #define TRACE_GRAPH_PRINT_FILL_SHIFT 28 |
---|
853 | 950 | #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT) |
---|
854 | 951 | |
---|
855 | 952 | extern void ftrace_graph_sleep_time_control(bool enable); |
---|
| 953 | + |
---|
| 954 | +#ifdef CONFIG_FUNCTION_PROFILER |
---|
856 | 955 | extern void ftrace_graph_graph_time_control(bool enable); |
---|
| 956 | +#else |
---|
| 957 | +static inline void ftrace_graph_graph_time_control(bool enable) { } |
---|
| 958 | +#endif |
---|
857 | 959 | |
---|
858 | 960 | extern enum print_line_t |
---|
859 | 961 | print_graph_function_flags(struct trace_iterator *iter, u32 flags); |
---|
.. | .. |
---|
864 | 966 | extern void graph_trace_close(struct trace_iterator *iter); |
---|
865 | 967 | extern int __trace_graph_entry(struct trace_array *tr, |
---|
866 | 968 | struct ftrace_graph_ent *trace, |
---|
867 | | - unsigned long flags, int pc); |
---|
| 969 | + unsigned int trace_ctx); |
---|
868 | 970 | extern void __trace_graph_return(struct trace_array *tr, |
---|
869 | 971 | struct ftrace_graph_ret *trace, |
---|
870 | | - unsigned long flags, int pc); |
---|
| 972 | + unsigned int trace_ctx); |
---|
871 | 973 | |
---|
872 | 974 | #ifdef CONFIG_DYNAMIC_FTRACE |
---|
873 | 975 | extern struct ftrace_hash __rcu *ftrace_graph_hash; |
---|
.. | .. |
---|
985 | 1087 | extern struct list_head ftrace_pids; |
---|
986 | 1088 | |
---|
987 | 1089 | #ifdef CONFIG_FUNCTION_TRACER |
---|
| 1090 | + |
---|
| 1091 | +#define FTRACE_PID_IGNORE -1 |
---|
| 1092 | +#define FTRACE_PID_TRACE -2 |
---|
| 1093 | + |
---|
988 | 1094 | struct ftrace_func_command { |
---|
989 | 1095 | struct list_head list; |
---|
990 | 1096 | char *name; |
---|
.. | .. |
---|
996 | 1102 | extern bool ftrace_filter_param __initdata; |
---|
997 | 1103 | static inline int ftrace_trace_task(struct trace_array *tr) |
---|
998 | 1104 | { |
---|
999 | | - return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid); |
---|
| 1105 | + return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) != |
---|
| 1106 | + FTRACE_PID_IGNORE; |
---|
1000 | 1107 | } |
---|
1001 | 1108 | extern int ftrace_is_dead(void); |
---|
1002 | 1109 | int ftrace_create_function_files(struct trace_array *tr, |
---|
1003 | 1110 | struct dentry *parent); |
---|
1004 | 1111 | void ftrace_destroy_function_files(struct trace_array *tr); |
---|
| 1112 | +int ftrace_allocate_ftrace_ops(struct trace_array *tr); |
---|
| 1113 | +void ftrace_free_ftrace_ops(struct trace_array *tr); |
---|
1005 | 1114 | void ftrace_init_global_array_ops(struct trace_array *tr); |
---|
1006 | 1115 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func); |
---|
1007 | 1116 | void ftrace_reset_array_ops(struct trace_array *tr); |
---|
.. | .. |
---|
1023 | 1132 | { |
---|
1024 | 1133 | return 0; |
---|
1025 | 1134 | } |
---|
| 1135 | +static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr) |
---|
| 1136 | +{ |
---|
| 1137 | + return 0; |
---|
| 1138 | +} |
---|
| 1139 | +static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { } |
---|
1026 | 1140 | static inline void ftrace_destroy_function_files(struct trace_array *tr) { } |
---|
1027 | 1141 | static inline __init void |
---|
1028 | 1142 | ftrace_init_global_array_ops(struct trace_array *tr) { } |
---|
.. | .. |
---|
1084 | 1198 | void ftrace_create_filter_files(struct ftrace_ops *ops, |
---|
1085 | 1199 | struct dentry *parent); |
---|
1086 | 1200 | void ftrace_destroy_filter_files(struct ftrace_ops *ops); |
---|
| 1201 | + |
---|
| 1202 | +extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
---|
| 1203 | + int len, int reset); |
---|
| 1204 | +extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
---|
| 1205 | + int len, int reset); |
---|
1087 | 1206 | #else |
---|
1088 | 1207 | struct ftrace_func_command; |
---|
1089 | 1208 | |
---|
.. | .. |
---|
1209 | 1328 | C(IRQ_INFO, "irq-info"), \ |
---|
1210 | 1329 | C(MARKERS, "markers"), \ |
---|
1211 | 1330 | C(EVENT_FORK, "event-fork"), \ |
---|
| 1331 | + C(PAUSE_ON_TRACE, "pause-on-trace"), \ |
---|
1212 | 1332 | FUNCTION_FLAGS \ |
---|
1213 | 1333 | FGRAPH_FLAGS \ |
---|
1214 | 1334 | STACK_FLAGS \ |
---|
.. | .. |
---|
1306 | 1426 | }; |
---|
1307 | 1427 | |
---|
1308 | 1428 | extern int call_filter_check_discard(struct trace_event_call *call, void *rec, |
---|
1309 | | - struct ring_buffer *buffer, |
---|
| 1429 | + struct trace_buffer *buffer, |
---|
1310 | 1430 | struct ring_buffer_event *event); |
---|
1311 | 1431 | |
---|
1312 | 1432 | void trace_buffer_unlock_commit_regs(struct trace_array *tr, |
---|
1313 | | - struct ring_buffer *buffer, |
---|
| 1433 | + struct trace_buffer *buffer, |
---|
1314 | 1434 | struct ring_buffer_event *event, |
---|
1315 | | - unsigned long flags, int pc, |
---|
| 1435 | +				     unsigned int trace_ctx, |
---|
1316 | 1436 | struct pt_regs *regs); |
---|
1317 | 1437 | |
---|
1318 | 1438 | static inline void trace_buffer_unlock_commit(struct trace_array *tr, |
---|
1319 | | - struct ring_buffer *buffer, |
---|
| 1439 | + struct trace_buffer *buffer, |
---|
1320 | 1440 | struct ring_buffer_event *event, |
---|
1321 | | - unsigned long flags, int pc) |
---|
| 1441 | + unsigned int trace_ctx) |
---|
1322 | 1442 | { |
---|
1323 | | - trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL); |
---|
| 1443 | + trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL); |
---|
1324 | 1444 | } |
---|
1325 | 1445 | |
---|
1326 | 1446 | DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); |
---|
.. | .. |
---|
1329 | 1449 | void trace_buffered_event_enable(void); |
---|
1330 | 1450 | |
---|
1331 | 1451 | static inline void |
---|
1332 | | -__trace_event_discard_commit(struct ring_buffer *buffer, |
---|
| 1452 | +__trace_event_discard_commit(struct trace_buffer *buffer, |
---|
1333 | 1453 | struct ring_buffer_event *event) |
---|
1334 | 1454 | { |
---|
1335 | 1455 | if (this_cpu_read(trace_buffered_event) == event) { |
---|
.. | .. |
---|
1343 | 1463 | /* |
---|
1344 | 1464 | * Helper function for event_trigger_unlock_commit{_regs}(). |
---|
1345 | 1465 | * If there are event triggers attached to this event that requires |
---|
1346 | | - * filtering against its fields, then they wil be called as the |
---|
| 1466 | + * filtering against its fields, then they will be called as the |
---|
1347 | 1467 | * entry already holds the field information of the current event. |
---|
1348 | 1468 | * |
---|
1349 | 1469 | * It also checks if the event should be discarded or not. |
---|
.. | .. |
---|
1355 | 1475 | */ |
---|
1356 | 1476 | static inline bool |
---|
1357 | 1477 | __event_trigger_test_discard(struct trace_event_file *file, |
---|
1358 | | - struct ring_buffer *buffer, |
---|
| 1478 | + struct trace_buffer *buffer, |
---|
1359 | 1479 | struct ring_buffer_event *event, |
---|
1360 | 1480 | void *entry, |
---|
1361 | 1481 | enum event_trigger_type *tt) |
---|
.. | .. |
---|
1393 | 1513 | * @buffer: The ring buffer that the event is being written to |
---|
1394 | 1514 | * @event: The event meta data in the ring buffer |
---|
1395 | 1515 | * @entry: The event itself |
---|
1396 | | - * @irq_flags: The state of the interrupts at the start of the event |
---|
1397 | | - * @pc: The state of the preempt count at the start of the event. |
---|
| 1516 | + * @trace_ctx: The tracing context flags. |
---|
1398 | 1517 | * |
---|
1399 | 1518 | * This is a helper function to handle triggers that require data |
---|
1400 | 1519 | * from the event itself. It also tests the event against filters and |
---|
.. | .. |
---|
1402 | 1521 | */ |
---|
1403 | 1522 | static inline void |
---|
1404 | 1523 | event_trigger_unlock_commit(struct trace_event_file *file, |
---|
1405 | | - struct ring_buffer *buffer, |
---|
| 1524 | + struct trace_buffer *buffer, |
---|
1406 | 1525 | struct ring_buffer_event *event, |
---|
1407 | | - void *entry, unsigned long irq_flags, int pc) |
---|
| 1526 | + void *entry, unsigned int trace_ctx) |
---|
1408 | 1527 | { |
---|
1409 | 1528 | enum event_trigger_type tt = ETT_NONE; |
---|
1410 | 1529 | |
---|
1411 | 1530 | if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) |
---|
1412 | | - trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); |
---|
| 1531 | + trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx); |
---|
1413 | 1532 | |
---|
1414 | 1533 | if (tt) |
---|
1415 | 1534 | event_triggers_post_call(file, tt); |
---|
.. | .. |
---|
1421 | 1540 | * @buffer: The ring buffer that the event is being written to |
---|
1422 | 1541 | * @event: The event meta data in the ring buffer |
---|
1423 | 1542 | * @entry: The event itself |
---|
1424 | | - * @irq_flags: The state of the interrupts at the start of the event |
---|
1425 | | - * @pc: The state of the preempt count at the start of the event. |
---|
| 1543 | + * @trace_ctx: The tracing context flags. |
---|
1426 | 1544 | * |
---|
1427 | 1545 | * This is a helper function to handle triggers that require data |
---|
1428 | 1546 | * from the event itself. It also tests the event against filters and |
---|
.. | .. |
---|
1433 | 1551 | */ |
---|
1434 | 1552 | static inline void |
---|
1435 | 1553 | event_trigger_unlock_commit_regs(struct trace_event_file *file, |
---|
1436 | | - struct ring_buffer *buffer, |
---|
| 1554 | + struct trace_buffer *buffer, |
---|
1437 | 1555 | struct ring_buffer_event *event, |
---|
1438 | | - void *entry, unsigned long irq_flags, int pc, |
---|
| 1556 | + void *entry, unsigned int trace_ctx, |
---|
1439 | 1557 | struct pt_regs *regs) |
---|
1440 | 1558 | { |
---|
1441 | 1559 | enum event_trigger_type tt = ETT_NONE; |
---|
1442 | 1560 | |
---|
1443 | 1561 | if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) |
---|
1444 | 1562 | trace_buffer_unlock_commit_regs(file->tr, buffer, event, |
---|
1445 | | - irq_flags, pc, regs); |
---|
| 1563 | + trace_ctx, regs); |
---|
1446 | 1564 | |
---|
1447 | 1565 | if (tt) |
---|
1448 | 1566 | event_triggers_post_call(file, tt); |
---|
.. | .. |
---|
1474 | 1592 | MATCH_MIDDLE_ONLY, |
---|
1475 | 1593 | MATCH_END_ONLY, |
---|
1476 | 1594 | MATCH_GLOB, |
---|
| 1595 | + MATCH_INDEX, |
---|
1477 | 1596 | }; |
---|
1478 | 1597 | |
---|
1479 | 1598 | struct regex { |
---|
.. | .. |
---|
1518 | 1637 | extern void print_subsystem_event_filter(struct event_subsystem *system, |
---|
1519 | 1638 | struct trace_seq *s); |
---|
1520 | 1639 | extern int filter_assign_type(const char *type); |
---|
1521 | | -extern int create_event_filter(struct trace_event_call *call, |
---|
| 1640 | +extern int create_event_filter(struct trace_array *tr, |
---|
| 1641 | + struct trace_event_call *call, |
---|
1522 | 1642 | char *filter_str, bool set_str, |
---|
1523 | 1643 | struct event_filter **filterp); |
---|
1524 | 1644 | extern void free_event_filter(struct event_filter *filter); |
---|
.. | .. |
---|
1532 | 1652 | extern int event_trace_init(void); |
---|
1533 | 1653 | extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); |
---|
1534 | 1654 | extern int event_trace_del_tracer(struct trace_array *tr); |
---|
| 1655 | +extern void __trace_early_add_events(struct trace_array *tr); |
---|
1535 | 1656 | |
---|
1536 | 1657 | extern struct trace_event_file *__find_event_file(struct trace_array *tr, |
---|
1537 | 1658 | const char *system, |
---|
.. | .. |
---|
1550 | 1671 | |
---|
1551 | 1672 | extern const struct file_operations event_trigger_fops; |
---|
1552 | 1673 | extern const struct file_operations event_hist_fops; |
---|
| 1674 | +extern const struct file_operations event_hist_debug_fops; |
---|
| 1675 | +extern const struct file_operations event_inject_fops; |
---|
1553 | 1676 | |
---|
1554 | 1677 | #ifdef CONFIG_HIST_TRIGGERS |
---|
1555 | 1678 | extern int register_trigger_hist_cmd(void); |
---|
.. | .. |
---|
1824 | 1947 | extern int trace_event_enable_disable(struct trace_event_file *file, |
---|
1825 | 1948 | int enable, int soft_disable); |
---|
1826 | 1949 | extern int tracing_alloc_snapshot(void); |
---|
| 1950 | +extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data); |
---|
| 1951 | +extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update); |
---|
| 1952 | + |
---|
| 1953 | +extern int tracing_snapshot_cond_disable(struct trace_array *tr); |
---|
| 1954 | +extern void *tracing_cond_snapshot_data(struct trace_array *tr); |
---|
1827 | 1955 | |
---|
1828 | 1956 | extern const char *__start___trace_bprintk_fmt[]; |
---|
1829 | 1957 | extern const char *__stop___trace_bprintk_fmt[]; |
---|
.. | .. |
---|
1832 | 1960 | extern const char *__stop___tracepoint_str[]; |
---|
1833 | 1961 | |
---|
1834 | 1962 | void trace_printk_control(bool enabled); |
---|
1835 | | -void trace_printk_init_buffers(void); |
---|
1836 | 1963 | void trace_printk_start_comm(void); |
---|
1837 | 1964 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); |
---|
1838 | 1965 | int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); |
---|
| 1966 | + |
---|
| 1967 | +/* Used from boot time tracer */ |
---|
| 1968 | +extern int trace_set_options(struct trace_array *tr, char *option); |
---|
| 1969 | +extern int tracing_set_tracer(struct trace_array *tr, const char *buf); |
---|
| 1970 | +extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr, |
---|
| 1971 | + unsigned long size, int cpu_id); |
---|
| 1972 | +extern int tracing_set_cpumask(struct trace_array *tr, |
---|
| 1973 | + cpumask_var_t tracing_cpumask_new); |
---|
| 1974 | + |
---|
1839 | 1975 | |
---|
1840 | 1976 | #define MAX_EVENT_NAME_LEN 64 |
---|
1841 | 1977 | |
---|
.. | .. |
---|
1843 | 1979 | extern ssize_t trace_parse_run_command(struct file *file, |
---|
1844 | 1980 | const char __user *buffer, size_t count, loff_t *ppos, |
---|
1845 | 1981 | int (*createfn)(int, char**)); |
---|
| 1982 | + |
---|
| 1983 | +extern unsigned int err_pos(char *cmd, const char *str); |
---|
| 1984 | +extern void tracing_log_err(struct trace_array *tr, |
---|
| 1985 | + const char *loc, const char *cmd, |
---|
| 1986 | + const char **errs, u8 type, u8 pos); |
---|
1846 | 1987 | |
---|
1847 | 1988 | /* |
---|
1848 | 1989 | * Normal trace_printk() and friends allocates special buffers |
---|
.. | .. |
---|
1856 | 1997 | #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str)) |
---|
1857 | 1998 | |
---|
/*
 * Redefine the FTRACE_ENTRY() family so that including trace_entries.h
 * below emits an extern declaration of the trace_event_call for each
 * static ftrace entry type (event_<name>, 4-byte aligned).
 * DUP and PACKED variants expand to the plain FTRACE_ENTRY() form here,
 * since only the declaration is needed at this point.
 */
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
---|
1870 | 2009 | |
---|
1871 | 2010 | #include "trace_entries.h" |
---|
1872 | 2011 | |
---|
.. | .. |
---|
1891 | 2030 | #ifdef CONFIG_EVENT_TRACING |
---|
1892 | 2031 | void trace_event_init(void); |
---|
1893 | 2032 | void trace_event_eval_update(struct trace_eval_map **map, int len); |
---|
| 2033 | +/* Used from boot time tracer */ |
---|
| 2034 | +extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); |
---|
| 2035 | +extern int trigger_process_regex(struct trace_event_file *file, char *buff); |
---|
1894 | 2036 | #else |
---|
1895 | 2037 | static inline void __init trace_event_init(void) { } |
---|
1896 | 2038 | static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } |
---|
.. | .. |
---|
1942 | 2084 | iter->pos = -1; |
---|
1943 | 2085 | } |
---|
1944 | 2086 | |
---|
/*
 * is_good_name - validate an event/group/field name
 * @name: NUL-terminated candidate name
 *
 * A good name follows the C identifier pattern: the first character
 * must be a letter or '_', and every following character must be a
 * letter, digit, or '_'.  Returns true if @name qualifies, false
 * otherwise (including for an empty string).
 */
static inline bool is_good_name(const char *name)
{
	const char *p = name;

	/* First character: letter or underscore only. */
	if (!isalpha(*p) && *p != '_')
		return false;

	/* Remaining characters: letters, digits, or underscores. */
	for (p++; *p != '\0'; p++) {
		if (!isalpha(*p) && !isdigit(*p) && *p != '_')
			return false;
	}

	return true;
}
---|
| 2098 | + |
---|
1945 | 2099 | #endif /* _LINUX_KERNEL_TRACE_H */ |
---|