.. | ..
11 | 11 | #include <linux/mmiotrace.h>
12 | 12 | #include <linux/tracepoint.h>
13 | 13 | #include <linux/ftrace.h>
| 14 | +#include <linux/trace.h>
14 | 15 | #include <linux/hw_breakpoint.h>
15 | 16 | #include <linux/trace_seq.h>
16 | 17 | #include <linux/trace_events.h>
17 | 18 | #include <linux/compiler.h>
18 | | -#include <linux/trace_seq.h>
19 | 19 | #include <linux/glob.h>
| 20 | +#include <linux/irq_work.h>
| 21 | +#include <linux/workqueue.h>
| 22 | +#include <linux/ctype.h>
20 | 23 |
21 | 24 | #ifdef CONFIG_FTRACE_SYSCALLS
22 | 25 | #include <asm/unistd.h> /* For NR_SYSCALLS */
.. | ..
50 | 53 | #undef __field
51 | 54 | #define __field(type, item) type item;
52 | 55 |
| 56 | +#undef __field_fn
| 57 | +#define __field_fn(type, item) type item;
| 58 | +
53 | 59 | #undef __field_struct
54 | 60 | #define __field_struct(type, item) __field(type, item)
55 | 61 |
56 | 62 | #undef __field_desc
57 | 63 | #define __field_desc(type, container, item)
| 64 | +
| 65 | +#undef __field_packed
| 66 | +#define __field_packed(type, container, item)
58 | 67 |
59 | 68 | #undef __array
60 | 69 | #define __array(type, item, size) type item[size];
.. | ..
69 | 78 | #define F_STRUCT(args...) args
70 | 79 |
71 | 80 | #undef FTRACE_ENTRY
72 | | -#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
| 81 | +#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
73 | 82 | struct struct_name { \
74 | 83 |     struct trace_entry ent; \
75 | 84 |     tstruct \
76 | 85 | }
77 | 86 |
78 | 87 | #undef FTRACE_ENTRY_DUP
79 | | -#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
| 88 | +#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
80 | 89 |
81 | 90 | #undef FTRACE_ENTRY_REG
82 | | -#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
83 | | -        filter, regfn) \
84 | | -    FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
85 | | -        filter)
| 91 | +#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
| 92 | +    FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
86 | 93 |
87 | 94 | #undef FTRACE_ENTRY_PACKED
88 | | -#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \
89 | | -        filter) \
90 | | -    FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
91 | | -        filter) __packed
| 95 | +#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \
| 96 | +    FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
92 | 97 |
93 | 98 | #include "trace_entries.h"
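
With the unused `filter` argument gone, each FTRACE_ENTRY() invocation in trace_entries.h now expands to a plain struct. A sketch of the expansion, using an entry shaped like the function-trace entry (the invocation is illustrative, not quoted from the patch):

```c
/* Invocation following the pattern used in trace_entries.h: */
FTRACE_ENTRY(function, ftrace_entry,
    TRACE_FN,
    F_STRUCT(
        __field_fn(unsigned long, ip)
        __field_fn(unsigned long, parent_ip)
    ),
    F_printk(" %ps <-- %ps",
             (void *)__entry->ip, (void *)__entry->parent_ip)
);

/* ...which the macro definitions above reduce to: */
struct ftrace_entry {
    struct trace_entry ent;
    unsigned long      ip;
    unsigned long      parent_ip;
};
```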
| 99 | +
| 100 | +/* Use this for memory failure errors */
| 101 | +#define MEM_FAIL(condition, fmt, ...) ({ \
| 102 | +    static bool __section(".data.once") __warned; \
| 103 | +    int __ret_warn_once = !!(condition); \
| 104 | + \
| 105 | +    if (unlikely(__ret_warn_once && !__warned)) { \
| 106 | +        __warned = true; \
| 107 | +        pr_err("ERROR: " fmt, ##__VA_ARGS__); \
| 108 | +    } \
| 109 | +    unlikely(__ret_warn_once); \
| 110 | +})
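
A usage sketch (not part of the patch): because the statement expression evaluates to the truth value of the condition, MEM_FAIL() can gate an error path directly, and it prints its message only once per call site:

```c
/* Hypothetical allocation-failure path using MEM_FAIL(): */
buf->data = alloc_percpu(struct trace_array_cpu);
if (MEM_FAIL(!buf->data, "Failed to allocate percpu trace_array_cpu\n"))
    return -ENOMEM;
```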
94 | 111 |
95 | 112 | /*
96 | 113 |  * syscalls are special, and need special handling, this is why
.. | ..
165 | 182 |     kuid_t uid;
166 | 183 |     char comm[TASK_COMM_LEN];
167 | 184 |
168 | | -    bool ignore_pid;
169 | 185 | #ifdef CONFIG_FUNCTION_TRACER
170 | | -    bool ftrace_ignore_pid;
| 186 | +    int ftrace_ignore_pid;
171 | 187 | #endif
| 188 | +    bool ignore_pid;
172 | 189 | };
173 | 190 |
174 | 191 | struct tracer;
175 | 192 | struct trace_option_dentry;
176 | 193 |
177 | | -struct trace_buffer {
| 194 | +struct array_buffer {
178 | 195 |     struct trace_array *tr;
179 | | -    struct ring_buffer *buffer;
| 196 | +    struct trace_buffer *buffer;
180 | 197 |     struct trace_array_cpu __percpu *data;
181 | 198 |     u64 time_start;
182 | 199 |     int cpu;
.. | ..
194 | 211 |     unsigned long *pids;
195 | 212 | };
196 | 213 |
| 214 | +enum {
| 215 | +    TRACE_PIDS    = BIT(0),
| 216 | +    TRACE_NO_PIDS = BIT(1),
| 217 | +};
| 218 | +
| 219 | +static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
| 220 | +                                    struct trace_pid_list *no_pid_list)
| 221 | +{
| 222 | +    /* Return true if the pid list in type has pids */
| 223 | +    return ((type & TRACE_PIDS) && pid_list) ||
| 224 | +           ((type & TRACE_NO_PIDS) && no_pid_list);
| 225 | +}
| 226 | +
| 227 | +static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
| 228 | +                                         struct trace_pid_list *no_pid_list)
| 229 | +{
| 230 | +    /*
| 231 | +     * When turning off the lists in @type, return true if the "other"
| 232 | +     * pid list still has pids in it.
| 233 | +     */
| 234 | +    return (!(type & TRACE_PIDS) && pid_list) ||
| 235 | +           (!(type & TRACE_NO_PIDS) && no_pid_list);
| 236 | +}
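
To see how the two helpers divide the work, here is a hypothetical caller (the unregister helper is invented for illustration): pid_type_enabled() answers "is anything registered for the lists named by @type?", while still_need_pid_events() answers "does the other list still need the event hooks?":

```c
/* Illustrative only. */
static void example_unregister_pid_hooks(struct trace_array *tr); /* hypothetical */

static void example_clear_pids(struct trace_array *tr, int type)
{
    struct trace_pid_list *pid_list =
        rcu_dereference_protected(tr->filtered_pids,
                                  lockdep_is_held(&event_mutex));
    struct trace_pid_list *no_pid_list =
        rcu_dereference_protected(tr->filtered_no_pids,
                                  lockdep_is_held(&event_mutex));

    /* Nothing registered for the lists named by @type: nothing to do. */
    if (!pid_type_enabled(type, pid_list, no_pid_list))
        return;

    /* Drop the hooks only if the "other" list is empty too. */
    if (!still_need_pid_events(type, pid_list, no_pid_list))
        example_unregister_pid_hooks(tr);
}
```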
| 237 | +
| 238 | +typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
| 239 | +
| 240 | +/**
| 241 | + * struct cond_snapshot - conditional snapshot data and callback
| 242 | + *
| 243 | + * The cond_snapshot structure encapsulates a callback function and
| 244 | + * data associated with the snapshot for a given tracing instance.
| 245 | + *
| 246 | + * When a snapshot is taken conditionally, by invoking
| 247 | + * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
| 248 | + * passed in turn to the cond_snapshot.update() function. That data
| 249 | + * can be compared by the update() implementation with the cond_data
| 250 | + * contained within the struct cond_snapshot instance associated with
| 251 | + * the trace_array. Because the tr->max_lock is held throughout the
| 252 | + * update() call, the update() function can directly retrieve the
| 253 | + * cond_snapshot and cond_data associated with the per-instance
| 254 | + * snapshot associated with the trace_array.
| 255 | + *
| 256 | + * The cond_snapshot.update() implementation can save data to be
| 257 | + * associated with the snapshot if it decides to, and returns 'true'
| 258 | + * in that case, or it returns 'false' if the conditional snapshot
| 259 | + * shouldn't be taken.
| 260 | + *
| 261 | + * The cond_snapshot instance is created and associated with the
| 262 | + * user-defined cond_data by tracing_cond_snapshot_enable().
| 263 | + * Likewise, the cond_snapshot instance is destroyed and is no longer
| 264 | + * associated with the trace instance by
| 265 | + * tracing_cond_snapshot_disable().
| 266 | + *
| 267 | + * The method below is required.
| 268 | + *
| 269 | + * @update: When a conditional snapshot is invoked, the update()
| 270 | + *    callback function is invoked with the tr->max_lock held. The
| 271 | + *    update() implementation signals whether or not to actually
| 272 | + *    take the snapshot, by returning 'true' if so, 'false' if no
| 273 | + *    snapshot should be taken. Because the max_lock is held for
| 274 | +    the duration of update(), the implementation can safely
| 275 | +    retrieve and save any implementation data it needs to in
| 276 | +    association with the snapshot.
| 277 | + */
| 278 | +struct cond_snapshot {
| 279 | +    void            *cond_data;
| 280 | +    cond_update_fn_t update;
| 281 | +};
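
To make the lifecycle described above concrete, here is a minimal, hypothetical update() implementation and its wiring; the threshold logic and the read helper are invented for illustration, while the enable/cond entry points are the ones declared later in this header:

```c
/* Hypothetical condition: snapshot only when an instrumented value
 * exceeds the threshold the user registered as cond_data. */
extern unsigned long example_read_value(void); /* hypothetical */

static bool example_update(struct trace_array *tr, void *cond_data)
{
    unsigned long *threshold = cond_data;

    return example_read_value() > *threshold; /* true => take snapshot */
}

static unsigned long example_threshold = 100;

static void example_arm_and_hit(struct trace_array *tr)
{
    /* Create the cond_snapshot and bind it to our cond_data. */
    tracing_snapshot_cond_enable(tr, &example_threshold, example_update);

    /* Later, at the interesting point: update() runs under tr->max_lock
     * and decides whether the buffer swap actually happens. */
    tracing_snapshot_cond(tr, &example_threshold);
}
```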
| 282 | +
197 | 283 | /*
198 | 284 |  * The trace array - an array of per-CPU trace arrays. This is the
199 | 285 |  * highest level data structure that individual tracers deal with.
.. | ..
202 | 288 | struct trace_array {
203 | 289 |     struct list_head list;
204 | 290 |     char *name;
205 | | -    struct trace_buffer trace_buffer;
| 291 | +    struct array_buffer array_buffer;
206 | 292 | #ifdef CONFIG_TRACER_MAX_TRACE
207 | 293 |     /*
208 | 294 |      * The max_buffer is used to snapshot the trace when a maximum
.. | ..
210 | 296 |      * Some tracers will use this to store a maximum trace while
211 | 297 |      * it continues examining live traces.
212 | 298 |      *
213 | | -     * The buffers for the max_buffer are set up the same as the trace_buffer
| 299 | +     * The buffers for the max_buffer are set up the same as the array_buffer
214 | 300 |      * When a snapshot is taken, the buffer of the max_buffer is swapped
215 | | -     * with the buffer of the trace_buffer and the buffers are reset for
216 | | -     * the trace_buffer so the tracing can continue.
| 301 | +     * with the buffer of the array_buffer and the buffers are reset for
| 302 | +     * the array_buffer so the tracing can continue.
217 | 303 |      */
218 | | -    struct trace_buffer max_buffer;
| 304 | +    struct array_buffer max_buffer;
219 | 305 |     bool allocated_snapshot;
220 | 306 | #endif
221 | 307 | #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
222 | 308 |     unsigned long max_latency;
| 309 | +#ifdef CONFIG_FSNOTIFY
| 310 | +    struct dentry *d_max_latency;
| 311 | +    struct work_struct fsnotify_work;
| 312 | +    struct irq_work fsnotify_irqwork;
| 313 | +#endif
223 | 314 | #endif
224 | 315 |     struct trace_pid_list __rcu *filtered_pids;
| 316 | +    struct trace_pid_list __rcu *filtered_no_pids;
225 | 317 |     /*
226 | 318 |      * max_lock is used to protect the swapping of buffers
227 | 319 |      * when taking a max snapshot. The buffers themselves are
.. | ..
247 | 339 |     int clock_id;
248 | 340 |     int nr_topts;
249 | 341 |     bool clear_trace;
| 342 | +    int buffer_percent;
| 343 | +    unsigned int n_err_log_entries;
250 | 344 |     struct tracer *current_trace;
251 | 345 |     unsigned int trace_flags;
252 | 346 |     unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
253 | 347 |     unsigned int flags;
254 | 348 |     raw_spinlock_t start_lock;
| 349 | +    struct list_head err_log;
255 | 350 |     struct dentry *dir;
256 | 351 |     struct dentry *options;
257 | 352 |     struct dentry *percpu_dir;
.. | ..
262 | 357 |     struct trace_event_file *trace_marker_file;
263 | 358 |     cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
264 | 359 |     int ref;
| 360 | +    int trace_ref;
265 | 361 | #ifdef CONFIG_FUNCTION_TRACER
266 | 362 |     struct ftrace_ops *ops;
267 | 363 |     struct trace_pid_list __rcu *function_pids;
| 364 | +    struct trace_pid_list __rcu *function_no_pids;
268 | 365 | #ifdef CONFIG_DYNAMIC_FTRACE
269 | 366 |     /* All of these are protected by the ftrace_lock */
270 | 367 |     struct list_head func_probes;
.. | ..
276 | 373 | #endif
277 | 374 |     int time_stamp_abs_ref;
278 | 375 |     struct list_head hist_vars;
| 376 | +#ifdef CONFIG_TRACER_SNAPSHOT
| 377 | +    struct cond_snapshot *cond_snapshot;
| 378 | +#endif
279 | 379 | };
280 | 380 |
281 | 381 | enum {
.. | ..
287 | 387 | extern struct mutex trace_types_lock;
288 | 388 |
289 | 389 | extern int trace_array_get(struct trace_array *tr);
290 | | -extern void trace_array_put(struct trace_array *tr);
| 390 | +extern int tracing_check_open_get_tr(struct trace_array *tr);
| 391 | +extern struct trace_array *trace_array_find(const char *instance);
| 392 | +extern struct trace_array *trace_array_find_get(const char *instance);
291 | 393 |
292 | 394 | extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
293 | 395 | extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
.. | ..
315 | 417 |     __builtin_types_compatible_p(typeof(var), type *)
316 | 418 |
317 | 419 | #undef IF_ASSIGN
318 | | -#define IF_ASSIGN(var, entry, etype, id) \
319 | | -    if (FTRACE_CMP_TYPE(var, etype)) { \
320 | | -        var = (typeof(var))(entry); \
321 | | -        WARN_ON(id && (entry)->type != id); \
322 | | -        break; \
| 420 | +#define IF_ASSIGN(var, entry, etype, id) \
| 421 | +    if (FTRACE_CMP_TYPE(var, etype)) { \
| 422 | +        var = (typeof(var))(entry); \
| 423 | +        WARN_ON(id != 0 && (entry)->type != id); \
| 424 | +        break; \
323 | 425 |     }
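
For context, IF_ASSIGN() exists to be used inside the trace_assign_type() switch that follows this hunk in the header; a condensed sketch of that pattern (arms abbreviated, not quoted from the patch):

```c
/* Each arm casts 'var' to the entry type matching ent->type and breaks;
 * FTRACE_CMP_TYPE() selects the arm whose struct type matches 'var'. */
#define trace_assign_type(var, ent)                                 \
    do {                                                            \
        switch (ent->type) {                                        \
        IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);         \
        IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);       \
        /* ... one IF_ASSIGN() per entry type ... */                \
        default:                                                    \
            break;                                                  \
        }                                                           \
    } while (0)
```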
324 | 426 |
325 | 427 | /* Will cause compile errors if type is not found. */
.. | ..
447 | 549 |     struct tracer *next;
448 | 550 |     struct tracer_flags *flags;
449 | 551 |     int enabled;
450 | | -    int ref;
451 | 552 |     bool print_max;
452 | 553 |     bool allow_instances;
453 | 554 | #ifdef CONFIG_TRACER_MAX_TRACE
.. | ..
467 | 568 |  * When function tracing occurs, the following steps are made:
468 | 569 |  * If arch does not support a ftrace feature:
469 | 570 |  *  call internal function (uses INTERNAL bits) which calls...
| 571 | + * If callback is registered to the "global" list, the list
| 572 | + *  function is called and recursion checks the GLOBAL bits.
| 573 | + *  then this function calls...
470 | 574 |  * The function callback, which can use the FTRACE bits to
471 | 575 |  *  check for recursion.
472 | 576 |  */
473 | 577 | enum {
474 | | -    TRACE_BUFFER_BIT,
475 | | -    TRACE_BUFFER_NMI_BIT,
476 | | -    TRACE_BUFFER_IRQ_BIT,
477 | | -    TRACE_BUFFER_SIRQ_BIT,
478 | | -
479 | | -    /* Start of function recursion bits */
| 578 | +    /* Function recursion bits */
480 | 579 |     TRACE_FTRACE_BIT,
481 | 580 |     TRACE_FTRACE_NMI_BIT,
482 | 581 |     TRACE_FTRACE_IRQ_BIT,
.. | ..
521 | 620 |
522 | 621 |     TRACE_GRAPH_DEPTH_START_BIT,
523 | 622 |     TRACE_GRAPH_DEPTH_END_BIT,
| 623 | +
| 624 | +    /*
| 625 | +     * To implement set_graph_notrace, if this bit is set, we ignore
| 626 | +     * function graph tracing of called functions, until the return
| 627 | +     * function is called to clear it.
| 628 | +     */
| 629 | +    TRACE_GRAPH_NOTRACE_BIT,
524 | 630 | };
525 | 631 |
526 | 632 | #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
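
The set accessor above, together with its clear/test companions defined alongside it in the header, implements a per-task re-entrancy guard over these bits; a sketch of the canonical pattern:

```c
/* Sketch of the guard pattern the recursion bits support: */
static void example_callback(void)
{
    if (trace_recursion_test(TRACE_FTRACE_BIT))
        return; /* already inside this callback: bail */

    trace_recursion_set(TRACE_FTRACE_BIT);
    /* ... the actual tracing work, now safe from recursion ... */
    trace_recursion_clear(TRACE_FTRACE_BIT);
}
```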
.. | ..
616 | 722 |
617 | 723 | int tracer_init(struct tracer *t, struct trace_array *tr);
618 | 724 | int tracing_is_enabled(void);
619 | | -void tracing_reset(struct trace_buffer *buf, int cpu);
620 | | -void tracing_reset_online_cpus(struct trace_buffer *buf);
| 725 | +void tracing_reset_online_cpus(struct array_buffer *buf);
621 | 726 | void tracing_reset_current(int cpu);
622 | 727 | void tracing_reset_all_online_cpus(void);
| 728 | +void tracing_reset_all_online_cpus_unlocked(void);
623 | 729 | int tracing_open_generic(struct inode *inode, struct file *filp);
| 730 | +int tracing_open_generic_tr(struct inode *inode, struct file *filp);
| 731 | +int tracing_open_file_tr(struct inode *inode, struct file *filp);
| 732 | +int tracing_release_file_tr(struct inode *inode, struct file *filp);
624 | 733 | bool tracing_is_disabled(void);
625 | 734 | bool tracer_tracing_is_on(struct trace_array *tr);
626 | 735 | void tracer_tracing_on(struct trace_array *tr);
.. | ..
631 | 740 |                 void *data,
632 | 741 |                 const struct file_operations *fops);
633 | 742 |
634 | | -struct dentry *tracing_init_dentry(void);
| 743 | +int tracing_init_dentry(void);
635 | 744 |
636 | 745 | struct ring_buffer_event;
637 | 746 |
638 | 747 | struct ring_buffer_event *
639 | | -trace_buffer_lock_reserve(struct ring_buffer *buffer,
| 748 | +trace_buffer_lock_reserve(struct trace_buffer *buffer,
640 | 749 |                           int type,
641 | 750 |                           unsigned long len,
642 | 751 |                           unsigned long flags,
.. | ..
648 | 757 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
649 | 758 |                                           int *ent_cpu, u64 *ent_ts);
650 | 759 |
651 | | -void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
| 760 | +void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
652 | 761 |                                         struct ring_buffer_event *event);
653 | 762 |
654 | 763 | int trace_empty(struct trace_iterator *iter);
.. | ..
658 | 767 | void trace_init_global_iter(struct trace_iterator *iter);
659 | 768 |
660 | 769 | void tracing_iter_reset(struct trace_iterator *iter, int cpu);
| 770 | +
| 771 | +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
| 772 | +unsigned long trace_total_entries(struct trace_array *tr);
661 | 773 |
662 | 774 | void trace_function(struct trace_array *tr,
663 | 775 |                     unsigned long ip,
.. | ..
702 | 814 | bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
703 | 815 |                              pid_t search_pid);
704 | 816 | bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
| 817 | +                            struct trace_pid_list *filtered_no_pids,
705 | 818 |                             struct task_struct *task);
706 | 819 | void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
707 | 820 |                                   struct task_struct *self,
.. | ..
715 | 828 |                                   const char __user *ubuf, size_t cnt);
716 | 829 |
717 | 830 | #ifdef CONFIG_TRACER_MAX_TRACE
718 | | -void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
| 831 | +void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
| 832 | +                   void *cond_data);
719 | 833 | void update_max_tr_single(struct trace_array *tr,
720 | 834 |                           struct task_struct *tsk, int cpu);
721 | 835 | #endif /* CONFIG_TRACER_MAX_TRACE */
722 | 836 |
723 | | -#ifdef CONFIG_STACKTRACE
724 | | -void ftrace_trace_userstack(struct trace_array *tr,
725 | | -                            struct ring_buffer *buffer, unsigned long flags,
726 | | -                            int pc);
| 837 | +#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
| 838 | +    defined(CONFIG_FSNOTIFY)
727 | 839 |
| 840 | +void latency_fsnotify(struct trace_array *tr);
| 841 | +
| 842 | +#else
| 843 | +
| 844 | +static inline void latency_fsnotify(struct trace_array *tr) { }
| 845 | +
| 846 | +#endif
| 847 | +
| 848 | +#ifdef CONFIG_STACKTRACE
728 | 849 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
729 | 850 |                    int pc);
730 | 851 | #else
731 | | -static inline void ftrace_trace_userstack(struct trace_array *tr,
732 | | -                                          struct ring_buffer *buffer,
733 | | -                                          unsigned long flags, int pc)
734 | | -{
735 | | -}
736 | | -
737 | 852 | static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
738 | 853 |                                  int skip, int pc)
739 | 854 | {
.. | ..
748 | 863 |
749 | 864 | #ifdef CONFIG_DYNAMIC_FTRACE
750 | 865 | extern unsigned long ftrace_update_tot_cnt;
| 866 | +extern unsigned long ftrace_number_of_pages;
| 867 | +extern unsigned long ftrace_number_of_groups;
751 | 868 | void ftrace_init_trace_array(struct trace_array *tr);
752 | 869 | #else
753 | 870 | static inline void ftrace_init_trace_array(struct trace_array *tr) { }
.. | ..
761 | 878 | extern bool tracing_selftest_disabled;
762 | 879 |
763 | 880 | #ifdef CONFIG_FTRACE_STARTUP_TEST
| 881 | +extern void __init disable_tracing_selftest(const char *reason);
| 882 | +
764 | 883 | extern int trace_selftest_startup_function(struct tracer *trace,
765 | 884 |                                            struct trace_array *tr);
766 | 885 | extern int trace_selftest_startup_function_graph(struct tracer *trace,
.. | ..
784 | 903 |  */
785 | 904 | #define __tracer_data __refdata
786 | 905 | #else
| 906 | +static inline void __init disable_tracing_selftest(const char *reason)
| 907 | +{
| 908 | +}
787 | 909 | /* Tracers are seldom changed. Optimize when selftests are disabled. */
788 | 910 | #define __tracer_data __read_mostly
789 | 911 | #endif /* CONFIG_FTRACE_STARTUP_TEST */
.. | ..
797 | 919 | extern int
798 | 920 | trace_array_vprintk(struct trace_array *tr,
799 | 921 |                     unsigned long ip, const char *fmt, va_list args);
800 | | -int trace_array_printk(struct trace_array *tr,
801 | | -                       unsigned long ip, const char *fmt, ...);
802 | | -int trace_array_printk_buf(struct ring_buffer *buffer,
| 922 | +int trace_array_printk_buf(struct trace_buffer *buffer,
803 | 923 |                            unsigned long ip, const char *fmt, ...);
804 | 924 | void trace_printk_seq(struct trace_seq *s);
805 | 925 | enum print_line_t print_trace_line(struct trace_iterator *iter);
.. | ..
845 | 965 | #define TRACE_GRAPH_PRINT_PROC     0x8
846 | 966 | #define TRACE_GRAPH_PRINT_DURATION 0x10
847 | 967 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
848 | | -#define TRACE_GRAPH_PRINT_IRQS     0x40
849 | | -#define TRACE_GRAPH_PRINT_TAIL     0x80
850 | | -#define TRACE_GRAPH_SLEEP_TIME     0x100
851 | | -#define TRACE_GRAPH_GRAPH_TIME     0x200
| 968 | +#define TRACE_GRAPH_PRINT_REL_TIME 0x40
| 969 | +#define TRACE_GRAPH_PRINT_IRQS     0x80
| 970 | +#define TRACE_GRAPH_PRINT_TAIL     0x100
| 971 | +#define TRACE_GRAPH_SLEEP_TIME     0x200
| 972 | +#define TRACE_GRAPH_GRAPH_TIME     0x400
852 | 973 | #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
853 | 974 | #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
854 | 975 |
855 | 976 | extern void ftrace_graph_sleep_time_control(bool enable);
| 977 | +
| 978 | +#ifdef CONFIG_FUNCTION_PROFILER
856 | 979 | extern void ftrace_graph_graph_time_control(bool enable);
| 980 | +#else
| 981 | +static inline void ftrace_graph_graph_time_control(bool enable) { }
| 982 | +#endif
857 | 983 |
858 | 984 | extern enum print_line_t
859 | 985 | print_graph_function_flags(struct trace_iterator *iter, u32 flags);
.. | ..
985 | 1111 | extern struct list_head ftrace_pids;
986 | 1112 |
987 | 1113 | #ifdef CONFIG_FUNCTION_TRACER
| 1114 | +
| 1115 | +#define FTRACE_PID_IGNORE -1
| 1116 | +#define FTRACE_PID_TRACE  -2
| 1117 | +
988 | 1118 | struct ftrace_func_command {
989 | 1119 |     struct list_head list;
990 | 1120 |     char *name;
.. | ..
996 | 1126 | extern bool ftrace_filter_param __initdata;
997 | 1127 | static inline int ftrace_trace_task(struct trace_array *tr)
998 | 1128 | {
999 | | -    return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
| 1129 | +    return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
| 1130 | +        FTRACE_PID_IGNORE;
1000 | 1131 | }
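
Turning ftrace_ignore_pid from bool into int is what gives the FTRACE_PID_* sentinels above somewhere to live: the per-CPU field now caches either FTRACE_PID_IGNORE, FTRACE_PID_TRACE, or a concrete pid. A speculative sketch of how a sched-switch hook might keep it up to date (the function and its wiring are hypothetical):

```c
/* Illustrative only: cache the filter decision for the incoming task so
 * ftrace_trace_task() stays a single per-CPU read on the hot path. */
static void example_sched_in(struct trace_array *tr, struct task_struct *next)
{
    struct trace_pid_list *pid_list;
    int decision;

    pid_list = rcu_dereference_sched(tr->function_pids);
    if (!pid_list)
        decision = FTRACE_PID_TRACE;  /* no filter: trace everything */
    else if (trace_find_filtered_pid(pid_list, next->pid))
        decision = next->pid;         /* on the list: trace */
    else
        decision = FTRACE_PID_IGNORE; /* filtered out */

    this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, decision);
}
```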
1001 | 1132 | extern int ftrace_is_dead(void);
1002 | 1133 | int ftrace_create_function_files(struct trace_array *tr,
1003 | 1134 |                                  struct dentry *parent);
1004 | 1135 | void ftrace_destroy_function_files(struct trace_array *tr);
| 1136 | +int ftrace_allocate_ftrace_ops(struct trace_array *tr);
| 1137 | +void ftrace_free_ftrace_ops(struct trace_array *tr);
1005 | 1138 | void ftrace_init_global_array_ops(struct trace_array *tr);
1006 | 1139 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1007 | 1140 | void ftrace_reset_array_ops(struct trace_array *tr);
.. | ..
1023 | 1156 | {
1024 | 1157 |     return 0;
1025 | 1158 | }
| 1159 | +static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
| 1160 | +{
| 1161 | +    return 0;
| 1162 | +}
| 1163 | +static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
1026 | 1164 | static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1027 | 1165 | static inline __init void
1028 | 1166 | ftrace_init_global_array_ops(struct trace_array *tr) { }
.. | ..
1084 | 1222 | void ftrace_create_filter_files(struct ftrace_ops *ops,
1085 | 1223 |                                 struct dentry *parent);
1086 | 1224 | void ftrace_destroy_filter_files(struct ftrace_ops *ops);
| 1225 | +
| 1226 | +extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
| 1227 | +                             int len, int reset);
| 1228 | +extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
| 1229 | +                              int len, int reset);
1087 | 1230 | #else
1088 | 1231 | struct ftrace_func_command;
1089 | 1232 |
.. | ..
1209 | 1352 |     C(IRQ_INFO, "irq-info"), \
1210 | 1353 |     C(MARKERS, "markers"), \
1211 | 1354 |     C(EVENT_FORK, "event-fork"), \
| 1355 | +    C(PAUSE_ON_TRACE, "pause-on-trace"), \
1212 | 1356 |     FUNCTION_FLAGS \
1213 | 1357 |     FGRAPH_FLAGS \
1214 | 1358 |     STACK_FLAGS \
.. | ..
1306 | 1450 | };
1307 | 1451 |
1308 | 1452 | extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1309 | | -                                     struct ring_buffer *buffer,
| 1453 | +                                     struct trace_buffer *buffer,
1310 | 1454 |                                      struct ring_buffer_event *event);
1311 | 1455 |
1312 | 1456 | void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1313 | | -                                     struct ring_buffer *buffer,
| 1457 | +                                     struct trace_buffer *buffer,
1314 | 1458 |                                      struct ring_buffer_event *event,
1315 | 1459 |                                      unsigned long flags, int pc,
1316 | 1460 |                                      struct pt_regs *regs);
1317 | 1461 |
1318 | 1462 | static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1319 | | -                                              struct ring_buffer *buffer,
| 1463 | +                                              struct trace_buffer *buffer,
1320 | 1464 |                                               struct ring_buffer_event *event,
1321 | 1465 |                                               unsigned long flags, int pc)
1322 | 1466 | {
.. | ..
1329 | 1473 | void trace_buffered_event_enable(void);
1330 | 1474 |
1331 | 1475 | static inline void
1332 | | -__trace_event_discard_commit(struct ring_buffer *buffer,
| 1476 | +__trace_event_discard_commit(struct trace_buffer *buffer,
1333 | 1477 |                              struct ring_buffer_event *event)
1334 | 1478 | {
1335 | 1479 |     if (this_cpu_read(trace_buffered_event) == event) {
.. | ..
1343 | 1487 | /*
1344 | 1488 |  * Helper function for event_trigger_unlock_commit{_regs}().
1345 | 1489 |  * If there are event triggers attached to this event that requires
1346 | | - * filtering against its fields, then they wil be called as the
| 1490 | + * filtering against its fields, then they will be called as the
1347 | 1491 |  * entry already holds the field information of the current event.
1348 | 1492 |  *
1349 | 1493 |  * It also checks if the event should be discarded or not.
.. | ..
1355 | 1499 |  */
1356 | 1500 | static inline bool
1357 | 1501 | __event_trigger_test_discard(struct trace_event_file *file,
1358 | | -                             struct ring_buffer *buffer,
| 1502 | +                             struct trace_buffer *buffer,
1359 | 1503 |                              struct ring_buffer_event *event,
1360 | 1504 |                              void *entry,
1361 | 1505 |                              enum event_trigger_type *tt)
.. | ..
1402 | 1546 |  */
1403 | 1547 | static inline void
1404 | 1548 | event_trigger_unlock_commit(struct trace_event_file *file,
1405 | | -                            struct ring_buffer *buffer,
| 1549 | +                            struct trace_buffer *buffer,
1406 | 1550 |                             struct ring_buffer_event *event,
1407 | 1551 |                             void *entry, unsigned long irq_flags, int pc)
1408 | 1552 | {
.. | ..
1433 | 1577 |  */
1434 | 1578 | static inline void
1435 | 1579 | event_trigger_unlock_commit_regs(struct trace_event_file *file,
1436 | | -                                 struct ring_buffer *buffer,
| 1580 | +                                 struct trace_buffer *buffer,
1437 | 1581 |                                  struct ring_buffer_event *event,
1438 | 1582 |                                  void *entry, unsigned long irq_flags, int pc,
1439 | 1583 |                                  struct pt_regs *regs)
.. | ..
1474 | 1618 |     MATCH_MIDDLE_ONLY,
1475 | 1619 |     MATCH_END_ONLY,
1476 | 1620 |     MATCH_GLOB,
| 1621 | +    MATCH_INDEX,
1477 | 1622 | };
1478 | 1623 |
1479 | 1624 | struct regex {
.. | ..
1518 | 1663 | extern void print_subsystem_event_filter(struct event_subsystem *system,
1519 | 1664 |                                          struct trace_seq *s);
1520 | 1665 | extern int filter_assign_type(const char *type);
1521 | | -extern int create_event_filter(struct trace_event_call *call,
| 1666 | +extern int create_event_filter(struct trace_array *tr,
| 1667 | +                               struct trace_event_call *call,
1522 | 1668 |                                char *filter_str, bool set_str,
1523 | 1669 |                                struct event_filter **filterp);
1524 | 1670 | extern void free_event_filter(struct event_filter *filter);
.. | ..
1530 | 1676 | extern void trace_event_enable_tgid_record(bool enable);
1531 | 1677 |
1532 | 1678 | extern int event_trace_init(void);
| 1679 | +extern int init_events(void);
1533 | 1680 | extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1534 | 1681 | extern int event_trace_del_tracer(struct trace_array *tr);
| 1682 | +extern void __trace_early_add_events(struct trace_array *tr);
1535 | 1683 |
1536 | 1684 | extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1537 | 1685 |                                                   const char *system,
.. | ..
1550 | 1698 |
1551 | 1699 | extern const struct file_operations event_trigger_fops;
1552 | 1700 | extern const struct file_operations event_hist_fops;
| 1701 | +extern const struct file_operations event_hist_debug_fops;
| 1702 | +extern const struct file_operations event_inject_fops;
1553 | 1703 |
1554 | 1704 | #ifdef CONFIG_HIST_TRIGGERS
1555 | 1705 | extern int register_trigger_hist_cmd(void);
.. | ..
1824 | 1974 | extern int trace_event_enable_disable(struct trace_event_file *file,
1825 | 1975 |                                       int enable, int soft_disable);
1826 | 1976 | extern int tracing_alloc_snapshot(void);
| 1977 | +extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
| 1978 | +extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
| 1979 | +
| 1980 | +extern int tracing_snapshot_cond_disable(struct trace_array *tr);
| 1981 | +extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1827 | 1982 |
1828 | 1983 | extern const char *__start___trace_bprintk_fmt[];
1829 | 1984 | extern const char *__stop___trace_bprintk_fmt[];
.. | ..
1832 | 1987 | extern const char *__stop___tracepoint_str[];
1833 | 1988 |
1834 | 1989 | void trace_printk_control(bool enabled);
1835 | | -void trace_printk_init_buffers(void);
1836 | 1990 | void trace_printk_start_comm(void);
1837 | 1991 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1838 | 1992 | int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
| 1993 | +
| 1994 | +/* Used from boot time tracer */
| 1995 | +extern int trace_set_options(struct trace_array *tr, char *option);
| 1996 | +extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
| 1997 | +extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
| 1998 | +                                          unsigned long size, int cpu_id);
| 1999 | +extern int tracing_set_cpumask(struct trace_array *tr,
| 2000 | +                               cpumask_var_t tracing_cpumask_new);
| 2001 | +
1839 | 2002 |
1840 | 2003 | #define MAX_EVENT_NAME_LEN 64
1841 | 2004 |
.. | ..
1843 | 2006 | extern ssize_t trace_parse_run_command(struct file *file,
1844 | 2007 |     const char __user *buffer, size_t count, loff_t *ppos,
1845 | 2008 |     int (*createfn)(int, char**));
| 2009 | +
| 2010 | +extern unsigned int err_pos(char *cmd, const char *str);
| 2011 | +extern void tracing_log_err(struct trace_array *tr,
| 2012 | +                            const char *loc, const char *cmd,
| 2013 | +                            const char **errs, u8 type, u8 pos);
1846 | 2014 |
1847 | 2015 | /*
1848 | 2016 |  * Normal trace_printk() and friends allocates special buffers
.. | ..
1856 | 2024 | #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1857 | 2025 |
1858 | 2026 | #undef FTRACE_ENTRY
1859 | | -#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
| 2027 | +#define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
1860 | 2028 |     extern struct trace_event_call \
1861 | 2029 |     __aligned(4) event_##call;
1862 | 2030 | #undef FTRACE_ENTRY_DUP
1863 | | -#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1864 | | -    FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1865 | | -        filter)
| 2031 | +#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
| 2032 | +    FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1866 | 2033 | #undef FTRACE_ENTRY_PACKED
1867 | | -#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
1868 | | -    FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1869 | | -        filter)
| 2034 | +#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
| 2035 | +    FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1870 | 2036 |
1871 | 2037 | #include "trace_entries.h"
1872 | 2038 |
.. | ..
1891 | 2057 | #ifdef CONFIG_EVENT_TRACING
1892 | 2058 | void trace_event_init(void);
1893 | 2059 | void trace_event_eval_update(struct trace_eval_map **map, int len);
| 2060 | +/* Used from boot time tracer */
| 2061 | +extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
| 2062 | +extern int trigger_process_regex(struct trace_event_file *file, char *buff);
1894 | 2063 | #else
1895 | 2064 | static inline void __init trace_event_init(void) { }
1896 | 2065 | static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
.. | ..
1942 | 2111 |     iter->pos = -1;
1943 | 2112 | }
1944 | 2113 |
| 2114 | +/* Check the name is good for event/group/fields */
| 2115 | +static inline bool is_good_name(const char *name)
| 2116 | +{
| 2117 | +    if (!isalpha(*name) && *name != '_')
| 2118 | +        return false;
| 2119 | +    while (*++name != '\0') {
| 2120 | +        if (!isalpha(*name) && !isdigit(*name) && *name != '_')
| 2121 | +            return false;
| 2122 | +    }
| 2123 | +    return true;
| 2124 | +}
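
A quick usage sketch: probe- and event-creation paths can validate a user-supplied name up front (the surrounding function is hypothetical):

```c
/* is_good_name() accepts [A-Za-z_][A-Za-z0-9_]* and rejects the rest. */
static int example_create_event(const char *name)
{
    if (!is_good_name(name))
        return -EINVAL;
    /* ... go on to register the event under 'name' ... */
    return 0;
}
```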
| 2125 | +
1945 | 2126 | #endif /* _LINUX_KERNEL_TRACE_H */
---|