.. | .. |
---|
17 | 17 | #include <linux/stacktrace.h> |
---|
18 | 18 | #include <linux/writeback.h> |
---|
19 | 19 | #include <linux/kallsyms.h> |
---|
| 20 | +#include <linux/security.h> |
---|
20 | 21 | #include <linux/seq_file.h> |
---|
21 | 22 | #include <linux/notifier.h> |
---|
22 | 23 | #include <linux/irqflags.h> |
---|
.. | .. |
---|
44 | 45 | #include <linux/trace.h> |
---|
45 | 46 | #include <linux/sched/clock.h> |
---|
46 | 47 | #include <linux/sched/rt.h> |
---|
| 48 | +#include <linux/fsnotify.h> |
---|
| 49 | +#include <linux/irq_work.h> |
---|
| 50 | +#include <linux/workqueue.h> |
---|
| 51 | +#include <trace/hooks/ftrace_dump.h> |
---|
47 | 52 | |
---|
48 | 53 | #include "trace.h" |
---|
49 | 54 | #include "trace_output.h" |
---|
.. | .. |
---|
64 | 69 | static bool __read_mostly tracing_selftest_running; |
---|
65 | 70 | |
---|
66 | 71 | /* |
---|
67 | | - * If a tracer is running, we do not want to run SELFTEST. |
---|
| 72 | + * If boot-time tracing including tracers/events via kernel cmdline |
---|
| 73 | + * is running, we do not want to run SELFTEST. |
---|
68 | 74 | */ |
---|
69 | 75 | bool __read_mostly tracing_selftest_disabled; |
---|
| 76 | + |
---|
| 77 | +#ifdef CONFIG_FTRACE_STARTUP_TEST |
---|
| 78 | +void __init disable_tracing_selftest(const char *reason) |
---|
| 79 | +{ |
---|
| 80 | + if (!tracing_selftest_disabled) { |
---|
| 81 | + tracing_selftest_disabled = true; |
---|
| 82 | + pr_info("Ftrace startup test is disabled due to %s\n", reason); |
---|
| 83 | + } |
---|
| 84 | +} |
---|
| 85 | +#endif |
---|
70 | 86 | |
---|
71 | 87 | /* Pipe tracepoints to printk */ |
---|
72 | 88 | struct trace_iterator *tracepoint_print_iter; |
---|
.. | .. |
---|
158 | 174 | static union trace_eval_map_item *trace_eval_maps; |
---|
159 | 175 | #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ |
---|
160 | 176 | |
---|
161 | | -static int tracing_set_tracer(struct trace_array *tr, const char *buf); |
---|
| 177 | +int tracing_set_tracer(struct trace_array *tr, const char *buf); |
---|
| 178 | +static void ftrace_trace_userstack(struct trace_array *tr, |
---|
| 179 | + struct trace_buffer *buffer, |
---|
| 180 | + unsigned long flags, int pc); |
---|
162 | 181 | |
---|
163 | 182 | #define MAX_TRACER_SIZE 100 |
---|
164 | 183 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
---|
.. | .. |
---|
215 | 234 | static int __init set_trace_boot_options(char *str) |
---|
216 | 235 | { |
---|
217 | 236 | strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); |
---|
218 | | - return 0; |
---|
| 237 | + return 1; |
---|
219 | 238 | } |
---|
220 | 239 | __setup("trace_options=", set_trace_boot_options); |
---|
221 | 240 | |
---|
.. | .. |
---|
226 | 245 | { |
---|
227 | 246 | strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); |
---|
228 | 247 | trace_boot_clock = trace_boot_clock_buf; |
---|
229 | | - return 0; |
---|
| 248 | + return 1; |
---|
230 | 249 | } |
---|
231 | 250 | __setup("trace_clock=", set_trace_boot_clock); |
---|
232 | 251 | |
---|
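The two return-value changes above follow the __setup() convention: a handler returns 1 once it has consumed the parameter, while returning 0 tells the kernel the option was not handled, so it ends up being reported as unknown and passed along to init. A minimal sketch of the convention, with a made-up option name:

```c
static int __init demo_setup(char *str)
{
	/* parse str here; it is everything after "demo_option=" */
	return 1;	/* consumed: do not report as unknown or pass to init */
}
__setup("demo_option=", demo_setup);
```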
.. | .. |
---|
248 | 267 | do_div(nsec, 1000); |
---|
249 | 268 | return nsec; |
---|
250 | 269 | } |
---|
| 270 | + |
---|
| 271 | +static void |
---|
| 272 | +trace_process_export(struct trace_export *export, |
---|
| 273 | + struct ring_buffer_event *event, int flag) |
---|
| 274 | +{ |
---|
| 275 | + struct trace_entry *entry; |
---|
| 276 | + unsigned int size = 0; |
---|
| 277 | + |
---|
| 278 | + if (export->flags & flag) { |
---|
| 279 | + entry = ring_buffer_event_data(event); |
---|
| 280 | + size = ring_buffer_event_length(event); |
---|
| 281 | + export->write(export, entry, size); |
---|
| 282 | + } |
---|
| 283 | +} |
---|
| 284 | + |
---|
| 285 | +static DEFINE_MUTEX(ftrace_export_lock); |
---|
| 286 | + |
---|
| 287 | +static struct trace_export __rcu *ftrace_exports_list __read_mostly; |
---|
| 288 | + |
---|
| 289 | +static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled); |
---|
| 290 | +static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled); |
---|
| 291 | +static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled); |
---|
| 292 | + |
---|
| 293 | +static inline void ftrace_exports_enable(struct trace_export *export) |
---|
| 294 | +{ |
---|
| 295 | + if (export->flags & TRACE_EXPORT_FUNCTION) |
---|
| 296 | + static_branch_inc(&trace_function_exports_enabled); |
---|
| 297 | + |
---|
| 298 | + if (export->flags & TRACE_EXPORT_EVENT) |
---|
| 299 | + static_branch_inc(&trace_event_exports_enabled); |
---|
| 300 | + |
---|
| 301 | + if (export->flags & TRACE_EXPORT_MARKER) |
---|
| 302 | + static_branch_inc(&trace_marker_exports_enabled); |
---|
| 303 | +} |
---|
| 304 | + |
---|
| 305 | +static inline void ftrace_exports_disable(struct trace_export *export) |
---|
| 306 | +{ |
---|
| 307 | + if (export->flags & TRACE_EXPORT_FUNCTION) |
---|
| 308 | + static_branch_dec(&trace_function_exports_enabled); |
---|
| 309 | + |
---|
| 310 | + if (export->flags & TRACE_EXPORT_EVENT) |
---|
| 311 | + static_branch_dec(&trace_event_exports_enabled); |
---|
| 312 | + |
---|
| 313 | + if (export->flags & TRACE_EXPORT_MARKER) |
---|
| 314 | + static_branch_dec(&trace_marker_exports_enabled); |
---|
| 315 | +} |
---|
| 316 | + |
---|
| 317 | +static void ftrace_exports(struct ring_buffer_event *event, int flag) |
---|
| 318 | +{ |
---|
| 319 | + struct trace_export *export; |
---|
| 320 | + |
---|
| 321 | + preempt_disable_notrace(); |
---|
| 322 | + |
---|
| 323 | + export = rcu_dereference_raw_check(ftrace_exports_list); |
---|
| 324 | + while (export) { |
---|
| 325 | + trace_process_export(export, event, flag); |
---|
| 326 | + export = rcu_dereference_raw_check(export->next); |
---|
| 327 | + } |
---|
| 328 | + |
---|
| 329 | + preempt_enable_notrace(); |
---|
| 330 | +} |
---|
| 331 | + |
---|
| 332 | +static inline void |
---|
| 333 | +add_trace_export(struct trace_export **list, struct trace_export *export) |
---|
| 334 | +{ |
---|
| 335 | + rcu_assign_pointer(export->next, *list); |
---|
| 336 | + /* |
---|
| 337 | + * We are entering export into the list but another |
---|
| 338 | + * CPU might be walking that list. We need to make sure |
---|
| 339 | + * the export->next pointer is valid before another CPU sees |
---|
| 340 | + * the export pointer included into the list. |
---|
| 341 | + */ |
---|
| 342 | + rcu_assign_pointer(*list, export); |
---|
| 343 | +} |
---|
| 344 | + |
---|
| 345 | +static inline int |
---|
| 346 | +rm_trace_export(struct trace_export **list, struct trace_export *export) |
---|
| 347 | +{ |
---|
| 348 | + struct trace_export **p; |
---|
| 349 | + |
---|
| 350 | + for (p = list; *p != NULL; p = &(*p)->next) |
---|
| 351 | + if (*p == export) |
---|
| 352 | + break; |
---|
| 353 | + |
---|
| 354 | + if (*p != export) |
---|
| 355 | + return -1; |
---|
| 356 | + |
---|
| 357 | + rcu_assign_pointer(*p, (*p)->next); |
---|
| 358 | + |
---|
| 359 | + return 0; |
---|
| 360 | +} |
---|
| 361 | + |
---|
| 362 | +static inline void |
---|
| 363 | +add_ftrace_export(struct trace_export **list, struct trace_export *export) |
---|
| 364 | +{ |
---|
| 365 | + ftrace_exports_enable(export); |
---|
| 366 | + |
---|
| 367 | + add_trace_export(list, export); |
---|
| 368 | +} |
---|
| 369 | + |
---|
| 370 | +static inline int |
---|
| 371 | +rm_ftrace_export(struct trace_export **list, struct trace_export *export) |
---|
| 372 | +{ |
---|
| 373 | + int ret; |
---|
| 374 | + |
---|
| 375 | + ret = rm_trace_export(list, export); |
---|
| 376 | + ftrace_exports_disable(export); |
---|
| 377 | + |
---|
| 378 | + return ret; |
---|
| 379 | +} |
---|
| 380 | + |
---|
| 381 | +int register_ftrace_export(struct trace_export *export) |
---|
| 382 | +{ |
---|
| 383 | + if (WARN_ON_ONCE(!export->write)) |
---|
| 384 | + return -1; |
---|
| 385 | + |
---|
| 386 | + mutex_lock(&ftrace_export_lock); |
---|
| 387 | + |
---|
| 388 | + add_ftrace_export(&ftrace_exports_list, export); |
---|
| 389 | + |
---|
| 390 | + mutex_unlock(&ftrace_export_lock); |
---|
| 391 | + |
---|
| 392 | + return 0; |
---|
| 393 | +} |
---|
| 394 | +EXPORT_SYMBOL_GPL(register_ftrace_export); |
---|
| 395 | + |
---|
| 396 | +int unregister_ftrace_export(struct trace_export *export) |
---|
| 397 | +{ |
---|
| 398 | + int ret; |
---|
| 399 | + |
---|
| 400 | + mutex_lock(&ftrace_export_lock); |
---|
| 401 | + |
---|
| 402 | + ret = rm_ftrace_export(&ftrace_exports_list, export); |
---|
| 403 | + |
---|
| 404 | + mutex_unlock(&ftrace_export_lock); |
---|
| 405 | + |
---|
| 406 | + return ret; |
---|
| 407 | +} |
---|
| 408 | +EXPORT_SYMBOL_GPL(unregister_ftrace_export); |
---|
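For context, the export registration API above (moved up in the file by this patch and extended with per-type flags and static keys) is consumed by in-kernel modules. A rough sketch of such a consumer follows; struct trace_export, its write() signature and the TRACE_EXPORT_* flags are the real API from <linux/trace.h>, while the module and its callback are invented for illustration:

```c
#include <linux/module.h>
#include <linux/trace.h>

/* Hypothetical consumer: look at each exported function/event record. */
static void demo_export_write(struct trace_export *export, const void *entry,
			      unsigned int size)
{
	/* entry points at the raw trace_entry payload, size is its length */
	pr_debug("exported %u bytes of trace data\n", size);
}

static struct trace_export demo_export = {
	.write	= demo_export_write,
	.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
};

static int __init demo_export_init(void)
{
	return register_ftrace_export(&demo_export);
}

static void __exit demo_export_exit(void)
{
	unregister_ftrace_export(&demo_export);
}

module_init(demo_export_init);
module_exit(demo_export_exit);
MODULE_LICENSE("GPL");
```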
251 | 409 | |
---|
252 | 410 | /* trace_flags holds trace_options default values */ |
---|
253 | 411 | #define TRACE_DEFAULT_FLAGS \ |
---|
.. | .. |
---|
299 | 457 | this_tr->ref--; |
---|
300 | 458 | } |
---|
301 | 459 | |
---|
| 460 | +/** |
---|
| 461 | + * trace_array_put - Decrement the reference counter for this trace array. |
---|
| 462 | + * |
---|
| 463 | + * NOTE: Use this when we no longer need the trace array returned by |
---|
| 464 | + * trace_array_get_by_name(). This ensures the trace array can be later |
---|
| 465 | + * destroyed. |
---|
| 466 | + * |
---|
| 467 | + */ |
---|
302 | 468 | void trace_array_put(struct trace_array *this_tr) |
---|
303 | 469 | { |
---|
| 470 | + if (!this_tr) |
---|
| 471 | + return; |
---|
| 472 | + |
---|
304 | 473 | mutex_lock(&trace_types_lock); |
---|
305 | 474 | __trace_array_put(this_tr); |
---|
306 | 475 | mutex_unlock(&trace_types_lock); |
---|
307 | 476 | } |
---|
| 477 | +EXPORT_SYMBOL_GPL(trace_array_put); |
---|
| 478 | + |
---|
| 479 | +int tracing_check_open_get_tr(struct trace_array *tr) |
---|
| 480 | +{ |
---|
| 481 | + int ret; |
---|
| 482 | + |
---|
| 483 | + ret = security_locked_down(LOCKDOWN_TRACEFS); |
---|
| 484 | + if (ret) |
---|
| 485 | + return ret; |
---|
| 486 | + |
---|
| 487 | + if (tracing_disabled) |
---|
| 488 | + return -ENODEV; |
---|
| 489 | + |
---|
| 490 | + if (tr && trace_array_get(tr) < 0) |
---|
| 491 | + return -ENODEV; |
---|
| 492 | + |
---|
| 493 | + return 0; |
---|
| 494 | +} |
---|
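tracing_check_open_get_tr() centralizes the lockdown, tracing_disabled and reference-count checks that the tracefs open() handlers in this file otherwise repeat. A simplified sketch of the intended calling pattern; the handler names are hypothetical and the real handlers carry more state:

```c
/* Hypothetical tracefs open/release pair using the new helper. */
static int demo_trace_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Fails under lockdown, when tracing is disabled, or if tr is going away. */
	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	filp->private_data = inode->i_private;
	return 0;
}

static int demo_trace_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);	/* drop the reference taken at open */
	return 0;
}
```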
308 | 495 | |
---|
309 | 496 | int call_filter_check_discard(struct trace_event_call *call, void *rec, |
---|
310 | | - struct ring_buffer *buffer, |
---|
| 497 | + struct trace_buffer *buffer, |
---|
311 | 498 | struct ring_buffer_event *event) |
---|
312 | 499 | { |
---|
313 | 500 | if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && |
---|
.. | .. |
---|
355 | 542 | * Returns false if @task should be traced. |
---|
356 | 543 | */ |
---|
357 | 544 | bool |
---|
358 | | -trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task) |
---|
| 545 | +trace_ignore_this_task(struct trace_pid_list *filtered_pids, |
---|
| 546 | + struct trace_pid_list *filtered_no_pids, |
---|
| 547 | + struct task_struct *task) |
---|
359 | 548 | { |
---|
360 | 549 | /* |
---|
361 | | - * Return false, because if filtered_pids does not exist, |
---|
362 | | - * all pids are good to trace. |
---|
| 550 | + * If filtered_no_pids is not empty, and the task's pid is listed |
---|
| 551 | + * in filtered_no_pids, then return true. |
---|
| 552 | + * Otherwise, if filtered_pids is empty, that means we can |
---|
| 553 | + * trace all tasks. If it has content, then only trace pids |
---|
| 554 | + * within filtered_pids. |
---|
363 | 555 | */ |
---|
364 | | - if (!filtered_pids) |
---|
365 | | - return false; |
---|
366 | 556 | |
---|
367 | | - return !trace_find_filtered_pid(filtered_pids, task->pid); |
---|
| 557 | + return (filtered_pids && |
---|
| 558 | + !trace_find_filtered_pid(filtered_pids, task->pid)) || |
---|
| 559 | + (filtered_no_pids && |
---|
| 560 | + trace_find_filtered_pid(filtered_no_pids, task->pid)); |
---|
368 | 561 | } |
---|
369 | 562 | |
---|
370 | 563 | /** |
---|
371 | | - * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list |
---|
| 564 | + * trace_filter_add_remove_task - Add or remove a task from a pid_list |
---|
372 | 565 | * @pid_list: The list to modify |
---|
373 | 566 | * @self: The current task for fork or NULL for exit |
---|
374 | 567 | * @task: The task to add or remove |
---|
.. | .. |
---|
572 | 765 | return read; |
---|
573 | 766 | } |
---|
574 | 767 | |
---|
575 | | -static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) |
---|
| 768 | +static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu) |
---|
576 | 769 | { |
---|
577 | 770 | u64 ts; |
---|
578 | 771 | |
---|
.. | .. |
---|
588 | 781 | |
---|
589 | 782 | u64 ftrace_now(int cpu) |
---|
590 | 783 | { |
---|
591 | | - return buffer_ftrace_now(&global_trace.trace_buffer, cpu); |
---|
| 784 | + return buffer_ftrace_now(&global_trace.array_buffer, cpu); |
---|
592 | 785 | } |
---|
593 | 786 | |
---|
594 | 787 | /** |
---|
.. | .. |
---|
716 | 909 | #endif |
---|
717 | 910 | |
---|
718 | 911 | #ifdef CONFIG_STACKTRACE |
---|
719 | | -static void __ftrace_trace_stack(struct ring_buffer *buffer, |
---|
| 912 | +static void __ftrace_trace_stack(struct trace_buffer *buffer, |
---|
720 | 913 | unsigned long flags, |
---|
721 | 914 | int skip, int pc, struct pt_regs *regs); |
---|
722 | 915 | static inline void ftrace_trace_stack(struct trace_array *tr, |
---|
723 | | - struct ring_buffer *buffer, |
---|
| 916 | + struct trace_buffer *buffer, |
---|
724 | 917 | unsigned long flags, |
---|
725 | 918 | int skip, int pc, struct pt_regs *regs); |
---|
726 | 919 | |
---|
727 | 920 | #else |
---|
728 | | -static inline void __ftrace_trace_stack(struct ring_buffer *buffer, |
---|
| 921 | +static inline void __ftrace_trace_stack(struct trace_buffer *buffer, |
---|
729 | 922 | unsigned long flags, |
---|
730 | 923 | int skip, int pc, struct pt_regs *regs) |
---|
731 | 924 | { |
---|
732 | 925 | } |
---|
733 | 926 | static inline void ftrace_trace_stack(struct trace_array *tr, |
---|
734 | | - struct ring_buffer *buffer, |
---|
| 927 | + struct trace_buffer *buffer, |
---|
735 | 928 | unsigned long flags, |
---|
736 | 929 | int skip, int pc, struct pt_regs *regs) |
---|
737 | 930 | { |
---|
.. | .. |
---|
745 | 938 | { |
---|
746 | 939 | struct trace_entry *ent = ring_buffer_event_data(event); |
---|
747 | 940 | |
---|
748 | | - tracing_generic_entry_update(ent, flags, pc); |
---|
749 | | - ent->type = type; |
---|
| 941 | + tracing_generic_entry_update(ent, type, flags, pc); |
---|
750 | 942 | } |
---|
751 | 943 | |
---|
752 | 944 | static __always_inline struct ring_buffer_event * |
---|
753 | | -__trace_buffer_lock_reserve(struct ring_buffer *buffer, |
---|
| 945 | +__trace_buffer_lock_reserve(struct trace_buffer *buffer, |
---|
754 | 946 | int type, |
---|
755 | 947 | unsigned long len, |
---|
756 | 948 | unsigned long flags, int pc) |
---|
.. | .. |
---|
766 | 958 | |
---|
767 | 959 | void tracer_tracing_on(struct trace_array *tr) |
---|
768 | 960 | { |
---|
769 | | - if (tr->trace_buffer.buffer) |
---|
770 | | - ring_buffer_record_on(tr->trace_buffer.buffer); |
---|
| 961 | + if (tr->array_buffer.buffer) |
---|
| 962 | + ring_buffer_record_on(tr->array_buffer.buffer); |
---|
771 | 963 | /* |
---|
772 | 964 | * This flag is looked at when buffers haven't been allocated |
---|
773 | 965 | * yet, or by some tracers (like irqsoff), that just want to |
---|
.. | .. |
---|
795 | 987 | |
---|
796 | 988 | |
---|
797 | 989 | static __always_inline void |
---|
798 | | -__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) |
---|
| 990 | +__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event) |
---|
799 | 991 | { |
---|
800 | 992 | __this_cpu_write(trace_taskinfo_save, true); |
---|
801 | 993 | |
---|
.. | .. |
---|
818 | 1010 | int __trace_puts(unsigned long ip, const char *str, int size) |
---|
819 | 1011 | { |
---|
820 | 1012 | struct ring_buffer_event *event; |
---|
821 | | - struct ring_buffer *buffer; |
---|
| 1013 | + struct trace_buffer *buffer; |
---|
822 | 1014 | struct print_entry *entry; |
---|
823 | 1015 | unsigned long irq_flags; |
---|
824 | 1016 | int alloc; |
---|
.. | .. |
---|
835 | 1027 | alloc = sizeof(*entry) + size + 2; /* possible \n added */ |
---|
836 | 1028 | |
---|
837 | 1029 | local_save_flags(irq_flags); |
---|
838 | | - buffer = global_trace.trace_buffer.buffer; |
---|
| 1030 | + buffer = global_trace.array_buffer.buffer; |
---|
| 1031 | + ring_buffer_nest_start(buffer); |
---|
839 | 1032 | event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, |
---|
840 | 1033 | irq_flags, pc); |
---|
841 | | - if (!event) |
---|
842 | | - return 0; |
---|
| 1034 | + if (!event) { |
---|
| 1035 | + size = 0; |
---|
| 1036 | + goto out; |
---|
| 1037 | + } |
---|
843 | 1038 | |
---|
844 | 1039 | entry = ring_buffer_event_data(event); |
---|
845 | 1040 | entry->ip = ip; |
---|
.. | .. |
---|
855 | 1050 | |
---|
856 | 1051 | __buffer_unlock_commit(buffer, event); |
---|
857 | 1052 | ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); |
---|
858 | | - |
---|
| 1053 | + out: |
---|
| 1054 | + ring_buffer_nest_end(buffer); |
---|
859 | 1055 | return size; |
---|
860 | 1056 | } |
---|
861 | 1057 | EXPORT_SYMBOL_GPL(__trace_puts); |
---|
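The ring_buffer_nest_start()/ring_buffer_nest_end() pair added above tells the ring buffer that a write from inside another write (for example a trace_puts() issued while handling an event) is intentional rather than runaway recursion, and the new "out" path exists so that nest_end() runs even when the reservation fails. A condensed sketch of the pattern, assuming the two-argument ring_buffer_unlock_commit() used by this kernel version; the helper itself is hypothetical:

```c
#include <linux/ring_buffer.h>
#include <linux/string.h>

/* Hypothetical helper showing the nesting discipline used in __trace_puts(). */
static int demo_nested_write(struct trace_buffer *buffer, const char *str, int len)
{
	struct ring_buffer_event *event;
	int ret = 0;

	ring_buffer_nest_start(buffer);		/* one extra recursion level is OK */
	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		goto out;			/* must still call nest_end() */
	memcpy(ring_buffer_event_data(event), str, len);
	ring_buffer_unlock_commit(buffer, event);
	ret = len;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
```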
.. | .. |
---|
868 | 1064 | int __trace_bputs(unsigned long ip, const char *str) |
---|
869 | 1065 | { |
---|
870 | 1066 | struct ring_buffer_event *event; |
---|
871 | | - struct ring_buffer *buffer; |
---|
| 1067 | + struct trace_buffer *buffer; |
---|
872 | 1068 | struct bputs_entry *entry; |
---|
873 | 1069 | unsigned long irq_flags; |
---|
874 | 1070 | int size = sizeof(struct bputs_entry); |
---|
| 1071 | + int ret = 0; |
---|
875 | 1072 | int pc; |
---|
876 | 1073 | |
---|
877 | 1074 | if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) |
---|
.. | .. |
---|
883 | 1080 | return 0; |
---|
884 | 1081 | |
---|
885 | 1082 | local_save_flags(irq_flags); |
---|
886 | | - buffer = global_trace.trace_buffer.buffer; |
---|
| 1083 | + buffer = global_trace.array_buffer.buffer; |
---|
| 1084 | + |
---|
| 1085 | + ring_buffer_nest_start(buffer); |
---|
887 | 1086 | event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, |
---|
888 | 1087 | irq_flags, pc); |
---|
889 | 1088 | if (!event) |
---|
890 | | - return 0; |
---|
| 1089 | + goto out; |
---|
891 | 1090 | |
---|
892 | 1091 | entry = ring_buffer_event_data(event); |
---|
893 | 1092 | entry->ip = ip; |
---|
.. | .. |
---|
896 | 1095 | __buffer_unlock_commit(buffer, event); |
---|
897 | 1096 | ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); |
---|
898 | 1097 | |
---|
899 | | - return 1; |
---|
| 1098 | + ret = 1; |
---|
| 1099 | + out: |
---|
| 1100 | + ring_buffer_nest_end(buffer); |
---|
| 1101 | + return ret; |
---|
900 | 1102 | } |
---|
901 | 1103 | EXPORT_SYMBOL_GPL(__trace_bputs); |
---|
902 | 1104 | |
---|
903 | 1105 | #ifdef CONFIG_TRACER_SNAPSHOT |
---|
904 | | -void tracing_snapshot_instance(struct trace_array *tr) |
---|
| 1106 | +static void tracing_snapshot_instance_cond(struct trace_array *tr, |
---|
| 1107 | + void *cond_data) |
---|
905 | 1108 | { |
---|
906 | 1109 | struct tracer *tracer = tr->current_trace; |
---|
907 | 1110 | unsigned long flags; |
---|
.. | .. |
---|
927 | 1130 | } |
---|
928 | 1131 | |
---|
929 | 1132 | local_irq_save(flags); |
---|
930 | | - update_max_tr(tr, current, smp_processor_id()); |
---|
| 1133 | + update_max_tr(tr, current, smp_processor_id(), cond_data); |
---|
931 | 1134 | local_irq_restore(flags); |
---|
| 1135 | +} |
---|
| 1136 | + |
---|
| 1137 | +void tracing_snapshot_instance(struct trace_array *tr) |
---|
| 1138 | +{ |
---|
| 1139 | + tracing_snapshot_instance_cond(tr, NULL); |
---|
932 | 1140 | } |
---|
933 | 1141 | |
---|
934 | 1142 | /** |
---|
.. | .. |
---|
953 | 1161 | } |
---|
954 | 1162 | EXPORT_SYMBOL_GPL(tracing_snapshot); |
---|
955 | 1163 | |
---|
956 | | -static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, |
---|
957 | | - struct trace_buffer *size_buf, int cpu_id); |
---|
958 | | -static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); |
---|
| 1164 | +/** |
---|
| 1165 | + * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. |
---|
| 1166 | + * @tr: The tracing instance to snapshot |
---|
| 1167 | + * @cond_data: The data to be tested conditionally, and possibly saved |
---|
| 1168 | + * |
---|
| 1169 | + * This is the same as tracing_snapshot() except that the snapshot is |
---|
| 1170 | + * conditional - the snapshot will only happen if the |
---|
| 1171 | + * cond_snapshot.update() implementation receiving the cond_data |
---|
| 1172 | + * returns true, which means that the trace array's cond_snapshot |
---|
| 1173 | + * update() operation used the cond_data to determine whether the |
---|
| 1174 | + * snapshot should be taken, and if it was, presumably saved it along |
---|
| 1175 | + * with the snapshot. |
---|
| 1176 | + */ |
---|
| 1177 | +void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) |
---|
| 1178 | +{ |
---|
| 1179 | + tracing_snapshot_instance_cond(tr, cond_data); |
---|
| 1180 | +} |
---|
| 1181 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond); |
---|
| 1182 | + |
---|
| 1183 | +/** |
---|
| 1184 | + * tracing_snapshot_cond_data - get the user data associated with a snapshot |
---|
| 1185 | + * @tr: The tracing instance |
---|
| 1186 | + * |
---|
| 1187 | + * When the user enables a conditional snapshot using |
---|
| 1188 | + * tracing_snapshot_cond_enable(), the user-defined cond_data is saved |
---|
| 1189 | + * with the snapshot. This accessor is used to retrieve it. |
---|
| 1190 | + * |
---|
| 1191 | + * Should not be called from cond_snapshot.update(), since it takes |
---|
| 1192 | + * the tr->max_lock lock, which the code calling |
---|
| 1193 | + * cond_snapshot.update() has already done. |
---|
| 1194 | + * |
---|
| 1195 | + * Returns the cond_data associated with the trace array's snapshot. |
---|
| 1196 | + */ |
---|
| 1197 | +void *tracing_cond_snapshot_data(struct trace_array *tr) |
---|
| 1198 | +{ |
---|
| 1199 | + void *cond_data = NULL; |
---|
| 1200 | + |
---|
| 1201 | + local_irq_disable(); |
---|
| 1202 | + arch_spin_lock(&tr->max_lock); |
---|
| 1203 | + |
---|
| 1204 | + if (tr->cond_snapshot) |
---|
| 1205 | + cond_data = tr->cond_snapshot->cond_data; |
---|
| 1206 | + |
---|
| 1207 | + arch_spin_unlock(&tr->max_lock); |
---|
| 1208 | + local_irq_enable(); |
---|
| 1209 | + |
---|
| 1210 | + return cond_data; |
---|
| 1211 | +} |
---|
| 1212 | +EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); |
---|
| 1213 | + |
---|
| 1214 | +static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, |
---|
| 1215 | + struct array_buffer *size_buf, int cpu_id); |
---|
| 1216 | +static void set_buffer_entries(struct array_buffer *buf, unsigned long val); |
---|
959 | 1217 | |
---|
960 | 1218 | int tracing_alloc_snapshot_instance(struct trace_array *tr) |
---|
961 | 1219 | { |
---|
.. | .. |
---|
965 | 1223 | |
---|
966 | 1224 | /* allocate spare buffer */ |
---|
967 | 1225 | ret = resize_buffer_duplicate_size(&tr->max_buffer, |
---|
968 | | - &tr->trace_buffer, RING_BUFFER_ALL_CPUS); |
---|
| 1226 | + &tr->array_buffer, RING_BUFFER_ALL_CPUS); |
---|
969 | 1227 | if (ret < 0) |
---|
970 | 1228 | return ret; |
---|
971 | 1229 | |
---|
.. | .. |
---|
1032 | 1290 | tracing_snapshot(); |
---|
1033 | 1291 | } |
---|
1034 | 1292 | EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); |
---|
| 1293 | + |
---|
| 1294 | +/** |
---|
| 1295 | + * tracing_snapshot_cond_enable - enable conditional snapshot for an instance |
---|
| 1296 | + * @tr: The tracing instance |
---|
| 1297 | + * @cond_data: User data to associate with the snapshot |
---|
| 1298 | + * @update: Implementation of the cond_snapshot update function |
---|
| 1299 | + * |
---|
| 1300 | + * Check whether the conditional snapshot for the given instance has |
---|
| 1301 | + * already been enabled, or if the current tracer is already using a |
---|
| 1302 | + * snapshot; if so, return -EBUSY, else create a cond_snapshot and |
---|
| 1303 | + * save the cond_data and update function inside. |
---|
| 1304 | + * |
---|
| 1305 | + * Returns 0 if successful, error otherwise. |
---|
| 1306 | + */ |
---|
| 1307 | +int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, |
---|
| 1308 | + cond_update_fn_t update) |
---|
| 1309 | +{ |
---|
| 1310 | + struct cond_snapshot *cond_snapshot; |
---|
| 1311 | + int ret = 0; |
---|
| 1312 | + |
---|
| 1313 | + cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); |
---|
| 1314 | + if (!cond_snapshot) |
---|
| 1315 | + return -ENOMEM; |
---|
| 1316 | + |
---|
| 1317 | + cond_snapshot->cond_data = cond_data; |
---|
| 1318 | + cond_snapshot->update = update; |
---|
| 1319 | + |
---|
| 1320 | + mutex_lock(&trace_types_lock); |
---|
| 1321 | + |
---|
| 1322 | + ret = tracing_alloc_snapshot_instance(tr); |
---|
| 1323 | + if (ret) |
---|
| 1324 | + goto fail_unlock; |
---|
| 1325 | + |
---|
| 1326 | + if (tr->current_trace->use_max_tr) { |
---|
| 1327 | + ret = -EBUSY; |
---|
| 1328 | + goto fail_unlock; |
---|
| 1329 | + } |
---|
| 1330 | + |
---|
| 1331 | + /* |
---|
| 1332 | + * The cond_snapshot can only change to NULL without the |
---|
| 1333 | + * trace_types_lock. We don't care if we race with it going |
---|
| 1334 | + * to NULL, but we want to make sure that it's not set to |
---|
| 1335 | + * something other than NULL when we get here, which we can |
---|
| 1336 | + * do safely with only holding the trace_types_lock and not |
---|
| 1337 | + * having to take the max_lock. |
---|
| 1338 | + */ |
---|
| 1339 | + if (tr->cond_snapshot) { |
---|
| 1340 | + ret = -EBUSY; |
---|
| 1341 | + goto fail_unlock; |
---|
| 1342 | + } |
---|
| 1343 | + |
---|
| 1344 | + local_irq_disable(); |
---|
| 1345 | + arch_spin_lock(&tr->max_lock); |
---|
| 1346 | + tr->cond_snapshot = cond_snapshot; |
---|
| 1347 | + arch_spin_unlock(&tr->max_lock); |
---|
| 1348 | + local_irq_enable(); |
---|
| 1349 | + |
---|
| 1350 | + mutex_unlock(&trace_types_lock); |
---|
| 1351 | + |
---|
| 1352 | + return ret; |
---|
| 1353 | + |
---|
| 1354 | + fail_unlock: |
---|
| 1355 | + mutex_unlock(&trace_types_lock); |
---|
| 1356 | + kfree(cond_snapshot); |
---|
| 1357 | + return ret; |
---|
| 1358 | +} |
---|
| 1359 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); |
---|
| 1360 | + |
---|
| 1361 | +/** |
---|
| 1362 | + * tracing_snapshot_cond_disable - disable conditional snapshot for an instance |
---|
| 1363 | + * @tr: The tracing instance |
---|
| 1364 | + * |
---|
| 1365 | + * Check whether the conditional snapshot for the given instance is |
---|
| 1366 | + * enabled; if so, free the cond_snapshot associated with it, |
---|
| 1367 | + * otherwise return -EINVAL. |
---|
| 1368 | + * |
---|
| 1369 | + * Returns 0 if successful, error otherwise. |
---|
| 1370 | + */ |
---|
| 1371 | +int tracing_snapshot_cond_disable(struct trace_array *tr) |
---|
| 1372 | +{ |
---|
| 1373 | + int ret = 0; |
---|
| 1374 | + |
---|
| 1375 | + local_irq_disable(); |
---|
| 1376 | + arch_spin_lock(&tr->max_lock); |
---|
| 1377 | + |
---|
| 1378 | + if (!tr->cond_snapshot) |
---|
| 1379 | + ret = -EINVAL; |
---|
| 1380 | + else { |
---|
| 1381 | + kfree(tr->cond_snapshot); |
---|
| 1382 | + tr->cond_snapshot = NULL; |
---|
| 1383 | + } |
---|
| 1384 | + |
---|
| 1385 | + arch_spin_unlock(&tr->max_lock); |
---|
| 1386 | + local_irq_enable(); |
---|
| 1387 | + |
---|
| 1388 | + return ret; |
---|
| 1389 | +} |
---|
| 1390 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); |
---|
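Taken together, the functions above give an in-kernel user (the histogram snapshot triggers are the in-tree consumer) an enable/trigger/inspect/disable cycle for conditional snapshots. A hedged sketch follows; the struct, callback and threshold logic are invented for illustration, and only the tracing_snapshot_cond*() calls and the cond_update_fn_t signature come from this file and trace.h:

```c
struct demo_cond {
	u64 hits;
	u64 threshold;
};

/* Runs under tr->max_lock from tracing_snapshot_cond(); return true to take the snapshot. */
static bool demo_update(struct trace_array *tr, void *cond_data)
{
	struct demo_cond *cond = cond_data;

	return ++cond->hits >= cond->threshold;
}

static void demo_cond_snapshot(struct trace_array *tr)
{
	static struct demo_cond cond = { .threshold = 100 };
	struct demo_cond *saved;

	if (tracing_snapshot_cond_enable(tr, &cond, demo_update))
		return;

	/* On a hot path somewhere: only snapshots once demo_update() returns true. */
	tracing_snapshot_cond(tr, &cond);

	/* Later, and never from inside demo_update() itself (see the kerneldoc above): */
	saved = tracing_cond_snapshot_data(tr);
	if (saved)
		pr_info("snapshot armed after %llu hits\n",
			(unsigned long long)saved->hits);

	tracing_snapshot_cond_disable(tr);
}
```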
1035 | 1391 | #else |
---|
1036 | 1392 | void tracing_snapshot(void) |
---|
1037 | 1393 | { |
---|
1038 | 1394 | WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); |
---|
1039 | 1395 | } |
---|
1040 | 1396 | EXPORT_SYMBOL_GPL(tracing_snapshot); |
---|
| 1397 | +void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) |
---|
| 1398 | +{ |
---|
| 1399 | + WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used"); |
---|
| 1400 | +} |
---|
| 1401 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond); |
---|
1041 | 1402 | int tracing_alloc_snapshot(void) |
---|
1042 | 1403 | { |
---|
1043 | 1404 | WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); |
---|
.. | .. |
---|
1050 | 1411 | tracing_snapshot(); |
---|
1051 | 1412 | } |
---|
1052 | 1413 | EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); |
---|
| 1414 | +void *tracing_cond_snapshot_data(struct trace_array *tr) |
---|
| 1415 | +{ |
---|
| 1416 | + return NULL; |
---|
| 1417 | +} |
---|
| 1418 | +EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); |
---|
| 1419 | +int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) |
---|
| 1420 | +{ |
---|
| 1421 | + return -ENODEV; |
---|
| 1422 | +} |
---|
| 1423 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); |
---|
| 1424 | +int tracing_snapshot_cond_disable(struct trace_array *tr) |
---|
| 1425 | +{ |
---|
| 1426 | + return false; |
---|
| 1427 | +} |
---|
| 1428 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); |
---|
1053 | 1429 | #endif /* CONFIG_TRACER_SNAPSHOT */ |
---|
1054 | 1430 | |
---|
1055 | 1431 | void tracer_tracing_off(struct trace_array *tr) |
---|
1056 | 1432 | { |
---|
1057 | | - if (tr->trace_buffer.buffer) |
---|
1058 | | - ring_buffer_record_off(tr->trace_buffer.buffer); |
---|
| 1433 | + if (tr->array_buffer.buffer) |
---|
| 1434 | + ring_buffer_record_off(tr->array_buffer.buffer); |
---|
1059 | 1435 | /* |
---|
1060 | 1436 | * This flag is looked at when buffers haven't been allocated |
---|
1061 | 1437 | * yet, or by some tracers (like irqsoff), that just want to |
---|
.. | .. |
---|
1085 | 1461 | |
---|
1086 | 1462 | void disable_trace_on_warning(void) |
---|
1087 | 1463 | { |
---|
1088 | | - if (__disable_trace_on_warning) |
---|
| 1464 | + if (__disable_trace_on_warning) { |
---|
| 1465 | + trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_, |
---|
| 1466 | + "Disabling tracing due to warning\n"); |
---|
1089 | 1467 | tracing_off(); |
---|
| 1468 | + } |
---|
1090 | 1469 | } |
---|
1091 | 1470 | |
---|
1092 | 1471 | /** |
---|
.. | .. |
---|
1097 | 1476 | */ |
---|
1098 | 1477 | bool tracer_tracing_is_on(struct trace_array *tr) |
---|
1099 | 1478 | { |
---|
1100 | | - if (tr->trace_buffer.buffer) |
---|
1101 | | - return ring_buffer_record_is_on(tr->trace_buffer.buffer); |
---|
| 1479 | + if (tr->array_buffer.buffer) |
---|
| 1480 | + return ring_buffer_record_is_on(tr->array_buffer.buffer); |
---|
1102 | 1481 | return !tr->buffer_disabled; |
---|
1103 | 1482 | } |
---|
1104 | 1483 | |
---|
.. | .. |
---|
1118 | 1497 | if (!str) |
---|
1119 | 1498 | return 0; |
---|
1120 | 1499 | buf_size = memparse(str, &str); |
---|
1121 | | - /* nr_entries can not be zero */ |
---|
1122 | | - if (buf_size == 0) |
---|
1123 | | - return 0; |
---|
1124 | | - trace_buf_size = buf_size; |
---|
| 1500 | + /* |
---|
| 1501 | + * nr_entries can not be zero and the startup |
---|
| 1502 | + * tests require some buffer space. Therefore |
---|
| 1503 | + * ensure we have at least 4096 bytes of buffer. |
---|
| 1504 | + */ |
---|
| 1505 | + trace_buf_size = max(4096UL, buf_size); |
---|
1125 | 1506 | return 1; |
---|
1126 | 1507 | } |
---|
1127 | 1508 | __setup("trace_buf_size=", set_buf_size); |
---|
.. | .. |
---|
1315 | 1696 | } |
---|
1316 | 1697 | |
---|
1317 | 1698 | unsigned long __read_mostly tracing_thresh; |
---|
| 1699 | +static const struct file_operations tracing_max_lat_fops; |
---|
| 1700 | + |
---|
| 1701 | +#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \ |
---|
| 1702 | + defined(CONFIG_FSNOTIFY) |
---|
| 1703 | + |
---|
| 1704 | +static struct workqueue_struct *fsnotify_wq; |
---|
| 1705 | + |
---|
| 1706 | +static void latency_fsnotify_workfn(struct work_struct *work) |
---|
| 1707 | +{ |
---|
| 1708 | + struct trace_array *tr = container_of(work, struct trace_array, |
---|
| 1709 | + fsnotify_work); |
---|
| 1710 | + fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); |
---|
| 1711 | +} |
---|
| 1712 | + |
---|
| 1713 | +static void latency_fsnotify_workfn_irq(struct irq_work *iwork) |
---|
| 1714 | +{ |
---|
| 1715 | + struct trace_array *tr = container_of(iwork, struct trace_array, |
---|
| 1716 | + fsnotify_irqwork); |
---|
| 1717 | + queue_work(fsnotify_wq, &tr->fsnotify_work); |
---|
| 1718 | +} |
---|
| 1719 | + |
---|
| 1720 | +static void trace_create_maxlat_file(struct trace_array *tr, |
---|
| 1721 | + struct dentry *d_tracer) |
---|
| 1722 | +{ |
---|
| 1723 | + INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); |
---|
| 1724 | + init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); |
---|
| 1725 | + tr->d_max_latency = trace_create_file("tracing_max_latency", 0644, |
---|
| 1726 | + d_tracer, &tr->max_latency, |
---|
| 1727 | + &tracing_max_lat_fops); |
---|
| 1728 | +} |
---|
| 1729 | + |
---|
| 1730 | +__init static int latency_fsnotify_init(void) |
---|
| 1731 | +{ |
---|
| 1732 | + fsnotify_wq = alloc_workqueue("tr_max_lat_wq", |
---|
| 1733 | + WQ_UNBOUND | WQ_HIGHPRI, 0); |
---|
| 1734 | + if (!fsnotify_wq) { |
---|
| 1735 | + pr_err("Unable to allocate tr_max_lat_wq\n"); |
---|
| 1736 | + return -ENOMEM; |
---|
| 1737 | + } |
---|
| 1738 | + return 0; |
---|
| 1739 | +} |
---|
| 1740 | + |
---|
| 1741 | +late_initcall_sync(latency_fsnotify_init); |
---|
| 1742 | + |
---|
| 1743 | +void latency_fsnotify(struct trace_array *tr) |
---|
| 1744 | +{ |
---|
| 1745 | + if (!fsnotify_wq) |
---|
| 1746 | + return; |
---|
| 1747 | + /* |
---|
| 1748 | + * We cannot call queue_work(&tr->fsnotify_work) from here because it's |
---|
| 1749 | + * possible that we are called from __schedule() or do_idle(), which |
---|
| 1750 | + * could cause a deadlock. |
---|
| 1751 | + */ |
---|
| 1752 | + irq_work_queue(&tr->fsnotify_irqwork); |
---|
| 1753 | +} |
---|
| 1754 | + |
---|
| 1755 | +/* |
---|
| 1756 | + * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \ |
---|
| 1757 | + * defined(CONFIG_FSNOTIFY) |
---|
| 1758 | + */ |
---|
| 1759 | +#else |
---|
| 1760 | + |
---|
| 1761 | +#define trace_create_maxlat_file(tr, d_tracer) \ |
---|
| 1762 | + trace_create_file("tracing_max_latency", 0644, d_tracer, \ |
---|
| 1763 | + &tr->max_latency, &tracing_max_lat_fops) |
---|
| 1764 | + |
---|
| 1765 | +#endif |
---|
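The #else branch keeps the old behaviour (a plain tracefs file, no notification) when neither a latency tracer nor fsnotify support is configured. The notification path above is a two-stage deferral: latency_fsnotify() may run from __schedule() or do_idle(), so it only queues an irq_work, whose handler then queues real work that can safely call fsnotify in process context. The same idiom in miniature, with illustrative names and schedule_work() standing in for the dedicated workqueue:

```c
#include <linux/irq_work.h>
#include <linux/workqueue.h>

struct demo_deferred {
	struct irq_work		iwork;
	struct work_struct	work;
};

static void demo_workfn(struct work_struct *work)
{
	/* process context: may sleep, take mutexes, call fsnotify_inode(), ... */
}

static void demo_irq_workfn(struct irq_work *iwork)
{
	struct demo_deferred *d = container_of(iwork, struct demo_deferred, iwork);

	schedule_work(&d->work);	/* hard irq context: just hand off and return */
}

static void demo_init(struct demo_deferred *d)
{
	init_irq_work(&d->iwork, demo_irq_workfn);
	INIT_WORK(&d->work, demo_workfn);
}

static void demo_poke_from_atomic(struct demo_deferred *d)
{
	irq_work_queue(&d->iwork);	/* safe even from __schedule() or do_idle() */
}
```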
1318 | 1766 | |
---|
1319 | 1767 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
1320 | 1768 | /* |
---|
.. | .. |
---|
1325 | 1773 | static void |
---|
1326 | 1774 | __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
---|
1327 | 1775 | { |
---|
1328 | | - struct trace_buffer *trace_buf = &tr->trace_buffer; |
---|
1329 | | - struct trace_buffer *max_buf = &tr->max_buffer; |
---|
| 1776 | + struct array_buffer *trace_buf = &tr->array_buffer; |
---|
| 1777 | + struct array_buffer *max_buf = &tr->max_buffer; |
---|
1330 | 1778 | struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); |
---|
1331 | 1779 | struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); |
---|
1332 | 1780 | |
---|
.. | .. |
---|
1337 | 1785 | max_data->critical_start = data->critical_start; |
---|
1338 | 1786 | max_data->critical_end = data->critical_end; |
---|
1339 | 1787 | |
---|
1340 | | - memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); |
---|
| 1788 | + strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN); |
---|
1341 | 1789 | max_data->pid = tsk->pid; |
---|
1342 | 1790 | /* |
---|
1343 | 1791 | * If tsk == current, then use current_uid(), as that does not use |
---|
.. | .. |
---|
1354 | 1802 | |
---|
1355 | 1803 | /* record this tasks comm */ |
---|
1356 | 1804 | tracing_record_cmdline(tsk); |
---|
| 1805 | + latency_fsnotify(tr); |
---|
1357 | 1806 | } |
---|
1358 | 1807 | |
---|
1359 | 1808 | /** |
---|
.. | .. |
---|
1361 | 1810 | * @tr: tracer |
---|
1362 | 1811 | * @tsk: the task with the latency |
---|
1363 | 1812 | * @cpu: The cpu that initiated the trace. |
---|
| 1813 | + * @cond_data: User data associated with a conditional snapshot |
---|
1364 | 1814 | * |
---|
1365 | 1815 | * Flip the buffers between the @tr and the max_tr and record information |
---|
1366 | 1816 | * about which task was the cause of this latency. |
---|
1367 | 1817 | */ |
---|
1368 | 1818 | void |
---|
1369 | | -update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
---|
| 1819 | +update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, |
---|
| 1820 | + void *cond_data) |
---|
1370 | 1821 | { |
---|
1371 | 1822 | if (tr->stop_count) |
---|
1372 | 1823 | return; |
---|
.. | .. |
---|
1381 | 1832 | |
---|
1382 | 1833 | arch_spin_lock(&tr->max_lock); |
---|
1383 | 1834 | |
---|
1384 | | - /* Inherit the recordable setting from trace_buffer */ |
---|
1385 | | - if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) |
---|
| 1835 | + /* Inherit the recordable setting from array_buffer */ |
---|
| 1836 | + if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) |
---|
1386 | 1837 | ring_buffer_record_on(tr->max_buffer.buffer); |
---|
1387 | 1838 | else |
---|
1388 | 1839 | ring_buffer_record_off(tr->max_buffer.buffer); |
---|
1389 | 1840 | |
---|
1390 | | - swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); |
---|
| 1841 | +#ifdef CONFIG_TRACER_SNAPSHOT |
---|
| 1842 | + if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) |
---|
| 1843 | + goto out_unlock; |
---|
| 1844 | +#endif |
---|
| 1845 | + swap(tr->array_buffer.buffer, tr->max_buffer.buffer); |
---|
1391 | 1846 | |
---|
1392 | 1847 | __update_max_tr(tr, tsk, cpu); |
---|
| 1848 | + |
---|
| 1849 | + out_unlock: |
---|
1393 | 1850 | arch_spin_unlock(&tr->max_lock); |
---|
1394 | 1851 | } |
---|
1395 | 1852 | |
---|
1396 | 1853 | /** |
---|
1397 | 1854 | * update_max_tr_single - only copy one trace over, and reset the rest |
---|
1398 | | - * @tr - tracer |
---|
1399 | | - * @tsk - task with the latency |
---|
1400 | | - * @cpu - the cpu of the buffer to copy. |
---|
| 1855 | + * @tr: tracer |
---|
| 1856 | + * @tsk: task with the latency |
---|
| 1857 | + * @cpu: the cpu of the buffer to copy. |
---|
1401 | 1858 | * |
---|
1402 | 1859 | * Flip the trace of a single CPU buffer between the @tr and the max_tr. |
---|
1403 | 1860 | */ |
---|
.. | .. |
---|
1418 | 1875 | |
---|
1419 | 1876 | arch_spin_lock(&tr->max_lock); |
---|
1420 | 1877 | |
---|
1421 | | - ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); |
---|
| 1878 | + ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); |
---|
1422 | 1879 | |
---|
1423 | 1880 | if (ret == -EBUSY) { |
---|
1424 | 1881 | /* |
---|
.. | .. |
---|
1426 | 1883 | * place on this CPU. We fail to record, but we reset |
---|
1427 | 1884 | * the max trace buffer (no one writes directly to it) |
---|
1428 | 1885 | * and flag that it failed. |
---|
| 1886 | + * Another reason is resize is in progress. |
---|
1429 | 1887 | */ |
---|
1430 | 1888 | trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, |
---|
1431 | | - "Failed to swap buffers due to commit in progress\n"); |
---|
| 1889 | + "Failed to swap buffers due to commit or resize in progress\n"); |
---|
1432 | 1890 | } |
---|
1433 | 1891 | |
---|
1434 | 1892 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); |
---|
.. | .. |
---|
1438 | 1896 | } |
---|
1439 | 1897 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
---|
1440 | 1898 | |
---|
1441 | | -static int wait_on_pipe(struct trace_iterator *iter, bool full) |
---|
| 1899 | +static int wait_on_pipe(struct trace_iterator *iter, int full) |
---|
1442 | 1900 | { |
---|
1443 | 1901 | /* Iterators are static, they should be filled or empty */ |
---|
1444 | 1902 | if (trace_buffer_iter(iter, iter->cpu_file)) |
---|
1445 | 1903 | return 0; |
---|
1446 | 1904 | |
---|
1447 | | - return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, |
---|
| 1905 | + return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, |
---|
1448 | 1906 | full); |
---|
1449 | 1907 | } |
---|
1450 | 1908 | |
---|
.. | .. |
---|
1495 | 1953 | * internal tracing to verify that everything is in order. |
---|
1496 | 1954 | * If we fail, we do not register this tracer. |
---|
1497 | 1955 | */ |
---|
1498 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
---|
| 1956 | + tracing_reset_online_cpus(&tr->array_buffer); |
---|
1499 | 1957 | |
---|
1500 | 1958 | tr->current_trace = type; |
---|
1501 | 1959 | |
---|
.. | .. |
---|
1521 | 1979 | return -1; |
---|
1522 | 1980 | } |
---|
1523 | 1981 | /* Only reset on passing, to avoid touching corrupted buffers */ |
---|
1524 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
---|
| 1982 | + tracing_reset_online_cpus(&tr->array_buffer); |
---|
1525 | 1983 | |
---|
1526 | 1984 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
1527 | 1985 | if (type->use_max_tr) { |
---|
.. | .. |
---|
1555 | 2013 | |
---|
1556 | 2014 | tracing_selftest_running = true; |
---|
1557 | 2015 | list_for_each_entry_safe(p, n, &postponed_selftests, list) { |
---|
| 2016 | + /* This loop can take minutes when sanitizers are enabled, so |
---|
| 2017 | + * let's make sure we allow RCU processing. |
---|
| 2018 | + */ |
---|
| 2019 | + cond_resched(); |
---|
1558 | 2020 | ret = run_tracer_selftest(p->type); |
---|
1559 | 2021 | /* If the test fails, then warn and remove from available_tracers */ |
---|
1560 | 2022 | if (ret < 0) { |
---|
.. | .. |
---|
1593 | 2055 | |
---|
1594 | 2056 | /** |
---|
1595 | 2057 | * register_tracer - register a tracer with the ftrace system. |
---|
1596 | | - * @type - the plugin for the tracer |
---|
| 2058 | + * @type: the plugin for the tracer |
---|
1597 | 2059 | * |
---|
1598 | 2060 | * Register a new plugin tracer. |
---|
1599 | 2061 | */ |
---|
.. | .. |
---|
1610 | 2072 | if (strlen(type->name) >= MAX_TRACER_SIZE) { |
---|
1611 | 2073 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); |
---|
1612 | 2074 | return -1; |
---|
| 2075 | + } |
---|
| 2076 | + |
---|
| 2077 | + if (security_locked_down(LOCKDOWN_TRACEFS)) { |
---|
| 2078 | + pr_warn("Can not register tracer %s due to lockdown\n", |
---|
| 2079 | + type->name); |
---|
| 2080 | + return -EPERM; |
---|
1613 | 2081 | } |
---|
1614 | 2082 | |
---|
1615 | 2083 | mutex_lock(&trace_types_lock); |
---|
.. | .. |
---|
1670 | 2138 | apply_trace_boot_options(); |
---|
1671 | 2139 | |
---|
1672 | 2140 | /* disable other selftests, since this will break it. */ |
---|
1673 | | - tracing_selftest_disabled = true; |
---|
1674 | | -#ifdef CONFIG_FTRACE_STARTUP_TEST |
---|
1675 | | - printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", |
---|
1676 | | - type->name); |
---|
1677 | | -#endif |
---|
| 2141 | + disable_tracing_selftest("running a tracer"); |
---|
1678 | 2142 | |
---|
1679 | 2143 | out_unlock: |
---|
1680 | 2144 | return ret; |
---|
1681 | 2145 | } |
---|
1682 | 2146 | |
---|
1683 | | -void tracing_reset(struct trace_buffer *buf, int cpu) |
---|
| 2147 | +static void tracing_reset_cpu(struct array_buffer *buf, int cpu) |
---|
1684 | 2148 | { |
---|
1685 | | - struct ring_buffer *buffer = buf->buffer; |
---|
| 2149 | + struct trace_buffer *buffer = buf->buffer; |
---|
1686 | 2150 | |
---|
1687 | 2151 | if (!buffer) |
---|
1688 | 2152 | return; |
---|
.. | .. |
---|
1690 | 2154 | ring_buffer_record_disable(buffer); |
---|
1691 | 2155 | |
---|
1692 | 2156 | /* Make sure all commits have finished */ |
---|
1693 | | - synchronize_sched(); |
---|
| 2157 | + synchronize_rcu(); |
---|
1694 | 2158 | ring_buffer_reset_cpu(buffer, cpu); |
---|
1695 | 2159 | |
---|
1696 | 2160 | ring_buffer_record_enable(buffer); |
---|
1697 | 2161 | } |
---|
1698 | 2162 | |
---|
1699 | | -void tracing_reset_online_cpus(struct trace_buffer *buf) |
---|
| 2163 | +void tracing_reset_online_cpus(struct array_buffer *buf) |
---|
1700 | 2164 | { |
---|
1701 | | - struct ring_buffer *buffer = buf->buffer; |
---|
1702 | | - int cpu; |
---|
| 2165 | + struct trace_buffer *buffer = buf->buffer; |
---|
1703 | 2166 | |
---|
1704 | 2167 | if (!buffer) |
---|
1705 | 2168 | return; |
---|
.. | .. |
---|
1707 | 2170 | ring_buffer_record_disable(buffer); |
---|
1708 | 2171 | |
---|
1709 | 2172 | /* Make sure all commits have finished */ |
---|
1710 | | - synchronize_sched(); |
---|
| 2173 | + synchronize_rcu(); |
---|
1711 | 2174 | |
---|
1712 | 2175 | buf->time_start = buffer_ftrace_now(buf, buf->cpu); |
---|
1713 | 2176 | |
---|
1714 | | - for_each_online_cpu(cpu) |
---|
1715 | | - ring_buffer_reset_cpu(buffer, cpu); |
---|
| 2177 | + ring_buffer_reset_online_cpus(buffer); |
---|
1716 | 2178 | |
---|
1717 | 2179 | ring_buffer_record_enable(buffer); |
---|
1718 | 2180 | } |
---|
1719 | 2181 | |
---|
1720 | 2182 | /* Must have trace_types_lock held */ |
---|
1721 | | -void tracing_reset_all_online_cpus(void) |
---|
| 2183 | +void tracing_reset_all_online_cpus_unlocked(void) |
---|
1722 | 2184 | { |
---|
1723 | 2185 | struct trace_array *tr; |
---|
| 2186 | + |
---|
| 2187 | + lockdep_assert_held(&trace_types_lock); |
---|
1724 | 2188 | |
---|
1725 | 2189 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
---|
1726 | 2190 | if (!tr->clear_trace) |
---|
1727 | 2191 | continue; |
---|
1728 | 2192 | tr->clear_trace = false; |
---|
1729 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
---|
| 2193 | + tracing_reset_online_cpus(&tr->array_buffer); |
---|
1730 | 2194 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
1731 | 2195 | tracing_reset_online_cpus(&tr->max_buffer); |
---|
1732 | 2196 | #endif |
---|
1733 | 2197 | } |
---|
| 2198 | +} |
---|
| 2199 | + |
---|
| 2200 | +void tracing_reset_all_online_cpus(void) |
---|
| 2201 | +{ |
---|
| 2202 | + mutex_lock(&trace_types_lock); |
---|
| 2203 | + tracing_reset_all_online_cpus_unlocked(); |
---|
| 2204 | + mutex_unlock(&trace_types_lock); |
---|
1734 | 2205 | } |
---|
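The split above lets callers that already hold trace_types_lock (such as some event teardown paths) use the _unlocked variant, while every other caller keeps the old locking behaviour; lockdep_assert_held() documents and enforces the contract. The idiom in miniature, with illustrative names:

```c
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static void demo_reset_all_unlocked(void)
{
	lockdep_assert_held(&demo_lock);	/* caller must already hold demo_lock */
	/* ... walk and reset the structures demo_lock protects ... */
}

static void demo_reset_all(void)
{
	mutex_lock(&demo_lock);
	demo_reset_all_unlocked();
	mutex_unlock(&demo_lock);
}
```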
1735 | 2206 | |
---|
1736 | 2207 | /* |
---|
.. | .. |
---|
1744 | 2215 | |
---|
1745 | 2216 | #define SAVED_CMDLINES_DEFAULT 128 |
---|
1746 | 2217 | #define NO_CMDLINE_MAP UINT_MAX |
---|
| 2218 | +/* |
---|
| 2219 | + * Preemption must be disabled before acquiring trace_cmdline_lock. |
---|
| 2220 | + * The various trace_arrays' max_lock must be acquired in a context |
---|
| 2221 | + * where interrupt is disabled. |
---|
| 2222 | + */ |
---|
1747 | 2223 | static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
---|
1748 | 2224 | struct saved_cmdlines_buffer { |
---|
1749 | 2225 | unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; |
---|
.. | .. |
---|
1761 | 2237 | |
---|
1762 | 2238 | static inline void set_cmdline(int idx, const char *cmdline) |
---|
1763 | 2239 | { |
---|
1764 | | - memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); |
---|
| 2240 | + strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); |
---|
1765 | 2241 | } |
---|
1766 | 2242 | |
---|
1767 | 2243 | static int allocate_cmdlines_buffer(unsigned int val, |
---|
.. | .. |
---|
1820 | 2296 | */ |
---|
1821 | 2297 | void tracing_start(void) |
---|
1822 | 2298 | { |
---|
1823 | | - struct ring_buffer *buffer; |
---|
| 2299 | + struct trace_buffer *buffer; |
---|
1824 | 2300 | unsigned long flags; |
---|
1825 | 2301 | |
---|
1826 | 2302 | if (tracing_disabled) |
---|
.. | .. |
---|
1839 | 2315 | /* Prevent the buffers from switching */ |
---|
1840 | 2316 | arch_spin_lock(&global_trace.max_lock); |
---|
1841 | 2317 | |
---|
1842 | | - buffer = global_trace.trace_buffer.buffer; |
---|
| 2318 | + buffer = global_trace.array_buffer.buffer; |
---|
1843 | 2319 | if (buffer) |
---|
1844 | 2320 | ring_buffer_record_enable(buffer); |
---|
1845 | 2321 | |
---|
.. | .. |
---|
1857 | 2333 | |
---|
1858 | 2334 | static void tracing_start_tr(struct trace_array *tr) |
---|
1859 | 2335 | { |
---|
1860 | | - struct ring_buffer *buffer; |
---|
| 2336 | + struct trace_buffer *buffer; |
---|
1861 | 2337 | unsigned long flags; |
---|
1862 | 2338 | |
---|
1863 | 2339 | if (tracing_disabled) |
---|
.. | .. |
---|
1878 | 2354 | goto out; |
---|
1879 | 2355 | } |
---|
1880 | 2356 | |
---|
1881 | | - buffer = tr->trace_buffer.buffer; |
---|
| 2357 | + buffer = tr->array_buffer.buffer; |
---|
1882 | 2358 | if (buffer) |
---|
1883 | 2359 | ring_buffer_record_enable(buffer); |
---|
1884 | 2360 | |
---|
.. | .. |
---|
1894 | 2370 | */ |
---|
1895 | 2371 | void tracing_stop(void) |
---|
1896 | 2372 | { |
---|
1897 | | - struct ring_buffer *buffer; |
---|
| 2373 | + struct trace_buffer *buffer; |
---|
1898 | 2374 | unsigned long flags; |
---|
1899 | 2375 | |
---|
1900 | 2376 | raw_spin_lock_irqsave(&global_trace.start_lock, flags); |
---|
.. | .. |
---|
1904 | 2380 | /* Prevent the buffers from switching */ |
---|
1905 | 2381 | arch_spin_lock(&global_trace.max_lock); |
---|
1906 | 2382 | |
---|
1907 | | - buffer = global_trace.trace_buffer.buffer; |
---|
| 2383 | + buffer = global_trace.array_buffer.buffer; |
---|
1908 | 2384 | if (buffer) |
---|
1909 | 2385 | ring_buffer_record_disable(buffer); |
---|
1910 | 2386 | |
---|
.. | .. |
---|
1922 | 2398 | |
---|
1923 | 2399 | static void tracing_stop_tr(struct trace_array *tr) |
---|
1924 | 2400 | { |
---|
1925 | | - struct ring_buffer *buffer; |
---|
| 2401 | + struct trace_buffer *buffer; |
---|
1926 | 2402 | unsigned long flags; |
---|
1927 | 2403 | |
---|
1928 | 2404 | /* If global, we need to also stop the max tracer */ |
---|
.. | .. |
---|
1933 | 2409 | if (tr->stop_count++) |
---|
1934 | 2410 | goto out; |
---|
1935 | 2411 | |
---|
1936 | | - buffer = tr->trace_buffer.buffer; |
---|
| 2412 | + buffer = tr->array_buffer.buffer; |
---|
1937 | 2413 | if (buffer) |
---|
1938 | 2414 | ring_buffer_record_disable(buffer); |
---|
1939 | 2415 | |
---|
.. | .. |
---|
1956 | 2432 | * the lock, but we also don't want to spin |
---|
1957 | 2433 | * nor do we want to disable interrupts, |
---|
1958 | 2434 | * so if we miss here, then better luck next time. |
---|
| 2435 | + * |
---|
| 2436 | + * This is called within the scheduler and wake up, so interrupts |
---|
| 2437 | + * had better been disabled and run queue lock been held. |
---|
1959 | 2438 | */ |
---|
| 2439 | + lockdep_assert_preemption_disabled(); |
---|
1960 | 2440 | if (!arch_spin_trylock(&trace_cmdline_lock)) |
---|
1961 | 2441 | return 0; |
---|
1962 | 2442 | |
---|
.. | .. |
---|
2064 | 2544 | /** |
---|
2065 | 2545 | * tracing_record_taskinfo - record the task info of a task |
---|
2066 | 2546 | * |
---|
2067 | | - * @task - task to record |
---|
2068 | | - * @flags - TRACE_RECORD_CMDLINE for recording comm |
---|
2069 | | - * - TRACE_RECORD_TGID for recording tgid |
---|
| 2547 | + * @task: task to record |
---|
| 2548 | + * @flags: TRACE_RECORD_CMDLINE for recording comm |
---|
| 2549 | + * TRACE_RECORD_TGID for recording tgid |
---|
2070 | 2550 | */ |
---|
2071 | 2551 | void tracing_record_taskinfo(struct task_struct *task, int flags) |
---|
2072 | 2552 | { |
---|
.. | .. |
---|
2092 | 2572 | /** |
---|
2093 | 2573 | * tracing_record_taskinfo_sched_switch - record task info for sched_switch |
---|
2094 | 2574 | * |
---|
2095 | | - * @prev - previous task during sched_switch |
---|
2096 | | - * @next - next task during sched_switch |
---|
2097 | | - * @flags - TRACE_RECORD_CMDLINE for recording comm |
---|
2098 | | - * TRACE_RECORD_TGID for recording tgid |
---|
| 2575 | + * @prev: previous task during sched_switch |
---|
| 2576 | + * @next: next task during sched_switch |
---|
| 2577 | + * @flags: TRACE_RECORD_CMDLINE for recording comm |
---|
| 2578 | + * TRACE_RECORD_TGID for recording tgid |
---|
2099 | 2579 | */ |
---|
2100 | 2580 | void tracing_record_taskinfo_sched_switch(struct task_struct *prev, |
---|
2101 | 2581 | struct task_struct *next, int flags) |
---|
.. | .. |
---|
2145 | 2625 | EXPORT_SYMBOL_GPL(trace_handle_return); |
---|
2146 | 2626 | |
---|
2147 | 2627 | void |
---|
2148 | | -tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, |
---|
2149 | | - int pc) |
---|
| 2628 | +tracing_generic_entry_update(struct trace_entry *entry, unsigned short type, |
---|
| 2629 | + unsigned long flags, int pc) |
---|
2150 | 2630 | { |
---|
2151 | 2631 | struct task_struct *tsk = current; |
---|
2152 | 2632 | |
---|
2153 | 2633 | entry->preempt_count = pc & 0xff; |
---|
2154 | | - entry->preempt_lazy_count = preempt_lazy_count(); |
---|
2155 | 2634 | entry->pid = (tsk) ? tsk->pid : 0; |
---|
| 2635 | + entry->type = type; |
---|
2156 | 2636 | entry->flags = |
---|
2157 | 2637 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
---|
2158 | 2638 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
---|
.. | .. |
---|
2162 | 2642 | ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | |
---|
2163 | 2643 | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | |
---|
2164 | 2644 | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | |
---|
2165 | | - (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | |
---|
2166 | | - (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | |
---|
| 2645 | + (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | |
---|
2167 | 2646 | (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); |
---|
2168 | | - |
---|
2169 | | - entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0; |
---|
2170 | 2647 | } |
---|
2171 | 2648 | EXPORT_SYMBOL_GPL(tracing_generic_entry_update); |
---|
2172 | 2649 | |
---|
2173 | 2650 | struct ring_buffer_event * |
---|
2174 | | -trace_buffer_lock_reserve(struct ring_buffer *buffer, |
---|
| 2651 | +trace_buffer_lock_reserve(struct trace_buffer *buffer, |
---|
2175 | 2652 | int type, |
---|
2176 | 2653 | unsigned long len, |
---|
2177 | 2654 | unsigned long flags, int pc) |
---|
.. | .. |
---|
2221 | 2698 | |
---|
2222 | 2699 | preempt_disable(); |
---|
2223 | 2700 | if (cpu == smp_processor_id() && |
---|
2224 | | - this_cpu_read(trace_buffered_event) != |
---|
| 2701 | + __this_cpu_read(trace_buffered_event) != |
---|
2225 | 2702 | per_cpu(trace_buffered_event, cpu)) |
---|
2226 | 2703 | WARN_ON_ONCE(1); |
---|
2227 | 2704 | preempt_enable(); |
---|
.. | .. |
---|
2271 | 2748 | preempt_enable(); |
---|
2272 | 2749 | |
---|
2273 | 2750 | /* Wait for all current users to finish */ |
---|
2274 | | - synchronize_sched(); |
---|
| 2751 | + synchronize_rcu(); |
---|
2275 | 2752 | |
---|
2276 | 2753 | for_each_tracing_cpu(cpu) { |
---|
2277 | 2754 | free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); |
---|
.. | .. |
---|
2290 | 2767 | preempt_enable(); |
---|
2291 | 2768 | } |
---|
2292 | 2769 | |
---|
2293 | | -static struct ring_buffer *temp_buffer; |
---|
| 2770 | +static struct trace_buffer *temp_buffer; |
---|
2294 | 2771 | |
---|
2295 | 2772 | struct ring_buffer_event * |
---|
2296 | | -trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, |
---|
| 2773 | +trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, |
---|
2297 | 2774 | struct trace_event_file *trace_file, |
---|
2298 | 2775 | int type, unsigned long len, |
---|
2299 | 2776 | unsigned long flags, int pc) |
---|
.. | .. |
---|
2301 | 2778 | struct ring_buffer_event *entry; |
---|
2302 | 2779 | int val; |
---|
2303 | 2780 | |
---|
2304 | | - *current_rb = trace_file->tr->trace_buffer.buffer; |
---|
| 2781 | + *current_rb = trace_file->tr->array_buffer.buffer; |
---|
2305 | 2782 | |
---|
2306 | 2783 | if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags & |
---|
2307 | 2784 | (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) && |
---|
.. | .. |
---|
2321 | 2798 | /* |
---|
2322 | 2799 | * If tracing is off, but we have triggers enabled |
---|
2323 | 2800 | * we still need to look at the event data. Use the temp_buffer |
---|
2324 | | - * to store the trace event for the tigger to use. It's recusive |
---|
| 2801 | + * to store the trace event for the trigger to use. It's recursive |
---|
2325 | 2802 | * safe and will not be recorded anywhere. |
---|
2326 | 2803 | */ |
---|
2327 | 2804 | if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { |
---|
.. | .. |
---|
2333 | 2810 | } |
---|
2334 | 2811 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); |
---|
2335 | 2812 | |
---|
2336 | | -static DEFINE_SPINLOCK(tracepoint_iter_lock); |
---|
| 2813 | +static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock); |
---|
2337 | 2814 | static DEFINE_MUTEX(tracepoint_printk_mutex); |
---|
2338 | 2815 | |
---|
2339 | 2816 | static void output_printk(struct trace_event_buffer *fbuffer) |
---|
2340 | 2817 | { |
---|
2341 | 2818 | struct trace_event_call *event_call; |
---|
| 2819 | + struct trace_event_file *file; |
---|
2342 | 2820 | struct trace_event *event; |
---|
2343 | 2821 | unsigned long flags; |
---|
2344 | 2822 | struct trace_iterator *iter = tracepoint_print_iter; |
---|
.. | .. |
---|
2352 | 2830 | !event_call->event.funcs->trace) |
---|
2353 | 2831 | return; |
---|
2354 | 2832 | |
---|
| 2833 | + file = fbuffer->trace_file; |
---|
| 2834 | + if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || |
---|
| 2835 | + (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && |
---|
| 2836 | + !filter_match_preds(file->filter, fbuffer->entry))) |
---|
| 2837 | + return; |
---|
| 2838 | + |
---|
2355 | 2839 | event = &fbuffer->trace_file->event_call->event; |
---|
2356 | 2840 | |
---|
2357 | | - spin_lock_irqsave(&tracepoint_iter_lock, flags); |
---|
| 2841 | + raw_spin_lock_irqsave(&tracepoint_iter_lock, flags); |
---|
2358 | 2842 | trace_seq_init(&iter->seq); |
---|
2359 | 2843 | iter->ent = fbuffer->entry; |
---|
2360 | 2844 | event_call->event.funcs->trace(iter, 0, event); |
---|
2361 | 2845 | trace_seq_putc(&iter->seq, 0); |
---|
2362 | 2846 | printk("%s", iter->seq.buffer); |
---|
2363 | 2847 | |
---|
2364 | | - spin_unlock_irqrestore(&tracepoint_iter_lock, flags); |
---|
| 2848 | + raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); |
---|
2365 | 2849 | } |
---|
2366 | 2850 | |
---|
2367 | 2851 | int tracepoint_printk_sysctl(struct ctl_table *table, int write, |
---|
2368 | | - void __user *buffer, size_t *lenp, |
---|
| 2852 | + void *buffer, size_t *lenp, |
---|
2369 | 2853 | loff_t *ppos) |
---|
2370 | 2854 | { |
---|
2371 | 2855 | int save_tracepoint_printk; |
---|
.. | .. |
---|
2402 | 2886 | if (static_key_false(&tracepoint_printk_key.key)) |
---|
2403 | 2887 | output_printk(fbuffer); |
---|
2404 | 2888 | |
---|
2405 | | - event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer, |
---|
| 2889 | + if (static_branch_unlikely(&trace_event_exports_enabled)) |
---|
| 2890 | + ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); |
---|
| 2891 | + event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer, |
---|
2406 | 2892 | fbuffer->event, fbuffer->entry, |
---|
2407 | | - fbuffer->flags, fbuffer->pc); |
---|
| 2893 | + fbuffer->flags, fbuffer->pc, fbuffer->regs); |
---|
2408 | 2894 | } |
---|
2409 | 2895 | EXPORT_SYMBOL_GPL(trace_event_buffer_commit); |
---|
2410 | 2896 | |
---|
.. | .. |
---|
2418 | 2904 | # define STACK_SKIP 3 |
---|
2419 | 2905 | |
---|
2420 | 2906 | void trace_buffer_unlock_commit_regs(struct trace_array *tr, |
---|
2421 | | - struct ring_buffer *buffer, |
---|
| 2907 | + struct trace_buffer *buffer, |
---|
2422 | 2908 | struct ring_buffer_event *event, |
---|
2423 | 2909 | unsigned long flags, int pc, |
---|
2424 | 2910 | struct pt_regs *regs) |
---|
.. | .. |
---|
2439 | 2925 | * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. |
---|
2440 | 2926 | */ |
---|
2441 | 2927 | void |
---|
2442 | | -trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, |
---|
| 2928 | +trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, |
---|
2443 | 2929 | struct ring_buffer_event *event) |
---|
2444 | 2930 | { |
---|
2445 | 2931 | __buffer_unlock_commit(buffer, event); |
---|
2446 | 2932 | } |
---|
2447 | | - |
---|
2448 | | -static void |
---|
2449 | | -trace_process_export(struct trace_export *export, |
---|
2450 | | - struct ring_buffer_event *event) |
---|
2451 | | -{ |
---|
2452 | | - struct trace_entry *entry; |
---|
2453 | | - unsigned int size = 0; |
---|
2454 | | - |
---|
2455 | | - entry = ring_buffer_event_data(event); |
---|
2456 | | - size = ring_buffer_event_length(event); |
---|
2457 | | - export->write(export, entry, size); |
---|
2458 | | -} |
---|
2459 | | - |
---|
2460 | | -static DEFINE_MUTEX(ftrace_export_lock); |
---|
2461 | | - |
---|
2462 | | -static struct trace_export __rcu *ftrace_exports_list __read_mostly; |
---|
2463 | | - |
---|
2464 | | -static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled); |
---|
2465 | | - |
---|
2466 | | -static inline void ftrace_exports_enable(void) |
---|
2467 | | -{ |
---|
2468 | | - static_branch_enable(&ftrace_exports_enabled); |
---|
2469 | | -} |
---|
2470 | | - |
---|
2471 | | -static inline void ftrace_exports_disable(void) |
---|
2472 | | -{ |
---|
2473 | | - static_branch_disable(&ftrace_exports_enabled); |
---|
2474 | | -} |
---|
2475 | | - |
---|
2476 | | -void ftrace_exports(struct ring_buffer_event *event) |
---|
2477 | | -{ |
---|
2478 | | - struct trace_export *export; |
---|
2479 | | - |
---|
2480 | | - preempt_disable_notrace(); |
---|
2481 | | - |
---|
2482 | | - export = rcu_dereference_raw_notrace(ftrace_exports_list); |
---|
2483 | | - while (export) { |
---|
2484 | | - trace_process_export(export, event); |
---|
2485 | | - export = rcu_dereference_raw_notrace(export->next); |
---|
2486 | | - } |
---|
2487 | | - |
---|
2488 | | - preempt_enable_notrace(); |
---|
2489 | | -} |
---|
2490 | | - |
---|
2491 | | -static inline void |
---|
2492 | | -add_trace_export(struct trace_export **list, struct trace_export *export) |
---|
2493 | | -{ |
---|
2494 | | - rcu_assign_pointer(export->next, *list); |
---|
2495 | | - /* |
---|
2496 | | - * We are entering export into the list but another |
---|
2497 | | - * CPU might be walking that list. We need to make sure |
---|
2498 | | - * the export->next pointer is valid before another CPU sees |
---|
2499 | | - * the export pointer included into the list. |
---|
2500 | | - */ |
---|
2501 | | - rcu_assign_pointer(*list, export); |
---|
2502 | | -} |
---|
2503 | | - |
---|
2504 | | -static inline int |
---|
2505 | | -rm_trace_export(struct trace_export **list, struct trace_export *export) |
---|
2506 | | -{ |
---|
2507 | | - struct trace_export **p; |
---|
2508 | | - |
---|
2509 | | - for (p = list; *p != NULL; p = &(*p)->next) |
---|
2510 | | - if (*p == export) |
---|
2511 | | - break; |
---|
2512 | | - |
---|
2513 | | - if (*p != export) |
---|
2514 | | - return -1; |
---|
2515 | | - |
---|
2516 | | - rcu_assign_pointer(*p, (*p)->next); |
---|
2517 | | - |
---|
2518 | | - return 0; |
---|
2519 | | -} |
---|
2520 | | - |
---|
2521 | | -static inline void |
---|
2522 | | -add_ftrace_export(struct trace_export **list, struct trace_export *export) |
---|
2523 | | -{ |
---|
2524 | | - if (*list == NULL) |
---|
2525 | | - ftrace_exports_enable(); |
---|
2526 | | - |
---|
2527 | | - add_trace_export(list, export); |
---|
2528 | | -} |
---|
2529 | | - |
---|
2530 | | -static inline int |
---|
2531 | | -rm_ftrace_export(struct trace_export **list, struct trace_export *export) |
---|
2532 | | -{ |
---|
2533 | | - int ret; |
---|
2534 | | - |
---|
2535 | | - ret = rm_trace_export(list, export); |
---|
2536 | | - if (*list == NULL) |
---|
2537 | | - ftrace_exports_disable(); |
---|
2538 | | - |
---|
2539 | | - return ret; |
---|
2540 | | -} |
---|
2541 | | - |
---|
2542 | | -int register_ftrace_export(struct trace_export *export) |
---|
2543 | | -{ |
---|
2544 | | - if (WARN_ON_ONCE(!export->write)) |
---|
2545 | | - return -1; |
---|
2546 | | - |
---|
2547 | | - mutex_lock(&ftrace_export_lock); |
---|
2548 | | - |
---|
2549 | | - add_ftrace_export(&ftrace_exports_list, export); |
---|
2550 | | - |
---|
2551 | | - mutex_unlock(&ftrace_export_lock); |
---|
2552 | | - |
---|
2553 | | - return 0; |
---|
2554 | | -} |
---|
2555 | | -EXPORT_SYMBOL_GPL(register_ftrace_export); |
---|
2556 | | - |
---|
2557 | | -int unregister_ftrace_export(struct trace_export *export) |
---|
2558 | | -{ |
---|
2559 | | - int ret; |
---|
2560 | | - |
---|
2561 | | - mutex_lock(&ftrace_export_lock); |
---|
2562 | | - |
---|
2563 | | - ret = rm_ftrace_export(&ftrace_exports_list, export); |
---|
2564 | | - |
---|
2565 | | - mutex_unlock(&ftrace_export_lock); |
---|
2566 | | - |
---|
2567 | | - return ret; |
---|
2568 | | -} |
---|
2569 | | -EXPORT_SYMBOL_GPL(unregister_ftrace_export); |
---|
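The export hooks removed here are reworked around per-class flags (the flag-aware trace_process_export() appears earlier in the patch). Below is a minimal sketch of an exporter against that flag-based API; the struct layout and TRACE_EXPORT_* bits are assumed to follow include/linux/trace.h, and register_ftrace_export()/unregister_ftrace_export() are assumed to keep their roles in the relocated code.

```c
#include <linux/trace.h>

/* copy each exported record somewhere out of the kernel, e.g. a device */
static void my_export_write(struct trace_export *export,
			    const void *entry, unsigned int size)
{
	/* 'entry' is the raw trace entry, 'size' its length in bytes */
}

static struct trace_export my_export = {
	.write	= my_export_write,
	/* only function and trace-event records are handed to ->write() */
	.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
};

/* register_ftrace_export(&my_export) hooks it into the export list;
 * unregister_ftrace_export(&my_export) removes it again. */
```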
2570 | 2933 | |
---|
2571 | 2934 | void |
---|
2572 | 2935 | trace_function(struct trace_array *tr, |
---|
.. | .. |
---|
2574 | 2937 | int pc) |
---|
2575 | 2938 | { |
---|
2576 | 2939 | struct trace_event_call *call = &event_function; |
---|
2577 | | - struct ring_buffer *buffer = tr->trace_buffer.buffer; |
---|
| 2940 | + struct trace_buffer *buffer = tr->array_buffer.buffer; |
---|
2578 | 2941 | struct ring_buffer_event *event; |
---|
2579 | 2942 | struct ftrace_entry *entry; |
---|
2580 | 2943 | |
---|
.. | .. |
---|
2587 | 2950 | entry->parent_ip = parent_ip; |
---|
2588 | 2951 | |
---|
2589 | 2952 | if (!call_filter_check_discard(call, entry, buffer, event)) { |
---|
2590 | | - if (static_branch_unlikely(&ftrace_exports_enabled)) |
---|
2591 | | - ftrace_exports(event); |
---|
| 2953 | + if (static_branch_unlikely(&trace_function_exports_enabled)) |
---|
| 2954 | + ftrace_exports(event, TRACE_EXPORT_FUNCTION); |
---|
2592 | 2955 | __buffer_unlock_commit(buffer, event); |
---|
2593 | 2956 | } |
---|
2594 | 2957 | } |
---|
2595 | 2958 | |
---|
2596 | 2959 | #ifdef CONFIG_STACKTRACE |
---|
2597 | 2960 | |
---|
2598 | | -#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) |
---|
| 2961 | +/* Allow 4 levels of nesting: normal, softirq, irq, NMI */ |
---|
| 2962 | +#define FTRACE_KSTACK_NESTING 4 |
---|
| 2963 | + |
---|
| 2964 | +#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING) |
---|
| 2965 | + |
---|
2599 | 2966 | struct ftrace_stack { |
---|
2600 | | - unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; |
---|
| 2967 | + unsigned long calls[FTRACE_KSTACK_ENTRIES]; |
---|
2601 | 2968 | }; |
---|
2602 | 2969 | |
---|
2603 | | -static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); |
---|
| 2970 | + |
---|
| 2971 | +struct ftrace_stacks { |
---|
| 2972 | + struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; |
---|
| 2973 | +}; |
---|
| 2974 | + |
---|
| 2975 | +static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); |
---|
2604 | 2976 | static DEFINE_PER_CPU(int, ftrace_stack_reserve); |
---|
2605 | 2977 | |
---|
2606 | | -static void __ftrace_trace_stack(struct ring_buffer *buffer, |
---|
| 2978 | +static void __ftrace_trace_stack(struct trace_buffer *buffer, |
---|
2607 | 2979 | unsigned long flags, |
---|
2608 | 2980 | int skip, int pc, struct pt_regs *regs) |
---|
2609 | 2981 | { |
---|
2610 | 2982 | struct trace_event_call *call = &event_kernel_stack; |
---|
2611 | 2983 | struct ring_buffer_event *event; |
---|
| 2984 | + unsigned int size, nr_entries; |
---|
| 2985 | + struct ftrace_stack *fstack; |
---|
2612 | 2986 | struct stack_entry *entry; |
---|
2613 | | - struct stack_trace trace; |
---|
2614 | | - int use_stack; |
---|
2615 | | - int size = FTRACE_STACK_ENTRIES; |
---|
2616 | | - |
---|
2617 | | - trace.nr_entries = 0; |
---|
2618 | | - trace.skip = skip; |
---|
| 2987 | + int stackidx; |
---|
2619 | 2988 | |
---|
2620 | 2989 | /* |
---|
2621 | 2990 | * Add one, for this function and the call to save_stack_trace() |
---|
.. | .. |
---|
2623 | 2992 | */ |
---|
2624 | 2993 | #ifndef CONFIG_UNWINDER_ORC |
---|
2625 | 2994 | if (!regs) |
---|
2626 | | - trace.skip++; |
---|
| 2995 | + skip++; |
---|
2627 | 2996 | #endif |
---|
2628 | 2997 | |
---|
2629 | | - /* |
---|
2630 | | - * Since events can happen in NMIs there's no safe way to |
---|
2631 | | - * use the per cpu ftrace_stacks. We reserve it and if an interrupt |
---|
2632 | | - * or NMI comes in, it will just have to use the default |
---|
2633 | | - * FTRACE_STACK_SIZE. |
---|
2634 | | - */ |
---|
2635 | 2998 | preempt_disable_notrace(); |
---|
2636 | 2999 | |
---|
2637 | | - use_stack = __this_cpu_inc_return(ftrace_stack_reserve); |
---|
| 3000 | + stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; |
---|
| 3001 | + |
---|
| 3002 | + /* This should never happen. If it does, yell once and skip */ |
---|
| 3003 | + if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) |
---|
| 3004 | + goto out; |
---|
| 3005 | + |
---|
2638 | 3006 | /* |
---|
2639 | | - * We don't need any atomic variables, just a barrier. |
---|
2640 | | - * If an interrupt comes in, we don't care, because it would |
---|
2641 | | - * have exited and put the counter back to what we want. |
---|
2642 | | - * We just need a barrier to keep gcc from moving things |
---|
2643 | | - * around. |
---|
| 3007 | + * The above __this_cpu_inc_return() is 'atomic' cpu local. An |
---|
| 3008 | + * interrupt will either see the value pre increment or post |
---|
| 3009 | + * increment. If the interrupt happens pre increment it will have |
---|
| 3010 | + * restored the counter when it returns. We just need a barrier to |
---|
| 3011 | + * keep gcc from moving things around. |
---|
2644 | 3012 | */ |
---|
2645 | 3013 | barrier(); |
---|
2646 | | - if (use_stack == 1) { |
---|
2647 | | - trace.entries = this_cpu_ptr(ftrace_stack.calls); |
---|
2648 | | - trace.max_entries = FTRACE_STACK_MAX_ENTRIES; |
---|
2649 | 3014 | |
---|
2650 | | - if (regs) |
---|
2651 | | - save_stack_trace_regs(regs, &trace); |
---|
2652 | | - else |
---|
2653 | | - save_stack_trace(&trace); |
---|
| 3015 | + fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx; |
---|
| 3016 | + size = ARRAY_SIZE(fstack->calls); |
---|
2654 | 3017 | |
---|
2655 | | - if (trace.nr_entries > size) |
---|
2656 | | - size = trace.nr_entries; |
---|
2657 | | - } else |
---|
2658 | | - /* From now on, use_stack is a boolean */ |
---|
2659 | | - use_stack = 0; |
---|
| 3018 | + if (regs) { |
---|
| 3019 | + nr_entries = stack_trace_save_regs(regs, fstack->calls, |
---|
| 3020 | + size, skip); |
---|
| 3021 | + } else { |
---|
| 3022 | + nr_entries = stack_trace_save(fstack->calls, size, skip); |
---|
| 3023 | + } |
---|
2660 | 3024 | |
---|
2661 | | - size *= sizeof(unsigned long); |
---|
2662 | | - |
---|
| 3025 | + size = nr_entries * sizeof(unsigned long); |
---|
2663 | 3026 | event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, |
---|
2664 | 3027 | (sizeof(*entry) - sizeof(entry->caller)) + size, |
---|
2665 | 3028 | flags, pc); |
---|
.. | .. |
---|
2667 | 3030 | goto out; |
---|
2668 | 3031 | entry = ring_buffer_event_data(event); |
---|
2669 | 3032 | |
---|
2670 | | - memset(&entry->caller, 0, size); |
---|
2671 | | - |
---|
2672 | | - if (use_stack) |
---|
2673 | | - memcpy(&entry->caller, trace.entries, |
---|
2674 | | - trace.nr_entries * sizeof(unsigned long)); |
---|
2675 | | - else { |
---|
2676 | | - trace.max_entries = FTRACE_STACK_ENTRIES; |
---|
2677 | | - trace.entries = entry->caller; |
---|
2678 | | - if (regs) |
---|
2679 | | - save_stack_trace_regs(regs, &trace); |
---|
2680 | | - else |
---|
2681 | | - save_stack_trace(&trace); |
---|
2682 | | - } |
---|
2683 | | - |
---|
2684 | | - entry->size = trace.nr_entries; |
---|
| 3033 | + memcpy(&entry->caller, fstack->calls, size); |
---|
| 3034 | + entry->size = nr_entries; |
---|
2685 | 3035 | |
---|
2686 | 3036 | if (!call_filter_check_discard(call, entry, buffer, event)) |
---|
2687 | 3037 | __buffer_unlock_commit(buffer, event); |
---|
.. | .. |
---|
2695 | 3045 | } |
---|
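The per-CPU storage above replaces the single shared stack buffer with four independent nesting levels (task, softirq, irq, NMI), indexed through ftrace_stack_reserve. A rough sizing sketch, assuming 4 KiB pages and 8-byte longs (both are architecture-dependent assumptions):

```c
#include <stdio.h>

#define PAGE_SIZE		4096UL	/* assumption: 4 KiB pages */
#define FTRACE_KSTACK_NESTING	4
#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)

int main(void)
{
	unsigned long per_level = FTRACE_KSTACK_ENTRIES * sizeof(unsigned long);

	printf("entries per nesting level: %lu\n", FTRACE_KSTACK_ENTRIES); /* 1024 */
	printf("bytes per nesting level  : %lu\n", per_level);             /* 8192 on LP64 */
	printf("bytes per CPU            : %lu\n",
	       per_level * FTRACE_KSTACK_NESTING);                         /* 32768 */
	return 0;
}
```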
2696 | 3046 | |
---|
2697 | 3047 | static inline void ftrace_trace_stack(struct trace_array *tr, |
---|
2698 | | - struct ring_buffer *buffer, |
---|
| 3048 | + struct trace_buffer *buffer, |
---|
2699 | 3049 | unsigned long flags, |
---|
2700 | 3050 | int skip, int pc, struct pt_regs *regs) |
---|
2701 | 3051 | { |
---|
.. | .. |
---|
2708 | 3058 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
---|
2709 | 3059 | int pc) |
---|
2710 | 3060 | { |
---|
2711 | | - struct ring_buffer *buffer = tr->trace_buffer.buffer; |
---|
| 3061 | + struct trace_buffer *buffer = tr->array_buffer.buffer; |
---|
2712 | 3062 | |
---|
2713 | 3063 | if (rcu_is_watching()) { |
---|
2714 | 3064 | __ftrace_trace_stack(buffer, flags, skip, pc, NULL); |
---|
.. | .. |
---|
2746 | 3096 | /* Skip 1 to skip this function. */ |
---|
2747 | 3097 | skip++; |
---|
2748 | 3098 | #endif |
---|
2749 | | - __ftrace_trace_stack(global_trace.trace_buffer.buffer, |
---|
| 3099 | + __ftrace_trace_stack(global_trace.array_buffer.buffer, |
---|
2750 | 3100 | flags, skip, preempt_count(), NULL); |
---|
2751 | 3101 | } |
---|
| 3102 | +EXPORT_SYMBOL_GPL(trace_dump_stack); |
---|
2752 | 3103 | |
---|
| 3104 | +#ifdef CONFIG_USER_STACKTRACE_SUPPORT |
---|
2753 | 3105 | static DEFINE_PER_CPU(int, user_stack_count); |
---|
2754 | 3106 | |
---|
2755 | | -void |
---|
| 3107 | +static void |
---|
2756 | 3108 | ftrace_trace_userstack(struct trace_array *tr, |
---|
2757 | | - struct ring_buffer *buffer, unsigned long flags, int pc) |
---|
| 3109 | + struct trace_buffer *buffer, unsigned long flags, int pc) |
---|
2758 | 3110 | { |
---|
2759 | 3111 | struct trace_event_call *call = &event_user_stack; |
---|
2760 | 3112 | struct ring_buffer_event *event; |
---|
2761 | 3113 | struct userstack_entry *entry; |
---|
2762 | | - struct stack_trace trace; |
---|
2763 | 3114 | |
---|
2764 | 3115 | if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) |
---|
2765 | 3116 | return; |
---|
.. | .. |
---|
2790 | 3141 | entry->tgid = current->tgid; |
---|
2791 | 3142 | memset(&entry->caller, 0, sizeof(entry->caller)); |
---|
2792 | 3143 | |
---|
2793 | | - trace.nr_entries = 0; |
---|
2794 | | - trace.max_entries = FTRACE_STACK_ENTRIES; |
---|
2795 | | - trace.skip = 0; |
---|
2796 | | - trace.entries = entry->caller; |
---|
2797 | | - |
---|
2798 | | - save_stack_trace_user(&trace); |
---|
| 3144 | + stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); |
---|
2799 | 3145 | if (!call_filter_check_discard(call, entry, buffer, event)) |
---|
2800 | 3146 | __buffer_unlock_commit(buffer, event); |
---|
2801 | 3147 | |
---|
.. | .. |
---|
2804 | 3150 | out: |
---|
2805 | 3151 | preempt_enable(); |
---|
2806 | 3152 | } |
---|
2807 | | - |
---|
2808 | | -#ifdef UNUSED |
---|
2809 | | -static void __trace_userstack(struct trace_array *tr, unsigned long flags) |
---|
| 3153 | +#else /* CONFIG_USER_STACKTRACE_SUPPORT */ |
---|
| 3154 | +static void ftrace_trace_userstack(struct trace_array *tr, |
---|
| 3155 | + struct trace_buffer *buffer, |
---|
| 3156 | + unsigned long flags, int pc) |
---|
2810 | 3157 | { |
---|
2811 | | - ftrace_trace_userstack(tr, flags, preempt_count()); |
---|
2812 | 3158 | } |
---|
2813 | | -#endif /* UNUSED */ |
---|
| 3159 | +#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ |
---|
2814 | 3160 | |
---|
2815 | 3161 | #endif /* CONFIG_STACKTRACE */ |
---|
2816 | 3162 | |
---|
.. | .. |
---|
2851 | 3197 | { |
---|
2852 | 3198 | struct trace_buffer_struct __percpu *buffers; |
---|
2853 | 3199 | |
---|
| 3200 | + if (trace_percpu_buffer) |
---|
| 3201 | + return 0; |
---|
| 3202 | + |
---|
2854 | 3203 | buffers = alloc_percpu(struct trace_buffer_struct); |
---|
2855 | | - if (WARN(!buffers, "Could not allocate percpu trace_printk buffer")) |
---|
| 3204 | + if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) |
---|
2856 | 3205 | return -ENOMEM; |
---|
2857 | 3206 | |
---|
2858 | 3207 | trace_percpu_buffer = buffers; |
---|
.. | .. |
---|
2897 | 3246 | * directly here. If the global_trace.buffer is already |
---|
2898 | 3247 | * allocated here, then this was called by module code. |
---|
2899 | 3248 | */ |
---|
2900 | | - if (global_trace.trace_buffer.buffer) |
---|
| 3249 | + if (global_trace.array_buffer.buffer) |
---|
2901 | 3250 | tracing_start_cmdline_record(); |
---|
2902 | 3251 | } |
---|
| 3252 | +EXPORT_SYMBOL_GPL(trace_printk_init_buffers); |
---|
2903 | 3253 | |
---|
2904 | 3254 | void trace_printk_start_comm(void) |
---|
2905 | 3255 | { |
---|
.. | .. |
---|
2922 | 3272 | |
---|
2923 | 3273 | /** |
---|
2924 | 3274 | * trace_vbprintk - write binary msg to tracing buffer |
---|
2925 | | - * |
---|
| 3275 | + * @ip: The address of the caller |
---|
| 3276 | + * @fmt: The string format to write to the buffer |
---|
| 3277 | + * @args: Arguments for @fmt |
---|
2926 | 3278 | */ |
---|
2927 | 3279 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) |
---|
2928 | 3280 | { |
---|
2929 | 3281 | struct trace_event_call *call = &event_bprint; |
---|
2930 | 3282 | struct ring_buffer_event *event; |
---|
2931 | | - struct ring_buffer *buffer; |
---|
| 3283 | + struct trace_buffer *buffer; |
---|
2932 | 3284 | struct trace_array *tr = &global_trace; |
---|
2933 | 3285 | struct bprint_entry *entry; |
---|
2934 | 3286 | unsigned long flags; |
---|
.. | .. |
---|
2953 | 3305 | len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); |
---|
2954 | 3306 | |
---|
2955 | 3307 | if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) |
---|
2956 | | - goto out; |
---|
| 3308 | + goto out_put; |
---|
2957 | 3309 | |
---|
2958 | 3310 | local_save_flags(flags); |
---|
2959 | 3311 | size = sizeof(*entry) + sizeof(u32) * len; |
---|
2960 | | - buffer = tr->trace_buffer.buffer; |
---|
| 3312 | + buffer = tr->array_buffer.buffer; |
---|
| 3313 | + ring_buffer_nest_start(buffer); |
---|
2961 | 3314 | event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, |
---|
2962 | 3315 | flags, pc); |
---|
2963 | 3316 | if (!event) |
---|
.. | .. |
---|
2973 | 3326 | } |
---|
2974 | 3327 | |
---|
2975 | 3328 | out: |
---|
| 3329 | + ring_buffer_nest_end(buffer); |
---|
| 3330 | +out_put: |
---|
2976 | 3331 | put_trace_buf(); |
---|
2977 | 3332 | |
---|
2978 | 3333 | out_nobuffer: |
---|
.. | .. |
---|
2985 | 3340 | |
---|
2986 | 3341 | __printf(3, 0) |
---|
2987 | 3342 | static int |
---|
2988 | | -__trace_array_vprintk(struct ring_buffer *buffer, |
---|
| 3343 | +__trace_array_vprintk(struct trace_buffer *buffer, |
---|
2989 | 3344 | unsigned long ip, const char *fmt, va_list args) |
---|
2990 | 3345 | { |
---|
2991 | 3346 | struct trace_event_call *call = &event_print; |
---|
.. | .. |
---|
3015 | 3370 | |
---|
3016 | 3371 | local_save_flags(flags); |
---|
3017 | 3372 | size = sizeof(*entry) + len + 1; |
---|
| 3373 | + ring_buffer_nest_start(buffer); |
---|
3018 | 3374 | event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
---|
3019 | 3375 | flags, pc); |
---|
3020 | 3376 | if (!event) |
---|
.. | .. |
---|
3029 | 3385 | } |
---|
3030 | 3386 | |
---|
3031 | 3387 | out: |
---|
| 3388 | + ring_buffer_nest_end(buffer); |
---|
3032 | 3389 | put_trace_buf(); |
---|
3033 | 3390 | |
---|
3034 | 3391 | out_nobuffer: |
---|
.. | .. |
---|
3042 | 3399 | int trace_array_vprintk(struct trace_array *tr, |
---|
3043 | 3400 | unsigned long ip, const char *fmt, va_list args) |
---|
3044 | 3401 | { |
---|
3045 | | - return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); |
---|
| 3402 | + return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); |
---|
3046 | 3403 | } |
---|
3047 | 3404 | |
---|
| 3405 | +/** |
---|
| 3406 | + * trace_array_printk - Print a message to a specific instance |
---|
| 3407 | + * @tr: The instance trace_array descriptor |
---|
| 3408 | + * @ip: The instruction pointer that this is called from. |
---|
| 3409 | + * @fmt: The format to print (printf format) |
---|
| 3410 | + * |
---|
| 3411 | + * If a subsystem sets up its own instance, it may printk strings
---|
| 3412 | + * into its tracing instance buffer using this function. Note, this
---|
| 3413 | + * function will not write into the top level buffer (use
---|
| 3414 | + * trace_printk() for that), as the top level buffer should only
---|
| 3415 | + * have events that can be individually disabled.
---|
| 3416 | + * trace_printk() is only used for debugging a kernel, and should
---|
| 3417 | + * never be incorporated in normal use.
---|
| 3418 | + * |
---|
| 3419 | + * trace_array_printk() can be used, as it will not add noise to the |
---|
| 3420 | + * top level tracing buffer. |
---|
| 3421 | + * |
---|
| 3422 | + * Note, trace_array_init_printk() must be called on @tr before this |
---|
| 3423 | + * can be used. |
---|
| 3424 | + */ |
---|
3048 | 3425 | __printf(3, 0) |
---|
3049 | 3426 | int trace_array_printk(struct trace_array *tr, |
---|
3050 | 3427 | unsigned long ip, const char *fmt, ...) |
---|
.. | .. |
---|
3052 | 3429 | int ret; |
---|
3053 | 3430 | va_list ap; |
---|
3054 | 3431 | |
---|
3055 | | - if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) |
---|
3056 | | - return 0; |
---|
3057 | | - |
---|
3058 | 3432 | if (!tr) |
---|
3059 | 3433 | return -ENOENT; |
---|
| 3434 | + |
---|
| 3435 | + /* This is only allowed for created instances */ |
---|
| 3436 | + if (tr == &global_trace) |
---|
| 3437 | + return 0; |
---|
| 3438 | + |
---|
| 3439 | + if (!(tr->trace_flags & TRACE_ITER_PRINTK)) |
---|
| 3440 | + return 0; |
---|
3060 | 3441 | |
---|
3061 | 3442 | va_start(ap, fmt); |
---|
3062 | 3443 | ret = trace_array_vprintk(tr, ip, fmt, ap); |
---|
3063 | 3444 | va_end(ap); |
---|
3064 | 3445 | return ret; |
---|
3065 | 3446 | } |
---|
| 3447 | +EXPORT_SYMBOL_GPL(trace_array_printk); |
---|
| 3448 | + |
---|
| 3449 | +/** |
---|
| 3450 | + * trace_array_init_printk - Initialize buffers for trace_array_printk() |
---|
| 3451 | + * @tr: The trace array to initialize the buffers for |
---|
| 3452 | + * |
---|
| 3453 | + * As trace_array_printk() only writes into instances, calls to it are
---|
| 3454 | + * OK to leave in the kernel (unlike trace_printk()). This needs to be called
---|
| 3455 | + * before trace_array_printk() can be used on a trace_array. |
---|
| 3456 | + */ |
---|
| 3457 | +int trace_array_init_printk(struct trace_array *tr) |
---|
| 3458 | +{ |
---|
| 3459 | + if (!tr) |
---|
| 3460 | + return -ENOENT; |
---|
| 3461 | + |
---|
| 3462 | + /* This is only allowed for created instances */ |
---|
| 3463 | + if (tr == &global_trace) |
---|
| 3464 | + return -EINVAL; |
---|
| 3465 | + |
---|
| 3466 | + return alloc_percpu_trace_buffer(); |
---|
| 3467 | +} |
---|
| 3468 | +EXPORT_SYMBOL_GPL(trace_array_init_printk); |
---|
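A minimal sketch of a module driving the two exported calls above. The instance name is illustrative, trace_array_get_by_name()/trace_array_put() are assumed from the instance API rather than shown in this hunk, and error paths are kept short.

```c
#include <linux/module.h>
#include <linux/trace.h>

static struct trace_array *my_tr;

static int __init my_mod_init(void)
{
	/* find or create a dedicated tracefs instance (name is illustrative) */
	my_tr = trace_array_get_by_name("my_instance");
	if (!my_tr)
		return -ENOMEM;

	/* must run once before trace_array_printk() on this instance */
	if (trace_array_init_printk(my_tr)) {
		trace_array_put(my_tr);
		return -ENOMEM;
	}

	trace_array_printk(my_tr, _THIS_IP_, "module loaded\n");
	return 0;
}

static void __exit my_mod_exit(void)
{
	trace_array_put(my_tr);
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");
```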
3066 | 3469 | |
---|
3067 | 3470 | __printf(3, 4) |
---|
3068 | | -int trace_array_printk_buf(struct ring_buffer *buffer, |
---|
| 3471 | +int trace_array_printk_buf(struct trace_buffer *buffer, |
---|
3069 | 3472 | unsigned long ip, const char *fmt, ...) |
---|
3070 | 3473 | { |
---|
3071 | 3474 | int ret; |
---|
.. | .. |
---|
3093 | 3496 | |
---|
3094 | 3497 | iter->idx++; |
---|
3095 | 3498 | if (buf_iter) |
---|
3096 | | - ring_buffer_read(buf_iter, NULL); |
---|
| 3499 | + ring_buffer_iter_advance(buf_iter); |
---|
3097 | 3500 | } |
---|
3098 | 3501 | |
---|
3099 | 3502 | static struct trace_entry * |
---|
.. | .. |
---|
3103 | 3506 | struct ring_buffer_event *event; |
---|
3104 | 3507 | struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); |
---|
3105 | 3508 | |
---|
3106 | | - if (buf_iter) |
---|
| 3509 | + if (buf_iter) { |
---|
3107 | 3510 | event = ring_buffer_iter_peek(buf_iter, ts); |
---|
3108 | | - else |
---|
3109 | | - event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, |
---|
| 3511 | + if (lost_events) |
---|
| 3512 | + *lost_events = ring_buffer_iter_dropped(buf_iter) ? |
---|
| 3513 | + (unsigned long)-1 : 0; |
---|
| 3514 | + } else { |
---|
| 3515 | + event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, |
---|
3110 | 3516 | lost_events); |
---|
| 3517 | + } |
---|
3111 | 3518 | |
---|
3112 | 3519 | if (event) { |
---|
3113 | 3520 | iter->ent_size = ring_buffer_event_length(event); |
---|
.. | .. |
---|
3121 | 3528 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, |
---|
3122 | 3529 | unsigned long *missing_events, u64 *ent_ts) |
---|
3123 | 3530 | { |
---|
3124 | | - struct ring_buffer *buffer = iter->trace_buffer->buffer; |
---|
| 3531 | + struct trace_buffer *buffer = iter->array_buffer->buffer; |
---|
3125 | 3532 | struct trace_entry *ent, *next = NULL; |
---|
3126 | 3533 | unsigned long lost_events = 0, next_lost = 0; |
---|
3127 | 3534 | int cpu_file = iter->cpu_file; |
---|
.. | .. |
---|
3177 | 3584 | return next; |
---|
3178 | 3585 | } |
---|
3179 | 3586 | |
---|
| 3587 | +#define STATIC_TEMP_BUF_SIZE 128 |
---|
| 3588 | +static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4); |
---|
| 3589 | + |
---|
3180 | 3590 | /* Find the next real entry, without updating the iterator itself */ |
---|
3181 | 3591 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
---|
3182 | 3592 | int *ent_cpu, u64 *ent_ts) |
---|
3183 | 3593 | { |
---|
3184 | | - return __find_next_entry(iter, ent_cpu, NULL, ent_ts); |
---|
| 3594 | + /* __find_next_entry will reset ent_size */ |
---|
| 3595 | + int ent_size = iter->ent_size; |
---|
| 3596 | + struct trace_entry *entry; |
---|
| 3597 | + |
---|
| 3598 | + /* |
---|
| 3599 | + * If called from ftrace_dump(), then the iter->temp buffer |
---|
| 3600 | + * will be the static_temp_buf and not created from kmalloc. |
---|
| 3601 | + * If the entry size is greater than the buffer, we can |
---|
| 3602 | + * not save it. Just return NULL in that case. This is only |
---|
| 3603 | + * used to add markers when two consecutive events' time |
---|
| 3604 | + * stamps have a large delta. See trace_print_lat_context() |
---|
| 3605 | + */ |
---|
| 3606 | + if (iter->temp == static_temp_buf && |
---|
| 3607 | + STATIC_TEMP_BUF_SIZE < ent_size) |
---|
| 3608 | + return NULL; |
---|
| 3609 | + |
---|
| 3610 | + /* |
---|
| 3611 | + * The __find_next_entry() may call peek_next_entry(), which may |
---|
| 3612 | + * call ring_buffer_peek() that may make the contents of iter->ent |
---|
| 3613 | + * undefined. Need to copy iter->ent now. |
---|
| 3614 | + */ |
---|
| 3615 | + if (iter->ent && iter->ent != iter->temp) { |
---|
| 3616 | + if ((!iter->temp || iter->temp_size < iter->ent_size) && |
---|
| 3617 | + !WARN_ON_ONCE(iter->temp == static_temp_buf)) { |
---|
| 3618 | + void *temp; |
---|
| 3619 | + temp = kmalloc(iter->ent_size, GFP_KERNEL); |
---|
| 3620 | + if (!temp) |
---|
| 3621 | + return NULL; |
---|
| 3622 | + kfree(iter->temp); |
---|
| 3623 | + iter->temp = temp; |
---|
| 3624 | + iter->temp_size = iter->ent_size; |
---|
| 3625 | + } |
---|
| 3626 | + memcpy(iter->temp, iter->ent, iter->ent_size); |
---|
| 3627 | + iter->ent = iter->temp; |
---|
| 3628 | + } |
---|
| 3629 | + entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); |
---|
| 3630 | + /* Put back the original ent_size */ |
---|
| 3631 | + iter->ent_size = ent_size; |
---|
| 3632 | + |
---|
| 3633 | + return entry; |
---|
3185 | 3634 | } |
---|
3186 | 3635 | |
---|
3187 | 3636 | /* Find the next real entry, and increment the iterator to the next entry */ |
---|
.. | .. |
---|
3198 | 3647 | |
---|
3199 | 3648 | static void trace_consume(struct trace_iterator *iter) |
---|
3200 | 3649 | { |
---|
3201 | | - ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, |
---|
| 3650 | + ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, |
---|
3202 | 3651 | &iter->lost_events); |
---|
3203 | 3652 | } |
---|
3204 | 3653 | |
---|
.. | .. |
---|
3231 | 3680 | |
---|
3232 | 3681 | void tracing_iter_reset(struct trace_iterator *iter, int cpu) |
---|
3233 | 3682 | { |
---|
3234 | | - struct ring_buffer_event *event; |
---|
3235 | 3683 | struct ring_buffer_iter *buf_iter; |
---|
3236 | 3684 | unsigned long entries = 0; |
---|
3237 | 3685 | u64 ts; |
---|
3238 | 3686 | |
---|
3239 | | - per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; |
---|
| 3687 | + per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; |
---|
3240 | 3688 | |
---|
3241 | 3689 | buf_iter = trace_buffer_iter(iter, cpu); |
---|
3242 | 3690 | if (!buf_iter) |
---|
.. | .. |
---|
3249 | 3697 | * that a reset never took place on a cpu. This is evident |
---|
3250 | 3698 | * by the timestamp being before the start of the buffer. |
---|
3251 | 3699 | */ |
---|
3252 | | - while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { |
---|
3253 | | - if (ts >= iter->trace_buffer->time_start) |
---|
| 3700 | + while (ring_buffer_iter_peek(buf_iter, &ts)) { |
---|
| 3701 | + if (ts >= iter->array_buffer->time_start) |
---|
3254 | 3702 | break; |
---|
3255 | 3703 | entries++; |
---|
3256 | | - ring_buffer_read(buf_iter, NULL); |
---|
| 3704 | + ring_buffer_iter_advance(buf_iter); |
---|
3257 | 3705 | } |
---|
3258 | 3706 | |
---|
3259 | | - per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; |
---|
| 3707 | + per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; |
---|
3260 | 3708 | } |
---|
3261 | 3709 | |
---|
3262 | 3710 | /* |
---|
.. | .. |
---|
3279 | 3727 | * will point to the same string as current_trace->name. |
---|
3280 | 3728 | */ |
---|
3281 | 3729 | mutex_lock(&trace_types_lock); |
---|
3282 | | - if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) |
---|
| 3730 | + if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) { |
---|
| 3731 | + /* Close iter->trace before switching to the new current tracer */ |
---|
| 3732 | + if (iter->trace->close) |
---|
| 3733 | + iter->trace->close(iter); |
---|
3283 | 3734 | *iter->trace = *tr->current_trace; |
---|
| 3735 | + /* Reopen the new current tracer */ |
---|
| 3736 | + if (iter->trace->open) |
---|
| 3737 | + iter->trace->open(iter); |
---|
| 3738 | + } |
---|
3284 | 3739 | mutex_unlock(&trace_types_lock); |
---|
3285 | 3740 | |
---|
3286 | 3741 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
.. | .. |
---|
3335 | 3790 | } |
---|
3336 | 3791 | |
---|
3337 | 3792 | static void |
---|
3338 | | -get_total_entries(struct trace_buffer *buf, |
---|
3339 | | - unsigned long *total, unsigned long *entries) |
---|
| 3793 | +get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, |
---|
| 3794 | + unsigned long *entries, int cpu) |
---|
3340 | 3795 | { |
---|
3341 | 3796 | unsigned long count; |
---|
| 3797 | + |
---|
| 3798 | + count = ring_buffer_entries_cpu(buf->buffer, cpu); |
---|
| 3799 | + /* |
---|
| 3800 | + * If this buffer has skipped entries, then we hold all |
---|
| 3801 | + * entries for the trace and we need to ignore the |
---|
| 3802 | + * ones before the time stamp. |
---|
| 3803 | + */ |
---|
| 3804 | + if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { |
---|
| 3805 | + count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; |
---|
| 3806 | + /* total is the same as the entries */ |
---|
| 3807 | + *total = count; |
---|
| 3808 | + } else |
---|
| 3809 | + *total = count + |
---|
| 3810 | + ring_buffer_overrun_cpu(buf->buffer, cpu); |
---|
| 3811 | + *entries = count; |
---|
| 3812 | +} |
---|
| 3813 | + |
---|
| 3814 | +static void |
---|
| 3815 | +get_total_entries(struct array_buffer *buf, |
---|
| 3816 | + unsigned long *total, unsigned long *entries) |
---|
| 3817 | +{ |
---|
| 3818 | + unsigned long t, e; |
---|
3342 | 3819 | int cpu; |
---|
3343 | 3820 | |
---|
3344 | 3821 | *total = 0; |
---|
3345 | 3822 | *entries = 0; |
---|
3346 | 3823 | |
---|
3347 | 3824 | for_each_tracing_cpu(cpu) { |
---|
3348 | | - count = ring_buffer_entries_cpu(buf->buffer, cpu); |
---|
3349 | | - /* |
---|
3350 | | - * If this buffer has skipped entries, then we hold all |
---|
3351 | | - * entries for the trace and we need to ignore the |
---|
3352 | | - * ones before the time stamp. |
---|
3353 | | - */ |
---|
3354 | | - if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { |
---|
3355 | | - count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; |
---|
3356 | | - /* total is the same as the entries */ |
---|
3357 | | - *total += count; |
---|
3358 | | - } else |
---|
3359 | | - *total += count + |
---|
3360 | | - ring_buffer_overrun_cpu(buf->buffer, cpu); |
---|
3361 | | - *entries += count; |
---|
| 3825 | + get_total_entries_cpu(buf, &t, &e, cpu); |
---|
| 3826 | + *total += t; |
---|
| 3827 | + *entries += e; |
---|
3362 | 3828 | } |
---|
| 3829 | +} |
---|
| 3830 | + |
---|
| 3831 | +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) |
---|
| 3832 | +{ |
---|
| 3833 | + unsigned long total, entries; |
---|
| 3834 | + |
---|
| 3835 | + if (!tr) |
---|
| 3836 | + tr = &global_trace; |
---|
| 3837 | + |
---|
| 3838 | + get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); |
---|
| 3839 | + |
---|
| 3840 | + return entries; |
---|
| 3841 | +} |
---|
| 3842 | + |
---|
| 3843 | +unsigned long trace_total_entries(struct trace_array *tr) |
---|
| 3844 | +{ |
---|
| 3845 | + unsigned long total, entries; |
---|
| 3846 | + |
---|
| 3847 | + if (!tr) |
---|
| 3848 | + tr = &global_trace; |
---|
| 3849 | + |
---|
| 3850 | + get_total_entries(&tr->array_buffer, &total, &entries); |
---|
| 3851 | + |
---|
| 3852 | + return entries; |
---|
3363 | 3853 | } |
---|
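A short sketch of how code inside kernel/trace might use the two new helpers; passing NULL falls back to the global trace array, exactly as the implementations above do.

```c
/* assumes it lives in kernel/trace, where "trace.h" declares the helpers */
static void report_buffer_fill(void)
{
	unsigned long total = trace_total_entries(NULL);       /* all CPUs */
	unsigned long cpu0  = trace_total_entries_cpu(NULL, 0);

	pr_info("ftrace buffer: %lu entries total, %lu on CPU 0\n",
		total, cpu0);
}
```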
3364 | 3854 | |
---|
3365 | 3855 | static void print_lat_help_header(struct seq_file *m) |
---|
3366 | 3856 | { |
---|
3367 | | - seq_puts(m, "# _--------=> CPU# \n" |
---|
3368 | | - "# / _-------=> irqs-off \n" |
---|
3369 | | - "# | / _------=> need-resched \n" |
---|
3370 | | - "# || / _-----=> need-resched_lazy \n" |
---|
3371 | | - "# ||| / _----=> hardirq/softirq \n" |
---|
3372 | | - "# |||| / _---=> preempt-depth \n" |
---|
3373 | | - "# ||||| / _--=> preempt-lazy-depth\n" |
---|
3374 | | - "# |||||| / _-=> migrate-disable \n" |
---|
3375 | | - "# ||||||| / delay \n" |
---|
3376 | | - "# cmd pid |||||||| time | caller \n" |
---|
3377 | | - "# \\ / |||||||| \\ | / \n"); |
---|
| 3857 | + seq_puts(m, "# _------=> CPU# \n" |
---|
| 3858 | + "# / _-----=> irqs-off \n" |
---|
| 3859 | + "# | / _----=> need-resched \n" |
---|
| 3860 | + "# || / _---=> hardirq/softirq \n" |
---|
| 3861 | + "# ||| / _--=> preempt-depth \n" |
---|
| 3862 | + "# |||| / delay \n" |
---|
| 3863 | + "# cmd pid ||||| time | caller \n" |
---|
| 3864 | + "# \\ / ||||| \\ | / \n"); |
---|
3378 | 3865 | } |
---|
3379 | 3866 | |
---|
3380 | | -static void print_event_info(struct trace_buffer *buf, struct seq_file *m) |
---|
| 3867 | +static void print_event_info(struct array_buffer *buf, struct seq_file *m) |
---|
3381 | 3868 | { |
---|
3382 | 3869 | unsigned long total; |
---|
3383 | 3870 | unsigned long entries; |
---|
.. | .. |
---|
3388 | 3875 | seq_puts(m, "#\n"); |
---|
3389 | 3876 | } |
---|
3390 | 3877 | |
---|
3391 | | -static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, |
---|
| 3878 | +static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, |
---|
3392 | 3879 | unsigned int flags) |
---|
3393 | 3880 | { |
---|
3394 | 3881 | bool tgid = flags & TRACE_ITER_RECORD_TGID; |
---|
3395 | 3882 | |
---|
3396 | 3883 | print_event_info(buf, m); |
---|
3397 | 3884 | |
---|
3398 | | - seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); |
---|
3399 | | - seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); |
---|
| 3885 | + seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); |
---|
| 3886 | + seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); |
---|
3400 | 3887 | } |
---|
3401 | 3888 | |
---|
3402 | | -static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, |
---|
| 3889 | +static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, |
---|
3403 | 3890 | unsigned int flags) |
---|
3404 | 3891 | { |
---|
3405 | 3892 | bool tgid = flags & TRACE_ITER_RECORD_TGID; |
---|
3406 | | - const char tgid_space[] = " "; |
---|
3407 | | - const char space[] = " "; |
---|
| 3893 | + const char *space = " "; |
---|
| 3894 | + int prec = tgid ? 12 : 2; |
---|
3408 | 3895 | |
---|
3409 | 3896 | print_event_info(buf, m); |
---|
3410 | 3897 | |
---|
3411 | | - seq_printf(m, "# %s _-----=> irqs-off\n", |
---|
3412 | | - tgid ? tgid_space : space); |
---|
3413 | | - seq_printf(m, "# %s / _----=> need-resched\n", |
---|
3414 | | - tgid ? tgid_space : space); |
---|
3415 | | - seq_printf(m, "# %s| / _---=> need-resched_lazy\n", |
---|
3416 | | - tgid ? tgid_space : space); |
---|
3417 | | - seq_printf(m, "# %s|| / _--=> hardirq/softirq\n", |
---|
3418 | | - tgid ? tgid_space : space); |
---|
3419 | | - seq_printf(m, "# %s||| / preempt-depth\n", |
---|
3420 | | - tgid ? tgid_space : space); |
---|
3421 | | - seq_printf(m, "# %s|||| / delay\n", |
---|
3422 | | - tgid ? tgid_space : space); |
---|
3423 | | - seq_printf(m, "# TASK-PID %sCPU# ||||| TIMESTAMP FUNCTION\n", |
---|
3424 | | - tgid ? " TGID " : space); |
---|
3425 | | - seq_printf(m, "# | | %s | ||||| | |\n", |
---|
3426 | | - tgid ? " | " : space); |
---|
| 3898 | + seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space); |
---|
| 3899 | + seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); |
---|
| 3900 | + seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); |
---|
| 3901 | + seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); |
---|
| 3902 | + seq_printf(m, "# %.*s||| / delay\n", prec, space); |
---|
| 3903 | + seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); |
---|
| 3904 | + seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); |
---|
3427 | 3905 | } |
---|
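The rewritten header emits its padding with a single "%.*s" whose precision is 2 or 12, instead of carrying two fully padded copies of every line. A small user-space illustration of the same trick (header text abbreviated):

```c
#include <stdio.h>

int main(void)
{
	const char *space = "            ";	/* at least 12 spaces */
	int prec;

	for (prec = 2; prec <= 12; prec += 10) {
		/* first pass pads with 2 spaces, second with 12 */
		printf("# %.*s _-----=> irqs-off\n", prec, space);
		printf("# %.*s / _----=> need-resched\n", prec, space);
	}
	return 0;
}
```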
3428 | 3906 | |
---|
3429 | 3907 | void |
---|
3430 | 3908 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) |
---|
3431 | 3909 | { |
---|
3432 | 3910 | unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); |
---|
3433 | | - struct trace_buffer *buf = iter->trace_buffer; |
---|
| 3911 | + struct array_buffer *buf = iter->array_buffer; |
---|
3434 | 3912 | struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); |
---|
3435 | 3913 | struct tracer *type = iter->trace; |
---|
3436 | 3914 | unsigned long entries; |
---|
.. | .. |
---|
3457 | 3935 | "desktop", |
---|
3458 | 3936 | #elif defined(CONFIG_PREEMPT) |
---|
3459 | 3937 | "preempt", |
---|
| 3938 | +#elif defined(CONFIG_PREEMPT_RT) |
---|
| 3939 | + "preempt_rt", |
---|
3460 | 3940 | #else |
---|
3461 | 3941 | "unknown", |
---|
3462 | 3942 | #endif |
---|
.. | .. |
---|
3503 | 3983 | cpumask_test_cpu(iter->cpu, iter->started)) |
---|
3504 | 3984 | return; |
---|
3505 | 3985 | |
---|
3506 | | - if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) |
---|
| 3986 | + if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) |
---|
3507 | 3987 | return; |
---|
3508 | 3988 | |
---|
3509 | 3989 | if (cpumask_available(iter->started)) |
---|
.. | .. |
---|
3637 | 4117 | if (!ring_buffer_iter_empty(buf_iter)) |
---|
3638 | 4118 | return 0; |
---|
3639 | 4119 | } else { |
---|
3640 | | - if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) |
---|
| 4120 | + if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) |
---|
3641 | 4121 | return 0; |
---|
3642 | 4122 | } |
---|
3643 | 4123 | return 1; |
---|
.. | .. |
---|
3649 | 4129 | if (!ring_buffer_iter_empty(buf_iter)) |
---|
3650 | 4130 | return 0; |
---|
3651 | 4131 | } else { |
---|
3652 | | - if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) |
---|
| 4132 | + if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) |
---|
3653 | 4133 | return 0; |
---|
3654 | 4134 | } |
---|
3655 | 4135 | } |
---|
.. | .. |
---|
3665 | 4145 | enum print_line_t ret; |
---|
3666 | 4146 | |
---|
3667 | 4147 | if (iter->lost_events) { |
---|
3668 | | - trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", |
---|
3669 | | - iter->cpu, iter->lost_events); |
---|
| 4148 | + if (iter->lost_events == (unsigned long)-1) |
---|
| 4149 | + trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", |
---|
| 4150 | + iter->cpu); |
---|
| 4151 | + else |
---|
| 4152 | + trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", |
---|
| 4153 | + iter->cpu, iter->lost_events); |
---|
3670 | 4154 | if (trace_seq_has_overflowed(&iter->seq)) |
---|
3671 | 4155 | return TRACE_TYPE_PARTIAL_LINE; |
---|
3672 | 4156 | } |
---|
.. | .. |
---|
3739 | 4223 | } else { |
---|
3740 | 4224 | if (!(trace_flags & TRACE_ITER_VERBOSE)) { |
---|
3741 | 4225 | if (trace_flags & TRACE_ITER_IRQ_INFO) |
---|
3742 | | - print_func_help_header_irq(iter->trace_buffer, |
---|
| 4226 | + print_func_help_header_irq(iter->array_buffer, |
---|
3743 | 4227 | m, trace_flags); |
---|
3744 | 4228 | else |
---|
3745 | | - print_func_help_header(iter->trace_buffer, m, |
---|
| 4229 | + print_func_help_header(iter->array_buffer, m, |
---|
3746 | 4230 | trace_flags); |
---|
3747 | 4231 | } |
---|
3748 | 4232 | } |
---|
.. | .. |
---|
3882 | 4366 | goto release; |
---|
3883 | 4367 | |
---|
3884 | 4368 | /* |
---|
| 4369 | + * trace_find_next_entry() may need to save off iter->ent. |
---|
| 4370 | + * It will place it into the iter->temp buffer. As most |
---|
| 4371 | + * events are less than 128, allocate a buffer of that size. |
---|
| 4372 | + * If one is greater, then trace_find_next_entry() will |
---|
| 4373 | + * allocate a new buffer to adjust for the bigger iter->ent. |
---|
| 4374 | + * It's not critical if it fails to get allocated here. |
---|
| 4375 | + */ |
---|
| 4376 | + iter->temp = kmalloc(128, GFP_KERNEL); |
---|
| 4377 | + if (iter->temp) |
---|
| 4378 | + iter->temp_size = 128; |
---|
| 4379 | + |
---|
| 4380 | + /* |
---|
3885 | 4381 | * We make a copy of the current tracer to avoid concurrent |
---|
3886 | 4382 | * changes on it while we are reading. |
---|
3887 | 4383 | */ |
---|
.. | .. |
---|
3900 | 4396 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
3901 | 4397 | /* Currently only the top directory has a snapshot */ |
---|
3902 | 4398 | if (tr->current_trace->print_max || snapshot) |
---|
3903 | | - iter->trace_buffer = &tr->max_buffer; |
---|
| 4399 | + iter->array_buffer = &tr->max_buffer; |
---|
3904 | 4400 | else |
---|
3905 | 4401 | #endif |
---|
3906 | | - iter->trace_buffer = &tr->trace_buffer; |
---|
| 4402 | + iter->array_buffer = &tr->array_buffer; |
---|
3907 | 4403 | iter->snapshot = snapshot; |
---|
3908 | 4404 | iter->pos = -1; |
---|
3909 | 4405 | iter->cpu_file = tracing_get_cpu(inode); |
---|
3910 | 4406 | mutex_init(&iter->mutex); |
---|
3911 | 4407 | |
---|
3912 | 4408 | /* Notify the tracer early; before we stop tracing. */ |
---|
3913 | | - if (iter->trace && iter->trace->open) |
---|
| 4409 | + if (iter->trace->open) |
---|
3914 | 4410 | iter->trace->open(iter); |
---|
3915 | 4411 | |
---|
3916 | 4412 | /* Annotate start of buffers if we had overruns */ |
---|
3917 | | - if (ring_buffer_overruns(iter->trace_buffer->buffer)) |
---|
| 4413 | + if (ring_buffer_overruns(iter->array_buffer->buffer)) |
---|
3918 | 4414 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
---|
3919 | 4415 | |
---|
3920 | 4416 | /* Output in nanoseconds only if we are using a clock in nanoseconds. */ |
---|
3921 | 4417 | if (trace_clocks[tr->clock_id].in_ns) |
---|
3922 | 4418 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; |
---|
3923 | 4419 | |
---|
3924 | | - /* stop the trace while dumping if we are not opening "snapshot" */ |
---|
3925 | | - if (!iter->snapshot) |
---|
| 4420 | + /* |
---|
| 4421 | + * If pause-on-trace is enabled, then stop the trace while |
---|
| 4422 | + * dumping, unless this is the "snapshot" file |
---|
| 4423 | + */ |
---|
| 4424 | + if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) |
---|
3926 | 4425 | tracing_stop_tr(tr); |
---|
3927 | 4426 | |
---|
3928 | 4427 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { |
---|
3929 | 4428 | for_each_tracing_cpu(cpu) { |
---|
3930 | 4429 | iter->buffer_iter[cpu] = |
---|
3931 | | - ring_buffer_read_prepare(iter->trace_buffer->buffer, |
---|
| 4430 | + ring_buffer_read_prepare(iter->array_buffer->buffer, |
---|
3932 | 4431 | cpu, GFP_KERNEL); |
---|
3933 | 4432 | } |
---|
3934 | 4433 | ring_buffer_read_prepare_sync(); |
---|
.. | .. |
---|
3939 | 4438 | } else { |
---|
3940 | 4439 | cpu = iter->cpu_file; |
---|
3941 | 4440 | iter->buffer_iter[cpu] = |
---|
3942 | | - ring_buffer_read_prepare(iter->trace_buffer->buffer, |
---|
| 4441 | + ring_buffer_read_prepare(iter->array_buffer->buffer, |
---|
3943 | 4442 | cpu, GFP_KERNEL); |
---|
3944 | 4443 | ring_buffer_read_prepare_sync(); |
---|
3945 | 4444 | ring_buffer_read_start(iter->buffer_iter[cpu]); |
---|
.. | .. |
---|
3953 | 4452 | fail: |
---|
3954 | 4453 | mutex_unlock(&trace_types_lock); |
---|
3955 | 4454 | kfree(iter->trace); |
---|
| 4455 | + kfree(iter->temp); |
---|
3956 | 4456 | kfree(iter->buffer_iter); |
---|
3957 | 4457 | release: |
---|
3958 | 4458 | seq_release_private(inode, file); |
---|
.. | .. |
---|
3961 | 4461 | |
---|
3962 | 4462 | int tracing_open_generic(struct inode *inode, struct file *filp) |
---|
3963 | 4463 | { |
---|
3964 | | - if (tracing_disabled) |
---|
3965 | | - return -ENODEV; |
---|
| 4464 | + int ret; |
---|
| 4465 | + |
---|
| 4466 | + ret = tracing_check_open_get_tr(NULL); |
---|
| 4467 | + if (ret) |
---|
| 4468 | + return ret; |
---|
3966 | 4469 | |
---|
3967 | 4470 | filp->private_data = inode->i_private; |
---|
3968 | 4471 | return 0; |
---|
.. | .. |
---|
3977 | 4480 | * Open and update trace_array ref count. |
---|
3978 | 4481 | * Must have the current trace_array passed to it. |
---|
3979 | 4482 | */ |
---|
3980 | | -static int tracing_open_generic_tr(struct inode *inode, struct file *filp) |
---|
| 4483 | +int tracing_open_generic_tr(struct inode *inode, struct file *filp) |
---|
3981 | 4484 | { |
---|
3982 | 4485 | struct trace_array *tr = inode->i_private; |
---|
| 4486 | + int ret; |
---|
3983 | 4487 | |
---|
3984 | | - if (tracing_disabled) |
---|
3985 | | - return -ENODEV; |
---|
3986 | | - |
---|
3987 | | - if (trace_array_get(tr) < 0) |
---|
3988 | | - return -ENODEV; |
---|
| 4488 | + ret = tracing_check_open_get_tr(tr); |
---|
| 4489 | + if (ret) |
---|
| 4490 | + return ret; |
---|
3989 | 4491 | |
---|
3990 | 4492 | filp->private_data = inode->i_private; |
---|
| 4493 | + |
---|
| 4494 | + return 0; |
---|
| 4495 | +} |
---|
| 4496 | + |
---|
| 4497 | +/* |
---|
| 4498 | + * The private pointer of the inode is the trace_event_file. |
---|
| 4499 | + * Update the tr ref count associated to it. |
---|
| 4500 | + */ |
---|
| 4501 | +int tracing_open_file_tr(struct inode *inode, struct file *filp) |
---|
| 4502 | +{ |
---|
| 4503 | + struct trace_event_file *file = inode->i_private; |
---|
| 4504 | + int ret; |
---|
| 4505 | + |
---|
| 4506 | + ret = tracing_check_open_get_tr(file->tr); |
---|
| 4507 | + if (ret) |
---|
| 4508 | + return ret; |
---|
| 4509 | + |
---|
| 4510 | + filp->private_data = inode->i_private; |
---|
| 4511 | + |
---|
| 4512 | + return 0; |
---|
| 4513 | +} |
---|
| 4514 | + |
---|
| 4515 | +int tracing_release_file_tr(struct inode *inode, struct file *filp) |
---|
| 4516 | +{ |
---|
| 4517 | + struct trace_event_file *file = inode->i_private; |
---|
| 4518 | + |
---|
| 4519 | + trace_array_put(file->tr); |
---|
3991 | 4520 | |
---|
3992 | 4521 | return 0; |
---|
3993 | 4522 | } |
---|
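A sketch of how an event file's file_operations might pair the two helpers so the trace_array reference taken at open time is dropped at release; the read/write entries are placeholders for this sketch, not functions from this patch.

```c
#include <linux/fs.h>

/* placeholders for this sketch only */
static ssize_t event_file_read(struct file *filp, char __user *ubuf,
			       size_t cnt, loff_t *ppos);
static ssize_t event_file_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos);

static const struct file_operations event_file_fops = {
	.open		= tracing_open_file_tr,		/* takes the tr reference */
	.read		= event_file_read,
	.write		= event_file_write,
	.llseek		= default_llseek,
	.release	= tracing_release_file_tr,	/* puts it back */
};
```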
.. | .. |
---|
4016 | 4545 | if (iter->trace && iter->trace->close) |
---|
4017 | 4546 | iter->trace->close(iter); |
---|
4018 | 4547 | |
---|
4019 | | - if (!iter->snapshot) |
---|
| 4548 | + if (!iter->snapshot && tr->stop_count) |
---|
4020 | 4549 | /* reenable tracing if it was previously enabled */ |
---|
4021 | 4550 | tracing_start_tr(tr); |
---|
4022 | 4551 | |
---|
.. | .. |
---|
4026 | 4555 | |
---|
4027 | 4556 | mutex_destroy(&iter->mutex); |
---|
4028 | 4557 | free_cpumask_var(iter->started); |
---|
| 4558 | + kfree(iter->temp); |
---|
4029 | 4559 | kfree(iter->trace); |
---|
4030 | 4560 | kfree(iter->buffer_iter); |
---|
4031 | 4561 | seq_release_private(inode, file); |
---|
.. | .. |
---|
4054 | 4584 | { |
---|
4055 | 4585 | struct trace_array *tr = inode->i_private; |
---|
4056 | 4586 | struct trace_iterator *iter; |
---|
4057 | | - int ret = 0; |
---|
| 4587 | + int ret; |
---|
4058 | 4588 | |
---|
4059 | | - if (trace_array_get(tr) < 0) |
---|
4060 | | - return -ENODEV; |
---|
| 4589 | + ret = tracing_check_open_get_tr(tr); |
---|
| 4590 | + if (ret) |
---|
| 4591 | + return ret; |
---|
4061 | 4592 | |
---|
4062 | 4593 | /* If this file was open for write, then erase contents */ |
---|
4063 | 4594 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { |
---|
4064 | 4595 | int cpu = tracing_get_cpu(inode); |
---|
4065 | | - struct trace_buffer *trace_buf = &tr->trace_buffer; |
---|
| 4596 | + struct array_buffer *trace_buf = &tr->array_buffer; |
---|
4066 | 4597 | |
---|
4067 | 4598 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
4068 | 4599 | if (tr->current_trace->print_max) |
---|
.. | .. |
---|
4072 | 4603 | if (cpu == RING_BUFFER_ALL_CPUS) |
---|
4073 | 4604 | tracing_reset_online_cpus(trace_buf); |
---|
4074 | 4605 | else |
---|
4075 | | - tracing_reset(trace_buf, cpu); |
---|
| 4606 | + tracing_reset_cpu(trace_buf, cpu); |
---|
4076 | 4607 | } |
---|
4077 | 4608 | |
---|
4078 | 4609 | if (file->f_mode & FMODE_READ) { |
---|
.. | .. |
---|
4173 | 4704 | struct seq_file *m; |
---|
4174 | 4705 | int ret; |
---|
4175 | 4706 | |
---|
4176 | | - if (tracing_disabled) |
---|
4177 | | - return -ENODEV; |
---|
4178 | | - |
---|
4179 | | - if (trace_array_get(tr) < 0) |
---|
4180 | | - return -ENODEV; |
---|
| 4707 | + ret = tracing_check_open_get_tr(tr); |
---|
| 4708 | + if (ret) |
---|
| 4709 | + return ret; |
---|
4181 | 4710 | |
---|
4182 | 4711 | ret = seq_open(file, &show_traces_seq_ops); |
---|
4183 | 4712 | if (ret) { |
---|
.. | .. |
---|
4221 | 4750 | static const struct file_operations tracing_fops = { |
---|
4222 | 4751 | .open = tracing_open, |
---|
4223 | 4752 | .read = seq_read, |
---|
| 4753 | + .read_iter = seq_read_iter, |
---|
| 4754 | + .splice_read = generic_file_splice_read, |
---|
4224 | 4755 | .write = tracing_write_stub, |
---|
4225 | 4756 | .llseek = tracing_lseek, |
---|
4226 | 4757 | .release = tracing_release, |
---|
.. | .. |
---|
4261 | 4792 | return count; |
---|
4262 | 4793 | } |
---|
4263 | 4794 | |
---|
4264 | | -static ssize_t |
---|
4265 | | -tracing_cpumask_write(struct file *filp, const char __user *ubuf, |
---|
4266 | | - size_t count, loff_t *ppos) |
---|
| 4795 | +int tracing_set_cpumask(struct trace_array *tr, |
---|
| 4796 | + cpumask_var_t tracing_cpumask_new) |
---|
4267 | 4797 | { |
---|
4268 | | - struct trace_array *tr = file_inode(filp)->i_private; |
---|
4269 | | - cpumask_var_t tracing_cpumask_new; |
---|
4270 | | - int err, cpu; |
---|
| 4798 | + int cpu; |
---|
4271 | 4799 | |
---|
4272 | | - if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) |
---|
4273 | | - return -ENOMEM; |
---|
4274 | | - |
---|
4275 | | - err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); |
---|
4276 | | - if (err) |
---|
4277 | | - goto err_unlock; |
---|
| 4800 | + if (!tr) |
---|
| 4801 | + return -EINVAL; |
---|
4278 | 4802 | |
---|
4279 | 4803 | local_irq_disable(); |
---|
4280 | 4804 | arch_spin_lock(&tr->max_lock); |
---|
.. | .. |
---|
4285 | 4809 | */ |
---|
4286 | 4810 | if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && |
---|
4287 | 4811 | !cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
---|
4288 | | - atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); |
---|
4289 | | - ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); |
---|
| 4812 | + atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); |
---|
| 4813 | + ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); |
---|
| 4814 | +#ifdef CONFIG_TRACER_MAX_TRACE |
---|
| 4815 | + ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); |
---|
| 4816 | +#endif |
---|
4290 | 4817 | } |
---|
4291 | 4818 | if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && |
---|
4292 | 4819 | cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
---|
4293 | | - atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); |
---|
4294 | | - ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); |
---|
| 4820 | + atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); |
---|
| 4821 | + ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); |
---|
| 4822 | +#ifdef CONFIG_TRACER_MAX_TRACE |
---|
| 4823 | + ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); |
---|
| 4824 | +#endif |
---|
4295 | 4825 | } |
---|
4296 | 4826 | } |
---|
4297 | 4827 | arch_spin_unlock(&tr->max_lock); |
---|
4298 | 4828 | local_irq_enable(); |
---|
4299 | 4829 | |
---|
4300 | 4830 | cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); |
---|
| 4831 | + |
---|
| 4832 | + return 0; |
---|
| 4833 | +} |
---|
| 4834 | + |
---|
| 4835 | +static ssize_t |
---|
| 4836 | +tracing_cpumask_write(struct file *filp, const char __user *ubuf, |
---|
| 4837 | + size_t count, loff_t *ppos) |
---|
| 4838 | +{ |
---|
| 4839 | + struct trace_array *tr = file_inode(filp)->i_private; |
---|
| 4840 | + cpumask_var_t tracing_cpumask_new; |
---|
| 4841 | + int err; |
---|
| 4842 | + |
---|
| 4843 | + if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) |
---|
| 4844 | + return -ENOMEM; |
---|
| 4845 | + |
---|
| 4846 | + err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); |
---|
| 4847 | + if (err) |
---|
| 4848 | + goto err_free; |
---|
| 4849 | + |
---|
| 4850 | + err = tracing_set_cpumask(tr, tracing_cpumask_new); |
---|
| 4851 | + if (err) |
---|
| 4852 | + goto err_free; |
---|
| 4853 | + |
---|
4301 | 4854 | free_cpumask_var(tracing_cpumask_new); |
---|
4302 | 4855 | |
---|
4303 | 4856 | return count; |
---|
4304 | 4857 | |
---|
4305 | | -err_unlock: |
---|
| 4858 | +err_free: |
---|
4306 | 4859 | free_cpumask_var(tracing_cpumask_new); |
---|
4307 | 4860 | |
---|
4308 | 4861 | return err; |
---|
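This hunk splits the actual cpumask update out of the file write handler into tracing_set_cpumask(), so other in-kernel code (the boot-time tracing setup, for instance) can pass an already-parsed cpumask directly; the tracefs file itself behaves as before. Below is a minimal user-space sketch of driving that file; the tracefs mount point and the chosen mask are assumptions for illustration.

```c
/* Minimal sketch: restrict tracing to CPUs 0-1 by writing a hex mask
 * to tracing_cpumask. Assumes tracefs is mounted at /sys/kernel/tracing
 * and the program runs with sufficient privileges. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/tracing_cpumask";
	const char *mask = "3\n";	/* bits 0 and 1 => CPUs 0 and 1 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open tracing_cpumask");
		return 1;
	}
	if (write(fd, mask, strlen(mask)) < 0)
		perror("write tracing_cpumask");
	close(fd);
	return 0;
}
```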
.. | .. |
---|
4444 | 4997 | ftrace_pid_follow_fork(tr, enabled); |
---|
4445 | 4998 | |
---|
4446 | 4999 | if (mask == TRACE_ITER_OVERWRITE) { |
---|
4447 | | - ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); |
---|
| 5000 | + ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); |
---|
4448 | 5001 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
4449 | 5002 | ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); |
---|
4450 | 5003 | #endif |
---|
.. | .. |
---|
4458 | 5011 | return 0; |
---|
4459 | 5012 | } |
---|
4460 | 5013 | |
---|
4461 | | -static int trace_set_options(struct trace_array *tr, char *option) |
---|
| 5014 | +int trace_set_options(struct trace_array *tr, char *option) |
---|
4462 | 5015 | { |
---|
4463 | 5016 | char *cmp; |
---|
4464 | 5017 | int neg = 0; |
---|
4465 | 5018 | int ret; |
---|
4466 | 5019 | size_t orig_len = strlen(option); |
---|
| 5020 | + int len; |
---|
4467 | 5021 | |
---|
4468 | 5022 | cmp = strstrip(option); |
---|
4469 | 5023 | |
---|
4470 | | - if (strncmp(cmp, "no", 2) == 0) { |
---|
| 5024 | + len = str_has_prefix(cmp, "no"); |
---|
| 5025 | + if (len) |
---|
4471 | 5026 | neg = 1; |
---|
4472 | | - cmp += 2; |
---|
4473 | | - } |
---|
| 5027 | + |
---|
| 5028 | + cmp += len; |
---|
4474 | 5029 | |
---|
4475 | 5030 | mutex_lock(&event_mutex); |
---|
4476 | 5031 | mutex_lock(&trace_types_lock); |
---|
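trace_set_options() now uses str_has_prefix() instead of an open-coded strncmp(): the helper returns the length of the prefix on a match and 0 otherwise, so the same value both sets `neg` and advances `cmp` past the "no". A stand-alone user-space sketch of that idiom, mirroring the kernel helper for illustration only:

```c
/* Sketch of the str_has_prefix() idiom used above: the return value is
 * the prefix length on a match, 0 otherwise, so it can be added to the
 * cursor unconditionally. User-space re-implementation for illustration. */
#include <stdio.h>
#include <string.h>

static size_t str_has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
	const char *option = "noprint-parent";
	size_t len = str_has_prefix(option, "no");
	int neg = len != 0;

	printf("neg=%d option=%s\n", neg, option + len);
	return 0;
}
```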
.. | .. |
---|
4546 | 5101 | struct trace_array *tr = inode->i_private; |
---|
4547 | 5102 | int ret; |
---|
4548 | 5103 | |
---|
4549 | | - if (tracing_disabled) |
---|
4550 | | - return -ENODEV; |
---|
4551 | | - |
---|
4552 | | - if (trace_array_get(tr) < 0) |
---|
4553 | | - return -ENODEV; |
---|
| 5104 | + ret = tracing_check_open_get_tr(tr); |
---|
| 5105 | + if (ret) |
---|
| 5106 | + return ret; |
---|
4554 | 5107 | |
---|
4555 | 5108 | ret = single_open(file, tracing_trace_options_show, inode->i_private); |
---|
4556 | 5109 | if (ret < 0) |
---|
.. | .. |
---|
4577 | 5130 | " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" |
---|
4578 | 5131 | " current_tracer\t- function and latency tracers\n" |
---|
4579 | 5132 | " available_tracers\t- list of configured tracers for current_tracer\n" |
---|
| 5133 | + " error_log\t- error log for failed commands (that support it)\n" |
---|
4580 | 5134 | " buffer_size_kb\t- view and modify size of per cpu buffer\n" |
---|
4581 | 5135 | " buffer_total_size_kb - view total size of all cpu buffers\n\n" |
---|
4582 | 5136 | " trace_clock\t\t-change the clock used to order events\n" |
---|
.. | .. |
---|
4597 | 5151 | " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" |
---|
4598 | 5152 | "\t\t\t Remove sub-buffer with rmdir\n" |
---|
4599 | 5153 | " trace_options\t\t- Set format or modify how tracing happens\n" |
---|
4600 | | - "\t\t\t Disable an option by adding a suffix 'no' to the\n" |
---|
| 5154 | + "\t\t\t Disable an option by prefixing 'no' to the\n" |
---|
4601 | 5155 | "\t\t\t option name\n" |
---|
4602 | 5156 | " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" |
---|
4603 | 5157 | #ifdef CONFIG_DYNAMIC_FTRACE |
---|
.. | .. |
---|
4641 | 5195 | #ifdef CONFIG_FUNCTION_TRACER |
---|
4642 | 5196 | " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" |
---|
4643 | 5197 | "\t\t (function)\n" |
---|
| 5198 | + " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n" |
---|
| 5199 | + "\t\t (function)\n" |
---|
4644 | 5200 | #endif |
---|
4645 | 5201 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
---|
4646 | 5202 | " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" |
---|
.. | .. |
---|
4662 | 5218 | "\t\t\t traces\n" |
---|
4663 | 5219 | #endif |
---|
4664 | 5220 | #endif /* CONFIG_STACK_TRACER */ |
---|
| 5221 | +#ifdef CONFIG_DYNAMIC_EVENTS |
---|
| 5222 | + " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" |
---|
| 5223 | + "\t\t\t Write into this file to define/undefine new trace events.\n" |
---|
| 5224 | +#endif |
---|
4665 | 5225 | #ifdef CONFIG_KPROBE_EVENTS |
---|
4666 | | - " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" |
---|
| 5226 | + " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" |
---|
4667 | 5227 | "\t\t\t Write into this file to define/undefine new trace events.\n" |
---|
4668 | 5228 | #endif |
---|
4669 | 5229 | #ifdef CONFIG_UPROBE_EVENTS |
---|
4670 | | - " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" |
---|
| 5230 | + " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" |
---|
4671 | 5231 | "\t\t\t Write into this file to define/undefine new trace events.\n" |
---|
4672 | 5232 | #endif |
---|
4673 | 5233 | #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) |
---|
4674 | 5234 | "\t accepts: event-definitions (one definition per line)\n" |
---|
4675 | 5235 | "\t Format: p[:[<group>/]<event>] <place> [<args>]\n" |
---|
4676 | 5236 | "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n" |
---|
| 5237 | +#ifdef CONFIG_HIST_TRIGGERS |
---|
| 5238 | + "\t s:[synthetic/]<event> <field> [<field>]\n" |
---|
| 5239 | +#endif |
---|
4677 | 5240 | "\t -:[<group>/]<event>\n" |
---|
4678 | 5241 | #ifdef CONFIG_KPROBE_EVENTS |
---|
4679 | 5242 | "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" |
---|
4680 | | - "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n" |
---|
| 5243 | + "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n" |
---|
4681 | 5244 | #endif |
---|
4682 | 5245 | #ifdef CONFIG_UPROBE_EVENTS |
---|
4683 | | - "\t place: <path>:<offset>\n" |
---|
| 5246 | + " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n" |
---|
4684 | 5247 | #endif |
---|
4685 | 5248 | "\t args: <name>=fetcharg[:type]\n" |
---|
4686 | 5249 | "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" |
---|
4687 | | - "\t $stack<index>, $stack, $retval, $comm\n" |
---|
4688 | | - "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n" |
---|
4689 | | - "\t b<bit-width>@<bit-offset>/<container-size>\n" |
---|
| 5250 | +#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API |
---|
| 5251 | + "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n" |
---|
| 5252 | +#else |
---|
| 5253 | + "\t $stack<index>, $stack, $retval, $comm,\n" |
---|
| 5254 | +#endif |
---|
| 5255 | + "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n" |
---|
| 5256 | + "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" |
---|
| 5257 | + "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n" |
---|
| 5258 | + "\t <type>\\[<array-size>\\]\n" |
---|
| 5259 | +#ifdef CONFIG_HIST_TRIGGERS |
---|
| 5260 | + "\t field: <stype> <name>;\n" |
---|
| 5261 | + "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" |
---|
| 5262 | + "\t [unsigned] char/int/long\n" |
---|
| 5263 | +#endif |
---|
4690 | 5264 | #endif |
---|
4691 | 5265 | " events/\t\t- Directory containing all trace event subsystems:\n" |
---|
4692 | 5266 | " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" |
---|
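The reworked help text documents the extended probe syntax (the %return suffix for return probes, $arg<N> fetch args, array types, and so on). As a rough user-space sketch, a kprobe event can be defined and later removed through this file as follows; the tracefs path and the probed symbol do_sys_open are assumptions for illustration.

```c
/* Sketch: create a kprobe event "p:mygrp/myopen do_sys_open" via
 * kprobe_events, then remove it again with the "-:" syntax documented
 * above. Paths and the probed symbol are illustrative assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY | O_APPEND);

	if (fd < 0)
		return -1;
	if (write(fd, s, strlen(s)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	const char *path = "/sys/kernel/tracing/kprobe_events";

	if (write_str(path, "p:mygrp/myopen do_sys_open\n"))
		perror("define kprobe");
	/* ... enable events/mygrp/myopen/enable and collect a trace ... */
	if (write_str(path, "-:mygrp/myopen\n"))
		perror("remove kprobe");
	return 0;
}
```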
.. | .. |
---|
4739 | 5313 | "\t [:size=#entries]\n" |
---|
4740 | 5314 | "\t [:pause][:continue][:clear]\n" |
---|
4741 | 5315 | "\t [:name=histname1]\n" |
---|
| 5316 | + "\t [:<handler>.<action>]\n" |
---|
4742 | 5317 | "\t [if <filter>]\n\n" |
---|
4743 | 5318 | "\t Note, special fields can be used as well:\n" |
---|
4744 | 5319 | "\t common_timestamp - to record current timestamp\n" |
---|
.. | .. |
---|
4783 | 5358 | "\t unchanged.\n\n" |
---|
4784 | 5359 | "\t The enable_hist and disable_hist triggers can be used to\n" |
---|
4785 | 5360 | "\t have one event conditionally start and stop another event's\n" |
---|
4786 | | - "\t already-attached hist trigger. The syntax is analagous to\n" |
---|
4787 | | - "\t the enable_event and disable_event triggers.\n" |
---|
| 5361 | + "\t already-attached hist trigger. The syntax is analogous to\n" |
---|
| 5362 | + "\t the enable_event and disable_event triggers.\n\n" |
---|
| 5363 | + "\t Hist trigger handlers and actions are executed whenever a\n" |
---|
| 5364 | + "\t histogram entry is added or updated. They take the form:\n\n"
---|
| 5365 | + "\t <handler>.<action>\n\n" |
---|
| 5366 | + "\t The available handlers are:\n\n" |
---|
| 5367 | + "\t onmatch(matching.event) - invoke on addition or update\n" |
---|
| 5368 | + "\t onmax(var) - invoke if var exceeds current max\n" |
---|
| 5369 | + "\t onchange(var) - invoke action if var changes\n\n" |
---|
| 5370 | + "\t The available actions are:\n\n" |
---|
| 5371 | + "\t trace(<synthetic_event>,param list) - generate synthetic event\n" |
---|
| 5372 | + "\t save(field,...) - save current event fields\n" |
---|
| 5373 | +#ifdef CONFIG_TRACER_SNAPSHOT |
---|
| 5374 | + "\t snapshot() - snapshot the trace buffer\n\n" |
---|
| 5375 | +#endif |
---|
| 5376 | +#ifdef CONFIG_SYNTH_EVENTS |
---|
| 5377 | + " events/synthetic_events\t- Create/append/remove/show synthetic events\n" |
---|
| 5378 | + "\t Write into this file to define/undefine new synthetic events.\n" |
---|
| 5379 | + "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n" |
---|
| 5380 | +#endif |
---|
4788 | 5381 | #endif |
---|
4789 | 5382 | ; |
---|
4790 | 5383 | |
---|
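The new synthetic_events entry in the help text carries its own example. The same definition written from a small C program rather than echo; the tracefs mount point is an assumption and CONFIG_SYNTH_EVENTS must be enabled.

```c
/* Sketch: define the synthetic event from the help-text example above by
 * appending to events/synthetic_events. Path is an assumption; requires
 * CONFIG_SYNTH_EVENTS and sufficient privileges. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/events/synthetic_events";
	FILE *f = fopen(path, "a");

	if (!f) {
		perror("open synthetic_events");
		return 1;
	}
	fprintf(f, "myevent u64 lat; char name[]\n");
	fclose(f);
	return 0;
}
```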
.. | .. |
---|
4842 | 5435 | |
---|
4843 | 5436 | static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) |
---|
4844 | 5437 | { |
---|
4845 | | - if (tracing_disabled) |
---|
4846 | | - return -ENODEV; |
---|
| 5438 | + int ret; |
---|
| 5439 | + |
---|
| 5440 | + ret = tracing_check_open_get_tr(NULL); |
---|
| 5441 | + if (ret) |
---|
| 5442 | + return ret; |
---|
4847 | 5443 | |
---|
4848 | 5444 | return seq_open(filp, &tracing_saved_tgids_seq_ops); |
---|
4849 | 5445 | } |
---|
.. | .. |
---|
4919 | 5515 | |
---|
4920 | 5516 | static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp) |
---|
4921 | 5517 | { |
---|
4922 | | - if (tracing_disabled) |
---|
4923 | | - return -ENODEV; |
---|
| 5518 | + int ret; |
---|
| 5519 | + |
---|
| 5520 | + ret = tracing_check_open_get_tr(NULL); |
---|
| 5521 | + if (ret) |
---|
| 5522 | + return ret; |
---|
4924 | 5523 | |
---|
4925 | 5524 | return seq_open(filp, &tracing_saved_cmdlines_seq_ops); |
---|
4926 | 5525 | } |
---|
.. | .. |
---|
4939 | 5538 | char buf[64]; |
---|
4940 | 5539 | int r; |
---|
4941 | 5540 | |
---|
| 5541 | + preempt_disable(); |
---|
4942 | 5542 | arch_spin_lock(&trace_cmdline_lock); |
---|
4943 | 5543 | r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); |
---|
4944 | 5544 | arch_spin_unlock(&trace_cmdline_lock); |
---|
| 5545 | + preempt_enable(); |
---|
4945 | 5546 | |
---|
4946 | 5547 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
---|
4947 | 5548 | } |
---|
.. | .. |
---|
4966 | 5567 | return -ENOMEM; |
---|
4967 | 5568 | } |
---|
4968 | 5569 | |
---|
| 5570 | + preempt_disable(); |
---|
4969 | 5571 | arch_spin_lock(&trace_cmdline_lock); |
---|
4970 | 5572 | savedcmd_temp = savedcmd; |
---|
4971 | 5573 | savedcmd = s; |
---|
4972 | 5574 | arch_spin_unlock(&trace_cmdline_lock); |
---|
| 5575 | + preempt_enable(); |
---|
4973 | 5576 | free_saved_cmdlines_buffer(savedcmd_temp); |
---|
4974 | 5577 | |
---|
4975 | 5578 | return 0; |
---|
.. | .. |
---|
5028 | 5631 | * Paranoid! If ptr points to end, we don't want to increment past it. |
---|
5029 | 5632 | * This really should never happen. |
---|
5030 | 5633 | */ |
---|
| 5634 | + (*pos)++; |
---|
5031 | 5635 | ptr = update_eval_map(ptr); |
---|
5032 | 5636 | if (WARN_ON_ONCE(!ptr)) |
---|
5033 | 5637 | return NULL; |
---|
5034 | 5638 | |
---|
5035 | 5639 | ptr++; |
---|
5036 | | - |
---|
5037 | | - (*pos)++; |
---|
5038 | | - |
---|
5039 | 5640 | ptr = update_eval_map(ptr); |
---|
5040 | 5641 | |
---|
5041 | 5642 | return ptr; |
---|
.. | .. |
---|
5084 | 5685 | |
---|
5085 | 5686 | static int tracing_eval_map_open(struct inode *inode, struct file *filp) |
---|
5086 | 5687 | { |
---|
5087 | | - if (tracing_disabled) |
---|
5088 | | - return -ENODEV; |
---|
| 5688 | + int ret; |
---|
| 5689 | + |
---|
| 5690 | + ret = tracing_check_open_get_tr(NULL); |
---|
| 5691 | + if (ret) |
---|
| 5692 | + return ret; |
---|
5089 | 5693 | |
---|
5090 | 5694 | return seq_open(filp, &tracing_eval_map_seq_ops); |
---|
5091 | 5695 | } |
---|
.. | .. |
---|
5198 | 5802 | |
---|
5199 | 5803 | int tracer_init(struct tracer *t, struct trace_array *tr) |
---|
5200 | 5804 | { |
---|
5201 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
---|
| 5805 | + tracing_reset_online_cpus(&tr->array_buffer); |
---|
5202 | 5806 | return t->init(tr); |
---|
5203 | 5807 | } |
---|
5204 | 5808 | |
---|
5205 | | -static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) |
---|
| 5809 | +static void set_buffer_entries(struct array_buffer *buf, unsigned long val) |
---|
5206 | 5810 | { |
---|
5207 | 5811 | int cpu; |
---|
5208 | 5812 | |
---|
.. | .. |
---|
5212 | 5816 | |
---|
5213 | 5817 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
5214 | 5818 | /* resize @tr's buffer to the size of @size_tr's entries */ |
---|
5215 | | -static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, |
---|
5216 | | - struct trace_buffer *size_buf, int cpu_id) |
---|
| 5819 | +static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, |
---|
| 5820 | + struct array_buffer *size_buf, int cpu_id) |
---|
5217 | 5821 | { |
---|
5218 | 5822 | int cpu, ret = 0; |
---|
5219 | 5823 | |
---|
.. | .. |
---|
5251 | 5855 | ring_buffer_expanded = true; |
---|
5252 | 5856 | |
---|
5253 | 5857 | /* May be called before buffers are initialized */ |
---|
5254 | | - if (!tr->trace_buffer.buffer) |
---|
| 5858 | + if (!tr->array_buffer.buffer) |
---|
5255 | 5859 | return 0; |
---|
5256 | 5860 | |
---|
5257 | | - ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); |
---|
| 5861 | + ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); |
---|
5258 | 5862 | if (ret < 0) |
---|
5259 | 5863 | return ret; |
---|
5260 | 5864 | |
---|
.. | .. |
---|
5265 | 5869 | |
---|
5266 | 5870 | ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); |
---|
5267 | 5871 | if (ret < 0) { |
---|
5268 | | - int r = resize_buffer_duplicate_size(&tr->trace_buffer, |
---|
5269 | | - &tr->trace_buffer, cpu); |
---|
| 5872 | + int r = resize_buffer_duplicate_size(&tr->array_buffer, |
---|
| 5873 | + &tr->array_buffer, cpu); |
---|
5270 | 5874 | if (r < 0) { |
---|
5271 | 5875 | /* |
---|
5272 | 5876 | * AARGH! We are left with different |
---|
.. | .. |
---|
5297 | 5901 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
---|
5298 | 5902 | |
---|
5299 | 5903 | if (cpu == RING_BUFFER_ALL_CPUS) |
---|
5300 | | - set_buffer_entries(&tr->trace_buffer, size); |
---|
| 5904 | + set_buffer_entries(&tr->array_buffer, size); |
---|
5301 | 5905 | else |
---|
5302 | | - per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; |
---|
| 5906 | + per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size; |
---|
5303 | 5907 | |
---|
5304 | 5908 | return ret; |
---|
5305 | 5909 | } |
---|
5306 | 5910 | |
---|
5307 | | -static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, |
---|
5308 | | - unsigned long size, int cpu_id) |
---|
| 5911 | +ssize_t tracing_resize_ring_buffer(struct trace_array *tr, |
---|
| 5912 | + unsigned long size, int cpu_id) |
---|
5309 | 5913 | { |
---|
5310 | 5914 | int ret = size; |
---|
5311 | 5915 | |
---|
.. | .. |
---|
5375 | 5979 | tr->current_trace = &nop_trace; |
---|
5376 | 5980 | } |
---|
5377 | 5981 | |
---|
| 5982 | +static bool tracer_options_updated; |
---|
| 5983 | + |
---|
5378 | 5984 | static void add_tracer_options(struct trace_array *tr, struct tracer *t) |
---|
5379 | 5985 | { |
---|
5380 | 5986 | /* Only enable if the directory has been created already. */ |
---|
5381 | 5987 | if (!tr->dir) |
---|
5382 | 5988 | return; |
---|
5383 | 5989 | |
---|
| 5990 | + /* Only create trace option files after update_tracer_options() finishes */
---|
| 5991 | + if (!tracer_options_updated) |
---|
| 5992 | + return; |
---|
| 5993 | + |
---|
5384 | 5994 | create_trace_option_files(tr, t); |
---|
5385 | 5995 | } |
---|
5386 | 5996 | |
---|
5387 | | -static int tracing_set_tracer(struct trace_array *tr, const char *buf) |
---|
| 5997 | +int tracing_set_tracer(struct trace_array *tr, const char *buf) |
---|
5388 | 5998 | { |
---|
5389 | 5999 | struct tracer *t; |
---|
5390 | 6000 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
.. | .. |
---|
5413 | 6023 | if (t == tr->current_trace) |
---|
5414 | 6024 | goto out; |
---|
5415 | 6025 | |
---|
| 6026 | +#ifdef CONFIG_TRACER_SNAPSHOT |
---|
| 6027 | + if (t->use_max_tr) { |
---|
| 6028 | + local_irq_disable(); |
---|
| 6029 | + arch_spin_lock(&tr->max_lock); |
---|
| 6030 | + if (tr->cond_snapshot) |
---|
| 6031 | + ret = -EBUSY; |
---|
| 6032 | + arch_spin_unlock(&tr->max_lock); |
---|
| 6033 | + local_irq_enable(); |
---|
| 6034 | + if (ret) |
---|
| 6035 | + goto out; |
---|
| 6036 | + } |
---|
| 6037 | +#endif |
---|
5416 | 6038 | /* Some tracers won't work on kernel command line */ |
---|
5417 | 6039 | if (system_state < SYSTEM_RUNNING && t->noboot) { |
---|
5418 | 6040 | pr_warn("Tracer '%s' is not allowed on command line, ignored\n", |
---|
.. | .. |
---|
5427 | 6049 | } |
---|
5428 | 6050 | |
---|
5429 | 6051 | /* If trace pipe files are being read, we can't change the tracer */ |
---|
5430 | | - if (tr->current_trace->ref) { |
---|
| 6052 | + if (tr->trace_ref) { |
---|
5431 | 6053 | ret = -EBUSY; |
---|
5432 | 6054 | goto out; |
---|
5433 | 6055 | } |
---|
.. | .. |
---|
5439 | 6061 | if (tr->current_trace->reset) |
---|
5440 | 6062 | tr->current_trace->reset(tr); |
---|
5441 | 6063 | |
---|
5442 | | - /* Current trace needs to be nop_trace before synchronize_sched */ |
---|
5443 | | - tr->current_trace = &nop_trace; |
---|
5444 | | - |
---|
5445 | 6064 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
5446 | | - had_max_tr = tr->allocated_snapshot; |
---|
| 6065 | + had_max_tr = tr->current_trace->use_max_tr; |
---|
| 6066 | + |
---|
| 6067 | + /* Current trace needs to be nop_trace before synchronize_rcu */ |
---|
| 6068 | + tr->current_trace = &nop_trace; |
---|
5447 | 6069 | |
---|
5448 | 6070 | if (had_max_tr && !t->use_max_tr) { |
---|
5449 | 6071 | /* |
---|
.. | .. |
---|
5453 | 6075 | * The update_max_tr is called from interrupts disabled |
---|
5454 | 6076 | * so a synchronized_sched() is sufficient. |
---|
5455 | 6077 | */ |
---|
5456 | | - synchronize_sched(); |
---|
| 6078 | + synchronize_rcu(); |
---|
5457 | 6079 | free_snapshot(tr); |
---|
5458 | 6080 | } |
---|
5459 | | -#endif |
---|
5460 | 6081 | |
---|
5461 | | -#ifdef CONFIG_TRACER_MAX_TRACE |
---|
5462 | | - if (t->use_max_tr && !had_max_tr) { |
---|
| 6082 | + if (t->use_max_tr && !tr->allocated_snapshot) { |
---|
5463 | 6083 | ret = tracing_alloc_snapshot_instance(tr); |
---|
5464 | 6084 | if (ret < 0) |
---|
5465 | 6085 | goto out; |
---|
5466 | 6086 | } |
---|
| 6087 | +#else |
---|
| 6088 | + tr->current_trace = &nop_trace; |
---|
5467 | 6089 | #endif |
---|
5468 | 6090 | |
---|
5469 | 6091 | if (t->init) { |
---|
.. | .. |
---|
5598 | 6220 | { |
---|
5599 | 6221 | struct trace_array *tr = inode->i_private; |
---|
5600 | 6222 | struct trace_iterator *iter; |
---|
5601 | | - int ret = 0; |
---|
| 6223 | + int ret; |
---|
5602 | 6224 | |
---|
5603 | | - if (tracing_disabled) |
---|
5604 | | - return -ENODEV; |
---|
5605 | | - |
---|
5606 | | - if (trace_array_get(tr) < 0) |
---|
5607 | | - return -ENODEV; |
---|
| 6225 | + ret = tracing_check_open_get_tr(tr); |
---|
| 6226 | + if (ret) |
---|
| 6227 | + return ret; |
---|
5608 | 6228 | |
---|
5609 | 6229 | mutex_lock(&trace_types_lock); |
---|
5610 | 6230 | |
---|
.. | .. |
---|
5635 | 6255 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; |
---|
5636 | 6256 | |
---|
5637 | 6257 | iter->tr = tr; |
---|
5638 | | - iter->trace_buffer = &tr->trace_buffer; |
---|
| 6258 | + iter->array_buffer = &tr->array_buffer; |
---|
5639 | 6259 | iter->cpu_file = tracing_get_cpu(inode); |
---|
5640 | 6260 | mutex_init(&iter->mutex); |
---|
5641 | 6261 | filp->private_data = iter; |
---|
.. | .. |
---|
5645 | 6265 | |
---|
5646 | 6266 | nonseekable_open(inode, filp); |
---|
5647 | 6267 | |
---|
5648 | | - tr->current_trace->ref++; |
---|
| 6268 | + tr->trace_ref++; |
---|
5649 | 6269 | out: |
---|
5650 | 6270 | mutex_unlock(&trace_types_lock); |
---|
5651 | 6271 | return ret; |
---|
.. | .. |
---|
5664 | 6284 | |
---|
5665 | 6285 | mutex_lock(&trace_types_lock); |
---|
5666 | 6286 | |
---|
5667 | | - tr->current_trace->ref--; |
---|
| 6287 | + tr->trace_ref--; |
---|
5668 | 6288 | |
---|
5669 | 6289 | if (iter->trace->pipe_close) |
---|
5670 | 6290 | iter->trace->pipe_close(iter); |
---|
.. | .. |
---|
5672 | 6292 | mutex_unlock(&trace_types_lock); |
---|
5673 | 6293 | |
---|
5674 | 6294 | free_cpumask_var(iter->started); |
---|
| 6295 | + kfree(iter->temp); |
---|
5675 | 6296 | mutex_destroy(&iter->mutex); |
---|
5676 | 6297 | kfree(iter); |
---|
5677 | 6298 | |
---|
.. | .. |
---|
5695 | 6316 | */ |
---|
5696 | 6317 | return EPOLLIN | EPOLLRDNORM; |
---|
5697 | 6318 | else |
---|
5698 | | - return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, |
---|
5699 | | - filp, poll_table); |
---|
| 6319 | + return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, |
---|
| 6320 | + filp, poll_table, iter->tr->buffer_percent); |
---|
5700 | 6321 | } |
---|
5701 | 6322 | |
---|
5702 | 6323 | static __poll_t |
---|
.. | .. |
---|
5733 | 6354 | |
---|
5734 | 6355 | mutex_unlock(&iter->mutex); |
---|
5735 | 6356 | |
---|
5736 | | - ret = wait_on_pipe(iter, false); |
---|
| 6357 | + ret = wait_on_pipe(iter, 0); |
---|
5737 | 6358 | |
---|
5738 | 6359 | mutex_lock(&iter->mutex); |
---|
5739 | 6360 | |
---|
.. | .. |
---|
5804 | 6425 | |
---|
5805 | 6426 | ret = print_trace_line(iter); |
---|
5806 | 6427 | if (ret == TRACE_TYPE_PARTIAL_LINE) { |
---|
5807 | | - /* don't print partial lines */ |
---|
| 6428 | + /* |
---|
| 6429 | + * If one print_trace_line() fills the entire trace_seq in one shot,
---|
| 6430 | + * trace_seq_to_user() will return -EBUSY because save_len == 0.
---|
| 6431 | + * In this case, we need to consume it; otherwise the loop will peek
---|
| 6432 | + * this event next time, resulting in an infinite loop. |
---|
| 6433 | + */ |
---|
| 6434 | + if (save_len == 0) { |
---|
| 6435 | + iter->seq.full = 0; |
---|
| 6436 | + trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); |
---|
| 6437 | + trace_consume(iter); |
---|
| 6438 | + break; |
---|
| 6439 | + } |
---|
| 6440 | + |
---|
| 6441 | + /* In other cases, don't print partial lines */ |
---|
5808 | 6442 | iter->seq.seq.len = save_len; |
---|
5809 | 6443 | break; |
---|
5810 | 6444 | } |
---|
.. | .. |
---|
5848 | 6482 | { |
---|
5849 | 6483 | __free_page(spd->pages[idx]); |
---|
5850 | 6484 | } |
---|
5851 | | - |
---|
5852 | | -static const struct pipe_buf_operations tracing_pipe_buf_ops = { |
---|
5853 | | - .can_merge = 0, |
---|
5854 | | - .confirm = generic_pipe_buf_confirm, |
---|
5855 | | - .release = generic_pipe_buf_release, |
---|
5856 | | - .steal = generic_pipe_buf_steal, |
---|
5857 | | - .get = generic_pipe_buf_get, |
---|
5858 | | -}; |
---|
5859 | 6485 | |
---|
5860 | 6486 | static size_t |
---|
5861 | 6487 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) |
---|
.. | .. |
---|
5918 | 6544 | .partial = partial_def, |
---|
5919 | 6545 | .nr_pages = 0, /* This gets updated below. */ |
---|
5920 | 6546 | .nr_pages_max = PIPE_DEF_BUFFERS, |
---|
5921 | | - .ops = &tracing_pipe_buf_ops, |
---|
| 6547 | + .ops = &default_pipe_buf_ops, |
---|
5922 | 6548 | .spd_release = tracing_spd_release_pipe, |
---|
5923 | 6549 | }; |
---|
5924 | 6550 | ssize_t ret; |
---|
.. | .. |
---|
6013 | 6639 | for_each_tracing_cpu(cpu) { |
---|
6014 | 6640 | /* fill in the size from first enabled cpu */ |
---|
6015 | 6641 | if (size == 0) |
---|
6016 | | - size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; |
---|
6017 | | - if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { |
---|
| 6642 | + size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; |
---|
| 6643 | + if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { |
---|
6018 | 6644 | buf_size_same = 0; |
---|
6019 | 6645 | break; |
---|
6020 | 6646 | } |
---|
.. | .. |
---|
6030 | 6656 | } else |
---|
6031 | 6657 | r = sprintf(buf, "X\n"); |
---|
6032 | 6658 | } else |
---|
6033 | | - r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); |
---|
| 6659 | + r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); |
---|
6034 | 6660 | |
---|
6035 | 6661 | mutex_unlock(&trace_types_lock); |
---|
6036 | 6662 | |
---|
.. | .. |
---|
6077 | 6703 | |
---|
6078 | 6704 | mutex_lock(&trace_types_lock); |
---|
6079 | 6705 | for_each_tracing_cpu(cpu) { |
---|
6080 | | - size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; |
---|
| 6706 | + size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; |
---|
6081 | 6707 | if (!ring_buffer_expanded) |
---|
6082 | 6708 | expanded_size += trace_buf_size >> 10; |
---|
6083 | 6709 | } |
---|
.. | .. |
---|
6127 | 6753 | struct trace_array *tr = filp->private_data; |
---|
6128 | 6754 | struct ring_buffer_event *event; |
---|
6129 | 6755 | enum event_trigger_type tt = ETT_NONE; |
---|
6130 | | - struct ring_buffer *buffer; |
---|
| 6756 | + struct trace_buffer *buffer; |
---|
6131 | 6757 | struct print_entry *entry; |
---|
6132 | 6758 | unsigned long irq_flags; |
---|
6133 | | - const char faulted[] = "<faulted>"; |
---|
6134 | 6759 | ssize_t written; |
---|
6135 | 6760 | int size; |
---|
6136 | 6761 | int len; |
---|
6137 | 6762 | |
---|
6138 | 6763 | /* Used in tracing_mark_raw_write() as well */ |
---|
6139 | | -#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */ |
---|
| 6764 | +#define FAULTED_STR "<faulted>" |
---|
| 6765 | +#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */ |
---|
6140 | 6766 | |
---|
6141 | 6767 | if (tracing_disabled) |
---|
6142 | 6768 | return -EINVAL; |
---|
.. | .. |
---|
6156 | 6782 | if (cnt < FAULTED_SIZE) |
---|
6157 | 6783 | size += FAULTED_SIZE - cnt; |
---|
6158 | 6784 | |
---|
6159 | | - buffer = tr->trace_buffer.buffer; |
---|
| 6785 | + buffer = tr->array_buffer.buffer; |
---|
6160 | 6786 | event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
---|
6161 | 6787 | irq_flags, preempt_count()); |
---|
6162 | 6788 | if (unlikely(!event)) |
---|
.. | .. |
---|
6168 | 6794 | |
---|
6169 | 6795 | len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); |
---|
6170 | 6796 | if (len) { |
---|
6171 | | - memcpy(&entry->buf, faulted, FAULTED_SIZE); |
---|
| 6797 | + memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); |
---|
6172 | 6798 | cnt = FAULTED_SIZE; |
---|
6173 | 6799 | written = -EFAULT; |
---|
6174 | 6800 | } else |
---|
6175 | 6801 | written = cnt; |
---|
6176 | | - len = cnt; |
---|
6177 | 6802 | |
---|
6178 | 6803 | if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { |
---|
6179 | 6804 | /* do not add \n before testing triggers, but add \0 */ |
---|
.. | .. |
---|
6187 | 6812 | } else |
---|
6188 | 6813 | entry->buf[cnt] = '\0'; |
---|
6189 | 6814 | |
---|
| 6815 | + if (static_branch_unlikely(&trace_marker_exports_enabled)) |
---|
| 6816 | + ftrace_exports(event, TRACE_EXPORT_MARKER); |
---|
6190 | 6817 | __buffer_unlock_commit(buffer, event); |
---|
6191 | 6818 | |
---|
6192 | 6819 | if (tt) |
---|
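Besides the FAULTED_STR cleanup, tracing_mark_write() now also feeds registered trace exports (TRACE_EXPORT_MARKER) when the trace_marker_exports_enabled static branch is on; from user space the interface is still a plain write to trace_marker. A small sketch that annotates phases of a workload; the tracefs path is an assumption.

```c
/* Sketch: annotate phases of a user-space workload through trace_marker.
 * The tracefs mount point is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
	int i;

	if (fd < 0) {
		perror("open trace_marker");
		return 1;
	}
	for (i = 0; i < 3; i++) {
		dprintf(fd, "phase %d start\n", i);
		usleep(1000);		/* stand-in for real work */
		dprintf(fd, "phase %d end\n", i);
	}
	close(fd);
	return 0;
}
```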
.. | .. |
---|
6207 | 6834 | { |
---|
6208 | 6835 | struct trace_array *tr = filp->private_data; |
---|
6209 | 6836 | struct ring_buffer_event *event; |
---|
6210 | | - struct ring_buffer *buffer; |
---|
| 6837 | + struct trace_buffer *buffer; |
---|
6211 | 6838 | struct raw_data_entry *entry; |
---|
6212 | | - const char faulted[] = "<faulted>"; |
---|
6213 | 6839 | unsigned long irq_flags; |
---|
6214 | 6840 | ssize_t written; |
---|
6215 | 6841 | int size; |
---|
.. | .. |
---|
6237 | 6863 | if (cnt < FAULT_SIZE_ID) |
---|
6238 | 6864 | size += FAULT_SIZE_ID - cnt; |
---|
6239 | 6865 | |
---|
6240 | | - buffer = tr->trace_buffer.buffer; |
---|
| 6866 | + buffer = tr->array_buffer.buffer; |
---|
6241 | 6867 | event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, |
---|
6242 | 6868 | irq_flags, preempt_count()); |
---|
6243 | 6869 | if (!event) |
---|
.. | .. |
---|
6249 | 6875 | len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); |
---|
6250 | 6876 | if (len) { |
---|
6251 | 6877 | entry->id = -1; |
---|
6252 | | - memcpy(&entry->buf, faulted, FAULTED_SIZE); |
---|
| 6878 | + memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); |
---|
6253 | 6879 | written = -EFAULT; |
---|
6254 | 6880 | } else |
---|
6255 | 6881 | written = cnt; |
---|
.. | .. |
---|
6292 | 6918 | |
---|
6293 | 6919 | tr->clock_id = i; |
---|
6294 | 6920 | |
---|
6295 | | - ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); |
---|
| 6921 | + ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); |
---|
6296 | 6922 | |
---|
6297 | 6923 | /* |
---|
6298 | 6924 | * New clock may not be consistent with the previous clock. |
---|
6299 | 6925 | * Reset the buffer so that it doesn't have incomparable timestamps. |
---|
6300 | 6926 | */ |
---|
6301 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
---|
| 6927 | + tracing_reset_online_cpus(&tr->array_buffer); |
---|
6302 | 6928 | |
---|
6303 | 6929 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
6304 | 6930 | if (tr->max_buffer.buffer) |
---|
.. | .. |
---|
6344 | 6970 | struct trace_array *tr = inode->i_private; |
---|
6345 | 6971 | int ret; |
---|
6346 | 6972 | |
---|
6347 | | - if (tracing_disabled) |
---|
6348 | | - return -ENODEV; |
---|
6349 | | - |
---|
6350 | | - if (trace_array_get(tr)) |
---|
6351 | | - return -ENODEV; |
---|
| 6973 | + ret = tracing_check_open_get_tr(tr); |
---|
| 6974 | + if (ret) |
---|
| 6975 | + return ret; |
---|
6352 | 6976 | |
---|
6353 | 6977 | ret = single_open(file, tracing_clock_show, inode->i_private); |
---|
6354 | 6978 | if (ret < 0) |
---|
.. | .. |
---|
6363 | 6987 | |
---|
6364 | 6988 | mutex_lock(&trace_types_lock); |
---|
6365 | 6989 | |
---|
6366 | | - if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) |
---|
| 6990 | + if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) |
---|
6367 | 6991 | seq_puts(m, "delta [absolute]\n"); |
---|
6368 | 6992 | else |
---|
6369 | 6993 | seq_puts(m, "[delta] absolute\n"); |
---|
.. | .. |
---|
6378 | 7002 | struct trace_array *tr = inode->i_private; |
---|
6379 | 7003 | int ret; |
---|
6380 | 7004 | |
---|
6381 | | - if (tracing_disabled) |
---|
6382 | | - return -ENODEV; |
---|
6383 | | - |
---|
6384 | | - if (trace_array_get(tr)) |
---|
6385 | | - return -ENODEV; |
---|
| 7005 | + ret = tracing_check_open_get_tr(tr); |
---|
| 7006 | + if (ret) |
---|
| 7007 | + return ret; |
---|
6386 | 7008 | |
---|
6387 | 7009 | ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); |
---|
6388 | 7010 | if (ret < 0) |
---|
.. | .. |
---|
6410 | 7032 | goto out; |
---|
6411 | 7033 | } |
---|
6412 | 7034 | |
---|
6413 | | - ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); |
---|
| 7035 | + ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs); |
---|
6414 | 7036 | |
---|
6415 | 7037 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
6416 | 7038 | if (tr->max_buffer.buffer) |
---|
.. | .. |
---|
6435 | 7057 | struct trace_array *tr = inode->i_private; |
---|
6436 | 7058 | struct trace_iterator *iter; |
---|
6437 | 7059 | struct seq_file *m; |
---|
6438 | | - int ret = 0; |
---|
| 7060 | + int ret; |
---|
6439 | 7061 | |
---|
6440 | | - if (trace_array_get(tr) < 0) |
---|
6441 | | - return -ENODEV; |
---|
| 7062 | + ret = tracing_check_open_get_tr(tr); |
---|
| 7063 | + if (ret) |
---|
| 7064 | + return ret; |
---|
6442 | 7065 | |
---|
6443 | 7066 | if (file->f_mode & FMODE_READ) { |
---|
6444 | 7067 | iter = __tracing_open(inode, file, true); |
---|
.. | .. |
---|
6458 | 7081 | ret = 0; |
---|
6459 | 7082 | |
---|
6460 | 7083 | iter->tr = tr; |
---|
6461 | | - iter->trace_buffer = &tr->max_buffer; |
---|
| 7084 | + iter->array_buffer = &tr->max_buffer; |
---|
6462 | 7085 | iter->cpu_file = tracing_get_cpu(inode); |
---|
6463 | 7086 | m->private = iter; |
---|
6464 | 7087 | file->private_data = m; |
---|
.. | .. |
---|
6468 | 7091 | trace_array_put(tr); |
---|
6469 | 7092 | |
---|
6470 | 7093 | return ret; |
---|
| 7094 | +} |
---|
| 7095 | + |
---|
| 7096 | +static void tracing_swap_cpu_buffer(void *tr) |
---|
| 7097 | +{ |
---|
| 7098 | + update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); |
---|
6471 | 7099 | } |
---|
6472 | 7100 | |
---|
6473 | 7101 | static ssize_t |
---|
.. | .. |
---|
6495 | 7123 | goto out; |
---|
6496 | 7124 | } |
---|
6497 | 7125 | |
---|
| 7126 | + local_irq_disable(); |
---|
| 7127 | + arch_spin_lock(&tr->max_lock); |
---|
| 7128 | + if (tr->cond_snapshot) |
---|
| 7129 | + ret = -EBUSY; |
---|
| 7130 | + arch_spin_unlock(&tr->max_lock); |
---|
| 7131 | + local_irq_enable(); |
---|
| 7132 | + if (ret) |
---|
| 7133 | + goto out; |
---|
| 7134 | + |
---|
6498 | 7135 | switch (val) { |
---|
6499 | 7136 | case 0: |
---|
6500 | 7137 | if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { |
---|
.. | .. |
---|
6514 | 7151 | #endif |
---|
6515 | 7152 | if (tr->allocated_snapshot) |
---|
6516 | 7153 | ret = resize_buffer_duplicate_size(&tr->max_buffer, |
---|
6517 | | - &tr->trace_buffer, iter->cpu_file); |
---|
| 7154 | + &tr->array_buffer, iter->cpu_file); |
---|
6518 | 7155 | else |
---|
6519 | 7156 | ret = tracing_alloc_snapshot_instance(tr); |
---|
6520 | 7157 | if (ret < 0) |
---|
6521 | 7158 | break; |
---|
6522 | | - local_irq_disable(); |
---|
6523 | 7159 | /* Now, we're going to swap */ |
---|
6524 | | - if (iter->cpu_file == RING_BUFFER_ALL_CPUS) |
---|
6525 | | - update_max_tr(tr, current, smp_processor_id()); |
---|
6526 | | - else |
---|
6527 | | - update_max_tr_single(tr, current, iter->cpu_file); |
---|
6528 | | - local_irq_enable(); |
---|
| 7160 | + if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { |
---|
| 7161 | + local_irq_disable(); |
---|
| 7162 | + update_max_tr(tr, current, smp_processor_id(), NULL); |
---|
| 7163 | + local_irq_enable(); |
---|
| 7164 | + } else { |
---|
| 7165 | + smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, |
---|
| 7166 | + (void *)tr, 1); |
---|
| 7167 | + } |
---|
6529 | 7168 | break; |
---|
6530 | 7169 | default: |
---|
6531 | 7170 | if (tr->allocated_snapshot) { |
---|
6532 | 7171 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) |
---|
6533 | 7172 | tracing_reset_online_cpus(&tr->max_buffer); |
---|
6534 | 7173 | else |
---|
6535 | | - tracing_reset(&tr->max_buffer, iter->cpu_file); |
---|
| 7174 | + tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); |
---|
6536 | 7175 | } |
---|
6537 | 7176 | break; |
---|
6538 | 7177 | } |
---|
.. | .. |
---|
6576 | 7215 | struct ftrace_buffer_info *info; |
---|
6577 | 7216 | int ret; |
---|
6578 | 7217 | |
---|
| 7218 | + /* The following checks for tracefs lockdown */ |
---|
6579 | 7219 | ret = tracing_buffers_open(inode, filp); |
---|
6580 | 7220 | if (ret < 0) |
---|
6581 | 7221 | return ret; |
---|
.. | .. |
---|
6588 | 7228 | } |
---|
6589 | 7229 | |
---|
6590 | 7230 | info->iter.snapshot = true; |
---|
6591 | | - info->iter.trace_buffer = &info->iter.tr->max_buffer; |
---|
| 7231 | + info->iter.array_buffer = &info->iter.tr->max_buffer; |
---|
6592 | 7232 | |
---|
6593 | 7233 | return ret; |
---|
6594 | 7234 | } |
---|
.. | .. |
---|
6613 | 7253 | #endif |
---|
6614 | 7254 | |
---|
6615 | 7255 | static const struct file_operations set_tracer_fops = { |
---|
6616 | | - .open = tracing_open_generic, |
---|
| 7256 | + .open = tracing_open_generic_tr, |
---|
6617 | 7257 | .read = tracing_set_trace_read, |
---|
6618 | 7258 | .write = tracing_set_trace_write, |
---|
6619 | 7259 | .llseek = generic_file_llseek, |
---|
| 7260 | + .release = tracing_release_generic_tr, |
---|
6620 | 7261 | }; |
---|
6621 | 7262 | |
---|
6622 | 7263 | static const struct file_operations tracing_pipe_fops = { |
---|
.. | .. |
---|
6697 | 7338 | |
---|
6698 | 7339 | #endif /* CONFIG_TRACER_SNAPSHOT */ |
---|
6699 | 7340 | |
---|
| 7341 | +#define TRACING_LOG_ERRS_MAX 8 |
---|
| 7342 | +#define TRACING_LOG_LOC_MAX 128 |
---|
| 7343 | + |
---|
| 7344 | +#define CMD_PREFIX " Command: " |
---|
| 7345 | + |
---|
| 7346 | +struct err_info { |
---|
| 7347 | + const char **errs; /* ptr to loc-specific array of err strings */ |
---|
| 7348 | + u8 type; /* index into errs -> specific err string */ |
---|
| 7349 | + u8 pos; /* MAX_FILTER_STR_VAL = 256 */ |
---|
| 7350 | + u64 ts; |
---|
| 7351 | +}; |
---|
| 7352 | + |
---|
| 7353 | +struct tracing_log_err { |
---|
| 7354 | + struct list_head list; |
---|
| 7355 | + struct err_info info; |
---|
| 7356 | + char loc[TRACING_LOG_LOC_MAX]; /* err location */ |
---|
| 7357 | + char cmd[MAX_FILTER_STR_VAL]; /* what caused err */ |
---|
| 7358 | +}; |
---|
| 7359 | + |
---|
| 7360 | +static DEFINE_MUTEX(tracing_err_log_lock); |
---|
| 7361 | + |
---|
| 7362 | +static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) |
---|
| 7363 | +{ |
---|
| 7364 | + struct tracing_log_err *err; |
---|
| 7365 | + |
---|
| 7366 | + if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { |
---|
| 7367 | + err = kzalloc(sizeof(*err), GFP_KERNEL); |
---|
| 7368 | + if (!err) |
---|
| 7369 | + err = ERR_PTR(-ENOMEM); |
---|
| 7370 | + else |
---|
| 7371 | + tr->n_err_log_entries++; |
---|
| 7372 | + |
---|
| 7373 | + return err; |
---|
| 7374 | + } |
---|
| 7375 | + |
---|
| 7376 | + err = list_first_entry(&tr->err_log, struct tracing_log_err, list); |
---|
| 7377 | + list_del(&err->list); |
---|
| 7378 | + |
---|
| 7379 | + return err; |
---|
| 7380 | +} |
---|
| 7381 | + |
---|
| 7382 | +/** |
---|
| 7383 | + * err_pos - find the position of a string within a command for error careting |
---|
| 7384 | + * @cmd: The tracing command that caused the error |
---|
| 7385 | + * @str: The string to position the caret at within @cmd |
---|
| 7386 | + * |
---|
| 7387 | + * Finds the position of the first occurrence of @str within @cmd. The
---|
| 7388 | + * return value can be passed to tracing_log_err() for caret placement |
---|
| 7389 | + * within @cmd. |
---|
| 7390 | + * |
---|
| 7391 | + * Returns the index within @cmd of the first occurrence of @str or 0
---|
| 7392 | + * if @str was not found. |
---|
| 7393 | + */ |
---|
| 7394 | +unsigned int err_pos(char *cmd, const char *str) |
---|
| 7395 | +{ |
---|
| 7396 | + char *found; |
---|
| 7397 | + |
---|
| 7398 | + if (WARN_ON(!strlen(cmd))) |
---|
| 7399 | + return 0; |
---|
| 7400 | + |
---|
| 7401 | + found = strstr(cmd, str); |
---|
| 7402 | + if (found) |
---|
| 7403 | + return found - cmd; |
---|
| 7404 | + |
---|
| 7405 | + return 0; |
---|
| 7406 | +} |
---|
| 7407 | + |
---|
| 7408 | +/** |
---|
| 7409 | + * tracing_log_err - write an error to the tracing error log |
---|
| 7410 | + * @tr: The associated trace array for the error (NULL for top level array) |
---|
| 7411 | + * @loc: A string describing where the error occurred |
---|
| 7412 | + * @cmd: The tracing command that caused the error |
---|
| 7413 | + * @errs: The array of loc-specific static error strings |
---|
| 7414 | + * @type: The index into errs[], which produces the specific static err string |
---|
| 7415 | + * @pos: The position the caret should be placed in the cmd |
---|
| 7416 | + * |
---|
| 7417 | + * Writes an error into tracing/error_log of the form: |
---|
| 7418 | + * |
---|
| 7419 | + * <loc>: error: <text> |
---|
| 7420 | + * Command: <cmd> |
---|
| 7421 | + * ^ |
---|
| 7422 | + * |
---|
| 7423 | + * tracing/error_log is a small log file containing the last |
---|
| 7424 | + * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated |
---|
| 7425 | + * unless there has been a tracing error, and the error log can be |
---|
| 7426 | + * cleared and have its memory freed by writing the empty string in |
---|
| 7427 | + * truncation mode to it i.e. echo > tracing/error_log. |
---|
| 7428 | + * |
---|
| 7429 | + * NOTE: the @errs array along with the @type param are used to |
---|
| 7430 | + * produce a static error string - this string is not copied and saved |
---|
| 7431 | + * when the error is logged - only a pointer to it is saved. See |
---|
| 7432 | + * existing callers for examples of how static strings are typically |
---|
| 7433 | + * defined for use with tracing_log_err(). |
---|
| 7434 | + */ |
---|
| 7435 | +void tracing_log_err(struct trace_array *tr, |
---|
| 7436 | + const char *loc, const char *cmd, |
---|
| 7437 | + const char **errs, u8 type, u8 pos) |
---|
| 7438 | +{ |
---|
| 7439 | + struct tracing_log_err *err; |
---|
| 7440 | + |
---|
| 7441 | + if (!tr) |
---|
| 7442 | + tr = &global_trace; |
---|
| 7443 | + |
---|
| 7444 | + mutex_lock(&tracing_err_log_lock); |
---|
| 7445 | + err = get_tracing_log_err(tr); |
---|
| 7446 | + if (PTR_ERR(err) == -ENOMEM) { |
---|
| 7447 | + mutex_unlock(&tracing_err_log_lock); |
---|
| 7448 | + return; |
---|
| 7449 | + } |
---|
| 7450 | + |
---|
| 7451 | + snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); |
---|
| 7452 | + snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd); |
---|
| 7453 | + |
---|
| 7454 | + err->info.errs = errs; |
---|
| 7455 | + err->info.type = type; |
---|
| 7456 | + err->info.pos = pos; |
---|
| 7457 | + err->info.ts = local_clock(); |
---|
| 7458 | + |
---|
| 7459 | + list_add_tail(&err->list, &tr->err_log); |
---|
| 7460 | + mutex_unlock(&tracing_err_log_lock); |
---|
| 7461 | +} |
---|
| 7462 | + |
---|
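The kerneldoc above defines the contract but defers to existing callers for how the static error strings are laid out. The fragment below is a hypothetical kernel-side caller sketch, not a standalone program: the command name, the error table and the field_is_duplicate() helper are invented for illustration, and only the tracing_log_err()/err_pos() signatures come from the code above.

```c
/* Hypothetical kernel-side caller sketch (fragment, not a standalone
 * module): report a parse error for a made-up "my_cmd" command.
 * tracing_log_err() and err_pos() are declared in kernel/trace/trace.h. */
static const char *my_cmd_errs[] = {
	"duplicate field name",		/* MY_ERR_DUP_FIELD */
	"unknown field type",		/* MY_ERR_BAD_TYPE */
};

enum { MY_ERR_DUP_FIELD, MY_ERR_BAD_TYPE };

static int my_parse_field(struct trace_array *tr, char *cmd, const char *field)
{
	if (field_is_duplicate(cmd, field)) {	/* hypothetical helper */
		tracing_log_err(tr, "my_cmd", cmd, my_cmd_errs,
				MY_ERR_DUP_FIELD, err_pos(cmd, field));
		return -EINVAL;
	}
	return 0;
}
```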
| 7463 | +static void clear_tracing_err_log(struct trace_array *tr) |
---|
| 7464 | +{ |
---|
| 7465 | + struct tracing_log_err *err, *next; |
---|
| 7466 | + |
---|
| 7467 | + mutex_lock(&tracing_err_log_lock); |
---|
| 7468 | + list_for_each_entry_safe(err, next, &tr->err_log, list) { |
---|
| 7469 | + list_del(&err->list); |
---|
| 7470 | + kfree(err); |
---|
| 7471 | + } |
---|
| 7472 | + |
---|
| 7473 | + tr->n_err_log_entries = 0; |
---|
| 7474 | + mutex_unlock(&tracing_err_log_lock); |
---|
| 7475 | +} |
---|
| 7476 | + |
---|
| 7477 | +static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) |
---|
| 7478 | +{ |
---|
| 7479 | + struct trace_array *tr = m->private; |
---|
| 7480 | + |
---|
| 7481 | + mutex_lock(&tracing_err_log_lock); |
---|
| 7482 | + |
---|
| 7483 | + return seq_list_start(&tr->err_log, *pos); |
---|
| 7484 | +} |
---|
| 7485 | + |
---|
| 7486 | +static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) |
---|
| 7487 | +{ |
---|
| 7488 | + struct trace_array *tr = m->private; |
---|
| 7489 | + |
---|
| 7490 | + return seq_list_next(v, &tr->err_log, pos); |
---|
| 7491 | +} |
---|
| 7492 | + |
---|
| 7493 | +static void tracing_err_log_seq_stop(struct seq_file *m, void *v) |
---|
| 7494 | +{ |
---|
| 7495 | + mutex_unlock(&tracing_err_log_lock); |
---|
| 7496 | +} |
---|
| 7497 | + |
---|
| 7498 | +static void tracing_err_log_show_pos(struct seq_file *m, u8 pos) |
---|
| 7499 | +{ |
---|
| 7500 | + u8 i; |
---|
| 7501 | + |
---|
| 7502 | + for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) |
---|
| 7503 | + seq_putc(m, ' '); |
---|
| 7504 | + for (i = 0; i < pos; i++) |
---|
| 7505 | + seq_putc(m, ' '); |
---|
| 7506 | + seq_puts(m, "^\n"); |
---|
| 7507 | +} |
---|
| 7508 | + |
---|
| 7509 | +static int tracing_err_log_seq_show(struct seq_file *m, void *v) |
---|
| 7510 | +{ |
---|
| 7511 | + struct tracing_log_err *err = v; |
---|
| 7512 | + |
---|
| 7513 | + if (err) { |
---|
| 7514 | + const char *err_text = err->info.errs[err->info.type]; |
---|
| 7515 | + u64 sec = err->info.ts; |
---|
| 7516 | + u32 nsec; |
---|
| 7517 | + |
---|
| 7518 | + nsec = do_div(sec, NSEC_PER_SEC); |
---|
| 7519 | + seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000, |
---|
| 7520 | + err->loc, err_text); |
---|
| 7521 | + seq_printf(m, "%s", err->cmd); |
---|
| 7522 | + tracing_err_log_show_pos(m, err->info.pos); |
---|
| 7523 | + } |
---|
| 7524 | + |
---|
| 7525 | + return 0; |
---|
| 7526 | +} |
---|
| 7527 | + |
---|
| 7528 | +static const struct seq_operations tracing_err_log_seq_ops = { |
---|
| 7529 | + .start = tracing_err_log_seq_start, |
---|
| 7530 | + .next = tracing_err_log_seq_next, |
---|
| 7531 | + .stop = tracing_err_log_seq_stop, |
---|
| 7532 | + .show = tracing_err_log_seq_show |
---|
| 7533 | +}; |
---|
| 7534 | + |
---|
| 7535 | +static int tracing_err_log_open(struct inode *inode, struct file *file) |
---|
| 7536 | +{ |
---|
| 7537 | + struct trace_array *tr = inode->i_private; |
---|
| 7538 | + int ret = 0; |
---|
| 7539 | + |
---|
| 7540 | + ret = tracing_check_open_get_tr(tr); |
---|
| 7541 | + if (ret) |
---|
| 7542 | + return ret; |
---|
| 7543 | + |
---|
| 7544 | + /* If this file was opened for write, then erase contents */ |
---|
| 7545 | + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) |
---|
| 7546 | + clear_tracing_err_log(tr); |
---|
| 7547 | + |
---|
| 7548 | + if (file->f_mode & FMODE_READ) { |
---|
| 7549 | + ret = seq_open(file, &tracing_err_log_seq_ops); |
---|
| 7550 | + if (!ret) { |
---|
| 7551 | + struct seq_file *m = file->private_data; |
---|
| 7552 | + m->private = tr; |
---|
| 7553 | + } else { |
---|
| 7554 | + trace_array_put(tr); |
---|
| 7555 | + } |
---|
| 7556 | + } |
---|
| 7557 | + return ret; |
---|
| 7558 | +} |
---|
| 7559 | + |
---|
| 7560 | +static ssize_t tracing_err_log_write(struct file *file, |
---|
| 7561 | + const char __user *buffer, |
---|
| 7562 | + size_t count, loff_t *ppos) |
---|
| 7563 | +{ |
---|
| 7564 | + return count; |
---|
| 7565 | +} |
---|
| 7566 | + |
---|
| 7567 | +static int tracing_err_log_release(struct inode *inode, struct file *file) |
---|
| 7568 | +{ |
---|
| 7569 | + struct trace_array *tr = inode->i_private; |
---|
| 7570 | + |
---|
| 7571 | + trace_array_put(tr); |
---|
| 7572 | + |
---|
| 7573 | + if (file->f_mode & FMODE_READ) |
---|
| 7574 | + seq_release(inode, file); |
---|
| 7575 | + |
---|
| 7576 | + return 0; |
---|
| 7577 | +} |
---|
| 7578 | + |
---|
| 7579 | +static const struct file_operations tracing_err_log_fops = { |
---|
| 7580 | + .open = tracing_err_log_open, |
---|
| 7581 | + .write = tracing_err_log_write, |
---|
| 7582 | + .read = seq_read, |
---|
| 7583 | + .llseek = tracing_lseek, |
---|
| 7584 | + .release = tracing_err_log_release, |
---|
| 7585 | +}; |
---|
| 7586 | + |
---|
6700 | 7587 | static int tracing_buffers_open(struct inode *inode, struct file *filp) |
---|
6701 | 7588 | { |
---|
6702 | 7589 | struct trace_array *tr = inode->i_private; |
---|
6703 | 7590 | struct ftrace_buffer_info *info; |
---|
6704 | 7591 | int ret; |
---|
6705 | 7592 | |
---|
6706 | | - if (tracing_disabled) |
---|
6707 | | - return -ENODEV; |
---|
| 7593 | + ret = tracing_check_open_get_tr(tr); |
---|
| 7594 | + if (ret) |
---|
| 7595 | + return ret; |
---|
6708 | 7596 | |
---|
6709 | | - if (trace_array_get(tr) < 0) |
---|
6710 | | - return -ENODEV; |
---|
6711 | | - |
---|
6712 | | - info = kzalloc(sizeof(*info), GFP_KERNEL); |
---|
| 7597 | + info = kvzalloc(sizeof(*info), GFP_KERNEL); |
---|
6713 | 7598 | if (!info) { |
---|
6714 | 7599 | trace_array_put(tr); |
---|
6715 | 7600 | return -ENOMEM; |
---|
.. | .. |
---|
6720 | 7605 | info->iter.tr = tr; |
---|
6721 | 7606 | info->iter.cpu_file = tracing_get_cpu(inode); |
---|
6722 | 7607 | info->iter.trace = tr->current_trace; |
---|
6723 | | - info->iter.trace_buffer = &tr->trace_buffer; |
---|
| 7608 | + info->iter.array_buffer = &tr->array_buffer; |
---|
6724 | 7609 | info->spare = NULL; |
---|
6725 | 7610 | /* Force reading ring buffer for first read */ |
---|
6726 | 7611 | info->read = (unsigned int)-1; |
---|
6727 | 7612 | |
---|
6728 | 7613 | filp->private_data = info; |
---|
6729 | 7614 | |
---|
6730 | | - tr->current_trace->ref++; |
---|
| 7615 | + tr->trace_ref++; |
---|
6731 | 7616 | |
---|
6732 | 7617 | mutex_unlock(&trace_types_lock); |
---|
6733 | 7618 | |
---|
.. | .. |
---|
6765 | 7650 | #endif |
---|
6766 | 7651 | |
---|
6767 | 7652 | if (!info->spare) { |
---|
6768 | | - info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, |
---|
| 7653 | + info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, |
---|
6769 | 7654 | iter->cpu_file); |
---|
6770 | 7655 | if (IS_ERR(info->spare)) { |
---|
6771 | 7656 | ret = PTR_ERR(info->spare); |
---|
.. | .. |
---|
6783 | 7668 | |
---|
6784 | 7669 | again: |
---|
6785 | 7670 | trace_access_lock(iter->cpu_file); |
---|
6786 | | - ret = ring_buffer_read_page(iter->trace_buffer->buffer, |
---|
| 7671 | + ret = ring_buffer_read_page(iter->array_buffer->buffer, |
---|
6787 | 7672 | &info->spare, |
---|
6788 | 7673 | count, |
---|
6789 | 7674 | iter->cpu_file, 0); |
---|
.. | .. |
---|
6794 | 7679 | if ((filp->f_flags & O_NONBLOCK)) |
---|
6795 | 7680 | return -EAGAIN; |
---|
6796 | 7681 | |
---|
6797 | | - ret = wait_on_pipe(iter, false); |
---|
| 7682 | + ret = wait_on_pipe(iter, 0); |
---|
6798 | 7683 | if (ret) |
---|
6799 | 7684 | return ret; |
---|
6800 | 7685 | |
---|
.. | .. |
---|
6828 | 7713 | |
---|
6829 | 7714 | mutex_lock(&trace_types_lock); |
---|
6830 | 7715 | |
---|
6831 | | - iter->tr->current_trace->ref--; |
---|
| 7716 | + iter->tr->trace_ref--; |
---|
6832 | 7717 | |
---|
6833 | 7718 | __trace_array_put(iter->tr); |
---|
6834 | 7719 | |
---|
6835 | 7720 | if (info->spare) |
---|
6836 | | - ring_buffer_free_read_page(iter->trace_buffer->buffer, |
---|
| 7721 | + ring_buffer_free_read_page(iter->array_buffer->buffer, |
---|
6837 | 7722 | info->spare_cpu, info->spare); |
---|
6838 | | - kfree(info); |
---|
| 7723 | + kvfree(info); |
---|
6839 | 7724 | |
---|
6840 | 7725 | mutex_unlock(&trace_types_lock); |
---|
6841 | 7726 | |
---|
.. | .. |
---|
6843 | 7728 | } |
---|
6844 | 7729 | |
---|
6845 | 7730 | struct buffer_ref { |
---|
6846 | | - struct ring_buffer *buffer; |
---|
| 7731 | + struct trace_buffer *buffer; |
---|
6847 | 7732 | void *page; |
---|
6848 | 7733 | int cpu; |
---|
6849 | 7734 | refcount_t refcount; |
---|
.. | .. |
---|
6880 | 7765 | |
---|
6881 | 7766 | /* Pipe buffer operations for a buffer. */ |
---|
6882 | 7767 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { |
---|
6883 | | - .can_merge = 0, |
---|
6884 | | - .confirm = generic_pipe_buf_confirm, |
---|
6885 | 7768 | .release = buffer_pipe_buf_release, |
---|
6886 | | - .steal = generic_pipe_buf_nosteal, |
---|
6887 | 7769 | .get = buffer_pipe_buf_get, |
---|
6888 | 7770 | }; |
---|
6889 | 7771 | |
---|
.. | .. |
---|
6939 | 7821 | |
---|
6940 | 7822 | again: |
---|
6941 | 7823 | trace_access_lock(iter->cpu_file); |
---|
6942 | | - entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); |
---|
| 7824 | + entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); |
---|
6943 | 7825 | |
---|
6944 | 7826 | for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { |
---|
6945 | 7827 | struct page *page; |
---|
.. | .. |
---|
6952 | 7834 | } |
---|
6953 | 7835 | |
---|
6954 | 7836 | refcount_set(&ref->refcount, 1); |
---|
6955 | | - ref->buffer = iter->trace_buffer->buffer; |
---|
| 7837 | + ref->buffer = iter->array_buffer->buffer; |
---|
6956 | 7838 | ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); |
---|
6957 | 7839 | if (IS_ERR(ref->page)) { |
---|
6958 | 7840 | ret = PTR_ERR(ref->page); |
---|
.. | .. |
---|
6980 | 7862 | spd.nr_pages++; |
---|
6981 | 7863 | *ppos += PAGE_SIZE; |
---|
6982 | 7864 | |
---|
6983 | | - entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); |
---|
| 7865 | + entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); |
---|
6984 | 7866 | } |
---|
6985 | 7867 | |
---|
6986 | 7868 | trace_access_unlock(iter->cpu_file); |
---|
.. | .. |
---|
6995 | 7877 | if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) |
---|
6996 | 7878 | goto out; |
---|
6997 | 7879 | |
---|
6998 | | - ret = wait_on_pipe(iter, true); |
---|
| 7880 | + ret = wait_on_pipe(iter, iter->tr->buffer_percent); |
---|
6999 | 7881 | if (ret) |
---|
7000 | 7882 | goto out; |
---|
7001 | 7883 | |
---|
.. | .. |
---|
7024 | 7906 | { |
---|
7025 | 7907 | struct inode *inode = file_inode(filp); |
---|
7026 | 7908 | struct trace_array *tr = inode->i_private; |
---|
7027 | | - struct trace_buffer *trace_buf = &tr->trace_buffer; |
---|
| 7909 | + struct array_buffer *trace_buf = &tr->array_buffer; |
---|
7028 | 7910 | int cpu = tracing_get_cpu(inode); |
---|
7029 | 7911 | struct trace_seq *s; |
---|
7030 | 7912 | unsigned long cnt; |
---|
.. | .. |
---|
7095 | 7977 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, |
---|
7096 | 7978 | size_t cnt, loff_t *ppos) |
---|
7097 | 7979 | { |
---|
7098 | | - unsigned long *p = filp->private_data; |
---|
7099 | | - char buf[64]; /* Not too big for a shallow stack */ |
---|
| 7980 | + ssize_t ret; |
---|
| 7981 | + char *buf; |
---|
7100 | 7982 | int r; |
---|
7101 | 7983 | |
---|
7102 | | - r = scnprintf(buf, 63, "%ld", *p); |
---|
7103 | | - buf[r++] = '\n'; |
---|
| 7984 | + /* 256 should be plenty to hold the amount needed */ |
---|
| 7985 | + buf = kmalloc(256, GFP_KERNEL); |
---|
| 7986 | + if (!buf) |
---|
| 7987 | + return -ENOMEM; |
---|
7104 | 7988 | |
---|
7105 | | - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
---|
| 7989 | + r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n", |
---|
| 7990 | + ftrace_update_tot_cnt, |
---|
| 7991 | + ftrace_number_of_pages, |
---|
| 7992 | + ftrace_number_of_groups); |
---|
| 7993 | + |
---|
| 7994 | + ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
---|
| 7995 | + kfree(buf); |
---|
| 7996 | + return ret; |
---|
7106 | 7997 | } |
---|
7107 | 7998 | |
---|
7108 | 7999 | static const struct file_operations tracing_dyn_info_fops = { |
---|
.. | .. |
---|
7296 | 8187 | |
---|
7297 | 8188 | tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); |
---|
7298 | 8189 | |
---|
7299 | | - WARN_ONCE(!tr->percpu_dir, |
---|
| 8190 | + MEM_FAIL(!tr->percpu_dir, |
---|
7300 | 8191 | "Could not create tracefs directory 'per_cpu/%d'\n", cpu); |
---|
7301 | 8192 | |
---|
7302 | 8193 | return tr->percpu_dir; |
---|
.. | .. |
---|
7405 | 8296 | return cnt; |
---|
7406 | 8297 | } |
---|
7407 | 8298 | |
---|
| 8299 | +static int tracing_open_options(struct inode *inode, struct file *filp) |
---|
| 8300 | +{ |
---|
| 8301 | + struct trace_option_dentry *topt = inode->i_private; |
---|
| 8302 | + int ret; |
---|
| 8303 | + |
---|
| 8304 | + ret = tracing_check_open_get_tr(topt->tr); |
---|
| 8305 | + if (ret) |
---|
| 8306 | + return ret; |
---|
| 8307 | + |
---|
| 8308 | + filp->private_data = inode->i_private; |
---|
| 8309 | + return 0; |
---|
| 8310 | +} |
---|
| 8311 | + |
---|
| 8312 | +static int tracing_release_options(struct inode *inode, struct file *file) |
---|
| 8313 | +{ |
---|
| 8314 | + struct trace_option_dentry *topt = file->private_data; |
---|
| 8315 | + |
---|
| 8316 | + trace_array_put(topt->tr); |
---|
| 8317 | + return 0; |
---|
| 8318 | +} |
---|
7408 | 8319 | |
---|
7409 | 8320 | static const struct file_operations trace_options_fops = { |
---|
7410 | | - .open = tracing_open_generic, |
---|
| 8321 | + .open = tracing_open_options, |
---|
7411 | 8322 | .read = trace_options_read, |
---|
7412 | 8323 | .write = trace_options_write, |
---|
7413 | 8324 | .llseek = generic_file_llseek, |
---|
| 8325 | + .release = tracing_release_options, |
---|
7414 | 8326 | }; |
---|
7415 | 8327 | |
---|
7416 | 8328 | /* |
---|
.. | .. |
---|
7617 | 8529 | for (cnt = 0; opts[cnt].name; cnt++) { |
---|
7618 | 8530 | create_trace_option_file(tr, &topts[cnt], flags, |
---|
7619 | 8531 | &opts[cnt]); |
---|
7620 | | - WARN_ONCE(topts[cnt].entry == NULL, |
---|
| 8532 | + MEM_FAIL(topts[cnt].entry == NULL, |
---|
7621 | 8533 | "Failed to create trace option: %s", |
---|
7622 | 8534 | opts[cnt].name); |
---|
7623 | 8535 | } |
---|
.. | .. |
---|
7674 | 8586 | size_t cnt, loff_t *ppos) |
---|
7675 | 8587 | { |
---|
7676 | 8588 | struct trace_array *tr = filp->private_data; |
---|
7677 | | - struct ring_buffer *buffer = tr->trace_buffer.buffer; |
---|
| 8589 | + struct trace_buffer *buffer = tr->array_buffer.buffer; |
---|
7678 | 8590 | unsigned long val; |
---|
7679 | 8591 | int ret; |
---|
7680 | 8592 | |
---|
.. | .. |
---|
7711 | 8623 | .llseek = default_llseek, |
---|
7712 | 8624 | }; |
---|
7713 | 8625 | |
---|
7714 | | -struct dentry *trace_instance_dir; |
---|
| 8626 | +static ssize_t |
---|
| 8627 | +buffer_percent_read(struct file *filp, char __user *ubuf, |
---|
| 8628 | + size_t cnt, loff_t *ppos) |
---|
| 8629 | +{ |
---|
| 8630 | + struct trace_array *tr = filp->private_data; |
---|
| 8631 | + char buf[64]; |
---|
| 8632 | + int r; |
---|
| 8633 | + |
---|
| 8634 | + r = tr->buffer_percent; |
---|
| 8635 | + r = sprintf(buf, "%d\n", r); |
---|
| 8636 | + |
---|
| 8637 | + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
---|
| 8638 | +} |
---|
| 8639 | + |
---|
| 8640 | +static ssize_t |
---|
| 8641 | +buffer_percent_write(struct file *filp, const char __user *ubuf, |
---|
| 8642 | + size_t cnt, loff_t *ppos) |
---|
| 8643 | +{ |
---|
| 8644 | + struct trace_array *tr = filp->private_data; |
---|
| 8645 | + unsigned long val; |
---|
| 8646 | + int ret; |
---|
| 8647 | + |
---|
| 8648 | + ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
---|
| 8649 | + if (ret) |
---|
| 8650 | + return ret; |
---|
| 8651 | + |
---|
| 8652 | + if (val > 100) |
---|
| 8653 | + return -EINVAL; |
---|
| 8654 | + |
---|
| 8655 | + tr->buffer_percent = val; |
---|
| 8656 | + |
---|
| 8657 | + (*ppos)++; |
---|
| 8658 | + |
---|
| 8659 | + return cnt; |
---|
| 8660 | +} |
---|
| 8661 | + |
---|
| 8662 | +static const struct file_operations buffer_percent_fops = { |
---|
| 8663 | + .open = tracing_open_generic_tr, |
---|
| 8664 | + .read = buffer_percent_read, |
---|
| 8665 | + .write = buffer_percent_write, |
---|
| 8666 | + .release = tracing_release_generic_tr, |
---|
| 8667 | + .llseek = default_llseek, |
---|
| 8668 | +}; |
---|
| 8669 | + |
---|
| 8670 | +static struct dentry *trace_instance_dir; |
---|
7715 | 8671 | |
---|
7716 | 8672 | static void |
---|
7717 | 8673 | init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); |
---|
7718 | 8674 | |
---|
7719 | 8675 | static int |
---|
7720 | | -allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) |
---|
| 8676 | +allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) |
---|
7721 | 8677 | { |
---|
7722 | 8678 | enum ring_buffer_flags rb_flags; |
---|
7723 | 8679 | |
---|
.. | .. |
---|
7737 | 8693 | } |
---|
7738 | 8694 | |
---|
7739 | 8695 | /* Allocate the first page for all buffers */ |
---|
7740 | | - set_buffer_entries(&tr->trace_buffer, |
---|
7741 | | - ring_buffer_size(tr->trace_buffer.buffer, 0)); |
---|
| 8696 | + set_buffer_entries(&tr->array_buffer, |
---|
| 8697 | + ring_buffer_size(tr->array_buffer.buffer, 0)); |
---|
7742 | 8698 | |
---|
7743 | 8699 | return 0; |
---|
7744 | 8700 | } |
---|
.. | .. |
---|
7747 | 8703 | { |
---|
7748 | 8704 | int ret; |
---|
7749 | 8705 | |
---|
7750 | | - ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); |
---|
| 8706 | + ret = allocate_trace_buffer(tr, &tr->array_buffer, size); |
---|
7751 | 8707 | if (ret) |
---|
7752 | 8708 | return ret; |
---|
7753 | 8709 | |
---|
7754 | 8710 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
7755 | 8711 | ret = allocate_trace_buffer(tr, &tr->max_buffer, |
---|
7756 | 8712 | allocate_snapshot ? size : 1); |
---|
7757 | | - if (WARN_ON(ret)) { |
---|
7758 | | - ring_buffer_free(tr->trace_buffer.buffer); |
---|
7759 | | - tr->trace_buffer.buffer = NULL; |
---|
7760 | | - free_percpu(tr->trace_buffer.data); |
---|
7761 | | - tr->trace_buffer.data = NULL; |
---|
| 8713 | + if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { |
---|
| 8714 | + ring_buffer_free(tr->array_buffer.buffer); |
---|
| 8715 | + tr->array_buffer.buffer = NULL; |
---|
| 8716 | + free_percpu(tr->array_buffer.data); |
---|
| 8717 | + tr->array_buffer.data = NULL; |
---|
7762 | 8718 | return -ENOMEM; |
---|
7763 | 8719 | } |
---|
7764 | 8720 | tr->allocated_snapshot = allocate_snapshot; |
---|
.. | .. |
---|
7770 | 8726 | allocate_snapshot = false; |
---|
7771 | 8727 | #endif |
---|
7772 | 8728 | |
---|
7773 | | - /* |
---|
7774 | | - * Because of some magic with the way alloc_percpu() works on |
---|
7775 | | - * x86_64, we need to synchronize the pgd of all the tables, |
---|
7776 | | - * otherwise the trace events that happen in x86_64 page fault |
---|
7777 | | - * handlers can't cope with accessing the chance that a |
---|
7778 | | - * alloc_percpu()'d memory might be touched in the page fault trace |
---|
7779 | | - * event. Oh, and we need to audit all other alloc_percpu() and vmalloc() |
---|
7780 | | - * calls in tracing, because something might get triggered within a |
---|
7781 | | - * page fault trace event! |
---|
7782 | | - */ |
---|
7783 | | - vmalloc_sync_mappings(); |
---|
7784 | | - |
---|
7785 | 8729 | return 0; |
---|
7786 | 8730 | } |
---|
7787 | 8731 | |
---|
7788 | | -static void free_trace_buffer(struct trace_buffer *buf) |
---|
| 8732 | +static void free_trace_buffer(struct array_buffer *buf) |
---|
7789 | 8733 | { |
---|
7790 | 8734 | if (buf->buffer) { |
---|
7791 | 8735 | ring_buffer_free(buf->buffer); |
---|
.. | .. |
---|
7800 | 8744 | if (!tr) |
---|
7801 | 8745 | return; |
---|
7802 | 8746 | |
---|
7803 | | - free_trace_buffer(&tr->trace_buffer); |
---|
| 8747 | + free_trace_buffer(&tr->array_buffer); |
---|
7804 | 8748 | |
---|
7805 | 8749 | #ifdef CONFIG_TRACER_MAX_TRACE |
---|
7806 | 8750 | free_trace_buffer(&tr->max_buffer); |
---|
.. | .. |
---|
7827 | 8771 | static void update_tracer_options(struct trace_array *tr) |
---|
7828 | 8772 | { |
---|
7829 | 8773 | mutex_lock(&trace_types_lock); |
---|
| 8774 | + tracer_options_updated = true; |
---|
7830 | 8775 | __update_tracer_options(tr); |
---|
7831 | 8776 | mutex_unlock(&trace_types_lock); |
---|
7832 | 8777 | } |
---|
7833 | 8778 | |
---|
7834 | | -static int instance_mkdir(const char *name) |
---|
| 8779 | +/* Must have trace_types_lock held */ |
---|
| 8780 | +struct trace_array *trace_array_find(const char *instance) |
---|
| 8781 | +{ |
---|
| 8782 | + struct trace_array *tr, *found = NULL; |
---|
| 8783 | + |
---|
| 8784 | + list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
---|
| 8785 | + if (tr->name && strcmp(tr->name, instance) == 0) { |
---|
| 8786 | + found = tr; |
---|
| 8787 | + break; |
---|
| 8788 | + } |
---|
| 8789 | + } |
---|
| 8790 | + |
---|
| 8791 | + return found; |
---|
| 8792 | +} |
---|
| 8793 | + |
---|
| 8794 | +struct trace_array *trace_array_find_get(const char *instance) |
---|
| 8795 | +{ |
---|
| 8796 | + struct trace_array *tr; |
---|
| 8797 | + |
---|
| 8798 | + mutex_lock(&trace_types_lock); |
---|
| 8799 | + tr = trace_array_find(instance); |
---|
| 8800 | + if (tr) |
---|
| 8801 | + tr->ref++; |
---|
| 8802 | + mutex_unlock(&trace_types_lock); |
---|
| 8803 | + |
---|
| 8804 | + return tr; |
---|
| 8805 | +} |
---|
| 8806 | + |
---|
| 8807 | +static int trace_array_create_dir(struct trace_array *tr) |
---|
| 8808 | +{ |
---|
| 8809 | + int ret; |
---|
| 8810 | + |
---|
| 8811 | + tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); |
---|
| 8812 | + if (!tr->dir) |
---|
| 8813 | + return -EINVAL; |
---|
| 8814 | + |
---|
| 8815 | + ret = event_trace_add_tracer(tr->dir, tr); |
---|
| 8816 | + if (ret) { |
---|
| 8817 | + tracefs_remove(tr->dir); |
---|
| 8818 | + return ret; |
---|
| 8819 | + } |
---|
| 8820 | + |
---|
| 8821 | + init_tracer_tracefs(tr, tr->dir); |
---|
| 8822 | + __update_tracer_options(tr); |
---|
| 8823 | + |
---|
| 8824 | + return ret; |
---|
| 8825 | +} |
---|
| 8826 | + |
---|
| 8827 | +static struct trace_array *trace_array_create(const char *name) |
---|
7835 | 8828 | { |
---|
7836 | 8829 | struct trace_array *tr; |
---|
7837 | 8830 | int ret; |
---|
7838 | 8831 | |
---|
7839 | | - mutex_lock(&event_mutex); |
---|
7840 | | - mutex_lock(&trace_types_lock); |
---|
7841 | | - |
---|
7842 | | - ret = -EEXIST; |
---|
7843 | | - list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
---|
7844 | | - if (tr->name && strcmp(tr->name, name) == 0) |
---|
7845 | | - goto out_unlock; |
---|
7846 | | - } |
---|
7847 | | - |
---|
7848 | 8832 | ret = -ENOMEM; |
---|
7849 | 8833 | tr = kzalloc(sizeof(*tr), GFP_KERNEL); |
---|
7850 | 8834 | if (!tr) |
---|
7851 | | - goto out_unlock; |
---|
| 8835 | + return ERR_PTR(ret); |
---|
7852 | 8836 | |
---|
7853 | 8837 | tr->name = kstrdup(name, GFP_KERNEL); |
---|
7854 | 8838 | if (!tr->name) |
---|
.. | .. |
---|
7870 | 8854 | INIT_LIST_HEAD(&tr->systems); |
---|
7871 | 8855 | INIT_LIST_HEAD(&tr->events); |
---|
7872 | 8856 | INIT_LIST_HEAD(&tr->hist_vars); |
---|
| 8857 | + INIT_LIST_HEAD(&tr->err_log); |
---|
7873 | 8858 | |
---|
7874 | 8859 | if (allocate_trace_buffers(tr, trace_buf_size) < 0) |
---|
7875 | 8860 | goto out_free_tr; |
---|
7876 | 8861 | |
---|
7877 | | - tr->dir = tracefs_create_dir(name, trace_instance_dir); |
---|
7878 | | - if (!tr->dir) |
---|
| 8862 | + if (ftrace_allocate_ftrace_ops(tr) < 0) |
---|
7879 | 8863 | goto out_free_tr; |
---|
7880 | | - |
---|
7881 | | - ret = event_trace_add_tracer(tr->dir, tr); |
---|
7882 | | - if (ret) { |
---|
7883 | | - tracefs_remove_recursive(tr->dir); |
---|
7884 | | - goto out_free_tr; |
---|
7885 | | - } |
---|
7886 | 8864 | |
---|
7887 | 8865 | ftrace_init_trace_array(tr); |
---|
7888 | 8866 | |
---|
7889 | | - init_tracer_tracefs(tr, tr->dir); |
---|
7890 | 8867 | init_trace_flags_index(tr); |
---|
7891 | | - __update_tracer_options(tr); |
---|
| 8868 | + |
---|
| 8869 | + if (trace_instance_dir) { |
---|
| 8870 | + ret = trace_array_create_dir(tr); |
---|
| 8871 | + if (ret) |
---|
| 8872 | + goto out_free_tr; |
---|
| 8873 | + } else |
---|
| 8874 | + __trace_early_add_events(tr); |
---|
7892 | 8875 | |
---|
7893 | 8876 | list_add(&tr->list, &ftrace_trace_arrays); |
---|
7894 | 8877 | |
---|
7895 | | - mutex_unlock(&trace_types_lock); |
---|
7896 | | - mutex_unlock(&event_mutex); |
---|
| 8878 | + tr->ref++; |
---|
7897 | 8879 | |
---|
7898 | | - return 0; |
---|
| 8880 | + return tr; |
---|
7899 | 8881 | |
---|
7900 | 8882 | out_free_tr: |
---|
| 8883 | + ftrace_free_ftrace_ops(tr); |
---|
7901 | 8884 | free_trace_buffers(tr); |
---|
7902 | 8885 | free_cpumask_var(tr->tracing_cpumask); |
---|
7903 | 8886 | kfree(tr->name); |
---|
7904 | 8887 | kfree(tr); |
---|
7905 | 8888 | |
---|
7906 | | - out_unlock: |
---|
7907 | | - mutex_unlock(&trace_types_lock); |
---|
7908 | | - mutex_unlock(&event_mutex); |
---|
7909 | | - |
---|
7910 | | - return ret; |
---|
7911 | | - |
---|
| 8889 | + return ERR_PTR(ret); |
---|
7912 | 8890 | } |
---|
7913 | 8891 | |
---|
7914 | | -static int instance_rmdir(const char *name) |
---|
| 8892 | +static int instance_mkdir(const char *name) |
---|
7915 | 8893 | { |
---|
7916 | 8894 | struct trace_array *tr; |
---|
7917 | | - int found = 0; |
---|
7918 | 8895 | int ret; |
---|
7919 | | - int i; |
---|
7920 | 8896 | |
---|
7921 | 8897 | mutex_lock(&event_mutex); |
---|
7922 | 8898 | mutex_lock(&trace_types_lock); |
---|
7923 | 8899 | |
---|
7924 | | - ret = -ENODEV; |
---|
7925 | | - list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
---|
7926 | | - if (tr->name && strcmp(tr->name, name) == 0) { |
---|
7927 | | - found = 1; |
---|
7928 | | - break; |
---|
7929 | | - } |
---|
7930 | | - } |
---|
7931 | | - if (!found) |
---|
| 8900 | + ret = -EEXIST; |
---|
| 8901 | + if (trace_array_find(name)) |
---|
7932 | 8902 | goto out_unlock; |
---|
7933 | 8903 | |
---|
7934 | | - ret = -EBUSY; |
---|
7935 | | - if (tr->ref || (tr->current_trace && tr->current_trace->ref)) |
---|
7936 | | - goto out_unlock; |
---|
| 8904 | + tr = trace_array_create(name); |
---|
| 8905 | + |
---|
| 8906 | + ret = PTR_ERR_OR_ZERO(tr); |
---|
| 8907 | + |
---|
| 8908 | +out_unlock: |
---|
| 8909 | + mutex_unlock(&trace_types_lock); |
---|
| 8910 | + mutex_unlock(&event_mutex); |
---|
| 8911 | + return ret; |
---|
| 8912 | +} |
---|
| 8913 | + |
---|
| 8914 | +/** |
---|
| 8915 | + * trace_array_get_by_name - Create/Lookup a trace array, given its name. |
---|
| 8916 | + * @name: The name of the trace array to be looked up/created. |
---|
| 8917 | + * |
---|
| 8918 | + * Returns pointer to trace array with given name. |
---|
| 8919 | + * NULL, if it cannot be created. |
---|
| 8920 | + * |
---|
| 8921 | + * NOTE: This function increments the reference counter associated with the |
---|
| 8922 | + * trace array returned. This makes sure it cannot be freed while in use. |
---|
| 8923 | + * Use trace_array_put() once the trace array is no longer needed. |
---|
| 8924 | + * If the trace_array is to be freed, trace_array_destroy() needs to |
---|
| 8925 | + * be called after the trace_array_put(), or simply let user space delete |
---|
| 8926 | + * it from the tracefs instances directory. But until the |
---|
| 8927 | + * trace_array_put() is called, user space can not delete it. |
---|
| 8928 | + * |
---|
| 8929 | + */ |
---|
| 8930 | +struct trace_array *trace_array_get_by_name(const char *name) |
---|
| 8931 | +{ |
---|
| 8932 | + struct trace_array *tr; |
---|
| 8933 | + |
---|
| 8934 | + mutex_lock(&event_mutex); |
---|
| 8935 | + mutex_lock(&trace_types_lock); |
---|
| 8936 | + |
---|
| 8937 | + list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
---|
| 8938 | + if (tr->name && strcmp(tr->name, name) == 0) |
---|
| 8939 | + goto out_unlock; |
---|
| 8940 | + } |
---|
| 8941 | + |
---|
| 8942 | + tr = trace_array_create(name); |
---|
| 8943 | + |
---|
| 8944 | + if (IS_ERR(tr)) |
---|
| 8945 | + tr = NULL; |
---|
| 8946 | +out_unlock: |
---|
| 8947 | + if (tr) |
---|
| 8948 | + tr->ref++; |
---|
| 8949 | + |
---|
| 8950 | + mutex_unlock(&trace_types_lock); |
---|
| 8951 | + mutex_unlock(&event_mutex); |
---|
| 8952 | + return tr; |
---|
| 8953 | +} |
---|
| 8954 | +EXPORT_SYMBOL_GPL(trace_array_get_by_name); |
---|
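The kernel-doc above spells out the reference-counting contract for the exported instance API: trace_array_get_by_name() looks up or creates a named instance and takes a reference, trace_array_put() drops that reference, and trace_array_destroy() (added further down in this hunk) removes the instance once the caller's reference is gone. A minimal module-side sketch of that lifecycle, assuming the declarations this kernel exports through <linux/trace.h>; the instance name "sample_instance" and the -ENOMEM mapping are purely illustrative:

#include <linux/module.h>
#include <linux/trace.h>

static struct trace_array *sample_tr;

static int __init sample_init(void)
{
	/* Creates the instance if it does not exist; returned with a reference held. */
	sample_tr = trace_array_get_by_name("sample_instance");
	if (!sample_tr)
		return -ENOMEM;
	return 0;
}

static void __exit sample_exit(void)
{
	/* Per the kernel-doc: drop our reference first, then destroy the instance. */
	trace_array_put(sample_tr);
	trace_array_destroy(sample_tr);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");

Alternatively, leaving out the trace_array_destroy() call keeps the instance alive so it can be removed later from the tracefs instances directory by user space, as the kernel-doc notes.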
| 8955 | + |
---|
| 8956 | +static int __remove_instance(struct trace_array *tr) |
---|
| 8957 | +{ |
---|
| 8958 | + int i; |
---|
| 8959 | + |
---|
| 8960 | + /* Reference counter for a newly created trace array = 1. */ |
---|
| 8961 | + if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) |
---|
| 8962 | + return -EBUSY; |
---|
7937 | 8963 | |
---|
7938 | 8964 | list_del(&tr->list); |
---|
7939 | 8965 | |
---|
.. | .. |
---|
7948 | 8974 | event_trace_del_tracer(tr); |
---|
7949 | 8975 | ftrace_clear_pids(tr); |
---|
7950 | 8976 | ftrace_destroy_function_files(tr); |
---|
7951 | | - tracefs_remove_recursive(tr->dir); |
---|
| 8977 | + tracefs_remove(tr->dir); |
---|
7952 | 8978 | free_trace_buffers(tr); |
---|
| 8979 | + clear_tracing_err_log(tr); |
---|
7953 | 8980 | |
---|
7954 | 8981 | for (i = 0; i < tr->nr_topts; i++) { |
---|
7955 | 8982 | kfree(tr->topts[i].topts); |
---|
.. | .. |
---|
7960 | 8987 | kfree(tr->name); |
---|
7961 | 8988 | kfree(tr); |
---|
7962 | 8989 | |
---|
7963 | | - ret = 0; |
---|
| 8990 | + return 0; |
---|
| 8991 | +} |
---|
7964 | 8992 | |
---|
7965 | | - out_unlock: |
---|
| 8993 | +int trace_array_destroy(struct trace_array *this_tr) |
---|
| 8994 | +{ |
---|
| 8995 | + struct trace_array *tr; |
---|
| 8996 | + int ret; |
---|
| 8997 | + |
---|
| 8998 | + if (!this_tr) |
---|
| 8999 | + return -EINVAL; |
---|
| 9000 | + |
---|
| 9001 | + mutex_lock(&event_mutex); |
---|
| 9002 | + mutex_lock(&trace_types_lock); |
---|
| 9003 | + |
---|
| 9004 | + ret = -ENODEV; |
---|
| 9005 | + |
---|
| 9006 | + /* Making sure trace array exists before destroying it. */ |
---|
| 9007 | + list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
---|
| 9008 | + if (tr == this_tr) { |
---|
| 9009 | + ret = __remove_instance(tr); |
---|
| 9010 | + break; |
---|
| 9011 | + } |
---|
| 9012 | + } |
---|
| 9013 | + |
---|
| 9014 | + mutex_unlock(&trace_types_lock); |
---|
| 9015 | + mutex_unlock(&event_mutex); |
---|
| 9016 | + |
---|
| 9017 | + return ret; |
---|
| 9018 | +} |
---|
| 9019 | +EXPORT_SYMBOL_GPL(trace_array_destroy); |
---|
| 9020 | + |
---|
| 9021 | +static int instance_rmdir(const char *name) |
---|
| 9022 | +{ |
---|
| 9023 | + struct trace_array *tr; |
---|
| 9024 | + int ret; |
---|
| 9025 | + |
---|
| 9026 | + mutex_lock(&event_mutex); |
---|
| 9027 | + mutex_lock(&trace_types_lock); |
---|
| 9028 | + |
---|
| 9029 | + ret = -ENODEV; |
---|
| 9030 | + tr = trace_array_find(name); |
---|
| 9031 | + if (tr) |
---|
| 9032 | + ret = __remove_instance(tr); |
---|
| 9033 | + |
---|
7966 | 9034 | mutex_unlock(&trace_types_lock); |
---|
7967 | 9035 | mutex_unlock(&event_mutex); |
---|
7968 | 9036 | |
---|
.. | .. |
---|
7971 | 9039 | |
---|
7972 | 9040 | static __init void create_trace_instances(struct dentry *d_tracer) |
---|
7973 | 9041 | { |
---|
| 9042 | + struct trace_array *tr; |
---|
| 9043 | + |
---|
7974 | 9044 | trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, |
---|
7975 | 9045 | instance_mkdir, |
---|
7976 | 9046 | instance_rmdir); |
---|
7977 | | - if (WARN_ON(!trace_instance_dir)) |
---|
| 9047 | + if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) |
---|
7978 | 9048 | return; |
---|
| 9049 | + |
---|
| 9050 | + mutex_lock(&event_mutex); |
---|
| 9051 | + mutex_lock(&trace_types_lock); |
---|
| 9052 | + |
---|
| 9053 | + list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
---|
| 9054 | + if (!tr->name) |
---|
| 9055 | + continue; |
---|
| 9056 | + if (MEM_FAIL(trace_array_create_dir(tr) < 0, |
---|
| 9057 | + "Failed to create instance directory\n")) |
---|
| 9058 | + break; |
---|
| 9059 | + } |
---|
| 9060 | + |
---|
| 9061 | + mutex_unlock(&trace_types_lock); |
---|
| 9062 | + mutex_unlock(&event_mutex); |
---|
7979 | 9063 | } |
---|
7980 | 9064 | |
---|
7981 | 9065 | static void |
---|
.. | .. |
---|
8032 | 9116 | trace_create_file("timestamp_mode", 0444, d_tracer, tr, |
---|
8033 | 9117 | &trace_time_stamp_mode_fops); |
---|
8034 | 9118 | |
---|
| 9119 | + tr->buffer_percent = 50; |
---|
| 9120 | + |
---|
| 9121 | + trace_create_file("buffer_percent", 0444, d_tracer, |
---|
| 9122 | + tr, &buffer_percent_fops); |
---|
| 9123 | + |
---|
8035 | 9124 | create_trace_options_dir(tr); |
---|
8036 | 9125 | |
---|
8037 | 9126 | #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) |
---|
8038 | | - trace_create_file("tracing_max_latency", 0644, d_tracer, |
---|
8039 | | - &tr->max_latency, &tracing_max_lat_fops); |
---|
| 9127 | + trace_create_maxlat_file(tr, d_tracer); |
---|
8040 | 9128 | #endif |
---|
8041 | 9129 | |
---|
8042 | 9130 | if (ftrace_create_function_files(tr, d_tracer)) |
---|
8043 | | - WARN(1, "Could not allocate function filter files"); |
---|
| 9131 | + MEM_FAIL(1, "Could not allocate function filter files"); |
---|
8044 | 9132 | |
---|
8045 | 9133 | #ifdef CONFIG_TRACER_SNAPSHOT |
---|
8046 | 9134 | trace_create_file("snapshot", 0644, d_tracer, |
---|
8047 | 9135 | tr, &snapshot_fops); |
---|
8048 | 9136 | #endif |
---|
| 9137 | + |
---|
| 9138 | + trace_create_file("error_log", 0644, d_tracer, |
---|
| 9139 | + tr, &tracing_err_log_fops); |
---|
8049 | 9140 | |
---|
8050 | 9141 | for_each_tracing_cpu(cpu) |
---|
8051 | 9142 | tracing_init_tracefs_percpu(tr, cpu); |
---|
.. | .. |
---|
8053 | 9144 | ftrace_init_tracefs(tr, d_tracer); |
---|
8054 | 9145 | } |
---|
8055 | 9146 | |
---|
| 9147 | +#ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT |
---|
8056 | 9148 | static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore) |
---|
8057 | 9149 | { |
---|
8058 | 9150 | struct vfsmount *mnt; |
---|
.. | .. |
---|
8074 | 9166 | |
---|
8075 | 9167 | return mnt; |
---|
8076 | 9168 | } |
---|
| 9169 | +#endif |
---|
8077 | 9170 | |
---|
8078 | 9171 | /** |
---|
8079 | 9172 | * tracing_init_dentry - initialize top level trace array |
---|
.. | .. |
---|
8082 | 9175 | * directory. It is called via fs_initcall() by any of the boot up code |
---|
8083 | 9176 | * and expects to return the dentry of the top level tracing directory. |
---|
8084 | 9177 | */ |
---|
8085 | | -struct dentry *tracing_init_dentry(void) |
---|
| 9178 | +int tracing_init_dentry(void) |
---|
8086 | 9179 | { |
---|
8087 | 9180 | struct trace_array *tr = &global_trace; |
---|
8088 | 9181 | |
---|
| 9182 | + if (security_locked_down(LOCKDOWN_TRACEFS)) { |
---|
| 9183 | + pr_warn("Tracing disabled due to lockdown\n"); |
---|
| 9184 | + return -EPERM; |
---|
| 9185 | + } |
---|
| 9186 | + |
---|
8089 | 9187 | /* The top level trace array uses NULL as parent */ |
---|
8090 | 9188 | if (tr->dir) |
---|
8091 | | - return NULL; |
---|
| 9189 | + return 0; |
---|
8092 | 9190 | |
---|
8093 | | - if (WARN_ON(!tracefs_initialized()) || |
---|
8094 | | - (IS_ENABLED(CONFIG_DEBUG_FS) && |
---|
8095 | | - WARN_ON(!debugfs_initialized()))) |
---|
8096 | | - return ERR_PTR(-ENODEV); |
---|
| 9191 | + if (WARN_ON(!tracefs_initialized())) |
---|
| 9192 | + return -ENODEV; |
---|
8097 | 9193 | |
---|
| 9194 | +#ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT |
---|
8098 | 9195 | /* |
---|
8099 | 9196 | * As there may still be users that expect the tracing |
---|
8100 | 9197 | * files to exist in debugfs/tracing, we must automount |
---|
.. | .. |
---|
8103 | 9200 | */ |
---|
8104 | 9201 | tr->dir = debugfs_create_automount("tracing", NULL, |
---|
8105 | 9202 | trace_automount, NULL); |
---|
8106 | | - if (!tr->dir) { |
---|
8107 | | - pr_warn_once("Could not create debugfs directory 'tracing'\n"); |
---|
8108 | | - return ERR_PTR(-ENOMEM); |
---|
8109 | | - } |
---|
| 9203 | +#else |
---|
| 9204 | + tr->dir = ERR_PTR(-ENODEV); |
---|
| 9205 | +#endif |
---|
8110 | 9206 | |
---|
8111 | | - return NULL; |
---|
| 9207 | + return 0; |
---|
8112 | 9208 | } |
---|
8113 | 9209 | |
---|
8114 | 9210 | extern struct trace_eval_map *__start_ftrace_eval_maps[]; |
---|
.. | .. |
---|
8184 | 9280 | break; |
---|
8185 | 9281 | } |
---|
8186 | 9282 | |
---|
8187 | | - return 0; |
---|
| 9283 | + return NOTIFY_OK; |
---|
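The buffer_percent_fops definition above backs a new per-instance tracefs file, buffer_percent (created later in this hunk with a default of 50); buffer_percent_write() accepts a value from 0 to 100, which the kernel uses as the fill watermark for waking blocked trace readers. A minimal userspace sketch, assuming tracefs is mounted at /sys/kernel/tracing and the caller is permitted to write the file:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/buffer_percent";
	const char *val = "0\n";	/* any value in 0..100 is accepted by the write handler */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}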
8188 | 9284 | } |
---|
8189 | 9285 | |
---|
8190 | 9286 | static struct notifier_block trace_module_nb = { |
---|
.. | .. |
---|
8195 | 9291 | |
---|
8196 | 9292 | static __init int tracer_init_tracefs(void) |
---|
8197 | 9293 | { |
---|
8198 | | - struct dentry *d_tracer; |
---|
| 9294 | + int ret; |
---|
8199 | 9295 | |
---|
8200 | 9296 | trace_access_lock_init(); |
---|
8201 | 9297 | |
---|
8202 | | - d_tracer = tracing_init_dentry(); |
---|
8203 | | - if (IS_ERR(d_tracer)) |
---|
| 9298 | + ret = tracing_init_dentry(); |
---|
| 9299 | + if (ret) |
---|
8204 | 9300 | return 0; |
---|
8205 | 9301 | |
---|
8206 | 9302 | event_trace_init(); |
---|
8207 | 9303 | |
---|
8208 | | - init_tracer_tracefs(&global_trace, d_tracer); |
---|
8209 | | - ftrace_init_tracefs_toplevel(&global_trace, d_tracer); |
---|
| 9304 | + init_tracer_tracefs(&global_trace, NULL); |
---|
| 9305 | + ftrace_init_tracefs_toplevel(&global_trace, NULL); |
---|
8210 | 9306 | |
---|
8211 | | - trace_create_file("tracing_thresh", 0644, d_tracer, |
---|
| 9307 | + trace_create_file("tracing_thresh", 0644, NULL, |
---|
8212 | 9308 | &global_trace, &tracing_thresh_fops); |
---|
8213 | 9309 | |
---|
8214 | | - trace_create_file("README", 0444, d_tracer, |
---|
| 9310 | + trace_create_file("README", 0444, NULL, |
---|
8215 | 9311 | NULL, &tracing_readme_fops); |
---|
8216 | 9312 | |
---|
8217 | | - trace_create_file("saved_cmdlines", 0444, d_tracer, |
---|
| 9313 | + trace_create_file("saved_cmdlines", 0444, NULL, |
---|
8218 | 9314 | NULL, &tracing_saved_cmdlines_fops); |
---|
8219 | 9315 | |
---|
8220 | | - trace_create_file("saved_cmdlines_size", 0644, d_tracer, |
---|
| 9316 | + trace_create_file("saved_cmdlines_size", 0644, NULL, |
---|
8221 | 9317 | NULL, &tracing_saved_cmdlines_size_fops); |
---|
8222 | 9318 | |
---|
8223 | | - trace_create_file("saved_tgids", 0444, d_tracer, |
---|
| 9319 | + trace_create_file("saved_tgids", 0444, NULL, |
---|
8224 | 9320 | NULL, &tracing_saved_tgids_fops); |
---|
8225 | 9321 | |
---|
8226 | 9322 | trace_eval_init(); |
---|
8227 | 9323 | |
---|
8228 | | - trace_create_eval_file(d_tracer); |
---|
| 9324 | + trace_create_eval_file(NULL); |
---|
8229 | 9325 | |
---|
8230 | 9326 | #ifdef CONFIG_MODULES |
---|
8231 | 9327 | register_module_notifier(&trace_module_nb); |
---|
8232 | 9328 | #endif |
---|
8233 | 9329 | |
---|
8234 | 9330 | #ifdef CONFIG_DYNAMIC_FTRACE |
---|
8235 | | - trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
---|
8236 | | - &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
---|
| 9331 | + trace_create_file("dyn_ftrace_total_info", 0444, NULL, |
---|
| 9332 | + NULL, &tracing_dyn_info_fops); |
---|
8237 | 9333 | #endif |
---|
8238 | 9334 | |
---|
8239 | | - create_trace_instances(d_tracer); |
---|
| 9335 | + create_trace_instances(NULL); |
---|
8240 | 9336 | |
---|
8241 | 9337 | update_tracer_options(&global_trace); |
---|
8242 | 9338 | |
---|
.. | .. |
---|
8246 | 9342 | static int trace_panic_handler(struct notifier_block *this, |
---|
8247 | 9343 | unsigned long event, void *unused) |
---|
8248 | 9344 | { |
---|
| 9345 | + bool ftrace_check = false; |
---|
| 9346 | + |
---|
| 9347 | + trace_android_vh_ftrace_oops_enter(&ftrace_check); |
---|
| 9348 | + |
---|
| 9349 | + if (ftrace_check) |
---|
| 9350 | + return NOTIFY_OK; |
---|
| 9351 | + |
---|
8249 | 9352 | if (ftrace_dump_on_oops) |
---|
8250 | 9353 | ftrace_dump(ftrace_dump_on_oops); |
---|
| 9354 | + |
---|
| 9355 | + trace_android_vh_ftrace_oops_exit(&ftrace_check); |
---|
8251 | 9356 | return NOTIFY_OK; |
---|
8252 | 9357 | } |
---|
8253 | 9358 | |
---|
.. | .. |
---|
8261 | 9366 | unsigned long val, |
---|
8262 | 9367 | void *data) |
---|
8263 | 9368 | { |
---|
| 9369 | + bool ftrace_check = false; |
---|
| 9370 | + |
---|
| 9371 | + trace_android_vh_ftrace_oops_enter(&ftrace_check); |
---|
| 9372 | + |
---|
| 9373 | + if (ftrace_check) |
---|
| 9374 | + return NOTIFY_OK; |
---|
| 9375 | + |
---|
8264 | 9376 | switch (val) { |
---|
8265 | 9377 | case DIE_OOPS: |
---|
8266 | 9378 | if (ftrace_dump_on_oops) |
---|
.. | .. |
---|
8269 | 9381 | default: |
---|
8270 | 9382 | break; |
---|
8271 | 9383 | } |
---|
| 9384 | + |
---|
| 9385 | + trace_android_vh_ftrace_oops_exit(&ftrace_check); |
---|
8272 | 9386 | return NOTIFY_OK; |
---|
8273 | 9387 | } |
---|
8274 | 9388 | |
---|
.. | .. |
---|
8293 | 9407 | void |
---|
8294 | 9408 | trace_printk_seq(struct trace_seq *s) |
---|
8295 | 9409 | { |
---|
| 9410 | + bool dump_printk = true; |
---|
| 9411 | + |
---|
8296 | 9412 | /* Probably should print a warning here. */ |
---|
8297 | 9413 | if (s->seq.len >= TRACE_MAX_PRINT) |
---|
8298 | 9414 | s->seq.len = TRACE_MAX_PRINT; |
---|
.. | .. |
---|
8308 | 9424 | /* should be zero ended, but we are paranoid. */ |
---|
8309 | 9425 | s->buffer[s->seq.len] = 0; |
---|
8310 | 9426 | |
---|
8311 | | - printk(KERN_TRACE "%s", s->buffer); |
---|
| 9427 | + trace_android_vh_ftrace_dump_buffer(s, &dump_printk); |
---|
| 9428 | + if (dump_printk) |
---|
| 9429 | + printk(KERN_TRACE "%s", s->buffer); |
---|
8312 | 9430 | |
---|
8313 | 9431 | trace_seq_init(s); |
---|
8314 | 9432 | } |
---|
.. | .. |
---|
8318 | 9436 | iter->tr = &global_trace; |
---|
8319 | 9437 | iter->trace = iter->tr->current_trace; |
---|
8320 | 9438 | iter->cpu_file = RING_BUFFER_ALL_CPUS; |
---|
8321 | | - iter->trace_buffer = &global_trace.trace_buffer; |
---|
| 9439 | + iter->array_buffer = &global_trace.array_buffer; |
---|
8322 | 9440 | |
---|
8323 | 9441 | if (iter->trace && iter->trace->open) |
---|
8324 | 9442 | iter->trace->open(iter); |
---|
8325 | 9443 | |
---|
8326 | 9444 | /* Annotate start of buffers if we had overruns */ |
---|
8327 | | - if (ring_buffer_overruns(iter->trace_buffer->buffer)) |
---|
| 9445 | + if (ring_buffer_overruns(iter->array_buffer->buffer)) |
---|
8328 | 9446 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
---|
8329 | 9447 | |
---|
8330 | 9448 | /* Output in nanoseconds only if we are using a clock in nanoseconds. */ |
---|
.. | .. |
---|
8341 | 9459 | unsigned int old_userobj; |
---|
8342 | 9460 | unsigned long flags; |
---|
8343 | 9461 | int cnt = 0, cpu; |
---|
| 9462 | + bool ftrace_check = false; |
---|
| 9463 | + unsigned long size; |
---|
8344 | 9464 | |
---|
8345 | 9465 | /* Only allow one dump user at a time. */ |
---|
8346 | 9466 | if (atomic_inc_return(&dump_running) != 1) { |
---|
.. | .. |
---|
8363 | 9483 | |
---|
8364 | 9484 | /* Simulate the iterator */ |
---|
8365 | 9485 | trace_init_global_iter(&iter); |
---|
| 9486 | + /* Can not use kmalloc for iter.temp */ |
---|
| 9487 | + iter.temp = static_temp_buf; |
---|
| 9488 | + iter.temp_size = STATIC_TEMP_BUF_SIZE; |
---|
8366 | 9489 | |
---|
8367 | 9490 | for_each_tracing_cpu(cpu) { |
---|
8368 | | - atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); |
---|
| 9491 | + atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); |
---|
| 9492 | + size = ring_buffer_size(iter.array_buffer->buffer, cpu); |
---|
| 9493 | + trace_android_vh_ftrace_size_check(size, &ftrace_check); |
---|
8369 | 9494 | } |
---|
8370 | 9495 | |
---|
8371 | 9496 | old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; |
---|
8372 | 9497 | |
---|
8373 | 9498 | /* don't look at user memory in panic mode */ |
---|
8374 | 9499 | tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; |
---|
| 9500 | + |
---|
| 9501 | + if (ftrace_check) |
---|
| 9502 | + goto out_enable; |
---|
8375 | 9503 | |
---|
8376 | 9504 | switch (oops_dump_mode) { |
---|
8377 | 9505 | case DUMP_ALL: |
---|
.. | .. |
---|
8396 | 9524 | } |
---|
8397 | 9525 | |
---|
8398 | 9526 | /* |
---|
8399 | | - * We need to stop all tracing on all CPUS to read the |
---|
| 9527 | + * We need to stop all tracing on all CPUS to read |
---|
8400 | 9528 | * the next buffer. This is a bit expensive, but is |
---|
8401 | 9529 | * not done often. We fill all what we can read, |
---|
8402 | 9530 | * and then release the locks again. |
---|
8403 | 9531 | */ |
---|
8404 | 9532 | |
---|
8405 | 9533 | while (!trace_empty(&iter)) { |
---|
| 9534 | + ftrace_check = true; |
---|
8406 | 9535 | |
---|
8407 | 9536 | if (!cnt) |
---|
8408 | 9537 | printk(KERN_TRACE "---------------------------------\n"); |
---|
.. | .. |
---|
8410 | 9539 | cnt++; |
---|
8411 | 9540 | |
---|
8412 | 9541 | trace_iterator_reset(&iter); |
---|
8413 | | - iter.iter_flags |= TRACE_FILE_LAT_FMT; |
---|
| 9542 | + trace_android_vh_ftrace_format_check(&ftrace_check); |
---|
| 9543 | + if (ftrace_check) |
---|
| 9544 | + iter.iter_flags |= TRACE_FILE_LAT_FMT; |
---|
8414 | 9545 | |
---|
8415 | 9546 | if (trace_find_next_entry_inc(&iter) != NULL) { |
---|
8416 | 9547 | int ret; |
---|
.. | .. |
---|
8433 | 9564 | tr->trace_flags |= old_userobj; |
---|
8434 | 9565 | |
---|
8435 | 9566 | for_each_tracing_cpu(cpu) { |
---|
8436 | | - atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); |
---|
| 9567 | + atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); |
---|
8437 | 9568 | } |
---|
8438 | 9569 | atomic_dec(&dump_running); |
---|
8439 | 9570 | printk_nmi_direct_exit(); |
---|
.. | .. |
---|
8532 | 9663 | int ring_buf_size; |
---|
8533 | 9664 | int ret = -ENOMEM; |
---|
8534 | 9665 | |
---|
| 9666 | + |
---|
| 9667 | + if (security_locked_down(LOCKDOWN_TRACEFS)) { |
---|
| 9668 | + pr_warn("Tracing disabled due to lockdown\n"); |
---|
| 9669 | + return -EPERM; |
---|
| 9670 | + } |
---|
| 9671 | + |
---|
8535 | 9672 | /* |
---|
8536 | | - * Make sure we don't accidently add more trace options |
---|
| 9673 | + * Make sure we don't accidentally add more trace options |
---|
8537 | 9674 | * than we have bits for. |
---|
8538 | 9675 | */ |
---|
8539 | 9676 | BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); |
---|
.. | .. |
---|
8562 | 9699 | |
---|
8563 | 9700 | /* |
---|
8564 | 9701 | * The prepare callbacks allocates some memory for the ring buffer. We |
---|
8565 | | - * don't free the buffer if the if the CPU goes down. If we were to free |
---|
| 9702 | + * don't free the buffer if the CPU goes down. If we were to free |
---|
8566 | 9703 | * the buffer, then the user would lose any trace that was in the |
---|
8567 | 9704 | * buffer. The memory will be removed once the "instance" is removed. |
---|
8568 | 9705 | */ |
---|
.. | .. |
---|
8582 | 9719 | |
---|
8583 | 9720 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
---|
8584 | 9721 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { |
---|
8585 | | - printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
---|
8586 | | - WARN_ON(1); |
---|
| 9722 | + MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n"); |
---|
8587 | 9723 | goto out_free_savedcmd; |
---|
8588 | 9724 | } |
---|
8589 | 9725 | |
---|
.. | .. |
---|
8628 | 9764 | INIT_LIST_HEAD(&global_trace.systems); |
---|
8629 | 9765 | INIT_LIST_HEAD(&global_trace.events); |
---|
8630 | 9766 | INIT_LIST_HEAD(&global_trace.hist_vars); |
---|
| 9767 | + INIT_LIST_HEAD(&global_trace.err_log); |
---|
8631 | 9768 | list_add(&global_trace.list, &ftrace_trace_arrays); |
---|
8632 | 9769 | |
---|
8633 | 9770 | apply_trace_boot_options(); |
---|
.. | .. |
---|
8655 | 9792 | if (tracepoint_printk) { |
---|
8656 | 9793 | tracepoint_print_iter = |
---|
8657 | 9794 | kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); |
---|
8658 | | - if (WARN_ON(!tracepoint_print_iter)) |
---|
| 9795 | + if (MEM_FAIL(!tracepoint_print_iter, |
---|
| 9796 | + "Failed to allocate trace iterator\n")) |
---|
8659 | 9797 | tracepoint_printk = 0; |
---|
8660 | 9798 | else |
---|
8661 | 9799 | static_key_enable(&tracepoint_printk_key.key); |
---|
8662 | 9800 | } |
---|
8663 | 9801 | tracer_alloc_buffers(); |
---|
| 9802 | + |
---|
| 9803 | + init_events(); |
---|
8664 | 9804 | } |
---|
8665 | 9805 | |
---|
8666 | 9806 | void __init trace_init(void) |
---|
.. | .. |
---|
8695 | 9835 | { |
---|
8696 | 9836 | /* sched_clock_stable() is determined in late_initcall */ |
---|
8697 | 9837 | if (!trace_boot_clock && !sched_clock_stable()) { |
---|
| 9838 | + if (security_locked_down(LOCKDOWN_TRACEFS)) { |
---|
| 9839 | + pr_warn("Can not set tracing clock due to lockdown\n"); |
---|
| 9840 | + return -EPERM; |
---|
| 9841 | + } |
---|
| 9842 | + |
---|
8698 | 9843 | printk(KERN_WARNING |
---|
8699 | 9844 | "Unstable clock detected, switching default tracing clock to \"global\"\n" |
---|
8700 | 9845 | "If you want to keep using the local clock, then add:\n" |
---|