| .. | .. |
|---|
| 17 | 17 | #include <linux/stacktrace.h> |
|---|
| 18 | 18 | #include <linux/writeback.h> |
|---|
| 19 | 19 | #include <linux/kallsyms.h> |
|---|
| 20 | +#include <linux/security.h> |
|---|
| 20 | 21 | #include <linux/seq_file.h> |
|---|
| 21 | 22 | #include <linux/notifier.h> |
|---|
| 22 | 23 | #include <linux/irqflags.h> |
|---|
| .. | .. |
|---|
| 44 | 45 | #include <linux/trace.h> |
|---|
| 45 | 46 | #include <linux/sched/clock.h> |
|---|
| 46 | 47 | #include <linux/sched/rt.h> |
|---|
| 48 | +#include <linux/fsnotify.h> |
|---|
| 49 | +#include <linux/irq_work.h> |
|---|
| 50 | +#include <linux/workqueue.h> |
|---|
| 51 | +#include <trace/hooks/ftrace_dump.h> |
|---|
| 47 | 52 | |
|---|
| 48 | 53 | #include "trace.h" |
|---|
| 49 | 54 | #include "trace_output.h" |
|---|
| .. | .. |
|---|
| 64 | 69 | static bool __read_mostly tracing_selftest_running; |
|---|
| 65 | 70 | |
|---|
| 66 | 71 | /* |
|---|
| 67 | | - * If a tracer is running, we do not want to run SELFTEST. |
|---|
| 72 | + * If boot-time tracing including tracers/events via kernel cmdline |
|---|
| 73 | + * is running, we do not want to run SELFTEST. |
|---|
| 68 | 74 | */ |
|---|
| 69 | 75 | bool __read_mostly tracing_selftest_disabled; |
|---|
| 76 | + |
|---|
| 77 | +#ifdef CONFIG_FTRACE_STARTUP_TEST |
|---|
| 78 | +void __init disable_tracing_selftest(const char *reason) |
|---|
| 79 | +{ |
|---|
| 80 | + if (!tracing_selftest_disabled) { |
|---|
| 81 | + tracing_selftest_disabled = true; |
|---|
| 82 | + pr_info("Ftrace startup test is disabled due to %s\n", reason); |
|---|
| 83 | + } |
|---|
| 84 | +} |
|---|
| 85 | +#endif |
|---|
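The new `disable_tracing_selftest()` helper gives boot-time tracing paths one place to turn the startup tests off with a logged reason; `register_tracer()` later in this patch is converted to use it as well. A minimal sketch of a caller, assuming an `__init` setup routine inside kernel/trace/ (the function name and reason string are illustrative, not taken from this patch):

```c
/* Illustrative caller: a boot-time tracing setup routine opts out of the
 * startup tests because its own tracers/events are already running. */
static int __init example_boot_tracing_init(void)
{
	/* ... enable tracers/events requested on the kernel command line ... */
	disable_tracing_selftest("running boot-time tracing");
	return 0;
}
```
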
| 70 | 86 | |
|---|
| 71 | 87 | /* Pipe tracepoints to printk */ |
|---|
| 72 | 88 | struct trace_iterator *tracepoint_print_iter; |
|---|
| .. | .. |
|---|
| 158 | 174 | static union trace_eval_map_item *trace_eval_maps; |
|---|
| 159 | 175 | #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ |
|---|
| 160 | 176 | |
|---|
| 161 | | -static int tracing_set_tracer(struct trace_array *tr, const char *buf); |
|---|
| 177 | +int tracing_set_tracer(struct trace_array *tr, const char *buf); |
|---|
| 178 | +static void ftrace_trace_userstack(struct trace_array *tr, |
|---|
| 179 | + struct trace_buffer *buffer, |
|---|
| 180 | + unsigned long flags, int pc); |
|---|
| 162 | 181 | |
|---|
| 163 | 182 | #define MAX_TRACER_SIZE 100 |
|---|
| 164 | 183 | static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; |
|---|
| .. | .. |
|---|
| 215 | 234 | static int __init set_trace_boot_options(char *str) |
|---|
| 216 | 235 | { |
|---|
| 217 | 236 | strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); |
|---|
| 218 | | - return 0; |
|---|
| 237 | + return 1; |
|---|
| 219 | 238 | } |
|---|
| 220 | 239 | __setup("trace_options=", set_trace_boot_options); |
|---|
| 221 | 240 | |
|---|
| .. | .. |
|---|
| 226 | 245 | { |
|---|
| 227 | 246 | strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); |
|---|
| 228 | 247 | trace_boot_clock = trace_boot_clock_buf; |
|---|
| 229 | | - return 0; |
|---|
| 248 | + return 1; |
|---|
| 230 | 249 | } |
|---|
| 231 | 250 | __setup("trace_clock=", set_trace_boot_clock); |
|---|
| 232 | 251 | |
|---|
| .. | .. |
|---|
| 248 | 267 | do_div(nsec, 1000); |
|---|
| 249 | 268 | return nsec; |
|---|
| 250 | 269 | } |
|---|
| 270 | + |
|---|
| 271 | +static void |
|---|
| 272 | +trace_process_export(struct trace_export *export, |
|---|
| 273 | + struct ring_buffer_event *event, int flag) |
|---|
| 274 | +{ |
|---|
| 275 | + struct trace_entry *entry; |
|---|
| 276 | + unsigned int size = 0; |
|---|
| 277 | + |
|---|
| 278 | + if (export->flags & flag) { |
|---|
| 279 | + entry = ring_buffer_event_data(event); |
|---|
| 280 | + size = ring_buffer_event_length(event); |
|---|
| 281 | + export->write(export, entry, size); |
|---|
| 282 | + } |
|---|
| 283 | +} |
|---|
| 284 | + |
|---|
| 285 | +static DEFINE_MUTEX(ftrace_export_lock); |
|---|
| 286 | + |
|---|
| 287 | +static struct trace_export __rcu *ftrace_exports_list __read_mostly; |
|---|
| 288 | + |
|---|
| 289 | +static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled); |
|---|
| 290 | +static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled); |
|---|
| 291 | +static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled); |
|---|
| 292 | + |
|---|
| 293 | +static inline void ftrace_exports_enable(struct trace_export *export) |
|---|
| 294 | +{ |
|---|
| 295 | + if (export->flags & TRACE_EXPORT_FUNCTION) |
|---|
| 296 | + static_branch_inc(&trace_function_exports_enabled); |
|---|
| 297 | + |
|---|
| 298 | + if (export->flags & TRACE_EXPORT_EVENT) |
|---|
| 299 | + static_branch_inc(&trace_event_exports_enabled); |
|---|
| 300 | + |
|---|
| 301 | + if (export->flags & TRACE_EXPORT_MARKER) |
|---|
| 302 | + static_branch_inc(&trace_marker_exports_enabled); |
|---|
| 303 | +} |
|---|
| 304 | + |
|---|
| 305 | +static inline void ftrace_exports_disable(struct trace_export *export) |
|---|
| 306 | +{ |
|---|
| 307 | + if (export->flags & TRACE_EXPORT_FUNCTION) |
|---|
| 308 | + static_branch_dec(&trace_function_exports_enabled); |
|---|
| 309 | + |
|---|
| 310 | + if (export->flags & TRACE_EXPORT_EVENT) |
|---|
| 311 | + static_branch_dec(&trace_event_exports_enabled); |
|---|
| 312 | + |
|---|
| 313 | + if (export->flags & TRACE_EXPORT_MARKER) |
|---|
| 314 | + static_branch_dec(&trace_marker_exports_enabled); |
|---|
| 315 | +} |
|---|
| 316 | + |
|---|
| 317 | +static void ftrace_exports(struct ring_buffer_event *event, int flag) |
|---|
| 318 | +{ |
|---|
| 319 | + struct trace_export *export; |
|---|
| 320 | + |
|---|
| 321 | + preempt_disable_notrace(); |
|---|
| 322 | + |
|---|
| 323 | + export = rcu_dereference_raw_check(ftrace_exports_list); |
|---|
| 324 | + while (export) { |
|---|
| 325 | + trace_process_export(export, event, flag); |
|---|
| 326 | + export = rcu_dereference_raw_check(export->next); |
|---|
| 327 | + } |
|---|
| 328 | + |
|---|
| 329 | + preempt_enable_notrace(); |
|---|
| 330 | +} |
|---|
| 331 | + |
|---|
| 332 | +static inline void |
|---|
| 333 | +add_trace_export(struct trace_export **list, struct trace_export *export) |
|---|
| 334 | +{ |
|---|
| 335 | + rcu_assign_pointer(export->next, *list); |
|---|
| 336 | + /* |
|---|
| 337 | + * We are entering export into the list but another |
|---|
| 338 | + * CPU might be walking that list. We need to make sure |
|---|
| 339 | + * the export->next pointer is valid before another CPU sees |
|---|
| 340 | + * the export pointer included into the list. |
|---|
| 341 | + */ |
|---|
| 342 | + rcu_assign_pointer(*list, export); |
|---|
| 343 | +} |
|---|
| 344 | + |
|---|
| 345 | +static inline int |
|---|
| 346 | +rm_trace_export(struct trace_export **list, struct trace_export *export) |
|---|
| 347 | +{ |
|---|
| 348 | + struct trace_export **p; |
|---|
| 349 | + |
|---|
| 350 | + for (p = list; *p != NULL; p = &(*p)->next) |
|---|
| 351 | + if (*p == export) |
|---|
| 352 | + break; |
|---|
| 353 | + |
|---|
| 354 | + if (*p != export) |
|---|
| 355 | + return -1; |
|---|
| 356 | + |
|---|
| 357 | + rcu_assign_pointer(*p, (*p)->next); |
|---|
| 358 | + |
|---|
| 359 | + return 0; |
|---|
| 360 | +} |
|---|
| 361 | + |
|---|
| 362 | +static inline void |
|---|
| 363 | +add_ftrace_export(struct trace_export **list, struct trace_export *export) |
|---|
| 364 | +{ |
|---|
| 365 | + ftrace_exports_enable(export); |
|---|
| 366 | + |
|---|
| 367 | + add_trace_export(list, export); |
|---|
| 368 | +} |
|---|
| 369 | + |
|---|
| 370 | +static inline int |
|---|
| 371 | +rm_ftrace_export(struct trace_export **list, struct trace_export *export) |
|---|
| 372 | +{ |
|---|
| 373 | + int ret; |
|---|
| 374 | + |
|---|
| 375 | + ret = rm_trace_export(list, export); |
|---|
| 376 | + ftrace_exports_disable(export); |
|---|
| 377 | + |
|---|
| 378 | + return ret; |
|---|
| 379 | +} |
|---|
| 380 | + |
|---|
| 381 | +int register_ftrace_export(struct trace_export *export) |
|---|
| 382 | +{ |
|---|
| 383 | + if (WARN_ON_ONCE(!export->write)) |
|---|
| 384 | + return -1; |
|---|
| 385 | + |
|---|
| 386 | + mutex_lock(&ftrace_export_lock); |
|---|
| 387 | + |
|---|
| 388 | + add_ftrace_export(&ftrace_exports_list, export); |
|---|
| 389 | + |
|---|
| 390 | + mutex_unlock(&ftrace_export_lock); |
|---|
| 391 | + |
|---|
| 392 | + return 0; |
|---|
| 393 | +} |
|---|
| 394 | +EXPORT_SYMBOL_GPL(register_ftrace_export); |
|---|
| 395 | + |
|---|
| 396 | +int unregister_ftrace_export(struct trace_export *export) |
|---|
| 397 | +{ |
|---|
| 398 | + int ret; |
|---|
| 399 | + |
|---|
| 400 | + mutex_lock(&ftrace_export_lock); |
|---|
| 401 | + |
|---|
| 402 | + ret = rm_ftrace_export(&ftrace_exports_list, export); |
|---|
| 403 | + |
|---|
| 404 | + mutex_unlock(&ftrace_export_lock); |
|---|
| 405 | + |
|---|
| 406 | + return ret; |
|---|
| 407 | +} |
|---|
| 408 | +EXPORT_SYMBOL_GPL(unregister_ftrace_export); |
|---|
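`register_ftrace_export()` and `unregister_ftrace_export()` maintain the RCU list walked by `ftrace_exports()` above; each exporter receives a copy of every function, event, or marker entry matching its `flags`, and the per-flag static branches keep the commit fast path untouched while nothing is registered. The `write()` callback runs in the trace commit path with preemption disabled, so it must not sleep. A minimal consumer sketch, assuming the `struct trace_export` layout from include/linux/trace.h (module and callback names are illustrative):

```c
#include <linux/module.h>
#include <linux/trace.h>

/* Called for every matching entry; @entry is the raw trace entry payload. */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int len)
{
	/* Forward the entry to a transport of your choice (illustrative). */
	pr_debug("exporting %u bytes of trace data\n", len);
}

static struct trace_export example_export = {
	.write	= example_export_write,
	.flags	= TRACE_EXPORT_EVENT | TRACE_EXPORT_MARKER,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}

module_init(example_export_init);
module_exit(example_export_exit);
MODULE_LICENSE("GPL");
```
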
| 251 | 409 | |
|---|
| 252 | 410 | /* trace_flags holds trace_options default values */ |
|---|
| 253 | 411 | #define TRACE_DEFAULT_FLAGS \ |
|---|
| .. | .. |
|---|
| 299 | 457 | this_tr->ref--; |
|---|
| 300 | 458 | } |
|---|
| 301 | 459 | |
|---|
| 460 | +/** |
|---|
| 461 | + * trace_array_put - Decrement the reference counter for this trace array. |
|---|
| 462 | + * |
|---|
| 463 | + * NOTE: Use this when we no longer need the trace array returned by |
|---|
| 464 | + * trace_array_get_by_name(). This ensures the trace array can be later |
|---|
| 465 | + * destroyed. |
|---|
| 466 | + * |
|---|
| 467 | + */ |
|---|
| 302 | 468 | void trace_array_put(struct trace_array *this_tr) |
|---|
| 303 | 469 | { |
|---|
| 470 | + if (!this_tr) |
|---|
| 471 | + return; |
|---|
| 472 | + |
|---|
| 304 | 473 | mutex_lock(&trace_types_lock); |
|---|
| 305 | 474 | __trace_array_put(this_tr); |
|---|
| 306 | 475 | mutex_unlock(&trace_types_lock); |
|---|
| 307 | 476 | } |
|---|
| 477 | +EXPORT_SYMBOL_GPL(trace_array_put); |
|---|
| 478 | + |
|---|
| 479 | +int tracing_check_open_get_tr(struct trace_array *tr) |
|---|
| 480 | +{ |
|---|
| 481 | + int ret; |
|---|
| 482 | + |
|---|
| 483 | + ret = security_locked_down(LOCKDOWN_TRACEFS); |
|---|
| 484 | + if (ret) |
|---|
| 485 | + return ret; |
|---|
| 486 | + |
|---|
| 487 | + if (tracing_disabled) |
|---|
| 488 | + return -ENODEV; |
|---|
| 489 | + |
|---|
| 490 | + if (tr && trace_array_get(tr) < 0) |
|---|
| 491 | + return -ENODEV; |
|---|
| 492 | + |
|---|
| 493 | + return 0; |
|---|
| 494 | +} |
|---|
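`tracing_check_open_get_tr()` folds the lockdown check, the `tracing_disabled` check, and the `trace_array_get()` reference grab into one helper so every tracefs `open()` path behaves the same way. A sketch of the intended pattern, assuming a generic open handler (the handler name is illustrative; `tracing_open_generic_tr()` elsewhere in this file follows the same shape), with `trace_array_put()` as the matching release:

```c
static int example_tracefs_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	/* lockdown check + tracing_disabled check + trace_array_get() */
	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	filp->private_data = inode->i_private;
	return 0;
}
```
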
| 308 | 495 | |
|---|
| 309 | 496 | int call_filter_check_discard(struct trace_event_call *call, void *rec, |
|---|
| 310 | | - struct ring_buffer *buffer, |
|---|
| 497 | + struct trace_buffer *buffer, |
|---|
| 311 | 498 | struct ring_buffer_event *event) |
|---|
| 312 | 499 | { |
|---|
| 313 | 500 | if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && |
|---|
| .. | .. |
|---|
| 355 | 542 | * Returns false if @task should be traced. |
|---|
| 356 | 543 | */ |
|---|
| 357 | 544 | bool |
|---|
| 358 | | -trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task) |
|---|
| 545 | +trace_ignore_this_task(struct trace_pid_list *filtered_pids, |
|---|
| 546 | + struct trace_pid_list *filtered_no_pids, |
|---|
| 547 | + struct task_struct *task) |
|---|
| 359 | 548 | { |
|---|
| 360 | 549 | /* |
|---|
| 361 | | - * Return false, because if filtered_pids does not exist, |
|---|
| 362 | | - * all pids are good to trace. |
|---|
| 550 | + * If filtered_no_pids is not empty, and the task's pid is listed |
|---|
| 551 | + * in filtered_no_pids, then return true. |
|---|
| 552 | + * Otherwise, if filtered_pids is empty, that means we can |
|---|
| 553 | + * trace all tasks. If it has content, then only trace pids |
|---|
| 554 | + * within filtered_pids. |
|---|
| 363 | 555 | */ |
|---|
| 364 | | - if (!filtered_pids) |
|---|
| 365 | | - return false; |
|---|
| 366 | 556 | |
|---|
| 367 | | - return !trace_find_filtered_pid(filtered_pids, task->pid); |
|---|
| 557 | + return (filtered_pids && |
|---|
| 558 | + !trace_find_filtered_pid(filtered_pids, task->pid)) || |
|---|
| 559 | + (filtered_no_pids && |
|---|
| 560 | + trace_find_filtered_pid(filtered_no_pids, task->pid)); |
|---|
| 368 | 561 | } |
|---|
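With the extra `filtered_no_pids` list, a task is ignored when a pid whitelist exists and the task is not on it, or when a no-trace list exists and the task is on it. A short worked example with hypothetical pid values (the two sets stand for the lists behind the `set_event_pid` and `set_event_notrace_pid` files):

```c
/* Hypothetical lists:
 *   filtered_pids    = { 100 }   - only trace these
 *   filtered_no_pids = { 200 }   - never trace these
 *
 *   pid 100 -> on the pid list, not on the no-trace list -> traced  (returns false)
 *   pid 200 -> not on the pid list                       -> ignored (returns true)
 *   pid 300 -> not on the pid list                       -> ignored (returns true)
 *
 * With filtered_pids empty (NULL), only pid 200 would be ignored.
 */
```
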
| 369 | 562 | |
|---|
| 370 | 563 | /** |
|---|
| 371 | | - * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list |
|---|
| 564 | + * trace_filter_add_remove_task - Add or remove a task from a pid_list |
|---|
| 372 | 565 | * @pid_list: The list to modify |
|---|
| 373 | 566 | * @self: The current task for fork or NULL for exit |
|---|
| 374 | 567 | * @task: The task to add or remove |
|---|
| .. | .. |
|---|
| 572 | 765 | return read; |
|---|
| 573 | 766 | } |
|---|
| 574 | 767 | |
|---|
| 575 | | -static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) |
|---|
| 768 | +static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu) |
|---|
| 576 | 769 | { |
|---|
| 577 | 770 | u64 ts; |
|---|
| 578 | 771 | |
|---|
| .. | .. |
|---|
| 588 | 781 | |
|---|
| 589 | 782 | u64 ftrace_now(int cpu) |
|---|
| 590 | 783 | { |
|---|
| 591 | | - return buffer_ftrace_now(&global_trace.trace_buffer, cpu); |
|---|
| 784 | + return buffer_ftrace_now(&global_trace.array_buffer, cpu); |
|---|
| 592 | 785 | } |
|---|
| 593 | 786 | |
|---|
| 594 | 787 | /** |
|---|
| .. | .. |
|---|
| 716 | 909 | #endif |
|---|
| 717 | 910 | |
|---|
| 718 | 911 | #ifdef CONFIG_STACKTRACE |
|---|
| 719 | | -static void __ftrace_trace_stack(struct ring_buffer *buffer, |
|---|
| 912 | +static void __ftrace_trace_stack(struct trace_buffer *buffer, |
|---|
| 720 | 913 | unsigned long flags, |
|---|
| 721 | 914 | int skip, int pc, struct pt_regs *regs); |
|---|
| 722 | 915 | static inline void ftrace_trace_stack(struct trace_array *tr, |
|---|
| 723 | | - struct ring_buffer *buffer, |
|---|
| 916 | + struct trace_buffer *buffer, |
|---|
| 724 | 917 | unsigned long flags, |
|---|
| 725 | 918 | int skip, int pc, struct pt_regs *regs); |
|---|
| 726 | 919 | |
|---|
| 727 | 920 | #else |
|---|
| 728 | | -static inline void __ftrace_trace_stack(struct ring_buffer *buffer, |
|---|
| 921 | +static inline void __ftrace_trace_stack(struct trace_buffer *buffer, |
|---|
| 729 | 922 | unsigned long flags, |
|---|
| 730 | 923 | int skip, int pc, struct pt_regs *regs) |
|---|
| 731 | 924 | { |
|---|
| 732 | 925 | } |
|---|
| 733 | 926 | static inline void ftrace_trace_stack(struct trace_array *tr, |
|---|
| 734 | | - struct ring_buffer *buffer, |
|---|
| 927 | + struct trace_buffer *buffer, |
|---|
| 735 | 928 | unsigned long flags, |
|---|
| 736 | 929 | int skip, int pc, struct pt_regs *regs) |
|---|
| 737 | 930 | { |
|---|
| .. | .. |
|---|
| 745 | 938 | { |
|---|
| 746 | 939 | struct trace_entry *ent = ring_buffer_event_data(event); |
|---|
| 747 | 940 | |
|---|
| 748 | | - tracing_generic_entry_update(ent, flags, pc); |
|---|
| 749 | | - ent->type = type; |
|---|
| 941 | + tracing_generic_entry_update(ent, type, flags, pc); |
|---|
| 750 | 942 | } |
|---|
| 751 | 943 | |
|---|
| 752 | 944 | static __always_inline struct ring_buffer_event * |
|---|
| 753 | | -__trace_buffer_lock_reserve(struct ring_buffer *buffer, |
|---|
| 945 | +__trace_buffer_lock_reserve(struct trace_buffer *buffer, |
|---|
| 754 | 946 | int type, |
|---|
| 755 | 947 | unsigned long len, |
|---|
| 756 | 948 | unsigned long flags, int pc) |
|---|
| .. | .. |
|---|
| 766 | 958 | |
|---|
| 767 | 959 | void tracer_tracing_on(struct trace_array *tr) |
|---|
| 768 | 960 | { |
|---|
| 769 | | - if (tr->trace_buffer.buffer) |
|---|
| 770 | | - ring_buffer_record_on(tr->trace_buffer.buffer); |
|---|
| 961 | + if (tr->array_buffer.buffer) |
|---|
| 962 | + ring_buffer_record_on(tr->array_buffer.buffer); |
|---|
| 771 | 963 | /* |
|---|
| 772 | 964 | * This flag is looked at when buffers haven't been allocated |
|---|
| 773 | 965 | * yet, or by some tracers (like irqsoff), that just want to |
|---|
| .. | .. |
|---|
| 795 | 987 | |
|---|
| 796 | 988 | |
|---|
| 797 | 989 | static __always_inline void |
|---|
| 798 | | -__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) |
|---|
| 990 | +__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event) |
|---|
| 799 | 991 | { |
|---|
| 800 | 992 | __this_cpu_write(trace_taskinfo_save, true); |
|---|
| 801 | 993 | |
|---|
| .. | .. |
|---|
| 818 | 1010 | int __trace_puts(unsigned long ip, const char *str, int size) |
|---|
| 819 | 1011 | { |
|---|
| 820 | 1012 | struct ring_buffer_event *event; |
|---|
| 821 | | - struct ring_buffer *buffer; |
|---|
| 1013 | + struct trace_buffer *buffer; |
|---|
| 822 | 1014 | struct print_entry *entry; |
|---|
| 823 | 1015 | unsigned long irq_flags; |
|---|
| 824 | 1016 | int alloc; |
|---|
| .. | .. |
|---|
| 835 | 1027 | alloc = sizeof(*entry) + size + 2; /* possible \n added */ |
|---|
| 836 | 1028 | |
|---|
| 837 | 1029 | local_save_flags(irq_flags); |
|---|
| 838 | | - buffer = global_trace.trace_buffer.buffer; |
|---|
| 1030 | + buffer = global_trace.array_buffer.buffer; |
|---|
| 1031 | + ring_buffer_nest_start(buffer); |
|---|
| 839 | 1032 | event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, |
|---|
| 840 | 1033 | irq_flags, pc); |
|---|
| 841 | | - if (!event) |
|---|
| 842 | | - return 0; |
|---|
| 1034 | + if (!event) { |
|---|
| 1035 | + size = 0; |
|---|
| 1036 | + goto out; |
|---|
| 1037 | + } |
|---|
| 843 | 1038 | |
|---|
| 844 | 1039 | entry = ring_buffer_event_data(event); |
|---|
| 845 | 1040 | entry->ip = ip; |
|---|
| .. | .. |
|---|
| 855 | 1050 | |
|---|
| 856 | 1051 | __buffer_unlock_commit(buffer, event); |
|---|
| 857 | 1052 | ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); |
|---|
| 858 | | - |
|---|
| 1053 | + out: |
|---|
| 1054 | + ring_buffer_nest_end(buffer); |
|---|
| 859 | 1055 | return size; |
|---|
| 860 | 1056 | } |
|---|
| 861 | 1057 | EXPORT_SYMBOL_GPL(__trace_puts); |
|---|
| .. | .. |
|---|
| 868 | 1064 | int __trace_bputs(unsigned long ip, const char *str) |
|---|
| 869 | 1065 | { |
|---|
| 870 | 1066 | struct ring_buffer_event *event; |
|---|
| 871 | | - struct ring_buffer *buffer; |
|---|
| 1067 | + struct trace_buffer *buffer; |
|---|
| 872 | 1068 | struct bputs_entry *entry; |
|---|
| 873 | 1069 | unsigned long irq_flags; |
|---|
| 874 | 1070 | int size = sizeof(struct bputs_entry); |
|---|
| 1071 | + int ret = 0; |
|---|
| 875 | 1072 | int pc; |
|---|
| 876 | 1073 | |
|---|
| 877 | 1074 | if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) |
|---|
| .. | .. |
|---|
| 883 | 1080 | return 0; |
|---|
| 884 | 1081 | |
|---|
| 885 | 1082 | local_save_flags(irq_flags); |
|---|
| 886 | | - buffer = global_trace.trace_buffer.buffer; |
|---|
| 1083 | + buffer = global_trace.array_buffer.buffer; |
|---|
| 1084 | + |
|---|
| 1085 | + ring_buffer_nest_start(buffer); |
|---|
| 887 | 1086 | event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, |
|---|
| 888 | 1087 | irq_flags, pc); |
|---|
| 889 | 1088 | if (!event) |
|---|
| 890 | | - return 0; |
|---|
| 1089 | + goto out; |
|---|
| 891 | 1090 | |
|---|
| 892 | 1091 | entry = ring_buffer_event_data(event); |
|---|
| 893 | 1092 | entry->ip = ip; |
|---|
| .. | .. |
|---|
| 896 | 1095 | __buffer_unlock_commit(buffer, event); |
|---|
| 897 | 1096 | ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); |
|---|
| 898 | 1097 | |
|---|
| 899 | | - return 1; |
|---|
| 1098 | + ret = 1; |
|---|
| 1099 | + out: |
|---|
| 1100 | + ring_buffer_nest_end(buffer); |
|---|
| 1101 | + return ret; |
|---|
| 900 | 1102 | } |
|---|
| 901 | 1103 | EXPORT_SYMBOL_GPL(__trace_bputs); |
|---|
| 902 | 1104 | |
|---|
| 903 | 1105 | #ifdef CONFIG_TRACER_SNAPSHOT |
|---|
| 904 | | -void tracing_snapshot_instance(struct trace_array *tr) |
|---|
| 1106 | +static void tracing_snapshot_instance_cond(struct trace_array *tr, |
|---|
| 1107 | + void *cond_data) |
|---|
| 905 | 1108 | { |
|---|
| 906 | 1109 | struct tracer *tracer = tr->current_trace; |
|---|
| 907 | 1110 | unsigned long flags; |
|---|
| .. | .. |
|---|
| 927 | 1130 | } |
|---|
| 928 | 1131 | |
|---|
| 929 | 1132 | local_irq_save(flags); |
|---|
| 930 | | - update_max_tr(tr, current, smp_processor_id()); |
|---|
| 1133 | + update_max_tr(tr, current, smp_processor_id(), cond_data); |
|---|
| 931 | 1134 | local_irq_restore(flags); |
|---|
| 1135 | +} |
|---|
| 1136 | + |
|---|
| 1137 | +void tracing_snapshot_instance(struct trace_array *tr) |
|---|
| 1138 | +{ |
|---|
| 1139 | + tracing_snapshot_instance_cond(tr, NULL); |
|---|
| 932 | 1140 | } |
|---|
| 933 | 1141 | |
|---|
| 934 | 1142 | /** |
|---|
| .. | .. |
|---|
| 953 | 1161 | } |
|---|
| 954 | 1162 | EXPORT_SYMBOL_GPL(tracing_snapshot); |
|---|
| 955 | 1163 | |
|---|
| 956 | | -static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, |
|---|
| 957 | | - struct trace_buffer *size_buf, int cpu_id); |
|---|
| 958 | | -static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); |
|---|
| 1164 | +/** |
|---|
| 1165 | + * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. |
|---|
| 1166 | + * @tr: The tracing instance to snapshot |
|---|
| 1167 | + * @cond_data: The data to be tested conditionally, and possibly saved |
|---|
| 1168 | + * |
|---|
| 1169 | + * This is the same as tracing_snapshot() except that the snapshot is |
|---|
| 1170 | + * conditional - the snapshot will only happen if the |
|---|
| 1171 | + * cond_snapshot.update() implementation receiving the cond_data |
|---|
| 1172 | + * returns true, which means that the trace array's cond_snapshot |
|---|
| 1173 | + * update() operation used the cond_data to determine whether the |
|---|
| 1174 | + * snapshot should be taken, and if it was, presumably saved it along |
|---|
| 1175 | + * with the snapshot. |
|---|
| 1176 | + */ |
|---|
| 1177 | +void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) |
|---|
| 1178 | +{ |
|---|
| 1179 | + tracing_snapshot_instance_cond(tr, cond_data); |
|---|
| 1180 | +} |
|---|
| 1181 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond); |
|---|
| 1182 | + |
|---|
| 1183 | +/** |
|---|
| 1184 | + * tracing_snapshot_cond_data - get the user data associated with a snapshot |
|---|
| 1185 | + * @tr: The tracing instance |
|---|
| 1186 | + * |
|---|
| 1187 | + * When the user enables a conditional snapshot using |
|---|
| 1188 | + * tracing_snapshot_cond_enable(), the user-defined cond_data is saved |
|---|
| 1189 | + * with the snapshot. This accessor is used to retrieve it. |
|---|
| 1190 | + * |
|---|
| 1191 | + * Should not be called from cond_snapshot.update(), since it takes |
|---|
| 1192 | + * the tr->max_lock lock, which the code calling |
|---|
| 1193 | + * cond_snapshot.update() has already done. |
|---|
| 1194 | + * |
|---|
| 1195 | + * Returns the cond_data associated with the trace array's snapshot. |
|---|
| 1196 | + */ |
|---|
| 1197 | +void *tracing_cond_snapshot_data(struct trace_array *tr) |
|---|
| 1198 | +{ |
|---|
| 1199 | + void *cond_data = NULL; |
|---|
| 1200 | + |
|---|
| 1201 | + local_irq_disable(); |
|---|
| 1202 | + arch_spin_lock(&tr->max_lock); |
|---|
| 1203 | + |
|---|
| 1204 | + if (tr->cond_snapshot) |
|---|
| 1205 | + cond_data = tr->cond_snapshot->cond_data; |
|---|
| 1206 | + |
|---|
| 1207 | + arch_spin_unlock(&tr->max_lock); |
|---|
| 1208 | + local_irq_enable(); |
|---|
| 1209 | + |
|---|
| 1210 | + return cond_data; |
|---|
| 1211 | +} |
|---|
| 1212 | +EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); |
|---|
| 1213 | + |
|---|
| 1214 | +static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, |
|---|
| 1215 | + struct array_buffer *size_buf, int cpu_id); |
|---|
| 1216 | +static void set_buffer_entries(struct array_buffer *buf, unsigned long val); |
|---|
| 959 | 1217 | |
|---|
| 960 | 1218 | int tracing_alloc_snapshot_instance(struct trace_array *tr) |
|---|
| 961 | 1219 | { |
|---|
| .. | .. |
|---|
| 965 | 1223 | |
|---|
| 966 | 1224 | /* allocate spare buffer */ |
|---|
| 967 | 1225 | ret = resize_buffer_duplicate_size(&tr->max_buffer, |
|---|
| 968 | | - &tr->trace_buffer, RING_BUFFER_ALL_CPUS); |
|---|
| 1226 | + &tr->array_buffer, RING_BUFFER_ALL_CPUS); |
|---|
| 969 | 1227 | if (ret < 0) |
|---|
| 970 | 1228 | return ret; |
|---|
| 971 | 1229 | |
|---|
| .. | .. |
|---|
| 1032 | 1290 | tracing_snapshot(); |
|---|
| 1033 | 1291 | } |
|---|
| 1034 | 1292 | EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); |
|---|
| 1293 | + |
|---|
| 1294 | +/** |
|---|
| 1295 | + * tracing_snapshot_cond_enable - enable conditional snapshot for an instance |
|---|
| 1296 | + * @tr: The tracing instance |
|---|
| 1297 | + * @cond_data: User data to associate with the snapshot |
|---|
| 1298 | + * @update: Implementation of the cond_snapshot update function |
|---|
| 1299 | + * |
|---|
| 1300 | + * Check whether the conditional snapshot for the given instance has |
|---|
| 1301 | + * already been enabled, or if the current tracer is already using a |
|---|
| 1302 | + * snapshot; if so, return -EBUSY, else create a cond_snapshot and |
|---|
| 1303 | + * save the cond_data and update function inside. |
|---|
| 1304 | + * |
|---|
| 1305 | + * Returns 0 if successful, error otherwise. |
|---|
| 1306 | + */ |
|---|
| 1307 | +int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, |
|---|
| 1308 | + cond_update_fn_t update) |
|---|
| 1309 | +{ |
|---|
| 1310 | + struct cond_snapshot *cond_snapshot; |
|---|
| 1311 | + int ret = 0; |
|---|
| 1312 | + |
|---|
| 1313 | + cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); |
|---|
| 1314 | + if (!cond_snapshot) |
|---|
| 1315 | + return -ENOMEM; |
|---|
| 1316 | + |
|---|
| 1317 | + cond_snapshot->cond_data = cond_data; |
|---|
| 1318 | + cond_snapshot->update = update; |
|---|
| 1319 | + |
|---|
| 1320 | + mutex_lock(&trace_types_lock); |
|---|
| 1321 | + |
|---|
| 1322 | + ret = tracing_alloc_snapshot_instance(tr); |
|---|
| 1323 | + if (ret) |
|---|
| 1324 | + goto fail_unlock; |
|---|
| 1325 | + |
|---|
| 1326 | + if (tr->current_trace->use_max_tr) { |
|---|
| 1327 | + ret = -EBUSY; |
|---|
| 1328 | + goto fail_unlock; |
|---|
| 1329 | + } |
|---|
| 1330 | + |
|---|
| 1331 | + /* |
|---|
| 1332 | + * The cond_snapshot can only change to NULL without the |
|---|
| 1333 | + * trace_types_lock. We don't care if we race with it going |
|---|
| 1334 | + * to NULL, but we want to make sure that it's not set to |
|---|
| 1335 | + * something other than NULL when we get here, which we can |
|---|
| 1336 | + * do safely with only holding the trace_types_lock and not |
|---|
| 1337 | + * having to take the max_lock. |
|---|
| 1338 | + */ |
|---|
| 1339 | + if (tr->cond_snapshot) { |
|---|
| 1340 | + ret = -EBUSY; |
|---|
| 1341 | + goto fail_unlock; |
|---|
| 1342 | + } |
|---|
| 1343 | + |
|---|
| 1344 | + local_irq_disable(); |
|---|
| 1345 | + arch_spin_lock(&tr->max_lock); |
|---|
| 1346 | + tr->cond_snapshot = cond_snapshot; |
|---|
| 1347 | + arch_spin_unlock(&tr->max_lock); |
|---|
| 1348 | + local_irq_enable(); |
|---|
| 1349 | + |
|---|
| 1350 | + mutex_unlock(&trace_types_lock); |
|---|
| 1351 | + |
|---|
| 1352 | + return ret; |
|---|
| 1353 | + |
|---|
| 1354 | + fail_unlock: |
|---|
| 1355 | + mutex_unlock(&trace_types_lock); |
|---|
| 1356 | + kfree(cond_snapshot); |
|---|
| 1357 | + return ret; |
|---|
| 1358 | +} |
|---|
| 1359 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); |
|---|
| 1360 | + |
|---|
| 1361 | +/** |
|---|
| 1362 | + * tracing_snapshot_cond_disable - disable conditional snapshot for an instance |
|---|
| 1363 | + * @tr: The tracing instance |
|---|
| 1364 | + * |
|---|
| 1365 | + * Check whether the conditional snapshot for the given instance is |
|---|
| 1366 | + * enabled; if so, free the cond_snapshot associated with it, |
|---|
| 1367 | + * otherwise return -EINVAL. |
|---|
| 1368 | + * |
|---|
| 1369 | + * Returns 0 if successful, error otherwise. |
|---|
| 1370 | + */ |
|---|
| 1371 | +int tracing_snapshot_cond_disable(struct trace_array *tr) |
|---|
| 1372 | +{ |
|---|
| 1373 | + int ret = 0; |
|---|
| 1374 | + |
|---|
| 1375 | + local_irq_disable(); |
|---|
| 1376 | + arch_spin_lock(&tr->max_lock); |
|---|
| 1377 | + |
|---|
| 1378 | + if (!tr->cond_snapshot) |
|---|
| 1379 | + ret = -EINVAL; |
|---|
| 1380 | + else { |
|---|
| 1381 | + kfree(tr->cond_snapshot); |
|---|
| 1382 | + tr->cond_snapshot = NULL; |
|---|
| 1383 | + } |
|---|
| 1384 | + |
|---|
| 1385 | + arch_spin_unlock(&tr->max_lock); |
|---|
| 1386 | + local_irq_enable(); |
|---|
| 1387 | + |
|---|
| 1388 | + return ret; |
|---|
| 1389 | +} |
|---|
| 1390 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); |
|---|
| 1035 | 1391 | #else |
|---|
| 1036 | 1392 | void tracing_snapshot(void) |
|---|
| 1037 | 1393 | { |
|---|
| 1038 | 1394 | WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); |
|---|
| 1039 | 1395 | } |
|---|
| 1040 | 1396 | EXPORT_SYMBOL_GPL(tracing_snapshot); |
|---|
| 1397 | +void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) |
|---|
| 1398 | +{ |
|---|
| 1399 | + WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used"); |
|---|
| 1400 | +} |
|---|
| 1401 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond); |
|---|
| 1041 | 1402 | int tracing_alloc_snapshot(void) |
|---|
| 1042 | 1403 | { |
|---|
| 1043 | 1404 | WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); |
|---|
| .. | .. |
|---|
| 1050 | 1411 | tracing_snapshot(); |
|---|
| 1051 | 1412 | } |
|---|
| 1052 | 1413 | EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); |
|---|
| 1414 | +void *tracing_cond_snapshot_data(struct trace_array *tr) |
|---|
| 1415 | +{ |
|---|
| 1416 | + return NULL; |
|---|
| 1417 | +} |
|---|
| 1418 | +EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); |
|---|
| 1419 | +int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) |
|---|
| 1420 | +{ |
|---|
| 1421 | + return -ENODEV; |
|---|
| 1422 | +} |
|---|
| 1423 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); |
|---|
| 1424 | +int tracing_snapshot_cond_disable(struct trace_array *tr) |
|---|
| 1425 | +{ |
|---|
| 1426 | + return false; |
|---|
| 1427 | +} |
|---|
| 1428 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); |
|---|
| 1053 | 1429 | #endif /* CONFIG_TRACER_SNAPSHOT */ |
|---|
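Taken together, these entry points form the conditional-snapshot API: `tracing_snapshot_cond_enable()` allocates the spare buffer and installs the `cond_snapshot`, `tracing_snapshot_cond()` lets the `update()` callback decide whether the buffers are swapped, `tracing_cond_snapshot_data()` reads the saved pointer back, and `tracing_snapshot_cond_disable()` tears it down. A minimal sketch of an in-kernel user, assuming the `cond_update_fn_t` callback type declared next to `struct cond_snapshot` in the local trace.h (the include location is an assumption; structure and function names are illustrative):

```c
#include "trace.h"	/* assumed to declare cond_update_fn_t and the cond API */

/* Illustrative condition: only keep a snapshot when the sampled value
 * exceeds a threshold stored in the cond_data. */
struct example_cond {
	u64	threshold;
	u64	sample;
};

/* Runs under tr->max_lock from update_max_tr(); must not block.
 * Returning false vetoes the buffer swap. */
static bool example_cond_update(struct trace_array *tr, void *cond_data)
{
	struct example_cond *cond = cond_data;

	return cond->sample > cond->threshold;
}

static struct example_cond example_cond = { .threshold = 1000000 };

static int example_cond_setup(struct trace_array *tr)
{
	/* Allocates the spare buffer and installs the condition. */
	return tracing_snapshot_cond_enable(tr, &example_cond,
					    example_cond_update);
}
```

At the point of interest the user then calls `tracing_snapshot_cond(tr, &example_cond)`; the swap only happens when `example_cond_update()` returns true, and `tracing_snapshot_cond_disable(tr)` frees the condition again.
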
| 1054 | 1430 | |
|---|
| 1055 | 1431 | void tracer_tracing_off(struct trace_array *tr) |
|---|
| 1056 | 1432 | { |
|---|
| 1057 | | - if (tr->trace_buffer.buffer) |
|---|
| 1058 | | - ring_buffer_record_off(tr->trace_buffer.buffer); |
|---|
| 1433 | + if (tr->array_buffer.buffer) |
|---|
| 1434 | + ring_buffer_record_off(tr->array_buffer.buffer); |
|---|
| 1059 | 1435 | /* |
|---|
| 1060 | 1436 | * This flag is looked at when buffers haven't been allocated |
|---|
| 1061 | 1437 | * yet, or by some tracers (like irqsoff), that just want to |
|---|
| .. | .. |
|---|
| 1085 | 1461 | |
|---|
| 1086 | 1462 | void disable_trace_on_warning(void) |
|---|
| 1087 | 1463 | { |
|---|
| 1088 | | - if (__disable_trace_on_warning) |
|---|
| 1464 | + if (__disable_trace_on_warning) { |
|---|
| 1465 | + trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_, |
|---|
| 1466 | + "Disabling tracing due to warning\n"); |
|---|
| 1089 | 1467 | tracing_off(); |
|---|
| 1468 | + } |
|---|
| 1090 | 1469 | } |
|---|
| 1091 | 1470 | |
|---|
| 1092 | 1471 | /** |
|---|
| .. | .. |
|---|
| 1097 | 1476 | */ |
|---|
| 1098 | 1477 | bool tracer_tracing_is_on(struct trace_array *tr) |
|---|
| 1099 | 1478 | { |
|---|
| 1100 | | - if (tr->trace_buffer.buffer) |
|---|
| 1101 | | - return ring_buffer_record_is_on(tr->trace_buffer.buffer); |
|---|
| 1479 | + if (tr->array_buffer.buffer) |
|---|
| 1480 | + return ring_buffer_record_is_on(tr->array_buffer.buffer); |
|---|
| 1102 | 1481 | return !tr->buffer_disabled; |
|---|
| 1103 | 1482 | } |
|---|
| 1104 | 1483 | |
|---|
| .. | .. |
|---|
| 1118 | 1497 | if (!str) |
|---|
| 1119 | 1498 | return 0; |
|---|
| 1120 | 1499 | buf_size = memparse(str, &str); |
|---|
| 1121 | | - /* nr_entries can not be zero */ |
|---|
| 1122 | | - if (buf_size == 0) |
|---|
| 1123 | | - return 0; |
|---|
| 1124 | | - trace_buf_size = buf_size; |
|---|
| 1500 | + /* |
|---|
| 1501 | + * nr_entries can not be zero and the startup |
|---|
| 1502 | + * tests require some buffer space. Therefore |
|---|
| 1503 | + * ensure we have at least 4096 bytes of buffer. |
|---|
| 1504 | + */ |
|---|
| 1505 | + trace_buf_size = max(4096UL, buf_size); |
|---|
| 1125 | 1506 | return 1; |
|---|
| 1126 | 1507 | } |
|---|
| 1127 | 1508 | __setup("trace_buf_size=", set_buf_size); |
|---|
| .. | .. |
|---|
| 1315 | 1696 | } |
|---|
| 1316 | 1697 | |
|---|
| 1317 | 1698 | unsigned long __read_mostly tracing_thresh; |
|---|
| 1699 | +static const struct file_operations tracing_max_lat_fops; |
|---|
| 1700 | + |
|---|
| 1701 | +#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \ |
|---|
| 1702 | + defined(CONFIG_FSNOTIFY) |
|---|
| 1703 | + |
|---|
| 1704 | +static struct workqueue_struct *fsnotify_wq; |
|---|
| 1705 | + |
|---|
| 1706 | +static void latency_fsnotify_workfn(struct work_struct *work) |
|---|
| 1707 | +{ |
|---|
| 1708 | + struct trace_array *tr = container_of(work, struct trace_array, |
|---|
| 1709 | + fsnotify_work); |
|---|
| 1710 | + fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); |
|---|
| 1711 | +} |
|---|
| 1712 | + |
|---|
| 1713 | +static void latency_fsnotify_workfn_irq(struct irq_work *iwork) |
|---|
| 1714 | +{ |
|---|
| 1715 | + struct trace_array *tr = container_of(iwork, struct trace_array, |
|---|
| 1716 | + fsnotify_irqwork); |
|---|
| 1717 | + queue_work(fsnotify_wq, &tr->fsnotify_work); |
|---|
| 1718 | +} |
|---|
| 1719 | + |
|---|
| 1720 | +static void trace_create_maxlat_file(struct trace_array *tr, |
|---|
| 1721 | + struct dentry *d_tracer) |
|---|
| 1722 | +{ |
|---|
| 1723 | + INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); |
|---|
| 1724 | + init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); |
|---|
| 1725 | + tr->d_max_latency = trace_create_file("tracing_max_latency", 0644, |
|---|
| 1726 | + d_tracer, &tr->max_latency, |
|---|
| 1727 | + &tracing_max_lat_fops); |
|---|
| 1728 | +} |
|---|
| 1729 | + |
|---|
| 1730 | +__init static int latency_fsnotify_init(void) |
|---|
| 1731 | +{ |
|---|
| 1732 | + fsnotify_wq = alloc_workqueue("tr_max_lat_wq", |
|---|
| 1733 | + WQ_UNBOUND | WQ_HIGHPRI, 0); |
|---|
| 1734 | + if (!fsnotify_wq) { |
|---|
| 1735 | + pr_err("Unable to allocate tr_max_lat_wq\n"); |
|---|
| 1736 | + return -ENOMEM; |
|---|
| 1737 | + } |
|---|
| 1738 | + return 0; |
|---|
| 1739 | +} |
|---|
| 1740 | + |
|---|
| 1741 | +late_initcall_sync(latency_fsnotify_init); |
|---|
| 1742 | + |
|---|
| 1743 | +void latency_fsnotify(struct trace_array *tr) |
|---|
| 1744 | +{ |
|---|
| 1745 | + if (!fsnotify_wq) |
|---|
| 1746 | + return; |
|---|
| 1747 | + /* |
|---|
| 1748 | + * We cannot call queue_work(&tr->fsnotify_work) from here because it's |
|---|
| 1749 | + * possible that we are called from __schedule() or do_idle(), which |
|---|
| 1750 | + * could cause a deadlock. |
|---|
| 1751 | + */ |
|---|
| 1752 | + irq_work_queue(&tr->fsnotify_irqwork); |
|---|
| 1753 | +} |
|---|
| 1754 | + |
|---|
| 1755 | +/* |
|---|
| 1756 | + * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \ |
|---|
| 1757 | + * defined(CONFIG_FSNOTIFY) |
|---|
| 1758 | + */ |
|---|
| 1759 | +#else |
|---|
| 1760 | + |
|---|
| 1761 | +#define trace_create_maxlat_file(tr, d_tracer) \ |
|---|
| 1762 | + trace_create_file("tracing_max_latency", 0644, d_tracer, \ |
|---|
| 1763 | + &tr->max_latency, &tracing_max_lat_fops) |
|---|
| 1764 | + |
|---|
| 1765 | +#endif |
|---|
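The two-stage deferral is deliberate: `update_max_tr()` can run from `__schedule()` or `do_idle()`, so `latency_fsnotify()` only queues an irq_work, which in turn queues the workqueue item that finally calls `fsnotify_inode()` on `tracing_max_latency`. Userspace can therefore block on the file instead of polling it. A sketch of such a consumer (plain C, error handling omitted), assuming tracefs is mounted at /sys/kernel/tracing:

```c
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init();

	/* The FS_MODIFY generated by the workqueue above arrives as IN_MODIFY. */
	inotify_add_watch(fd, "/sys/kernel/tracing/tracing_max_latency",
			  IN_MODIFY);

	for (;;) {
		if (read(fd, buf, sizeof(buf)) > 0)	/* blocks until an update */
			printf("tracing_max_latency changed\n");
	}
}
```
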
| 1318 | 1766 | |
|---|
| 1319 | 1767 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 1320 | 1768 | /* |
|---|
| .. | .. |
|---|
| 1325 | 1773 | static void |
|---|
| 1326 | 1774 | __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
|---|
| 1327 | 1775 | { |
|---|
| 1328 | | - struct trace_buffer *trace_buf = &tr->trace_buffer; |
|---|
| 1329 | | - struct trace_buffer *max_buf = &tr->max_buffer; |
|---|
| 1776 | + struct array_buffer *trace_buf = &tr->array_buffer; |
|---|
| 1777 | + struct array_buffer *max_buf = &tr->max_buffer; |
|---|
| 1330 | 1778 | struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); |
|---|
| 1331 | 1779 | struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); |
|---|
| 1332 | 1780 | |
|---|
| .. | .. |
|---|
| 1337 | 1785 | max_data->critical_start = data->critical_start; |
|---|
| 1338 | 1786 | max_data->critical_end = data->critical_end; |
|---|
| 1339 | 1787 | |
|---|
| 1340 | | - memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); |
|---|
| 1788 | + strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN); |
|---|
| 1341 | 1789 | max_data->pid = tsk->pid; |
|---|
| 1342 | 1790 | /* |
|---|
| 1343 | 1791 | * If tsk == current, then use current_uid(), as that does not use |
|---|
| .. | .. |
|---|
| 1354 | 1802 | |
|---|
| 1355 | 1803 | /* record this tasks comm */ |
|---|
| 1356 | 1804 | tracing_record_cmdline(tsk); |
|---|
| 1805 | + latency_fsnotify(tr); |
|---|
| 1357 | 1806 | } |
|---|
| 1358 | 1807 | |
|---|
| 1359 | 1808 | /** |
|---|
| .. | .. |
|---|
| 1361 | 1810 | * @tr: tracer |
|---|
| 1362 | 1811 | * @tsk: the task with the latency |
|---|
| 1363 | 1812 | * @cpu: The cpu that initiated the trace. |
|---|
| 1813 | + * @cond_data: User data associated with a conditional snapshot |
|---|
| 1364 | 1814 | * |
|---|
| 1365 | 1815 | * Flip the buffers between the @tr and the max_tr and record information |
|---|
| 1366 | 1816 | * about which task was the cause of this latency. |
|---|
| 1367 | 1817 | */ |
|---|
| 1368 | 1818 | void |
|---|
| 1369 | | -update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
|---|
| 1819 | +update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, |
|---|
| 1820 | + void *cond_data) |
|---|
| 1370 | 1821 | { |
|---|
| 1371 | 1822 | if (tr->stop_count) |
|---|
| 1372 | 1823 | return; |
|---|
| .. | .. |
|---|
| 1381 | 1832 | |
|---|
| 1382 | 1833 | arch_spin_lock(&tr->max_lock); |
|---|
| 1383 | 1834 | |
|---|
| 1384 | | - /* Inherit the recordable setting from trace_buffer */ |
|---|
| 1385 | | - if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) |
|---|
| 1835 | + /* Inherit the recordable setting from array_buffer */ |
|---|
| 1836 | + if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) |
|---|
| 1386 | 1837 | ring_buffer_record_on(tr->max_buffer.buffer); |
|---|
| 1387 | 1838 | else |
|---|
| 1388 | 1839 | ring_buffer_record_off(tr->max_buffer.buffer); |
|---|
| 1389 | 1840 | |
|---|
| 1390 | | - swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); |
|---|
| 1841 | +#ifdef CONFIG_TRACER_SNAPSHOT |
|---|
| 1842 | + if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) |
|---|
| 1843 | + goto out_unlock; |
|---|
| 1844 | +#endif |
|---|
| 1845 | + swap(tr->array_buffer.buffer, tr->max_buffer.buffer); |
|---|
| 1391 | 1846 | |
|---|
| 1392 | 1847 | __update_max_tr(tr, tsk, cpu); |
|---|
| 1848 | + |
|---|
| 1849 | + out_unlock: |
|---|
| 1393 | 1850 | arch_spin_unlock(&tr->max_lock); |
|---|
| 1394 | 1851 | } |
|---|
| 1395 | 1852 | |
|---|
| 1396 | 1853 | /** |
|---|
| 1397 | 1854 | * update_max_tr_single - only copy one trace over, and reset the rest |
|---|
| 1398 | | - * @tr - tracer |
|---|
| 1399 | | - * @tsk - task with the latency |
|---|
| 1400 | | - * @cpu - the cpu of the buffer to copy. |
|---|
| 1855 | + * @tr: tracer |
|---|
| 1856 | + * @tsk: task with the latency |
|---|
| 1857 | + * @cpu: the cpu of the buffer to copy. |
|---|
| 1401 | 1858 | * |
|---|
| 1402 | 1859 | * Flip the trace of a single CPU buffer between the @tr and the max_tr. |
|---|
| 1403 | 1860 | */ |
|---|
| .. | .. |
|---|
| 1418 | 1875 | |
|---|
| 1419 | 1876 | arch_spin_lock(&tr->max_lock); |
|---|
| 1420 | 1877 | |
|---|
| 1421 | | - ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); |
|---|
| 1878 | + ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); |
|---|
| 1422 | 1879 | |
|---|
| 1423 | 1880 | if (ret == -EBUSY) { |
|---|
| 1424 | 1881 | /* |
|---|
| .. | .. |
|---|
| 1438 | 1895 | } |
|---|
| 1439 | 1896 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
|---|
| 1440 | 1897 | |
|---|
| 1441 | | -static int wait_on_pipe(struct trace_iterator *iter, bool full) |
|---|
| 1898 | +static int wait_on_pipe(struct trace_iterator *iter, int full) |
|---|
| 1442 | 1899 | { |
|---|
| 1443 | 1900 | /* Iterators are static, they should be filled or empty */ |
|---|
| 1444 | 1901 | if (trace_buffer_iter(iter, iter->cpu_file)) |
|---|
| 1445 | 1902 | return 0; |
|---|
| 1446 | 1903 | |
|---|
| 1447 | | - return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, |
|---|
| 1904 | + return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, |
|---|
| 1448 | 1905 | full); |
|---|
| 1449 | 1906 | } |
|---|
| 1450 | 1907 | |
|---|
| .. | .. |
|---|
| 1495 | 1952 | * internal tracing to verify that everything is in order. |
|---|
| 1496 | 1953 | * If we fail, we do not register this tracer. |
|---|
| 1497 | 1954 | */ |
|---|
| 1498 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
|---|
| 1955 | + tracing_reset_online_cpus(&tr->array_buffer); |
|---|
| 1499 | 1956 | |
|---|
| 1500 | 1957 | tr->current_trace = type; |
|---|
| 1501 | 1958 | |
|---|
| .. | .. |
|---|
| 1521 | 1978 | return -1; |
|---|
| 1522 | 1979 | } |
|---|
| 1523 | 1980 | /* Only reset on passing, to avoid touching corrupted buffers */ |
|---|
| 1524 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
|---|
| 1981 | + tracing_reset_online_cpus(&tr->array_buffer); |
|---|
| 1525 | 1982 | |
|---|
| 1526 | 1983 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 1527 | 1984 | if (type->use_max_tr) { |
|---|
| .. | .. |
|---|
| 1555 | 2012 | |
|---|
| 1556 | 2013 | tracing_selftest_running = true; |
|---|
| 1557 | 2014 | list_for_each_entry_safe(p, n, &postponed_selftests, list) { |
|---|
| 2015 | + /* This loop can take minutes when sanitizers are enabled, so |
|---|
| 2016 | + * let's make sure we allow RCU processing. |
|---|
| 2017 | + */ |
|---|
| 2018 | + cond_resched(); |
|---|
| 1558 | 2019 | ret = run_tracer_selftest(p->type); |
|---|
| 1559 | 2020 | /* If the test fails, then warn and remove from available_tracers */ |
|---|
| 1560 | 2021 | if (ret < 0) { |
|---|
| .. | .. |
|---|
| 1593 | 2054 | |
|---|
| 1594 | 2055 | /** |
|---|
| 1595 | 2056 | * register_tracer - register a tracer with the ftrace system. |
|---|
| 1596 | | - * @type - the plugin for the tracer |
|---|
| 2057 | + * @type: the plugin for the tracer |
|---|
| 1597 | 2058 | * |
|---|
| 1598 | 2059 | * Register a new plugin tracer. |
|---|
| 1599 | 2060 | */ |
|---|
| .. | .. |
|---|
| 1610 | 2071 | if (strlen(type->name) >= MAX_TRACER_SIZE) { |
|---|
| 1611 | 2072 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); |
|---|
| 1612 | 2073 | return -1; |
|---|
| 2074 | + } |
|---|
| 2075 | + |
|---|
| 2076 | + if (security_locked_down(LOCKDOWN_TRACEFS)) { |
|---|
| 2077 | + pr_warn("Can not register tracer %s due to lockdown\n", |
|---|
| 2078 | + type->name); |
|---|
| 2079 | + return -EPERM; |
|---|
| 1613 | 2080 | } |
|---|
| 1614 | 2081 | |
|---|
| 1615 | 2082 | mutex_lock(&trace_types_lock); |
|---|
| .. | .. |
|---|
| 1670 | 2137 | apply_trace_boot_options(); |
|---|
| 1671 | 2138 | |
|---|
| 1672 | 2139 | /* disable other selftests, since this will break it. */ |
|---|
| 1673 | | - tracing_selftest_disabled = true; |
|---|
| 1674 | | -#ifdef CONFIG_FTRACE_STARTUP_TEST |
|---|
| 1675 | | - printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", |
|---|
| 1676 | | - type->name); |
|---|
| 1677 | | -#endif |
|---|
| 2140 | + disable_tracing_selftest("running a tracer"); |
|---|
| 1678 | 2141 | |
|---|
| 1679 | 2142 | out_unlock: |
|---|
| 1680 | 2143 | return ret; |
|---|
| 1681 | 2144 | } |
|---|
| 1682 | 2145 | |
|---|
| 1683 | | -void tracing_reset(struct trace_buffer *buf, int cpu) |
|---|
| 2146 | +static void tracing_reset_cpu(struct array_buffer *buf, int cpu) |
|---|
| 1684 | 2147 | { |
|---|
| 1685 | | - struct ring_buffer *buffer = buf->buffer; |
|---|
| 2148 | + struct trace_buffer *buffer = buf->buffer; |
|---|
| 1686 | 2149 | |
|---|
| 1687 | 2150 | if (!buffer) |
|---|
| 1688 | 2151 | return; |
|---|
| .. | .. |
|---|
| 1690 | 2153 | ring_buffer_record_disable(buffer); |
|---|
| 1691 | 2154 | |
|---|
| 1692 | 2155 | /* Make sure all commits have finished */ |
|---|
| 1693 | | - synchronize_sched(); |
|---|
| 2156 | + synchronize_rcu(); |
|---|
| 1694 | 2157 | ring_buffer_reset_cpu(buffer, cpu); |
|---|
| 1695 | 2158 | |
|---|
| 1696 | 2159 | ring_buffer_record_enable(buffer); |
|---|
| 1697 | 2160 | } |
|---|
| 1698 | 2161 | |
|---|
| 1699 | | -void tracing_reset_online_cpus(struct trace_buffer *buf) |
|---|
| 2162 | +void tracing_reset_online_cpus(struct array_buffer *buf) |
|---|
| 1700 | 2163 | { |
|---|
| 1701 | | - struct ring_buffer *buffer = buf->buffer; |
|---|
| 1702 | | - int cpu; |
|---|
| 2164 | + struct trace_buffer *buffer = buf->buffer; |
|---|
| 1703 | 2165 | |
|---|
| 1704 | 2166 | if (!buffer) |
|---|
| 1705 | 2167 | return; |
|---|
| .. | .. |
|---|
| 1707 | 2169 | ring_buffer_record_disable(buffer); |
|---|
| 1708 | 2170 | |
|---|
| 1709 | 2171 | /* Make sure all commits have finished */ |
|---|
| 1710 | | - synchronize_sched(); |
|---|
| 2172 | + synchronize_rcu(); |
|---|
| 1711 | 2173 | |
|---|
| 1712 | 2174 | buf->time_start = buffer_ftrace_now(buf, buf->cpu); |
|---|
| 1713 | 2175 | |
|---|
| 1714 | | - for_each_online_cpu(cpu) |
|---|
| 1715 | | - ring_buffer_reset_cpu(buffer, cpu); |
|---|
| 2176 | + ring_buffer_reset_online_cpus(buffer); |
|---|
| 1716 | 2177 | |
|---|
| 1717 | 2178 | ring_buffer_record_enable(buffer); |
|---|
| 1718 | 2179 | } |
|---|
| .. | .. |
|---|
| 1726 | 2187 | if (!tr->clear_trace) |
|---|
| 1727 | 2188 | continue; |
|---|
| 1728 | 2189 | tr->clear_trace = false; |
|---|
| 1729 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
|---|
| 2190 | + tracing_reset_online_cpus(&tr->array_buffer); |
|---|
| 1730 | 2191 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 1731 | 2192 | tracing_reset_online_cpus(&tr->max_buffer); |
|---|
| 1732 | 2193 | #endif |
|---|
| .. | .. |
|---|
| 1744 | 2205 | |
|---|
| 1745 | 2206 | #define SAVED_CMDLINES_DEFAULT 128 |
|---|
| 1746 | 2207 | #define NO_CMDLINE_MAP UINT_MAX |
|---|
| 2208 | +/* |
|---|
| 2209 | + * Preemption must be disabled before acquiring trace_cmdline_lock. |
|---|
| 2210 | + * The various trace_arrays' max_lock must be acquired in a context |
|---|
| 2211 | + * where interrupts are disabled. |
|---|
| 2212 | + */ |
|---|
| 1747 | 2213 | static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
|---|
| 1748 | 2214 | struct saved_cmdlines_buffer { |
|---|
| 1749 | 2215 | unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; |
|---|
| .. | .. |
|---|
| 1761 | 2227 | |
|---|
| 1762 | 2228 | static inline void set_cmdline(int idx, const char *cmdline) |
|---|
| 1763 | 2229 | { |
|---|
| 1764 | | - memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); |
|---|
| 2230 | + strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); |
|---|
| 1765 | 2231 | } |
|---|
| 1766 | 2232 | |
|---|
| 1767 | 2233 | static int allocate_cmdlines_buffer(unsigned int val, |
|---|
| .. | .. |
|---|
| 1820 | 2286 | */ |
|---|
| 1821 | 2287 | void tracing_start(void) |
|---|
| 1822 | 2288 | { |
|---|
| 1823 | | - struct ring_buffer *buffer; |
|---|
| 2289 | + struct trace_buffer *buffer; |
|---|
| 1824 | 2290 | unsigned long flags; |
|---|
| 1825 | 2291 | |
|---|
| 1826 | 2292 | if (tracing_disabled) |
|---|
| .. | .. |
|---|
| 1839 | 2305 | /* Prevent the buffers from switching */ |
|---|
| 1840 | 2306 | arch_spin_lock(&global_trace.max_lock); |
|---|
| 1841 | 2307 | |
|---|
| 1842 | | - buffer = global_trace.trace_buffer.buffer; |
|---|
| 2308 | + buffer = global_trace.array_buffer.buffer; |
|---|
| 1843 | 2309 | if (buffer) |
|---|
| 1844 | 2310 | ring_buffer_record_enable(buffer); |
|---|
| 1845 | 2311 | |
|---|
| .. | .. |
|---|
| 1857 | 2323 | |
|---|
| 1858 | 2324 | static void tracing_start_tr(struct trace_array *tr) |
|---|
| 1859 | 2325 | { |
|---|
| 1860 | | - struct ring_buffer *buffer; |
|---|
| 2326 | + struct trace_buffer *buffer; |
|---|
| 1861 | 2327 | unsigned long flags; |
|---|
| 1862 | 2328 | |
|---|
| 1863 | 2329 | if (tracing_disabled) |
|---|
| .. | .. |
|---|
| 1878 | 2344 | goto out; |
|---|
| 1879 | 2345 | } |
|---|
| 1880 | 2346 | |
|---|
| 1881 | | - buffer = tr->trace_buffer.buffer; |
|---|
| 2347 | + buffer = tr->array_buffer.buffer; |
|---|
| 1882 | 2348 | if (buffer) |
|---|
| 1883 | 2349 | ring_buffer_record_enable(buffer); |
|---|
| 1884 | 2350 | |
|---|
| .. | .. |
|---|
| 1894 | 2360 | */ |
|---|
| 1895 | 2361 | void tracing_stop(void) |
|---|
| 1896 | 2362 | { |
|---|
| 1897 | | - struct ring_buffer *buffer; |
|---|
| 2363 | + struct trace_buffer *buffer; |
|---|
| 1898 | 2364 | unsigned long flags; |
|---|
| 1899 | 2365 | |
|---|
| 1900 | 2366 | raw_spin_lock_irqsave(&global_trace.start_lock, flags); |
|---|
| .. | .. |
|---|
| 1904 | 2370 | /* Prevent the buffers from switching */ |
|---|
| 1905 | 2371 | arch_spin_lock(&global_trace.max_lock); |
|---|
| 1906 | 2372 | |
|---|
| 1907 | | - buffer = global_trace.trace_buffer.buffer; |
|---|
| 2373 | + buffer = global_trace.array_buffer.buffer; |
|---|
| 1908 | 2374 | if (buffer) |
|---|
| 1909 | 2375 | ring_buffer_record_disable(buffer); |
|---|
| 1910 | 2376 | |
|---|
| .. | .. |
|---|
| 1922 | 2388 | |
|---|
| 1923 | 2389 | static void tracing_stop_tr(struct trace_array *tr) |
|---|
| 1924 | 2390 | { |
|---|
| 1925 | | - struct ring_buffer *buffer; |
|---|
| 2391 | + struct trace_buffer *buffer; |
|---|
| 1926 | 2392 | unsigned long flags; |
|---|
| 1927 | 2393 | |
|---|
| 1928 | 2394 | /* If global, we need to also stop the max tracer */ |
|---|
| .. | .. |
|---|
| 1933 | 2399 | if (tr->stop_count++) |
|---|
| 1934 | 2400 | goto out; |
|---|
| 1935 | 2401 | |
|---|
| 1936 | | - buffer = tr->trace_buffer.buffer; |
|---|
| 2402 | + buffer = tr->array_buffer.buffer; |
|---|
| 1937 | 2403 | if (buffer) |
|---|
| 1938 | 2404 | ring_buffer_record_disable(buffer); |
|---|
| 1939 | 2405 | |
|---|
| .. | .. |
|---|
| 1956 | 2422 | * the lock, but we also don't want to spin |
|---|
| 1957 | 2423 | * nor do we want to disable interrupts, |
|---|
| 1958 | 2424 | * so if we miss here, then better luck next time. |
|---|
| 2425 | + * |
|---|
| 2426 | + * This is called within the scheduler and wakeup paths, so interrupts |
|---|
| 2427 | + * had better be disabled and the run queue lock held. |
|---|
| 1959 | 2428 | */ |
|---|
| 2429 | + lockdep_assert_preemption_disabled(); |
|---|
| 1960 | 2430 | if (!arch_spin_trylock(&trace_cmdline_lock)) |
|---|
| 1961 | 2431 | return 0; |
|---|
| 1962 | 2432 | |
|---|
| .. | .. |
|---|
| 2064 | 2534 | /** |
|---|
| 2065 | 2535 | * tracing_record_taskinfo - record the task info of a task |
|---|
| 2066 | 2536 | * |
|---|
| 2067 | | - * @task - task to record |
|---|
| 2068 | | - * @flags - TRACE_RECORD_CMDLINE for recording comm |
|---|
| 2069 | | - * - TRACE_RECORD_TGID for recording tgid |
|---|
| 2537 | + * @task: task to record |
|---|
| 2538 | + * @flags: TRACE_RECORD_CMDLINE for recording comm |
|---|
| 2539 | + * TRACE_RECORD_TGID for recording tgid |
|---|
| 2070 | 2540 | */ |
|---|
| 2071 | 2541 | void tracing_record_taskinfo(struct task_struct *task, int flags) |
|---|
| 2072 | 2542 | { |
|---|
| .. | .. |
|---|
| 2092 | 2562 | /** |
|---|
| 2093 | 2563 | * tracing_record_taskinfo_sched_switch - record task info for sched_switch |
|---|
| 2094 | 2564 | * |
|---|
| 2095 | | - * @prev - previous task during sched_switch |
|---|
| 2096 | | - * @next - next task during sched_switch |
|---|
| 2097 | | - * @flags - TRACE_RECORD_CMDLINE for recording comm |
|---|
| 2098 | | - * TRACE_RECORD_TGID for recording tgid |
|---|
| 2565 | + * @prev: previous task during sched_switch |
|---|
| 2566 | + * @next: next task during sched_switch |
|---|
| 2567 | + * @flags: TRACE_RECORD_CMDLINE for recording comm |
|---|
| 2568 | + * TRACE_RECORD_TGID for recording tgid |
|---|
| 2099 | 2569 | */ |
|---|
| 2100 | 2570 | void tracing_record_taskinfo_sched_switch(struct task_struct *prev, |
|---|
| 2101 | 2571 | struct task_struct *next, int flags) |
|---|
| .. | .. |
|---|
| 2145 | 2615 | EXPORT_SYMBOL_GPL(trace_handle_return); |
|---|
| 2146 | 2616 | |
|---|
| 2147 | 2617 | void |
|---|
| 2148 | | -tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, |
|---|
| 2149 | | - int pc) |
|---|
| 2618 | +tracing_generic_entry_update(struct trace_entry *entry, unsigned short type, |
|---|
| 2619 | + unsigned long flags, int pc) |
|---|
| 2150 | 2620 | { |
|---|
| 2151 | 2621 | struct task_struct *tsk = current; |
|---|
| 2152 | 2622 | |
|---|
| 2153 | 2623 | entry->preempt_count = pc & 0xff; |
|---|
| 2154 | 2624 | entry->pid = (tsk) ? tsk->pid : 0; |
|---|
| 2625 | + entry->type = type; |
|---|
| 2155 | 2626 | entry->flags = |
|---|
| 2156 | 2627 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
|---|
| 2157 | 2628 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
|---|
| .. | .. |
|---|
| 2167 | 2638 | EXPORT_SYMBOL_GPL(tracing_generic_entry_update); |
|---|
| 2168 | 2639 | |
|---|
| 2169 | 2640 | struct ring_buffer_event * |
|---|
| 2170 | | -trace_buffer_lock_reserve(struct ring_buffer *buffer, |
|---|
| 2641 | +trace_buffer_lock_reserve(struct trace_buffer *buffer, |
|---|
| 2171 | 2642 | int type, |
|---|
| 2172 | 2643 | unsigned long len, |
|---|
| 2173 | 2644 | unsigned long flags, int pc) |
|---|
| .. | .. |
|---|
| 2217 | 2688 | |
|---|
| 2218 | 2689 | preempt_disable(); |
|---|
| 2219 | 2690 | if (cpu == smp_processor_id() && |
|---|
| 2220 | | - this_cpu_read(trace_buffered_event) != |
|---|
| 2691 | + __this_cpu_read(trace_buffered_event) != |
|---|
| 2221 | 2692 | per_cpu(trace_buffered_event, cpu)) |
|---|
| 2222 | 2693 | WARN_ON_ONCE(1); |
|---|
| 2223 | 2694 | preempt_enable(); |
|---|
| .. | .. |
|---|
| 2267 | 2738 | preempt_enable(); |
|---|
| 2268 | 2739 | |
|---|
| 2269 | 2740 | /* Wait for all current users to finish */ |
|---|
| 2270 | | - synchronize_sched(); |
|---|
| 2741 | + synchronize_rcu(); |
|---|
| 2271 | 2742 | |
|---|
| 2272 | 2743 | for_each_tracing_cpu(cpu) { |
|---|
| 2273 | 2744 | free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); |
|---|
| .. | .. |
|---|
| 2286 | 2757 | preempt_enable(); |
|---|
| 2287 | 2758 | } |
|---|
| 2288 | 2759 | |
|---|
| 2289 | | -static struct ring_buffer *temp_buffer; |
|---|
| 2760 | +static struct trace_buffer *temp_buffer; |
|---|
| 2290 | 2761 | |
|---|
| 2291 | 2762 | struct ring_buffer_event * |
|---|
| 2292 | | -trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, |
|---|
| 2763 | +trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, |
|---|
| 2293 | 2764 | struct trace_event_file *trace_file, |
|---|
| 2294 | 2765 | int type, unsigned long len, |
|---|
| 2295 | 2766 | unsigned long flags, int pc) |
|---|
| .. | .. |
|---|
| 2297 | 2768 | struct ring_buffer_event *entry; |
|---|
| 2298 | 2769 | int val; |
|---|
| 2299 | 2770 | |
|---|
| 2300 | | - *current_rb = trace_file->tr->trace_buffer.buffer; |
|---|
| 2771 | + *current_rb = trace_file->tr->array_buffer.buffer; |
|---|
| 2301 | 2772 | |
|---|
| 2302 | 2773 | if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags & |
|---|
| 2303 | 2774 | (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) && |
|---|
| .. | .. |
|---|
| 2317 | 2788 | /* |
|---|
| 2318 | 2789 | * If tracing is off, but we have triggers enabled |
|---|
| 2319 | 2790 | * we still need to look at the event data. Use the temp_buffer |
|---|
| 2320 | | - * to store the trace event for the tigger to use. It's recusive |
|---|
| 2791 | + * to store the trace event for the trigger to use. It's recursive |
|---|
| 2321 | 2792 | * safe and will not be recorded anywhere. |
|---|
| 2322 | 2793 | */ |
|---|
| 2323 | 2794 | if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { |
|---|
| .. | .. |
|---|
| 2329 | 2800 | } |
|---|
| 2330 | 2801 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); |
|---|
| 2331 | 2802 | |
|---|
| 2332 | | -static DEFINE_SPINLOCK(tracepoint_iter_lock); |
|---|
| 2803 | +static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock); |
|---|
| 2333 | 2804 | static DEFINE_MUTEX(tracepoint_printk_mutex); |
|---|
| 2334 | 2805 | |
|---|
| 2335 | 2806 | static void output_printk(struct trace_event_buffer *fbuffer) |
|---|
| 2336 | 2807 | { |
|---|
| 2337 | 2808 | struct trace_event_call *event_call; |
|---|
| 2809 | + struct trace_event_file *file; |
|---|
| 2338 | 2810 | struct trace_event *event; |
|---|
| 2339 | 2811 | unsigned long flags; |
|---|
| 2340 | 2812 | struct trace_iterator *iter = tracepoint_print_iter; |
|---|
| .. | .. |
|---|
| 2348 | 2820 | !event_call->event.funcs->trace) |
|---|
| 2349 | 2821 | return; |
|---|
| 2350 | 2822 | |
|---|
| 2823 | + file = fbuffer->trace_file; |
|---|
| 2824 | + if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || |
|---|
| 2825 | + (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && |
|---|
| 2826 | + !filter_match_preds(file->filter, fbuffer->entry))) |
|---|
| 2827 | + return; |
|---|
| 2828 | + |
|---|
| 2351 | 2829 | event = &fbuffer->trace_file->event_call->event; |
|---|
| 2352 | 2830 | |
|---|
| 2353 | | - spin_lock_irqsave(&tracepoint_iter_lock, flags); |
|---|
| 2831 | + raw_spin_lock_irqsave(&tracepoint_iter_lock, flags); |
|---|
| 2354 | 2832 | trace_seq_init(&iter->seq); |
|---|
| 2355 | 2833 | iter->ent = fbuffer->entry; |
|---|
| 2356 | 2834 | event_call->event.funcs->trace(iter, 0, event); |
|---|
| 2357 | 2835 | trace_seq_putc(&iter->seq, 0); |
|---|
| 2358 | 2836 | printk("%s", iter->seq.buffer); |
|---|
| 2359 | 2837 | |
|---|
| 2360 | | - spin_unlock_irqrestore(&tracepoint_iter_lock, flags); |
|---|
| 2838 | + raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); |
|---|
| 2361 | 2839 | } |
|---|
| 2362 | 2840 | |
|---|
| 2363 | 2841 | int tracepoint_printk_sysctl(struct ctl_table *table, int write, |
|---|
| 2364 | | - void __user *buffer, size_t *lenp, |
|---|
| 2842 | + void *buffer, size_t *lenp, |
|---|
| 2365 | 2843 | loff_t *ppos) |
|---|
| 2366 | 2844 | { |
|---|
| 2367 | 2845 | int save_tracepoint_printk; |
|---|
| .. | .. |
|---|
| 2398 | 2876 | if (static_key_false(&tracepoint_printk_key.key)) |
|---|
| 2399 | 2877 | output_printk(fbuffer); |
|---|
| 2400 | 2878 | |
|---|
| 2401 | | - event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer, |
|---|
| 2879 | + if (static_branch_unlikely(&trace_event_exports_enabled)) |
|---|
| 2880 | + ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); |
|---|
| 2881 | + event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer, |
|---|
| 2402 | 2882 | fbuffer->event, fbuffer->entry, |
|---|
| 2403 | | - fbuffer->flags, fbuffer->pc); |
|---|
| 2883 | + fbuffer->flags, fbuffer->pc, fbuffer->regs); |
|---|
| 2404 | 2884 | } |
|---|
| 2405 | 2885 | EXPORT_SYMBOL_GPL(trace_event_buffer_commit); |
|---|
| 2406 | 2886 | |
|---|
| .. | .. |
|---|
| 2414 | 2894 | # define STACK_SKIP 3 |
|---|
| 2415 | 2895 | |
|---|
| 2416 | 2896 | void trace_buffer_unlock_commit_regs(struct trace_array *tr, |
|---|
| 2417 | | - struct ring_buffer *buffer, |
|---|
| 2897 | + struct trace_buffer *buffer, |
|---|
| 2418 | 2898 | struct ring_buffer_event *event, |
|---|
| 2419 | 2899 | unsigned long flags, int pc, |
|---|
| 2420 | 2900 | struct pt_regs *regs) |
|---|
| .. | .. |
|---|
| 2435 | 2915 | * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. |
|---|
| 2436 | 2916 | */ |
|---|
| 2437 | 2917 | void |
|---|
| 2438 | | -trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, |
|---|
| 2918 | +trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, |
|---|
| 2439 | 2919 | struct ring_buffer_event *event) |
|---|
| 2440 | 2920 | { |
|---|
| 2441 | 2921 | __buffer_unlock_commit(buffer, event); |
|---|
| 2442 | 2922 | } |
|---|
| 2443 | | - |
|---|
| 2444 | | -static void |
|---|
| 2445 | | -trace_process_export(struct trace_export *export, |
|---|
| 2446 | | - struct ring_buffer_event *event) |
|---|
| 2447 | | -{ |
|---|
| 2448 | | - struct trace_entry *entry; |
|---|
| 2449 | | - unsigned int size = 0; |
|---|
| 2450 | | - |
|---|
| 2451 | | - entry = ring_buffer_event_data(event); |
|---|
| 2452 | | - size = ring_buffer_event_length(event); |
|---|
| 2453 | | - export->write(export, entry, size); |
|---|
| 2454 | | -} |
|---|
| 2455 | | - |
|---|
| 2456 | | -static DEFINE_MUTEX(ftrace_export_lock); |
|---|
| 2457 | | - |
|---|
| 2458 | | -static struct trace_export __rcu *ftrace_exports_list __read_mostly; |
|---|
| 2459 | | - |
|---|
| 2460 | | -static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled); |
|---|
| 2461 | | - |
|---|
| 2462 | | -static inline void ftrace_exports_enable(void) |
|---|
| 2463 | | -{ |
|---|
| 2464 | | - static_branch_enable(&ftrace_exports_enabled); |
|---|
| 2465 | | -} |
|---|
| 2466 | | - |
|---|
| 2467 | | -static inline void ftrace_exports_disable(void) |
|---|
| 2468 | | -{ |
|---|
| 2469 | | - static_branch_disable(&ftrace_exports_enabled); |
|---|
| 2470 | | -} |
|---|
| 2471 | | - |
|---|
| 2472 | | -void ftrace_exports(struct ring_buffer_event *event) |
|---|
| 2473 | | -{ |
|---|
| 2474 | | - struct trace_export *export; |
|---|
| 2475 | | - |
|---|
| 2476 | | - preempt_disable_notrace(); |
|---|
| 2477 | | - |
|---|
| 2478 | | - export = rcu_dereference_raw_notrace(ftrace_exports_list); |
|---|
| 2479 | | - while (export) { |
|---|
| 2480 | | - trace_process_export(export, event); |
|---|
| 2481 | | - export = rcu_dereference_raw_notrace(export->next); |
|---|
| 2482 | | - } |
|---|
| 2483 | | - |
|---|
| 2484 | | - preempt_enable_notrace(); |
|---|
| 2485 | | -} |
|---|
| 2486 | | - |
|---|
| 2487 | | -static inline void |
|---|
| 2488 | | -add_trace_export(struct trace_export **list, struct trace_export *export) |
|---|
| 2489 | | -{ |
|---|
| 2490 | | - rcu_assign_pointer(export->next, *list); |
|---|
| 2491 | | - /* |
|---|
| 2492 | | - * We are entering export into the list but another |
|---|
| 2493 | | - * CPU might be walking that list. We need to make sure |
|---|
| 2494 | | - * the export->next pointer is valid before another CPU sees |
|---|
| 2495 | | - * the export pointer included into the list. |
|---|
| 2496 | | - */ |
|---|
| 2497 | | - rcu_assign_pointer(*list, export); |
|---|
| 2498 | | -} |
|---|
| 2499 | | - |
|---|
| 2500 | | -static inline int |
|---|
| 2501 | | -rm_trace_export(struct trace_export **list, struct trace_export *export) |
|---|
| 2502 | | -{ |
|---|
| 2503 | | - struct trace_export **p; |
|---|
| 2504 | | - |
|---|
| 2505 | | - for (p = list; *p != NULL; p = &(*p)->next) |
|---|
| 2506 | | - if (*p == export) |
|---|
| 2507 | | - break; |
|---|
| 2508 | | - |
|---|
| 2509 | | - if (*p != export) |
|---|
| 2510 | | - return -1; |
|---|
| 2511 | | - |
|---|
| 2512 | | - rcu_assign_pointer(*p, (*p)->next); |
|---|
| 2513 | | - |
|---|
| 2514 | | - return 0; |
|---|
| 2515 | | -} |
|---|
| 2516 | | - |
|---|
| 2517 | | -static inline void |
|---|
| 2518 | | -add_ftrace_export(struct trace_export **list, struct trace_export *export) |
|---|
| 2519 | | -{ |
|---|
| 2520 | | - if (*list == NULL) |
|---|
| 2521 | | - ftrace_exports_enable(); |
|---|
| 2522 | | - |
|---|
| 2523 | | - add_trace_export(list, export); |
|---|
| 2524 | | -} |
|---|
| 2525 | | - |
|---|
| 2526 | | -static inline int |
|---|
| 2527 | | -rm_ftrace_export(struct trace_export **list, struct trace_export *export) |
|---|
| 2528 | | -{ |
|---|
| 2529 | | - int ret; |
|---|
| 2530 | | - |
|---|
| 2531 | | - ret = rm_trace_export(list, export); |
|---|
| 2532 | | - if (*list == NULL) |
|---|
| 2533 | | - ftrace_exports_disable(); |
|---|
| 2534 | | - |
|---|
| 2535 | | - return ret; |
|---|
| 2536 | | -} |
|---|
| 2537 | | - |
|---|
| 2538 | | -int register_ftrace_export(struct trace_export *export) |
|---|
| 2539 | | -{ |
|---|
| 2540 | | - if (WARN_ON_ONCE(!export->write)) |
|---|
| 2541 | | - return -1; |
|---|
| 2542 | | - |
|---|
| 2543 | | - mutex_lock(&ftrace_export_lock); |
|---|
| 2544 | | - |
|---|
| 2545 | | - add_ftrace_export(&ftrace_exports_list, export); |
|---|
| 2546 | | - |
|---|
| 2547 | | - mutex_unlock(&ftrace_export_lock); |
|---|
| 2548 | | - |
|---|
| 2549 | | - return 0; |
|---|
| 2550 | | -} |
|---|
| 2551 | | -EXPORT_SYMBOL_GPL(register_ftrace_export); |
|---|
| 2552 | | - |
|---|
| 2553 | | -int unregister_ftrace_export(struct trace_export *export) |
|---|
| 2554 | | -{ |
|---|
| 2555 | | - int ret; |
|---|
| 2556 | | - |
|---|
| 2557 | | - mutex_lock(&ftrace_export_lock); |
|---|
| 2558 | | - |
|---|
| 2559 | | - ret = rm_ftrace_export(&ftrace_exports_list, export); |
|---|
| 2560 | | - |
|---|
| 2561 | | - mutex_unlock(&ftrace_export_lock); |
|---|
| 2562 | | - |
|---|
| 2563 | | - return ret; |
|---|
| 2564 | | -} |
|---|
| 2565 | | -EXPORT_SYMBOL_GPL(unregister_ftrace_export); |
|---|
| 2566 | 2923 | |
|---|
| 2567 | 2924 | void |
|---|
| 2568 | 2925 | trace_function(struct trace_array *tr, |
|---|
| .. | .. |
|---|
| 2570 | 2927 | int pc) |
|---|
| 2571 | 2928 | { |
|---|
| 2572 | 2929 | struct trace_event_call *call = &event_function; |
|---|
| 2573 | | - struct ring_buffer *buffer = tr->trace_buffer.buffer; |
|---|
| 2930 | + struct trace_buffer *buffer = tr->array_buffer.buffer; |
|---|
| 2574 | 2931 | struct ring_buffer_event *event; |
|---|
| 2575 | 2932 | struct ftrace_entry *entry; |
|---|
| 2576 | 2933 | |
|---|
| .. | .. |
|---|
| 2583 | 2940 | entry->parent_ip = parent_ip; |
|---|
| 2584 | 2941 | |
|---|
| 2585 | 2942 | if (!call_filter_check_discard(call, entry, buffer, event)) { |
|---|
| 2586 | | - if (static_branch_unlikely(&ftrace_exports_enabled)) |
|---|
| 2587 | | - ftrace_exports(event); |
|---|
| 2943 | + if (static_branch_unlikely(&trace_function_exports_enabled)) |
|---|
| 2944 | + ftrace_exports(event, TRACE_EXPORT_FUNCTION); |
|---|
| 2588 | 2945 | __buffer_unlock_commit(buffer, event); |
|---|
| 2589 | 2946 | } |
|---|
| 2590 | 2947 | } |
|---|
| 2591 | 2948 | |
|---|
| 2592 | 2949 | #ifdef CONFIG_STACKTRACE |
|---|
| 2593 | 2950 | |
|---|
| 2594 | | -#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) |
|---|
| 2951 | +/* Allow 4 levels of nesting: normal, softirq, irq, NMI */ |
|---|
| 2952 | +#define FTRACE_KSTACK_NESTING 4 |
|---|
| 2953 | + |
|---|
| 2954 | +#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING) |
|---|
| 2955 | + |
|---|
| 2595 | 2956 | struct ftrace_stack { |
|---|
| 2596 | | - unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; |
|---|
| 2957 | + unsigned long calls[FTRACE_KSTACK_ENTRIES]; |
|---|
| 2597 | 2958 | }; |
|---|
| 2598 | 2959 | |
|---|
| 2599 | | -static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); |
|---|
| 2960 | + |
|---|
| 2961 | +struct ftrace_stacks { |
|---|
| 2962 | + struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; |
|---|
| 2963 | +}; |
|---|
| 2964 | + |
|---|
| 2965 | +static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); |
|---|
| 2600 | 2966 | static DEFINE_PER_CPU(int, ftrace_stack_reserve); |
|---|
| 2601 | 2967 | |
|---|
| 2602 | | -static void __ftrace_trace_stack(struct ring_buffer *buffer, |
|---|
| 2968 | +static void __ftrace_trace_stack(struct trace_buffer *buffer, |
|---|
| 2603 | 2969 | unsigned long flags, |
|---|
| 2604 | 2970 | int skip, int pc, struct pt_regs *regs) |
|---|
| 2605 | 2971 | { |
|---|
| 2606 | 2972 | struct trace_event_call *call = &event_kernel_stack; |
|---|
| 2607 | 2973 | struct ring_buffer_event *event; |
|---|
| 2974 | + unsigned int size, nr_entries; |
|---|
| 2975 | + struct ftrace_stack *fstack; |
|---|
| 2608 | 2976 | struct stack_entry *entry; |
|---|
| 2609 | | - struct stack_trace trace; |
|---|
| 2610 | | - int use_stack; |
|---|
| 2611 | | - int size = FTRACE_STACK_ENTRIES; |
|---|
| 2612 | | - |
|---|
| 2613 | | - trace.nr_entries = 0; |
|---|
| 2614 | | - trace.skip = skip; |
|---|
| 2977 | + int stackidx; |
|---|
| 2615 | 2978 | |
|---|
| 2616 | 2979 | /* |
|---|
| 2617 | 2980 | * Add one, for this function and the call to save_stack_trace() |
|---|
| .. | .. |
|---|
| 2619 | 2982 | */ |
|---|
| 2620 | 2983 | #ifndef CONFIG_UNWINDER_ORC |
|---|
| 2621 | 2984 | if (!regs) |
|---|
| 2622 | | - trace.skip++; |
|---|
| 2985 | + skip++; |
|---|
| 2623 | 2986 | #endif |
|---|
| 2624 | 2987 | |
|---|
| 2625 | | - /* |
|---|
| 2626 | | - * Since events can happen in NMIs there's no safe way to |
|---|
| 2627 | | - * use the per cpu ftrace_stacks. We reserve it and if an interrupt |
|---|
| 2628 | | - * or NMI comes in, it will just have to use the default |
|---|
| 2629 | | - * FTRACE_STACK_SIZE. |
|---|
| 2630 | | - */ |
|---|
| 2631 | 2988 | preempt_disable_notrace(); |
|---|
| 2632 | 2989 | |
|---|
| 2633 | | - use_stack = __this_cpu_inc_return(ftrace_stack_reserve); |
|---|
| 2990 | + stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; |
|---|
| 2991 | + |
|---|
| 2992 | + /* This should never happen. If it does, yell once and skip */ |
|---|
| 2993 | + if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) |
|---|
| 2994 | + goto out; |
|---|
| 2995 | + |
|---|
| 2634 | 2996 | /* |
|---|
| 2635 | | - * We don't need any atomic variables, just a barrier. |
|---|
| 2636 | | - * If an interrupt comes in, we don't care, because it would |
|---|
| 2637 | | - * have exited and put the counter back to what we want. |
|---|
| 2638 | | - * We just need a barrier to keep gcc from moving things |
|---|
| 2639 | | - * around. |
|---|
| 2997 | + * The above __this_cpu_inc_return() is 'atomic' cpu local. An |
|---|
| 2998 | + * interrupt will either see the value pre increment or post |
|---|
| 2999 | + * increment. If the interrupt happens pre increment it will have |
|---|
| 3000 | + * restored the counter when it returns. We just need a barrier to |
|---|
| 3001 | + * keep gcc from moving things around. |
|---|
| 2640 | 3002 | */ |
|---|
| 2641 | 3003 | barrier(); |
|---|
| 2642 | | - if (use_stack == 1) { |
|---|
| 2643 | | - trace.entries = this_cpu_ptr(ftrace_stack.calls); |
|---|
| 2644 | | - trace.max_entries = FTRACE_STACK_MAX_ENTRIES; |
|---|
| 2645 | 3004 | |
|---|
| 2646 | | - if (regs) |
|---|
| 2647 | | - save_stack_trace_regs(regs, &trace); |
|---|
| 2648 | | - else |
|---|
| 2649 | | - save_stack_trace(&trace); |
|---|
| 3005 | + fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx; |
|---|
| 3006 | + size = ARRAY_SIZE(fstack->calls); |
|---|
| 2650 | 3007 | |
|---|
| 2651 | | - if (trace.nr_entries > size) |
|---|
| 2652 | | - size = trace.nr_entries; |
|---|
| 2653 | | - } else |
|---|
| 2654 | | - /* From now on, use_stack is a boolean */ |
|---|
| 2655 | | - use_stack = 0; |
|---|
| 3008 | + if (regs) { |
|---|
| 3009 | + nr_entries = stack_trace_save_regs(regs, fstack->calls, |
|---|
| 3010 | + size, skip); |
|---|
| 3011 | + } else { |
|---|
| 3012 | + nr_entries = stack_trace_save(fstack->calls, size, skip); |
|---|
| 3013 | + } |
|---|
| 2656 | 3014 | |
|---|
| 2657 | | - size *= sizeof(unsigned long); |
|---|
| 2658 | | - |
|---|
| 3015 | + size = nr_entries * sizeof(unsigned long); |
|---|
| 2659 | 3016 | event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, |
|---|
| 2660 | 3017 | (sizeof(*entry) - sizeof(entry->caller)) + size, |
|---|
| 2661 | 3018 | flags, pc); |
|---|
| .. | .. |
|---|
| 2663 | 3020 | goto out; |
|---|
| 2664 | 3021 | entry = ring_buffer_event_data(event); |
|---|
| 2665 | 3022 | |
|---|
| 2666 | | - memset(&entry->caller, 0, size); |
|---|
| 2667 | | - |
|---|
| 2668 | | - if (use_stack) |
|---|
| 2669 | | - memcpy(&entry->caller, trace.entries, |
|---|
| 2670 | | - trace.nr_entries * sizeof(unsigned long)); |
|---|
| 2671 | | - else { |
|---|
| 2672 | | - trace.max_entries = FTRACE_STACK_ENTRIES; |
|---|
| 2673 | | - trace.entries = entry->caller; |
|---|
| 2674 | | - if (regs) |
|---|
| 2675 | | - save_stack_trace_regs(regs, &trace); |
|---|
| 2676 | | - else |
|---|
| 2677 | | - save_stack_trace(&trace); |
|---|
| 2678 | | - } |
|---|
| 2679 | | - |
|---|
| 2680 | | - entry->size = trace.nr_entries; |
|---|
| 3023 | + memcpy(&entry->caller, fstack->calls, size); |
|---|
| 3024 | + entry->size = nr_entries; |
|---|
| 2681 | 3025 | |
|---|
| 2682 | 3026 | if (!call_filter_check_discard(call, entry, buffer, event)) |
|---|
| 2683 | 3027 | __buffer_unlock_commit(buffer, event); |
|---|
| .. | .. |
|---|
| 2691 | 3035 | } |
|---|
| 2692 | 3036 | |
|---|
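The hunk above replaces the old `struct stack_trace` interface with `stack_trace_save()`/`stack_trace_save_regs()` and keeps one stack buffer per preemption context (task, softirq, irq, NMI) by handing out slots through a per-CPU reservation counter. Below is a minimal stand-alone sketch of that reservation pattern; the `my_*` names are made up for illustration and this is not code from the patch.

```c
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/bug.h>

#define MY_NESTING	4	/* task, softirq, irq, NMI */
#define MY_ENTRIES	64

struct my_stack {
	unsigned long calls[MY_ENTRIES];
};

struct my_stacks {
	struct my_stack slot[MY_NESTING];
};

static DEFINE_PER_CPU(struct my_stacks, my_stacks);
static DEFINE_PER_CPU(int, my_stack_reserve);

/* Capture the current kernel stack and copy up to @max entries into @dst. */
static unsigned int my_snapshot_stack(unsigned long *dst, unsigned int max)
{
	struct my_stack *fstack;
	unsigned int nr_entries = 0;
	int stackidx;

	preempt_disable_notrace();

	/* Reserve a slot; a nested interrupt or NMI gets the next one. */
	stackidx = __this_cpu_inc_return(my_stack_reserve) - 1;
	if (WARN_ON_ONCE(stackidx >= MY_NESTING))
		goto out;
	barrier();	/* keep the compiler from reordering around the slot use */

	fstack = this_cpu_ptr(my_stacks.slot) + stackidx;
	nr_entries = stack_trace_save(fstack->calls, MY_ENTRIES, 0);
	if (nr_entries > max)
		nr_entries = max;
	/* Consume the slot while it is still reserved for this context */
	memcpy(dst, fstack->calls, nr_entries * sizeof(unsigned long));
out:
	barrier();
	__this_cpu_dec(my_stack_reserve);
	preempt_enable_notrace();
	return nr_entries;
}
```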
| 2693 | 3037 | static inline void ftrace_trace_stack(struct trace_array *tr, |
|---|
| 2694 | | - struct ring_buffer *buffer, |
|---|
| 3038 | + struct trace_buffer *buffer, |
|---|
| 2695 | 3039 | unsigned long flags, |
|---|
| 2696 | 3040 | int skip, int pc, struct pt_regs *regs) |
|---|
| 2697 | 3041 | { |
|---|
| .. | .. |
|---|
| 2704 | 3048 | void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, |
|---|
| 2705 | 3049 | int pc) |
|---|
| 2706 | 3050 | { |
|---|
| 2707 | | - struct ring_buffer *buffer = tr->trace_buffer.buffer; |
|---|
| 3051 | + struct trace_buffer *buffer = tr->array_buffer.buffer; |
|---|
| 2708 | 3052 | |
|---|
| 2709 | 3053 | if (rcu_is_watching()) { |
|---|
| 2710 | 3054 | __ftrace_trace_stack(buffer, flags, skip, pc, NULL); |
|---|
| .. | .. |
|---|
| 2742 | 3086 | /* Skip 1 to skip this function. */ |
|---|
| 2743 | 3087 | skip++; |
|---|
| 2744 | 3088 | #endif |
|---|
| 2745 | | - __ftrace_trace_stack(global_trace.trace_buffer.buffer, |
|---|
| 3089 | + __ftrace_trace_stack(global_trace.array_buffer.buffer, |
|---|
| 2746 | 3090 | flags, skip, preempt_count(), NULL); |
|---|
| 2747 | 3091 | } |
|---|
| 3092 | +EXPORT_SYMBOL_GPL(trace_dump_stack); |
|---|
| 2748 | 3093 | |
|---|
| 3094 | +#ifdef CONFIG_USER_STACKTRACE_SUPPORT |
|---|
| 2749 | 3095 | static DEFINE_PER_CPU(int, user_stack_count); |
|---|
| 2750 | 3096 | |
|---|
| 2751 | | -void |
|---|
| 3097 | +static void |
|---|
| 2752 | 3098 | ftrace_trace_userstack(struct trace_array *tr, |
|---|
| 2753 | | - struct ring_buffer *buffer, unsigned long flags, int pc) |
|---|
| 3099 | + struct trace_buffer *buffer, unsigned long flags, int pc) |
|---|
| 2754 | 3100 | { |
|---|
| 2755 | 3101 | struct trace_event_call *call = &event_user_stack; |
|---|
| 2756 | 3102 | struct ring_buffer_event *event; |
|---|
| 2757 | 3103 | struct userstack_entry *entry; |
|---|
| 2758 | | - struct stack_trace trace; |
|---|
| 2759 | 3104 | |
|---|
| 2760 | 3105 | if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) |
|---|
| 2761 | 3106 | return; |
|---|
| .. | .. |
|---|
| 2786 | 3131 | entry->tgid = current->tgid; |
|---|
| 2787 | 3132 | memset(&entry->caller, 0, sizeof(entry->caller)); |
|---|
| 2788 | 3133 | |
|---|
| 2789 | | - trace.nr_entries = 0; |
|---|
| 2790 | | - trace.max_entries = FTRACE_STACK_ENTRIES; |
|---|
| 2791 | | - trace.skip = 0; |
|---|
| 2792 | | - trace.entries = entry->caller; |
|---|
| 2793 | | - |
|---|
| 2794 | | - save_stack_trace_user(&trace); |
|---|
| 3134 | + stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); |
|---|
| 2795 | 3135 | if (!call_filter_check_discard(call, entry, buffer, event)) |
|---|
| 2796 | 3136 | __buffer_unlock_commit(buffer, event); |
|---|
| 2797 | 3137 | |
|---|
| .. | .. |
|---|
| 2800 | 3140 | out: |
|---|
| 2801 | 3141 | preempt_enable(); |
|---|
| 2802 | 3142 | } |
|---|
| 2803 | | - |
|---|
| 2804 | | -#ifdef UNUSED |
|---|
| 2805 | | -static void __trace_userstack(struct trace_array *tr, unsigned long flags) |
|---|
| 3143 | +#else /* CONFIG_USER_STACKTRACE_SUPPORT */ |
|---|
| 3144 | +static void ftrace_trace_userstack(struct trace_array *tr, |
|---|
| 3145 | + struct trace_buffer *buffer, |
|---|
| 3146 | + unsigned long flags, int pc) |
|---|
| 2806 | 3147 | { |
|---|
| 2807 | | - ftrace_trace_userstack(tr, flags, preempt_count()); |
|---|
| 2808 | 3148 | } |
|---|
| 2809 | | -#endif /* UNUSED */ |
|---|
| 3149 | +#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ |
|---|
| 2810 | 3150 | |
|---|
| 2811 | 3151 | #endif /* CONFIG_STACKTRACE */ |
|---|
| 2812 | 3152 | |
|---|
| .. | .. |
|---|
| 2847 | 3187 | { |
|---|
| 2848 | 3188 | struct trace_buffer_struct __percpu *buffers; |
|---|
| 2849 | 3189 | |
|---|
| 3190 | + if (trace_percpu_buffer) |
|---|
| 3191 | + return 0; |
|---|
| 3192 | + |
|---|
| 2850 | 3193 | buffers = alloc_percpu(struct trace_buffer_struct); |
|---|
| 2851 | | - if (WARN(!buffers, "Could not allocate percpu trace_printk buffer")) |
|---|
| 3194 | + if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) |
|---|
| 2852 | 3195 | return -ENOMEM; |
|---|
| 2853 | 3196 | |
|---|
| 2854 | 3197 | trace_percpu_buffer = buffers; |
|---|
| .. | .. |
|---|
| 2893 | 3236 | * directly here. If the global_trace.buffer is already |
|---|
| 2894 | 3237 | * allocated here, then this was called by module code. |
|---|
| 2895 | 3238 | */ |
|---|
| 2896 | | - if (global_trace.trace_buffer.buffer) |
|---|
| 3239 | + if (global_trace.array_buffer.buffer) |
|---|
| 2897 | 3240 | tracing_start_cmdline_record(); |
|---|
| 2898 | 3241 | } |
|---|
| 3242 | +EXPORT_SYMBOL_GPL(trace_printk_init_buffers); |
|---|
| 2899 | 3243 | |
|---|
| 2900 | 3244 | void trace_printk_start_comm(void) |
|---|
| 2901 | 3245 | { |
|---|
| .. | .. |
|---|
| 2918 | 3262 | |
|---|
| 2919 | 3263 | /** |
|---|
| 2920 | 3264 | * trace_vbprintk - write binary msg to tracing buffer |
|---|
| 2921 | | - * |
|---|
| 3265 | + * @ip: The address of the caller |
|---|
| 3266 | + * @fmt: The string format to write to the buffer |
|---|
| 3267 | + * @args: Arguments for @fmt |
|---|
| 2922 | 3268 | */ |
|---|
| 2923 | 3269 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) |
|---|
| 2924 | 3270 | { |
|---|
| 2925 | 3271 | struct trace_event_call *call = &event_bprint; |
|---|
| 2926 | 3272 | struct ring_buffer_event *event; |
|---|
| 2927 | | - struct ring_buffer *buffer; |
|---|
| 3273 | + struct trace_buffer *buffer; |
|---|
| 2928 | 3274 | struct trace_array *tr = &global_trace; |
|---|
| 2929 | 3275 | struct bprint_entry *entry; |
|---|
| 2930 | 3276 | unsigned long flags; |
|---|
| .. | .. |
|---|
| 2949 | 3295 | len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); |
|---|
| 2950 | 3296 | |
|---|
| 2951 | 3297 | if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) |
|---|
| 2952 | | - goto out; |
|---|
| 3298 | + goto out_put; |
|---|
| 2953 | 3299 | |
|---|
| 2954 | 3300 | local_save_flags(flags); |
|---|
| 2955 | 3301 | size = sizeof(*entry) + sizeof(u32) * len; |
|---|
| 2956 | | - buffer = tr->trace_buffer.buffer; |
|---|
| 3302 | + buffer = tr->array_buffer.buffer; |
|---|
| 3303 | + ring_buffer_nest_start(buffer); |
|---|
| 2957 | 3304 | event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, |
|---|
| 2958 | 3305 | flags, pc); |
|---|
| 2959 | 3306 | if (!event) |
|---|
| .. | .. |
|---|
| 2969 | 3316 | } |
|---|
| 2970 | 3317 | |
|---|
| 2971 | 3318 | out: |
|---|
| 3319 | + ring_buffer_nest_end(buffer); |
|---|
| 3320 | +out_put: |
|---|
| 2972 | 3321 | put_trace_buf(); |
|---|
| 2973 | 3322 | |
|---|
| 2974 | 3323 | out_nobuffer: |
|---|
| .. | .. |
|---|
| 2981 | 3330 | |
|---|
| 2982 | 3331 | __printf(3, 0) |
|---|
| 2983 | 3332 | static int |
|---|
| 2984 | | -__trace_array_vprintk(struct ring_buffer *buffer, |
|---|
| 3333 | +__trace_array_vprintk(struct trace_buffer *buffer, |
|---|
| 2985 | 3334 | unsigned long ip, const char *fmt, va_list args) |
|---|
| 2986 | 3335 | { |
|---|
| 2987 | 3336 | struct trace_event_call *call = &event_print; |
|---|
| .. | .. |
|---|
| 3011 | 3360 | |
|---|
| 3012 | 3361 | local_save_flags(flags); |
|---|
| 3013 | 3362 | size = sizeof(*entry) + len + 1; |
|---|
| 3363 | + ring_buffer_nest_start(buffer); |
|---|
| 3014 | 3364 | event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
|---|
| 3015 | 3365 | flags, pc); |
|---|
| 3016 | 3366 | if (!event) |
|---|
| .. | .. |
|---|
| 3025 | 3375 | } |
|---|
| 3026 | 3376 | |
|---|
| 3027 | 3377 | out: |
|---|
| 3378 | + ring_buffer_nest_end(buffer); |
|---|
| 3028 | 3379 | put_trace_buf(); |
|---|
| 3029 | 3380 | |
|---|
| 3030 | 3381 | out_nobuffer: |
|---|
| .. | .. |
|---|
| 3038 | 3389 | int trace_array_vprintk(struct trace_array *tr, |
|---|
| 3039 | 3390 | unsigned long ip, const char *fmt, va_list args) |
|---|
| 3040 | 3391 | { |
|---|
| 3041 | | - return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); |
|---|
| 3392 | + return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); |
|---|
| 3042 | 3393 | } |
|---|
| 3043 | 3394 | |
|---|
| 3395 | +/** |
|---|
| 3396 | + * trace_array_printk - Print a message to a specific instance |
|---|
| 3397 | + * @tr: The instance trace_array descriptor |
|---|
| 3398 | + * @ip: The instruction pointer that this is called from. |
|---|
| 3399 | + * @fmt: The format to print (printf format) |
|---|
| 3400 | + * |
|---|
| 3401 | + * If a subsystem sets up its own instance, they have the right to |
|---|
| 3402 | + * printk strings into their tracing instance buffer using this |
|---|
| 3403 | + * function. Note, this function will not write into the top level |
|---|
| 3404 | + * buffer (use trace_printk() for that), as writing into the top level |
|---|
| 3405 | + * buffer should only have events that can be individually disabled. |
|---|
| 3406 | + * trace_printk() is only used for debugging a kernel, and should not |
|---|
| 3407 | + * ever be incorporated in normal use. |
|---|
| 3408 | + * |
|---|
| 3409 | + * trace_array_printk() can be used, as it will not add noise to the |
|---|
| 3410 | + * top level tracing buffer. |
|---|
| 3411 | + * |
|---|
| 3412 | + * Note, trace_array_init_printk() must be called on @tr before this |
|---|
| 3413 | + * can be used. |
|---|
| 3414 | + */ |
|---|
| 3044 | 3415 | __printf(3, 0) |
|---|
| 3045 | 3416 | int trace_array_printk(struct trace_array *tr, |
|---|
| 3046 | 3417 | unsigned long ip, const char *fmt, ...) |
|---|
| .. | .. |
|---|
| 3048 | 3419 | int ret; |
|---|
| 3049 | 3420 | va_list ap; |
|---|
| 3050 | 3421 | |
|---|
| 3051 | | - if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) |
|---|
| 3052 | | - return 0; |
|---|
| 3053 | | - |
|---|
| 3054 | 3422 | if (!tr) |
|---|
| 3055 | 3423 | return -ENOENT; |
|---|
| 3424 | + |
|---|
| 3425 | + /* This is only allowed for created instances */ |
|---|
| 3426 | + if (tr == &global_trace) |
|---|
| 3427 | + return 0; |
|---|
| 3428 | + |
|---|
| 3429 | + if (!(tr->trace_flags & TRACE_ITER_PRINTK)) |
|---|
| 3430 | + return 0; |
|---|
| 3056 | 3431 | |
|---|
| 3057 | 3432 | va_start(ap, fmt); |
|---|
| 3058 | 3433 | ret = trace_array_vprintk(tr, ip, fmt, ap); |
|---|
| 3059 | 3434 | va_end(ap); |
|---|
| 3060 | 3435 | return ret; |
|---|
| 3061 | 3436 | } |
|---|
| 3437 | +EXPORT_SYMBOL_GPL(trace_array_printk); |
|---|
| 3438 | + |
|---|
| 3439 | +/** |
|---|
| 3440 | + * trace_array_init_printk - Initialize buffers for trace_array_printk() |
|---|
| 3441 | + * @tr: The trace array to initialize the buffers for |
|---|
| 3442 | + * |
|---|
| 3443 | + * As trace_array_printk() only writes into instances, they are OK to |
|---|
| 3444 | + * have in the kernel (unlike trace_printk()). This needs to be called |
|---|
| 3445 | + * before trace_array_printk() can be used on a trace_array. |
|---|
| 3446 | + */ |
|---|
| 3447 | +int trace_array_init_printk(struct trace_array *tr) |
|---|
| 3448 | +{ |
|---|
| 3449 | + if (!tr) |
|---|
| 3450 | + return -ENOENT; |
|---|
| 3451 | + |
|---|
| 3452 | + /* This is only allowed for created instances */ |
|---|
| 3453 | + if (tr == &global_trace) |
|---|
| 3454 | + return -EINVAL; |
|---|
| 3455 | + |
|---|
| 3456 | + return alloc_percpu_trace_buffer(); |
|---|
| 3457 | +} |
|---|
| 3458 | +EXPORT_SYMBOL_GPL(trace_array_init_printk); |
|---|
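For context, a minimal sketch of how a module might use the two interfaces documented above. The instance name "my_inst" and the module scaffolding are assumptions for illustration; `trace_array_get_by_name()`, `trace_array_init_printk()`, `trace_array_printk()` and `trace_array_put()` are the exported entry points the kernel-doc refers to.

```c
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/trace.h>

static struct trace_array *my_tr;

static int __init my_printk_demo_init(void)
{
	int ret;

	/* Creates (or looks up) the tracing instance at instances/my_inst */
	my_tr = trace_array_get_by_name("my_inst");
	if (!my_tr)
		return -ENOMEM;

	/* Must be called before trace_array_printk() is used on my_tr */
	ret = trace_array_init_printk(my_tr);
	if (ret) {
		trace_array_put(my_tr);
		return ret;
	}

	/* Lands in instances/my_inst/trace, never in the top-level buffer */
	trace_array_printk(my_tr, _THIS_IP_, "demo module loaded\n");
	return 0;
}

static void __exit my_printk_demo_exit(void)
{
	trace_array_put(my_tr);
}

module_init(my_printk_demo_init);
module_exit(my_printk_demo_exit);
MODULE_LICENSE("GPL");
```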
| 3062 | 3459 | |
|---|
| 3063 | 3460 | __printf(3, 4) |
|---|
| 3064 | | -int trace_array_printk_buf(struct ring_buffer *buffer, |
|---|
| 3461 | +int trace_array_printk_buf(struct trace_buffer *buffer, |
|---|
| 3065 | 3462 | unsigned long ip, const char *fmt, ...) |
|---|
| 3066 | 3463 | { |
|---|
| 3067 | 3464 | int ret; |
|---|
| .. | .. |
|---|
| 3089 | 3486 | |
|---|
| 3090 | 3487 | iter->idx++; |
|---|
| 3091 | 3488 | if (buf_iter) |
|---|
| 3092 | | - ring_buffer_read(buf_iter, NULL); |
|---|
| 3489 | + ring_buffer_iter_advance(buf_iter); |
|---|
| 3093 | 3490 | } |
|---|
| 3094 | 3491 | |
|---|
| 3095 | 3492 | static struct trace_entry * |
|---|
| .. | .. |
|---|
| 3099 | 3496 | struct ring_buffer_event *event; |
|---|
| 3100 | 3497 | struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); |
|---|
| 3101 | 3498 | |
|---|
| 3102 | | - if (buf_iter) |
|---|
| 3499 | + if (buf_iter) { |
|---|
| 3103 | 3500 | event = ring_buffer_iter_peek(buf_iter, ts); |
|---|
| 3104 | | - else |
|---|
| 3105 | | - event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, |
|---|
| 3501 | + if (lost_events) |
|---|
| 3502 | + *lost_events = ring_buffer_iter_dropped(buf_iter) ? |
|---|
| 3503 | + (unsigned long)-1 : 0; |
|---|
| 3504 | + } else { |
|---|
| 3505 | + event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, |
|---|
| 3106 | 3506 | lost_events); |
|---|
| 3507 | + } |
|---|
| 3107 | 3508 | |
|---|
| 3108 | 3509 | if (event) { |
|---|
| 3109 | 3510 | iter->ent_size = ring_buffer_event_length(event); |
|---|
| .. | .. |
|---|
| 3117 | 3518 | __find_next_entry(struct trace_iterator *iter, int *ent_cpu, |
|---|
| 3118 | 3519 | unsigned long *missing_events, u64 *ent_ts) |
|---|
| 3119 | 3520 | { |
|---|
| 3120 | | - struct ring_buffer *buffer = iter->trace_buffer->buffer; |
|---|
| 3521 | + struct trace_buffer *buffer = iter->array_buffer->buffer; |
|---|
| 3121 | 3522 | struct trace_entry *ent, *next = NULL; |
|---|
| 3122 | 3523 | unsigned long lost_events = 0, next_lost = 0; |
|---|
| 3123 | 3524 | int cpu_file = iter->cpu_file; |
|---|
| .. | .. |
|---|
| 3173 | 3574 | return next; |
|---|
| 3174 | 3575 | } |
|---|
| 3175 | 3576 | |
|---|
| 3577 | +#define STATIC_TEMP_BUF_SIZE 128 |
|---|
| 3578 | +static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4); |
|---|
| 3579 | + |
|---|
| 3176 | 3580 | /* Find the next real entry, without updating the iterator itself */ |
|---|
| 3177 | 3581 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
|---|
| 3178 | 3582 | int *ent_cpu, u64 *ent_ts) |
|---|
| 3179 | 3583 | { |
|---|
| 3180 | | - return __find_next_entry(iter, ent_cpu, NULL, ent_ts); |
|---|
| 3584 | + /* __find_next_entry will reset ent_size */ |
|---|
| 3585 | + int ent_size = iter->ent_size; |
|---|
| 3586 | + struct trace_entry *entry; |
|---|
| 3587 | + |
|---|
| 3588 | + /* |
|---|
| 3589 | + * If called from ftrace_dump(), then the iter->temp buffer |
|---|
| 3590 | + * will be the static_temp_buf and not created from kmalloc. |
|---|
| 3591 | + * If the entry size is greater than the buffer, we can |
|---|
| 3592 | + * not save it. Just return NULL in that case. This is only |
|---|
| 3593 | + * used to add markers when two consecutive events' time |
|---|
| 3594 | + * stamps have a large delta. See trace_print_lat_context() |
|---|
| 3595 | + */ |
|---|
| 3596 | + if (iter->temp == static_temp_buf && |
|---|
| 3597 | + STATIC_TEMP_BUF_SIZE < ent_size) |
|---|
| 3598 | + return NULL; |
|---|
| 3599 | + |
|---|
| 3600 | + /* |
|---|
| 3601 | + * The __find_next_entry() may call peek_next_entry(), which may |
|---|
| 3602 | + * call ring_buffer_peek() that may make the contents of iter->ent |
|---|
| 3603 | + * undefined. Need to copy iter->ent now. |
|---|
| 3604 | + */ |
|---|
| 3605 | + if (iter->ent && iter->ent != iter->temp) { |
|---|
| 3606 | + if ((!iter->temp || iter->temp_size < iter->ent_size) && |
|---|
| 3607 | + !WARN_ON_ONCE(iter->temp == static_temp_buf)) { |
|---|
| 3608 | + void *temp; |
|---|
| 3609 | + temp = kmalloc(iter->ent_size, GFP_KERNEL); |
|---|
| 3610 | + if (!temp) |
|---|
| 3611 | + return NULL; |
|---|
| 3612 | + kfree(iter->temp); |
|---|
| 3613 | + iter->temp = temp; |
|---|
| 3614 | + iter->temp_size = iter->ent_size; |
|---|
| 3615 | + } |
|---|
| 3616 | + memcpy(iter->temp, iter->ent, iter->ent_size); |
|---|
| 3617 | + iter->ent = iter->temp; |
|---|
| 3618 | + } |
|---|
| 3619 | + entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); |
|---|
| 3620 | + /* Put back the original ent_size */ |
|---|
| 3621 | + iter->ent_size = ent_size; |
|---|
| 3622 | + |
|---|
| 3623 | + return entry; |
|---|
| 3181 | 3624 | } |
|---|
| 3182 | 3625 | |
|---|
| 3183 | 3626 | /* Find the next real entry, and increment the iterator to the next entry */ |
|---|
| .. | .. |
|---|
| 3194 | 3637 | |
|---|
| 3195 | 3638 | static void trace_consume(struct trace_iterator *iter) |
|---|
| 3196 | 3639 | { |
|---|
| 3197 | | - ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, |
|---|
| 3640 | + ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, |
|---|
| 3198 | 3641 | &iter->lost_events); |
|---|
| 3199 | 3642 | } |
|---|
| 3200 | 3643 | |
|---|
| .. | .. |
|---|
| 3227 | 3670 | |
|---|
| 3228 | 3671 | void tracing_iter_reset(struct trace_iterator *iter, int cpu) |
|---|
| 3229 | 3672 | { |
|---|
| 3230 | | - struct ring_buffer_event *event; |
|---|
| 3231 | 3673 | struct ring_buffer_iter *buf_iter; |
|---|
| 3232 | 3674 | unsigned long entries = 0; |
|---|
| 3233 | 3675 | u64 ts; |
|---|
| 3234 | 3676 | |
|---|
| 3235 | | - per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; |
|---|
| 3677 | + per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; |
|---|
| 3236 | 3678 | |
|---|
| 3237 | 3679 | buf_iter = trace_buffer_iter(iter, cpu); |
|---|
| 3238 | 3680 | if (!buf_iter) |
|---|
| .. | .. |
|---|
| 3245 | 3687 | * that a reset never took place on a cpu. This is evident |
|---|
| 3246 | 3688 | * by the timestamp being before the start of the buffer. |
|---|
| 3247 | 3689 | */ |
|---|
| 3248 | | - while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { |
|---|
| 3249 | | - if (ts >= iter->trace_buffer->time_start) |
|---|
| 3690 | + while (ring_buffer_iter_peek(buf_iter, &ts)) { |
|---|
| 3691 | + if (ts >= iter->array_buffer->time_start) |
|---|
| 3250 | 3692 | break; |
|---|
| 3251 | 3693 | entries++; |
|---|
| 3252 | | - ring_buffer_read(buf_iter, NULL); |
|---|
| 3694 | + ring_buffer_iter_advance(buf_iter); |
|---|
| 3253 | 3695 | } |
|---|
| 3254 | 3696 | |
|---|
| 3255 | | - per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; |
|---|
| 3697 | + per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; |
|---|
| 3256 | 3698 | } |
|---|
| 3257 | 3699 | |
|---|
| 3258 | 3700 | /* |
|---|
| .. | .. |
|---|
| 3331 | 3773 | } |
|---|
| 3332 | 3774 | |
|---|
| 3333 | 3775 | static void |
|---|
| 3334 | | -get_total_entries(struct trace_buffer *buf, |
|---|
| 3335 | | - unsigned long *total, unsigned long *entries) |
|---|
| 3776 | +get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, |
|---|
| 3777 | + unsigned long *entries, int cpu) |
|---|
| 3336 | 3778 | { |
|---|
| 3337 | 3779 | unsigned long count; |
|---|
| 3780 | + |
|---|
| 3781 | + count = ring_buffer_entries_cpu(buf->buffer, cpu); |
|---|
| 3782 | + /* |
|---|
| 3783 | + * If this buffer has skipped entries, then we hold all |
|---|
| 3784 | + * entries for the trace and we need to ignore the |
|---|
| 3785 | + * ones before the time stamp. |
|---|
| 3786 | + */ |
|---|
| 3787 | + if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { |
|---|
| 3788 | + count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; |
|---|
| 3789 | + /* total is the same as the entries */ |
|---|
| 3790 | + *total = count; |
|---|
| 3791 | + } else |
|---|
| 3792 | + *total = count + |
|---|
| 3793 | + ring_buffer_overrun_cpu(buf->buffer, cpu); |
|---|
| 3794 | + *entries = count; |
|---|
| 3795 | +} |
|---|
| 3796 | + |
|---|
| 3797 | +static void |
|---|
| 3798 | +get_total_entries(struct array_buffer *buf, |
|---|
| 3799 | + unsigned long *total, unsigned long *entries) |
|---|
| 3800 | +{ |
|---|
| 3801 | + unsigned long t, e; |
|---|
| 3338 | 3802 | int cpu; |
|---|
| 3339 | 3803 | |
|---|
| 3340 | 3804 | *total = 0; |
|---|
| 3341 | 3805 | *entries = 0; |
|---|
| 3342 | 3806 | |
|---|
| 3343 | 3807 | for_each_tracing_cpu(cpu) { |
|---|
| 3344 | | - count = ring_buffer_entries_cpu(buf->buffer, cpu); |
|---|
| 3345 | | - /* |
|---|
| 3346 | | - * If this buffer has skipped entries, then we hold all |
|---|
| 3347 | | - * entries for the trace and we need to ignore the |
|---|
| 3348 | | - * ones before the time stamp. |
|---|
| 3349 | | - */ |
|---|
| 3350 | | - if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { |
|---|
| 3351 | | - count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; |
|---|
| 3352 | | - /* total is the same as the entries */ |
|---|
| 3353 | | - *total += count; |
|---|
| 3354 | | - } else |
|---|
| 3355 | | - *total += count + |
|---|
| 3356 | | - ring_buffer_overrun_cpu(buf->buffer, cpu); |
|---|
| 3357 | | - *entries += count; |
|---|
| 3808 | + get_total_entries_cpu(buf, &t, &e, cpu); |
|---|
| 3809 | + *total += t; |
|---|
| 3810 | + *entries += e; |
|---|
| 3358 | 3811 | } |
|---|
| 3812 | +} |
|---|
| 3813 | + |
|---|
| 3814 | +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) |
|---|
| 3815 | +{ |
|---|
| 3816 | + unsigned long total, entries; |
|---|
| 3817 | + |
|---|
| 3818 | + if (!tr) |
|---|
| 3819 | + tr = &global_trace; |
|---|
| 3820 | + |
|---|
| 3821 | + get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); |
|---|
| 3822 | + |
|---|
| 3823 | + return entries; |
|---|
| 3824 | +} |
|---|
| 3825 | + |
|---|
| 3826 | +unsigned long trace_total_entries(struct trace_array *tr) |
|---|
| 3827 | +{ |
|---|
| 3828 | + unsigned long total, entries; |
|---|
| 3829 | + |
|---|
| 3830 | + if (!tr) |
|---|
| 3831 | + tr = &global_trace; |
|---|
| 3832 | + |
|---|
| 3833 | + get_total_entries(&tr->array_buffer, &total, &entries); |
|---|
| 3834 | + |
|---|
| 3835 | + return entries; |
|---|
| 3359 | 3836 | } |
|---|
| 3360 | 3837 | |
|---|
| 3361 | 3838 | static void print_lat_help_header(struct seq_file *m) |
|---|
| 3362 | 3839 | { |
|---|
| 3363 | | - seq_puts(m, "# _------=> CPU# \n" |
|---|
| 3364 | | - "# / _-----=> irqs-off \n" |
|---|
| 3365 | | - "# | / _----=> need-resched \n" |
|---|
| 3366 | | - "# || / _---=> hardirq/softirq \n" |
|---|
| 3367 | | - "# ||| / _--=> preempt-depth \n" |
|---|
| 3368 | | - "# |||| / delay \n" |
|---|
| 3369 | | - "# cmd pid ||||| time | caller \n" |
|---|
| 3370 | | - "# \\ / ||||| \\ | / \n"); |
|---|
| 3840 | + seq_puts(m, "# _------=> CPU# \n" |
|---|
| 3841 | + "# / _-----=> irqs-off \n" |
|---|
| 3842 | + "# | / _----=> need-resched \n" |
|---|
| 3843 | + "# || / _---=> hardirq/softirq \n" |
|---|
| 3844 | + "# ||| / _--=> preempt-depth \n" |
|---|
| 3845 | + "# |||| / delay \n" |
|---|
| 3846 | + "# cmd pid ||||| time | caller \n" |
|---|
| 3847 | + "# \\ / ||||| \\ | / \n"); |
|---|
| 3371 | 3848 | } |
|---|
| 3372 | 3849 | |
|---|
| 3373 | | -static void print_event_info(struct trace_buffer *buf, struct seq_file *m) |
|---|
| 3850 | +static void print_event_info(struct array_buffer *buf, struct seq_file *m) |
|---|
| 3374 | 3851 | { |
|---|
| 3375 | 3852 | unsigned long total; |
|---|
| 3376 | 3853 | unsigned long entries; |
|---|
| .. | .. |
|---|
| 3381 | 3858 | seq_puts(m, "#\n"); |
|---|
| 3382 | 3859 | } |
|---|
| 3383 | 3860 | |
|---|
| 3384 | | -static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, |
|---|
| 3861 | +static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, |
|---|
| 3385 | 3862 | unsigned int flags) |
|---|
| 3386 | 3863 | { |
|---|
| 3387 | 3864 | bool tgid = flags & TRACE_ITER_RECORD_TGID; |
|---|
| 3388 | 3865 | |
|---|
| 3389 | 3866 | print_event_info(buf, m); |
|---|
| 3390 | 3867 | |
|---|
| 3391 | | - seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); |
|---|
| 3392 | | - seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); |
|---|
| 3868 | + seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); |
|---|
| 3869 | + seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); |
|---|
| 3393 | 3870 | } |
|---|
| 3394 | 3871 | |
|---|
| 3395 | | -static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, |
|---|
| 3872 | +static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, |
|---|
| 3396 | 3873 | unsigned int flags) |
|---|
| 3397 | 3874 | { |
|---|
| 3398 | 3875 | bool tgid = flags & TRACE_ITER_RECORD_TGID; |
|---|
| 3399 | | - const char tgid_space[] = " "; |
|---|
| 3400 | | - const char space[] = " "; |
|---|
| 3876 | + const char *space = " "; |
|---|
| 3877 | + int prec = tgid ? 12 : 2; |
|---|
| 3401 | 3878 | |
|---|
| 3402 | 3879 | print_event_info(buf, m); |
|---|
| 3403 | 3880 | |
|---|
| 3404 | | - seq_printf(m, "# %s _-----=> irqs-off\n", |
|---|
| 3405 | | - tgid ? tgid_space : space); |
|---|
| 3406 | | - seq_printf(m, "# %s / _----=> need-resched\n", |
|---|
| 3407 | | - tgid ? tgid_space : space); |
|---|
| 3408 | | - seq_printf(m, "# %s| / _---=> hardirq/softirq\n", |
|---|
| 3409 | | - tgid ? tgid_space : space); |
|---|
| 3410 | | - seq_printf(m, "# %s|| / _--=> preempt-depth\n", |
|---|
| 3411 | | - tgid ? tgid_space : space); |
|---|
| 3412 | | - seq_printf(m, "# %s||| / delay\n", |
|---|
| 3413 | | - tgid ? tgid_space : space); |
|---|
| 3414 | | - seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", |
|---|
| 3415 | | - tgid ? " TGID " : space); |
|---|
| 3416 | | - seq_printf(m, "# | | %s | |||| | |\n", |
|---|
| 3417 | | - tgid ? " | " : space); |
|---|
| 3881 | + seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space); |
|---|
| 3882 | + seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); |
|---|
| 3883 | + seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); |
|---|
| 3884 | + seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); |
|---|
| 3885 | + seq_printf(m, "# %.*s||| / delay\n", prec, space); |
|---|
| 3886 | + seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); |
|---|
| 3887 | + seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); |
|---|
| 3418 | 3888 | } |
|---|
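The rewrite above collapses the duplicated header strings into one set of format strings by using the printf precision specifier: with `"%.*s"`, the `prec` argument caps how many characters of the padding string are emitted (12 columns when TGID is shown, 2 otherwise). A stand-alone user-space illustration of the same idiom, not taken from the patch:

```c
#include <stdio.h>

int main(void)
{
	const char *space = "            ";	/* 12 blanks, the widest case */
	int prec;

	/* prec = 2 mimics the no-TGID header, prec = 12 the TGID one */
	for (prec = 2; prec <= 12; prec += 10) {
		printf("# %.*s _-----=> irqs-off\n", prec, space);
		printf("# %.*s / _----=> need-resched\n", prec, space);
	}
	return 0;
}
```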
| 3419 | 3889 | |
|---|
| 3420 | 3890 | void |
|---|
| 3421 | 3891 | print_trace_header(struct seq_file *m, struct trace_iterator *iter) |
|---|
| 3422 | 3892 | { |
|---|
| 3423 | 3893 | unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); |
|---|
| 3424 | | - struct trace_buffer *buf = iter->trace_buffer; |
|---|
| 3894 | + struct array_buffer *buf = iter->array_buffer; |
|---|
| 3425 | 3895 | struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); |
|---|
| 3426 | 3896 | struct tracer *type = iter->trace; |
|---|
| 3427 | 3897 | unsigned long entries; |
|---|
| .. | .. |
|---|
| 3448 | 3918 | "desktop", |
|---|
| 3449 | 3919 | #elif defined(CONFIG_PREEMPT) |
|---|
| 3450 | 3920 | "preempt", |
|---|
| 3921 | +#elif defined(CONFIG_PREEMPT_RT) |
|---|
| 3922 | + "preempt_rt", |
|---|
| 3451 | 3923 | #else |
|---|
| 3452 | 3924 | "unknown", |
|---|
| 3453 | 3925 | #endif |
|---|
| .. | .. |
|---|
| 3494 | 3966 | cpumask_test_cpu(iter->cpu, iter->started)) |
|---|
| 3495 | 3967 | return; |
|---|
| 3496 | 3968 | |
|---|
| 3497 | | - if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) |
|---|
| 3969 | + if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) |
|---|
| 3498 | 3970 | return; |
|---|
| 3499 | 3971 | |
|---|
| 3500 | 3972 | if (cpumask_available(iter->started)) |
|---|
| .. | .. |
|---|
| 3628 | 4100 | if (!ring_buffer_iter_empty(buf_iter)) |
|---|
| 3629 | 4101 | return 0; |
|---|
| 3630 | 4102 | } else { |
|---|
| 3631 | | - if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) |
|---|
| 4103 | + if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) |
|---|
| 3632 | 4104 | return 0; |
|---|
| 3633 | 4105 | } |
|---|
| 3634 | 4106 | return 1; |
|---|
| .. | .. |
|---|
| 3640 | 4112 | if (!ring_buffer_iter_empty(buf_iter)) |
|---|
| 3641 | 4113 | return 0; |
|---|
| 3642 | 4114 | } else { |
|---|
| 3643 | | - if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) |
|---|
| 4115 | + if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) |
|---|
| 3644 | 4116 | return 0; |
|---|
| 3645 | 4117 | } |
|---|
| 3646 | 4118 | } |
|---|
| .. | .. |
|---|
| 3656 | 4128 | enum print_line_t ret; |
|---|
| 3657 | 4129 | |
|---|
| 3658 | 4130 | if (iter->lost_events) { |
|---|
| 3659 | | - trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", |
|---|
| 3660 | | - iter->cpu, iter->lost_events); |
|---|
| 4131 | + if (iter->lost_events == (unsigned long)-1) |
|---|
| 4132 | + trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", |
|---|
| 4133 | + iter->cpu); |
|---|
| 4134 | + else |
|---|
| 4135 | + trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", |
|---|
| 4136 | + iter->cpu, iter->lost_events); |
|---|
| 3661 | 4137 | if (trace_seq_has_overflowed(&iter->seq)) |
|---|
| 3662 | 4138 | return TRACE_TYPE_PARTIAL_LINE; |
|---|
| 3663 | 4139 | } |
|---|
| .. | .. |
|---|
| 3730 | 4206 | } else { |
|---|
| 3731 | 4207 | if (!(trace_flags & TRACE_ITER_VERBOSE)) { |
|---|
| 3732 | 4208 | if (trace_flags & TRACE_ITER_IRQ_INFO) |
|---|
| 3733 | | - print_func_help_header_irq(iter->trace_buffer, |
|---|
| 4209 | + print_func_help_header_irq(iter->array_buffer, |
|---|
| 3734 | 4210 | m, trace_flags); |
|---|
| 3735 | 4211 | else |
|---|
| 3736 | | - print_func_help_header(iter->trace_buffer, m, |
|---|
| 4212 | + print_func_help_header(iter->array_buffer, m, |
|---|
| 3737 | 4213 | trace_flags); |
|---|
| 3738 | 4214 | } |
|---|
| 3739 | 4215 | } |
|---|
| .. | .. |
|---|
| 3873 | 4349 | goto release; |
|---|
| 3874 | 4350 | |
|---|
| 3875 | 4351 | /* |
|---|
| 4352 | + * trace_find_next_entry() may need to save off iter->ent. |
|---|
| 4353 | + * It will place it into the iter->temp buffer. As most |
|---|
| 4354 | + * events are less than 128, allocate a buffer of that size. |
|---|
| 4355 | + * If one is greater, then trace_find_next_entry() will |
|---|
| 4356 | + * allocate a new buffer to adjust for the bigger iter->ent. |
|---|
| 4357 | + * It's not critical if it fails to get allocated here. |
|---|
| 4358 | + */ |
|---|
| 4359 | + iter->temp = kmalloc(128, GFP_KERNEL); |
|---|
| 4360 | + if (iter->temp) |
|---|
| 4361 | + iter->temp_size = 128; |
|---|
| 4362 | + |
|---|
| 4363 | + /* |
|---|
| 3876 | 4364 | * We make a copy of the current tracer to avoid concurrent |
|---|
| 3877 | 4365 | * changes on it while we are reading. |
|---|
| 3878 | 4366 | */ |
|---|
| .. | .. |
|---|
| 3891 | 4379 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 3892 | 4380 | /* Currently only the top directory has a snapshot */ |
|---|
| 3893 | 4381 | if (tr->current_trace->print_max || snapshot) |
|---|
| 3894 | | - iter->trace_buffer = &tr->max_buffer; |
|---|
| 4382 | + iter->array_buffer = &tr->max_buffer; |
|---|
| 3895 | 4383 | else |
|---|
| 3896 | 4384 | #endif |
|---|
| 3897 | | - iter->trace_buffer = &tr->trace_buffer; |
|---|
| 4385 | + iter->array_buffer = &tr->array_buffer; |
|---|
| 3898 | 4386 | iter->snapshot = snapshot; |
|---|
| 3899 | 4387 | iter->pos = -1; |
|---|
| 3900 | 4388 | iter->cpu_file = tracing_get_cpu(inode); |
|---|
| 3901 | 4389 | mutex_init(&iter->mutex); |
|---|
| 3902 | 4390 | |
|---|
| 3903 | 4391 | /* Notify the tracer early; before we stop tracing. */ |
|---|
| 3904 | | - if (iter->trace && iter->trace->open) |
|---|
| 4392 | + if (iter->trace->open) |
|---|
| 3905 | 4393 | iter->trace->open(iter); |
|---|
| 3906 | 4394 | |
|---|
| 3907 | 4395 | /* Annotate start of buffers if we had overruns */ |
|---|
| 3908 | | - if (ring_buffer_overruns(iter->trace_buffer->buffer)) |
|---|
| 4396 | + if (ring_buffer_overruns(iter->array_buffer->buffer)) |
|---|
| 3909 | 4397 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
|---|
| 3910 | 4398 | |
|---|
| 3911 | 4399 | /* Output in nanoseconds only if we are using a clock in nanoseconds. */ |
|---|
| 3912 | 4400 | if (trace_clocks[tr->clock_id].in_ns) |
|---|
| 3913 | 4401 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; |
|---|
| 3914 | 4402 | |
|---|
| 3915 | | - /* stop the trace while dumping if we are not opening "snapshot" */ |
|---|
| 3916 | | - if (!iter->snapshot) |
|---|
| 4403 | + /* |
|---|
| 4404 | + * If pause-on-trace is enabled, then stop the trace while |
|---|
| 4405 | + * dumping, unless this is the "snapshot" file |
|---|
| 4406 | + */ |
|---|
| 4407 | + if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) |
|---|
| 3917 | 4408 | tracing_stop_tr(tr); |
|---|
| 3918 | 4409 | |
|---|
| 3919 | 4410 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { |
|---|
| 3920 | 4411 | for_each_tracing_cpu(cpu) { |
|---|
| 3921 | 4412 | iter->buffer_iter[cpu] = |
|---|
| 3922 | | - ring_buffer_read_prepare(iter->trace_buffer->buffer, |
|---|
| 4413 | + ring_buffer_read_prepare(iter->array_buffer->buffer, |
|---|
| 3923 | 4414 | cpu, GFP_KERNEL); |
|---|
| 3924 | 4415 | } |
|---|
| 3925 | 4416 | ring_buffer_read_prepare_sync(); |
|---|
| .. | .. |
|---|
| 3930 | 4421 | } else { |
|---|
| 3931 | 4422 | cpu = iter->cpu_file; |
|---|
| 3932 | 4423 | iter->buffer_iter[cpu] = |
|---|
| 3933 | | - ring_buffer_read_prepare(iter->trace_buffer->buffer, |
|---|
| 4424 | + ring_buffer_read_prepare(iter->array_buffer->buffer, |
|---|
| 3934 | 4425 | cpu, GFP_KERNEL); |
|---|
| 3935 | 4426 | ring_buffer_read_prepare_sync(); |
|---|
| 3936 | 4427 | ring_buffer_read_start(iter->buffer_iter[cpu]); |
|---|
| .. | .. |
|---|
| 3944 | 4435 | fail: |
|---|
| 3945 | 4436 | mutex_unlock(&trace_types_lock); |
|---|
| 3946 | 4437 | kfree(iter->trace); |
|---|
| 4438 | + kfree(iter->temp); |
|---|
| 3947 | 4439 | kfree(iter->buffer_iter); |
|---|
| 3948 | 4440 | release: |
|---|
| 3949 | 4441 | seq_release_private(inode, file); |
|---|
| .. | .. |
|---|
| 3952 | 4444 | |
|---|
| 3953 | 4445 | int tracing_open_generic(struct inode *inode, struct file *filp) |
|---|
| 3954 | 4446 | { |
|---|
| 3955 | | - if (tracing_disabled) |
|---|
| 3956 | | - return -ENODEV; |
|---|
| 4447 | + int ret; |
|---|
| 4448 | + |
|---|
| 4449 | + ret = tracing_check_open_get_tr(NULL); |
|---|
| 4450 | + if (ret) |
|---|
| 4451 | + return ret; |
|---|
| 3957 | 4452 | |
|---|
| 3958 | 4453 | filp->private_data = inode->i_private; |
|---|
| 3959 | 4454 | return 0; |
|---|
| .. | .. |
|---|
| 3968 | 4463 | * Open and update trace_array ref count. |
|---|
| 3969 | 4464 | * Must have the current trace_array passed to it. |
|---|
| 3970 | 4465 | */ |
|---|
| 3971 | | -static int tracing_open_generic_tr(struct inode *inode, struct file *filp) |
|---|
| 4466 | +int tracing_open_generic_tr(struct inode *inode, struct file *filp) |
|---|
| 3972 | 4467 | { |
|---|
| 3973 | 4468 | struct trace_array *tr = inode->i_private; |
|---|
| 4469 | + int ret; |
|---|
| 3974 | 4470 | |
|---|
| 3975 | | - if (tracing_disabled) |
|---|
| 3976 | | - return -ENODEV; |
|---|
| 3977 | | - |
|---|
| 3978 | | - if (trace_array_get(tr) < 0) |
|---|
| 3979 | | - return -ENODEV; |
|---|
| 4471 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 4472 | + if (ret) |
|---|
| 4473 | + return ret; |
|---|
| 3980 | 4474 | |
|---|
| 3981 | 4475 | filp->private_data = inode->i_private; |
|---|
| 3982 | 4476 | |
|---|
| .. | .. |
|---|
| 4007 | 4501 | if (iter->trace && iter->trace->close) |
|---|
| 4008 | 4502 | iter->trace->close(iter); |
|---|
| 4009 | 4503 | |
|---|
| 4010 | | - if (!iter->snapshot) |
|---|
| 4504 | + if (!iter->snapshot && tr->stop_count) |
|---|
| 4011 | 4505 | /* reenable tracing if it was previously enabled */ |
|---|
| 4012 | 4506 | tracing_start_tr(tr); |
|---|
| 4013 | 4507 | |
|---|
| .. | .. |
|---|
| 4017 | 4511 | |
|---|
| 4018 | 4512 | mutex_destroy(&iter->mutex); |
|---|
| 4019 | 4513 | free_cpumask_var(iter->started); |
|---|
| 4514 | + kfree(iter->temp); |
|---|
| 4020 | 4515 | kfree(iter->trace); |
|---|
| 4021 | 4516 | kfree(iter->buffer_iter); |
|---|
| 4022 | 4517 | seq_release_private(inode, file); |
|---|
| .. | .. |
|---|
| 4045 | 4540 | { |
|---|
| 4046 | 4541 | struct trace_array *tr = inode->i_private; |
|---|
| 4047 | 4542 | struct trace_iterator *iter; |
|---|
| 4048 | | - int ret = 0; |
|---|
| 4543 | + int ret; |
|---|
| 4049 | 4544 | |
|---|
| 4050 | | - if (trace_array_get(tr) < 0) |
|---|
| 4051 | | - return -ENODEV; |
|---|
| 4545 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 4546 | + if (ret) |
|---|
| 4547 | + return ret; |
|---|
| 4052 | 4548 | |
|---|
| 4053 | 4549 | /* If this file was open for write, then erase contents */ |
|---|
| 4054 | 4550 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { |
|---|
| 4055 | 4551 | int cpu = tracing_get_cpu(inode); |
|---|
| 4056 | | - struct trace_buffer *trace_buf = &tr->trace_buffer; |
|---|
| 4552 | + struct array_buffer *trace_buf = &tr->array_buffer; |
|---|
| 4057 | 4553 | |
|---|
| 4058 | 4554 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 4059 | 4555 | if (tr->current_trace->print_max) |
|---|
| .. | .. |
|---|
| 4063 | 4559 | if (cpu == RING_BUFFER_ALL_CPUS) |
|---|
| 4064 | 4560 | tracing_reset_online_cpus(trace_buf); |
|---|
| 4065 | 4561 | else |
|---|
| 4066 | | - tracing_reset(trace_buf, cpu); |
|---|
| 4562 | + tracing_reset_cpu(trace_buf, cpu); |
|---|
| 4067 | 4563 | } |
|---|
| 4068 | 4564 | |
|---|
| 4069 | 4565 | if (file->f_mode & FMODE_READ) { |
|---|
| .. | .. |
|---|
| 4164 | 4660 | struct seq_file *m; |
|---|
| 4165 | 4661 | int ret; |
|---|
| 4166 | 4662 | |
|---|
| 4167 | | - if (tracing_disabled) |
|---|
| 4168 | | - return -ENODEV; |
|---|
| 4169 | | - |
|---|
| 4170 | | - if (trace_array_get(tr) < 0) |
|---|
| 4171 | | - return -ENODEV; |
|---|
| 4663 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 4664 | + if (ret) |
|---|
| 4665 | + return ret; |
|---|
| 4172 | 4666 | |
|---|
| 4173 | 4667 | ret = seq_open(file, &show_traces_seq_ops); |
|---|
| 4174 | 4668 | if (ret) { |
|---|
| .. | .. |
|---|
| 4252 | 4746 | return count; |
|---|
| 4253 | 4747 | } |
|---|
| 4254 | 4748 | |
|---|
| 4255 | | -static ssize_t |
|---|
| 4256 | | -tracing_cpumask_write(struct file *filp, const char __user *ubuf, |
|---|
| 4257 | | - size_t count, loff_t *ppos) |
|---|
| 4749 | +int tracing_set_cpumask(struct trace_array *tr, |
|---|
| 4750 | + cpumask_var_t tracing_cpumask_new) |
|---|
| 4258 | 4751 | { |
|---|
| 4259 | | - struct trace_array *tr = file_inode(filp)->i_private; |
|---|
| 4260 | | - cpumask_var_t tracing_cpumask_new; |
|---|
| 4261 | | - int err, cpu; |
|---|
| 4752 | + int cpu; |
|---|
| 4262 | 4753 | |
|---|
| 4263 | | - if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) |
|---|
| 4264 | | - return -ENOMEM; |
|---|
| 4265 | | - |
|---|
| 4266 | | - err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); |
|---|
| 4267 | | - if (err) |
|---|
| 4268 | | - goto err_unlock; |
|---|
| 4754 | + if (!tr) |
|---|
| 4755 | + return -EINVAL; |
|---|
| 4269 | 4756 | |
|---|
| 4270 | 4757 | local_irq_disable(); |
|---|
| 4271 | 4758 | arch_spin_lock(&tr->max_lock); |
|---|
| .. | .. |
|---|
| 4276 | 4763 | */ |
|---|
| 4277 | 4764 | if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && |
|---|
| 4278 | 4765 | !cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
|---|
| 4279 | | - atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); |
|---|
| 4280 | | - ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); |
|---|
| 4766 | + atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); |
|---|
| 4767 | + ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); |
|---|
| 4281 | 4768 | } |
|---|
| 4282 | 4769 | if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && |
|---|
| 4283 | 4770 | cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
|---|
| 4284 | | - atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); |
|---|
| 4285 | | - ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); |
|---|
| 4771 | + atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); |
|---|
| 4772 | + ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); |
|---|
| 4286 | 4773 | } |
|---|
| 4287 | 4774 | } |
|---|
| 4288 | 4775 | arch_spin_unlock(&tr->max_lock); |
|---|
| 4289 | 4776 | local_irq_enable(); |
|---|
| 4290 | 4777 | |
|---|
| 4291 | 4778 | cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); |
|---|
| 4779 | + |
|---|
| 4780 | + return 0; |
|---|
| 4781 | +} |
|---|
| 4782 | + |
|---|
| 4783 | +static ssize_t |
|---|
| 4784 | +tracing_cpumask_write(struct file *filp, const char __user *ubuf, |
|---|
| 4785 | + size_t count, loff_t *ppos) |
|---|
| 4786 | +{ |
|---|
| 4787 | + struct trace_array *tr = file_inode(filp)->i_private; |
|---|
| 4788 | + cpumask_var_t tracing_cpumask_new; |
|---|
| 4789 | + int err; |
|---|
| 4790 | + |
|---|
| 4791 | + if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) |
|---|
| 4792 | + return -ENOMEM; |
|---|
| 4793 | + |
|---|
| 4794 | + err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); |
|---|
| 4795 | + if (err) |
|---|
| 4796 | + goto err_free; |
|---|
| 4797 | + |
|---|
| 4798 | + err = tracing_set_cpumask(tr, tracing_cpumask_new); |
|---|
| 4799 | + if (err) |
|---|
| 4800 | + goto err_free; |
|---|
| 4801 | + |
|---|
| 4292 | 4802 | free_cpumask_var(tracing_cpumask_new); |
|---|
| 4293 | 4803 | |
|---|
| 4294 | 4804 | return count; |
|---|
| 4295 | 4805 | |
|---|
| 4296 | | -err_unlock: |
|---|
| 4806 | +err_free: |
|---|
| 4297 | 4807 | free_cpumask_var(tracing_cpumask_new); |
|---|
| 4298 | 4808 | |
|---|
| 4299 | 4809 | return err; |
|---|
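The cpumask handling is split so that user-input parsing stays in tracing_cpumask_write() while the actual update moves to a non-static tracing_set_cpumask(), making it callable from inside the kernel as well as through the tracefs file. A minimal sketch of an in-kernel caller follows; the function name and the my_tr instance are illustrative, not part of this patch.

```c
#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Illustrative only: restrict an instance's tracing to CPUs 0-1 using the
 * now-exported tracing_set_cpumask(). Assumes this sits in kernel/trace/
 * with "trace.h" available and my_tr obtained elsewhere.
 */
static int example_limit_tracing_cpus(struct trace_array *my_tr)
{
	cpumask_var_t new_mask;
	int err;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	cpumask_set_cpu(0, new_mask);
	cpumask_set_cpu(1, new_mask);

	err = tracing_set_cpumask(my_tr, new_mask);

	free_cpumask_var(new_mask);
	return err;
}
```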
| .. | .. |
|---|
| 4435 | 4945 | ftrace_pid_follow_fork(tr, enabled); |
|---|
| 4436 | 4946 | |
|---|
| 4437 | 4947 | if (mask == TRACE_ITER_OVERWRITE) { |
|---|
| 4438 | | - ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); |
|---|
| 4948 | + ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); |
|---|
| 4439 | 4949 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 4440 | 4950 | ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); |
|---|
| 4441 | 4951 | #endif |
|---|
| .. | .. |
|---|
| 4449 | 4959 | return 0; |
|---|
| 4450 | 4960 | } |
|---|
| 4451 | 4961 | |
|---|
| 4452 | | -static int trace_set_options(struct trace_array *tr, char *option) |
|---|
| 4962 | +int trace_set_options(struct trace_array *tr, char *option) |
|---|
| 4453 | 4963 | { |
|---|
| 4454 | 4964 | char *cmp; |
|---|
| 4455 | 4965 | int neg = 0; |
|---|
| 4456 | 4966 | int ret; |
|---|
| 4457 | 4967 | size_t orig_len = strlen(option); |
|---|
| 4968 | + int len; |
|---|
| 4458 | 4969 | |
|---|
| 4459 | 4970 | cmp = strstrip(option); |
|---|
| 4460 | 4971 | |
|---|
| 4461 | | - if (strncmp(cmp, "no", 2) == 0) { |
|---|
| 4972 | + len = str_has_prefix(cmp, "no"); |
|---|
| 4973 | + if (len) |
|---|
| 4462 | 4974 | neg = 1; |
|---|
| 4463 | | - cmp += 2; |
|---|
| 4464 | | - } |
|---|
| 4975 | + |
|---|
| 4976 | + cmp += len; |
|---|
| 4465 | 4977 | |
|---|
| 4466 | 4978 | mutex_lock(&event_mutex); |
|---|
| 4467 | 4979 | mutex_lock(&trace_types_lock); |
|---|
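The strncmp(cmp, "no", 2) test becomes str_has_prefix(), which returns the length of the matched prefix (or 0 on no match), so one call both sets neg and tells the code how far to advance cmp. A small sketch of the idiom, using a hypothetical option string:

```c
#include <linux/printk.h>
#include <linux/string.h>

/* str_has_prefix() returns strlen(prefix) when str starts with prefix and 0
 * otherwise, so the same value is used as the flag and as the skip length.
 */
static void example_parse_option(const char *option)
{
	size_t len;
	int neg = 0;

	len = str_has_prefix(option, "no");	/* 2 for "noblock", 0 for "block" */
	if (len)
		neg = 1;
	option += len;

	/* option now points at "block" either way; neg records the "no" prefix */
	pr_info("option=%s neg=%d\n", option, neg);
}
```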
| .. | .. |
|---|
| 4537 | 5049 | struct trace_array *tr = inode->i_private; |
|---|
| 4538 | 5050 | int ret; |
|---|
| 4539 | 5051 | |
|---|
| 4540 | | - if (tracing_disabled) |
|---|
| 4541 | | - return -ENODEV; |
|---|
| 4542 | | - |
|---|
| 4543 | | - if (trace_array_get(tr) < 0) |
|---|
| 4544 | | - return -ENODEV; |
|---|
| 5052 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 5053 | + if (ret) |
|---|
| 5054 | + return ret; |
|---|
| 4545 | 5055 | |
|---|
| 4546 | 5056 | ret = single_open(file, tracing_trace_options_show, inode->i_private); |
|---|
| 4547 | 5057 | if (ret < 0) |
|---|
| .. | .. |
|---|
| 4568 | 5078 | " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" |
|---|
| 4569 | 5079 | " current_tracer\t- function and latency tracers\n" |
|---|
| 4570 | 5080 | " available_tracers\t- list of configured tracers for current_tracer\n" |
|---|
| 5081 | + " error_log\t- error log for failed commands (that support it)\n" |
|---|
| 4571 | 5082 | " buffer_size_kb\t- view and modify size of per cpu buffer\n" |
|---|
| 4572 | 5083 | " buffer_total_size_kb - view total size of all cpu buffers\n\n" |
|---|
| 4573 | 5084 | " trace_clock\t\t-change the clock used to order events\n" |
|---|
| .. | .. |
|---|
| 4588 | 5099 | " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" |
|---|
| 4589 | 5100 | "\t\t\t Remove sub-buffer with rmdir\n" |
|---|
| 4590 | 5101 | " trace_options\t\t- Set format or modify how tracing happens\n" |
|---|
| 4591 | | - "\t\t\t Disable an option by adding a suffix 'no' to the\n" |
|---|
| 5102 | + "\t\t\t Disable an option by prefixing 'no' to the\n" |
|---|
| 4592 | 5103 | "\t\t\t option name\n" |
|---|
| 4593 | 5104 | " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" |
|---|
| 4594 | 5105 | #ifdef CONFIG_DYNAMIC_FTRACE |
|---|
| .. | .. |
|---|
| 4632 | 5143 | #ifdef CONFIG_FUNCTION_TRACER |
|---|
| 4633 | 5144 | " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" |
|---|
| 4634 | 5145 | "\t\t (function)\n" |
|---|
| 5146 | + " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n" |
|---|
| 5147 | + "\t\t (function)\n" |
|---|
| 4635 | 5148 | #endif |
|---|
| 4636 | 5149 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
|---|
| 4637 | 5150 | " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" |
|---|
| .. | .. |
|---|
| 4653 | 5166 | "\t\t\t traces\n" |
|---|
| 4654 | 5167 | #endif |
|---|
| 4655 | 5168 | #endif /* CONFIG_STACK_TRACER */ |
|---|
| 5169 | +#ifdef CONFIG_DYNAMIC_EVENTS |
|---|
| 5170 | + " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" |
|---|
| 5171 | + "\t\t\t Write into this file to define/undefine new trace events.\n" |
|---|
| 5172 | +#endif |
|---|
| 4656 | 5173 | #ifdef CONFIG_KPROBE_EVENTS |
|---|
| 4657 | | - " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" |
|---|
| 5174 | + " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" |
|---|
| 4658 | 5175 | "\t\t\t Write into this file to define/undefine new trace events.\n" |
|---|
| 4659 | 5176 | #endif |
|---|
| 4660 | 5177 | #ifdef CONFIG_UPROBE_EVENTS |
|---|
| 4661 | | - " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" |
|---|
| 5178 | + " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" |
|---|
| 4662 | 5179 | "\t\t\t Write into this file to define/undefine new trace events.\n" |
|---|
| 4663 | 5180 | #endif |
|---|
| 4664 | 5181 | #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) |
|---|
| 4665 | 5182 | "\t accepts: event-definitions (one definition per line)\n" |
|---|
| 4666 | 5183 | "\t Format: p[:[<group>/]<event>] <place> [<args>]\n" |
|---|
| 4667 | 5184 | "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n" |
|---|
| 5185 | +#ifdef CONFIG_HIST_TRIGGERS |
|---|
| 5186 | + "\t s:[synthetic/]<event> <field> [<field>]\n" |
|---|
| 5187 | +#endif |
|---|
| 4668 | 5188 | "\t -:[<group>/]<event>\n" |
|---|
| 4669 | 5189 | #ifdef CONFIG_KPROBE_EVENTS |
|---|
| 4670 | 5190 | "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" |
|---|
| 4671 | | - "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n" |
|---|
| 5191 | + "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n" |
|---|
| 4672 | 5192 | #endif |
|---|
| 4673 | 5193 | #ifdef CONFIG_UPROBE_EVENTS |
|---|
| 4674 | | - "\t place: <path>:<offset>\n" |
|---|
| 5194 | + " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n" |
|---|
| 4675 | 5195 | #endif |
|---|
| 4676 | 5196 | "\t args: <name>=fetcharg[:type]\n" |
|---|
| 4677 | 5197 | "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" |
|---|
| 4678 | | - "\t $stack<index>, $stack, $retval, $comm\n" |
|---|
| 4679 | | - "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n" |
|---|
| 4680 | | - "\t b<bit-width>@<bit-offset>/<container-size>\n" |
|---|
| 5198 | +#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API |
|---|
| 5199 | + "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n" |
|---|
| 5200 | +#else |
|---|
| 5201 | + "\t $stack<index>, $stack, $retval, $comm,\n" |
|---|
| 5202 | +#endif |
|---|
| 5203 | + "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n" |
|---|
| 5204 | + "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" |
|---|
| 5205 | + "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n" |
|---|
| 5206 | + "\t <type>\\[<array-size>\\]\n" |
|---|
| 5207 | +#ifdef CONFIG_HIST_TRIGGERS |
|---|
| 5208 | + "\t field: <stype> <name>;\n" |
|---|
| 5209 | + "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" |
|---|
| 5210 | + "\t [unsigned] char/int/long\n" |
|---|
| 5211 | +#endif |
|---|
| 4681 | 5212 | #endif |
|---|
| 4682 | 5213 | " events/\t\t- Directory containing all trace event subsystems:\n" |
|---|
| 4683 | 5214 | " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" |
|---|
| .. | .. |
|---|
| 4730 | 5261 | "\t [:size=#entries]\n" |
|---|
| 4731 | 5262 | "\t [:pause][:continue][:clear]\n" |
|---|
| 4732 | 5263 | "\t [:name=histname1]\n" |
|---|
| 5264 | + "\t [:<handler>.<action>]\n" |
|---|
| 4733 | 5265 | "\t [if <filter>]\n\n" |
|---|
| 4734 | 5266 | "\t Note, special fields can be used as well:\n" |
|---|
| 4735 | 5267 | "\t common_timestamp - to record current timestamp\n" |
|---|
| .. | .. |
|---|
| 4774 | 5306 | "\t unchanged.\n\n" |
|---|
| 4775 | 5307 | "\t The enable_hist and disable_hist triggers can be used to\n" |
|---|
| 4776 | 5308 | "\t have one event conditionally start and stop another event's\n" |
|---|
| 4777 | | - "\t already-attached hist trigger. The syntax is analagous to\n" |
|---|
| 4778 | | - "\t the enable_event and disable_event triggers.\n" |
|---|
| 5309 | + "\t already-attached hist trigger. The syntax is analogous to\n" |
|---|
| 5310 | + "\t the enable_event and disable_event triggers.\n\n" |
|---|
| 5311 | + "\t Hist trigger handlers and actions are executed whenever a\n" |
|---|
| 5312 | + "\t a histogram entry is added or updated. They take the form:\n\n" |
|---|
| 5313 | + "\t <handler>.<action>\n\n" |
|---|
| 5314 | + "\t The available handlers are:\n\n" |
|---|
| 5315 | + "\t onmatch(matching.event) - invoke on addition or update\n" |
|---|
| 5316 | + "\t onmax(var) - invoke if var exceeds current max\n" |
|---|
| 5317 | + "\t onchange(var) - invoke action if var changes\n\n" |
|---|
| 5318 | + "\t The available actions are:\n\n" |
|---|
| 5319 | + "\t trace(<synthetic_event>,param list) - generate synthetic event\n" |
|---|
| 5320 | + "\t save(field,...) - save current event fields\n" |
|---|
| 5321 | +#ifdef CONFIG_TRACER_SNAPSHOT |
|---|
| 5322 | + "\t snapshot() - snapshot the trace buffer\n\n" |
|---|
| 5323 | +#endif |
|---|
| 5324 | +#ifdef CONFIG_SYNTH_EVENTS |
|---|
| 5325 | + " events/synthetic_events\t- Create/append/remove/show synthetic events\n" |
|---|
| 5326 | + "\t Write into this file to define/undefine new synthetic events.\n" |
|---|
| 5327 | + "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n" |
|---|
| 5328 | +#endif |
|---|
| 4779 | 5329 | #endif |
|---|
| 4780 | 5330 | ; |
|---|
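The README block above spells out the dynamic-event grammar: probe places (including the new %return suffix), $argN fetchargs gated on CONFIG_HAVE_FUNCTION_ARG_ACCESS_API, the type list, and the hist-trigger handlers and actions, with a synthetic_events example already given inline. As one concrete illustration of the kprobe side, here is a rough userspace sketch that appends a probe and a return probe to kprobe_events and enables them; the probe names are made up, the tracefs mount point is assumed to be /sys/kernel/tracing, and the $arg1 fetcharg assumes the arg-access API is available.

```c
/* Rough sketch: define "p:myopen do_sys_open dfd=$arg1" plus a matching
 * return probe, then enable both events. Names are illustrative.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY | O_APPEND);

	if (fd >= 0) {
		write(fd, s, strlen(s));
		close(fd);
	}
}

int main(void)
{
	write_str("/sys/kernel/tracing/kprobe_events",
		  "p:myopen do_sys_open dfd=$arg1\n");
	write_str("/sys/kernel/tracing/kprobe_events",
		  "r:myopen_ret do_sys_open ret=$retval\n");
	write_str("/sys/kernel/tracing/events/kprobes/myopen/enable", "1\n");
	write_str("/sys/kernel/tracing/events/kprobes/myopen_ret/enable", "1\n");
	return 0;
}
```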
| 4781 | 5331 | |
|---|
| .. | .. |
|---|
| 4833 | 5383 | |
|---|
| 4834 | 5384 | static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) |
|---|
| 4835 | 5385 | { |
|---|
| 4836 | | - if (tracing_disabled) |
|---|
| 4837 | | - return -ENODEV; |
|---|
| 5386 | + int ret; |
|---|
| 5387 | + |
|---|
| 5388 | + ret = tracing_check_open_get_tr(NULL); |
|---|
| 5389 | + if (ret) |
|---|
| 5390 | + return ret; |
|---|
| 4838 | 5391 | |
|---|
| 4839 | 5392 | return seq_open(filp, &tracing_saved_tgids_seq_ops); |
|---|
| 4840 | 5393 | } |
|---|
| .. | .. |
|---|
| 4910 | 5463 | |
|---|
| 4911 | 5464 | static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp) |
|---|
| 4912 | 5465 | { |
|---|
| 4913 | | - if (tracing_disabled) |
|---|
| 4914 | | - return -ENODEV; |
|---|
| 5466 | + int ret; |
|---|
| 5467 | + |
|---|
| 5468 | + ret = tracing_check_open_get_tr(NULL); |
|---|
| 5469 | + if (ret) |
|---|
| 5470 | + return ret; |
|---|
| 4915 | 5471 | |
|---|
| 4916 | 5472 | return seq_open(filp, &tracing_saved_cmdlines_seq_ops); |
|---|
| 4917 | 5473 | } |
|---|
| .. | .. |
|---|
| 4930 | 5486 | char buf[64]; |
|---|
| 4931 | 5487 | int r; |
|---|
| 4932 | 5488 | |
|---|
| 5489 | + preempt_disable(); |
|---|
| 4933 | 5490 | arch_spin_lock(&trace_cmdline_lock); |
|---|
| 4934 | 5491 | r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); |
|---|
| 4935 | 5492 | arch_spin_unlock(&trace_cmdline_lock); |
|---|
| 5493 | + preempt_enable(); |
|---|
| 4936 | 5494 | |
|---|
| 4937 | 5495 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
|---|
| 4938 | 5496 | } |
|---|
| .. | .. |
|---|
| 4957 | 5515 | return -ENOMEM; |
|---|
| 4958 | 5516 | } |
|---|
| 4959 | 5517 | |
|---|
| 5518 | + preempt_disable(); |
|---|
| 4960 | 5519 | arch_spin_lock(&trace_cmdline_lock); |
|---|
| 4961 | 5520 | savedcmd_temp = savedcmd; |
|---|
| 4962 | 5521 | savedcmd = s; |
|---|
| 4963 | 5522 | arch_spin_unlock(&trace_cmdline_lock); |
|---|
| 5523 | + preempt_enable(); |
|---|
| 4964 | 5524 | free_saved_cmdlines_buffer(savedcmd_temp); |
|---|
| 4965 | 5525 | |
|---|
| 4966 | 5526 | return 0; |
|---|
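Both saved_cmdlines hunks above add preempt_disable()/preempt_enable() around trace_cmdline_lock. Unlike spin_lock(), the raw arch_spin_lock() primitive does not disable preemption by itself, so the caller must bracket it explicitly or risk being preempted while holding the lock. The general pattern, as a minimal self-contained sketch:

```c
#include <linux/preempt.h>
#include <linux/spinlock.h>

/* arch_spin_lock() is a bare spinlock with no preemption or IRQ handling of
 * its own; callers wrap it, here with preemption disabled (other sites in
 * this file use local_irq_disable() instead).
 */
static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void example_critical_section(void)
{
	preempt_disable();
	arch_spin_lock(&example_lock);

	/* ... touch the data protected by example_lock ... */

	arch_spin_unlock(&example_lock);
	preempt_enable();
}
```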
| .. | .. |
|---|
| 5019 | 5579 | * Paranoid! If ptr points to end, we don't want to increment past it. |
|---|
| 5020 | 5580 | * This really should never happen. |
|---|
| 5021 | 5581 | */ |
|---|
| 5582 | + (*pos)++; |
|---|
| 5022 | 5583 | ptr = update_eval_map(ptr); |
|---|
| 5023 | 5584 | if (WARN_ON_ONCE(!ptr)) |
|---|
| 5024 | 5585 | return NULL; |
|---|
| 5025 | 5586 | |
|---|
| 5026 | 5587 | ptr++; |
|---|
| 5027 | | - |
|---|
| 5028 | | - (*pos)++; |
|---|
| 5029 | | - |
|---|
| 5030 | 5588 | ptr = update_eval_map(ptr); |
|---|
| 5031 | 5589 | |
|---|
| 5032 | 5590 | return ptr; |
|---|
| .. | .. |
|---|
| 5075 | 5633 | |
|---|
| 5076 | 5634 | static int tracing_eval_map_open(struct inode *inode, struct file *filp) |
|---|
| 5077 | 5635 | { |
|---|
| 5078 | | - if (tracing_disabled) |
|---|
| 5079 | | - return -ENODEV; |
|---|
| 5636 | + int ret; |
|---|
| 5637 | + |
|---|
| 5638 | + ret = tracing_check_open_get_tr(NULL); |
|---|
| 5639 | + if (ret) |
|---|
| 5640 | + return ret; |
|---|
| 5080 | 5641 | |
|---|
| 5081 | 5642 | return seq_open(filp, &tracing_eval_map_seq_ops); |
|---|
| 5082 | 5643 | } |
|---|
| .. | .. |
|---|
| 5189 | 5750 | |
|---|
| 5190 | 5751 | int tracer_init(struct tracer *t, struct trace_array *tr) |
|---|
| 5191 | 5752 | { |
|---|
| 5192 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
|---|
| 5753 | + tracing_reset_online_cpus(&tr->array_buffer); |
|---|
| 5193 | 5754 | return t->init(tr); |
|---|
| 5194 | 5755 | } |
|---|
| 5195 | 5756 | |
|---|
| 5196 | | -static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) |
|---|
| 5757 | +static void set_buffer_entries(struct array_buffer *buf, unsigned long val) |
|---|
| 5197 | 5758 | { |
|---|
| 5198 | 5759 | int cpu; |
|---|
| 5199 | 5760 | |
|---|
| .. | .. |
|---|
| 5203 | 5764 | |
|---|
| 5204 | 5765 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 5205 | 5766 | /* resize @tr's buffer to the size of @size_tr's entries */ |
|---|
| 5206 | | -static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, |
|---|
| 5207 | | - struct trace_buffer *size_buf, int cpu_id) |
|---|
| 5767 | +static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, |
|---|
| 5768 | + struct array_buffer *size_buf, int cpu_id) |
|---|
| 5208 | 5769 | { |
|---|
| 5209 | 5770 | int cpu, ret = 0; |
|---|
| 5210 | 5771 | |
|---|
| .. | .. |
|---|
| 5242 | 5803 | ring_buffer_expanded = true; |
|---|
| 5243 | 5804 | |
|---|
| 5244 | 5805 | /* May be called before buffers are initialized */ |
|---|
| 5245 | | - if (!tr->trace_buffer.buffer) |
|---|
| 5806 | + if (!tr->array_buffer.buffer) |
|---|
| 5246 | 5807 | return 0; |
|---|
| 5247 | 5808 | |
|---|
| 5248 | | - ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); |
|---|
| 5809 | + ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); |
|---|
| 5249 | 5810 | if (ret < 0) |
|---|
| 5250 | 5811 | return ret; |
|---|
| 5251 | 5812 | |
|---|
| .. | .. |
|---|
| 5256 | 5817 | |
|---|
| 5257 | 5818 | ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); |
|---|
| 5258 | 5819 | if (ret < 0) { |
|---|
| 5259 | | - int r = resize_buffer_duplicate_size(&tr->trace_buffer, |
|---|
| 5260 | | - &tr->trace_buffer, cpu); |
|---|
| 5820 | + int r = resize_buffer_duplicate_size(&tr->array_buffer, |
|---|
| 5821 | + &tr->array_buffer, cpu); |
|---|
| 5261 | 5822 | if (r < 0) { |
|---|
| 5262 | 5823 | /* |
|---|
| 5263 | 5824 | * AARGH! We are left with different |
|---|
| .. | .. |
|---|
| 5288 | 5849 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
|---|
| 5289 | 5850 | |
|---|
| 5290 | 5851 | if (cpu == RING_BUFFER_ALL_CPUS) |
|---|
| 5291 | | - set_buffer_entries(&tr->trace_buffer, size); |
|---|
| 5852 | + set_buffer_entries(&tr->array_buffer, size); |
|---|
| 5292 | 5853 | else |
|---|
| 5293 | | - per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; |
|---|
| 5854 | + per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size; |
|---|
| 5294 | 5855 | |
|---|
| 5295 | 5856 | return ret; |
|---|
| 5296 | 5857 | } |
|---|
| 5297 | 5858 | |
|---|
| 5298 | | -static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, |
|---|
| 5299 | | - unsigned long size, int cpu_id) |
|---|
| 5859 | +ssize_t tracing_resize_ring_buffer(struct trace_array *tr, |
|---|
| 5860 | + unsigned long size, int cpu_id) |
|---|
| 5300 | 5861 | { |
|---|
| 5301 | 5862 | int ret = size; |
|---|
| 5302 | 5863 | |
|---|
| .. | .. |
|---|
| 5366 | 5927 | tr->current_trace = &nop_trace; |
|---|
| 5367 | 5928 | } |
|---|
| 5368 | 5929 | |
|---|
| 5930 | +static bool tracer_options_updated; |
|---|
| 5931 | + |
|---|
| 5369 | 5932 | static void add_tracer_options(struct trace_array *tr, struct tracer *t) |
|---|
| 5370 | 5933 | { |
|---|
| 5371 | 5934 | /* Only enable if the directory has been created already. */ |
|---|
| 5372 | 5935 | if (!tr->dir) |
|---|
| 5373 | 5936 | return; |
|---|
| 5374 | 5937 | |
|---|
| 5938 | + /* Only create trace option files after update_tracer_options() finishes */ |
|---|
| 5939 | + if (!tracer_options_updated) |
|---|
| 5940 | + return; |
|---|
| 5941 | + |
|---|
| 5375 | 5942 | create_trace_option_files(tr, t); |
|---|
| 5376 | 5943 | } |
|---|
| 5377 | 5944 | |
|---|
| 5378 | | -static int tracing_set_tracer(struct trace_array *tr, const char *buf) |
|---|
| 5945 | +int tracing_set_tracer(struct trace_array *tr, const char *buf) |
|---|
| 5379 | 5946 | { |
|---|
| 5380 | 5947 | struct tracer *t; |
|---|
| 5381 | 5948 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| .. | .. |
|---|
| 5404 | 5971 | if (t == tr->current_trace) |
|---|
| 5405 | 5972 | goto out; |
|---|
| 5406 | 5973 | |
|---|
| 5974 | +#ifdef CONFIG_TRACER_SNAPSHOT |
|---|
| 5975 | + if (t->use_max_tr) { |
|---|
| 5976 | + local_irq_disable(); |
|---|
| 5977 | + arch_spin_lock(&tr->max_lock); |
|---|
| 5978 | + if (tr->cond_snapshot) |
|---|
| 5979 | + ret = -EBUSY; |
|---|
| 5980 | + arch_spin_unlock(&tr->max_lock); |
|---|
| 5981 | + local_irq_enable(); |
|---|
| 5982 | + if (ret) |
|---|
| 5983 | + goto out; |
|---|
| 5984 | + } |
|---|
| 5985 | +#endif |
|---|
| 5407 | 5986 | /* Some tracers won't work on kernel command line */ |
|---|
| 5408 | 5987 | if (system_state < SYSTEM_RUNNING && t->noboot) { |
|---|
| 5409 | 5988 | pr_warn("Tracer '%s' is not allowed on command line, ignored\n", |
|---|
| .. | .. |
|---|
| 5418 | 5997 | } |
|---|
| 5419 | 5998 | |
|---|
| 5420 | 5999 | /* If trace pipe files are being read, we can't change the tracer */ |
|---|
| 5421 | | - if (tr->current_trace->ref) { |
|---|
| 6000 | + if (tr->trace_ref) { |
|---|
| 5422 | 6001 | ret = -EBUSY; |
|---|
| 5423 | 6002 | goto out; |
|---|
| 5424 | 6003 | } |
|---|
| .. | .. |
|---|
| 5430 | 6009 | if (tr->current_trace->reset) |
|---|
| 5431 | 6010 | tr->current_trace->reset(tr); |
|---|
| 5432 | 6011 | |
|---|
| 5433 | | - /* Current trace needs to be nop_trace before synchronize_sched */ |
|---|
| 5434 | | - tr->current_trace = &nop_trace; |
|---|
| 5435 | | - |
|---|
| 5436 | 6012 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 5437 | | - had_max_tr = tr->allocated_snapshot; |
|---|
| 6013 | + had_max_tr = tr->current_trace->use_max_tr; |
|---|
| 6014 | + |
|---|
| 6015 | + /* Current trace needs to be nop_trace before synchronize_rcu */ |
|---|
| 6016 | + tr->current_trace = &nop_trace; |
|---|
| 5438 | 6017 | |
|---|
| 5439 | 6018 | if (had_max_tr && !t->use_max_tr) { |
|---|
| 5440 | 6019 | /* |
|---|
| .. | .. |
|---|
| 5444 | 6023 | * The update_max_tr is called from interrupts disabled |
|---|
| 5445 | 6024 | * so a synchronized_sched() is sufficient. |
|---|
| 5446 | 6025 | */ |
|---|
| 5447 | | - synchronize_sched(); |
|---|
| 6026 | + synchronize_rcu(); |
|---|
| 5448 | 6027 | free_snapshot(tr); |
|---|
| 5449 | 6028 | } |
|---|
| 5450 | | -#endif |
|---|
| 5451 | 6029 | |
|---|
| 5452 | | -#ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 5453 | | - if (t->use_max_tr && !had_max_tr) { |
|---|
| 6030 | + if (t->use_max_tr && !tr->allocated_snapshot) { |
|---|
| 5454 | 6031 | ret = tracing_alloc_snapshot_instance(tr); |
|---|
| 5455 | 6032 | if (ret < 0) |
|---|
| 5456 | 6033 | goto out; |
|---|
| 5457 | 6034 | } |
|---|
| 6035 | +#else |
|---|
| 6036 | + tr->current_trace = &nop_trace; |
|---|
| 5458 | 6037 | #endif |
|---|
| 5459 | 6038 | |
|---|
| 5460 | 6039 | if (t->init) { |
|---|
| .. | .. |
|---|
| 5589 | 6168 | { |
|---|
| 5590 | 6169 | struct trace_array *tr = inode->i_private; |
|---|
| 5591 | 6170 | struct trace_iterator *iter; |
|---|
| 5592 | | - int ret = 0; |
|---|
| 6171 | + int ret; |
|---|
| 5593 | 6172 | |
|---|
| 5594 | | - if (tracing_disabled) |
|---|
| 5595 | | - return -ENODEV; |
|---|
| 5596 | | - |
|---|
| 5597 | | - if (trace_array_get(tr) < 0) |
|---|
| 5598 | | - return -ENODEV; |
|---|
| 6173 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 6174 | + if (ret) |
|---|
| 6175 | + return ret; |
|---|
| 5599 | 6176 | |
|---|
| 5600 | 6177 | mutex_lock(&trace_types_lock); |
|---|
| 5601 | 6178 | |
|---|
| .. | .. |
|---|
| 5626 | 6203 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; |
|---|
| 5627 | 6204 | |
|---|
| 5628 | 6205 | iter->tr = tr; |
|---|
| 5629 | | - iter->trace_buffer = &tr->trace_buffer; |
|---|
| 6206 | + iter->array_buffer = &tr->array_buffer; |
|---|
| 5630 | 6207 | iter->cpu_file = tracing_get_cpu(inode); |
|---|
| 5631 | 6208 | mutex_init(&iter->mutex); |
|---|
| 5632 | 6209 | filp->private_data = iter; |
|---|
| .. | .. |
|---|
| 5636 | 6213 | |
|---|
| 5637 | 6214 | nonseekable_open(inode, filp); |
|---|
| 5638 | 6215 | |
|---|
| 5639 | | - tr->current_trace->ref++; |
|---|
| 6216 | + tr->trace_ref++; |
|---|
| 5640 | 6217 | out: |
|---|
| 5641 | 6218 | mutex_unlock(&trace_types_lock); |
|---|
| 5642 | 6219 | return ret; |
|---|
| .. | .. |
|---|
| 5655 | 6232 | |
|---|
| 5656 | 6233 | mutex_lock(&trace_types_lock); |
|---|
| 5657 | 6234 | |
|---|
| 5658 | | - tr->current_trace->ref--; |
|---|
| 6235 | + tr->trace_ref--; |
|---|
| 5659 | 6236 | |
|---|
| 5660 | 6237 | if (iter->trace->pipe_close) |
|---|
| 5661 | 6238 | iter->trace->pipe_close(iter); |
|---|
| .. | .. |
|---|
| 5686 | 6263 | */ |
|---|
| 5687 | 6264 | return EPOLLIN | EPOLLRDNORM; |
|---|
| 5688 | 6265 | else |
|---|
| 5689 | | - return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, |
|---|
| 5690 | | - filp, poll_table); |
|---|
| 6266 | + return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, |
|---|
| 6267 | + filp, poll_table, iter->tr->buffer_percent); |
|---|
| 5691 | 6268 | } |
|---|
| 5692 | 6269 | |
|---|
| 5693 | 6270 | static __poll_t |
|---|
| .. | .. |
|---|
| 5724 | 6301 | |
|---|
| 5725 | 6302 | mutex_unlock(&iter->mutex); |
|---|
| 5726 | 6303 | |
|---|
| 5727 | | - ret = wait_on_pipe(iter, false); |
|---|
| 6304 | + ret = wait_on_pipe(iter, 0); |
|---|
| 5728 | 6305 | |
|---|
| 5729 | 6306 | mutex_lock(&iter->mutex); |
|---|
| 5730 | 6307 | |
|---|
| .. | .. |
|---|
| 5840 | 6417 | __free_page(spd->pages[idx]); |
|---|
| 5841 | 6418 | } |
|---|
| 5842 | 6419 | |
|---|
| 5843 | | -static const struct pipe_buf_operations tracing_pipe_buf_ops = { |
|---|
| 5844 | | - .can_merge = 0, |
|---|
| 5845 | | - .confirm = generic_pipe_buf_confirm, |
|---|
| 5846 | | - .release = generic_pipe_buf_release, |
|---|
| 5847 | | - .steal = generic_pipe_buf_steal, |
|---|
| 5848 | | - .get = generic_pipe_buf_get, |
|---|
| 5849 | | -}; |
|---|
| 5850 | | - |
|---|
| 5851 | 6420 | static size_t |
|---|
| 5852 | 6421 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) |
|---|
| 5853 | 6422 | { |
|---|
| .. | .. |
|---|
| 5909 | 6478 | .partial = partial_def, |
|---|
| 5910 | 6479 | .nr_pages = 0, /* This gets updated below. */ |
|---|
| 5911 | 6480 | .nr_pages_max = PIPE_DEF_BUFFERS, |
|---|
| 5912 | | - .ops = &tracing_pipe_buf_ops, |
|---|
| 6481 | + .ops = &default_pipe_buf_ops, |
|---|
| 5913 | 6482 | .spd_release = tracing_spd_release_pipe, |
|---|
| 5914 | 6483 | }; |
|---|
| 5915 | 6484 | ssize_t ret; |
|---|
| .. | .. |
|---|
| 6004 | 6573 | for_each_tracing_cpu(cpu) { |
|---|
| 6005 | 6574 | /* fill in the size from first enabled cpu */ |
|---|
| 6006 | 6575 | if (size == 0) |
|---|
| 6007 | | - size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; |
|---|
| 6008 | | - if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { |
|---|
| 6576 | + size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; |
|---|
| 6577 | + if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { |
|---|
| 6009 | 6578 | buf_size_same = 0; |
|---|
| 6010 | 6579 | break; |
|---|
| 6011 | 6580 | } |
|---|
| .. | .. |
|---|
| 6021 | 6590 | } else |
|---|
| 6022 | 6591 | r = sprintf(buf, "X\n"); |
|---|
| 6023 | 6592 | } else |
|---|
| 6024 | | - r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); |
|---|
| 6593 | + r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); |
|---|
| 6025 | 6594 | |
|---|
| 6026 | 6595 | mutex_unlock(&trace_types_lock); |
|---|
| 6027 | 6596 | |
|---|
| .. | .. |
|---|
| 6068 | 6637 | |
|---|
| 6069 | 6638 | mutex_lock(&trace_types_lock); |
|---|
| 6070 | 6639 | for_each_tracing_cpu(cpu) { |
|---|
| 6071 | | - size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; |
|---|
| 6640 | + size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; |
|---|
| 6072 | 6641 | if (!ring_buffer_expanded) |
|---|
| 6073 | 6642 | expanded_size += trace_buf_size >> 10; |
|---|
| 6074 | 6643 | } |
|---|
| .. | .. |
|---|
| 6118 | 6687 | struct trace_array *tr = filp->private_data; |
|---|
| 6119 | 6688 | struct ring_buffer_event *event; |
|---|
| 6120 | 6689 | enum event_trigger_type tt = ETT_NONE; |
|---|
| 6121 | | - struct ring_buffer *buffer; |
|---|
| 6690 | + struct trace_buffer *buffer; |
|---|
| 6122 | 6691 | struct print_entry *entry; |
|---|
| 6123 | 6692 | unsigned long irq_flags; |
|---|
| 6124 | | - const char faulted[] = "<faulted>"; |
|---|
| 6125 | 6693 | ssize_t written; |
|---|
| 6126 | 6694 | int size; |
|---|
| 6127 | 6695 | int len; |
|---|
| 6128 | 6696 | |
|---|
| 6129 | 6697 | /* Used in tracing_mark_raw_write() as well */ |
|---|
| 6130 | | -#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */ |
|---|
| 6698 | +#define FAULTED_STR "<faulted>" |
|---|
| 6699 | +#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */ |
|---|
| 6131 | 6700 | |
|---|
| 6132 | 6701 | if (tracing_disabled) |
|---|
| 6133 | 6702 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 6147 | 6716 | if (cnt < FAULTED_SIZE) |
|---|
| 6148 | 6717 | size += FAULTED_SIZE - cnt; |
|---|
| 6149 | 6718 | |
|---|
| 6150 | | - buffer = tr->trace_buffer.buffer; |
|---|
| 6719 | + buffer = tr->array_buffer.buffer; |
|---|
| 6151 | 6720 | event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, |
|---|
| 6152 | 6721 | irq_flags, preempt_count()); |
|---|
| 6153 | 6722 | if (unlikely(!event)) |
|---|
| .. | .. |
|---|
| 6159 | 6728 | |
|---|
| 6160 | 6729 | len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); |
|---|
| 6161 | 6730 | if (len) { |
|---|
| 6162 | | - memcpy(&entry->buf, faulted, FAULTED_SIZE); |
|---|
| 6731 | + memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); |
|---|
| 6163 | 6732 | cnt = FAULTED_SIZE; |
|---|
| 6164 | 6733 | written = -EFAULT; |
|---|
| 6165 | 6734 | } else |
|---|
| 6166 | 6735 | written = cnt; |
|---|
| 6167 | | - len = cnt; |
|---|
| 6168 | 6736 | |
|---|
| 6169 | 6737 | if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { |
|---|
| 6170 | 6738 | /* do not add \n before testing triggers, but add \0 */ |
|---|
| .. | .. |
|---|
| 6178 | 6746 | } else |
|---|
| 6179 | 6747 | entry->buf[cnt] = '\0'; |
|---|
| 6180 | 6748 | |
|---|
| 6749 | + if (static_branch_unlikely(&trace_marker_exports_enabled)) |
|---|
| 6750 | + ftrace_exports(event, TRACE_EXPORT_MARKER); |
|---|
| 6181 | 6751 | __buffer_unlock_commit(buffer, event); |
|---|
| 6182 | 6752 | |
|---|
| 6183 | 6753 | if (tt) |
|---|
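With the trace_marker_exports_enabled static branch on, trace_marker writes are now also handed to registered trace exporters. Below is a hedged sketch of an exporter that opts in to marker events; register_ftrace_export() is the existing registration API, while the flags field and its TRACE_EXPORT_MARKER bit are assumed from their use in this patch.

```c
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/trace.h>

/* Hedged sketch: an exporter that asks to receive trace_marker entries.
 * A real exporter would forward the raw entry to its transport (STM,
 * network, ...); here the callback only reports the entry size.
 */
static void example_marker_export(struct trace_export *export,
				  const void *entry, unsigned int size)
{
	pr_debug("exported marker entry, %u bytes\n", size);
}

static struct trace_export example_export = {
	.write	= example_marker_export,
	.flags	= TRACE_EXPORT_MARKER,	/* assumed flag, taken from this patch */
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}
late_initcall(example_export_init);
```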
| .. | .. |
|---|
| 6198 | 6768 | { |
|---|
| 6199 | 6769 | struct trace_array *tr = filp->private_data; |
|---|
| 6200 | 6770 | struct ring_buffer_event *event; |
|---|
| 6201 | | - struct ring_buffer *buffer; |
|---|
| 6771 | + struct trace_buffer *buffer; |
|---|
| 6202 | 6772 | struct raw_data_entry *entry; |
|---|
| 6203 | | - const char faulted[] = "<faulted>"; |
|---|
| 6204 | 6773 | unsigned long irq_flags; |
|---|
| 6205 | 6774 | ssize_t written; |
|---|
| 6206 | 6775 | int size; |
|---|
| .. | .. |
|---|
| 6228 | 6797 | if (cnt < FAULT_SIZE_ID) |
|---|
| 6229 | 6798 | size += FAULT_SIZE_ID - cnt; |
|---|
| 6230 | 6799 | |
|---|
| 6231 | | - buffer = tr->trace_buffer.buffer; |
|---|
| 6800 | + buffer = tr->array_buffer.buffer; |
|---|
| 6232 | 6801 | event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, |
|---|
| 6233 | 6802 | irq_flags, preempt_count()); |
|---|
| 6234 | 6803 | if (!event) |
|---|
| .. | .. |
|---|
| 6240 | 6809 | len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); |
|---|
| 6241 | 6810 | if (len) { |
|---|
| 6242 | 6811 | entry->id = -1; |
|---|
| 6243 | | - memcpy(&entry->buf, faulted, FAULTED_SIZE); |
|---|
| 6812 | + memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); |
|---|
| 6244 | 6813 | written = -EFAULT; |
|---|
| 6245 | 6814 | } else |
|---|
| 6246 | 6815 | written = cnt; |
|---|
| .. | .. |
|---|
| 6283 | 6852 | |
|---|
| 6284 | 6853 | tr->clock_id = i; |
|---|
| 6285 | 6854 | |
|---|
| 6286 | | - ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); |
|---|
| 6855 | + ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); |
|---|
| 6287 | 6856 | |
|---|
| 6288 | 6857 | /* |
|---|
| 6289 | 6858 | * New clock may not be consistent with the previous clock. |
|---|
| 6290 | 6859 | * Reset the buffer so that it doesn't have incomparable timestamps. |
|---|
| 6291 | 6860 | */ |
|---|
| 6292 | | - tracing_reset_online_cpus(&tr->trace_buffer); |
|---|
| 6861 | + tracing_reset_online_cpus(&tr->array_buffer); |
|---|
| 6293 | 6862 | |
|---|
| 6294 | 6863 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 6295 | 6864 | if (tr->max_buffer.buffer) |
|---|
| .. | .. |
|---|
| 6335 | 6904 | struct trace_array *tr = inode->i_private; |
|---|
| 6336 | 6905 | int ret; |
|---|
| 6337 | 6906 | |
|---|
| 6338 | | - if (tracing_disabled) |
|---|
| 6339 | | - return -ENODEV; |
|---|
| 6340 | | - |
|---|
| 6341 | | - if (trace_array_get(tr)) |
|---|
| 6342 | | - return -ENODEV; |
|---|
| 6907 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 6908 | + if (ret) |
|---|
| 6909 | + return ret; |
|---|
| 6343 | 6910 | |
|---|
| 6344 | 6911 | ret = single_open(file, tracing_clock_show, inode->i_private); |
|---|
| 6345 | 6912 | if (ret < 0) |
|---|
| .. | .. |
|---|
| 6354 | 6921 | |
|---|
| 6355 | 6922 | mutex_lock(&trace_types_lock); |
|---|
| 6356 | 6923 | |
|---|
| 6357 | | - if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) |
|---|
| 6924 | + if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) |
|---|
| 6358 | 6925 | seq_puts(m, "delta [absolute]\n"); |
|---|
| 6359 | 6926 | else |
|---|
| 6360 | 6927 | seq_puts(m, "[delta] absolute\n"); |
|---|
| .. | .. |
|---|
| 6369 | 6936 | struct trace_array *tr = inode->i_private; |
|---|
| 6370 | 6937 | int ret; |
|---|
| 6371 | 6938 | |
|---|
| 6372 | | - if (tracing_disabled) |
|---|
| 6373 | | - return -ENODEV; |
|---|
| 6374 | | - |
|---|
| 6375 | | - if (trace_array_get(tr)) |
|---|
| 6376 | | - return -ENODEV; |
|---|
| 6939 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 6940 | + if (ret) |
|---|
| 6941 | + return ret; |
|---|
| 6377 | 6942 | |
|---|
| 6378 | 6943 | ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); |
|---|
| 6379 | 6944 | if (ret < 0) |
|---|
| .. | .. |
|---|
| 6401 | 6966 | goto out; |
|---|
| 6402 | 6967 | } |
|---|
| 6403 | 6968 | |
|---|
| 6404 | | - ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); |
|---|
| 6969 | + ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs); |
|---|
| 6405 | 6970 | |
|---|
| 6406 | 6971 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 6407 | 6972 | if (tr->max_buffer.buffer) |
|---|
| .. | .. |
|---|
| 6426 | 6991 | struct trace_array *tr = inode->i_private; |
|---|
| 6427 | 6992 | struct trace_iterator *iter; |
|---|
| 6428 | 6993 | struct seq_file *m; |
|---|
| 6429 | | - int ret = 0; |
|---|
| 6994 | + int ret; |
|---|
| 6430 | 6995 | |
|---|
| 6431 | | - if (trace_array_get(tr) < 0) |
|---|
| 6432 | | - return -ENODEV; |
|---|
| 6996 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 6997 | + if (ret) |
|---|
| 6998 | + return ret; |
|---|
| 6433 | 6999 | |
|---|
| 6434 | 7000 | if (file->f_mode & FMODE_READ) { |
|---|
| 6435 | 7001 | iter = __tracing_open(inode, file, true); |
|---|
| .. | .. |
|---|
| 6449 | 7015 | ret = 0; |
|---|
| 6450 | 7016 | |
|---|
| 6451 | 7017 | iter->tr = tr; |
|---|
| 6452 | | - iter->trace_buffer = &tr->max_buffer; |
|---|
| 7018 | + iter->array_buffer = &tr->max_buffer; |
|---|
| 6453 | 7019 | iter->cpu_file = tracing_get_cpu(inode); |
|---|
| 6454 | 7020 | m->private = iter; |
|---|
| 6455 | 7021 | file->private_data = m; |
|---|
| .. | .. |
|---|
| 6486 | 7052 | goto out; |
|---|
| 6487 | 7053 | } |
|---|
| 6488 | 7054 | |
|---|
| 7055 | + local_irq_disable(); |
|---|
| 7056 | + arch_spin_lock(&tr->max_lock); |
|---|
| 7057 | + if (tr->cond_snapshot) |
|---|
| 7058 | + ret = -EBUSY; |
|---|
| 7059 | + arch_spin_unlock(&tr->max_lock); |
|---|
| 7060 | + local_irq_enable(); |
|---|
| 7061 | + if (ret) |
|---|
| 7062 | + goto out; |
|---|
| 7063 | + |
|---|
| 6489 | 7064 | switch (val) { |
|---|
| 6490 | 7065 | case 0: |
|---|
| 6491 | 7066 | if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { |
|---|
| .. | .. |
|---|
| 6505 | 7080 | #endif |
|---|
| 6506 | 7081 | if (tr->allocated_snapshot) |
|---|
| 6507 | 7082 | ret = resize_buffer_duplicate_size(&tr->max_buffer, |
|---|
| 6508 | | - &tr->trace_buffer, iter->cpu_file); |
|---|
| 7083 | + &tr->array_buffer, iter->cpu_file); |
|---|
| 6509 | 7084 | else |
|---|
| 6510 | 7085 | ret = tracing_alloc_snapshot_instance(tr); |
|---|
| 6511 | 7086 | if (ret < 0) |
|---|
| .. | .. |
|---|
| 6513 | 7088 | local_irq_disable(); |
|---|
| 6514 | 7089 | /* Now, we're going to swap */ |
|---|
| 6515 | 7090 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) |
|---|
| 6516 | | - update_max_tr(tr, current, smp_processor_id()); |
|---|
| 7091 | + update_max_tr(tr, current, smp_processor_id(), NULL); |
|---|
| 6517 | 7092 | else |
|---|
| 6518 | 7093 | update_max_tr_single(tr, current, iter->cpu_file); |
|---|
| 6519 | 7094 | local_irq_enable(); |
|---|
| .. | .. |
|---|
| 6523 | 7098 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) |
|---|
| 6524 | 7099 | tracing_reset_online_cpus(&tr->max_buffer); |
|---|
| 6525 | 7100 | else |
|---|
| 6526 | | - tracing_reset(&tr->max_buffer, iter->cpu_file); |
|---|
| 7101 | + tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); |
|---|
| 6527 | 7102 | } |
|---|
| 6528 | 7103 | break; |
|---|
| 6529 | 7104 | } |
|---|
| .. | .. |
|---|
| 6567 | 7142 | struct ftrace_buffer_info *info; |
|---|
| 6568 | 7143 | int ret; |
|---|
| 6569 | 7144 | |
|---|
| 7145 | + /* The following checks for tracefs lockdown */ |
|---|
| 6570 | 7146 | ret = tracing_buffers_open(inode, filp); |
|---|
| 6571 | 7147 | if (ret < 0) |
|---|
| 6572 | 7148 | return ret; |
|---|
| .. | .. |
|---|
| 6579 | 7155 | } |
|---|
| 6580 | 7156 | |
|---|
| 6581 | 7157 | info->iter.snapshot = true; |
|---|
| 6582 | | - info->iter.trace_buffer = &info->iter.tr->max_buffer; |
|---|
| 7158 | + info->iter.array_buffer = &info->iter.tr->max_buffer; |
|---|
| 6583 | 7159 | |
|---|
| 6584 | 7160 | return ret; |
|---|
| 6585 | 7161 | } |
|---|
| .. | .. |
|---|
| 6688 | 7264 | |
|---|
| 6689 | 7265 | #endif /* CONFIG_TRACER_SNAPSHOT */ |
|---|
| 6690 | 7266 | |
|---|
| 7267 | +#define TRACING_LOG_ERRS_MAX 8 |
|---|
| 7268 | +#define TRACING_LOG_LOC_MAX 128 |
|---|
| 7269 | + |
|---|
| 7270 | +#define CMD_PREFIX " Command: " |
|---|
| 7271 | + |
|---|
| 7272 | +struct err_info { |
|---|
| 7273 | + const char **errs; /* ptr to loc-specific array of err strings */ |
|---|
| 7274 | + u8 type; /* index into errs -> specific err string */ |
|---|
| 7275 | + u8 pos; /* MAX_FILTER_STR_VAL = 256 */ |
|---|
| 7276 | + u64 ts; |
|---|
| 7277 | +}; |
|---|
| 7278 | + |
|---|
| 7279 | +struct tracing_log_err { |
|---|
| 7280 | + struct list_head list; |
|---|
| 7281 | + struct err_info info; |
|---|
| 7282 | + char loc[TRACING_LOG_LOC_MAX]; /* err location */ |
|---|
| 7283 | + char cmd[MAX_FILTER_STR_VAL]; /* what caused err */ |
|---|
| 7284 | +}; |
|---|
| 7285 | + |
|---|
| 7286 | +static DEFINE_MUTEX(tracing_err_log_lock); |
|---|
| 7287 | + |
|---|
| 7288 | +static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) |
|---|
| 7289 | +{ |
|---|
| 7290 | + struct tracing_log_err *err; |
|---|
| 7291 | + |
|---|
| 7292 | + if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { |
|---|
| 7293 | + err = kzalloc(sizeof(*err), GFP_KERNEL); |
|---|
| 7294 | + if (!err) |
|---|
| 7295 | + err = ERR_PTR(-ENOMEM); |
|---|
| 7296 | + else |
|---|
| 7297 | + tr->n_err_log_entries++; |
|---|
| 7298 | + |
|---|
| 7299 | + return err; |
|---|
| 7300 | + } |
|---|
| 7301 | + |
|---|
| 7302 | + err = list_first_entry(&tr->err_log, struct tracing_log_err, list); |
|---|
| 7303 | + list_del(&err->list); |
|---|
| 7304 | + |
|---|
| 7305 | + return err; |
|---|
| 7306 | +} |
|---|
| 7307 | + |
|---|
| 7308 | +/** |
|---|
| 7309 | + * err_pos - find the position of a string within a command for error careting |
|---|
| 7310 | + * @cmd: The tracing command that caused the error |
|---|
| 7311 | + * @str: The string to position the caret at within @cmd |
|---|
| 7312 | + * |
|---|
| 7313 | + * Finds the position of the first occurrence of @str within @cmd. The |
|---|
| 7314 | + * return value can be passed to tracing_log_err() for caret placement |
|---|
| 7315 | + * within @cmd. |
|---|
| 7316 | + * |
|---|
| 7317 | + * Returns the index within @cmd of the first occurrence of @str or 0 |
|---|
| 7318 | + * if @str was not found. |
|---|
| 7319 | + */ |
|---|
| 7320 | +unsigned int err_pos(char *cmd, const char *str) |
|---|
| 7321 | +{ |
|---|
| 7322 | + char *found; |
|---|
| 7323 | + |
|---|
| 7324 | + if (WARN_ON(!strlen(cmd))) |
|---|
| 7325 | + return 0; |
|---|
| 7326 | + |
|---|
| 7327 | + found = strstr(cmd, str); |
|---|
| 7328 | + if (found) |
|---|
| 7329 | + return found - cmd; |
|---|
| 7330 | + |
|---|
| 7331 | + return 0; |
|---|
| 7332 | +} |
|---|
| 7333 | + |
|---|
| 7334 | +/** |
|---|
| 7335 | + * tracing_log_err - write an error to the tracing error log |
|---|
| 7336 | + * @tr: The associated trace array for the error (NULL for top level array) |
|---|
| 7337 | + * @loc: A string describing where the error occurred |
|---|
| 7338 | + * @cmd: The tracing command that caused the error |
|---|
| 7339 | + * @errs: The array of loc-specific static error strings |
|---|
| 7340 | + * @type: The index into errs[], which produces the specific static err string |
|---|
| 7341 | + * @pos: The position the caret should be placed in the cmd |
|---|
| 7342 | + * |
|---|
| 7343 | + * Writes an error into tracing/error_log of the form: |
|---|
| 7344 | + * |
|---|
| 7345 | + * <loc>: error: <text> |
|---|
| 7346 | + * Command: <cmd> |
|---|
| 7347 | + * ^ |
|---|
| 7348 | + * |
|---|
| 7349 | + * tracing/error_log is a small log file containing the last |
|---|
| 7350 | + * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated |
|---|
| 7351 | + * unless there has been a tracing error, and the error log can be |
|---|
| 7352 | + * cleared and have its memory freed by writing the empty string in |
|---|
| 7353 | + * truncation mode to it i.e. echo > tracing/error_log. |
|---|
| 7354 | + * |
|---|
| 7355 | + * NOTE: the @errs array along with the @type param are used to |
|---|
| 7356 | + * produce a static error string - this string is not copied and saved |
|---|
| 7357 | + * when the error is logged - only a pointer to it is saved. See |
|---|
| 7358 | + * existing callers for examples of how static strings are typically |
|---|
| 7359 | + * defined for use with tracing_log_err(). |
|---|
| 7360 | + */ |
|---|
| 7361 | +void tracing_log_err(struct trace_array *tr, |
|---|
| 7362 | + const char *loc, const char *cmd, |
|---|
| 7363 | + const char **errs, u8 type, u8 pos) |
|---|
| 7364 | +{ |
|---|
| 7365 | + struct tracing_log_err *err; |
|---|
| 7366 | + |
|---|
| 7367 | + if (!tr) |
|---|
| 7368 | + tr = &global_trace; |
|---|
| 7369 | + |
|---|
| 7370 | + mutex_lock(&tracing_err_log_lock); |
|---|
| 7371 | + err = get_tracing_log_err(tr); |
|---|
| 7372 | + if (PTR_ERR(err) == -ENOMEM) { |
|---|
| 7373 | + mutex_unlock(&tracing_err_log_lock); |
|---|
| 7374 | + return; |
|---|
| 7375 | + } |
|---|
| 7376 | + |
|---|
| 7377 | + snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); |
|---|
| 7378 | + snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd); |
|---|
| 7379 | + |
|---|
| 7380 | + err->info.errs = errs; |
|---|
| 7381 | + err->info.type = type; |
|---|
| 7382 | + err->info.pos = pos; |
|---|
| 7383 | + err->info.ts = local_clock(); |
|---|
| 7384 | + |
|---|
| 7385 | + list_add_tail(&err->list, &tr->err_log); |
|---|
| 7386 | + mutex_unlock(&tracing_err_log_lock); |
|---|
| 7387 | +} |
|---|
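As the kernel-doc above notes, the error text is a static string selected by @type from the caller's @errs array and only referenced by pointer. The following sketch shows a typical caller, modeled on how the histogram-trigger code pairs a static error table with err_pos(); the table, names, and command here are illustrative.

```c
#include "trace.h"	/* assumes a caller inside kernel/trace/ */

/* Illustrative error table; real callers keep one static table per subsystem
 * and index it with an enum.
 */
static const char *example_errs[] = {
	"Colon missing in field definition",
	"Unknown field type",
};

enum { EXAMPLE_ERR_NO_COLON, EXAMPLE_ERR_BAD_TYPE };

static void example_report_bad_type(struct trace_array *tr, char *cmd,
				    const char *bad_type)
{
	/* Produces, in tracing/error_log:
	 *   example: error: Unknown field type
	 *     Command: <cmd>
	 *              ^        (caret under the offending type string)
	 */
	tracing_log_err(tr, "example", cmd, example_errs,
			EXAMPLE_ERR_BAD_TYPE, err_pos(cmd, bad_type));
}
```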
| 7388 | + |
|---|
| 7389 | +static void clear_tracing_err_log(struct trace_array *tr) |
|---|
| 7390 | +{ |
|---|
| 7391 | + struct tracing_log_err *err, *next; |
|---|
| 7392 | + |
|---|
| 7393 | + mutex_lock(&tracing_err_log_lock); |
|---|
| 7394 | + list_for_each_entry_safe(err, next, &tr->err_log, list) { |
|---|
| 7395 | + list_del(&err->list); |
|---|
| 7396 | + kfree(err); |
|---|
| 7397 | + } |
|---|
| 7398 | + |
|---|
| 7399 | + tr->n_err_log_entries = 0; |
|---|
| 7400 | + mutex_unlock(&tracing_err_log_lock); |
|---|
| 7401 | +} |
|---|
| 7402 | + |
|---|
| 7403 | +static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) |
|---|
| 7404 | +{ |
|---|
| 7405 | + struct trace_array *tr = m->private; |
|---|
| 7406 | + |
|---|
| 7407 | + mutex_lock(&tracing_err_log_lock); |
|---|
| 7408 | + |
|---|
| 7409 | + return seq_list_start(&tr->err_log, *pos); |
|---|
| 7410 | +} |
|---|
| 7411 | + |
|---|
| 7412 | +static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) |
|---|
| 7413 | +{ |
|---|
| 7414 | + struct trace_array *tr = m->private; |
|---|
| 7415 | + |
|---|
| 7416 | + return seq_list_next(v, &tr->err_log, pos); |
|---|
| 7417 | +} |
|---|
| 7418 | + |
|---|
| 7419 | +static void tracing_err_log_seq_stop(struct seq_file *m, void *v) |
|---|
| 7420 | +{ |
|---|
| 7421 | + mutex_unlock(&tracing_err_log_lock); |
|---|
| 7422 | +} |
|---|
| 7423 | + |
|---|
| 7424 | +static void tracing_err_log_show_pos(struct seq_file *m, u8 pos) |
|---|
| 7425 | +{ |
|---|
| 7426 | + u8 i; |
|---|
| 7427 | + |
|---|
| 7428 | + for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) |
|---|
| 7429 | + seq_putc(m, ' '); |
|---|
| 7430 | + for (i = 0; i < pos; i++) |
|---|
| 7431 | + seq_putc(m, ' '); |
|---|
| 7432 | + seq_puts(m, "^\n"); |
|---|
| 7433 | +} |
|---|
| 7434 | + |
|---|
| 7435 | +static int tracing_err_log_seq_show(struct seq_file *m, void *v) |
|---|
| 7436 | +{ |
|---|
| 7437 | + struct tracing_log_err *err = v; |
|---|
| 7438 | + |
|---|
| 7439 | + if (err) { |
|---|
| 7440 | + const char *err_text = err->info.errs[err->info.type]; |
|---|
| 7441 | + u64 sec = err->info.ts; |
|---|
| 7442 | + u32 nsec; |
|---|
| 7443 | + |
|---|
| 7444 | + nsec = do_div(sec, NSEC_PER_SEC); |
|---|
| 7445 | + seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000, |
|---|
| 7446 | + err->loc, err_text); |
|---|
| 7447 | + seq_printf(m, "%s", err->cmd); |
|---|
| 7448 | + tracing_err_log_show_pos(m, err->info.pos); |
|---|
| 7449 | + } |
|---|
| 7450 | + |
|---|
| 7451 | + return 0; |
|---|
| 7452 | +} |
|---|
| 7453 | + |
|---|
| 7454 | +static const struct seq_operations tracing_err_log_seq_ops = { |
|---|
| 7455 | + .start = tracing_err_log_seq_start, |
|---|
| 7456 | + .next = tracing_err_log_seq_next, |
|---|
| 7457 | + .stop = tracing_err_log_seq_stop, |
|---|
| 7458 | + .show = tracing_err_log_seq_show |
|---|
| 7459 | +}; |
|---|
| 7460 | + |
|---|
| 7461 | +static int tracing_err_log_open(struct inode *inode, struct file *file) |
|---|
| 7462 | +{ |
|---|
| 7463 | + struct trace_array *tr = inode->i_private; |
|---|
| 7464 | + int ret = 0; |
|---|
| 7465 | + |
|---|
| 7466 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 7467 | + if (ret) |
|---|
| 7468 | + return ret; |
|---|
| 7469 | + |
|---|
| 7470 | + /* If this file was opened for write, then erase contents */ |
|---|
| 7471 | + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) |
|---|
| 7472 | + clear_tracing_err_log(tr); |
|---|
| 7473 | + |
|---|
| 7474 | + if (file->f_mode & FMODE_READ) { |
|---|
| 7475 | + ret = seq_open(file, &tracing_err_log_seq_ops); |
|---|
| 7476 | + if (!ret) { |
|---|
| 7477 | + struct seq_file *m = file->private_data; |
|---|
| 7478 | + m->private = tr; |
|---|
| 7479 | + } else { |
|---|
| 7480 | + trace_array_put(tr); |
|---|
| 7481 | + } |
|---|
| 7482 | + } |
|---|
| 7483 | + return ret; |
|---|
| 7484 | +} |
|---|
| 7485 | + |
|---|
| 7486 | +static ssize_t tracing_err_log_write(struct file *file, |
|---|
| 7487 | + const char __user *buffer, |
|---|
| 7488 | + size_t count, loff_t *ppos) |
|---|
| 7489 | +{ |
|---|
| 7490 | + return count; |
|---|
| 7491 | +} |
|---|
| 7492 | + |
|---|
| 7493 | +static int tracing_err_log_release(struct inode *inode, struct file *file) |
|---|
| 7494 | +{ |
|---|
| 7495 | + struct trace_array *tr = inode->i_private; |
|---|
| 7496 | + |
|---|
| 7497 | + trace_array_put(tr); |
|---|
| 7498 | + |
|---|
| 7499 | + if (file->f_mode & FMODE_READ) |
|---|
| 7500 | + seq_release(inode, file); |
|---|
| 7501 | + |
|---|
| 7502 | + return 0; |
|---|
| 7503 | +} |
|---|
| 7504 | + |
|---|
| 7505 | +static const struct file_operations tracing_err_log_fops = { |
|---|
| 7506 | + .open = tracing_err_log_open, |
|---|
| 7507 | + .write = tracing_err_log_write, |
|---|
| 7508 | + .read = seq_read, |
|---|
| 7509 | + .llseek = seq_lseek, |
|---|
| 7510 | + .release = tracing_err_log_release, |
|---|
| 7511 | +}; |
|---|
| 7512 | + |
|---|
| 6691 | 7513 | static int tracing_buffers_open(struct inode *inode, struct file *filp) |
|---|
| 6692 | 7514 | { |
|---|
| 6693 | 7515 | struct trace_array *tr = inode->i_private; |
|---|
| 6694 | 7516 | struct ftrace_buffer_info *info; |
|---|
| 6695 | 7517 | int ret; |
|---|
| 6696 | 7518 | |
|---|
| 6697 | | - if (tracing_disabled) |
|---|
| 6698 | | - return -ENODEV; |
|---|
| 7519 | + ret = tracing_check_open_get_tr(tr); |
|---|
| 7520 | + if (ret) |
|---|
| 7521 | + return ret; |
|---|
| 6699 | 7522 | |
|---|
| 6700 | | - if (trace_array_get(tr) < 0) |
|---|
| 6701 | | - return -ENODEV; |
|---|
| 6702 | | - |
|---|
| 6703 | | - info = kzalloc(sizeof(*info), GFP_KERNEL); |
|---|
| 7523 | + info = kvzalloc(sizeof(*info), GFP_KERNEL); |
|---|
| 6704 | 7524 | if (!info) { |
|---|
| 6705 | 7525 | trace_array_put(tr); |
|---|
| 6706 | 7526 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 6711 | 7531 | info->iter.tr = tr; |
|---|
| 6712 | 7532 | info->iter.cpu_file = tracing_get_cpu(inode); |
|---|
| 6713 | 7533 | info->iter.trace = tr->current_trace; |
|---|
| 6714 | | - info->iter.trace_buffer = &tr->trace_buffer; |
|---|
| 7534 | + info->iter.array_buffer = &tr->array_buffer; |
|---|
| 6715 | 7535 | info->spare = NULL; |
|---|
| 6716 | 7536 | /* Force reading ring buffer for first read */ |
|---|
| 6717 | 7537 | info->read = (unsigned int)-1; |
|---|
| 6718 | 7538 | |
|---|
| 6719 | 7539 | filp->private_data = info; |
|---|
| 6720 | 7540 | |
|---|
| 6721 | | - tr->current_trace->ref++; |
|---|
| 7541 | + tr->trace_ref++; |
|---|
| 6722 | 7542 | |
|---|
| 6723 | 7543 | mutex_unlock(&trace_types_lock); |
|---|
| 6724 | 7544 | |
|---|
| .. | .. |
|---|
| 6756 | 7576 | #endif |
|---|
| 6757 | 7577 | |
|---|
| 6758 | 7578 | if (!info->spare) { |
|---|
| 6759 | | - info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, |
|---|
| 7579 | + info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, |
|---|
| 6760 | 7580 | iter->cpu_file); |
|---|
| 6761 | 7581 | if (IS_ERR(info->spare)) { |
|---|
| 6762 | 7582 | ret = PTR_ERR(info->spare); |
|---|
| .. | .. |
|---|
| 6774 | 7594 | |
|---|
| 6775 | 7595 | again: |
|---|
| 6776 | 7596 | trace_access_lock(iter->cpu_file); |
|---|
| 6777 | | - ret = ring_buffer_read_page(iter->trace_buffer->buffer, |
|---|
| 7597 | + ret = ring_buffer_read_page(iter->array_buffer->buffer, |
|---|
| 6778 | 7598 | &info->spare, |
|---|
| 6779 | 7599 | count, |
|---|
| 6780 | 7600 | iter->cpu_file, 0); |
|---|
| .. | .. |
|---|
| 6785 | 7605 | if ((filp->f_flags & O_NONBLOCK)) |
|---|
| 6786 | 7606 | return -EAGAIN; |
|---|
| 6787 | 7607 | |
|---|
| 6788 | | - ret = wait_on_pipe(iter, false); |
|---|
| 7608 | + ret = wait_on_pipe(iter, 0); |
|---|
| 6789 | 7609 | if (ret) |
|---|
| 6790 | 7610 | return ret; |
|---|
| 6791 | 7611 | |
|---|
| .. | .. |
|---|
| 6819 | 7639 | |
|---|
| 6820 | 7640 | mutex_lock(&trace_types_lock); |
|---|
| 6821 | 7641 | |
|---|
| 6822 | | - iter->tr->current_trace->ref--; |
|---|
| 7642 | + iter->tr->trace_ref--; |
|---|
| 6823 | 7643 | |
|---|
| 6824 | 7644 | __trace_array_put(iter->tr); |
|---|
| 6825 | 7645 | |
|---|
| 6826 | 7646 | if (info->spare) |
|---|
| 6827 | | - ring_buffer_free_read_page(iter->trace_buffer->buffer, |
|---|
| 7647 | + ring_buffer_free_read_page(iter->array_buffer->buffer, |
|---|
| 6828 | 7648 | info->spare_cpu, info->spare); |
|---|
| 6829 | | - kfree(info); |
|---|
| 7649 | + kvfree(info); |
|---|
| 6830 | 7650 | |
|---|
| 6831 | 7651 | mutex_unlock(&trace_types_lock); |
|---|
| 6832 | 7652 | |
|---|
| .. | .. |
|---|
| 6834 | 7654 | } |
|---|
| 6835 | 7655 | |
|---|
| 6836 | 7656 | struct buffer_ref { |
|---|
| 6837 | | - struct ring_buffer *buffer; |
|---|
| 7657 | + struct trace_buffer *buffer; |
|---|
| 6838 | 7658 | void *page; |
|---|
| 6839 | 7659 | int cpu; |
|---|
| 6840 | 7660 | refcount_t refcount; |
|---|
| .. | .. |
|---|
| 6871 | 7691 | |
|---|
| 6872 | 7692 | /* Pipe buffer operations for a buffer. */ |
|---|
| 6873 | 7693 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { |
|---|
| 6874 | | - .can_merge = 0, |
|---|
| 6875 | | - .confirm = generic_pipe_buf_confirm, |
|---|
| 6876 | 7694 | .release = buffer_pipe_buf_release, |
|---|
| 6877 | | - .steal = generic_pipe_buf_nosteal, |
|---|
| 6878 | 7695 | .get = buffer_pipe_buf_get, |
|---|
| 6879 | 7696 | }; |
|---|
| 6880 | 7697 | |
|---|
| .. | .. |
|---|
| 6930 | 7747 | |
|---|
| 6931 | 7748 | again: |
|---|
| 6932 | 7749 | trace_access_lock(iter->cpu_file); |
|---|
| 6933 | | - entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); |
|---|
| 7750 | + entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); |
|---|
| 6934 | 7751 | |
|---|
| 6935 | 7752 | for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { |
|---|
| 6936 | 7753 | struct page *page; |
|---|
| .. | .. |
|---|
| 6943 | 7760 | } |
|---|
| 6944 | 7761 | |
|---|
| 6945 | 7762 | refcount_set(&ref->refcount, 1); |
|---|
| 6946 | | - ref->buffer = iter->trace_buffer->buffer; |
|---|
| 7763 | + ref->buffer = iter->array_buffer->buffer; |
|---|
| 6947 | 7764 | ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); |
|---|
| 6948 | 7765 | if (IS_ERR(ref->page)) { |
|---|
| 6949 | 7766 | ret = PTR_ERR(ref->page); |
|---|
| .. | .. |
|---|
| 6971 | 7788 | spd.nr_pages++; |
|---|
| 6972 | 7789 | *ppos += PAGE_SIZE; |
|---|
| 6973 | 7790 | |
|---|
| 6974 | | - entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); |
|---|
| 7791 | + entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); |
|---|
| 6975 | 7792 | } |
|---|
| 6976 | 7793 | |
|---|
| 6977 | 7794 | trace_access_unlock(iter->cpu_file); |
|---|
| .. | .. |
|---|
| 6986 | 7803 | if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) |
|---|
| 6987 | 7804 | goto out; |
|---|
| 6988 | 7805 | |
|---|
| 6989 | | - ret = wait_on_pipe(iter, true); |
|---|
| 7806 | + ret = wait_on_pipe(iter, iter->tr->buffer_percent); |
|---|
| 6990 | 7807 | if (ret) |
|---|
| 6991 | 7808 | goto out; |
|---|
| 6992 | 7809 | |
|---|
| .. | .. |
|---|
| 7015 | 7832 | { |
|---|
| 7016 | 7833 | struct inode *inode = file_inode(filp); |
|---|
| 7017 | 7834 | struct trace_array *tr = inode->i_private; |
|---|
| 7018 | | - struct trace_buffer *trace_buf = &tr->trace_buffer; |
|---|
| 7835 | + struct array_buffer *trace_buf = &tr->array_buffer; |
|---|
| 7019 | 7836 | int cpu = tracing_get_cpu(inode); |
|---|
| 7020 | 7837 | struct trace_seq *s; |
|---|
| 7021 | 7838 | unsigned long cnt; |
|---|
| .. | .. |
|---|
| 7086 | 7903 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, |
|---|
| 7087 | 7904 | size_t cnt, loff_t *ppos) |
|---|
| 7088 | 7905 | { |
|---|
| 7089 | | - unsigned long *p = filp->private_data; |
|---|
| 7090 | | - char buf[64]; /* Not too big for a shallow stack */ |
|---|
| 7906 | + ssize_t ret; |
|---|
| 7907 | + char *buf; |
|---|
| 7091 | 7908 | int r; |
|---|
| 7092 | 7909 | |
|---|
| 7093 | | - r = scnprintf(buf, 63, "%ld", *p); |
|---|
| 7094 | | - buf[r++] = '\n'; |
|---|
| 7910 | + /* 256 should be plenty to hold the amount needed */ |
|---|
| 7911 | + buf = kmalloc(256, GFP_KERNEL); |
|---|
| 7912 | + if (!buf) |
|---|
| 7913 | + return -ENOMEM; |
|---|
| 7095 | 7914 | |
|---|
| 7096 | | - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
|---|
| 7915 | + r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n", |
|---|
| 7916 | + ftrace_update_tot_cnt, |
|---|
| 7917 | + ftrace_number_of_pages, |
|---|
| 7918 | + ftrace_number_of_groups); |
|---|
| 7919 | + |
|---|
| 7920 | + ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
|---|
| 7921 | + kfree(buf); |
|---|
| 7922 | + return ret; |
|---|
| 7097 | 7923 | } |
|---|
| 7098 | 7924 | |
|---|
| 7099 | 7925 | static const struct file_operations tracing_dyn_info_fops = { |
|---|
| .. | .. |
|---|
| 7287 | 8113 | |
|---|
| 7288 | 8114 | tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); |
|---|
| 7289 | 8115 | |
|---|
| 7290 | | - WARN_ONCE(!tr->percpu_dir, |
|---|
| 8116 | + MEM_FAIL(!tr->percpu_dir, |
|---|
| 7291 | 8117 | "Could not create tracefs directory 'per_cpu/%d'\n", cpu); |
|---|
| 7292 | 8118 | |
|---|
| 7293 | 8119 | return tr->percpu_dir; |
|---|
| .. | .. |
|---|
| 7608 | 8434 | for (cnt = 0; opts[cnt].name; cnt++) { |
|---|
| 7609 | 8435 | create_trace_option_file(tr, &topts[cnt], flags, |
|---|
| 7610 | 8436 | &opts[cnt]); |
|---|
| 7611 | | - WARN_ONCE(topts[cnt].entry == NULL, |
|---|
| 8437 | + MEM_FAIL(topts[cnt].entry == NULL, |
|---|
| 7612 | 8438 | "Failed to create trace option: %s", |
|---|
| 7613 | 8439 | opts[cnt].name); |
|---|
| 7614 | 8440 | } |
|---|
| .. | .. |
|---|
| 7665 | 8491 | size_t cnt, loff_t *ppos) |
|---|
| 7666 | 8492 | { |
|---|
| 7667 | 8493 | struct trace_array *tr = filp->private_data; |
|---|
| 7668 | | - struct ring_buffer *buffer = tr->trace_buffer.buffer; |
|---|
| 8494 | + struct trace_buffer *buffer = tr->array_buffer.buffer; |
|---|
| 7669 | 8495 | unsigned long val; |
|---|
| 7670 | 8496 | int ret; |
|---|
| 7671 | 8497 | |
|---|
| .. | .. |
|---|
| 7702 | 8528 | .llseek = default_llseek, |
|---|
| 7703 | 8529 | }; |
|---|
| 7704 | 8530 | |
|---|
| 7705 | | -struct dentry *trace_instance_dir; |
|---|
| 8531 | +static ssize_t |
|---|
| 8532 | +buffer_percent_read(struct file *filp, char __user *ubuf, |
|---|
| 8533 | + size_t cnt, loff_t *ppos) |
|---|
| 8534 | +{ |
|---|
| 8535 | + struct trace_array *tr = filp->private_data; |
|---|
| 8536 | + char buf[64]; |
|---|
| 8537 | + int r; |
|---|
| 8538 | + |
|---|
| 8539 | + r = tr->buffer_percent; |
|---|
| 8540 | + r = sprintf(buf, "%d\n", r); |
|---|
| 8541 | + |
|---|
| 8542 | + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
|---|
| 8543 | +} |
|---|
| 8544 | + |
|---|
| 8545 | +static ssize_t |
|---|
| 8546 | +buffer_percent_write(struct file *filp, const char __user *ubuf, |
|---|
| 8547 | + size_t cnt, loff_t *ppos) |
|---|
| 8548 | +{ |
|---|
| 8549 | + struct trace_array *tr = filp->private_data; |
|---|
| 8550 | + unsigned long val; |
|---|
| 8551 | + int ret; |
|---|
| 8552 | + |
|---|
| 8553 | + ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
|---|
| 8554 | + if (ret) |
|---|
| 8555 | + return ret; |
|---|
| 8556 | + |
|---|
| 8557 | + if (val > 100) |
|---|
| 8558 | + return -EINVAL; |
|---|
| 8559 | + |
|---|
| 8560 | + if (!val) |
|---|
| 8561 | + val = 1; |
|---|
| 8562 | + |
|---|
| 8563 | + tr->buffer_percent = val; |
|---|
| 8564 | + |
|---|
| 8565 | + (*ppos)++; |
|---|
| 8566 | + |
|---|
| 8567 | + return cnt; |
|---|
| 8568 | +} |
|---|
| 8569 | + |
|---|
| 8570 | +static const struct file_operations buffer_percent_fops = { |
|---|
| 8571 | + .open = tracing_open_generic_tr, |
|---|
| 8572 | + .read = buffer_percent_read, |
|---|
| 8573 | + .write = buffer_percent_write, |
|---|
| 8574 | + .release = tracing_release_generic_tr, |
|---|
| 8575 | + .llseek = default_llseek, |
|---|
| 8576 | +}; |
|---|
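
The new `buffer_percent` file accepts 0–100, silently bumping 0 up to 1, and `init_tracer_tracefs()` later in this diff defaults it to 50. Elsewhere in the tracing code (not shown in this hunk) the value is used as the fill threshold at which blocked readers of the per-CPU `trace_pipe_raw` files are woken. Note the file is created with mode 0444 below, so in practice only a capable (root) process can open it for writing. A minimal user-space sketch, assuming the default tracefs mount point `/sys/kernel/tracing`:

```c
/* Sketch only: raise the wakeup threshold to ~75% and then do a
 * blocking read of cpu0's raw buffer.  Paths assume the default
 * tracefs mount point; error handling is trimmed for brevity.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/tracing/buffer_percent", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "75", 2) != 2)
			perror("buffer_percent");
		close(fd);
	}

	fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
	if (fd < 0)
		return 1;
	/* Blocks until cpu0's ring buffer reaches (roughly) the requested
	 * fill level, or until tracing wakes its readers for other reasons. */
	n = read(fd, page, sizeof(page));
	printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}
```
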
| 8577 | + |
|---|
| 8578 | +static struct dentry *trace_instance_dir; |
|---|
| 7706 | 8579 | |
|---|
| 7707 | 8580 | static void |
|---|
| 7708 | 8581 | init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); |
|---|
| 7709 | 8582 | |
|---|
| 7710 | 8583 | static int |
|---|
| 7711 | | -allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) |
|---|
| 8584 | +allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) |
|---|
| 7712 | 8585 | { |
|---|
| 7713 | 8586 | enum ring_buffer_flags rb_flags; |
|---|
| 7714 | 8587 | |
|---|
| .. | .. |
|---|
| 7728 | 8601 | } |
|---|
| 7729 | 8602 | |
|---|
| 7730 | 8603 | /* Allocate the first page for all buffers */ |
|---|
| 7731 | | - set_buffer_entries(&tr->trace_buffer, |
|---|
| 7732 | | - ring_buffer_size(tr->trace_buffer.buffer, 0)); |
|---|
| 8604 | + set_buffer_entries(&tr->array_buffer, |
|---|
| 8605 | + ring_buffer_size(tr->array_buffer.buffer, 0)); |
|---|
| 7733 | 8606 | |
|---|
| 7734 | 8607 | return 0; |
|---|
| 7735 | 8608 | } |
|---|
| .. | .. |
|---|
| 7738 | 8611 | { |
|---|
| 7739 | 8612 | int ret; |
|---|
| 7740 | 8613 | |
|---|
| 7741 | | - ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); |
|---|
| 8614 | + ret = allocate_trace_buffer(tr, &tr->array_buffer, size); |
|---|
| 7742 | 8615 | if (ret) |
|---|
| 7743 | 8616 | return ret; |
|---|
| 7744 | 8617 | |
|---|
| 7745 | 8618 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 7746 | 8619 | ret = allocate_trace_buffer(tr, &tr->max_buffer, |
|---|
| 7747 | 8620 | allocate_snapshot ? size : 1); |
|---|
| 7748 | | - if (WARN_ON(ret)) { |
|---|
| 7749 | | - ring_buffer_free(tr->trace_buffer.buffer); |
|---|
| 7750 | | - tr->trace_buffer.buffer = NULL; |
|---|
| 7751 | | - free_percpu(tr->trace_buffer.data); |
|---|
| 7752 | | - tr->trace_buffer.data = NULL; |
|---|
| 8621 | + if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { |
|---|
| 8622 | + ring_buffer_free(tr->array_buffer.buffer); |
|---|
| 8623 | + tr->array_buffer.buffer = NULL; |
|---|
| 8624 | + free_percpu(tr->array_buffer.data); |
|---|
| 8625 | + tr->array_buffer.data = NULL; |
|---|
| 7753 | 8626 | return -ENOMEM; |
|---|
| 7754 | 8627 | } |
|---|
| 7755 | 8628 | tr->allocated_snapshot = allocate_snapshot; |
|---|
| .. | .. |
|---|
| 7761 | 8634 | allocate_snapshot = false; |
|---|
| 7762 | 8635 | #endif |
|---|
| 7763 | 8636 | |
|---|
| 7764 | | - /* |
|---|
| 7765 | | - * Because of some magic with the way alloc_percpu() works on |
|---|
| 7766 | | - * x86_64, we need to synchronize the pgd of all the tables, |
|---|
| 7767 | | - * otherwise the trace events that happen in x86_64 page fault |
|---|
| 7768 | | - * handlers can't cope with accessing the chance that a |
|---|
| 7769 | | - * alloc_percpu()'d memory might be touched in the page fault trace |
|---|
| 7770 | | - * event. Oh, and we need to audit all other alloc_percpu() and vmalloc() |
|---|
| 7771 | | - * calls in tracing, because something might get triggered within a |
|---|
| 7772 | | - * page fault trace event! |
|---|
| 7773 | | - */ |
|---|
| 7774 | | - vmalloc_sync_mappings(); |
|---|
| 7775 | | - |
|---|
| 7776 | 8637 | return 0; |
|---|
| 7777 | 8638 | } |
|---|
| 7778 | 8639 | |
|---|
| 7779 | | -static void free_trace_buffer(struct trace_buffer *buf) |
|---|
| 8640 | +static void free_trace_buffer(struct array_buffer *buf) |
|---|
| 7780 | 8641 | { |
|---|
| 7781 | 8642 | if (buf->buffer) { |
|---|
| 7782 | 8643 | ring_buffer_free(buf->buffer); |
|---|
| .. | .. |
|---|
| 7791 | 8652 | if (!tr) |
|---|
| 7792 | 8653 | return; |
|---|
| 7793 | 8654 | |
|---|
| 7794 | | - free_trace_buffer(&tr->trace_buffer); |
|---|
| 8655 | + free_trace_buffer(&tr->array_buffer); |
|---|
| 7795 | 8656 | |
|---|
| 7796 | 8657 | #ifdef CONFIG_TRACER_MAX_TRACE |
|---|
| 7797 | 8658 | free_trace_buffer(&tr->max_buffer); |
|---|
| .. | .. |
|---|
| 7818 | 8679 | static void update_tracer_options(struct trace_array *tr) |
|---|
| 7819 | 8680 | { |
|---|
| 7820 | 8681 | mutex_lock(&trace_types_lock); |
|---|
| 8682 | + tracer_options_updated = true; |
|---|
| 7821 | 8683 | __update_tracer_options(tr); |
|---|
| 7822 | 8684 | mutex_unlock(&trace_types_lock); |
|---|
| 7823 | 8685 | } |
|---|
| 7824 | 8686 | |
|---|
| 7825 | | -static int instance_mkdir(const char *name) |
|---|
| 8687 | +/* Must have trace_types_lock held */ |
|---|
| 8688 | +struct trace_array *trace_array_find(const char *instance) |
|---|
| 8689 | +{ |
|---|
| 8690 | + struct trace_array *tr, *found = NULL; |
|---|
| 8691 | + |
|---|
| 8692 | + list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
|---|
| 8693 | + if (tr->name && strcmp(tr->name, instance) == 0) { |
|---|
| 8694 | + found = tr; |
|---|
| 8695 | + break; |
|---|
| 8696 | + } |
|---|
| 8697 | + } |
|---|
| 8698 | + |
|---|
| 8699 | + return found; |
|---|
| 8700 | +} |
|---|
| 8701 | + |
|---|
| 8702 | +struct trace_array *trace_array_find_get(const char *instance) |
|---|
| 8703 | +{ |
|---|
| 8704 | + struct trace_array *tr; |
|---|
| 8705 | + |
|---|
| 8706 | + mutex_lock(&trace_types_lock); |
|---|
| 8707 | + tr = trace_array_find(instance); |
|---|
| 8708 | + if (tr) |
|---|
| 8709 | + tr->ref++; |
|---|
| 8710 | + mutex_unlock(&trace_types_lock); |
|---|
| 8711 | + |
|---|
| 8712 | + return tr; |
|---|
| 8713 | +} |
|---|
| 8714 | + |
|---|
| 8715 | +static int trace_array_create_dir(struct trace_array *tr) |
|---|
| 8716 | +{ |
|---|
| 8717 | + int ret; |
|---|
| 8718 | + |
|---|
| 8719 | + tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); |
|---|
| 8720 | + if (!tr->dir) |
|---|
| 8721 | + return -EINVAL; |
|---|
| 8722 | + |
|---|
| 8723 | + ret = event_trace_add_tracer(tr->dir, tr); |
|---|
| 8724 | + if (ret) { |
|---|
| 8725 | + tracefs_remove(tr->dir); |
|---|
| 8726 | + return ret; |
|---|
| 8727 | + } |
|---|
| 8728 | + |
|---|
| 8729 | + init_tracer_tracefs(tr, tr->dir); |
|---|
| 8730 | + __update_tracer_options(tr); |
|---|
| 8731 | + |
|---|
| 8732 | + return ret; |
|---|
| 8733 | +} |
|---|
| 8734 | + |
|---|
| 8735 | +static struct trace_array *trace_array_create(const char *name) |
|---|
| 7826 | 8736 | { |
|---|
| 7827 | 8737 | struct trace_array *tr; |
|---|
| 7828 | 8738 | int ret; |
|---|
| 7829 | 8739 | |
|---|
| 7830 | | - mutex_lock(&event_mutex); |
|---|
| 7831 | | - mutex_lock(&trace_types_lock); |
|---|
| 7832 | | - |
|---|
| 7833 | | - ret = -EEXIST; |
|---|
| 7834 | | - list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
|---|
| 7835 | | - if (tr->name && strcmp(tr->name, name) == 0) |
|---|
| 7836 | | - goto out_unlock; |
|---|
| 7837 | | - } |
|---|
| 7838 | | - |
|---|
| 7839 | 8740 | ret = -ENOMEM; |
|---|
| 7840 | 8741 | tr = kzalloc(sizeof(*tr), GFP_KERNEL); |
|---|
| 7841 | 8742 | if (!tr) |
|---|
| 7842 | | - goto out_unlock; |
|---|
| 8743 | + return ERR_PTR(ret); |
|---|
| 7843 | 8744 | |
|---|
| 7844 | 8745 | tr->name = kstrdup(name, GFP_KERNEL); |
|---|
| 7845 | 8746 | if (!tr->name) |
|---|
| .. | .. |
|---|
| 7861 | 8762 | INIT_LIST_HEAD(&tr->systems); |
|---|
| 7862 | 8763 | INIT_LIST_HEAD(&tr->events); |
|---|
| 7863 | 8764 | INIT_LIST_HEAD(&tr->hist_vars); |
|---|
| 8765 | + INIT_LIST_HEAD(&tr->err_log); |
|---|
| 7864 | 8766 | |
|---|
| 7865 | 8767 | if (allocate_trace_buffers(tr, trace_buf_size) < 0) |
|---|
| 7866 | 8768 | goto out_free_tr; |
|---|
| 7867 | 8769 | |
|---|
| 7868 | | - tr->dir = tracefs_create_dir(name, trace_instance_dir); |
|---|
| 7869 | | - if (!tr->dir) |
|---|
| 8770 | + if (ftrace_allocate_ftrace_ops(tr) < 0) |
|---|
| 7870 | 8771 | goto out_free_tr; |
|---|
| 7871 | | - |
|---|
| 7872 | | - ret = event_trace_add_tracer(tr->dir, tr); |
|---|
| 7873 | | - if (ret) { |
|---|
| 7874 | | - tracefs_remove_recursive(tr->dir); |
|---|
| 7875 | | - goto out_free_tr; |
|---|
| 7876 | | - } |
|---|
| 7877 | 8772 | |
|---|
| 7878 | 8773 | ftrace_init_trace_array(tr); |
|---|
| 7879 | 8774 | |
|---|
| 7880 | | - init_tracer_tracefs(tr, tr->dir); |
|---|
| 7881 | 8775 | init_trace_flags_index(tr); |
|---|
| 7882 | | - __update_tracer_options(tr); |
|---|
| 8776 | + |
|---|
| 8777 | + if (trace_instance_dir) { |
|---|
| 8778 | + ret = trace_array_create_dir(tr); |
|---|
| 8779 | + if (ret) |
|---|
| 8780 | + goto out_free_tr; |
|---|
| 8781 | + } else |
|---|
| 8782 | + __trace_early_add_events(tr); |
|---|
| 7883 | 8783 | |
|---|
| 7884 | 8784 | list_add(&tr->list, &ftrace_trace_arrays); |
|---|
| 7885 | 8785 | |
|---|
| 7886 | | - mutex_unlock(&trace_types_lock); |
|---|
| 7887 | | - mutex_unlock(&event_mutex); |
|---|
| 8786 | + tr->ref++; |
|---|
| 7888 | 8787 | |
|---|
| 7889 | | - return 0; |
|---|
| 8788 | + return tr; |
|---|
| 7890 | 8789 | |
|---|
| 7891 | 8790 | out_free_tr: |
|---|
| 8791 | + ftrace_free_ftrace_ops(tr); |
|---|
| 7892 | 8792 | free_trace_buffers(tr); |
|---|
| 7893 | 8793 | free_cpumask_var(tr->tracing_cpumask); |
|---|
| 7894 | 8794 | kfree(tr->name); |
|---|
| 7895 | 8795 | kfree(tr); |
|---|
| 7896 | 8796 | |
|---|
| 7897 | | - out_unlock: |
|---|
| 7898 | | - mutex_unlock(&trace_types_lock); |
|---|
| 7899 | | - mutex_unlock(&event_mutex); |
|---|
| 7900 | | - |
|---|
| 7901 | | - return ret; |
|---|
| 7902 | | - |
|---|
| 8797 | + return ERR_PTR(ret); |
|---|
| 7903 | 8798 | } |
|---|
| 7904 | 8799 | |
|---|
| 7905 | | -static int instance_rmdir(const char *name) |
|---|
| 8800 | +static int instance_mkdir(const char *name) |
|---|
| 7906 | 8801 | { |
|---|
| 7907 | 8802 | struct trace_array *tr; |
|---|
| 7908 | | - int found = 0; |
|---|
| 7909 | 8803 | int ret; |
|---|
| 7910 | | - int i; |
|---|
| 7911 | 8804 | |
|---|
| 7912 | 8805 | mutex_lock(&event_mutex); |
|---|
| 7913 | 8806 | mutex_lock(&trace_types_lock); |
|---|
| 7914 | 8807 | |
|---|
| 7915 | | - ret = -ENODEV; |
|---|
| 7916 | | - list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
|---|
| 7917 | | - if (tr->name && strcmp(tr->name, name) == 0) { |
|---|
| 7918 | | - found = 1; |
|---|
| 7919 | | - break; |
|---|
| 7920 | | - } |
|---|
| 7921 | | - } |
|---|
| 7922 | | - if (!found) |
|---|
| 8808 | + ret = -EEXIST; |
|---|
| 8809 | + if (trace_array_find(name)) |
|---|
| 7923 | 8810 | goto out_unlock; |
|---|
| 7924 | 8811 | |
|---|
| 7925 | | - ret = -EBUSY; |
|---|
| 7926 | | - if (tr->ref || (tr->current_trace && tr->current_trace->ref)) |
|---|
| 7927 | | - goto out_unlock; |
|---|
| 8812 | + tr = trace_array_create(name); |
|---|
| 8813 | + |
|---|
| 8814 | + ret = PTR_ERR_OR_ZERO(tr); |
|---|
| 8815 | + |
|---|
| 8816 | +out_unlock: |
|---|
| 8817 | + mutex_unlock(&trace_types_lock); |
|---|
| 8818 | + mutex_unlock(&event_mutex); |
|---|
| 8819 | + return ret; |
|---|
| 8820 | +} |
|---|
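
`instance_mkdir()` remains the callback wired up by `create_trace_instances()` further down, so the usual way to reach `trace_array_create()` from user space is still `mkdir /sys/kernel/tracing/instances/<name>` (path assumes the default tracefs mount), with `instance_rmdir()` below as the matching `rmdir` path. The refactor splits out `trace_array_create()` so the same code can also be reached from inside the kernel via `trace_array_get_by_name()` below.
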
| 8821 | + |
|---|
| 8822 | +/** |
|---|
| 8823 | + * trace_array_get_by_name - Create/Lookup a trace array, given its name. |
|---|
| 8824 | + * @name: The name of the trace array to be looked up/created. |
|---|
| 8825 | + * |
|---|
| 8826 | + * Returns pointer to trace array with given name. |
|---|
| 8827 | + * NULL, if it cannot be created. |
|---|
| 8828 | + * |
|---|
| 8829 | + * NOTE: This function increments the reference counter associated with the |
|---|
| 8830 | + * trace array returned. This makes sure it cannot be freed while in use. |
|---|
| 8831 | + * Use trace_array_put() once the trace array is no longer needed. |
|---|
| 8832 | + * If the trace_array is to be freed, trace_array_destroy() needs to |
|---|
| 8833 | + * be called after the trace_array_put(), or simply let user space delete |
|---|
| 8834 | + * it from the tracefs instances directory. But until the |
|---|
| 8835 | + * trace_array_put() is called, user space can not delete it. |
|---|
| 8836 | + * |
|---|
| 8837 | + */ |
|---|
| 8838 | +struct trace_array *trace_array_get_by_name(const char *name) |
|---|
| 8839 | +{ |
|---|
| 8840 | + struct trace_array *tr; |
|---|
| 8841 | + |
|---|
| 8842 | + mutex_lock(&event_mutex); |
|---|
| 8843 | + mutex_lock(&trace_types_lock); |
|---|
| 8844 | + |
|---|
| 8845 | + list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
|---|
| 8846 | + if (tr->name && strcmp(tr->name, name) == 0) |
|---|
| 8847 | + goto out_unlock; |
|---|
| 8848 | + } |
|---|
| 8849 | + |
|---|
| 8850 | + tr = trace_array_create(name); |
|---|
| 8851 | + |
|---|
| 8852 | + if (IS_ERR(tr)) |
|---|
| 8853 | + tr = NULL; |
|---|
| 8854 | +out_unlock: |
|---|
| 8855 | + if (tr) |
|---|
| 8856 | + tr->ref++; |
|---|
| 8857 | + |
|---|
| 8858 | + mutex_unlock(&trace_types_lock); |
|---|
| 8859 | + mutex_unlock(&event_mutex); |
|---|
| 8860 | + return tr; |
|---|
| 8861 | +} |
|---|
| 8862 | +EXPORT_SYMBOL_GPL(trace_array_get_by_name); |
|---|
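
The kernel-doc above spells out the lifecycle: the returned array carries a reference, `trace_array_put()` drops it, and `trace_array_destroy()` (exported further down) removes the instance once only the creation reference remains. A minimal in-kernel sketch of that lifecycle follows; the instance name and the event being enabled are made up, and `trace_array_set_clr_event()` is assumed to be available from this kernel's trace_events.c:

```c
/* Hypothetical module-side sketch of the documented lifecycle; the
 * instance name and the enabled event are illustrative only.
 */
#include <linux/trace.h>
#include <linux/trace_events.h>

static int example_instance_roundtrip(void)
{
	struct trace_array *tr;
	int ret;

	/* Looks the instance up, or creates it, and takes a reference. */
	tr = trace_array_get_by_name("example_inst");
	if (!tr)
		return -ENOMEM;

	/* Use the instance, e.g. route sched_switch events into it. */
	ret = trace_array_set_clr_event(tr, "sched", "sched_switch", true);

	/* Per the comment above: drop the reference first, then destroy. */
	trace_array_put(tr);
	if (!ret)
		ret = trace_array_destroy(tr);
	return ret;
}
```
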
| 8863 | + |
|---|
| 8864 | +static int __remove_instance(struct trace_array *tr) |
|---|
| 8865 | +{ |
|---|
| 8866 | + int i; |
|---|
| 8867 | + |
|---|
| 8868 | + /* Reference counter for a newly created trace array = 1. */ |
|---|
| 8869 | + if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) |
|---|
| 8870 | + return -EBUSY; |
|---|
| 7928 | 8871 | |
|---|
| 7929 | 8872 | list_del(&tr->list); |
|---|
| 7930 | 8873 | |
|---|
| .. | .. |
|---|
| 7939 | 8882 | event_trace_del_tracer(tr); |
|---|
| 7940 | 8883 | ftrace_clear_pids(tr); |
|---|
| 7941 | 8884 | ftrace_destroy_function_files(tr); |
|---|
| 7942 | | - tracefs_remove_recursive(tr->dir); |
|---|
| 8885 | + tracefs_remove(tr->dir); |
|---|
| 7943 | 8886 | free_trace_buffers(tr); |
|---|
| 7944 | 8887 | |
|---|
| 7945 | 8888 | for (i = 0; i < tr->nr_topts; i++) { |
|---|
| .. | .. |
|---|
| 7951 | 8894 | kfree(tr->name); |
|---|
| 7952 | 8895 | kfree(tr); |
|---|
| 7953 | 8896 | |
|---|
| 7954 | | - ret = 0; |
|---|
| 8897 | + return 0; |
|---|
| 8898 | +} |
|---|
| 7955 | 8899 | |
|---|
| 7956 | | - out_unlock: |
|---|
| 8900 | +int trace_array_destroy(struct trace_array *this_tr) |
|---|
| 8901 | +{ |
|---|
| 8902 | + struct trace_array *tr; |
|---|
| 8903 | + int ret; |
|---|
| 8904 | + |
|---|
| 8905 | + if (!this_tr) |
|---|
| 8906 | + return -EINVAL; |
|---|
| 8907 | + |
|---|
| 8908 | + mutex_lock(&event_mutex); |
|---|
| 8909 | + mutex_lock(&trace_types_lock); |
|---|
| 8910 | + |
|---|
| 8911 | + ret = -ENODEV; |
|---|
| 8912 | + |
|---|
| 8913 | + /* Making sure trace array exists before destroying it. */ |
|---|
| 8914 | + list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
|---|
| 8915 | + if (tr == this_tr) { |
|---|
| 8916 | + ret = __remove_instance(tr); |
|---|
| 8917 | + break; |
|---|
| 8918 | + } |
|---|
| 8919 | + } |
|---|
| 8920 | + |
|---|
| 8921 | + mutex_unlock(&trace_types_lock); |
|---|
| 8922 | + mutex_unlock(&event_mutex); |
|---|
| 8923 | + |
|---|
| 8924 | + return ret; |
|---|
| 8925 | +} |
|---|
| 8926 | +EXPORT_SYMBOL_GPL(trace_array_destroy); |
|---|
| 8927 | + |
|---|
| 8928 | +static int instance_rmdir(const char *name) |
|---|
| 8929 | +{ |
|---|
| 8930 | + struct trace_array *tr; |
|---|
| 8931 | + int ret; |
|---|
| 8932 | + |
|---|
| 8933 | + mutex_lock(&event_mutex); |
|---|
| 8934 | + mutex_lock(&trace_types_lock); |
|---|
| 8935 | + |
|---|
| 8936 | + ret = -ENODEV; |
|---|
| 8937 | + tr = trace_array_find(name); |
|---|
| 8938 | + if (tr) |
|---|
| 8939 | + ret = __remove_instance(tr); |
|---|
| 8940 | + |
|---|
| 7957 | 8941 | mutex_unlock(&trace_types_lock); |
|---|
| 7958 | 8942 | mutex_unlock(&event_mutex); |
|---|
| 7959 | 8943 | |
|---|
| .. | .. |
|---|
| 7962 | 8946 | |
|---|
| 7963 | 8947 | static __init void create_trace_instances(struct dentry *d_tracer) |
|---|
| 7964 | 8948 | { |
|---|
| 8949 | + struct trace_array *tr; |
|---|
| 8950 | + |
|---|
| 7965 | 8951 | trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, |
|---|
| 7966 | 8952 | instance_mkdir, |
|---|
| 7967 | 8953 | instance_rmdir); |
|---|
| 7968 | | - if (WARN_ON(!trace_instance_dir)) |
|---|
| 8954 | + if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) |
|---|
| 7969 | 8955 | return; |
|---|
| 8956 | + |
|---|
| 8957 | + mutex_lock(&event_mutex); |
|---|
| 8958 | + mutex_lock(&trace_types_lock); |
|---|
| 8959 | + |
|---|
| 8960 | + list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
|---|
| 8961 | + if (!tr->name) |
|---|
| 8962 | + continue; |
|---|
| 8963 | + if (MEM_FAIL(trace_array_create_dir(tr) < 0, |
|---|
| 8964 | + "Failed to create instance directory\n")) |
|---|
| 8965 | + break; |
|---|
| 8966 | + } |
|---|
| 8967 | + |
|---|
| 8968 | + mutex_unlock(&trace_types_lock); |
|---|
| 8969 | + mutex_unlock(&event_mutex); |
|---|
| 7970 | 8970 | } |
|---|
| 7971 | 8971 | |
|---|
| 7972 | 8972 | static void |
|---|
| .. | .. |
|---|
| 8023 | 9023 | trace_create_file("timestamp_mode", 0444, d_tracer, tr, |
|---|
| 8024 | 9024 | &trace_time_stamp_mode_fops); |
|---|
| 8025 | 9025 | |
|---|
| 9026 | + tr->buffer_percent = 50; |
|---|
| 9027 | + |
|---|
| 9028 | + trace_create_file("buffer_percent", 0444, d_tracer, |
|---|
| 9029 | + tr, &buffer_percent_fops); |
|---|
| 9030 | + |
|---|
| 8026 | 9031 | create_trace_options_dir(tr); |
|---|
| 8027 | 9032 | |
|---|
| 8028 | 9033 | #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) |
|---|
| 8029 | | - trace_create_file("tracing_max_latency", 0644, d_tracer, |
|---|
| 8030 | | - &tr->max_latency, &tracing_max_lat_fops); |
|---|
| 9034 | + trace_create_maxlat_file(tr, d_tracer); |
|---|
| 8031 | 9035 | #endif |
|---|
| 8032 | 9036 | |
|---|
| 8033 | 9037 | if (ftrace_create_function_files(tr, d_tracer)) |
|---|
| 8034 | | - WARN(1, "Could not allocate function filter files"); |
|---|
| 9038 | + MEM_FAIL(1, "Could not allocate function filter files"); |
|---|
| 8035 | 9039 | |
|---|
| 8036 | 9040 | #ifdef CONFIG_TRACER_SNAPSHOT |
|---|
| 8037 | 9041 | trace_create_file("snapshot", 0644, d_tracer, |
|---|
| 8038 | 9042 | tr, &snapshot_fops); |
|---|
| 8039 | 9043 | #endif |
|---|
| 9044 | + |
|---|
| 9045 | + trace_create_file("error_log", 0644, d_tracer, |
|---|
| 9046 | + tr, &tracing_err_log_fops); |
|---|
| 8040 | 9047 | |
|---|
| 8041 | 9048 | for_each_tracing_cpu(cpu) |
|---|
| 8042 | 9049 | tracing_init_tracefs_percpu(tr, cpu); |
|---|
| .. | .. |
|---|
| 8044 | 9051 | ftrace_init_tracefs(tr, d_tracer); |
|---|
| 8045 | 9052 | } |
|---|
| 8046 | 9053 | |
|---|
| 9054 | +#ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT |
|---|
| 8047 | 9055 | static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore) |
|---|
| 8048 | 9056 | { |
|---|
| 8049 | 9057 | struct vfsmount *mnt; |
|---|
| .. | .. |
|---|
| 8065 | 9073 | |
|---|
| 8066 | 9074 | return mnt; |
|---|
| 8067 | 9075 | } |
|---|
| 9076 | +#endif |
|---|
| 8068 | 9077 | |
|---|
| 8069 | 9078 | /** |
|---|
| 8070 | 9079 | * tracing_init_dentry - initialize top level trace array |
|---|
| .. | .. |
|---|
| 8073 | 9082 | * directory. It is called via fs_initcall() by any of the boot up code |
|---|
| 8074 | 9083 | * and expects to return the dentry of the top level tracing directory. |
|---|
| 8075 | 9084 | */ |
|---|
| 8076 | | -struct dentry *tracing_init_dentry(void) |
|---|
| 9085 | +int tracing_init_dentry(void) |
|---|
| 8077 | 9086 | { |
|---|
| 8078 | 9087 | struct trace_array *tr = &global_trace; |
|---|
| 8079 | 9088 | |
|---|
| 9089 | + if (security_locked_down(LOCKDOWN_TRACEFS)) { |
|---|
| 9090 | + pr_warn("Tracing disabled due to lockdown\n"); |
|---|
| 9091 | + return -EPERM; |
|---|
| 9092 | + } |
|---|
| 9093 | + |
|---|
| 8080 | 9094 | /* The top level trace array uses NULL as parent */ |
|---|
| 8081 | 9095 | if (tr->dir) |
|---|
| 8082 | | - return NULL; |
|---|
| 9096 | + return 0; |
|---|
| 8083 | 9097 | |
|---|
| 8084 | | - if (WARN_ON(!tracefs_initialized()) || |
|---|
| 8085 | | - (IS_ENABLED(CONFIG_DEBUG_FS) && |
|---|
| 8086 | | - WARN_ON(!debugfs_initialized()))) |
|---|
| 8087 | | - return ERR_PTR(-ENODEV); |
|---|
| 9098 | + if (WARN_ON(!tracefs_initialized())) |
|---|
| 9099 | + return -ENODEV; |
|---|
| 8088 | 9100 | |
|---|
| 9101 | +#ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT |
|---|
| 8089 | 9102 | /* |
|---|
| 8090 | 9103 | * As there may still be users that expect the tracing |
|---|
| 8091 | 9104 | * files to exist in debugfs/tracing, we must automount |
|---|
| .. | .. |
|---|
| 8094 | 9107 | */ |
|---|
| 8095 | 9108 | tr->dir = debugfs_create_automount("tracing", NULL, |
|---|
| 8096 | 9109 | trace_automount, NULL); |
|---|
| 8097 | | - if (!tr->dir) { |
|---|
| 8098 | | - pr_warn_once("Could not create debugfs directory 'tracing'\n"); |
|---|
| 8099 | | - return ERR_PTR(-ENOMEM); |
|---|
| 8100 | | - } |
|---|
| 9110 | +#else |
|---|
| 9111 | + tr->dir = ERR_PTR(-ENODEV); |
|---|
| 9112 | +#endif |
|---|
| 8101 | 9113 | |
|---|
| 8102 | | - return NULL; |
|---|
| 9114 | + return 0; |
|---|
| 8103 | 9115 | } |
|---|
| 8104 | 9116 | |
|---|
| 8105 | 9117 | extern struct trace_eval_map *__start_ftrace_eval_maps[]; |
|---|
| .. | .. |
|---|
| 8175 | 9187 | break; |
|---|
| 8176 | 9188 | } |
|---|
| 8177 | 9189 | |
|---|
| 8178 | | - return 0; |
|---|
| 9190 | + return NOTIFY_OK; |
|---|
| 8179 | 9191 | } |
|---|
| 8180 | 9192 | |
|---|
| 8181 | 9193 | static struct notifier_block trace_module_nb = { |
|---|
| .. | .. |
|---|
| 8186 | 9198 | |
|---|
| 8187 | 9199 | static __init int tracer_init_tracefs(void) |
|---|
| 8188 | 9200 | { |
|---|
| 8189 | | - struct dentry *d_tracer; |
|---|
| 9201 | + int ret; |
|---|
| 8190 | 9202 | |
|---|
| 8191 | 9203 | trace_access_lock_init(); |
|---|
| 8192 | 9204 | |
|---|
| 8193 | | - d_tracer = tracing_init_dentry(); |
|---|
| 8194 | | - if (IS_ERR(d_tracer)) |
|---|
| 9205 | + ret = tracing_init_dentry(); |
|---|
| 9206 | + if (ret) |
|---|
| 8195 | 9207 | return 0; |
|---|
| 8196 | 9208 | |
|---|
| 8197 | 9209 | event_trace_init(); |
|---|
| 8198 | 9210 | |
|---|
| 8199 | | - init_tracer_tracefs(&global_trace, d_tracer); |
|---|
| 8200 | | - ftrace_init_tracefs_toplevel(&global_trace, d_tracer); |
|---|
| 9211 | + init_tracer_tracefs(&global_trace, NULL); |
|---|
| 9212 | + ftrace_init_tracefs_toplevel(&global_trace, NULL); |
|---|
| 8201 | 9213 | |
|---|
| 8202 | | - trace_create_file("tracing_thresh", 0644, d_tracer, |
|---|
| 9214 | + trace_create_file("tracing_thresh", 0644, NULL, |
|---|
| 8203 | 9215 | &global_trace, &tracing_thresh_fops); |
|---|
| 8204 | 9216 | |
|---|
| 8205 | | - trace_create_file("README", 0444, d_tracer, |
|---|
| 9217 | + trace_create_file("README", 0444, NULL, |
|---|
| 8206 | 9218 | NULL, &tracing_readme_fops); |
|---|
| 8207 | 9219 | |
|---|
| 8208 | | - trace_create_file("saved_cmdlines", 0444, d_tracer, |
|---|
| 9220 | + trace_create_file("saved_cmdlines", 0444, NULL, |
|---|
| 8209 | 9221 | NULL, &tracing_saved_cmdlines_fops); |
|---|
| 8210 | 9222 | |
|---|
| 8211 | | - trace_create_file("saved_cmdlines_size", 0644, d_tracer, |
|---|
| 9223 | + trace_create_file("saved_cmdlines_size", 0644, NULL, |
|---|
| 8212 | 9224 | NULL, &tracing_saved_cmdlines_size_fops); |
|---|
| 8213 | 9225 | |
|---|
| 8214 | | - trace_create_file("saved_tgids", 0444, d_tracer, |
|---|
| 9226 | + trace_create_file("saved_tgids", 0444, NULL, |
|---|
| 8215 | 9227 | NULL, &tracing_saved_tgids_fops); |
|---|
| 8216 | 9228 | |
|---|
| 8217 | 9229 | trace_eval_init(); |
|---|
| 8218 | 9230 | |
|---|
| 8219 | | - trace_create_eval_file(d_tracer); |
|---|
| 9231 | + trace_create_eval_file(NULL); |
|---|
| 8220 | 9232 | |
|---|
| 8221 | 9233 | #ifdef CONFIG_MODULES |
|---|
| 8222 | 9234 | register_module_notifier(&trace_module_nb); |
|---|
| 8223 | 9235 | #endif |
|---|
| 8224 | 9236 | |
|---|
| 8225 | 9237 | #ifdef CONFIG_DYNAMIC_FTRACE |
|---|
| 8226 | | - trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
|---|
| 8227 | | - &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
|---|
| 9238 | + trace_create_file("dyn_ftrace_total_info", 0444, NULL, |
|---|
| 9239 | + NULL, &tracing_dyn_info_fops); |
|---|
| 8228 | 9240 | #endif |
|---|
| 8229 | 9241 | |
|---|
| 8230 | | - create_trace_instances(d_tracer); |
|---|
| 9242 | + create_trace_instances(NULL); |
|---|
| 8231 | 9243 | |
|---|
| 8232 | 9244 | update_tracer_options(&global_trace); |
|---|
| 8233 | 9245 | |
|---|
| .. | .. |
|---|
| 8237 | 9249 | static int trace_panic_handler(struct notifier_block *this, |
|---|
| 8238 | 9250 | unsigned long event, void *unused) |
|---|
| 8239 | 9251 | { |
|---|
| 9252 | + bool ftrace_check = false; |
|---|
| 9253 | + |
|---|
| 9254 | + trace_android_vh_ftrace_oops_enter(&ftrace_check); |
|---|
| 9255 | + |
|---|
| 9256 | + if (ftrace_check) |
|---|
| 9257 | + return NOTIFY_OK; |
|---|
| 9258 | + |
|---|
| 8240 | 9259 | if (ftrace_dump_on_oops) |
|---|
| 8241 | 9260 | ftrace_dump(ftrace_dump_on_oops); |
|---|
| 9261 | + |
|---|
| 9262 | + trace_android_vh_ftrace_oops_exit(&ftrace_check); |
|---|
| 8242 | 9263 | return NOTIFY_OK; |
|---|
| 8243 | 9264 | } |
|---|
| 8244 | 9265 | |
|---|
| .. | .. |
|---|
| 8252 | 9273 | unsigned long val, |
|---|
| 8253 | 9274 | void *data) |
|---|
| 8254 | 9275 | { |
|---|
| 9276 | + bool ftrace_check = false; |
|---|
| 9277 | + |
|---|
| 9278 | + trace_android_vh_ftrace_oops_enter(&ftrace_check); |
|---|
| 9279 | + |
|---|
| 9280 | + if (ftrace_check) |
|---|
| 9281 | + return NOTIFY_OK; |
|---|
| 9282 | + |
|---|
| 8255 | 9283 | switch (val) { |
|---|
| 8256 | 9284 | case DIE_OOPS: |
|---|
| 8257 | 9285 | if (ftrace_dump_on_oops) |
|---|
| .. | .. |
|---|
| 8260 | 9288 | default: |
|---|
| 8261 | 9289 | break; |
|---|
| 8262 | 9290 | } |
|---|
| 9291 | + |
|---|
| 9292 | + trace_android_vh_ftrace_oops_exit(&ftrace_check); |
|---|
| 8263 | 9293 | return NOTIFY_OK; |
|---|
| 8264 | 9294 | } |
|---|
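
Both notifiers now bracket the dump with Android vendor hooks, so a vendor module can veto or annotate the `ftrace_dump()` on panic/die. A rough sketch of how such a module would attach, assuming the usual `register_trace_android_vh_*()` helpers generated for the hooks declared in `trace/hooks/ftrace_dump.h` (the handler and module names are made up):

```c
/* Hypothetical vendor-module sketch; the registration helper is
 * assumed to follow the standard vendor-hook (tracepoint) pattern.
 */
#include <linux/module.h>
#include <trace/hooks/ftrace_dump.h>

static void example_oops_enter(void *unused, bool *ftrace_check)
{
	/* Setting *ftrace_check makes trace_panic_handler() and
	 * trace_die_handler() return before calling ftrace_dump(). */
	*ftrace_check = true;
}

static int __init example_hooks_init(void)
{
	return register_trace_android_vh_ftrace_oops_enter(example_oops_enter,
							    NULL);
}
module_init(example_hooks_init);
MODULE_LICENSE("GPL");
```
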
| 8265 | 9295 | |
|---|
| .. | .. |
|---|
| 8284 | 9314 | void |
|---|
| 8285 | 9315 | trace_printk_seq(struct trace_seq *s) |
|---|
| 8286 | 9316 | { |
|---|
| 9317 | + bool dump_printk = true; |
|---|
| 9318 | + |
|---|
| 8287 | 9319 | /* Probably should print a warning here. */ |
|---|
| 8288 | 9320 | if (s->seq.len >= TRACE_MAX_PRINT) |
|---|
| 8289 | 9321 | s->seq.len = TRACE_MAX_PRINT; |
|---|
| .. | .. |
|---|
| 8299 | 9331 | /* should be zero ended, but we are paranoid. */ |
|---|
| 8300 | 9332 | s->buffer[s->seq.len] = 0; |
|---|
| 8301 | 9333 | |
|---|
| 8302 | | - printk(KERN_TRACE "%s", s->buffer); |
|---|
| 9334 | + trace_android_vh_ftrace_dump_buffer(s, &dump_printk); |
|---|
| 9335 | + if (dump_printk) |
|---|
| 9336 | + printk(KERN_TRACE "%s", s->buffer); |
|---|
| 8303 | 9337 | |
|---|
| 8304 | 9338 | trace_seq_init(s); |
|---|
| 8305 | 9339 | } |
|---|
| .. | .. |
|---|
| 8309 | 9343 | iter->tr = &global_trace; |
|---|
| 8310 | 9344 | iter->trace = iter->tr->current_trace; |
|---|
| 8311 | 9345 | iter->cpu_file = RING_BUFFER_ALL_CPUS; |
|---|
| 8312 | | - iter->trace_buffer = &global_trace.trace_buffer; |
|---|
| 9346 | + iter->array_buffer = &global_trace.array_buffer; |
|---|
| 8313 | 9347 | |
|---|
| 8314 | 9348 | if (iter->trace && iter->trace->open) |
|---|
| 8315 | 9349 | iter->trace->open(iter); |
|---|
| 8316 | 9350 | |
|---|
| 8317 | 9351 | /* Annotate start of buffers if we had overruns */ |
|---|
| 8318 | | - if (ring_buffer_overruns(iter->trace_buffer->buffer)) |
|---|
| 9352 | + if (ring_buffer_overruns(iter->array_buffer->buffer)) |
|---|
| 8319 | 9353 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
|---|
| 8320 | 9354 | |
|---|
| 8321 | 9355 | /* Output in nanoseconds only if we are using a clock in nanoseconds. */ |
|---|
| .. | .. |
|---|
| 8332 | 9366 | unsigned int old_userobj; |
|---|
| 8333 | 9367 | unsigned long flags; |
|---|
| 8334 | 9368 | int cnt = 0, cpu; |
|---|
| 9369 | + bool ftrace_check = false; |
|---|
| 9370 | + unsigned long size; |
|---|
| 8335 | 9371 | |
|---|
| 8336 | 9372 | /* Only allow one dump user at a time. */ |
|---|
| 8337 | 9373 | if (atomic_inc_return(&dump_running) != 1) { |
|---|
| .. | .. |
|---|
| 8354 | 9390 | |
|---|
| 8355 | 9391 | /* Simulate the iterator */ |
|---|
| 8356 | 9392 | trace_init_global_iter(&iter); |
|---|
| 9393 | + /* Can not use kmalloc for iter.temp */ |
|---|
| 9394 | + iter.temp = static_temp_buf; |
|---|
| 9395 | + iter.temp_size = STATIC_TEMP_BUF_SIZE; |
|---|
| 8357 | 9396 | |
|---|
| 8358 | 9397 | for_each_tracing_cpu(cpu) { |
|---|
| 8359 | | - atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); |
|---|
| 9398 | + atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); |
|---|
| 9399 | + size = ring_buffer_size(iter.array_buffer->buffer, cpu); |
|---|
| 9400 | + trace_android_vh_ftrace_size_check(size, &ftrace_check); |
|---|
| 8360 | 9401 | } |
|---|
| 8361 | 9402 | |
|---|
| 8362 | 9403 | old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; |
|---|
| 8363 | 9404 | |
|---|
| 8364 | 9405 | /* don't look at user memory in panic mode */ |
|---|
| 8365 | 9406 | tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; |
|---|
| 9407 | + |
|---|
| 9408 | + if (ftrace_check) |
|---|
| 9409 | + goto out_enable; |
|---|
| 8366 | 9410 | |
|---|
| 8367 | 9411 | switch (oops_dump_mode) { |
|---|
| 8368 | 9412 | case DUMP_ALL: |
|---|
| .. | .. |
|---|
| 8387 | 9431 | } |
|---|
| 8388 | 9432 | |
|---|
| 8389 | 9433 | /* |
|---|
| 8390 | | - * We need to stop all tracing on all CPUS to read the |
|---|
| 9434 | + * We need to stop all tracing on all CPUS to read |
|---|
| 8391 | 9435 | * the next buffer. This is a bit expensive, but is |
|---|
| 8392 | 9436 | * not done often. We fill all what we can read, |
|---|
| 8393 | 9437 | * and then release the locks again. |
|---|
| 8394 | 9438 | */ |
|---|
| 8395 | 9439 | |
|---|
| 8396 | 9440 | while (!trace_empty(&iter)) { |
|---|
| 9441 | + ftrace_check = true; |
|---|
| 8397 | 9442 | |
|---|
| 8398 | 9443 | if (!cnt) |
|---|
| 8399 | 9444 | printk(KERN_TRACE "---------------------------------\n"); |
|---|
| .. | .. |
|---|
| 8401 | 9446 | cnt++; |
|---|
| 8402 | 9447 | |
|---|
| 8403 | 9448 | trace_iterator_reset(&iter); |
|---|
| 8404 | | - iter.iter_flags |= TRACE_FILE_LAT_FMT; |
|---|
| 9449 | + trace_android_vh_ftrace_format_check(&ftrace_check); |
|---|
| 9450 | + if (ftrace_check) |
|---|
| 9451 | + iter.iter_flags |= TRACE_FILE_LAT_FMT; |
|---|
| 8405 | 9452 | |
|---|
| 8406 | 9453 | if (trace_find_next_entry_inc(&iter) != NULL) { |
|---|
| 8407 | 9454 | int ret; |
|---|
| .. | .. |
|---|
| 8424 | 9471 | tr->trace_flags |= old_userobj; |
|---|
| 8425 | 9472 | |
|---|
| 8426 | 9473 | for_each_tracing_cpu(cpu) { |
|---|
| 8427 | | - atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); |
|---|
| 9474 | + atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); |
|---|
| 8428 | 9475 | } |
|---|
| 8429 | 9476 | atomic_dec(&dump_running); |
|---|
| 8430 | 9477 | printk_nmi_direct_exit(); |
|---|
| .. | .. |
|---|
| 8523 | 9570 | int ring_buf_size; |
|---|
| 8524 | 9571 | int ret = -ENOMEM; |
|---|
| 8525 | 9572 | |
|---|
| 9573 | + |
|---|
| 9574 | + if (security_locked_down(LOCKDOWN_TRACEFS)) { |
|---|
| 9575 | + pr_warn("Tracing disabled due to lockdown\n"); |
|---|
| 9576 | + return -EPERM; |
|---|
| 9577 | + } |
|---|
| 9578 | + |
|---|
| 8526 | 9579 | /* |
|---|
| 8527 | | - * Make sure we don't accidently add more trace options |
|---|
| 9580 | + * Make sure we don't accidentally add more trace options |
|---|
| 8528 | 9581 | * than we have bits for. |
|---|
| 8529 | 9582 | */ |
|---|
| 8530 | 9583 | BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); |
|---|
| .. | .. |
|---|
| 8553 | 9606 | |
|---|
| 8554 | 9607 | /* |
|---|
| 8555 | 9608 | * The prepare callbacks allocates some memory for the ring buffer. We |
|---|
| 8556 | | - * don't free the buffer if the if the CPU goes down. If we were to free |
|---|
| 9609 | + * don't free the buffer if the CPU goes down. If we were to free |
|---|
| 8557 | 9610 | * the buffer, then the user would lose any trace that was in the |
|---|
| 8558 | 9611 | * buffer. The memory will be removed once the "instance" is removed. |
|---|
| 8559 | 9612 | */ |
|---|
| .. | .. |
|---|
| 8573 | 9626 | |
|---|
| 8574 | 9627 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
|---|
| 8575 | 9628 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { |
|---|
| 8576 | | - printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
|---|
| 8577 | | - WARN_ON(1); |
|---|
| 9629 | + MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n"); |
|---|
| 8578 | 9630 | goto out_free_savedcmd; |
|---|
| 8579 | 9631 | } |
|---|
| 8580 | 9632 | |
|---|
| .. | .. |
|---|
| 8619 | 9671 | INIT_LIST_HEAD(&global_trace.systems); |
|---|
| 8620 | 9672 | INIT_LIST_HEAD(&global_trace.events); |
|---|
| 8621 | 9673 | INIT_LIST_HEAD(&global_trace.hist_vars); |
|---|
| 9674 | + INIT_LIST_HEAD(&global_trace.err_log); |
|---|
| 8622 | 9675 | list_add(&global_trace.list, &ftrace_trace_arrays); |
|---|
| 8623 | 9676 | |
|---|
| 8624 | 9677 | apply_trace_boot_options(); |
|---|
| .. | .. |
|---|
| 8646 | 9699 | if (tracepoint_printk) { |
|---|
| 8647 | 9700 | tracepoint_print_iter = |
|---|
| 8648 | 9701 | kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); |
|---|
| 8649 | | - if (WARN_ON(!tracepoint_print_iter)) |
|---|
| 9702 | + if (MEM_FAIL(!tracepoint_print_iter, |
|---|
| 9703 | + "Failed to allocate trace iterator\n")) |
|---|
| 8650 | 9704 | tracepoint_printk = 0; |
|---|
| 8651 | 9705 | else |
|---|
| 8652 | 9706 | static_key_enable(&tracepoint_printk_key.key); |
|---|
| .. | .. |
|---|
| 8686 | 9740 | { |
|---|
| 8687 | 9741 | /* sched_clock_stable() is determined in late_initcall */ |
|---|
| 8688 | 9742 | if (!trace_boot_clock && !sched_clock_stable()) { |
|---|
| 9743 | + if (security_locked_down(LOCKDOWN_TRACEFS)) { |
|---|
| 9744 | + pr_warn("Can not set tracing clock due to lockdown\n"); |
|---|
| 9745 | + return -EPERM; |
|---|
| 9746 | + } |
|---|
| 9747 | + |
|---|
| 8689 | 9748 | printk(KERN_WARNING |
|---|
| 8690 | 9749 | "Unstable clock detected, switching default tracing clock to \"global\"\n" |
|---|
| 8691 | 9750 | "If you want to keep using the local clock, then add:\n" |
|---|