| .. | .. | 
|---|
| 17 | 17 |  #include <linux/stacktrace.h> | 
|---|
| 18 | 18 |  #include <linux/writeback.h> | 
|---|
| 19 | 19 |  #include <linux/kallsyms.h> | 
|---|
 | 20 | +#include <linux/security.h>  | 
|---|
| 20 | 21 |  #include <linux/seq_file.h> | 
|---|
| 21 | 22 |  #include <linux/notifier.h> | 
|---|
| 22 | 23 |  #include <linux/irqflags.h> | 
|---|
| .. | .. | 
|---|
| 44 | 45 |  #include <linux/trace.h> | 
|---|
| 45 | 46 |  #include <linux/sched/clock.h> | 
|---|
| 46 | 47 |  #include <linux/sched/rt.h> | 
|---|
 | 48 | +#include <linux/fsnotify.h>  | 
|---|
 | 49 | +#include <linux/irq_work.h>  | 
|---|
 | 50 | +#include <linux/workqueue.h>  | 
|---|
 | 51 | +#include <trace/hooks/ftrace_dump.h>  | 
|---|
| 47 | 52 |   | 
|---|
| 48 | 53 |  #include "trace.h" | 
|---|
| 49 | 54 |  #include "trace_output.h" | 
|---|
| .. | .. | 
|---|
| 64 | 69 |  static bool __read_mostly tracing_selftest_running; | 
|---|
| 65 | 70 |   | 
|---|
| 66 | 71 |  /* | 
|---|
| 67 |  | - * If a tracer is running, we do not want to run SELFTEST.  | 
|---|
 | 72 | + * If boot-time tracing (including tracers/events set up via the  | 
|---|
 | 73 | + * kernel cmdline) is running, we do not want to run SELFTEST.  | 
|---|
| 68 | 74 |   */ | 
|---|
| 69 | 75 |  bool __read_mostly tracing_selftest_disabled; | 
|---|
 | 76 | +  | 
|---|
 | 77 | +#ifdef CONFIG_FTRACE_STARTUP_TEST  | 
|---|
 | 78 | +void __init disable_tracing_selftest(const char *reason)  | 
|---|
 | 79 | +{  | 
|---|
 | 80 | +	if (!tracing_selftest_disabled) {  | 
|---|
 | 81 | +		tracing_selftest_disabled = true;  | 
|---|
 | 82 | +		pr_info("Ftrace startup test is disabled due to %s\n", reason);  | 
|---|
 | 83 | +	}  | 
|---|
 | 84 | +}  | 
|---|
 | 85 | +#endif  | 
|---|
| 70 | 86 |   | 
|---|
| 71 | 87 |  /* Pipe tracepoints to printk */ | 
|---|
| 72 | 88 |  struct trace_iterator *tracepoint_print_iter; | 
|---|
| .. | .. | 
|---|
| 158 | 174 |  static union trace_eval_map_item *trace_eval_maps; | 
|---|
| 159 | 175 |  #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ | 
|---|
| 160 | 176 |   | 
|---|
| 161 |  | -static int tracing_set_tracer(struct trace_array *tr, const char *buf);  | 
|---|
 | 177 | +int tracing_set_tracer(struct trace_array *tr, const char *buf);  | 
|---|
 | 178 | +static void ftrace_trace_userstack(struct trace_array *tr,  | 
|---|
 | 179 | +				   struct trace_buffer *buffer,  | 
|---|
 | 180 | +				   unsigned int trace_ctx);  | 
|---|
| 162 | 181 |   | 
|---|
| 163 | 182 |  #define MAX_TRACER_SIZE		100 | 
|---|
| 164 | 183 |  static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; | 
|---|
| .. | .. | 
|---|
| 215 | 234 |  static int __init set_trace_boot_options(char *str) | 
|---|
| 216 | 235 |  { | 
|---|
| 217 | 236 |  	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); | 
|---|
| 218 |  | -	return 0;  | 
|---|
 | 237 | +	return 1;  | 
|---|
| 219 | 238 |  } | 
|---|
| 220 | 239 |  __setup("trace_options=", set_trace_boot_options); | 
|---|
| 221 | 240 |   | 
|---|
| .. | .. | 
|---|
| 226 | 245 |  { | 
|---|
| 227 | 246 |  	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); | 
|---|
| 228 | 247 |  	trace_boot_clock = trace_boot_clock_buf; | 
|---|
| 229 |  | -	return 0;  | 
|---|
 | 248 | +	return 1;  | 
|---|
| 230 | 249 |  } | 
|---|
| 231 | 250 |  __setup("trace_clock=", set_trace_boot_clock); | 
|---|
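
Note on the two return-value changes above: an `__setup()` handler returns 1 to tell the kernel the boot option was consumed; the old `return 0` made `trace_options=` and `trace_clock=` look like unknown parameters, so their strings were passed along to init's argument or environment list.
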
| 232 | 251 |   | 
|---|
| .. | .. | 
|---|
| 248 | 267 |  	do_div(nsec, 1000); | 
|---|
| 249 | 268 |  	return nsec; | 
|---|
| 250 | 269 |  } | 
|---|
 | 270 | +  | 
|---|
 | 271 | +static void  | 
|---|
 | 272 | +trace_process_export(struct trace_export *export,  | 
|---|
 | 273 | +	       struct ring_buffer_event *event, int flag)  | 
|---|
 | 274 | +{  | 
|---|
 | 275 | +	struct trace_entry *entry;  | 
|---|
 | 276 | +	unsigned int size = 0;  | 
|---|
 | 277 | +  | 
|---|
 | 278 | +	if (export->flags & flag) {  | 
|---|
 | 279 | +		entry = ring_buffer_event_data(event);  | 
|---|
 | 280 | +		size = ring_buffer_event_length(event);  | 
|---|
 | 281 | +		export->write(export, entry, size);  | 
|---|
 | 282 | +	}  | 
|---|
 | 283 | +}  | 
|---|
 | 284 | +  | 
|---|
 | 285 | +static DEFINE_MUTEX(ftrace_export_lock);  | 
|---|
 | 286 | +  | 
|---|
 | 287 | +static struct trace_export __rcu *ftrace_exports_list __read_mostly;  | 
|---|
 | 288 | +  | 
|---|
 | 289 | +static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);  | 
|---|
 | 290 | +static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);  | 
|---|
 | 291 | +static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);  | 
|---|
 | 292 | +  | 
|---|
 | 293 | +static inline void ftrace_exports_enable(struct trace_export *export)  | 
|---|
 | 294 | +{  | 
|---|
 | 295 | +	if (export->flags & TRACE_EXPORT_FUNCTION)  | 
|---|
 | 296 | +		static_branch_inc(&trace_function_exports_enabled);  | 
|---|
 | 297 | +  | 
|---|
 | 298 | +	if (export->flags & TRACE_EXPORT_EVENT)  | 
|---|
 | 299 | +		static_branch_inc(&trace_event_exports_enabled);  | 
|---|
 | 300 | +  | 
|---|
 | 301 | +	if (export->flags & TRACE_EXPORT_MARKER)  | 
|---|
 | 302 | +		static_branch_inc(&trace_marker_exports_enabled);  | 
|---|
 | 303 | +}  | 
|---|
 | 304 | +  | 
|---|
 | 305 | +static inline void ftrace_exports_disable(struct trace_export *export)  | 
|---|
 | 306 | +{  | 
|---|
 | 307 | +	if (export->flags & TRACE_EXPORT_FUNCTION)  | 
|---|
 | 308 | +		static_branch_dec(&trace_function_exports_enabled);  | 
|---|
 | 309 | +  | 
|---|
 | 310 | +	if (export->flags & TRACE_EXPORT_EVENT)  | 
|---|
 | 311 | +		static_branch_dec(&trace_event_exports_enabled);  | 
|---|
 | 312 | +  | 
|---|
 | 313 | +	if (export->flags & TRACE_EXPORT_MARKER)  | 
|---|
 | 314 | +		static_branch_dec(&trace_marker_exports_enabled);  | 
|---|
 | 315 | +}  | 
|---|
 | 316 | +  | 
|---|
 | 317 | +static void ftrace_exports(struct ring_buffer_event *event, int flag)  | 
|---|
 | 318 | +{  | 
|---|
 | 319 | +	struct trace_export *export;  | 
|---|
 | 320 | +  | 
|---|
 | 321 | +	preempt_disable_notrace();  | 
|---|
 | 322 | +  | 
|---|
 | 323 | +	export = rcu_dereference_raw_check(ftrace_exports_list);  | 
|---|
 | 324 | +	while (export) {  | 
|---|
 | 325 | +		trace_process_export(export, event, flag);  | 
|---|
 | 326 | +		export = rcu_dereference_raw_check(export->next);  | 
|---|
 | 327 | +	}  | 
|---|
 | 328 | +  | 
|---|
 | 329 | +	preempt_enable_notrace();  | 
|---|
 | 330 | +}  | 
|---|
 | 331 | +  | 
|---|
 | 332 | +static inline void  | 
|---|
 | 333 | +add_trace_export(struct trace_export **list, struct trace_export *export)  | 
|---|
 | 334 | +{  | 
|---|
 | 335 | +	rcu_assign_pointer(export->next, *list);  | 
|---|
 | 336 | +	/*  | 
|---|
 | 337 | +	 * We are entering export into the list but another  | 
|---|
 | 338 | +	 * CPU might be walking that list. We need to make sure  | 
|---|
 | 339 | +	 * the export->next pointer is valid before another CPU sees  | 
|---|
 | 340 | +	 * the export pointer included in the list.  | 
|---|
 | 341 | +	 */  | 
|---|
 | 342 | +	rcu_assign_pointer(*list, export);  | 
|---|
 | 343 | +}  | 
|---|
 | 344 | +  | 
|---|
 | 345 | +static inline int  | 
|---|
 | 346 | +rm_trace_export(struct trace_export **list, struct trace_export *export)  | 
|---|
 | 347 | +{  | 
|---|
 | 348 | +	struct trace_export **p;  | 
|---|
 | 349 | +  | 
|---|
 | 350 | +	for (p = list; *p != NULL; p = &(*p)->next)  | 
|---|
 | 351 | +		if (*p == export)  | 
|---|
 | 352 | +			break;  | 
|---|
 | 353 | +  | 
|---|
 | 354 | +	if (*p != export)  | 
|---|
 | 355 | +		return -1;  | 
|---|
 | 356 | +  | 
|---|
 | 357 | +	rcu_assign_pointer(*p, (*p)->next);  | 
|---|
 | 358 | +  | 
|---|
 | 359 | +	return 0;  | 
|---|
 | 360 | +}  | 
|---|
 | 361 | +  | 
|---|
 | 362 | +static inline void  | 
|---|
 | 363 | +add_ftrace_export(struct trace_export **list, struct trace_export *export)  | 
|---|
 | 364 | +{  | 
|---|
 | 365 | +	ftrace_exports_enable(export);  | 
|---|
 | 366 | +  | 
|---|
 | 367 | +	add_trace_export(list, export);  | 
|---|
 | 368 | +}  | 
|---|
 | 369 | +  | 
|---|
 | 370 | +static inline int  | 
|---|
 | 371 | +rm_ftrace_export(struct trace_export **list, struct trace_export *export)  | 
|---|
 | 372 | +{  | 
|---|
 | 373 | +	int ret;  | 
|---|
 | 374 | +  | 
|---|
 | 375 | +	ret = rm_trace_export(list, export);  | 
|---|
 | 376 | +	ftrace_exports_disable(export);  | 
|---|
 | 377 | +  | 
|---|
 | 378 | +	return ret;  | 
|---|
 | 379 | +}  | 
|---|
 | 380 | +  | 
|---|
 | 381 | +int register_ftrace_export(struct trace_export *export)  | 
|---|
 | 382 | +{  | 
|---|
 | 383 | +	if (WARN_ON_ONCE(!export->write))  | 
|---|
 | 384 | +		return -1;  | 
|---|
 | 385 | +  | 
|---|
 | 386 | +	mutex_lock(&ftrace_export_lock);  | 
|---|
 | 387 | +  | 
|---|
 | 388 | +	add_ftrace_export(&ftrace_exports_list, export);  | 
|---|
 | 389 | +  | 
|---|
 | 390 | +	mutex_unlock(&ftrace_export_lock);  | 
|---|
 | 391 | +  | 
|---|
 | 392 | +	return 0;  | 
|---|
 | 393 | +}  | 
|---|
 | 394 | +EXPORT_SYMBOL_GPL(register_ftrace_export);  | 
|---|
 | 395 | +  | 
|---|
 | 396 | +int unregister_ftrace_export(struct trace_export *export)  | 
|---|
 | 397 | +{  | 
|---|
 | 398 | +	int ret;  | 
|---|
 | 399 | +  | 
|---|
 | 400 | +	mutex_lock(&ftrace_export_lock);  | 
|---|
 | 401 | +  | 
|---|
 | 402 | +	ret = rm_ftrace_export(&ftrace_exports_list, export);  | 
|---|
 | 403 | +  | 
|---|
 | 404 | +	mutex_unlock(&ftrace_export_lock);  | 
|---|
 | 405 | +  | 
|---|
 | 406 | +	return ret;  | 
|---|
 | 407 | +}  | 
|---|
 | 408 | +EXPORT_SYMBOL_GPL(unregister_ftrace_export);  | 
|---|
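
For orientation, a minimal, hypothetical sketch of a client of the export interface registered above. The callback signature follows `struct trace_export` in `include/linux/trace.h`; every `my_*` name is invented:

```c
#include <linux/module.h>
#include <linux/trace.h>

/* Forward raw trace entries to some custom sink (illustrative only). */
static void my_export_write(struct trace_export *export,
			    const void *entry, unsigned int len)
{
	/* Called from trace_process_export() in tracing context:
	 * keep this fast and non-blocking. */
}

static struct trace_export my_export = {
	.write = my_export_write,
	.flags = TRACE_EXPORT_FUNCTION,	/* or TRACE_EXPORT_EVENT / _MARKER */
};

static int __init my_export_init(void)
{
	return register_ftrace_export(&my_export);
}

static void __exit my_export_exit(void)
{
	unregister_ftrace_export(&my_export);
}

module_init(my_export_init);
module_exit(my_export_exit);
MODULE_LICENSE("GPL");
```

Registering bumps the matching `trace_*_exports_enabled` static branch, so the export path costs nothing while no exporter is attached.
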
| 251 | 409 |   | 
|---|
| 252 | 410 |  /* trace_flags holds trace_options default values */ | 
|---|
| 253 | 411 |  #define TRACE_DEFAULT_FLAGS						\ | 
|---|
| .. | .. | 
|---|
| 299 | 457 |  	this_tr->ref--; | 
|---|
| 300 | 458 |  } | 
|---|
| 301 | 459 |   | 
|---|
 | 460 | +/**  | 
|---|
 | 461 | + * trace_array_put - Decrement the reference counter for this trace array.  | 
|---|
 | 462 | + *  | 
|---|
 | 463 | + * NOTE: Use this when we no longer need the trace array returned by  | 
|---|
 | 464 | + * trace_array_get_by_name(). This ensures the trace array can later  | 
|---|
 | 465 | + * be destroyed.  | 
|---|
 | 466 | + *  | 
|---|
 | 467 | + */  | 
|---|
| 302 | 468 |  void trace_array_put(struct trace_array *this_tr) | 
|---|
| 303 | 469 |  { | 
|---|
 | 470 | +	if (!this_tr)  | 
|---|
 | 471 | +		return;  | 
|---|
 | 472 | +  | 
|---|
| 304 | 473 |  	mutex_lock(&trace_types_lock); | 
|---|
| 305 | 474 |  	__trace_array_put(this_tr); | 
|---|
| 306 | 475 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 307 | 476 |  } | 
|---|
 | 477 | +EXPORT_SYMBOL_GPL(trace_array_put);  | 
|---|
 | 478 | +  | 
|---|
 | 479 | +int tracing_check_open_get_tr(struct trace_array *tr)  | 
|---|
 | 480 | +{  | 
|---|
 | 481 | +	int ret;  | 
|---|
 | 482 | +  | 
|---|
 | 483 | +	ret = security_locked_down(LOCKDOWN_TRACEFS);  | 
|---|
 | 484 | +	if (ret)  | 
|---|
 | 485 | +		return ret;  | 
|---|
 | 486 | +  | 
|---|
 | 487 | +	if (tracing_disabled)  | 
|---|
 | 488 | +		return -ENODEV;  | 
|---|
 | 489 | +  | 
|---|
 | 490 | +	if (tr && trace_array_get(tr) < 0)  | 
|---|
 | 491 | +		return -ENODEV;  | 
|---|
 | 492 | +  | 
|---|
 | 493 | +	return 0;  | 
|---|
 | 494 | +}  | 
|---|
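
The new `tracing_check_open_get_tr()` helper centralizes the checks a tracefs open handler needs: the lockdown test, the `tracing_disabled` test, and taking a reference on the trace array. A hedged sketch of the intended call pattern, where `example_open` and `do_example_setup` are hypothetical:

```c
static int example_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);	/* lockdown check + ref */
	if (ret)
		return ret;

	ret = do_example_setup(file);		/* hypothetical setup work */
	if (ret)
		trace_array_put(tr);		/* drop the ref on failure */

	return ret;	/* on success, ->release() calls trace_array_put() */
}
```
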
| 308 | 495 |   | 
|---|
| 309 | 496 |  int call_filter_check_discard(struct trace_event_call *call, void *rec, | 
|---|
| 310 |  | -			      struct ring_buffer *buffer,  | 
|---|
 | 497 | +			      struct trace_buffer *buffer,  | 
|---|
| 311 | 498 |  			      struct ring_buffer_event *event) | 
|---|
| 312 | 499 |  { | 
|---|
| 313 | 500 |  	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && | 
|---|
| .. | .. | 
|---|
| 355 | 542 |   * Returns false if @task should be traced. | 
|---|
| 356 | 543 |   */ | 
|---|
| 357 | 544 |  bool | 
|---|
| 358 |  | -trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)  | 
|---|
 | 545 | +trace_ignore_this_task(struct trace_pid_list *filtered_pids,  | 
|---|
 | 546 | +		       struct trace_pid_list *filtered_no_pids,  | 
|---|
 | 547 | +		       struct task_struct *task)  | 
|---|
| 359 | 548 |  { | 
|---|
| 360 | 549 |  	/* | 
|---|
| 361 |  | -	 * Return false, because if filtered_pids does not exist,  | 
|---|
| 362 |  | -	 * all pids are good to trace.  | 
|---|
 | 550 | +	 * If filtered_no_pids is not empty, and the task's pid is listed  | 
|---|
 | 551 | +	 * in filtered_no_pids, then return true.  | 
|---|
 | 552 | +	 * Otherwise, if filtered_pids is empty, that means we can  | 
|---|
 | 553 | +	 * trace all tasks. If it has content, then only trace pids  | 
|---|
 | 554 | +	 * within filtered_pids.  | 
|---|
| 363 | 555 |  	 */ | 
|---|
| 364 |  | -	if (!filtered_pids)  | 
|---|
| 365 |  | -		return false;  | 
|---|
| 366 | 556 |   | 
|---|
| 367 |  | -	return !trace_find_filtered_pid(filtered_pids, task->pid);  | 
|---|
 | 557 | +	return (filtered_pids &&  | 
|---|
 | 558 | +		!trace_find_filtered_pid(filtered_pids, task->pid)) ||  | 
|---|
 | 559 | +		(filtered_no_pids &&  | 
|---|
 | 560 | +		 trace_find_filtered_pid(filtered_no_pids, task->pid));  | 
|---|
| 368 | 561 |  } | 
|---|
| 369 | 562 |   | 
|---|
| 370 | 563 |  /** | 
|---|
| 371 |  | - * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list  | 
|---|
 | 564 | + * trace_filter_add_remove_task - Add or remove a task from a pid_list  | 
|---|
| 372 | 565 |   * @pid_list: The list to modify | 
|---|
| 373 | 566 |   * @self: The current task for fork or NULL for exit | 
|---|
| 374 | 567 |   * @task: The task to add or remove | 
|---|
| .. | .. | 
|---|
| 572 | 765 |  	return read; | 
|---|
| 573 | 766 |  } | 
|---|
| 574 | 767 |   | 
|---|
| 575 |  | -static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)  | 
|---|
 | 768 | +static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)  | 
|---|
| 576 | 769 |  { | 
|---|
| 577 | 770 |  	u64 ts; | 
|---|
| 578 | 771 |   | 
|---|
| .. | .. | 
|---|
| 588 | 781 |   | 
|---|
| 589 | 782 |  u64 ftrace_now(int cpu) | 
|---|
| 590 | 783 |  { | 
|---|
| 591 |  | -	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);  | 
|---|
 | 784 | +	return buffer_ftrace_now(&global_trace.array_buffer, cpu);  | 
|---|
| 592 | 785 |  } | 
|---|
| 593 | 786 |   | 
|---|
| 594 | 787 |  /** | 
|---|
| .. | .. | 
|---|
| 716 | 909 |  #endif | 
|---|
| 717 | 910 |   | 
|---|
| 718 | 911 |  #ifdef CONFIG_STACKTRACE | 
|---|
| 719 |  | -static void __ftrace_trace_stack(struct ring_buffer *buffer,  | 
|---|
| 720 |  | -				 unsigned long flags,  | 
|---|
| 721 |  | -				 int skip, int pc, struct pt_regs *regs);  | 
|---|
 | 912 | +static void __ftrace_trace_stack(struct trace_buffer *buffer,  | 
|---|
 | 913 | +				 unsigned int trace_ctx,  | 
|---|
 | 914 | +				 int skip, struct pt_regs *regs);  | 
|---|
| 722 | 915 |  static inline void ftrace_trace_stack(struct trace_array *tr, | 
|---|
| 723 |  | -				      struct ring_buffer *buffer,  | 
|---|
| 724 |  | -				      unsigned long flags,  | 
|---|
| 725 |  | -				      int skip, int pc, struct pt_regs *regs);  | 
|---|
 | 916 | +				      struct trace_buffer *buffer,  | 
|---|
 | 917 | +				      unsigned int trace_ctx,  | 
|---|
 | 918 | +				      int skip, struct pt_regs *regs);  | 
|---|
| 726 | 919 |   | 
|---|
| 727 | 920 |  #else | 
|---|
| 728 |  | -static inline void __ftrace_trace_stack(struct ring_buffer *buffer,  | 
|---|
| 729 |  | -					unsigned long flags,  | 
|---|
| 730 |  | -					int skip, int pc, struct pt_regs *regs)  | 
|---|
 | 921 | +static inline void __ftrace_trace_stack(struct trace_buffer *buffer,  | 
|---|
 | 922 | +					unsigned int trace_ctx,  | 
|---|
 | 923 | +					int skip, struct pt_regs *regs)  | 
|---|
| 731 | 924 |  { | 
|---|
| 732 | 925 |  } | 
|---|
| 733 | 926 |  static inline void ftrace_trace_stack(struct trace_array *tr, | 
|---|
| 734 |  | -				      struct ring_buffer *buffer,  | 
|---|
| 735 |  | -				      unsigned long flags,  | 
|---|
| 736 |  | -				      int skip, int pc, struct pt_regs *regs)  | 
|---|
 | 927 | +				      struct trace_buffer *buffer,  | 
|---|
 | 928 | +				      unsigned int trace_ctx,  | 
|---|
 | 929 | +				      int skip, struct pt_regs *regs)  | 
|---|
| 737 | 930 |  { | 
|---|
| 738 | 931 |  } | 
|---|
| 739 | 932 |   | 
|---|
| .. | .. | 
|---|
| 741 | 934 |   | 
|---|
| 742 | 935 |  static __always_inline void | 
|---|
| 743 | 936 |  trace_event_setup(struct ring_buffer_event *event, | 
|---|
| 744 |  | -		  int type, unsigned long flags, int pc)  | 
|---|
 | 937 | +		  int type, unsigned int trace_ctx)  | 
|---|
| 745 | 938 |  { | 
|---|
| 746 | 939 |  	struct trace_entry *ent = ring_buffer_event_data(event); | 
|---|
| 747 | 940 |   | 
|---|
| 748 |  | -	tracing_generic_entry_update(ent, flags, pc);  | 
|---|
| 749 |  | -	ent->type = type;  | 
|---|
 | 941 | +	tracing_generic_entry_update(ent, type, trace_ctx);  | 
|---|
| 750 | 942 |  } | 
|---|
| 751 | 943 |   | 
|---|
| 752 | 944 |  static __always_inline struct ring_buffer_event * | 
|---|
| 753 |  | -__trace_buffer_lock_reserve(struct ring_buffer *buffer,  | 
|---|
 | 945 | +__trace_buffer_lock_reserve(struct trace_buffer *buffer,  | 
|---|
| 754 | 946 |  			  int type, | 
|---|
| 755 | 947 |  			  unsigned long len, | 
|---|
| 756 |  | -			  unsigned long flags, int pc)  | 
|---|
 | 948 | +			  unsigned int trace_ctx)  | 
|---|
| 757 | 949 |  { | 
|---|
| 758 | 950 |  	struct ring_buffer_event *event; | 
|---|
| 759 | 951 |   | 
|---|
| 760 | 952 |  	event = ring_buffer_lock_reserve(buffer, len); | 
|---|
| 761 | 953 |  	if (event != NULL) | 
|---|
| 762 |  | -		trace_event_setup(event, type, flags, pc);  | 
|---|
 | 954 | +		trace_event_setup(event, type, trace_ctx);  | 
|---|
| 763 | 955 |   | 
|---|
| 764 | 956 |  	return event; | 
|---|
| 765 | 957 |  } | 
|---|
| 766 | 958 |   | 
|---|
| 767 | 959 |  void tracer_tracing_on(struct trace_array *tr) | 
|---|
| 768 | 960 |  { | 
|---|
| 769 |  | -	if (tr->trace_buffer.buffer)  | 
|---|
| 770 |  | -		ring_buffer_record_on(tr->trace_buffer.buffer);  | 
|---|
 | 961 | +	if (tr->array_buffer.buffer)  | 
|---|
 | 962 | +		ring_buffer_record_on(tr->array_buffer.buffer);  | 
|---|
| 771 | 963 |  	/* | 
|---|
| 772 | 964 |  	 * This flag is looked at when buffers haven't been allocated | 
|---|
| 773 | 965 |  	 * yet, or by some tracers (like irqsoff), that just want to | 
|---|
| .. | .. | 
|---|
| 795 | 987 |   | 
|---|
| 796 | 988 |   | 
|---|
| 797 | 989 |  static __always_inline void | 
|---|
| 798 |  | -__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)  | 
|---|
 | 990 | +__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)  | 
|---|
| 799 | 991 |  { | 
|---|
| 800 | 992 |  	__this_cpu_write(trace_taskinfo_save, true); | 
|---|
| 801 | 993 |   | 
|---|
| .. | .. | 
|---|
| 818 | 1010 |  int __trace_puts(unsigned long ip, const char *str, int size) | 
|---|
| 819 | 1011 |  { | 
|---|
| 820 | 1012 |  	struct ring_buffer_event *event; | 
|---|
| 821 |  | -	struct ring_buffer *buffer;  | 
|---|
 | 1013 | +	struct trace_buffer *buffer;  | 
|---|
| 822 | 1014 |  	struct print_entry *entry; | 
|---|
| 823 |  | -	unsigned long irq_flags;  | 
|---|
 | 1015 | +	unsigned int trace_ctx;  | 
|---|
| 824 | 1016 |  	int alloc; | 
|---|
| 825 |  | -	int pc;  | 
|---|
| 826 | 1017 |   | 
|---|
| 827 | 1018 |  	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) | 
|---|
| 828 | 1019 |  		return 0; | 
|---|
| 829 |  | -  | 
|---|
| 830 |  | -	pc = preempt_count();  | 
|---|
| 831 | 1020 |   | 
|---|
| 832 | 1021 |  	if (unlikely(tracing_selftest_running || tracing_disabled)) | 
|---|
| 833 | 1022 |  		return 0; | 
|---|
| 834 | 1023 |   | 
|---|
| 835 | 1024 |  	alloc = sizeof(*entry) + size + 2; /* possible \n added */ | 
|---|
| 836 | 1025 |   | 
|---|
| 837 |  | -	local_save_flags(irq_flags);  | 
|---|
| 838 |  | -	buffer = global_trace.trace_buffer.buffer;  | 
|---|
| 839 |  | -	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,   | 
|---|
| 840 |  | -					    irq_flags, pc);  | 
|---|
| 841 |  | -	if (!event)  | 
|---|
| 842 |  | -		return 0;  | 
|---|
 | 1026 | +	trace_ctx = tracing_gen_ctx();  | 
|---|
 | 1027 | +	buffer = global_trace.array_buffer.buffer;  | 
|---|
 | 1028 | +	ring_buffer_nest_start(buffer);  | 
|---|
 | 1029 | +	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,  | 
|---|
 | 1030 | +					    trace_ctx);  | 
|---|
 | 1031 | +	if (!event) {  | 
|---|
 | 1032 | +		size = 0;  | 
|---|
 | 1033 | +		goto out;  | 
|---|
 | 1034 | +	}  | 
|---|
| 843 | 1035 |   | 
|---|
| 844 | 1036 |  	entry = ring_buffer_event_data(event); | 
|---|
| 845 | 1037 |  	entry->ip = ip; | 
|---|
| .. | .. | 
|---|
| 854 | 1046 |  		entry->buf[size] = '\0'; | 
|---|
| 855 | 1047 |   | 
|---|
| 856 | 1048 |  	__buffer_unlock_commit(buffer, event); | 
|---|
| 857 |  | -	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);  | 
|---|
| 858 |  | -  | 
|---|
 | 1049 | +	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);  | 
|---|
 | 1050 | + out:  | 
|---|
 | 1051 | +	ring_buffer_nest_end(buffer);  | 
|---|
| 859 | 1052 |  	return size; | 
|---|
| 860 | 1053 |  } | 
|---|
| 861 | 1054 |  EXPORT_SYMBOL_GPL(__trace_puts); | 
|---|
| .. | .. | 
|---|
| 868 | 1061 |  int __trace_bputs(unsigned long ip, const char *str) | 
|---|
| 869 | 1062 |  { | 
|---|
| 870 | 1063 |  	struct ring_buffer_event *event; | 
|---|
| 871 |  | -	struct ring_buffer *buffer;  | 
|---|
 | 1064 | +	struct trace_buffer *buffer;  | 
|---|
| 872 | 1065 |  	struct bputs_entry *entry; | 
|---|
| 873 |  | -	unsigned long irq_flags;  | 
|---|
 | 1066 | +	unsigned int trace_ctx;  | 
|---|
| 874 | 1067 |  	int size = sizeof(struct bputs_entry); | 
|---|
| 875 |  | -	int pc;  | 
|---|
 | 1068 | +	int ret = 0;  | 
|---|
| 876 | 1069 |   | 
|---|
| 877 | 1070 |  	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) | 
|---|
| 878 | 1071 |  		return 0; | 
|---|
| 879 | 1072 |   | 
|---|
| 880 |  | -	pc = preempt_count();  | 
|---|
| 881 |  | -  | 
|---|
| 882 | 1073 |  	if (unlikely(tracing_selftest_running || tracing_disabled)) | 
|---|
| 883 | 1074 |  		return 0; | 
|---|
| 884 | 1075 |   | 
|---|
| 885 |  | -	local_save_flags(irq_flags);  | 
|---|
| 886 |  | -	buffer = global_trace.trace_buffer.buffer;  | 
|---|
 | 1076 | +	trace_ctx = tracing_gen_ctx();  | 
|---|
 | 1077 | +	buffer = global_trace.array_buffer.buffer;  | 
|---|
 | 1078 | +  | 
|---|
 | 1079 | +	ring_buffer_nest_start(buffer);  | 
|---|
| 887 | 1080 |  	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, | 
|---|
| 888 |  | -					    irq_flags, pc);  | 
|---|
 | 1081 | +					    trace_ctx);  | 
|---|
| 889 | 1082 |  	if (!event) | 
|---|
| 890 |  | -		return 0;  | 
|---|
 | 1083 | +		goto out;  | 
|---|
| 891 | 1084 |   | 
|---|
| 892 | 1085 |  	entry = ring_buffer_event_data(event); | 
|---|
| 893 | 1086 |  	entry->ip			= ip; | 
|---|
| 894 | 1087 |  	entry->str			= str; | 
|---|
| 895 | 1088 |   | 
|---|
| 896 | 1089 |  	__buffer_unlock_commit(buffer, event); | 
|---|
| 897 |  | -	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);  | 
|---|
 | 1090 | +	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);  | 
|---|
| 898 | 1091 |   | 
|---|
| 899 |  | -	return 1;  | 
|---|
 | 1092 | +	ret = 1;  | 
|---|
 | 1093 | + out:  | 
|---|
 | 1094 | +	ring_buffer_nest_end(buffer);  | 
|---|
 | 1095 | +	return ret;  | 
|---|
| 900 | 1096 |  } | 
|---|
| 901 | 1097 |  EXPORT_SYMBOL_GPL(__trace_bputs); | 
|---|
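
The `ring_buffer_nest_start()`/`ring_buffer_nest_end()` pairs added to `__trace_puts()` and `__trace_bputs()` mark these writes as intentional nesting, so a `trace_printk()` issued from inside another ring-buffer write (for example, from an event trigger) is not rejected by the buffer's recursion protection.
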
| 902 | 1098 |   | 
|---|
| 903 | 1099 |  #ifdef CONFIG_TRACER_SNAPSHOT | 
|---|
| 904 |  | -void tracing_snapshot_instance(struct trace_array *tr)  | 
|---|
 | 1100 | +static void tracing_snapshot_instance_cond(struct trace_array *tr,  | 
|---|
 | 1101 | +					   void *cond_data)  | 
|---|
| 905 | 1102 |  { | 
|---|
| 906 | 1103 |  	struct tracer *tracer = tr->current_trace; | 
|---|
| 907 | 1104 |  	unsigned long flags; | 
|---|
| .. | .. | 
|---|
| 927 | 1124 |  	} | 
|---|
| 928 | 1125 |   | 
|---|
| 929 | 1126 |  	local_irq_save(flags); | 
|---|
| 930 |  | -	update_max_tr(tr, current, smp_processor_id());  | 
|---|
 | 1127 | +	update_max_tr(tr, current, smp_processor_id(), cond_data);  | 
|---|
| 931 | 1128 |  	local_irq_restore(flags); | 
|---|
 | 1129 | +}  | 
|---|
 | 1130 | +  | 
|---|
 | 1131 | +void tracing_snapshot_instance(struct trace_array *tr)  | 
|---|
 | 1132 | +{  | 
|---|
 | 1133 | +	tracing_snapshot_instance_cond(tr, NULL);  | 
|---|
| 932 | 1134 |  } | 
|---|
| 933 | 1135 |   | 
|---|
| 934 | 1136 |  /** | 
|---|
| .. | .. | 
|---|
| 953 | 1155 |  } | 
|---|
| 954 | 1156 |  EXPORT_SYMBOL_GPL(tracing_snapshot); | 
|---|
| 955 | 1157 |   | 
|---|
| 956 |  | -static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,  | 
|---|
| 957 |  | -					struct trace_buffer *size_buf, int cpu_id);  | 
|---|
| 958 |  | -static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);  | 
|---|
 | 1158 | +/**  | 
|---|
 | 1159 | + * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.  | 
|---|
 | 1160 | + * @tr:		The tracing instance to snapshot  | 
|---|
 | 1161 | + * @cond_data:	The data to be tested conditionally, and possibly saved  | 
|---|
 | 1162 | + *  | 
|---|
 | 1163 | + * This is the same as tracing_snapshot() except that the snapshot is  | 
|---|
 | 1164 | + * conditional - the snapshot will only happen if the  | 
|---|
 | 1165 | + * cond_snapshot.update() implementation receiving the cond_data  | 
|---|
 | 1166 | + * returns true, which means that the trace array's cond_snapshot  | 
|---|
 | 1167 | + * update() operation used the cond_data to determine whether the  | 
|---|
 | 1168 | + * snapshot should be taken, and if it was, presumably saved it along  | 
|---|
 | 1169 | + * with the snapshot.  | 
|---|
 | 1170 | + */  | 
|---|
 | 1171 | +void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)  | 
|---|
 | 1172 | +{  | 
|---|
 | 1173 | +	tracing_snapshot_instance_cond(tr, cond_data);  | 
|---|
 | 1174 | +}  | 
|---|
 | 1175 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond);  | 
|---|
 | 1176 | +  | 
|---|
 | 1177 | +/**  | 
|---|
 | 1178 | + * tracing_cond_snapshot_data - get the user data associated with a snapshot  | 
|---|
 | 1179 | + * @tr:		The tracing instance  | 
|---|
 | 1180 | + *  | 
|---|
 | 1181 | + * When the user enables a conditional snapshot using  | 
|---|
 | 1182 | + * tracing_snapshot_cond_enable(), the user-defined cond_data is saved  | 
|---|
 | 1183 | + * with the snapshot.  This accessor is used to retrieve it.  | 
|---|
 | 1184 | + *  | 
|---|
 | 1185 | + * Should not be called from cond_snapshot.update(), since it takes  | 
|---|
 | 1186 | + * the tr->max_lock lock, which is already held by the code  | 
|---|
 | 1187 | + * calling cond_snapshot.update().  | 
|---|
 | 1188 | + *  | 
|---|
 | 1189 | + * Returns the cond_data associated with the trace array's snapshot.  | 
|---|
 | 1190 | + */  | 
|---|
 | 1191 | +void *tracing_cond_snapshot_data(struct trace_array *tr)  | 
|---|
 | 1192 | +{  | 
|---|
 | 1193 | +	void *cond_data = NULL;  | 
|---|
 | 1194 | +  | 
|---|
 | 1195 | +	local_irq_disable();  | 
|---|
 | 1196 | +	arch_spin_lock(&tr->max_lock);  | 
|---|
 | 1197 | +  | 
|---|
 | 1198 | +	if (tr->cond_snapshot)  | 
|---|
 | 1199 | +		cond_data = tr->cond_snapshot->cond_data;  | 
|---|
 | 1200 | +  | 
|---|
 | 1201 | +	arch_spin_unlock(&tr->max_lock);  | 
|---|
 | 1202 | +	local_irq_enable();  | 
|---|
 | 1203 | +  | 
|---|
 | 1204 | +	return cond_data;  | 
|---|
 | 1205 | +}  | 
|---|
 | 1206 | +EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);  | 
|---|
 | 1207 | +  | 
|---|
 | 1208 | +static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,  | 
|---|
 | 1209 | +					struct array_buffer *size_buf, int cpu_id);  | 
|---|
 | 1210 | +static void set_buffer_entries(struct array_buffer *buf, unsigned long val);  | 
|---|
| 959 | 1211 |   | 
|---|
| 960 | 1212 |  int tracing_alloc_snapshot_instance(struct trace_array *tr) | 
|---|
| 961 | 1213 |  { | 
|---|
| .. | .. | 
|---|
| 965 | 1217 |   | 
|---|
| 966 | 1218 |  		/* allocate spare buffer */ | 
|---|
| 967 | 1219 |  		ret = resize_buffer_duplicate_size(&tr->max_buffer, | 
|---|
| 968 |  | -				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);  | 
|---|
 | 1220 | +				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);  | 
|---|
| 969 | 1221 |  		if (ret < 0) | 
|---|
| 970 | 1222 |  			return ret; | 
|---|
| 971 | 1223 |   | 
|---|
| .. | .. | 
|---|
| 1032 | 1284 |  	tracing_snapshot(); | 
|---|
| 1033 | 1285 |  } | 
|---|
| 1034 | 1286 |  EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); | 
|---|
 | 1287 | +  | 
|---|
 | 1288 | +/**  | 
|---|
 | 1289 | + * tracing_snapshot_cond_enable - enable conditional snapshot for an instance  | 
|---|
 | 1290 | + * @tr:		The tracing instance  | 
|---|
 | 1291 | + * @cond_data:	User data to associate with the snapshot  | 
|---|
 | 1292 | + * @update:	Implementation of the cond_snapshot update function  | 
|---|
 | 1293 | + *  | 
|---|
 | 1294 | + * Check whether the conditional snapshot for the given instance has  | 
|---|
 | 1295 | + * already been enabled, or if the current tracer is already using a  | 
|---|
 | 1296 | + * snapshot; if so, return -EBUSY, else create a cond_snapshot and  | 
|---|
 | 1297 | + * save the cond_data and update function inside.  | 
|---|
 | 1298 | + *  | 
|---|
 | 1299 | + * Returns 0 if successful, error otherwise.  | 
|---|
 | 1300 | + */  | 
|---|
 | 1301 | +int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,  | 
|---|
 | 1302 | +				 cond_update_fn_t update)  | 
|---|
 | 1303 | +{  | 
|---|
 | 1304 | +	struct cond_snapshot *cond_snapshot;  | 
|---|
 | 1305 | +	int ret = 0;  | 
|---|
 | 1306 | +  | 
|---|
 | 1307 | +	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);  | 
|---|
 | 1308 | +	if (!cond_snapshot)  | 
|---|
 | 1309 | +		return -ENOMEM;  | 
|---|
 | 1310 | +  | 
|---|
 | 1311 | +	cond_snapshot->cond_data = cond_data;  | 
|---|
 | 1312 | +	cond_snapshot->update = update;  | 
|---|
 | 1313 | +  | 
|---|
 | 1314 | +	mutex_lock(&trace_types_lock);  | 
|---|
 | 1315 | +  | 
|---|
 | 1316 | +	ret = tracing_alloc_snapshot_instance(tr);  | 
|---|
 | 1317 | +	if (ret)  | 
|---|
 | 1318 | +		goto fail_unlock;  | 
|---|
 | 1319 | +  | 
|---|
 | 1320 | +	if (tr->current_trace->use_max_tr) {  | 
|---|
 | 1321 | +		ret = -EBUSY;  | 
|---|
 | 1322 | +		goto fail_unlock;  | 
|---|
 | 1323 | +	}  | 
|---|
 | 1324 | +  | 
|---|
 | 1325 | +	/*  | 
|---|
 | 1326 | +	 * The cond_snapshot can only change to NULL without the  | 
|---|
 | 1327 | +	 * trace_types_lock. We don't care if we race with it going  | 
|---|
 | 1328 | +	 * to NULL, but we want to make sure that it's not set to  | 
|---|
 | 1329 | +	 * something other than NULL when we get here, which we can  | 
|---|
 | 1330 | +	 * do safely with only holding the trace_types_lock and not  | 
|---|
 | 1331 | +	 * having to take the max_lock.  | 
|---|
 | 1332 | +	 */  | 
|---|
 | 1333 | +	if (tr->cond_snapshot) {  | 
|---|
 | 1334 | +		ret = -EBUSY;  | 
|---|
 | 1335 | +		goto fail_unlock;  | 
|---|
 | 1336 | +	}  | 
|---|
 | 1337 | +  | 
|---|
 | 1338 | +	local_irq_disable();  | 
|---|
 | 1339 | +	arch_spin_lock(&tr->max_lock);  | 
|---|
 | 1340 | +	tr->cond_snapshot = cond_snapshot;  | 
|---|
 | 1341 | +	arch_spin_unlock(&tr->max_lock);  | 
|---|
 | 1342 | +	local_irq_enable();  | 
|---|
 | 1343 | +  | 
|---|
 | 1344 | +	mutex_unlock(&trace_types_lock);  | 
|---|
 | 1345 | +  | 
|---|
 | 1346 | +	return ret;  | 
|---|
 | 1347 | +  | 
|---|
 | 1348 | + fail_unlock:  | 
|---|
 | 1349 | +	mutex_unlock(&trace_types_lock);  | 
|---|
 | 1350 | +	kfree(cond_snapshot);  | 
|---|
 | 1351 | +	return ret;  | 
|---|
 | 1352 | +}  | 
|---|
 | 1353 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);  | 
|---|
 | 1354 | +  | 
|---|
 | 1355 | +/**  | 
|---|
 | 1356 | + * tracing_snapshot_cond_disable - disable conditional snapshot for an instance  | 
|---|
 | 1357 | + * @tr:		The tracing instance  | 
|---|
 | 1358 | + *  | 
|---|
 | 1359 | + * Check whether the conditional snapshot for the given instance is  | 
|---|
 | 1360 | + * enabled; if so, free the cond_snapshot associated with it,  | 
|---|
 | 1361 | + * otherwise return -EINVAL.  | 
|---|
 | 1362 | + *  | 
|---|
 | 1363 | + * Returns 0 if successful, error otherwise.  | 
|---|
 | 1364 | + */  | 
|---|
 | 1365 | +int tracing_snapshot_cond_disable(struct trace_array *tr)  | 
|---|
 | 1366 | +{  | 
|---|
 | 1367 | +	int ret = 0;  | 
|---|
 | 1368 | +  | 
|---|
 | 1369 | +	local_irq_disable();  | 
|---|
 | 1370 | +	arch_spin_lock(&tr->max_lock);  | 
|---|
 | 1371 | +  | 
|---|
 | 1372 | +	if (!tr->cond_snapshot)  | 
|---|
 | 1373 | +		ret = -EINVAL;  | 
|---|
 | 1374 | +	else {  | 
|---|
 | 1375 | +		kfree(tr->cond_snapshot);  | 
|---|
 | 1376 | +		tr->cond_snapshot = NULL;  | 
|---|
 | 1377 | +	}  | 
|---|
 | 1378 | +  | 
|---|
 | 1379 | +	arch_spin_unlock(&tr->max_lock);  | 
|---|
 | 1380 | +	local_irq_enable();  | 
|---|
 | 1381 | +  | 
|---|
 | 1382 | +	return ret;  | 
|---|
 | 1383 | +}  | 
|---|
 | 1384 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);  | 
|---|
| 1035 | 1385 |  #else | 
|---|
| 1036 | 1386 |  void tracing_snapshot(void) | 
|---|
| 1037 | 1387 |  { | 
|---|
| 1038 | 1388 |  	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); | 
|---|
| 1039 | 1389 |  } | 
|---|
| 1040 | 1390 |  EXPORT_SYMBOL_GPL(tracing_snapshot); | 
|---|
 | 1391 | +void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)  | 
|---|
 | 1392 | +{  | 
|---|
 | 1393 | +	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");  | 
|---|
 | 1394 | +}  | 
|---|
 | 1395 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond);  | 
|---|
| 1041 | 1396 |  int tracing_alloc_snapshot(void) | 
|---|
| 1042 | 1397 |  { | 
|---|
| 1043 | 1398 |  	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); | 
|---|
| .. | .. | 
|---|
| 1050 | 1405 |  	tracing_snapshot(); | 
|---|
| 1051 | 1406 |  } | 
|---|
| 1052 | 1407 |  EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); | 
|---|
 | 1408 | +void *tracing_cond_snapshot_data(struct trace_array *tr)  | 
|---|
 | 1409 | +{  | 
|---|
 | 1410 | +	return NULL;  | 
|---|
 | 1411 | +}  | 
|---|
 | 1412 | +EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);  | 
|---|
 | 1413 | +int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)  | 
|---|
 | 1414 | +{  | 
|---|
 | 1415 | +	return -ENODEV;  | 
|---|
 | 1416 | +}  | 
|---|
 | 1417 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);  | 
|---|
 | 1418 | +int tracing_snapshot_cond_disable(struct trace_array *tr)  | 
|---|
 | 1419 | +{  | 
|---|
 | 1420 | +	return false;  | 
|---|
 | 1421 | +}  | 
|---|
 | 1422 | +EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);  | 
|---|
| 1053 | 1423 |  #endif /* CONFIG_TRACER_SNAPSHOT */ | 
|---|
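
Putting the conditional-snapshot pieces together, usage looks roughly like the sketch below. All `my_*` names are invented; the `update()` callback receives the `cond_data` pointer passed to `tracing_snapshot_cond()` and runs under `tr->max_lock`, deciding whether the buffer swap in `update_max_tr()` goes ahead:

```c
/* Invented condition: only keep snapshots for large reported values. */
static bool my_cond_update(struct trace_array *tr, void *cond_data)
{
	unsigned long *val = cond_data;	/* from tracing_snapshot_cond() */

	return val && *val > 500;
}

static int my_enable(struct trace_array *tr)
{
	/* Allocates the spare buffer and installs the condition;
	 * fails with -EBUSY if a snapshot user already exists. */
	return tracing_snapshot_cond_enable(tr, NULL, my_cond_update);
}

static void my_maybe_snapshot(struct trace_array *tr, unsigned long val)
{
	tracing_snapshot_cond(tr, &val);  /* swaps only if update() agrees */
}
```
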
| 1054 | 1424 |   | 
|---|
| 1055 | 1425 |  void tracer_tracing_off(struct trace_array *tr) | 
|---|
| 1056 | 1426 |  { | 
|---|
| 1057 |  | -	if (tr->trace_buffer.buffer)  | 
|---|
| 1058 |  | -		ring_buffer_record_off(tr->trace_buffer.buffer);  | 
|---|
 | 1427 | +	if (tr->array_buffer.buffer)  | 
|---|
 | 1428 | +		ring_buffer_record_off(tr->array_buffer.buffer);  | 
|---|
| 1059 | 1429 |  	/* | 
|---|
| 1060 | 1430 |  	 * This flag is looked at when buffers haven't been allocated | 
|---|
| 1061 | 1431 |  	 * yet, or by some tracers (like irqsoff), that just want to | 
|---|
| .. | .. | 
|---|
| 1085 | 1455 |   | 
|---|
| 1086 | 1456 |  void disable_trace_on_warning(void) | 
|---|
| 1087 | 1457 |  { | 
|---|
| 1088 |  | -	if (__disable_trace_on_warning)  | 
|---|
 | 1458 | +	if (__disable_trace_on_warning) {  | 
|---|
 | 1459 | +		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,  | 
|---|
 | 1460 | +			"Disabling tracing due to warning\n");  | 
|---|
| 1089 | 1461 |  		tracing_off(); | 
|---|
 | 1462 | +	}  | 
|---|
| 1090 | 1463 |  } | 
|---|
| 1091 | 1464 |   | 
|---|
| 1092 | 1465 |  /** | 
|---|
| .. | .. | 
|---|
| 1097 | 1470 |   */ | 
|---|
| 1098 | 1471 |  bool tracer_tracing_is_on(struct trace_array *tr) | 
|---|
| 1099 | 1472 |  { | 
|---|
| 1100 |  | -	if (tr->trace_buffer.buffer)  | 
|---|
| 1101 |  | -		return ring_buffer_record_is_on(tr->trace_buffer.buffer);  | 
|---|
 | 1473 | +	if (tr->array_buffer.buffer)  | 
|---|
 | 1474 | +		return ring_buffer_record_is_on(tr->array_buffer.buffer);  | 
|---|
| 1102 | 1475 |  	return !tr->buffer_disabled; | 
|---|
| 1103 | 1476 |  } | 
|---|
| 1104 | 1477 |   | 
|---|
| .. | .. | 
|---|
| 1118 | 1491 |  	if (!str) | 
|---|
| 1119 | 1492 |  		return 0; | 
|---|
| 1120 | 1493 |  	buf_size = memparse(str, &str); | 
|---|
| 1121 |  | -	/* nr_entries can not be zero */  | 
|---|
| 1122 |  | -	if (buf_size == 0)  | 
|---|
| 1123 |  | -		return 0;  | 
|---|
| 1124 |  | -	trace_buf_size = buf_size;  | 
|---|
 | 1494 | +	/*  | 
|---|
 | 1495 | +	 * nr_entries cannot be zero and the startup  | 
|---|
 | 1496 | +	 * tests require some buffer space. Therefore  | 
|---|
 | 1497 | +	 * ensure we have at least 4096 bytes of buffer.  | 
|---|
 | 1498 | +	 */  | 
|---|
 | 1499 | +	trace_buf_size = max(4096UL, buf_size);  | 
|---|
| 1125 | 1500 |  	return 1; | 
|---|
| 1126 | 1501 |  } | 
|---|
| 1127 | 1502 |  __setup("trace_buf_size=", set_buf_size); | 
|---|
| .. | .. | 
|---|
| 1315 | 1690 |  } | 
|---|
| 1316 | 1691 |   | 
|---|
| 1317 | 1692 |  unsigned long __read_mostly	tracing_thresh; | 
|---|
 | 1693 | +static const struct file_operations tracing_max_lat_fops;  | 
|---|
 | 1694 | +  | 
|---|
 | 1695 | +#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \  | 
|---|
 | 1696 | +	defined(CONFIG_FSNOTIFY)  | 
|---|
 | 1697 | +  | 
|---|
 | 1698 | +static struct workqueue_struct *fsnotify_wq;  | 
|---|
 | 1699 | +  | 
|---|
 | 1700 | +static void latency_fsnotify_workfn(struct work_struct *work)  | 
|---|
 | 1701 | +{  | 
|---|
 | 1702 | +	struct trace_array *tr = container_of(work, struct trace_array,  | 
|---|
 | 1703 | +					      fsnotify_work);  | 
|---|
 | 1704 | +	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);  | 
|---|
 | 1705 | +}  | 
|---|
 | 1706 | +  | 
|---|
 | 1707 | +static void latency_fsnotify_workfn_irq(struct irq_work *iwork)  | 
|---|
 | 1708 | +{  | 
|---|
 | 1709 | +	struct trace_array *tr = container_of(iwork, struct trace_array,  | 
|---|
 | 1710 | +					      fsnotify_irqwork);  | 
|---|
 | 1711 | +	queue_work(fsnotify_wq, &tr->fsnotify_work);  | 
|---|
 | 1712 | +}  | 
|---|
 | 1713 | +  | 
|---|
 | 1714 | +static void trace_create_maxlat_file(struct trace_array *tr,  | 
|---|
 | 1715 | +				     struct dentry *d_tracer)  | 
|---|
 | 1716 | +{  | 
|---|
 | 1717 | +	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);  | 
|---|
 | 1718 | +	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);  | 
|---|
 | 1719 | +	tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,  | 
|---|
 | 1720 | +					      d_tracer, &tr->max_latency,  | 
|---|
 | 1721 | +					      &tracing_max_lat_fops);  | 
|---|
 | 1722 | +}  | 
|---|
 | 1723 | +  | 
|---|
 | 1724 | +__init static int latency_fsnotify_init(void)  | 
|---|
 | 1725 | +{  | 
|---|
 | 1726 | +	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",  | 
|---|
 | 1727 | +				      WQ_UNBOUND | WQ_HIGHPRI, 0);  | 
|---|
 | 1728 | +	if (!fsnotify_wq) {  | 
|---|
 | 1729 | +		pr_err("Unable to allocate tr_max_lat_wq\n");  | 
|---|
 | 1730 | +		return -ENOMEM;  | 
|---|
 | 1731 | +	}  | 
|---|
 | 1732 | +	return 0;  | 
|---|
 | 1733 | +}  | 
|---|
 | 1734 | +  | 
|---|
 | 1735 | +late_initcall_sync(latency_fsnotify_init);  | 
|---|
 | 1736 | +  | 
|---|
 | 1737 | +void latency_fsnotify(struct trace_array *tr)  | 
|---|
 | 1738 | +{  | 
|---|
 | 1739 | +	if (!fsnotify_wq)  | 
|---|
 | 1740 | +		return;  | 
|---|
 | 1741 | +	/*  | 
|---|
 | 1742 | +	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's  | 
|---|
 | 1743 | +	 * possible that we are called from __schedule() or do_idle(), which  | 
|---|
 | 1744 | +	 * could cause a deadlock.  | 
|---|
 | 1745 | +	 */  | 
|---|
 | 1746 | +	irq_work_queue(&tr->fsnotify_irqwork);  | 
|---|
 | 1747 | +}  | 
|---|
 | 1748 | +  | 
|---|
 | 1749 | +/*  | 
|---|
 | 1750 | + * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \  | 
|---|
 | 1751 | + *  defined(CONFIG_FSNOTIFY)  | 
|---|
 | 1752 | + */  | 
|---|
 | 1753 | +#else  | 
|---|
 | 1754 | +  | 
|---|
 | 1755 | +#define trace_create_maxlat_file(tr, d_tracer)				\  | 
|---|
 | 1756 | +	trace_create_file("tracing_max_latency", 0644, d_tracer,	\  | 
|---|
 | 1757 | +			  &tr->max_latency, &tracing_max_lat_fops)  | 
|---|
 | 1758 | +  | 
|---|
 | 1759 | +#endif  | 
|---|
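
The irq_work-to-workqueue-to-fsnotify chain above exists so userspace can block on max-latency changes instead of polling the file. A hedged userspace sketch, assuming tracefs is mounted at `/sys/kernel/tracing`:

```c
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init();

	/* The kernel's FS_MODIFY from latency_fsnotify() surfaces as IN_MODIFY. */
	inotify_add_watch(fd, "/sys/kernel/tracing/tracing_max_latency",
			  IN_MODIFY);
	while (read(fd, buf, sizeof(buf)) > 0)
		printf("tracing_max_latency changed\n");
	return 0;
}
```
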
| 1318 | 1760 |   | 
|---|
| 1319 | 1761 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 1320 | 1762 |  /* | 
|---|
| .. | .. | 
|---|
| 1325 | 1767 |  static void | 
|---|
| 1326 | 1768 |  __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 
|---|
| 1327 | 1769 |  { | 
|---|
| 1328 |  | -	struct trace_buffer *trace_buf = &tr->trace_buffer;  | 
|---|
| 1329 |  | -	struct trace_buffer *max_buf = &tr->max_buffer;  | 
|---|
 | 1770 | +	struct array_buffer *trace_buf = &tr->array_buffer;  | 
|---|
 | 1771 | +	struct array_buffer *max_buf = &tr->max_buffer;  | 
|---|
| 1330 | 1772 |  	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); | 
|---|
| 1331 | 1773 |  	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); | 
|---|
| 1332 | 1774 |   | 
|---|
| .. | .. | 
|---|
| 1337 | 1779 |  	max_data->critical_start = data->critical_start; | 
|---|
| 1338 | 1780 |  	max_data->critical_end = data->critical_end; | 
|---|
| 1339 | 1781 |   | 
|---|
| 1340 |  | -	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);  | 
|---|
 | 1782 | +	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);  | 
|---|
| 1341 | 1783 |  	max_data->pid = tsk->pid; | 
|---|
| 1342 | 1784 |  	/* | 
|---|
| 1343 | 1785 |  	 * If tsk == current, then use current_uid(), as that does not use | 
|---|
| .. | .. | 
|---|
| 1354 | 1796 |   | 
|---|
| 1355 | 1797 |  	/* record this tasks comm */ | 
|---|
| 1356 | 1798 |  	tracing_record_cmdline(tsk); | 
|---|
 | 1799 | +	latency_fsnotify(tr);  | 
|---|
| 1357 | 1800 |  } | 
|---|
| 1358 | 1801 |   | 
|---|
| 1359 | 1802 |  /** | 
|---|
| .. | .. | 
|---|
| 1361 | 1804 |   * @tr: tracer | 
|---|
| 1362 | 1805 |   * @tsk: the task with the latency | 
|---|
| 1363 | 1806 |   * @cpu: The cpu that initiated the trace. | 
|---|
 | 1807 | + * @cond_data: User data associated with a conditional snapshot  | 
|---|
| 1364 | 1808 |   * | 
|---|
| 1365 | 1809 |   * Flip the buffers between the @tr and the max_tr and record information | 
|---|
| 1366 | 1810 |   * about which task was the cause of this latency. | 
|---|
| 1367 | 1811 |   */ | 
|---|
| 1368 | 1812 |  void | 
|---|
| 1369 |  | -update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)  | 
|---|
 | 1813 | +update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,  | 
|---|
 | 1814 | +	      void *cond_data)  | 
|---|
| 1370 | 1815 |  { | 
|---|
| 1371 | 1816 |  	if (tr->stop_count) | 
|---|
| 1372 | 1817 |  		return; | 
|---|
| .. | .. | 
|---|
| 1381 | 1826 |   | 
|---|
| 1382 | 1827 |  	arch_spin_lock(&tr->max_lock); | 
|---|
| 1383 | 1828 |   | 
|---|
| 1384 |  | -	/* Inherit the recordable setting from trace_buffer */  | 
|---|
| 1385 |  | -	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))  | 
|---|
 | 1829 | +	/* Inherit the recordable setting from array_buffer */  | 
|---|
 | 1830 | +	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))  | 
|---|
| 1386 | 1831 |  		ring_buffer_record_on(tr->max_buffer.buffer); | 
|---|
| 1387 | 1832 |  	else | 
|---|
| 1388 | 1833 |  		ring_buffer_record_off(tr->max_buffer.buffer); | 
|---|
| 1389 | 1834 |   | 
|---|
| 1390 |  | -	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);  | 
|---|
 | 1835 | +#ifdef CONFIG_TRACER_SNAPSHOT  | 
|---|
 | 1836 | +	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))  | 
|---|
 | 1837 | +		goto out_unlock;  | 
|---|
 | 1838 | +#endif  | 
|---|
 | 1839 | +	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);  | 
|---|
| 1391 | 1840 |   | 
|---|
| 1392 | 1841 |  	__update_max_tr(tr, tsk, cpu); | 
|---|
 | 1842 | +  | 
|---|
 | 1843 | + out_unlock:  | 
|---|
| 1393 | 1844 |  	arch_spin_unlock(&tr->max_lock); | 
|---|
| 1394 | 1845 |  } | 
|---|
| 1395 | 1846 |   | 
|---|
| 1396 | 1847 |  /** | 
|---|
| 1397 | 1848 |   * update_max_tr_single - only copy one trace over, and reset the rest | 
|---|
| 1398 |  | - * @tr - tracer  | 
|---|
| 1399 |  | - * @tsk - task with the latency  | 
|---|
| 1400 |  | - * @cpu - the cpu of the buffer to copy.  | 
|---|
 | 1849 | + * @tr: tracer  | 
|---|
 | 1850 | + * @tsk: task with the latency  | 
|---|
 | 1851 | + * @cpu: the cpu of the buffer to copy.  | 
|---|
| 1401 | 1852 |   * | 
|---|
| 1402 | 1853 |   * Flip the trace of a single CPU buffer between the @tr and the max_tr. | 
|---|
| 1403 | 1854 |   */ | 
|---|
| .. | .. | 
|---|
| 1418 | 1869 |   | 
|---|
| 1419 | 1870 |  	arch_spin_lock(&tr->max_lock); | 
|---|
| 1420 | 1871 |   | 
|---|
| 1421 |  | -	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);  | 
|---|
 | 1872 | +	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);  | 
|---|
| 1422 | 1873 |   | 
|---|
| 1423 | 1874 |  	if (ret == -EBUSY) { | 
|---|
| 1424 | 1875 |  		/* | 
|---|
| .. | .. | 
|---|
| 1438 | 1889 |  } | 
|---|
| 1439 | 1890 |  #endif /* CONFIG_TRACER_MAX_TRACE */ | 
|---|
| 1440 | 1891 |   | 
|---|
| 1441 |  | -static int wait_on_pipe(struct trace_iterator *iter, bool full)  | 
|---|
 | 1892 | +static int wait_on_pipe(struct trace_iterator *iter, int full)  | 
|---|
| 1442 | 1893 |  { | 
|---|
| 1443 | 1894 |  	/* Iterators are static, they should be filled or empty */ | 
|---|
| 1444 | 1895 |  	if (trace_buffer_iter(iter, iter->cpu_file)) | 
|---|
| 1445 | 1896 |  		return 0; | 
|---|
| 1446 | 1897 |   | 
|---|
| 1447 |  | -	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,  | 
|---|
 | 1898 | +	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,  | 
|---|
| 1448 | 1899 |  				full); | 
|---|
| 1449 | 1900 |  } | 
|---|
| 1450 | 1901 |   | 
|---|
| .. | .. | 
|---|
| 1495 | 1946 |  	 * internal tracing to verify that everything is in order. | 
|---|
| 1496 | 1947 |  	 * If we fail, we do not register this tracer. | 
|---|
| 1497 | 1948 |  	 */ | 
|---|
| 1498 |  | -	tracing_reset_online_cpus(&tr->trace_buffer);  | 
|---|
 | 1949 | +	tracing_reset_online_cpus(&tr->array_buffer);  | 
|---|
| 1499 | 1950 |   | 
|---|
| 1500 | 1951 |  	tr->current_trace = type; | 
|---|
| 1501 | 1952 |   | 
|---|
| .. | .. | 
|---|
| 1521 | 1972 |  		return -1; | 
|---|
| 1522 | 1973 |  	} | 
|---|
| 1523 | 1974 |  	/* Only reset on passing, to avoid touching corrupted buffers */ | 
|---|
| 1524 |  | -	tracing_reset_online_cpus(&tr->trace_buffer);  | 
|---|
 | 1975 | +	tracing_reset_online_cpus(&tr->array_buffer);  | 
|---|
| 1525 | 1976 |   | 
|---|
| 1526 | 1977 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 1527 | 1978 |  	if (type->use_max_tr) { | 
|---|
| .. | .. | 
|---|
| 1555 | 2006 |   | 
|---|
| 1556 | 2007 |  	tracing_selftest_running = true; | 
|---|
| 1557 | 2008 |  	list_for_each_entry_safe(p, n, &postponed_selftests, list) { | 
|---|
 | 2009 | +		/* This loop can take minutes when sanitizers are enabled, so  | 
|---|
 | 2010 | +		 * let's make sure we allow RCU processing.  | 
|---|
 | 2011 | +		 */  | 
|---|
 | 2012 | +		cond_resched();  | 
|---|
| 1558 | 2013 |  		ret = run_tracer_selftest(p->type); | 
|---|
| 1559 | 2014 |  		/* If the test fails, then warn and remove from available_tracers */ | 
|---|
| 1560 | 2015 |  		if (ret < 0) { | 
|---|
| .. | .. | 
|---|
| 1593 | 2048 |   | 
|---|
| 1594 | 2049 |  /** | 
|---|
| 1595 | 2050 |   * register_tracer - register a tracer with the ftrace system. | 
|---|
| 1596 |  | - * @type - the plugin for the tracer  | 
|---|
 | 2051 | + * @type: the plugin for the tracer  | 
|---|
| 1597 | 2052 |   * | 
|---|
| 1598 | 2053 |   * Register a new plugin tracer. | 
|---|
| 1599 | 2054 |   */ | 
|---|
| .. | .. | 
|---|
| 1610 | 2065 |  	if (strlen(type->name) >= MAX_TRACER_SIZE) { | 
|---|
| 1611 | 2066 |  		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); | 
|---|
| 1612 | 2067 |  		return -1; | 
|---|
 | 2068 | +	}  | 
|---|
 | 2069 | +  | 
|---|
 | 2070 | +	if (security_locked_down(LOCKDOWN_TRACEFS)) {  | 
|---|
 | 2071 | +		pr_warn("Can not register tracer %s due to lockdown\n",  | 
|---|
 | 2072 | +			   type->name);  | 
|---|
 | 2073 | +		return -EPERM;  | 
|---|
| 1613 | 2074 |  	} | 
|---|
| 1614 | 2075 |   | 
|---|
| 1615 | 2076 |  	mutex_lock(&trace_types_lock); | 
|---|
| .. | .. | 
|---|
| 1670 | 2131 |  	apply_trace_boot_options(); | 
|---|
| 1671 | 2132 |   | 
|---|
| 1672 | 2133 |  	/* disable other selftests, since this will break it. */ | 
|---|
| 1673 |  | -	tracing_selftest_disabled = true;  | 
|---|
| 1674 |  | -#ifdef CONFIG_FTRACE_STARTUP_TEST  | 
|---|
| 1675 |  | -	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",  | 
|---|
| 1676 |  | -	       type->name);  | 
|---|
| 1677 |  | -#endif  | 
|---|
 | 2134 | +	disable_tracing_selftest("running a tracer");  | 
|---|
| 1678 | 2135 |   | 
|---|
| 1679 | 2136 |   out_unlock: | 
|---|
| 1680 | 2137 |  	return ret; | 
|---|
| 1681 | 2138 |  } | 
|---|
| 1682 | 2139 |   | 
|---|
| 1683 |  | -void tracing_reset(struct trace_buffer *buf, int cpu)  | 
|---|
 | 2140 | +static void tracing_reset_cpu(struct array_buffer *buf, int cpu)  | 
|---|
| 1684 | 2141 |  { | 
|---|
| 1685 |  | -	struct ring_buffer *buffer = buf->buffer;  | 
|---|
 | 2142 | +	struct trace_buffer *buffer = buf->buffer;  | 
|---|
| 1686 | 2143 |   | 
|---|
| 1687 | 2144 |  	if (!buffer) | 
|---|
| 1688 | 2145 |  		return; | 
|---|
| .. | .. | 
|---|
| 1690 | 2147 |  	ring_buffer_record_disable(buffer); | 
|---|
| 1691 | 2148 |   | 
|---|
| 1692 | 2149 |  	/* Make sure all commits have finished */ | 
|---|
| 1693 |  | -	synchronize_sched();  | 
|---|
 | 2150 | +	synchronize_rcu();  | 
|---|
| 1694 | 2151 |  	ring_buffer_reset_cpu(buffer, cpu); | 
|---|
| 1695 | 2152 |   | 
|---|
| 1696 | 2153 |  	ring_buffer_record_enable(buffer); | 
|---|
| 1697 | 2154 |  } | 
|---|
| 1698 | 2155 |   | 
|---|
| 1699 |  | -void tracing_reset_online_cpus(struct trace_buffer *buf)  | 
|---|
 | 2156 | +void tracing_reset_online_cpus(struct array_buffer *buf)  | 
|---|
| 1700 | 2157 |  { | 
|---|
| 1701 |  | -	struct ring_buffer *buffer = buf->buffer;  | 
|---|
| 1702 |  | -	int cpu;  | 
|---|
 | 2158 | +	struct trace_buffer *buffer = buf->buffer;  | 
|---|
| 1703 | 2159 |   | 
|---|
| 1704 | 2160 |  	if (!buffer) | 
|---|
| 1705 | 2161 |  		return; | 
|---|
| .. | .. | 
|---|
| 1707 | 2163 |  	ring_buffer_record_disable(buffer); | 
|---|
| 1708 | 2164 |   | 
|---|
| 1709 | 2165 |  	/* Make sure all commits have finished */ | 
|---|
| 1710 |  | -	synchronize_sched();  | 
|---|
 | 2166 | +	synchronize_rcu();  | 
|---|
| 1711 | 2167 |   | 
|---|
| 1712 | 2168 |  	buf->time_start = buffer_ftrace_now(buf, buf->cpu); | 
|---|
| 1713 | 2169 |   | 
|---|
| 1714 |  | -	for_each_online_cpu(cpu)  | 
|---|
| 1715 |  | -		ring_buffer_reset_cpu(buffer, cpu);  | 
|---|
 | 2170 | +	ring_buffer_reset_online_cpus(buffer);  | 
|---|
| 1716 | 2171 |   | 
|---|
| 1717 | 2172 |  	ring_buffer_record_enable(buffer); | 
|---|
| 1718 | 2173 |  } | 
|---|
| .. | .. | 
|---|
| 1726 | 2181 |  		if (!tr->clear_trace) | 
|---|
| 1727 | 2182 |  			continue; | 
|---|
| 1728 | 2183 |  		tr->clear_trace = false; | 
|---|
| 1729 |  | -		tracing_reset_online_cpus(&tr->trace_buffer);  | 
|---|
 | 2184 | +		tracing_reset_online_cpus(&tr->array_buffer);  | 
|---|
| 1730 | 2185 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 1731 | 2186 |  		tracing_reset_online_cpus(&tr->max_buffer); | 
|---|
| 1732 | 2187 |  #endif | 
|---|
| .. | .. | 
|---|
| 1744 | 2199 |   | 
|---|
| 1745 | 2200 |  #define SAVED_CMDLINES_DEFAULT 128 | 
|---|
| 1746 | 2201 |  #define NO_CMDLINE_MAP UINT_MAX | 
|---|
 | 2202 | +/*  | 
|---|
 | 2203 | + * Preemption must be disabled before acquiring trace_cmdline_lock.  | 
|---|
 | 2204 | + * The various trace_arrays' max_lock must be acquired in a context  | 
|---|
 | 2205 | + * where interrupts are disabled.  | 
|---|
 | 2206 | + */  | 
|---|
| 1747 | 2207 |  static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; | 
|---|
| 1748 | 2208 |  struct saved_cmdlines_buffer { | 
|---|
| 1749 | 2209 |  	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; | 
|---|
| .. | .. | 
|---|
| 1761 | 2221 |   | 
|---|
| 1762 | 2222 |  static inline void set_cmdline(int idx, const char *cmdline) | 
|---|
| 1763 | 2223 |  { | 
|---|
| 1764 |  | -	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);  | 
|---|
 | 2224 | +	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);  | 
|---|
| 1765 | 2225 |  } | 
|---|
| 1766 | 2226 |   | 
|---|
| 1767 | 2227 |  static int allocate_cmdlines_buffer(unsigned int val, | 
|---|
| .. | .. | 
|---|
| 1820 | 2280 |   */ | 
|---|
| 1821 | 2281 |  void tracing_start(void) | 
|---|
| 1822 | 2282 |  { | 
|---|
| 1823 |  | -	struct ring_buffer *buffer;  | 
|---|
 | 2283 | +	struct trace_buffer *buffer;  | 
|---|
| 1824 | 2284 |  	unsigned long flags; | 
|---|
| 1825 | 2285 |   | 
|---|
| 1826 | 2286 |  	if (tracing_disabled) | 
|---|
| .. | .. | 
|---|
| 1839 | 2299 |  	/* Prevent the buffers from switching */ | 
|---|
| 1840 | 2300 |  	arch_spin_lock(&global_trace.max_lock); | 
|---|
| 1841 | 2301 |   | 
|---|
| 1842 |  | -	buffer = global_trace.trace_buffer.buffer;  | 
|---|
 | 2302 | +	buffer = global_trace.array_buffer.buffer;  | 
|---|
| 1843 | 2303 |  	if (buffer) | 
|---|
| 1844 | 2304 |  		ring_buffer_record_enable(buffer); | 
|---|
| 1845 | 2305 |   | 
|---|
| .. | .. | 
|---|
| 1857 | 2317 |   | 
|---|
| 1858 | 2318 |  static void tracing_start_tr(struct trace_array *tr) | 
|---|
| 1859 | 2319 |  { | 
|---|
| 1860 |  | -	struct ring_buffer *buffer;  | 
|---|
 | 2320 | +	struct trace_buffer *buffer;  | 
|---|
| 1861 | 2321 |  	unsigned long flags; | 
|---|
| 1862 | 2322 |   | 
|---|
| 1863 | 2323 |  	if (tracing_disabled) | 
|---|
| .. | .. | 
|---|
| 1878 | 2338 |  		goto out; | 
|---|
| 1879 | 2339 |  	} | 
|---|
| 1880 | 2340 |   | 
|---|
| 1881 |  | -	buffer = tr->trace_buffer.buffer;  | 
|---|
 | 2341 | +	buffer = tr->array_buffer.buffer;  | 
|---|
| 1882 | 2342 |  	if (buffer) | 
|---|
| 1883 | 2343 |  		ring_buffer_record_enable(buffer); | 
|---|
| 1884 | 2344 |   | 
|---|
| .. | .. | 
|---|
| 1894 | 2354 |   */ | 
|---|
| 1895 | 2355 |  void tracing_stop(void) | 
|---|
| 1896 | 2356 |  { | 
|---|
| 1897 |  | -	struct ring_buffer *buffer;  | 
|---|
 | 2357 | +	struct trace_buffer *buffer;  | 
|---|
| 1898 | 2358 |  	unsigned long flags; | 
|---|
| 1899 | 2359 |   | 
|---|
| 1900 | 2360 |  	raw_spin_lock_irqsave(&global_trace.start_lock, flags); | 
|---|
| .. | .. | 
|---|
| 1904 | 2364 |  	/* Prevent the buffers from switching */ | 
|---|
| 1905 | 2365 |  	arch_spin_lock(&global_trace.max_lock); | 
|---|
| 1906 | 2366 |   | 
|---|
| 1907 |  | -	buffer = global_trace.trace_buffer.buffer;  | 
|---|
 | 2367 | +	buffer = global_trace.array_buffer.buffer;  | 
|---|
| 1908 | 2368 |  	if (buffer) | 
|---|
| 1909 | 2369 |  		ring_buffer_record_disable(buffer); | 
|---|
| 1910 | 2370 |   | 
|---|
| .. | .. | 
|---|
| 1922 | 2382 |   | 
|---|
| 1923 | 2383 |  static void tracing_stop_tr(struct trace_array *tr) | 
|---|
| 1924 | 2384 |  { | 
|---|
| 1925 |  | -	struct ring_buffer *buffer;  | 
|---|
 | 2385 | +	struct trace_buffer *buffer;  | 
|---|
| 1926 | 2386 |  	unsigned long flags; | 
|---|
| 1927 | 2387 |   | 
|---|
| 1928 | 2388 |  	/* If global, we need to also stop the max tracer */ | 
|---|
| .. | .. | 
|---|
| 1933 | 2393 |  	if (tr->stop_count++) | 
|---|
| 1934 | 2394 |  		goto out; | 
|---|
| 1935 | 2395 |   | 
|---|
| 1936 |  | -	buffer = tr->trace_buffer.buffer;  | 
|---|
 | 2396 | +	buffer = tr->array_buffer.buffer;  | 
|---|
| 1937 | 2397 |  	if (buffer) | 
|---|
| 1938 | 2398 |  		ring_buffer_record_disable(buffer); | 
|---|
| 1939 | 2399 |   | 
|---|
| .. | .. | 
|---|
| 1956 | 2416 |  	 * the lock, but we also don't want to spin | 
|---|
| 1957 | 2417 |  	 * nor do we want to disable interrupts, | 
|---|
| 1958 | 2418 |  	 * so if we miss here, then better luck next time. | 
|---|
 | 2419 | +	 *  | 
|---|
 | 2420 | +	 * This is called from within the scheduler and wakeup paths, so  | 
|---|
 | 2421 | +	 * interrupts had better be disabled and the run queue lock held.  | 
|---|
| 1959 | 2422 |  	 */ | 
|---|
 | 2423 | +	lockdep_assert_preemption_disabled();  | 
|---|
| 1960 | 2424 |  	if (!arch_spin_trylock(&trace_cmdline_lock)) | 
|---|
| 1961 | 2425 |  		return 0; | 
|---|
| 1962 | 2426 |   | 
|---|
| .. | .. | 
|---|
| 2064 | 2528 |  /** | 
|---|
| 2065 | 2529 |   * tracing_record_taskinfo - record the task info of a task | 
|---|
| 2066 | 2530 |   * | 
|---|
| 2067 |  | - * @task  - task to record  | 
|---|
| 2068 |  | - * @flags - TRACE_RECORD_CMDLINE for recording comm  | 
|---|
| 2069 |  | - *        - TRACE_RECORD_TGID for recording tgid  | 
|---|
 | 2531 | + * @task:  task to record  | 
|---|
 | 2532 | + * @flags: TRACE_RECORD_CMDLINE for recording comm  | 
|---|
 | 2533 | + *         TRACE_RECORD_TGID for recording tgid  | 
|---|
| 2070 | 2534 |   */ | 
|---|
| 2071 | 2535 |  void tracing_record_taskinfo(struct task_struct *task, int flags) | 
|---|
| 2072 | 2536 |  { | 
|---|
| .. | .. | 
|---|
| 2092 | 2556 |  /** | 
|---|
| 2093 | 2557 |   * tracing_record_taskinfo_sched_switch - record task info for sched_switch | 
|---|
| 2094 | 2558 |   * | 
|---|
| 2095 |  | - * @prev - previous task during sched_switch  | 
|---|
| 2096 |  | - * @next - next task during sched_switch  | 
|---|
| 2097 |  | - * @flags - TRACE_RECORD_CMDLINE for recording comm  | 
|---|
| 2098 |  | - *          TRACE_RECORD_TGID for recording tgid  | 
|---|
 | 2559 | + * @prev: previous task during sched_switch  | 
|---|
 | 2560 | + * @next: next task during sched_switch  | 
|---|
 | 2561 | + * @flags: TRACE_RECORD_CMDLINE for recording comm  | 
|---|
 | 2562 | + *         TRACE_RECORD_TGID for recording tgid  | 
|---|
| 2099 | 2563 |   */ | 
|---|
| 2100 | 2564 |  void tracing_record_taskinfo_sched_switch(struct task_struct *prev, | 
|---|
| 2101 | 2565 |  					  struct task_struct *next, int flags) | 
|---|
| .. | .. | 
|---|
| 2144 | 2608 |  } | 
|---|
| 2145 | 2609 |  EXPORT_SYMBOL_GPL(trace_handle_return); | 
|---|
| 2146 | 2610 |   | 
|---|
| 2147 |  | -void  | 
|---|
| 2148 |  | -tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,  | 
|---|
| 2149 |  | -			     int pc)  | 
|---|
 | 2611 | +static unsigned short migration_disable_value(void)  | 
|---|
| 2150 | 2612 |  { | 
|---|
| 2151 |  | -	struct task_struct *tsk = current;  | 
|---|
| 2152 |  | -  | 
|---|
| 2153 |  | -	entry->preempt_count		= pc & 0xff;  | 
|---|
| 2154 |  | -	entry->pid			= (tsk) ? tsk->pid : 0;  | 
|---|
| 2155 |  | -	entry->flags =  | 
|---|
| 2156 |  | -#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT  | 
|---|
| 2157 |  | -		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |  | 
|---|
 | 2613 | +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)  | 
|---|
 | 2614 | +	return current->migration_disabled;  | 
|---|
| 2158 | 2615 |  #else | 
|---|
| 2159 |  | -		TRACE_FLAG_IRQS_NOSUPPORT |  | 
|---|
 | 2616 | +	return 0;  | 
|---|
| 2160 | 2617 |  #endif | 
|---|
| 2161 |  | -		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |  | 
|---|
| 2162 |  | -		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |  | 
|---|
| 2163 |  | -		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |  | 
|---|
| 2164 |  | -		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |  | 
|---|
| 2165 |  | -		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);  | 
|---|
| 2166 | 2618 |  } | 
|---|
| 2167 |  | -EXPORT_SYMBOL_GPL(tracing_generic_entry_update);  | 
|---|
 | 2619 | +  | 
|---|
 | 2620 | +unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)  | 
|---|
 | 2621 | +{  | 
|---|
 | 2622 | +	unsigned int trace_flags = irqs_status;  | 
|---|
 | 2623 | +	unsigned int pc;  | 
|---|
 | 2624 | +  | 
|---|
 | 2625 | +	pc = preempt_count();  | 
|---|
 | 2626 | +  | 
|---|
 | 2627 | +	if (pc & NMI_MASK)  | 
|---|
 | 2628 | +		trace_flags |= TRACE_FLAG_NMI;  | 
|---|
 | 2629 | +	if (pc & HARDIRQ_MASK)  | 
|---|
 | 2630 | +		trace_flags |= TRACE_FLAG_HARDIRQ;  | 
|---|
 | 2631 | +	if (in_serving_softirq())  | 
|---|
 | 2632 | +		trace_flags |= TRACE_FLAG_SOFTIRQ;  | 
|---|
 | 2633 | +  | 
|---|
 | 2634 | +	if (tif_need_resched())  | 
|---|
 | 2635 | +		trace_flags |= TRACE_FLAG_NEED_RESCHED;  | 
|---|
 | 2636 | +	if (test_preempt_need_resched())  | 
|---|
 | 2637 | +		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;  | 
|---|
 | 2638 | +  | 
|---|
 | 2639 | +#ifdef CONFIG_PREEMPT_LAZY  | 
|---|
 | 2640 | +	if (need_resched_lazy())  | 
|---|
 | 2641 | +		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;  | 
|---|
 | 2642 | +#endif  | 
|---|
 | 2643 | +  | 
|---|
 | 2644 | +	return (pc & 0xff) |  | 
|---|
 | 2645 | +		(migration_disable_value() & 0xff) << 8 |  | 
|---|
 | 2646 | +		(preempt_lazy_count() & 0xff) << 16 |  | 
|---|
 | 2647 | +		(trace_flags << 24);  | 
|---|
 | 2648 | +}  | 
|---|
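The return expression above packs four 8-bit fields into a single word; the `trace_ctx` parameters threaded through the rest of this patch carry this word in place of the old `flags`/`pc` pair. A minimal illustrative sketch of the layout (userspace code, hypothetical helper name — field positions taken directly from the return expression):

```c
/*
 * Illustrative sketch only (not kernel code): decoding the packed
 * trace_ctx word produced by tracing_gen_ctx_irq_test() above.
 */
#include <stdio.h>

static void decode_trace_ctx(unsigned int trace_ctx)
{
	unsigned int preempt_count   = trace_ctx & 0xff;         /* bits  0-7  */
	unsigned int migrate_disable = (trace_ctx >> 8) & 0xff;  /* bits  8-15 */
	unsigned int preempt_lazy    = (trace_ctx >> 16) & 0xff; /* bits 16-23 */
	unsigned int trace_flags     = trace_ctx >> 24;          /* bits 24-31 */

	printf("preempt=%u migrate_disable=%u lazy=%u flags=%#x\n",
	       preempt_count, migrate_disable, preempt_lazy, trace_flags);
}
```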
| 2168 | 2649 |   | 
|---|
| 2169 | 2650 |  struct ring_buffer_event * | 
|---|
| 2170 |  | -trace_buffer_lock_reserve(struct ring_buffer *buffer,  | 
|---|
 | 2651 | +trace_buffer_lock_reserve(struct trace_buffer *buffer,  | 
|---|
| 2171 | 2652 |  			  int type, | 
|---|
| 2172 | 2653 |  			  unsigned long len, | 
|---|
| 2173 |  | -			  unsigned long flags, int pc)  | 
|---|
 | 2654 | +			  unsigned int trace_ctx)  | 
|---|
| 2174 | 2655 |  { | 
|---|
| 2175 |  | -	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);  | 
|---|
 | 2656 | +	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);  | 
|---|
| 2176 | 2657 |  } | 
|---|
| 2177 | 2658 |   | 
|---|
| 2178 | 2659 |  DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); | 
|---|
| .. | .. | 
|---|
| 2217 | 2698 |   | 
|---|
| 2218 | 2699 |  		preempt_disable(); | 
|---|
| 2219 | 2700 |  		if (cpu == smp_processor_id() && | 
|---|
| 2220 |  | -		    this_cpu_read(trace_buffered_event) !=  | 
|---|
 | 2701 | +		    __this_cpu_read(trace_buffered_event) !=  | 
|---|
| 2221 | 2702 |  		    per_cpu(trace_buffered_event, cpu)) | 
|---|
| 2222 | 2703 |  			WARN_ON_ONCE(1); | 
|---|
| 2223 | 2704 |  		preempt_enable(); | 
|---|
| .. | .. | 
|---|
| 2267 | 2748 |  	preempt_enable(); | 
|---|
| 2268 | 2749 |   | 
|---|
| 2269 | 2750 |  	/* Wait for all current users to finish */ | 
|---|
| 2270 |  | -	synchronize_sched();  | 
|---|
 | 2751 | +	synchronize_rcu();  | 
|---|
| 2271 | 2752 |   | 
|---|
| 2272 | 2753 |  	for_each_tracing_cpu(cpu) { | 
|---|
| 2273 | 2754 |  		free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); | 
|---|
| .. | .. | 
|---|
| 2286 | 2767 |  	preempt_enable(); | 
|---|
| 2287 | 2768 |  } | 
|---|
| 2288 | 2769 |   | 
|---|
| 2289 |  | -static struct ring_buffer *temp_buffer;  | 
|---|
 | 2770 | +static struct trace_buffer *temp_buffer;  | 
|---|
| 2290 | 2771 |   | 
|---|
| 2291 | 2772 |  struct ring_buffer_event * | 
|---|
| 2292 |  | -trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,  | 
|---|
 | 2773 | +trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,  | 
|---|
| 2293 | 2774 |  			  struct trace_event_file *trace_file, | 
|---|
| 2294 | 2775 |  			  int type, unsigned long len, | 
|---|
| 2295 |  | -			  unsigned long flags, int pc)  | 
|---|
 | 2776 | +			  unsigned int trace_ctx)  | 
|---|
| 2296 | 2777 |  { | 
|---|
| 2297 | 2778 |  	struct ring_buffer_event *entry; | 
|---|
| 2298 | 2779 |  	int val; | 
|---|
| 2299 | 2780 |   | 
|---|
| 2300 |  | -	*current_rb = trace_file->tr->trace_buffer.buffer;  | 
|---|
 | 2781 | +	*current_rb = trace_file->tr->array_buffer.buffer;  | 
|---|
| 2301 | 2782 |   | 
|---|
| 2302 | 2783 |  	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags & | 
|---|
| 2303 | 2784 |  	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) && | 
|---|
| .. | .. | 
|---|
| 2305 | 2786 |  		/* Try to use the per cpu buffer first */ | 
|---|
| 2306 | 2787 |  		val = this_cpu_inc_return(trace_buffered_event_cnt); | 
|---|
| 2307 | 2788 |  		if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) { | 
|---|
| 2308 |  | -			trace_event_setup(entry, type, flags, pc);  | 
|---|
 | 2789 | +			trace_event_setup(entry, type, trace_ctx);  | 
|---|
| 2309 | 2790 |  			entry->array[0] = len; | 
|---|
| 2310 | 2791 |  			return entry; | 
|---|
| 2311 | 2792 |  		} | 
|---|
| .. | .. | 
|---|
| 2313 | 2794 |  	} | 
|---|
| 2314 | 2795 |   | 
|---|
| 2315 | 2796 |  	entry = __trace_buffer_lock_reserve(*current_rb, | 
|---|
| 2316 |  | -					    type, len, flags, pc);  | 
|---|
 | 2797 | +					    type, len, trace_ctx);  | 
|---|
| 2317 | 2798 |  	/* | 
|---|
| 2318 | 2799 |  	 * If tracing is off, but we have triggers enabled | 
|---|
| 2319 | 2800 |  	 * we still need to look at the event data. Use the temp_buffer | 
|---|
| 2320 |  | -	 * to store the trace event for the tigger to use. It's recusive  | 
|---|
 | 2801 | +	 * to store the trace event for the trigger to use. It's recursive  | 
|---|
| 2321 | 2802 |  	 * safe and will not be recorded anywhere. | 
|---|
| 2322 | 2803 |  	 */ | 
|---|
| 2323 | 2804 |  	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { | 
|---|
| 2324 | 2805 |  		*current_rb = temp_buffer; | 
|---|
| 2325 |  | -		entry = __trace_buffer_lock_reserve(*current_rb,  | 
|---|
| 2326 |  | -						    type, len, flags, pc);  | 
|---|
 | 2806 | +		entry = __trace_buffer_lock_reserve(*current_rb, type, len,  | 
|---|
 | 2807 | +						    trace_ctx);  | 
|---|
| 2327 | 2808 |  	} | 
|---|
| 2328 | 2809 |  	return entry; | 
|---|
| 2329 | 2810 |  } | 
|---|
| 2330 | 2811 |  EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); | 
|---|
| 2331 | 2812 |   | 
|---|
| 2332 |  | -static DEFINE_SPINLOCK(tracepoint_iter_lock);  | 
|---|
 | 2813 | +static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);  | 
|---|
| 2333 | 2814 |  static DEFINE_MUTEX(tracepoint_printk_mutex); | 
|---|
| 2334 | 2815 |   | 
|---|
| 2335 | 2816 |  static void output_printk(struct trace_event_buffer *fbuffer) | 
|---|
| 2336 | 2817 |  { | 
|---|
| 2337 | 2818 |  	struct trace_event_call *event_call; | 
|---|
 | 2819 | +	struct trace_event_file *file;  | 
|---|
| 2338 | 2820 |  	struct trace_event *event; | 
|---|
| 2339 | 2821 |  	unsigned long flags; | 
|---|
| 2340 | 2822 |  	struct trace_iterator *iter = tracepoint_print_iter; | 
|---|
| .. | .. | 
|---|
| 2348 | 2830 |  	    !event_call->event.funcs->trace) | 
|---|
| 2349 | 2831 |  		return; | 
|---|
| 2350 | 2832 |   | 
|---|
 | 2833 | +	file = fbuffer->trace_file;  | 
|---|
 | 2834 | +	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||  | 
|---|
 | 2835 | +	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&  | 
|---|
 | 2836 | +	     !filter_match_preds(file->filter, fbuffer->entry)))  | 
|---|
 | 2837 | +		return;  | 
|---|
 | 2838 | +  | 
|---|
| 2351 | 2839 |  	event = &fbuffer->trace_file->event_call->event; | 
|---|
| 2352 | 2840 |   | 
|---|
| 2353 |  | -	spin_lock_irqsave(&tracepoint_iter_lock, flags);  | 
|---|
 | 2841 | +	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);  | 
|---|
| 2354 | 2842 |  	trace_seq_init(&iter->seq); | 
|---|
| 2355 | 2843 |  	iter->ent = fbuffer->entry; | 
|---|
| 2356 | 2844 |  	event_call->event.funcs->trace(iter, 0, event); | 
|---|
| 2357 | 2845 |  	trace_seq_putc(&iter->seq, 0); | 
|---|
| 2358 | 2846 |  	printk("%s", iter->seq.buffer); | 
|---|
| 2359 | 2847 |   | 
|---|
| 2360 |  | -	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);  | 
|---|
 | 2848 | +	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);  | 
|---|
| 2361 | 2849 |  } | 
|---|
| 2362 | 2850 |   | 
|---|
| 2363 | 2851 |  int tracepoint_printk_sysctl(struct ctl_table *table, int write, | 
|---|
| 2364 |  | -			     void __user *buffer, size_t *lenp,  | 
|---|
 | 2852 | +			     void *buffer, size_t *lenp,  | 
|---|
| 2365 | 2853 |  			     loff_t *ppos) | 
|---|
| 2366 | 2854 |  { | 
|---|
| 2367 | 2855 |  	int save_tracepoint_printk; | 
|---|
| .. | .. | 
|---|
| 2398 | 2886 |  	if (static_key_false(&tracepoint_printk_key.key)) | 
|---|
| 2399 | 2887 |  		output_printk(fbuffer); | 
|---|
| 2400 | 2888 |   | 
|---|
| 2401 |  | -	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,  | 
|---|
 | 2889 | +	if (static_branch_unlikely(&trace_event_exports_enabled))  | 
|---|
 | 2890 | +		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);  | 
|---|
 | 2891 | +	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,  | 
|---|
| 2402 | 2892 |  				    fbuffer->event, fbuffer->entry, | 
|---|
| 2403 |  | -				    fbuffer->flags, fbuffer->pc);  | 
|---|
 | 2893 | +				    fbuffer->trace_ctx, fbuffer->regs);  | 
|---|
| 2404 | 2894 |  } | 
|---|
| 2405 | 2895 |  EXPORT_SYMBOL_GPL(trace_event_buffer_commit); | 
|---|
| 2406 | 2896 |   | 
|---|
| .. | .. | 
|---|
| 2414 | 2904 |  # define STACK_SKIP 3 | 
|---|
| 2415 | 2905 |   | 
|---|
| 2416 | 2906 |  void trace_buffer_unlock_commit_regs(struct trace_array *tr, | 
|---|
| 2417 |  | -				     struct ring_buffer *buffer,  | 
|---|
 | 2907 | +				     struct trace_buffer *buffer,  | 
|---|
| 2418 | 2908 |  				     struct ring_buffer_event *event, | 
|---|
| 2419 |  | -				     unsigned long flags, int pc,  | 
|---|
 | 2909 | +				     unsigned int trace_ctx,  | 
|---|
| 2420 | 2910 |  				     struct pt_regs *regs) | 
|---|
| 2421 | 2911 |  { | 
|---|
| 2422 | 2912 |  	__buffer_unlock_commit(buffer, event); | 
|---|
| .. | .. | 
|---|
| 2427 | 2917 |  	 * and mmiotrace, but that's ok if they lose a function or | 
|---|
| 2428 | 2918 |  	 * two. They are not that meaningful. | 
|---|
| 2429 | 2919 |  	 */ | 
|---|
| 2430 |  | -	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);  | 
|---|
| 2431 |  | -	ftrace_trace_userstack(tr, buffer, flags, pc);  | 
|---|
 | 2920 | +	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);  | 
|---|
 | 2921 | +	ftrace_trace_userstack(tr, buffer, trace_ctx);  | 
|---|
| 2432 | 2922 |  } | 
|---|
| 2433 | 2923 |   | 
|---|
| 2434 | 2924 |  /* | 
|---|
| 2435 | 2925 |   * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. | 
|---|
| 2436 | 2926 |   */ | 
|---|
| 2437 | 2927 |  void | 
|---|
| 2438 |  | -trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,  | 
|---|
 | 2928 | +trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,  | 
|---|
| 2439 | 2929 |  				   struct ring_buffer_event *event) | 
|---|
| 2440 | 2930 |  { | 
|---|
| 2441 | 2931 |  	__buffer_unlock_commit(buffer, event); | 
|---|
| 2442 | 2932 |  } | 
|---|
| 2443 | 2933 |   | 
|---|
| 2444 |  | -static void  | 
|---|
| 2445 |  | -trace_process_export(struct trace_export *export,  | 
|---|
| 2446 |  | -	       struct ring_buffer_event *event)  | 
|---|
| 2447 |  | -{  | 
|---|
| 2448 |  | -	struct trace_entry *entry;  | 
|---|
| 2449 |  | -	unsigned int size = 0;  | 
|---|
| 2450 |  | -  | 
|---|
| 2451 |  | -	entry = ring_buffer_event_data(event);  | 
|---|
| 2452 |  | -	size = ring_buffer_event_length(event);  | 
|---|
| 2453 |  | -	export->write(export, entry, size);  | 
|---|
| 2454 |  | -}  | 
|---|
| 2455 |  | -  | 
|---|
| 2456 |  | -static DEFINE_MUTEX(ftrace_export_lock);  | 
|---|
| 2457 |  | -  | 
|---|
| 2458 |  | -static struct trace_export __rcu *ftrace_exports_list __read_mostly;  | 
|---|
| 2459 |  | -  | 
|---|
| 2460 |  | -static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);  | 
|---|
| 2461 |  | -  | 
|---|
| 2462 |  | -static inline void ftrace_exports_enable(void)  | 
|---|
| 2463 |  | -{  | 
|---|
| 2464 |  | -	static_branch_enable(&ftrace_exports_enabled);  | 
|---|
| 2465 |  | -}  | 
|---|
| 2466 |  | -  | 
|---|
| 2467 |  | -static inline void ftrace_exports_disable(void)  | 
|---|
| 2468 |  | -{  | 
|---|
| 2469 |  | -	static_branch_disable(&ftrace_exports_enabled);  | 
|---|
| 2470 |  | -}  | 
|---|
| 2471 |  | -  | 
|---|
| 2472 |  | -void ftrace_exports(struct ring_buffer_event *event)  | 
|---|
| 2473 |  | -{  | 
|---|
| 2474 |  | -	struct trace_export *export;  | 
|---|
| 2475 |  | -  | 
|---|
| 2476 |  | -	preempt_disable_notrace();  | 
|---|
| 2477 |  | -  | 
|---|
| 2478 |  | -	export = rcu_dereference_raw_notrace(ftrace_exports_list);  | 
|---|
| 2479 |  | -	while (export) {  | 
|---|
| 2480 |  | -		trace_process_export(export, event);  | 
|---|
| 2481 |  | -		export = rcu_dereference_raw_notrace(export->next);  | 
|---|
| 2482 |  | -	}  | 
|---|
| 2483 |  | -  | 
|---|
| 2484 |  | -	preempt_enable_notrace();  | 
|---|
| 2485 |  | -}  | 
|---|
| 2486 |  | -  | 
|---|
| 2487 |  | -static inline void  | 
|---|
| 2488 |  | -add_trace_export(struct trace_export **list, struct trace_export *export)  | 
|---|
| 2489 |  | -{  | 
|---|
| 2490 |  | -	rcu_assign_pointer(export->next, *list);  | 
|---|
| 2491 |  | -	/*  | 
|---|
| 2492 |  | -	 * We are entering export into the list but another  | 
|---|
| 2493 |  | -	 * CPU might be walking that list. We need to make sure  | 
|---|
| 2494 |  | -	 * the export->next pointer is valid before another CPU sees  | 
|---|
| 2495 |  | -	 * the export pointer included into the list.  | 
|---|
| 2496 |  | -	 */  | 
|---|
| 2497 |  | -	rcu_assign_pointer(*list, export);  | 
|---|
| 2498 |  | -}  | 
|---|
| 2499 |  | -  | 
|---|
| 2500 |  | -static inline int  | 
|---|
| 2501 |  | -rm_trace_export(struct trace_export **list, struct trace_export *export)  | 
|---|
| 2502 |  | -{  | 
|---|
| 2503 |  | -	struct trace_export **p;  | 
|---|
| 2504 |  | -  | 
|---|
| 2505 |  | -	for (p = list; *p != NULL; p = &(*p)->next)  | 
|---|
| 2506 |  | -		if (*p == export)  | 
|---|
| 2507 |  | -			break;  | 
|---|
| 2508 |  | -  | 
|---|
| 2509 |  | -	if (*p != export)  | 
|---|
| 2510 |  | -		return -1;  | 
|---|
| 2511 |  | -  | 
|---|
| 2512 |  | -	rcu_assign_pointer(*p, (*p)->next);  | 
|---|
| 2513 |  | -  | 
|---|
| 2514 |  | -	return 0;  | 
|---|
| 2515 |  | -}  | 
|---|
| 2516 |  | -  | 
|---|
| 2517 |  | -static inline void  | 
|---|
| 2518 |  | -add_ftrace_export(struct trace_export **list, struct trace_export *export)  | 
|---|
| 2519 |  | -{  | 
|---|
| 2520 |  | -	if (*list == NULL)  | 
|---|
| 2521 |  | -		ftrace_exports_enable();  | 
|---|
| 2522 |  | -  | 
|---|
| 2523 |  | -	add_trace_export(list, export);  | 
|---|
| 2524 |  | -}  | 
|---|
| 2525 |  | -  | 
|---|
| 2526 |  | -static inline int  | 
|---|
| 2527 |  | -rm_ftrace_export(struct trace_export **list, struct trace_export *export)  | 
|---|
| 2528 |  | -{  | 
|---|
| 2529 |  | -	int ret;  | 
|---|
| 2530 |  | -  | 
|---|
| 2531 |  | -	ret = rm_trace_export(list, export);  | 
|---|
| 2532 |  | -	if (*list == NULL)  | 
|---|
| 2533 |  | -		ftrace_exports_disable();  | 
|---|
| 2534 |  | -  | 
|---|
| 2535 |  | -	return ret;  | 
|---|
| 2536 |  | -}  | 
|---|
| 2537 |  | -  | 
|---|
| 2538 |  | -int register_ftrace_export(struct trace_export *export)  | 
|---|
| 2539 |  | -{  | 
|---|
| 2540 |  | -	if (WARN_ON_ONCE(!export->write))  | 
|---|
| 2541 |  | -		return -1;  | 
|---|
| 2542 |  | -  | 
|---|
| 2543 |  | -	mutex_lock(&ftrace_export_lock);  | 
|---|
| 2544 |  | -  | 
|---|
| 2545 |  | -	add_ftrace_export(&ftrace_exports_list, export);  | 
|---|
| 2546 |  | -  | 
|---|
| 2547 |  | -	mutex_unlock(&ftrace_export_lock);  | 
|---|
| 2548 |  | -  | 
|---|
| 2549 |  | -	return 0;  | 
|---|
| 2550 |  | -}  | 
|---|
| 2551 |  | -EXPORT_SYMBOL_GPL(register_ftrace_export);  | 
|---|
| 2552 |  | -  | 
|---|
| 2553 |  | -int unregister_ftrace_export(struct trace_export *export)  | 
|---|
| 2554 |  | -{  | 
|---|
| 2555 |  | -	int ret;  | 
|---|
| 2556 |  | -  | 
|---|
| 2557 |  | -	mutex_lock(&ftrace_export_lock);  | 
|---|
| 2558 |  | -  | 
|---|
| 2559 |  | -	ret = rm_ftrace_export(&ftrace_exports_list, export);  | 
|---|
| 2560 |  | -  | 
|---|
| 2561 |  | -	mutex_unlock(&ftrace_export_lock);  | 
|---|
| 2562 |  | -  | 
|---|
| 2563 |  | -	return ret;  | 
|---|
| 2564 |  | -}  | 
|---|
| 2565 |  | -EXPORT_SYMBOL_GPL(unregister_ftrace_export);  | 
|---|
| 2566 |  | -  | 
|---|
| 2567 | 2934 |  void | 
|---|
| 2568 |  | -trace_function(struct trace_array *tr,  | 
|---|
| 2569 |  | -	       unsigned long ip, unsigned long parent_ip, unsigned long flags,  | 
|---|
| 2570 |  | -	       int pc)  | 
|---|
 | 2935 | +trace_function(struct trace_array *tr, unsigned long ip, unsigned long  | 
|---|
 | 2936 | +	       parent_ip, unsigned int trace_ctx)  | 
|---|
| 2571 | 2937 |  { | 
|---|
| 2572 | 2938 |  	struct trace_event_call *call = &event_function; | 
|---|
| 2573 |  | -	struct ring_buffer *buffer = tr->trace_buffer.buffer;  | 
|---|
 | 2939 | +	struct trace_buffer *buffer = tr->array_buffer.buffer;  | 
|---|
| 2574 | 2940 |  	struct ring_buffer_event *event; | 
|---|
| 2575 | 2941 |  	struct ftrace_entry *entry; | 
|---|
| 2576 | 2942 |   | 
|---|
| 2577 | 2943 |  	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), | 
|---|
| 2578 |  | -					    flags, pc);  | 
|---|
 | 2944 | +					    trace_ctx);  | 
|---|
| 2579 | 2945 |  	if (!event) | 
|---|
| 2580 | 2946 |  		return; | 
|---|
| 2581 | 2947 |  	entry	= ring_buffer_event_data(event); | 
|---|
| .. | .. | 
|---|
| 2583 | 2949 |  	entry->parent_ip		= parent_ip; | 
|---|
| 2584 | 2950 |   | 
|---|
| 2585 | 2951 |  	if (!call_filter_check_discard(call, entry, buffer, event)) { | 
|---|
| 2586 |  | -		if (static_branch_unlikely(&ftrace_exports_enabled))  | 
|---|
| 2587 |  | -			ftrace_exports(event);  | 
|---|
 | 2952 | +		if (static_branch_unlikely(&trace_function_exports_enabled))  | 
|---|
 | 2953 | +			ftrace_exports(event, TRACE_EXPORT_FUNCTION);  | 
|---|
| 2588 | 2954 |  		__buffer_unlock_commit(buffer, event); | 
|---|
| 2589 | 2955 |  	} | 
|---|
| 2590 | 2956 |  } | 
|---|
| 2591 | 2957 |   | 
|---|
| 2592 | 2958 |  #ifdef CONFIG_STACKTRACE | 
|---|
| 2593 | 2959 |   | 
|---|
| 2594 |  | -#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))  | 
|---|
 | 2960 | +/* Allow 4 levels of nesting: normal, softirq, irq, NMI */  | 
|---|
 | 2961 | +#define FTRACE_KSTACK_NESTING	4  | 
|---|
 | 2962 | +  | 
|---|
 | 2963 | +#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)  | 
|---|
 | 2964 | +  | 
|---|
| 2595 | 2965 |  struct ftrace_stack { | 
|---|
| 2596 |  | -	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];  | 
|---|
 | 2966 | +	unsigned long		calls[FTRACE_KSTACK_ENTRIES];  | 
|---|
| 2597 | 2967 |  }; | 
|---|
| 2598 | 2968 |   | 
|---|
| 2599 |  | -static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);  | 
|---|
 | 2969 | +  | 
|---|
 | 2970 | +struct ftrace_stacks {  | 
|---|
 | 2971 | +	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];  | 
|---|
 | 2972 | +};  | 
|---|
 | 2973 | +  | 
|---|
 | 2974 | +static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);  | 
|---|
| 2600 | 2975 |  static DEFINE_PER_CPU(int, ftrace_stack_reserve); | 
|---|
| 2601 | 2976 |   | 
|---|
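The nesting scheme replaces the old single per-CPU stack: each context level gets its own slot, reserved by bumping `ftrace_stack_reserve`. A rough userspace analogue of the reservation done in `__ftrace_trace_stack()` below (all names here are hypothetical stand-ins):

```c
/*
 * Rough analogue (not kernel code) of the per-CPU nesting-slot scheme:
 * each context level (task, softirq, irq, NMI) claims its own stack
 * slot by bumping a counter.
 */
#define KSTACK_NESTING	4
#define KSTACK_ENTRIES	256

struct stacks {
	unsigned long calls[KSTACK_NESTING][KSTACK_ENTRIES];
};

static struct stacks cpu_stacks;	/* stand-in for the per-CPU copy */
static int stack_reserve;		/* stand-in for ftrace_stack_reserve */

static unsigned long *claim_stack_slot(void)
{
	/* __this_cpu_inc_return() in the kernel: interrupt-safe locally */
	int stackidx = ++stack_reserve - 1;

	if (stackidx >= KSTACK_NESTING)
		return NULL;	/* "should never happen"; caller skips */
	return cpu_stacks.calls[stackidx];
}

static void release_stack_slot(void)
{
	--stack_reserve;	/* __this_cpu_dec() in the kernel */
}
```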
| 2602 |  | -static void __ftrace_trace_stack(struct ring_buffer *buffer,  | 
|---|
| 2603 |  | -				 unsigned long flags,  | 
|---|
| 2604 |  | -				 int skip, int pc, struct pt_regs *regs)  | 
|---|
 | 2977 | +static void __ftrace_trace_stack(struct trace_buffer *buffer,  | 
|---|
 | 2978 | +				 unsigned int trace_ctx,  | 
|---|
 | 2979 | +				 int skip, struct pt_regs *regs)  | 
|---|
| 2605 | 2980 |  { | 
|---|
| 2606 | 2981 |  	struct trace_event_call *call = &event_kernel_stack; | 
|---|
| 2607 | 2982 |  	struct ring_buffer_event *event; | 
|---|
 | 2983 | +	unsigned int size, nr_entries;  | 
|---|
 | 2984 | +	struct ftrace_stack *fstack;  | 
|---|
| 2608 | 2985 |  	struct stack_entry *entry; | 
|---|
| 2609 |  | -	struct stack_trace trace;  | 
|---|
| 2610 |  | -	int use_stack;  | 
|---|
| 2611 |  | -	int size = FTRACE_STACK_ENTRIES;  | 
|---|
| 2612 |  | -  | 
|---|
| 2613 |  | -	trace.nr_entries	= 0;  | 
|---|
| 2614 |  | -	trace.skip		= skip;  | 
|---|
 | 2986 | +	int stackidx;  | 
|---|
| 2615 | 2987 |   | 
|---|
| 2616 | 2988 |  	/* | 
|---|
| 2617 | 2989 |  	 * Add one, for this function and the call to save_stack_trace() | 
|---|
| .. | .. | 
|---|
| 2619 | 2991 |  	 */ | 
|---|
| 2620 | 2992 |  #ifndef CONFIG_UNWINDER_ORC | 
|---|
| 2621 | 2993 |  	if (!regs) | 
|---|
| 2622 |  | -		trace.skip++;  | 
|---|
 | 2994 | +		skip++;  | 
|---|
| 2623 | 2995 |  #endif | 
|---|
| 2624 | 2996 |   | 
|---|
| 2625 |  | -	/*  | 
|---|
| 2626 |  | -	 * Since events can happen in NMIs there's no safe way to  | 
|---|
| 2627 |  | -	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt  | 
|---|
| 2628 |  | -	 * or NMI comes in, it will just have to use the default  | 
|---|
| 2629 |  | -	 * FTRACE_STACK_SIZE.  | 
|---|
| 2630 |  | -	 */  | 
|---|
| 2631 | 2997 |  	preempt_disable_notrace(); | 
|---|
| 2632 | 2998 |   | 
|---|
| 2633 |  | -	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);  | 
|---|
 | 2999 | +	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;  | 
|---|
 | 3000 | +  | 
|---|
 | 3001 | +	/* This should never happen. If it does, yell once and skip */  | 
|---|
 | 3002 | +	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))  | 
|---|
 | 3003 | +		goto out;  | 
|---|
 | 3004 | +  | 
|---|
| 2634 | 3005 |  	/* | 
|---|
| 2635 |  | -	 * We don't need any atomic variables, just a barrier.  | 
|---|
| 2636 |  | -	 * If an interrupt comes in, we don't care, because it would  | 
|---|
| 2637 |  | -	 * have exited and put the counter back to what we want.  | 
|---|
| 2638 |  | -	 * We just need a barrier to keep gcc from moving things  | 
|---|
| 2639 |  | -	 * around.  | 
|---|
 | 3006 | +	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An  | 
|---|
 | 3007 | +	 * interrupt will either see the value pre increment or post  | 
|---|
 | 3008 | +	 * increment. If the interrupt happens pre increment it will have  | 
|---|
 | 3009 | +	 * restored the counter when it returns.  We just need a barrier to  | 
|---|
 | 3010 | +	 * keep gcc from moving things around.  | 
|---|
| 2640 | 3011 |  	 */ | 
|---|
| 2641 | 3012 |  	barrier(); | 
|---|
| 2642 |  | -	if (use_stack == 1) {  | 
|---|
| 2643 |  | -		trace.entries		= this_cpu_ptr(ftrace_stack.calls);  | 
|---|
| 2644 |  | -		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;  | 
|---|
| 2645 | 3013 |   | 
|---|
| 2646 |  | -		if (regs)  | 
|---|
| 2647 |  | -			save_stack_trace_regs(regs, &trace);  | 
|---|
| 2648 |  | -		else  | 
|---|
| 2649 |  | -			save_stack_trace(&trace);  | 
|---|
 | 3014 | +	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;  | 
|---|
 | 3015 | +	size = ARRAY_SIZE(fstack->calls);  | 
|---|
| 2650 | 3016 |   | 
|---|
| 2651 |  | -		if (trace.nr_entries > size)  | 
|---|
| 2652 |  | -			size = trace.nr_entries;  | 
|---|
| 2653 |  | -	} else  | 
|---|
| 2654 |  | -		/* From now on, use_stack is a boolean */  | 
|---|
| 2655 |  | -		use_stack = 0;  | 
|---|
 | 3017 | +	if (regs) {  | 
|---|
 | 3018 | +		nr_entries = stack_trace_save_regs(regs, fstack->calls,  | 
|---|
 | 3019 | +						   size, skip);  | 
|---|
 | 3020 | +	} else {  | 
|---|
 | 3021 | +		nr_entries = stack_trace_save(fstack->calls, size, skip);  | 
|---|
 | 3022 | +	}  | 
|---|
| 2656 | 3023 |   | 
|---|
| 2657 |  | -	size *= sizeof(unsigned long);  | 
|---|
| 2658 |  | -  | 
|---|
 | 3024 | +	size = nr_entries * sizeof(unsigned long);  | 
|---|
| 2659 | 3025 |  	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, | 
|---|
| 2660 | 3026 |  				    (sizeof(*entry) - sizeof(entry->caller)) + size, | 
|---|
| 2661 |  | -				    flags, pc);  | 
|---|
 | 3027 | +				    trace_ctx);  | 
|---|
| 2662 | 3028 |  	if (!event) | 
|---|
| 2663 | 3029 |  		goto out; | 
|---|
| 2664 | 3030 |  	entry = ring_buffer_event_data(event); | 
|---|
| 2665 | 3031 |   | 
|---|
| 2666 |  | -	memset(&entry->caller, 0, size);  | 
|---|
| 2667 |  | -  | 
|---|
| 2668 |  | -	if (use_stack)  | 
|---|
| 2669 |  | -		memcpy(&entry->caller, trace.entries,  | 
|---|
| 2670 |  | -		       trace.nr_entries * sizeof(unsigned long));  | 
|---|
| 2671 |  | -	else {  | 
|---|
| 2672 |  | -		trace.max_entries	= FTRACE_STACK_ENTRIES;  | 
|---|
| 2673 |  | -		trace.entries		= entry->caller;  | 
|---|
| 2674 |  | -		if (regs)  | 
|---|
| 2675 |  | -			save_stack_trace_regs(regs, &trace);  | 
|---|
| 2676 |  | -		else  | 
|---|
| 2677 |  | -			save_stack_trace(&trace);  | 
|---|
| 2678 |  | -	}  | 
|---|
| 2679 |  | -  | 
|---|
| 2680 |  | -	entry->size = trace.nr_entries;  | 
|---|
 | 3032 | +	memcpy(&entry->caller, fstack->calls, size);  | 
|---|
 | 3033 | +	entry->size = nr_entries;  | 
|---|
| 2681 | 3034 |   | 
|---|
| 2682 | 3035 |  	if (!call_filter_check_discard(call, entry, buffer, event)) | 
|---|
| 2683 | 3036 |  		__buffer_unlock_commit(buffer, event); | 
|---|
| .. | .. | 
|---|
| 2691 | 3044 |  } | 
|---|
| 2692 | 3045 |   | 
|---|
| 2693 | 3046 |  static inline void ftrace_trace_stack(struct trace_array *tr, | 
|---|
| 2694 |  | -				      struct ring_buffer *buffer,  | 
|---|
| 2695 |  | -				      unsigned long flags,  | 
|---|
| 2696 |  | -				      int skip, int pc, struct pt_regs *regs)  | 
|---|
 | 3047 | +				      struct trace_buffer *buffer,  | 
|---|
 | 3048 | +				      unsigned int trace_ctx,  | 
|---|
 | 3049 | +				      int skip, struct pt_regs *regs)  | 
|---|
| 2697 | 3050 |  { | 
|---|
| 2698 | 3051 |  	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) | 
|---|
| 2699 | 3052 |  		return; | 
|---|
| 2700 | 3053 |   | 
|---|
| 2701 |  | -	__ftrace_trace_stack(buffer, flags, skip, pc, regs);  | 
|---|
 | 3054 | +	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);  | 
|---|
| 2702 | 3055 |  } | 
|---|
| 2703 | 3056 |   | 
|---|
| 2704 |  | -void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,  | 
|---|
| 2705 |  | -		   int pc)  | 
|---|
 | 3057 | +void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,  | 
|---|
 | 3058 | +		   int skip)  | 
|---|
| 2706 | 3059 |  { | 
|---|
| 2707 |  | -	struct ring_buffer *buffer = tr->trace_buffer.buffer;  | 
|---|
 | 3060 | +	struct trace_buffer *buffer = tr->array_buffer.buffer;  | 
|---|
| 2708 | 3061 |   | 
|---|
| 2709 | 3062 |  	if (rcu_is_watching()) { | 
|---|
| 2710 |  | -		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);  | 
|---|
 | 3063 | +		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);  | 
|---|
| 2711 | 3064 |  		return; | 
|---|
| 2712 | 3065 |  	} | 
|---|
| 2713 | 3066 |   | 
|---|
| .. | .. | 
|---|
| 2721 | 3074 |  		return; | 
|---|
| 2722 | 3075 |   | 
|---|
| 2723 | 3076 |  	rcu_irq_enter_irqson(); | 
|---|
| 2724 |  | -	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);  | 
|---|
 | 3077 | +	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);  | 
|---|
| 2725 | 3078 |  	rcu_irq_exit_irqson(); | 
|---|
| 2726 | 3079 |  } | 
|---|
| 2727 | 3080 |   | 
|---|
| .. | .. | 
|---|
| 2731 | 3084 |   */ | 
|---|
| 2732 | 3085 |  void trace_dump_stack(int skip) | 
|---|
| 2733 | 3086 |  { | 
|---|
| 2734 |  | -	unsigned long flags;  | 
|---|
| 2735 |  | -  | 
|---|
| 2736 | 3087 |  	if (tracing_disabled || tracing_selftest_running) | 
|---|
| 2737 | 3088 |  		return; | 
|---|
| 2738 |  | -  | 
|---|
| 2739 |  | -	local_save_flags(flags);  | 
|---|
| 2740 | 3089 |   | 
|---|
| 2741 | 3090 |  #ifndef CONFIG_UNWINDER_ORC | 
|---|
| 2742 | 3091 |  	/* Skip 1 to skip this function. */ | 
|---|
| 2743 | 3092 |  	skip++; | 
|---|
| 2744 | 3093 |  #endif | 
|---|
| 2745 |  | -	__ftrace_trace_stack(global_trace.trace_buffer.buffer,  | 
|---|
| 2746 |  | -			     flags, skip, preempt_count(), NULL);  | 
|---|
 | 3094 | +	__ftrace_trace_stack(global_trace.array_buffer.buffer,  | 
|---|
 | 3095 | +			     tracing_gen_ctx(), skip, NULL);  | 
|---|
| 2747 | 3096 |  } | 
|---|
 | 3097 | +EXPORT_SYMBOL_GPL(trace_dump_stack);  | 
|---|
| 2748 | 3098 |   | 
|---|
 | 3099 | +#ifdef CONFIG_USER_STACKTRACE_SUPPORT  | 
|---|
| 2749 | 3100 |  static DEFINE_PER_CPU(int, user_stack_count); | 
|---|
| 2750 | 3101 |   | 
|---|
| 2751 |  | -void  | 
|---|
 | 3102 | +static void  | 
|---|
| 2752 | 3103 |  ftrace_trace_userstack(struct trace_array *tr, | 
|---|
| 2753 |  | -		       struct ring_buffer *buffer, unsigned long flags, int pc)  | 
|---|
 | 3104 | +		       struct trace_buffer *buffer, unsigned int trace_ctx)  | 
|---|
| 2754 | 3105 |  { | 
|---|
| 2755 | 3106 |  	struct trace_event_call *call = &event_user_stack; | 
|---|
| 2756 | 3107 |  	struct ring_buffer_event *event; | 
|---|
| 2757 | 3108 |  	struct userstack_entry *entry; | 
|---|
| 2758 |  | -	struct stack_trace trace;  | 
|---|
| 2759 | 3109 |   | 
|---|
| 2760 | 3110 |  	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) | 
|---|
| 2761 | 3111 |  		return; | 
|---|
| .. | .. | 
|---|
| 2778 | 3128 |  	__this_cpu_inc(user_stack_count); | 
|---|
| 2779 | 3129 |   | 
|---|
| 2780 | 3130 |  	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, | 
|---|
| 2781 |  | -					    sizeof(*entry), flags, pc);  | 
|---|
 | 3131 | +					    sizeof(*entry), trace_ctx);  | 
|---|
| 2782 | 3132 |  	if (!event) | 
|---|
| 2783 | 3133 |  		goto out_drop_count; | 
|---|
| 2784 | 3134 |  	entry	= ring_buffer_event_data(event); | 
|---|
| .. | .. | 
|---|
| 2786 | 3136 |  	entry->tgid		= current->tgid; | 
|---|
| 2787 | 3137 |  	memset(&entry->caller, 0, sizeof(entry->caller)); | 
|---|
| 2788 | 3138 |   | 
|---|
| 2789 |  | -	trace.nr_entries	= 0;  | 
|---|
| 2790 |  | -	trace.max_entries	= FTRACE_STACK_ENTRIES;  | 
|---|
| 2791 |  | -	trace.skip		= 0;  | 
|---|
| 2792 |  | -	trace.entries		= entry->caller;  | 
|---|
| 2793 |  | -  | 
|---|
| 2794 |  | -	save_stack_trace_user(&trace);  | 
|---|
 | 3139 | +	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);  | 
|---|
| 2795 | 3140 |  	if (!call_filter_check_discard(call, entry, buffer, event)) | 
|---|
| 2796 | 3141 |  		__buffer_unlock_commit(buffer, event); | 
|---|
| 2797 | 3142 |   | 
|---|
| .. | .. | 
|---|
| 2800 | 3145 |   out: | 
|---|
| 2801 | 3146 |  	preempt_enable(); | 
|---|
| 2802 | 3147 |  } | 
|---|
| 2803 |  | -  | 
|---|
| 2804 |  | -#ifdef UNUSED  | 
|---|
| 2805 |  | -static void __trace_userstack(struct trace_array *tr, unsigned long flags)  | 
|---|
 | 3148 | +#else /* CONFIG_USER_STACKTRACE_SUPPORT */  | 
|---|
 | 3149 | +static void ftrace_trace_userstack(struct trace_array *tr,  | 
|---|
 | 3150 | +				   struct trace_buffer *buffer,  | 
|---|
 | 3151 | +				   unsigned int trace_ctx)  | 
|---|
| 2806 | 3152 |  { | 
|---|
| 2807 |  | -	ftrace_trace_userstack(tr, flags, preempt_count());  | 
|---|
| 2808 | 3153 |  } | 
|---|
| 2809 |  | -#endif /* UNUSED */  | 
|---|
 | 3154 | +#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */  | 
|---|
| 2810 | 3155 |   | 
|---|
| 2811 | 3156 |  #endif /* CONFIG_STACKTRACE */ | 
|---|
| 2812 | 3157 |   | 
|---|
| .. | .. | 
|---|
| 2847 | 3192 |  { | 
|---|
| 2848 | 3193 |  	struct trace_buffer_struct __percpu *buffers; | 
|---|
| 2849 | 3194 |   | 
|---|
 | 3195 | +	if (trace_percpu_buffer)  | 
|---|
 | 3196 | +		return 0;  | 
|---|
 | 3197 | +  | 
|---|
| 2850 | 3198 |  	buffers = alloc_percpu(struct trace_buffer_struct); | 
|---|
| 2851 |  | -	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))  | 
|---|
 | 3199 | +	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))  | 
|---|
| 2852 | 3200 |  		return -ENOMEM; | 
|---|
| 2853 | 3201 |   | 
|---|
| 2854 | 3202 |  	trace_percpu_buffer = buffers; | 
|---|
| .. | .. | 
|---|
| 2893 | 3241 |  	 * directly here. If the global_trace.buffer is already | 
|---|
| 2894 | 3242 |  	 * allocated here, then this was called by module code. | 
|---|
| 2895 | 3243 |  	 */ | 
|---|
| 2896 |  | -	if (global_trace.trace_buffer.buffer)  | 
|---|
 | 3244 | +	if (global_trace.array_buffer.buffer)  | 
|---|
| 2897 | 3245 |  		tracing_start_cmdline_record(); | 
|---|
| 2898 | 3246 |  } | 
|---|
 | 3247 | +EXPORT_SYMBOL_GPL(trace_printk_init_buffers);  | 
|---|
| 2899 | 3248 |   | 
|---|
| 2900 | 3249 |  void trace_printk_start_comm(void) | 
|---|
| 2901 | 3250 |  { | 
|---|
| .. | .. | 
|---|
| 2918 | 3267 |   | 
|---|
| 2919 | 3268 |  /** | 
|---|
| 2920 | 3269 |   * trace_vbprintk - write binary msg to tracing buffer | 
|---|
| 2921 |  | - *  | 
|---|
 | 3270 | + * @ip:    The address of the caller  | 
|---|
 | 3271 | + * @fmt:   The string format to write to the buffer  | 
|---|
 | 3272 | + * @args:  Arguments for @fmt  | 
|---|
| 2922 | 3273 |   */ | 
|---|
| 2923 | 3274 |  int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | 
|---|
| 2924 | 3275 |  { | 
|---|
| 2925 | 3276 |  	struct trace_event_call *call = &event_bprint; | 
|---|
| 2926 | 3277 |  	struct ring_buffer_event *event; | 
|---|
| 2927 |  | -	struct ring_buffer *buffer;  | 
|---|
 | 3278 | +	struct trace_buffer *buffer;  | 
|---|
| 2928 | 3279 |  	struct trace_array *tr = &global_trace; | 
|---|
| 2929 | 3280 |  	struct bprint_entry *entry; | 
|---|
| 2930 |  | -	unsigned long flags;  | 
|---|
 | 3281 | +	unsigned int trace_ctx;  | 
|---|
| 2931 | 3282 |  	char *tbuffer; | 
|---|
| 2932 |  | -	int len = 0, size, pc;  | 
|---|
 | 3283 | +	int len = 0, size;  | 
|---|
| 2933 | 3284 |   | 
|---|
| 2934 | 3285 |  	if (unlikely(tracing_selftest_running || tracing_disabled)) | 
|---|
| 2935 | 3286 |  		return 0; | 
|---|
| .. | .. | 
|---|
| 2937 | 3288 |  	/* Don't pollute graph traces with trace_vprintk internals */ | 
|---|
| 2938 | 3289 |  	pause_graph_tracing(); | 
|---|
| 2939 | 3290 |   | 
|---|
| 2940 |  | -	pc = preempt_count();  | 
|---|
 | 3291 | +	trace_ctx = tracing_gen_ctx();  | 
|---|
| 2941 | 3292 |  	preempt_disable_notrace(); | 
|---|
| 2942 | 3293 |   | 
|---|
| 2943 | 3294 |  	tbuffer = get_trace_buf(); | 
|---|
| .. | .. | 
|---|
| 2949 | 3300 |  	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); | 
|---|
| 2950 | 3301 |   | 
|---|
| 2951 | 3302 |  	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) | 
|---|
| 2952 |  | -		goto out;  | 
|---|
 | 3303 | +		goto out_put;  | 
|---|
| 2953 | 3304 |   | 
|---|
| 2954 |  | -	local_save_flags(flags);  | 
|---|
| 2955 | 3305 |  	size = sizeof(*entry) + sizeof(u32) * len; | 
|---|
| 2956 |  | -	buffer = tr->trace_buffer.buffer;  | 
|---|
 | 3306 | +	buffer = tr->array_buffer.buffer;  | 
|---|
 | 3307 | +	ring_buffer_nest_start(buffer);  | 
|---|
| 2957 | 3308 |  	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, | 
|---|
| 2958 |  | -					    flags, pc);  | 
|---|
 | 3309 | +					    trace_ctx);  | 
|---|
| 2959 | 3310 |  	if (!event) | 
|---|
| 2960 | 3311 |  		goto out; | 
|---|
| 2961 | 3312 |  	entry = ring_buffer_event_data(event); | 
|---|
| .. | .. | 
|---|
| 2965 | 3316 |  	memcpy(entry->buf, tbuffer, sizeof(u32) * len); | 
|---|
| 2966 | 3317 |  	if (!call_filter_check_discard(call, entry, buffer, event)) { | 
|---|
| 2967 | 3318 |  		__buffer_unlock_commit(buffer, event); | 
|---|
| 2968 |  | -		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);  | 
|---|
 | 3319 | +		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);  | 
|---|
| 2969 | 3320 |  	} | 
|---|
| 2970 | 3321 |   | 
|---|
| 2971 | 3322 |  out: | 
|---|
 | 3323 | +	ring_buffer_nest_end(buffer);  | 
|---|
 | 3324 | +out_put:  | 
|---|
| 2972 | 3325 |  	put_trace_buf(); | 
|---|
| 2973 | 3326 |   | 
|---|
| 2974 | 3327 |  out_nobuffer: | 
|---|
| .. | .. | 
|---|
| 2981 | 3334 |   | 
|---|
| 2982 | 3335 |  __printf(3, 0) | 
|---|
| 2983 | 3336 |  static int | 
|---|
| 2984 |  | -__trace_array_vprintk(struct ring_buffer *buffer,  | 
|---|
 | 3337 | +__trace_array_vprintk(struct trace_buffer *buffer,  | 
|---|
| 2985 | 3338 |  		      unsigned long ip, const char *fmt, va_list args) | 
|---|
| 2986 | 3339 |  { | 
|---|
| 2987 | 3340 |  	struct trace_event_call *call = &event_print; | 
|---|
| 2988 | 3341 |  	struct ring_buffer_event *event; | 
|---|
| 2989 |  | -	int len = 0, size, pc;  | 
|---|
 | 3342 | +	int len = 0, size;  | 
|---|
| 2990 | 3343 |  	struct print_entry *entry; | 
|---|
| 2991 |  | -	unsigned long flags;  | 
|---|
 | 3344 | +	unsigned int trace_ctx;  | 
|---|
| 2992 | 3345 |  	char *tbuffer; | 
|---|
| 2993 | 3346 |   | 
|---|
| 2994 | 3347 |  	if (tracing_disabled || tracing_selftest_running) | 
|---|
| .. | .. | 
|---|
| 2997 | 3350 |  	/* Don't pollute graph traces with trace_vprintk internals */ | 
|---|
| 2998 | 3351 |  	pause_graph_tracing(); | 
|---|
| 2999 | 3352 |   | 
|---|
| 3000 |  | -	pc = preempt_count();  | 
|---|
 | 3353 | +	trace_ctx = tracing_gen_ctx();  | 
|---|
| 3001 | 3354 |  	preempt_disable_notrace(); | 
|---|
| 3002 | 3355 |   | 
|---|
| 3003 | 3356 |   | 
|---|
| .. | .. | 
|---|
| 3009 | 3362 |   | 
|---|
| 3010 | 3363 |  	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); | 
|---|
| 3011 | 3364 |   | 
|---|
| 3012 |  | -	local_save_flags(flags);  | 
|---|
| 3013 | 3365 |  	size = sizeof(*entry) + len + 1; | 
|---|
 | 3366 | +	ring_buffer_nest_start(buffer);  | 
|---|
| 3014 | 3367 |  	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 
|---|
| 3015 |  | -					    flags, pc);  | 
|---|
 | 3368 | +					    trace_ctx);  | 
|---|
| 3016 | 3369 |  	if (!event) | 
|---|
| 3017 | 3370 |  		goto out; | 
|---|
| 3018 | 3371 |  	entry = ring_buffer_event_data(event); | 
|---|
| .. | .. | 
|---|
| 3021 | 3374 |  	memcpy(&entry->buf, tbuffer, len + 1); | 
|---|
| 3022 | 3375 |  	if (!call_filter_check_discard(call, entry, buffer, event)) { | 
|---|
| 3023 | 3376 |  		__buffer_unlock_commit(buffer, event); | 
|---|
| 3024 |  | -		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);  | 
|---|
 | 3377 | +		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);  | 
|---|
| 3025 | 3378 |  	} | 
|---|
| 3026 | 3379 |   | 
|---|
| 3027 | 3380 |  out: | 
|---|
 | 3381 | +	ring_buffer_nest_end(buffer);  | 
|---|
| 3028 | 3382 |  	put_trace_buf(); | 
|---|
| 3029 | 3383 |   | 
|---|
| 3030 | 3384 |  out_nobuffer: | 
|---|
| .. | .. | 
|---|
| 3038 | 3392 |  int trace_array_vprintk(struct trace_array *tr, | 
|---|
| 3039 | 3393 |  			unsigned long ip, const char *fmt, va_list args) | 
|---|
| 3040 | 3394 |  { | 
|---|
| 3041 |  | -	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);  | 
|---|
 | 3395 | +	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);  | 
|---|
| 3042 | 3396 |  } | 
|---|
| 3043 | 3397 |   | 
|---|
 | 3398 | +/**  | 
|---|
 | 3399 | + * trace_array_printk - Print a message to a specific instance  | 
|---|
 | 3400 | + * @tr: The instance trace_array descriptor  | 
|---|
 | 3401 | + * @ip: The instruction pointer that this is called from.  | 
|---|
 | 3402 | + * @fmt: The format to print (printf format)  | 
|---|
 | 3403 | + *  | 
|---|
 | 3404 | + * If a subsystem sets up its own instance, it may printk strings  | 
|---|
 | 3405 | + * into its tracing instance buffer using this  | 
|---|
 | 3406 | + * function. Note, this function will not write into the top level  | 
|---|
 | 3407 | + * buffer (use trace_printk() for that), as writing into the top level  | 
|---|
 | 3408 | + * buffer should only have events that can be individually disabled.  | 
|---|
 | 3409 | + * trace_printk() is only used for debugging a kernel, and should not  | 
|---|
 | 3410 | + * be ever encorporated in normal use.  | 
|---|
 | 3411 | + *  | 
|---|
 | 3412 | + * trace_array_printk() can be used, as it will not add noise to the  | 
|---|
 | 3413 | + * top level tracing buffer.  | 
|---|
 | 3414 | + *  | 
|---|
 | 3415 | + * Note, trace_array_init_printk() must be called on @tr before this  | 
|---|
 | 3416 | + * can be used.  | 
|---|
 | 3417 | + */  | 
|---|
| 3044 | 3418 |  __printf(3, 0) | 
|---|
| 3045 | 3419 |  int trace_array_printk(struct trace_array *tr, | 
|---|
| 3046 | 3420 |  		       unsigned long ip, const char *fmt, ...) | 
|---|
| .. | .. | 
|---|
| 3048 | 3422 |  	int ret; | 
|---|
| 3049 | 3423 |  	va_list ap; | 
|---|
| 3050 | 3424 |   | 
|---|
| 3051 |  | -	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))  | 
|---|
| 3052 |  | -		return 0;  | 
|---|
| 3053 |  | -  | 
|---|
| 3054 | 3425 |  	if (!tr) | 
|---|
| 3055 | 3426 |  		return -ENOENT; | 
|---|
 | 3427 | +  | 
|---|
 | 3428 | +	/* This is only allowed for created instances */  | 
|---|
 | 3429 | +	if (tr == &global_trace)  | 
|---|
 | 3430 | +		return 0;  | 
|---|
 | 3431 | +  | 
|---|
 | 3432 | +	if (!(tr->trace_flags & TRACE_ITER_PRINTK))  | 
|---|
 | 3433 | +		return 0;  | 
|---|
| 3056 | 3434 |   | 
|---|
| 3057 | 3435 |  	va_start(ap, fmt); | 
|---|
| 3058 | 3436 |  	ret = trace_array_vprintk(tr, ip, fmt, ap); | 
|---|
| 3059 | 3437 |  	va_end(ap); | 
|---|
| 3060 | 3438 |  	return ret; | 
|---|
| 3061 | 3439 |  } | 
|---|
 | 3440 | +EXPORT_SYMBOL_GPL(trace_array_printk);  | 
|---|
 | 3441 | +  | 
|---|
 | 3442 | +/**  | 
|---|
 | 3443 | + * trace_array_init_printk - Initialize buffers for trace_array_printk()  | 
|---|
 | 3444 | + * @tr: The trace array to initialize the buffers for  | 
|---|
 | 3445 | + *  | 
|---|
 | 3446 | + * As trace_array_printk() only writes into instances, calls to it are  | 
|---|
 | 3447 | + * OK to keep in the kernel (unlike trace_printk()). This needs to be called  | 
|---|
 | 3448 | + * before trace_array_printk() can be used on a trace_array.  | 
|---|
 | 3449 | + */  | 
|---|
 | 3450 | +int trace_array_init_printk(struct trace_array *tr)  | 
|---|
 | 3451 | +{  | 
|---|
 | 3452 | +	if (!tr)  | 
|---|
 | 3453 | +		return -ENOENT;  | 
|---|
 | 3454 | +  | 
|---|
 | 3455 | +	/* This is only allowed for created instances */  | 
|---|
 | 3456 | +	if (tr == &global_trace)  | 
|---|
 | 3457 | +		return -EINVAL;  | 
|---|
 | 3458 | +  | 
|---|
 | 3459 | +	return alloc_percpu_trace_buffer();  | 
|---|
 | 3460 | +}  | 
|---|
 | 3461 | +EXPORT_SYMBOL_GPL(trace_array_init_printk);  | 
|---|
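Taken together, the two exported functions above give a module a self-contained logging path into its own instance. A hedged usage sketch — the instance name and module shape are hypothetical, and `trace_array_get_by_name()` is assumed to have its contemporary single-argument signature, creating the instance if it does not yet exist:

```c
#include <linux/module.h>
#include <linux/trace.h>

static struct trace_array *my_tr;	/* hypothetical instance handle */

static int __init my_subsys_init(void)
{
	my_tr = trace_array_get_by_name("my_subsys");
	if (!my_tr)
		return -ENOMEM;

	/* Must precede any trace_array_printk() on this instance. */
	return trace_array_init_printk(my_tr);
}

static void my_subsys_log(int val)
{
	/* Lands in instances/my_subsys/trace, not the top-level buffer. */
	trace_array_printk(my_tr, _THIS_IP_, "val=%d\n", val);
}
```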
| 3062 | 3462 |   | 
|---|
| 3063 | 3463 |  __printf(3, 4) | 
|---|
| 3064 |  | -int trace_array_printk_buf(struct ring_buffer *buffer,  | 
|---|
 | 3464 | +int trace_array_printk_buf(struct trace_buffer *buffer,  | 
|---|
| 3065 | 3465 |  			   unsigned long ip, const char *fmt, ...) | 
|---|
| 3066 | 3466 |  { | 
|---|
| 3067 | 3467 |  	int ret; | 
|---|
| .. | .. | 
|---|
| 3089 | 3489 |   | 
|---|
| 3090 | 3490 |  	iter->idx++; | 
|---|
| 3091 | 3491 |  	if (buf_iter) | 
|---|
| 3092 |  | -		ring_buffer_read(buf_iter, NULL);  | 
|---|
 | 3492 | +		ring_buffer_iter_advance(buf_iter);  | 
|---|
| 3093 | 3493 |  } | 
|---|
| 3094 | 3494 |   | 
|---|
| 3095 | 3495 |  static struct trace_entry * | 
|---|
| .. | .. | 
|---|
| 3099 | 3499 |  	struct ring_buffer_event *event; | 
|---|
| 3100 | 3500 |  	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); | 
|---|
| 3101 | 3501 |   | 
|---|
| 3102 |  | -	if (buf_iter)  | 
|---|
 | 3502 | +	if (buf_iter) {  | 
|---|
| 3103 | 3503 |  		event = ring_buffer_iter_peek(buf_iter, ts); | 
|---|
| 3104 |  | -	else  | 
|---|
| 3105 |  | -		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,  | 
|---|
 | 3504 | +		if (lost_events)  | 
|---|
 | 3505 | +			*lost_events = ring_buffer_iter_dropped(buf_iter) ?  | 
|---|
 | 3506 | +				(unsigned long)-1 : 0;  | 
|---|
 | 3507 | +	} else {  | 
|---|
 | 3508 | +		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,  | 
|---|
| 3106 | 3509 |  					 lost_events); | 
|---|
 | 3510 | +	}  | 
|---|
| 3107 | 3511 |   | 
|---|
| 3108 | 3512 |  	if (event) { | 
|---|
| 3109 | 3513 |  		iter->ent_size = ring_buffer_event_length(event); | 
|---|
| .. | .. | 
|---|
| 3117 | 3521 |  __find_next_entry(struct trace_iterator *iter, int *ent_cpu, | 
|---|
| 3118 | 3522 |  		  unsigned long *missing_events, u64 *ent_ts) | 
|---|
| 3119 | 3523 |  { | 
|---|
| 3120 |  | -	struct ring_buffer *buffer = iter->trace_buffer->buffer;  | 
|---|
 | 3524 | +	struct trace_buffer *buffer = iter->array_buffer->buffer;  | 
|---|
| 3121 | 3525 |  	struct trace_entry *ent, *next = NULL; | 
|---|
| 3122 | 3526 |  	unsigned long lost_events = 0, next_lost = 0; | 
|---|
| 3123 | 3527 |  	int cpu_file = iter->cpu_file; | 
|---|
| .. | .. | 
|---|
| 3173 | 3577 |  	return next; | 
|---|
| 3174 | 3578 |  } | 
|---|
| 3175 | 3579 |   | 
|---|
 | 3580 | +#define STATIC_TEMP_BUF_SIZE	128  | 
|---|
 | 3581 | +static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);  | 
|---|
 | 3582 | +  | 
|---|
| 3176 | 3583 |  /* Find the next real entry, without updating the iterator itself */ | 
|---|
| 3177 | 3584 |  struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 
|---|
| 3178 | 3585 |  					  int *ent_cpu, u64 *ent_ts) | 
|---|
| 3179 | 3586 |  { | 
|---|
| 3180 |  | -	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);  | 
|---|
 | 3587 | +	/* __find_next_entry will reset ent_size */  | 
|---|
 | 3588 | +	int ent_size = iter->ent_size;  | 
|---|
 | 3589 | +	struct trace_entry *entry;  | 
|---|
 | 3590 | +  | 
|---|
 | 3591 | +	/*  | 
|---|
 | 3592 | +	 * If called from ftrace_dump(), then the iter->temp buffer  | 
|---|
 | 3593 | +	 * will be the static_temp_buf and not created from kmalloc.  | 
|---|
 | 3594 | +	 * If the entry size is greater than the buffer, we cannot  | 
|---|
 | 3595 | +	 * save it. Just return NULL in that case. This is only  | 
|---|
 | 3596 | +	 * used to add markers when two consecutive events' time  | 
|---|
 | 3597 | +	 * stamps have a large delta. See trace_print_lat_context()  | 
|---|
 | 3598 | +	 */  | 
|---|
 | 3599 | +	if (iter->temp == static_temp_buf &&  | 
|---|
 | 3600 | +	    STATIC_TEMP_BUF_SIZE < ent_size)  | 
|---|
 | 3601 | +		return NULL;  | 
|---|
 | 3602 | +  | 
|---|
 | 3603 | +	/*  | 
|---|
 | 3604 | +	 * The __find_next_entry() may call peek_next_entry(), which may  | 
|---|
 | 3605 | +	 * call ring_buffer_peek() that may make the contents of iter->ent  | 
|---|
 | 3606 | +	 * undefined. Need to copy iter->ent now.  | 
|---|
 | 3607 | +	 */  | 
|---|
 | 3608 | +	if (iter->ent && iter->ent != iter->temp) {  | 
|---|
 | 3609 | +		if ((!iter->temp || iter->temp_size < iter->ent_size) &&  | 
|---|
 | 3610 | +		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {  | 
|---|
 | 3611 | +			void *temp;  | 
|---|
 | 3612 | +			temp = kmalloc(iter->ent_size, GFP_KERNEL);  | 
|---|
 | 3613 | +			if (!temp)  | 
|---|
 | 3614 | +				return NULL;  | 
|---|
 | 3615 | +			kfree(iter->temp);  | 
|---|
 | 3616 | +			iter->temp = temp;  | 
|---|
 | 3617 | +			iter->temp_size = iter->ent_size;  | 
|---|
 | 3618 | +		}  | 
|---|
 | 3619 | +		memcpy(iter->temp, iter->ent, iter->ent_size);  | 
|---|
 | 3620 | +		iter->ent = iter->temp;  | 
|---|
 | 3621 | +	}  | 
|---|
 | 3622 | +	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);  | 
|---|
 | 3623 | +	/* Put back the original ent_size */  | 
|---|
 | 3624 | +	iter->ent_size = ent_size;  | 
|---|
 | 3625 | +  | 
|---|
 | 3626 | +	return entry;  | 
|---|
| 3181 | 3627 |  } | 
|---|
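The copy-before-peek logic above amounts to a grow-on-demand scratch buffer with one hard rule: never try to grow the static fallback that `ftrace_dump()` hands out. Reduced to its essentials as a hypothetical helper (a sketch, not code from this patch):

```c
/*
 * Sketch of the scratch-buffer rule in trace_find_next_entry() above:
 * grow iter->temp only when the entry outgrows it, and never grow the
 * static buffer used by ftrace_dump().
 */
static void *ensure_temp(struct trace_iterator *iter, unsigned int need)
{
	void *temp;

	if (iter->temp && iter->temp_size >= need)
		return iter->temp;
	if (WARN_ON_ONCE(iter->temp == static_temp_buf))
		return NULL;		/* static buffer: cannot grow */

	temp = kmalloc(need, GFP_KERNEL);
	if (!temp)
		return NULL;
	kfree(iter->temp);
	iter->temp = temp;
	iter->temp_size = need;
	return temp;
}
```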
| 3182 | 3628 |   | 
|---|
| 3183 | 3629 |  /* Find the next real entry, and increment the iterator to the next entry */ | 
|---|
| .. | .. | 
|---|
| 3194 | 3640 |   | 
|---|
| 3195 | 3641 |  static void trace_consume(struct trace_iterator *iter) | 
|---|
| 3196 | 3642 |  { | 
|---|
| 3197 |  | -	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,  | 
|---|
 | 3643 | +	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,  | 
|---|
| 3198 | 3644 |  			    &iter->lost_events); | 
|---|
| 3199 | 3645 |  } | 
|---|
| 3200 | 3646 |   | 
|---|
| .. | .. | 
|---|
| 3227 | 3673 |   | 
|---|
| 3228 | 3674 |  void tracing_iter_reset(struct trace_iterator *iter, int cpu) | 
|---|
| 3229 | 3675 |  { | 
|---|
| 3230 |  | -	struct ring_buffer_event *event;  | 
|---|
| 3231 | 3676 |  	struct ring_buffer_iter *buf_iter; | 
|---|
| 3232 | 3677 |  	unsigned long entries = 0; | 
|---|
| 3233 | 3678 |  	u64 ts; | 
|---|
| 3234 | 3679 |   | 
|---|
| 3235 |  | -	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;  | 
|---|
 | 3680 | +	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;  | 
|---|
| 3236 | 3681 |   | 
|---|
| 3237 | 3682 |  	buf_iter = trace_buffer_iter(iter, cpu); | 
|---|
| 3238 | 3683 |  	if (!buf_iter) | 
|---|
| .. | .. | 
|---|
| 3245 | 3690 |  	 * that a reset never took place on a cpu. This is evident | 
|---|
| 3246 | 3691 |  	 * by the timestamp being before the start of the buffer. | 
|---|
| 3247 | 3692 |  	 */ | 
|---|
| 3248 |  | -	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {  | 
|---|
| 3249 |  | -		if (ts >= iter->trace_buffer->time_start)  | 
|---|
 | 3693 | +	while (ring_buffer_iter_peek(buf_iter, &ts)) {  | 
|---|
 | 3694 | +		if (ts >= iter->array_buffer->time_start)  | 
|---|
| 3250 | 3695 |  			break; | 
|---|
| 3251 | 3696 |  		entries++; | 
|---|
| 3252 |  | -		ring_buffer_read(buf_iter, NULL);  | 
|---|
 | 3697 | +		ring_buffer_iter_advance(buf_iter);  | 
|---|
| 3253 | 3698 |  	} | 
|---|
| 3254 | 3699 |   | 
|---|
| 3255 |  | -	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;  | 
|---|
 | 3700 | +	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;  | 
|---|
| 3256 | 3701 |  } | 
|---|
| 3257 | 3702 |   | 
|---|
| 3258 | 3703 |  /* | 
|---|
| .. | .. | 
|---|
| 3331 | 3776 |  } | 
|---|
| 3332 | 3777 |   | 
|---|
| 3333 | 3778 |  static void | 
|---|
| 3334 |  | -get_total_entries(struct trace_buffer *buf,  | 
|---|
| 3335 |  | -		  unsigned long *total, unsigned long *entries)  | 
|---|
 | 3779 | +get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,  | 
|---|
 | 3780 | +		      unsigned long *entries, int cpu)  | 
|---|
| 3336 | 3781 |  { | 
|---|
| 3337 | 3782 |  	unsigned long count; | 
|---|
 | 3783 | +  | 
|---|
 | 3784 | +	count = ring_buffer_entries_cpu(buf->buffer, cpu);  | 
|---|
 | 3785 | +	/*  | 
|---|
 | 3786 | +	 * If this buffer has skipped entries, then we hold all  | 
|---|
 | 3787 | +	 * entries for the trace and we need to ignore the  | 
|---|
 | 3788 | +	 * ones before the time stamp.  | 
|---|
 | 3789 | +	 */  | 
|---|
 | 3790 | +	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {  | 
|---|
 | 3791 | +		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;  | 
|---|
 | 3792 | +		/* total is the same as the entries */  | 
|---|
 | 3793 | +		*total = count;  | 
|---|
 | 3794 | +	} else  | 
|---|
 | 3795 | +		*total = count +  | 
|---|
 | 3796 | +			ring_buffer_overrun_cpu(buf->buffer, cpu);  | 
|---|
 | 3797 | +	*entries = count;  | 
|---|
 | 3798 | +}  | 
|---|
 | 3799 | +  | 
|---|
 | 3800 | +static void  | 
|---|
 | 3801 | +get_total_entries(struct array_buffer *buf,  | 
|---|
 | 3802 | +		  unsigned long *total, unsigned long *entries)  | 
|---|
 | 3803 | +{  | 
|---|
 | 3804 | +	unsigned long t, e;  | 
|---|
| 3338 | 3805 |  	int cpu; | 
|---|
| 3339 | 3806 |   | 
|---|
| 3340 | 3807 |  	*total = 0; | 
|---|
| 3341 | 3808 |  	*entries = 0; | 
|---|
| 3342 | 3809 |   | 
|---|
| 3343 | 3810 |  	for_each_tracing_cpu(cpu) { | 
|---|
| 3344 |  | -		count = ring_buffer_entries_cpu(buf->buffer, cpu);  | 
|---|
| 3345 |  | -		/*  | 
|---|
| 3346 |  | -		 * If this buffer has skipped entries, then we hold all  | 
|---|
| 3347 |  | -		 * entries for the trace and we need to ignore the  | 
|---|
| 3348 |  | -		 * ones before the time stamp.  | 
|---|
| 3349 |  | -		 */  | 
|---|
| 3350 |  | -		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {  | 
|---|
| 3351 |  | -			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;  | 
|---|
| 3352 |  | -			/* total is the same as the entries */  | 
|---|
| 3353 |  | -			*total += count;  | 
|---|
| 3354 |  | -		} else  | 
|---|
| 3355 |  | -			*total += count +  | 
|---|
| 3356 |  | -				ring_buffer_overrun_cpu(buf->buffer, cpu);  | 
|---|
| 3357 |  | -		*entries += count;  | 
|---|
 | 3811 | +		get_total_entries_cpu(buf, &t, &e, cpu);  | 
|---|
 | 3812 | +		*total += t;  | 
|---|
 | 3813 | +		*entries += e;  | 
|---|
| 3358 | 3814 |  	} | 
|---|
 | 3815 | +}  | 
|---|
 | 3816 | +  | 
|---|
 | 3817 | +unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)  | 
|---|
 | 3818 | +{  | 
|---|
 | 3819 | +	unsigned long total, entries;  | 
|---|
 | 3820 | +  | 
|---|
 | 3821 | +	if (!tr)  | 
|---|
 | 3822 | +		tr = &global_trace;  | 
|---|
 | 3823 | +  | 
|---|
 | 3824 | +	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);  | 
|---|
 | 3825 | +  | 
|---|
 | 3826 | +	return entries;  | 
|---|
 | 3827 | +}  | 
|---|
 | 3828 | +  | 
|---|
 | 3829 | +unsigned long trace_total_entries(struct trace_array *tr)  | 
|---|
 | 3830 | +{  | 
|---|
 | 3831 | +	unsigned long total, entries;  | 
|---|
 | 3832 | +  | 
|---|
 | 3833 | +	if (!tr)  | 
|---|
 | 3834 | +		tr = &global_trace;  | 
|---|
 | 3835 | +  | 
|---|
 | 3836 | +	get_total_entries(&tr->array_buffer, &total, &entries);  | 
|---|
 | 3837 | +  | 
|---|
 | 3838 | +	return entries;  | 
|---|
| 3359 | 3839 |  } | 
|---|
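The two new entry counters wrap the refactored per-CPU helper for callers outside this file. A hedged one-liner of their intended use — passing NULL selects global_trace, per the code above:

```c
/* How a caller might size a dump before emitting it (sketch only). */
unsigned long all_cpus = trace_total_entries(NULL);        /* whole buffer */
unsigned long cpu0     = trace_total_entries_cpu(NULL, 0); /* one CPU */
```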
| 3360 | 3840 |   | 
|---|
| 3361 | 3841 |  static void print_lat_help_header(struct seq_file *m) | 
|---|
| 3362 | 3842 |  { | 
|---|
| 3363 |  | -	seq_puts(m, "#                  _------=> CPU#            \n"  | 
|---|
| 3364 |  | -		    "#                 / _-----=> irqs-off        \n"  | 
|---|
| 3365 |  | -		    "#                | / _----=> need-resched    \n"  | 
|---|
| 3366 |  | -		    "#                || / _---=> hardirq/softirq \n"  | 
|---|
| 3367 |  | -		    "#                ||| / _--=> preempt-depth   \n"  | 
|---|
| 3368 |  | -		    "#                |||| /     delay            \n"  | 
|---|
| 3369 |  | -		    "#  cmd     pid   ||||| time  |   caller      \n"  | 
|---|
| 3370 |  | -		    "#     \\   /      |||||  \\    |   /         \n");  | 
|---|
 | 3843 | +	seq_puts(m, "#                    _--------=> CPU#            \n"  | 
|---|
 | 3844 | +		    "#                   / _-------=> irqs-off        \n"  | 
|---|
 | 3845 | +		    "#                  | / _------=> need-resched    \n"  | 
|---|
 | 3846 | +		    "#                  || / _-----=> need-resched-lazy\n"  | 
|---|
 | 3847 | +		    "#                  ||| / _----=> hardirq/softirq \n"  | 
|---|
 | 3848 | +		    "#                  |||| / _---=> preempt-depth   \n"  | 
|---|
 | 3849 | +		    "#                  ||||| / _--=> preempt-lazy-depth\n"  | 
|---|
 | 3850 | +		    "#                  |||||| / _-=> migrate-disable \n"  | 
|---|
 | 3851 | +		    "#                  ||||||| /     delay           \n"  | 
|---|
 | 3852 | +		    "#  cmd     pid     |||||||| time  |   caller     \n"  | 
|---|
 | 3853 | +		    "#     \\   /        ||||||||  \\    |    /       \n");  | 
|---|
| 3371 | 3854 |  } | 
|---|
| 3372 | 3855 |   | 
|---|
| 3373 |  | -static void print_event_info(struct trace_buffer *buf, struct seq_file *m)  | 
|---|
 | 3856 | +static void print_event_info(struct array_buffer *buf, struct seq_file *m)  | 
|---|
| 3374 | 3857 |  { | 
|---|
| 3375 | 3858 |  	unsigned long total; | 
|---|
| 3376 | 3859 |  	unsigned long entries; | 
|---|
| .. | .. | 
|---|
| 3381 | 3864 |  	seq_puts(m, "#\n"); | 
|---|
| 3382 | 3865 |  } | 
|---|
| 3383 | 3866 |   | 
|---|
| 3384 |  | -static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,  | 
|---|
 | 3867 | +static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,  | 
|---|
| 3385 | 3868 |  				   unsigned int flags) | 
|---|
| 3386 | 3869 |  { | 
|---|
| 3387 | 3870 |  	bool tgid = flags & TRACE_ITER_RECORD_TGID; | 
|---|
| 3388 | 3871 |   | 
|---|
| 3389 | 3872 |  	print_event_info(buf, m); | 
|---|
| 3390 | 3873 |   | 
|---|
| 3391 |  | -	seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");  | 
|---|
| 3392 |  | -	seq_printf(m, "#              | |     %s    |       |         |\n",	 tgid ? "  |      " : "");  | 
|---|
 | 3874 | +	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");  | 
|---|
 | 3875 | +	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");  | 
|---|
| 3393 | 3876 |  } | 
|---|
| 3394 | 3877 |   | 
|---|
| 3395 |  | -static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,  | 
|---|
 | 3878 | +static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,  | 
|---|
| 3396 | 3879 |  				       unsigned int flags) | 
|---|
| 3397 | 3880 |  { | 
|---|
| 3398 | 3881 |  	bool tgid = flags & TRACE_ITER_RECORD_TGID; | 
|---|
| 3399 |  | -	const char tgid_space[] = "          ";  | 
|---|
| 3400 |  | -	const char space[] = "  ";  | 
|---|
 | 3882 | +	const char *space = "            ";  | 
|---|
 | 3883 | +	int prec = tgid ? 12 : 2;  | 
|---|
| 3401 | 3884 |   | 
|---|
| 3402 | 3885 |  	print_event_info(buf, m); | 
|---|
| 3403 | 3886 |   | 
|---|
| 3404 |  | -	seq_printf(m, "#                          %s  _-----=> irqs-off\n",  | 
|---|
| 3405 |  | -		   tgid ? tgid_space : space);  | 
|---|
| 3406 |  | -	seq_printf(m, "#                          %s / _----=> need-resched\n",  | 
|---|
| 3407 |  | -		   tgid ? tgid_space : space);  | 
|---|
| 3408 |  | -	seq_printf(m, "#                          %s| / _---=> hardirq/softirq\n",  | 
|---|
| 3409 |  | -		   tgid ? tgid_space : space);  | 
|---|
| 3410 |  | -	seq_printf(m, "#                          %s|| / _--=> preempt-depth\n",  | 
|---|
| 3411 |  | -		   tgid ? tgid_space : space);  | 
|---|
| 3412 |  | -	seq_printf(m, "#                          %s||| /     delay\n",  | 
|---|
| 3413 |  | -		   tgid ? tgid_space : space);  | 
|---|
| 3414 |  | -	seq_printf(m, "#           TASK-PID %sCPU#  ||||    TIMESTAMP  FUNCTION\n",  | 
|---|
| 3415 |  | -		   tgid ? "   TGID   " : space);  | 
|---|
| 3416 |  | -	seq_printf(m, "#              | |   %s  |   ||||       |         |\n",  | 
|---|
| 3417 |  | -		   tgid ? "     |    " : space);  | 
|---|
 | 3887 | +	seq_printf(m, "#                            %.*s  _-------=> irqs-off\n", prec, space);  | 
|---|
 | 3888 | +	seq_printf(m, "#                            %.*s / _------=> need-resched\n", prec, space);  | 
|---|
 | 3889 | +	seq_printf(m, "#                            %.*s| / _-----=> need-resched-lazy\n", prec, space);  | 
|---|
 | 3890 | +	seq_printf(m, "#                            %.*s|| / _----=> hardirq/softirq\n", prec, space);  | 
|---|
 | 3891 | +	seq_printf(m, "#                            %.*s||| / _---=> preempt-depth\n", prec, space);  | 
|---|
 | 3892 | +	seq_printf(m, "#                            %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);  | 
|---|
 | 3893 | +	seq_printf(m, "#                            %.*s||||| / _-=> migrate-disable\n", prec, space);  | 
|---|
 | 3894 | +	seq_printf(m, "#                            %.*s|||||| /     delay\n", prec, space);  | 
|---|
 | 3895 | +	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");  | 
|---|
 | 3896 | +	seq_printf(m, "#              | |    %.*s   |   |||||||      |         |\n", prec, "       |    ");  | 
|---|
| 3418 | 3897 |  } | 
|---|
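Replacing the `tgid_space`/`space` pair with a single padding string plus `%.*s` removes the repeated ternaries: the `prec` argument caps how many characters of the string argument are printed (12 columns when the TGID field is shown, 2 otherwise). A standalone illustration of the conversion:

```c
/* The %.*s idiom used above: prec limits how much of the string is printed. */
#include <stdio.h>

int main(void)
{
	const char *space = "            ";	/* 12 spaces, as in the patch */

	printf("#%.*s| irqs-off\n", 2, space);	/* no TGID column */
	printf("#%.*s| irqs-off\n", 12, space);	/* TGID column on */
	return 0;
}
```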
| 3419 | 3898 |   | 
|---|
| 3420 | 3899 |  void | 
|---|
| 3421 | 3900 |  print_trace_header(struct seq_file *m, struct trace_iterator *iter) | 
|---|
| 3422 | 3901 |  { | 
|---|
| 3423 | 3902 |  	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); | 
|---|
| 3424 |  | -	struct trace_buffer *buf = iter->trace_buffer;  | 
|---|
 | 3903 | +	struct array_buffer *buf = iter->array_buffer;  | 
|---|
| 3425 | 3904 |  	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); | 
|---|
| 3426 | 3905 |  	struct tracer *type = iter->trace; | 
|---|
| 3427 | 3906 |  	unsigned long entries; | 
|---|
| .. | .. | 
|---|
| 3448 | 3927 |  		   "desktop", | 
|---|
| 3449 | 3928 |  #elif defined(CONFIG_PREEMPT) | 
|---|
| 3450 | 3929 |  		   "preempt", | 
|---|
 | 3930 | +#elif defined(CONFIG_PREEMPT_RT)  | 
|---|
 | 3931 | +		   "preempt_rt",  | 
|---|
| 3451 | 3932 |  #else | 
|---|
| 3452 | 3933 |  		   "unknown", | 
|---|
| 3453 | 3934 |  #endif | 
|---|
| .. | .. | 
|---|
| 3494 | 3975 |  	    cpumask_test_cpu(iter->cpu, iter->started)) | 
|---|
| 3495 | 3976 |  		return; | 
|---|
| 3496 | 3977 |   | 
|---|
| 3497 |  | -	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)  | 
|---|
 | 3978 | +	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)  | 
|---|
| 3498 | 3979 |  		return; | 
|---|
| 3499 | 3980 |   | 
|---|
| 3500 | 3981 |  	if (cpumask_available(iter->started)) | 
|---|
| .. | .. | 
|---|
| 3628 | 4109 |  			if (!ring_buffer_iter_empty(buf_iter)) | 
|---|
| 3629 | 4110 |  				return 0; | 
|---|
| 3630 | 4111 |  		} else { | 
|---|
| 3631 |  | -			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))  | 
|---|
 | 4112 | +			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))  | 
|---|
| 3632 | 4113 |  				return 0; | 
|---|
| 3633 | 4114 |  		} | 
|---|
| 3634 | 4115 |  		return 1; | 
|---|
| .. | .. | 
|---|
| 3640 | 4121 |  			if (!ring_buffer_iter_empty(buf_iter)) | 
|---|
| 3641 | 4122 |  				return 0; | 
|---|
| 3642 | 4123 |  		} else { | 
|---|
| 3643 |  | -			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))  | 
|---|
 | 4124 | +			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))  | 
|---|
| 3644 | 4125 |  				return 0; | 
|---|
| 3645 | 4126 |  		} | 
|---|
| 3646 | 4127 |  	} | 
|---|
| .. | .. | 
|---|
| 3656 | 4137 |  	enum print_line_t ret; | 
|---|
| 3657 | 4138 |   | 
|---|
| 3658 | 4139 |  	if (iter->lost_events) { | 
|---|
| 3659 |  | -		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",  | 
|---|
| 3660 |  | -				 iter->cpu, iter->lost_events);  | 
|---|
 | 4140 | +		if (iter->lost_events == (unsigned long)-1)  | 
|---|
 | 4141 | +			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",  | 
|---|
 | 4142 | +					 iter->cpu);  | 
|---|
 | 4143 | +		else  | 
|---|
 | 4144 | +			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",  | 
|---|
 | 4145 | +					 iter->cpu, iter->lost_events);  | 
|---|
| 3661 | 4146 |  		if (trace_seq_has_overflowed(&iter->seq)) | 
|---|
| 3662 | 4147 |  			return TRACE_TYPE_PARTIAL_LINE; | 
|---|
| 3663 | 4148 |  	} | 
|---|
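`lost_events` gains `(unsigned long)-1` as a sentinel meaning events were dropped but the ring buffer could not say how many, so the message omits the count instead of printing a bogus huge number. A small model of the reporting logic:

```c
/* Model of the lost-events reporting; sentinel semantics as used above. */
#include <stdio.h>

static void report_lost(int cpu, unsigned long lost_events)
{
	if (!lost_events)
		return;
	if (lost_events == (unsigned long)-1)		/* count unknown */
		printf("CPU:%d [LOST EVENTS]\n", cpu);
	else
		printf("CPU:%d [LOST %lu EVENTS]\n", cpu, lost_events);
}

int main(void)
{
	report_lost(0, 42);
	report_lost(1, (unsigned long)-1);
	return 0;
}
```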
| .. | .. | 
|---|
| 3730 | 4215 |  	} else { | 
|---|
| 3731 | 4216 |  		if (!(trace_flags & TRACE_ITER_VERBOSE)) { | 
|---|
| 3732 | 4217 |  			if (trace_flags & TRACE_ITER_IRQ_INFO) | 
|---|
| 3733 |  | -				print_func_help_header_irq(iter->trace_buffer,  | 
|---|
 | 4218 | +				print_func_help_header_irq(iter->array_buffer,  | 
|---|
| 3734 | 4219 |  							   m, trace_flags); | 
|---|
| 3735 | 4220 |  			else | 
|---|
| 3736 |  | -				print_func_help_header(iter->trace_buffer, m,  | 
|---|
 | 4221 | +				print_func_help_header(iter->array_buffer, m,  | 
|---|
| 3737 | 4222 |  						       trace_flags); | 
|---|
| 3738 | 4223 |  		} | 
|---|
| 3739 | 4224 |  	} | 
|---|
| .. | .. | 
|---|
| 3873 | 4358 |  		goto release; | 
|---|
| 3874 | 4359 |   | 
|---|
| 3875 | 4360 |  	/* | 
|---|
 | 4361 | +	 * trace_find_next_entry() may need to save off iter->ent.  | 
|---|
 | 4362 | +	 * It will place it into the iter->temp buffer. As most  | 
|---|
 | 4363 | +	 * events are smaller than 128 bytes, allocate a buffer of that size.  | 
|---|
 | 4364 | +	 * If one is greater, then trace_find_next_entry() will  | 
|---|
 | 4365 | +	 * allocate a new buffer to adjust for the bigger iter->ent.  | 
|---|
 | 4366 | +	 * It's not critical if it fails to get allocated here.  | 
|---|
 | 4367 | +	 */  | 
|---|
 | 4368 | +	iter->temp = kmalloc(128, GFP_KERNEL);  | 
|---|
 | 4369 | +	if (iter->temp)  | 
|---|
 | 4370 | +		iter->temp_size = 128;  | 
|---|
 | 4371 | +  | 
|---|
 | 4372 | +	/*  | 
|---|
| 3876 | 4373 |  	 * We make a copy of the current tracer to avoid concurrent | 
|---|
| 3877 | 4374 |  	 * changes on it while we are reading. | 
|---|
| 3878 | 4375 |  	 */ | 
|---|
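The `iter->temp` comment above describes a classic size-for-the-common-case strategy: preallocate 128 bytes at open time, let the consumer reallocate when a rare oversized event shows up, and tolerate allocation failure at open. A userspace sketch of the grow-on-demand half (the `ensure_temp()` helper is hypothetical, not a kernel function):

```c
#include <stdlib.h>

struct iter {
	void   *temp;
	size_t  temp_size;
};

/* Hypothetical helper mirroring what trace_find_next_entry() must do. */
static void *ensure_temp(struct iter *it, size_t need)
{
	if (need > it->temp_size) {
		void *p = realloc(it->temp, need);
		if (!p)
			return NULL;		/* caller copes, as in the patch */
		it->temp = p;
		it->temp_size = need;
	}
	return it->temp;
}

int main(void)
{
	struct iter it = { malloc(128), 128 };	/* common case preallocated */

	ensure_temp(&it, 4096);			/* rare big event grows it */
	free(it.temp);
	return 0;
}
```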
| .. | .. | 
|---|
| 3891 | 4388 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 3892 | 4389 |  	/* Currently only the top directory has a snapshot */ | 
|---|
| 3893 | 4390 |  	if (tr->current_trace->print_max || snapshot) | 
|---|
| 3894 |  | -		iter->trace_buffer = &tr->max_buffer;  | 
|---|
 | 4391 | +		iter->array_buffer = &tr->max_buffer;  | 
|---|
| 3895 | 4392 |  	else | 
|---|
| 3896 | 4393 |  #endif | 
|---|
| 3897 |  | -		iter->trace_buffer = &tr->trace_buffer;  | 
|---|
 | 4394 | +		iter->array_buffer = &tr->array_buffer;  | 
|---|
| 3898 | 4395 |  	iter->snapshot = snapshot; | 
|---|
| 3899 | 4396 |  	iter->pos = -1; | 
|---|
| 3900 | 4397 |  	iter->cpu_file = tracing_get_cpu(inode); | 
|---|
| 3901 | 4398 |  	mutex_init(&iter->mutex); | 
|---|
| 3902 | 4399 |   | 
|---|
| 3903 | 4400 |  	/* Notify the tracer early; before we stop tracing. */ | 
|---|
| 3904 |  | -	if (iter->trace && iter->trace->open)  | 
|---|
 | 4401 | +	if (iter->trace->open)  | 
|---|
| 3905 | 4402 |  		iter->trace->open(iter); | 
|---|
| 3906 | 4403 |   | 
|---|
| 3907 | 4404 |  	/* Annotate start of buffers if we had overruns */ | 
|---|
| 3908 |  | -	if (ring_buffer_overruns(iter->trace_buffer->buffer))  | 
|---|
 | 4405 | +	if (ring_buffer_overruns(iter->array_buffer->buffer))  | 
|---|
| 3909 | 4406 |  		iter->iter_flags |= TRACE_FILE_ANNOTATE; | 
|---|
| 3910 | 4407 |   | 
|---|
| 3911 | 4408 |  	/* Output in nanoseconds only if we are using a clock in nanoseconds. */ | 
|---|
| 3912 | 4409 |  	if (trace_clocks[tr->clock_id].in_ns) | 
|---|
| 3913 | 4410 |  		iter->iter_flags |= TRACE_FILE_TIME_IN_NS; | 
|---|
| 3914 | 4411 |   | 
|---|
| 3915 |  | -	/* stop the trace while dumping if we are not opening "snapshot" */  | 
|---|
| 3916 |  | -	if (!iter->snapshot)  | 
|---|
 | 4412 | +	/*  | 
|---|
 | 4413 | +	 * If pause-on-trace is enabled, then stop the trace while  | 
|---|
 | 4414 | +	 * dumping, unless this is the "snapshot" file  | 
|---|
 | 4415 | +	 */  | 
|---|
 | 4416 | +	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))  | 
|---|
| 3917 | 4417 |  		tracing_stop_tr(tr); | 
|---|
| 3918 | 4418 |   | 
|---|
| 3919 | 4419 |  	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { | 
|---|
| 3920 | 4420 |  		for_each_tracing_cpu(cpu) { | 
|---|
| 3921 | 4421 |  			iter->buffer_iter[cpu] = | 
|---|
| 3922 |  | -				ring_buffer_read_prepare(iter->trace_buffer->buffer,  | 
|---|
 | 4422 | +				ring_buffer_read_prepare(iter->array_buffer->buffer,  | 
|---|
| 3923 | 4423 |  							 cpu, GFP_KERNEL); | 
|---|
| 3924 | 4424 |  		} | 
|---|
| 3925 | 4425 |  		ring_buffer_read_prepare_sync(); | 
|---|
| .. | .. | 
|---|
| 3930 | 4430 |  	} else { | 
|---|
| 3931 | 4431 |  		cpu = iter->cpu_file; | 
|---|
| 3932 | 4432 |  		iter->buffer_iter[cpu] = | 
|---|
| 3933 |  | -			ring_buffer_read_prepare(iter->trace_buffer->buffer,  | 
|---|
 | 4433 | +			ring_buffer_read_prepare(iter->array_buffer->buffer,  | 
|---|
| 3934 | 4434 |  						 cpu, GFP_KERNEL); | 
|---|
| 3935 | 4435 |  		ring_buffer_read_prepare_sync(); | 
|---|
| 3936 | 4436 |  		ring_buffer_read_start(iter->buffer_iter[cpu]); | 
|---|
| .. | .. | 
|---|
| 3944 | 4444 |   fail: | 
|---|
| 3945 | 4445 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 3946 | 4446 |  	kfree(iter->trace); | 
|---|
 | 4447 | +	kfree(iter->temp);  | 
|---|
| 3947 | 4448 |  	kfree(iter->buffer_iter); | 
|---|
| 3948 | 4449 |  release: | 
|---|
| 3949 | 4450 |  	seq_release_private(inode, file); | 
|---|
| .. | .. | 
|---|
| 3952 | 4453 |   | 
|---|
| 3953 | 4454 |  int tracing_open_generic(struct inode *inode, struct file *filp) | 
|---|
| 3954 | 4455 |  { | 
|---|
| 3955 |  | -	if (tracing_disabled)  | 
|---|
| 3956 |  | -		return -ENODEV;  | 
|---|
 | 4456 | +	int ret;  | 
|---|
 | 4457 | +  | 
|---|
 | 4458 | +	ret = tracing_check_open_get_tr(NULL);  | 
|---|
 | 4459 | +	if (ret)  | 
|---|
 | 4460 | +		return ret;  | 
|---|
| 3957 | 4461 |   | 
|---|
| 3958 | 4462 |  	filp->private_data = inode->i_private; | 
|---|
| 3959 | 4463 |  	return 0; | 
|---|
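Every open handler in this patch replaces the repeated `tracing_disabled` test plus `trace_array_get()` with one call to `tracing_check_open_get_tr()`. The helper's body is not part of this hunk; a plausible reconstruction, consistent with how the call sites use it (a NULL tr skips the refcount, and a tracefs lockdown check is plausibly the third ingredient):

```c
/* Hedged reconstruction -- the real body is outside this hunk. */
int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);	/* assumed lockdown gate */
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}
```

Either way, the consolidation means new checks land in one place instead of being copy-pasted into a dozen open handlers.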
| .. | .. | 
|---|
| 3968 | 4472 |   * Open and update trace_array ref count. | 
|---|
| 3969 | 4473 |   * Must have the current trace_array passed to it. | 
|---|
| 3970 | 4474 |   */ | 
|---|
| 3971 |  | -static int tracing_open_generic_tr(struct inode *inode, struct file *filp)  | 
|---|
 | 4475 | +int tracing_open_generic_tr(struct inode *inode, struct file *filp)  | 
|---|
| 3972 | 4476 |  { | 
|---|
| 3973 | 4477 |  	struct trace_array *tr = inode->i_private; | 
|---|
 | 4478 | +	int ret;  | 
|---|
| 3974 | 4479 |   | 
|---|
| 3975 |  | -	if (tracing_disabled)  | 
|---|
| 3976 |  | -		return -ENODEV;  | 
|---|
| 3977 |  | -  | 
|---|
| 3978 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 3979 |  | -		return -ENODEV;  | 
|---|
 | 4480 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 4481 | +	if (ret)  | 
|---|
 | 4482 | +		return ret;  | 
|---|
| 3980 | 4483 |   | 
|---|
| 3981 | 4484 |  	filp->private_data = inode->i_private; | 
|---|
| 3982 | 4485 |   | 
|---|
| .. | .. | 
|---|
| 4007 | 4510 |  	if (iter->trace && iter->trace->close) | 
|---|
| 4008 | 4511 |  		iter->trace->close(iter); | 
|---|
| 4009 | 4512 |   | 
|---|
| 4010 |  | -	if (!iter->snapshot)  | 
|---|
 | 4513 | +	if (!iter->snapshot && tr->stop_count)  | 
|---|
| 4011 | 4514 |  		/* reenable tracing if it was previously enabled */ | 
|---|
| 4012 | 4515 |  		tracing_start_tr(tr); | 
|---|
| 4013 | 4516 |   | 
|---|
| .. | .. | 
|---|
| 4017 | 4520 |   | 
|---|
| 4018 | 4521 |  	mutex_destroy(&iter->mutex); | 
|---|
| 4019 | 4522 |  	free_cpumask_var(iter->started); | 
|---|
 | 4523 | +	kfree(iter->temp);  | 
|---|
| 4020 | 4524 |  	kfree(iter->trace); | 
|---|
| 4021 | 4525 |  	kfree(iter->buffer_iter); | 
|---|
| 4022 | 4526 |  	seq_release_private(inode, file); | 
|---|
| .. | .. | 
|---|
| 4045 | 4549 |  { | 
|---|
| 4046 | 4550 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 4047 | 4551 |  	struct trace_iterator *iter; | 
|---|
| 4048 |  | -	int ret = 0;  | 
|---|
 | 4552 | +	int ret;  | 
|---|
| 4049 | 4553 |   | 
|---|
| 4050 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 4051 |  | -		return -ENODEV;  | 
|---|
 | 4554 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 4555 | +	if (ret)  | 
|---|
 | 4556 | +		return ret;  | 
|---|
| 4052 | 4557 |   | 
|---|
| 4053 | 4558 |  	/* If this file was open for write, then erase contents */ | 
|---|
| 4054 | 4559 |  	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { | 
|---|
| 4055 | 4560 |  		int cpu = tracing_get_cpu(inode); | 
|---|
| 4056 |  | -		struct trace_buffer *trace_buf = &tr->trace_buffer;  | 
|---|
 | 4561 | +		struct array_buffer *trace_buf = &tr->array_buffer;  | 
|---|
| 4057 | 4562 |   | 
|---|
| 4058 | 4563 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 4059 | 4564 |  		if (tr->current_trace->print_max) | 
|---|
| .. | .. | 
|---|
| 4063 | 4568 |  		if (cpu == RING_BUFFER_ALL_CPUS) | 
|---|
| 4064 | 4569 |  			tracing_reset_online_cpus(trace_buf); | 
|---|
| 4065 | 4570 |  		else | 
|---|
| 4066 |  | -			tracing_reset(trace_buf, cpu);  | 
|---|
 | 4571 | +			tracing_reset_cpu(trace_buf, cpu);  | 
|---|
| 4067 | 4572 |  	} | 
|---|
| 4068 | 4573 |   | 
|---|
| 4069 | 4574 |  	if (file->f_mode & FMODE_READ) { | 
|---|
| .. | .. | 
|---|
| 4164 | 4669 |  	struct seq_file *m; | 
|---|
| 4165 | 4670 |  	int ret; | 
|---|
| 4166 | 4671 |   | 
|---|
| 4167 |  | -	if (tracing_disabled)  | 
|---|
| 4168 |  | -		return -ENODEV;  | 
|---|
| 4169 |  | -  | 
|---|
| 4170 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 4171 |  | -		return -ENODEV;  | 
|---|
 | 4672 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 4673 | +	if (ret)  | 
|---|
 | 4674 | +		return ret;  | 
|---|
| 4172 | 4675 |   | 
|---|
| 4173 | 4676 |  	ret = seq_open(file, &show_traces_seq_ops); | 
|---|
| 4174 | 4677 |  	if (ret) { | 
|---|
| .. | .. | 
|---|
| 4252 | 4755 |  	return count; | 
|---|
| 4253 | 4756 |  } | 
|---|
| 4254 | 4757 |   | 
|---|
| 4255 |  | -static ssize_t  | 
|---|
| 4256 |  | -tracing_cpumask_write(struct file *filp, const char __user *ubuf,  | 
|---|
| 4257 |  | -		      size_t count, loff_t *ppos)  | 
|---|
 | 4758 | +int tracing_set_cpumask(struct trace_array *tr,  | 
|---|
 | 4759 | +			cpumask_var_t tracing_cpumask_new)  | 
|---|
| 4258 | 4760 |  { | 
|---|
| 4259 |  | -	struct trace_array *tr = file_inode(filp)->i_private;  | 
|---|
| 4260 |  | -	cpumask_var_t tracing_cpumask_new;  | 
|---|
| 4261 |  | -	int err, cpu;  | 
|---|
 | 4761 | +	int cpu;  | 
|---|
| 4262 | 4762 |   | 
|---|
| 4263 |  | -	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))  | 
|---|
| 4264 |  | -		return -ENOMEM;  | 
|---|
| 4265 |  | -  | 
|---|
| 4266 |  | -	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);  | 
|---|
| 4267 |  | -	if (err)  | 
|---|
| 4268 |  | -		goto err_unlock;  | 
|---|
 | 4763 | +	if (!tr)  | 
|---|
 | 4764 | +		return -EINVAL;  | 
|---|
| 4269 | 4765 |   | 
|---|
| 4270 | 4766 |  	local_irq_disable(); | 
|---|
| 4271 | 4767 |  	arch_spin_lock(&tr->max_lock); | 
|---|
| .. | .. | 
|---|
| 4276 | 4772 |  		 */ | 
|---|
| 4277 | 4773 |  		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && | 
|---|
| 4278 | 4774 |  				!cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 
|---|
| 4279 |  | -			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);  | 
|---|
| 4280 |  | -			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);  | 
|---|
 | 4775 | +			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);  | 
|---|
 | 4776 | +			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);  | 
|---|
| 4281 | 4777 |  		} | 
|---|
| 4282 | 4778 |  		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && | 
|---|
| 4283 | 4779 |  				cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 
|---|
| 4284 |  | -			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);  | 
|---|
| 4285 |  | -			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);  | 
|---|
 | 4780 | +			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);  | 
|---|
 | 4781 | +			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);  | 
|---|
| 4286 | 4782 |  		} | 
|---|
| 4287 | 4783 |  	} | 
|---|
| 4288 | 4784 |  	arch_spin_unlock(&tr->max_lock); | 
|---|
| 4289 | 4785 |  	local_irq_enable(); | 
|---|
| 4290 | 4786 |   | 
|---|
| 4291 | 4787 |  	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); | 
|---|
 | 4788 | +  | 
|---|
 | 4789 | +	return 0;  | 
|---|
 | 4790 | +}  | 
|---|
 | 4791 | +  | 
|---|
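The extracted `tracing_set_cpumask()` applies the new mask as a delta against the current one, under `max_lock` with IRQs off so a buffer swap cannot race: CPUs leaving the mask get their per-CPU `disabled` count raised and ring-buffer recording stopped, CPUs entering get the reverse. A compact userspace model of the delta walk (plain bitmasks stand in for `cpumask_var_t`):

```c
#include <stdio.h>

static void apply_cpumask(unsigned long *cur, unsigned long new_mask, int nr_cpus)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		int was = !!(*cur     & (1UL << cpu));
		int now = !!(new_mask & (1UL << cpu));

		if (was && !now)
			printf("cpu%d: disable recording\n", cpu);
		else if (!was && now)
			printf("cpu%d: enable recording\n", cpu);
	}
	*cur = new_mask;	/* commit, like cpumask_copy() above */
}

int main(void)
{
	unsigned long cur = 0xf;	/* CPUs 0-3 currently traced */

	apply_cpumask(&cur, 0x5, 4);	/* keep CPUs 0 and 2 only */
	return 0;
}
```

From userspace the same path is driven by writing a hex mask (parsed by `cpumask_parse_user()`) to the `tracing_cpumask` file.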
 | 4792 | +static ssize_t  | 
|---|
 | 4793 | +tracing_cpumask_write(struct file *filp, const char __user *ubuf,  | 
|---|
 | 4794 | +		      size_t count, loff_t *ppos)  | 
|---|
 | 4795 | +{  | 
|---|
 | 4796 | +	struct trace_array *tr = file_inode(filp)->i_private;  | 
|---|
 | 4797 | +	cpumask_var_t tracing_cpumask_new;  | 
|---|
 | 4798 | +	int err;  | 
|---|
 | 4799 | +  | 
|---|
 | 4800 | +	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))  | 
|---|
 | 4801 | +		return -ENOMEM;  | 
|---|
 | 4802 | +  | 
|---|
 | 4803 | +	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);  | 
|---|
 | 4804 | +	if (err)  | 
|---|
 | 4805 | +		goto err_free;  | 
|---|
 | 4806 | +  | 
|---|
 | 4807 | +	err = tracing_set_cpumask(tr, tracing_cpumask_new);  | 
|---|
 | 4808 | +	if (err)  | 
|---|
 | 4809 | +		goto err_free;  | 
|---|
 | 4810 | +  | 
|---|
| 4292 | 4811 |  	free_cpumask_var(tracing_cpumask_new); | 
|---|
| 4293 | 4812 |   | 
|---|
| 4294 | 4813 |  	return count; | 
|---|
| 4295 | 4814 |   | 
|---|
| 4296 |  | -err_unlock:  | 
|---|
 | 4815 | +err_free:  | 
|---|
| 4297 | 4816 |  	free_cpumask_var(tracing_cpumask_new); | 
|---|
| 4298 | 4817 |   | 
|---|
| 4299 | 4818 |  	return err; | 
|---|
| .. | .. | 
|---|
| 4435 | 4954 |  		ftrace_pid_follow_fork(tr, enabled); | 
|---|
| 4436 | 4955 |   | 
|---|
| 4437 | 4956 |  	if (mask == TRACE_ITER_OVERWRITE) { | 
|---|
| 4438 |  | -		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);  | 
|---|
 | 4957 | +		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);  | 
|---|
| 4439 | 4958 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 4440 | 4959 |  		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); | 
|---|
| 4441 | 4960 |  #endif | 
|---|
| .. | .. | 
|---|
| 4449 | 4968 |  	return 0; | 
|---|
| 4450 | 4969 |  } | 
|---|
| 4451 | 4970 |   | 
|---|
| 4452 |  | -static int trace_set_options(struct trace_array *tr, char *option)  | 
|---|
 | 4971 | +int trace_set_options(struct trace_array *tr, char *option)  | 
|---|
| 4453 | 4972 |  { | 
|---|
| 4454 | 4973 |  	char *cmp; | 
|---|
| 4455 | 4974 |  	int neg = 0; | 
|---|
| 4456 | 4975 |  	int ret; | 
|---|
| 4457 | 4976 |  	size_t orig_len = strlen(option); | 
|---|
 | 4977 | +	int len;  | 
|---|
| 4458 | 4978 |   | 
|---|
| 4459 | 4979 |  	cmp = strstrip(option); | 
|---|
| 4460 | 4980 |   | 
|---|
| 4461 |  | -	if (strncmp(cmp, "no", 2) == 0) {  | 
|---|
 | 4981 | +	len = str_has_prefix(cmp, "no");  | 
|---|
 | 4982 | +	if (len)  | 
|---|
| 4462 | 4983 |  		neg = 1; | 
|---|
| 4463 |  | -		cmp += 2;  | 
|---|
| 4464 |  | -	}  | 
|---|
 | 4984 | +  | 
|---|
 | 4985 | +	cmp += len;  | 
|---|
| 4465 | 4986 |   | 
|---|
| 4466 | 4987 |  	mutex_lock(&event_mutex); | 
|---|
| 4467 | 4988 |  	mutex_lock(&trace_types_lock); | 
|---|
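`str_has_prefix()` tightens the negation parsing: unlike the open-coded `strncmp(cmp, "no", 2)`, it returns the prefix length on a match (0 otherwise), so the magic `2` disappears and the unconditional `cmp += len` is a no-op when there was no prefix. A userspace model of the contract:

```c
/* Userspace model of str_has_prefix(): prefix length on match, else 0. */
#include <stdio.h>
#include <string.h>

static size_t str_has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
	const char *cmp = "nooverwrite";
	size_t len = str_has_prefix(cmp, "no");

	printf("neg=%d option=%s\n", len != 0, cmp + len);
	return 0;
}
```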
| .. | .. | 
|---|
| 4537 | 5058 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 4538 | 5059 |  	int ret; | 
|---|
| 4539 | 5060 |   | 
|---|
| 4540 |  | -	if (tracing_disabled)  | 
|---|
| 4541 |  | -		return -ENODEV;  | 
|---|
| 4542 |  | -  | 
|---|
| 4543 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 4544 |  | -		return -ENODEV;  | 
|---|
 | 5061 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 5062 | +	if (ret)  | 
|---|
 | 5063 | +		return ret;  | 
|---|
| 4545 | 5064 |   | 
|---|
| 4546 | 5065 |  	ret = single_open(file, tracing_trace_options_show, inode->i_private); | 
|---|
| 4547 | 5066 |  	if (ret < 0) | 
|---|
| .. | .. | 
|---|
| 4568 | 5087 |  	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n" | 
|---|
| 4569 | 5088 |  	"  current_tracer\t- function and latency tracers\n" | 
|---|
| 4570 | 5089 |  	"  available_tracers\t- list of configured tracers for current_tracer\n" | 
|---|
 | 5090 | +	"  error_log\t- error log for failed commands (that support it)\n"  | 
|---|
| 4571 | 5091 |  	"  buffer_size_kb\t- view and modify size of per cpu buffer\n" | 
|---|
| 4572 | 5092 |  	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n" | 
|---|
| 4573 | 5093 |  	"  trace_clock\t\t-change the clock used to order events\n" | 
|---|
| .. | .. | 
|---|
| 4588 | 5108 |  	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n" | 
|---|
| 4589 | 5109 |  	"\t\t\t  Remove sub-buffer with rmdir\n" | 
|---|
| 4590 | 5110 |  	"  trace_options\t\t- Set format or modify how tracing happens\n" | 
|---|
| 4591 |  | -	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"  | 
|---|
 | 5111 | +	"\t\t\t  Disable an option by prefixing 'no' to the\n"  | 
|---|
| 4592 | 5112 |  	"\t\t\t  option name\n" | 
|---|
| 4593 | 5113 |  	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" | 
|---|
| 4594 | 5114 |  #ifdef CONFIG_DYNAMIC_FTRACE | 
|---|
| .. | .. | 
|---|
| 4632 | 5152 |  #ifdef CONFIG_FUNCTION_TRACER | 
|---|
| 4633 | 5153 |  	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" | 
|---|
| 4634 | 5154 |  	"\t\t    (function)\n" | 
|---|
 | 5155 | +	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"  | 
|---|
 | 5156 | +	"\t\t    (function)\n"  | 
|---|
| 4635 | 5157 |  #endif | 
|---|
| 4636 | 5158 |  #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
|---|
| 4637 | 5159 |  	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n" | 
|---|
| .. | .. | 
|---|
| 4653 | 5175 |  	"\t\t\t  traces\n" | 
|---|
| 4654 | 5176 |  #endif | 
|---|
| 4655 | 5177 |  #endif /* CONFIG_STACK_TRACER */ | 
|---|
 | 5178 | +#ifdef CONFIG_DYNAMIC_EVENTS  | 
|---|
 | 5179 | +	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"  | 
|---|
 | 5180 | +	"\t\t\t  Write into this file to define/undefine new trace events.\n"  | 
|---|
 | 5181 | +#endif  | 
|---|
| 4656 | 5182 |  #ifdef CONFIG_KPROBE_EVENTS | 
|---|
| 4657 |  | -	"  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"  | 
|---|
 | 5183 | +	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"  | 
|---|
| 4658 | 5184 |  	"\t\t\t  Write into this file to define/undefine new trace events.\n" | 
|---|
| 4659 | 5185 |  #endif | 
|---|
| 4660 | 5186 |  #ifdef CONFIG_UPROBE_EVENTS | 
|---|
| 4661 |  | -	"  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"  | 
|---|
 | 5187 | +	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"  | 
|---|
| 4662 | 5188 |  	"\t\t\t  Write into this file to define/undefine new trace events.\n" | 
|---|
| 4663 | 5189 |  #endif | 
|---|
| 4664 | 5190 |  #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) | 
|---|
| 4665 | 5191 |  	"\t  accepts: event-definitions (one definition per line)\n" | 
|---|
| 4666 | 5192 |  	"\t   Format: p[:[<group>/]<event>] <place> [<args>]\n" | 
|---|
| 4667 | 5193 |  	"\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n" | 
|---|
 | 5194 | +#ifdef CONFIG_HIST_TRIGGERS  | 
|---|
 | 5195 | +	"\t           s:[synthetic/]<event> <field> [<field>]\n"  | 
|---|
 | 5196 | +#endif  | 
|---|
| 4668 | 5197 |  	"\t           -:[<group>/]<event>\n" | 
|---|
| 4669 | 5198 |  #ifdef CONFIG_KPROBE_EVENTS | 
|---|
| 4670 | 5199 |  	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" | 
|---|
| 4671 |  | -  "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"  | 
|---|
 | 5200 | +  "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"  | 
|---|
| 4672 | 5201 |  #endif | 
|---|
| 4673 | 5202 |  #ifdef CONFIG_UPROBE_EVENTS | 
|---|
| 4674 |  | -	"\t    place: <path>:<offset>\n"  | 
|---|
 | 5203 | +  "   place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"  | 
|---|
| 4675 | 5204 |  #endif | 
|---|
| 4676 | 5205 |  	"\t     args: <name>=fetcharg[:type]\n" | 
|---|
| 4677 | 5206 |  	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" | 
|---|
| 4678 |  | -	"\t           $stack<index>, $stack, $retval, $comm\n"  | 
|---|
| 4679 |  | -	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"  | 
|---|
| 4680 |  | -	"\t           b<bit-width>@<bit-offset>/<container-size>\n"  | 
|---|
 | 5207 | +#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API  | 
|---|
 | 5208 | +	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"  | 
|---|
 | 5209 | +#else  | 
|---|
 | 5210 | +	"\t           $stack<index>, $stack, $retval, $comm,\n"  | 
|---|
 | 5211 | +#endif  | 
|---|
 | 5212 | +	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"  | 
|---|
 | 5213 | +	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"  | 
|---|
 | 5214 | +	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"  | 
|---|
 | 5215 | +	"\t           <type>\\[<array-size>\\]\n"  | 
|---|
 | 5216 | +#ifdef CONFIG_HIST_TRIGGERS  | 
|---|
 | 5217 | +	"\t    field: <stype> <name>;\n"  | 
|---|
 | 5218 | +	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"  | 
|---|
 | 5219 | +	"\t           [unsigned] char/int/long\n"  | 
|---|
 | 5220 | +#endif  | 
|---|
| 4681 | 5221 |  #endif | 
|---|
| 4682 | 5222 |  	"  events/\t\t- Directory containing all trace event subsystems:\n" | 
|---|
| 4683 | 5223 |  	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n" | 
|---|
| .. | .. | 
|---|
| 4730 | 5270 |  	"\t            [:size=#entries]\n" | 
|---|
| 4731 | 5271 |  	"\t            [:pause][:continue][:clear]\n" | 
|---|
| 4732 | 5272 |  	"\t            [:name=histname1]\n" | 
|---|
 | 5273 | +	"\t            [:<handler>.<action>]\n"  | 
|---|
| 4733 | 5274 |  	"\t            [if <filter>]\n\n" | 
|---|
| 4734 | 5275 |  	"\t    Note, special fields can be used as well:\n" | 
|---|
| 4735 | 5276 |  	"\t            common_timestamp - to record current timestamp\n" | 
|---|
| .. | .. | 
|---|
| 4774 | 5315 |  	"\t    unchanged.\n\n" | 
|---|
| 4775 | 5316 |  	"\t    The enable_hist and disable_hist triggers can be used to\n" | 
|---|
| 4776 | 5317 |  	"\t    have one event conditionally start and stop another event's\n" | 
|---|
| 4777 |  | -	"\t    already-attached hist trigger.  The syntax is analagous to\n"  | 
|---|
| 4778 |  | -	"\t    the enable_event and disable_event triggers.\n"  | 
|---|
 | 5318 | +	"\t    already-attached hist trigger.  The syntax is analogous to\n"  | 
|---|
 | 5319 | +	"\t    the enable_event and disable_event triggers.\n\n"  | 
|---|
 | 5320 | +	"\t    Hist trigger handlers and actions are executed whenever\n"  | 
|---|
 | 5321 | +	"\t    a histogram entry is added or updated.  They take the form:\n\n"  | 
|---|
 | 5322 | +	"\t        <handler>.<action>\n\n"  | 
|---|
 | 5323 | +	"\t    The available handlers are:\n\n"  | 
|---|
 | 5324 | +	"\t        onmatch(matching.event)  - invoke on addition or update\n"  | 
|---|
 | 5325 | +	"\t        onmax(var)               - invoke if var exceeds current max\n"  | 
|---|
 | 5326 | +	"\t        onchange(var)            - invoke action if var changes\n\n"  | 
|---|
 | 5327 | +	"\t    The available actions are:\n\n"  | 
|---|
 | 5328 | +	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"  | 
|---|
 | 5329 | +	"\t        save(field,...)                      - save current event fields\n"  | 
|---|
 | 5330 | +#ifdef CONFIG_TRACER_SNAPSHOT  | 
|---|
 | 5331 | +	"\t        snapshot()                           - snapshot the trace buffer\n\n"  | 
|---|
 | 5332 | +#endif  | 
|---|
 | 5333 | +#ifdef CONFIG_SYNTH_EVENTS  | 
|---|
 | 5334 | +	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"  | 
|---|
 | 5335 | +	"\t  Write into this file to define/undefine new synthetic events.\n"  | 
|---|
 | 5336 | +	"\t     example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"  | 
|---|
 | 5337 | +#endif  | 
|---|
| 4779 | 5338 |  #endif | 
|---|
| 4780 | 5339 |  ; | 
|---|
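The expanded README text above is the authoritative grammar for what gets written into `kprobe_events`/`uprobe_events`. To make the `p`/`r`/`-` forms concrete, a few illustrative definitions (group and event names are made up; `$arg1` assumes the function-argument API mentioned in the help text):

```c
#include <stdio.h>

static const char * const defs[] = {
	"p:mygroup/open_entry do_sys_openat2 dfd=$arg1",  /* entry probe + fetcharg */
	"r:mygroup/open_exit do_sys_openat2 ret=$retval", /* return probe           */
	"-:mygroup/open_entry",                           /* '-' removes an event   */
};

int main(void)
{
	for (int i = 0; i < 3; i++)	/* each line would be appended to kprobe_events */
		printf("%s\n", defs[i]);
	return 0;
}
```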
| 4781 | 5340 |   | 
|---|
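The handler/action grammar and the new `synthetic_events` file combine into the pattern the help text is describing; the sequence below follows the kernel's histogram documentation (event and variable names are the documentation's examples, shown as the strings written to each trigger file):

```c
/* 1) define the synthetic event (events/synthetic_events): */
const char *synth = "wakeup_latency u64 lat; pid_t pid";

/* 2) stash a per-pid timestamp at wakeup (events/sched/sched_waking/trigger): */
const char *start = "hist:keys=pid:ts0=common_timestamp.usecs";

/* 3) on the matching context switch, compute the delta and emit the synthetic
 *    event via onmatch().trace() (events/sched/sched_switch/trigger): */
const char *emit =
	"hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:"
	"onmatch(sched.sched_waking).trace(wakeup_latency,$wakeup_lat,next_pid)";
```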
| .. | .. | 
|---|
| 4833 | 5392 |   | 
|---|
| 4834 | 5393 |  static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) | 
|---|
| 4835 | 5394 |  { | 
|---|
| 4836 |  | -	if (tracing_disabled)  | 
|---|
| 4837 |  | -		return -ENODEV;  | 
|---|
 | 5395 | +	int ret;  | 
|---|
 | 5396 | +  | 
|---|
 | 5397 | +	ret = tracing_check_open_get_tr(NULL);  | 
|---|
 | 5398 | +	if (ret)  | 
|---|
 | 5399 | +		return ret;  | 
|---|
| 4838 | 5400 |   | 
|---|
| 4839 | 5401 |  	return seq_open(filp, &tracing_saved_tgids_seq_ops); | 
|---|
| 4840 | 5402 |  } | 
|---|
| .. | .. | 
|---|
| 4910 | 5472 |   | 
|---|
| 4911 | 5473 |  static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp) | 
|---|
| 4912 | 5474 |  { | 
|---|
| 4913 |  | -	if (tracing_disabled)  | 
|---|
| 4914 |  | -		return -ENODEV;  | 
|---|
 | 5475 | +	int ret;  | 
|---|
 | 5476 | +  | 
|---|
 | 5477 | +	ret = tracing_check_open_get_tr(NULL);  | 
|---|
 | 5478 | +	if (ret)  | 
|---|
 | 5479 | +		return ret;  | 
|---|
| 4915 | 5480 |   | 
|---|
| 4916 | 5481 |  	return seq_open(filp, &tracing_saved_cmdlines_seq_ops); | 
|---|
| 4917 | 5482 |  } | 
|---|
| .. | .. | 
|---|
| 4930 | 5495 |  	char buf[64]; | 
|---|
| 4931 | 5496 |  	int r; | 
|---|
| 4932 | 5497 |   | 
|---|
 | 5498 | +	preempt_disable();  | 
|---|
| 4933 | 5499 |  	arch_spin_lock(&trace_cmdline_lock); | 
|---|
| 4934 | 5500 |  	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); | 
|---|
| 4935 | 5501 |  	arch_spin_unlock(&trace_cmdline_lock); | 
|---|
 | 5502 | +	preempt_enable();  | 
|---|
| 4936 | 5503 |   | 
|---|
| 4937 | 5504 |  	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 
|---|
| 4938 | 5505 |  } | 
|---|
| .. | .. | 
|---|
| 4957 | 5524 |  		return -ENOMEM; | 
|---|
| 4958 | 5525 |  	} | 
|---|
| 4959 | 5526 |   | 
|---|
 | 5527 | +	preempt_disable();  | 
|---|
| 4960 | 5528 |  	arch_spin_lock(&trace_cmdline_lock); | 
|---|
| 4961 | 5529 |  	savedcmd_temp = savedcmd; | 
|---|
| 4962 | 5530 |  	savedcmd = s; | 
|---|
| 4963 | 5531 |  	arch_spin_unlock(&trace_cmdline_lock); | 
|---|
 | 5532 | +	preempt_enable();  | 
|---|
| 4964 | 5533 |  	free_saved_cmdlines_buffer(savedcmd_temp); | 
|---|
| 4965 | 5534 |   | 
|---|
| 4966 | 5535 |  	return 0; | 
|---|
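Both hunks above bracket `arch_spin_lock()` with `preempt_disable()`/`preempt_enable()`. Unlike `spin_lock()`, the raw arch lock does not touch the preempt count, so without the bracketing the holder could be scheduled out mid-critical-section while every other CPU spins on `trace_cmdline_lock`. The resulting pattern, as a kernel-context fragment (not a standalone program):

```c
preempt_disable();			/* keep the lock holder on-CPU */
arch_spin_lock(&trace_cmdline_lock);
/* ... read or swap savedcmd ... */
arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
```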
| .. | .. | 
|---|
| 5019 | 5588 |  	 * Paranoid! If ptr points to end, we don't want to increment past it. | 
|---|
| 5020 | 5589 |  	 * This really should never happen. | 
|---|
| 5021 | 5590 |  	 */ | 
|---|
 | 5591 | +	(*pos)++;  | 
|---|
| 5022 | 5592 |  	ptr = update_eval_map(ptr); | 
|---|
| 5023 | 5593 |  	if (WARN_ON_ONCE(!ptr)) | 
|---|
| 5024 | 5594 |  		return NULL; | 
|---|
| 5025 | 5595 |   | 
|---|
| 5026 | 5596 |  	ptr++; | 
|---|
| 5027 |  | -  | 
|---|
| 5028 |  | -	(*pos)++;  | 
|---|
| 5029 |  | -  | 
|---|
| 5030 | 5597 |  	ptr = update_eval_map(ptr); | 
|---|
| 5031 | 5598 |   | 
|---|
| 5032 | 5599 |  	return ptr; | 
|---|
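Hoisting `(*pos)++` to the top makes the advance unconditional, which is what the seq_file contract requires: a `.next` handler must move `*pos` forward even when it returns NULL, otherwise `seq_read()` can complain about a buggy `.next` or re-fetch the same position forever. A userspace model of the convention:

```c
#include <stdio.h>

static int data[] = { 1, 2, 3 };

/* .next analogue: advance *pos first, even on the terminating NULL path. */
static void *my_next(long *pos)
{
	(*pos)++;
	if (*pos >= 3)
		return NULL;	/* iteration ends, but pos still moved */
	return &data[*pos];
}

int main(void)
{
	long pos = -1;
	void *v;

	while ((v = my_next(&pos)))
		printf("%d\n", *(int *)v);
	return 0;
}
```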
| .. | .. | 
|---|
| 5075 | 5642 |   | 
|---|
| 5076 | 5643 |  static int tracing_eval_map_open(struct inode *inode, struct file *filp) | 
|---|
| 5077 | 5644 |  { | 
|---|
| 5078 |  | -	if (tracing_disabled)  | 
|---|
| 5079 |  | -		return -ENODEV;  | 
|---|
 | 5645 | +	int ret;  | 
|---|
 | 5646 | +  | 
|---|
 | 5647 | +	ret = tracing_check_open_get_tr(NULL);  | 
|---|
 | 5648 | +	if (ret)  | 
|---|
 | 5649 | +		return ret;  | 
|---|
| 5080 | 5650 |   | 
|---|
| 5081 | 5651 |  	return seq_open(filp, &tracing_eval_map_seq_ops); | 
|---|
| 5082 | 5652 |  } | 
|---|
| .. | .. | 
|---|
| 5189 | 5759 |   | 
|---|
| 5190 | 5760 |  int tracer_init(struct tracer *t, struct trace_array *tr) | 
|---|
| 5191 | 5761 |  { | 
|---|
| 5192 |  | -	tracing_reset_online_cpus(&tr->trace_buffer);  | 
|---|
 | 5762 | +	tracing_reset_online_cpus(&tr->array_buffer);  | 
|---|
| 5193 | 5763 |  	return t->init(tr); | 
|---|
| 5194 | 5764 |  } | 
|---|
| 5195 | 5765 |   | 
|---|
| 5196 |  | -static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)  | 
|---|
 | 5766 | +static void set_buffer_entries(struct array_buffer *buf, unsigned long val)  | 
|---|
| 5197 | 5767 |  { | 
|---|
| 5198 | 5768 |  	int cpu; | 
|---|
| 5199 | 5769 |   | 
|---|
| .. | .. | 
|---|
| 5203 | 5773 |   | 
|---|
| 5204 | 5774 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 5205 | 5775 |  /* resize @tr's buffer to the size of @size_tr's entries */ | 
|---|
| 5206 |  | -static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,  | 
|---|
| 5207 |  | -					struct trace_buffer *size_buf, int cpu_id)  | 
|---|
 | 5776 | +static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,  | 
|---|
 | 5777 | +					struct array_buffer *size_buf, int cpu_id)  | 
|---|
| 5208 | 5778 |  { | 
|---|
| 5209 | 5779 |  	int cpu, ret = 0; | 
|---|
| 5210 | 5780 |   | 
|---|
| .. | .. | 
|---|
| 5242 | 5812 |  	ring_buffer_expanded = true; | 
|---|
| 5243 | 5813 |   | 
|---|
| 5244 | 5814 |  	/* May be called before buffers are initialized */ | 
|---|
| 5245 |  | -	if (!tr->trace_buffer.buffer)  | 
|---|
 | 5815 | +	if (!tr->array_buffer.buffer)  | 
|---|
| 5246 | 5816 |  		return 0; | 
|---|
| 5247 | 5817 |   | 
|---|
| 5248 |  | -	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);  | 
|---|
 | 5818 | +	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);  | 
|---|
| 5249 | 5819 |  	if (ret < 0) | 
|---|
| 5250 | 5820 |  		return ret; | 
|---|
| 5251 | 5821 |   | 
|---|
| .. | .. | 
|---|
| 5256 | 5826 |   | 
|---|
| 5257 | 5827 |  	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); | 
|---|
| 5258 | 5828 |  	if (ret < 0) { | 
|---|
| 5259 |  | -		int r = resize_buffer_duplicate_size(&tr->trace_buffer,  | 
|---|
| 5260 |  | -						     &tr->trace_buffer, cpu);  | 
|---|
 | 5829 | +		int r = resize_buffer_duplicate_size(&tr->array_buffer,  | 
|---|
 | 5830 | +						     &tr->array_buffer, cpu);  | 
|---|
| 5261 | 5831 |  		if (r < 0) { | 
|---|
| 5262 | 5832 |  			/* | 
|---|
| 5263 | 5833 |  			 * AARGH! We are left with different | 
|---|
| .. | .. | 
|---|
| 5288 | 5858 |  #endif /* CONFIG_TRACER_MAX_TRACE */ | 
|---|
| 5289 | 5859 |   | 
|---|
| 5290 | 5860 |  	if (cpu == RING_BUFFER_ALL_CPUS) | 
|---|
| 5291 |  | -		set_buffer_entries(&tr->trace_buffer, size);  | 
|---|
 | 5861 | +		set_buffer_entries(&tr->array_buffer, size);  | 
|---|
| 5292 | 5862 |  	else | 
|---|
| 5293 |  | -		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;  | 
|---|
 | 5863 | +		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;  | 
|---|
| 5294 | 5864 |   | 
|---|
| 5295 | 5865 |  	return ret; | 
|---|
| 5296 | 5866 |  } | 
|---|
| 5297 | 5867 |   | 
|---|
| 5298 |  | -static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,  | 
|---|
| 5299 |  | -					  unsigned long size, int cpu_id)  | 
|---|
 | 5868 | +ssize_t tracing_resize_ring_buffer(struct trace_array *tr,  | 
|---|
 | 5869 | +				  unsigned long size, int cpu_id)  | 
|---|
| 5300 | 5870 |  { | 
|---|
| 5301 | 5871 |  	int ret = size; | 
|---|
| 5302 | 5872 |   | 
|---|
| .. | .. | 
|---|
| 5366 | 5936 |  	tr->current_trace = &nop_trace; | 
|---|
| 5367 | 5937 |  } | 
|---|
| 5368 | 5938 |   | 
|---|
 | 5939 | +static bool tracer_options_updated;  | 
|---|
 | 5940 | +  | 
|---|
| 5369 | 5941 |  static void add_tracer_options(struct trace_array *tr, struct tracer *t) | 
|---|
| 5370 | 5942 |  { | 
|---|
| 5371 | 5943 |  	/* Only enable if the directory has been created already. */ | 
|---|
| 5372 | 5944 |  	if (!tr->dir) | 
|---|
| 5373 | 5945 |  		return; | 
|---|
| 5374 | 5946 |   | 
|---|
 | 5947 | +	/* Only create trace option files after update_tracer_options finishes */  | 
|---|
 | 5948 | +	if (!tracer_options_updated)  | 
|---|
 | 5949 | +		return;  | 
|---|
 | 5950 | +  | 
|---|
| 5375 | 5951 |  	create_trace_option_files(tr, t); | 
|---|
| 5376 | 5952 |  } | 
|---|
| 5377 | 5953 |   | 
|---|
| 5378 |  | -static int tracing_set_tracer(struct trace_array *tr, const char *buf)  | 
|---|
 | 5954 | +int tracing_set_tracer(struct trace_array *tr, const char *buf)  | 
|---|
| 5379 | 5955 |  { | 
|---|
| 5380 | 5956 |  	struct tracer *t; | 
|---|
| 5381 | 5957 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| .. | .. | 
|---|
| 5404 | 5980 |  	if (t == tr->current_trace) | 
|---|
| 5405 | 5981 |  		goto out; | 
|---|
| 5406 | 5982 |   | 
|---|
 | 5983 | +#ifdef CONFIG_TRACER_SNAPSHOT  | 
|---|
 | 5984 | +	if (t->use_max_tr) {  | 
|---|
 | 5985 | +		local_irq_disable();  | 
|---|
 | 5986 | +		arch_spin_lock(&tr->max_lock);  | 
|---|
 | 5987 | +		if (tr->cond_snapshot)  | 
|---|
 | 5988 | +			ret = -EBUSY;  | 
|---|
 | 5989 | +		arch_spin_unlock(&tr->max_lock);  | 
|---|
 | 5990 | +		local_irq_enable();  | 
|---|
 | 5991 | +		if (ret)  | 
|---|
 | 5992 | +			goto out;  | 
|---|
 | 5993 | +	}  | 
|---|
 | 5994 | +#endif  | 
|---|
| 5407 | 5995 |  	/* Some tracers won't work on kernel command line */ | 
|---|
| 5408 | 5996 |  	if (system_state < SYSTEM_RUNNING && t->noboot) { | 
|---|
| 5409 | 5997 |  		pr_warn("Tracer '%s' is not allowed on command line, ignored\n", | 
|---|
| .. | .. | 
|---|
| 5418 | 6006 |  	} | 
|---|
| 5419 | 6007 |   | 
|---|
| 5420 | 6008 |  	/* If trace pipe files are being read, we can't change the tracer */ | 
|---|
| 5421 |  | -	if (tr->current_trace->ref) {  | 
|---|
 | 6009 | +	if (tr->trace_ref) {  | 
|---|
| 5422 | 6010 |  		ret = -EBUSY; | 
|---|
| 5423 | 6011 |  		goto out; | 
|---|
| 5424 | 6012 |  	} | 
|---|
| .. | .. | 
|---|
| 5430 | 6018 |  	if (tr->current_trace->reset) | 
|---|
| 5431 | 6019 |  		tr->current_trace->reset(tr); | 
|---|
| 5432 | 6020 |   | 
|---|
| 5433 |  | -	/* Current trace needs to be nop_trace before synchronize_sched */  | 
|---|
| 5434 |  | -	tr->current_trace = &nop_trace;  | 
|---|
| 5435 |  | -  | 
|---|
| 5436 | 6021 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 5437 |  | -	had_max_tr = tr->allocated_snapshot;  | 
|---|
 | 6022 | +	had_max_tr = tr->current_trace->use_max_tr;  | 
|---|
 | 6023 | +  | 
|---|
 | 6024 | +	/* Current trace needs to be nop_trace before synchronize_rcu */  | 
|---|
 | 6025 | +	tr->current_trace = &nop_trace;  | 
|---|
| 5438 | 6026 |   | 
|---|
| 5439 | 6027 |  	if (had_max_tr && !t->use_max_tr) { | 
|---|
| 5440 | 6028 |  		/* | 
|---|
| .. | .. | 
|---|
| 5444 | 6032 |  		 * The update_max_tr is called from interrupts disabled | 
|---|
| 5445 | 6033 |  		 * so a synchronized_sched() is sufficient. | 
|---|
| 5446 | 6034 |  		 */ | 
|---|
| 5447 |  | -		synchronize_sched();  | 
|---|
 | 6035 | +		synchronize_rcu();  | 
|---|
| 5448 | 6036 |  		free_snapshot(tr); | 
|---|
| 5449 | 6037 |  	} | 
|---|
| 5450 |  | -#endif  | 
|---|
| 5451 | 6038 |   | 
|---|
| 5452 |  | -#ifdef CONFIG_TRACER_MAX_TRACE  | 
|---|
| 5453 |  | -	if (t->use_max_tr && !had_max_tr) {  | 
|---|
 | 6039 | +	if (t->use_max_tr && !tr->allocated_snapshot) {  | 
|---|
| 5454 | 6040 |  		ret = tracing_alloc_snapshot_instance(tr); | 
|---|
| 5455 | 6041 |  		if (ret < 0) | 
|---|
| 5456 | 6042 |  			goto out; | 
|---|
| 5457 | 6043 |  	} | 
|---|
 | 6044 | +#else  | 
|---|
 | 6045 | +	tr->current_trace = &nop_trace;  | 
|---|
| 5458 | 6046 |  #endif | 
|---|
| 5459 | 6047 |   | 
|---|
| 5460 | 6048 |  	if (t->init) { | 
|---|
| .. | .. | 
|---|
| 5589 | 6177 |  { | 
|---|
| 5590 | 6178 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 5591 | 6179 |  	struct trace_iterator *iter; | 
|---|
| 5592 |  | -	int ret = 0;  | 
|---|
 | 6180 | +	int ret;  | 
|---|
| 5593 | 6181 |   | 
|---|
| 5594 |  | -	if (tracing_disabled)  | 
|---|
| 5595 |  | -		return -ENODEV;  | 
|---|
| 5596 |  | -  | 
|---|
| 5597 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 5598 |  | -		return -ENODEV;  | 
|---|
 | 6182 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 6183 | +	if (ret)  | 
|---|
 | 6184 | +		return ret;  | 
|---|
| 5599 | 6185 |   | 
|---|
| 5600 | 6186 |  	mutex_lock(&trace_types_lock); | 
|---|
| 5601 | 6187 |   | 
|---|
| .. | .. | 
|---|
| 5626 | 6212 |  		iter->iter_flags |= TRACE_FILE_TIME_IN_NS; | 
|---|
| 5627 | 6213 |   | 
|---|
| 5628 | 6214 |  	iter->tr = tr; | 
|---|
| 5629 |  | -	iter->trace_buffer = &tr->trace_buffer;  | 
|---|
 | 6215 | +	iter->array_buffer = &tr->array_buffer;  | 
|---|
| 5630 | 6216 |  	iter->cpu_file = tracing_get_cpu(inode); | 
|---|
| 5631 | 6217 |  	mutex_init(&iter->mutex); | 
|---|
| 5632 | 6218 |  	filp->private_data = iter; | 
|---|
| .. | .. | 
|---|
| 5636 | 6222 |   | 
|---|
| 5637 | 6223 |  	nonseekable_open(inode, filp); | 
|---|
| 5638 | 6224 |   | 
|---|
| 5639 |  | -	tr->current_trace->ref++;  | 
|---|
 | 6225 | +	tr->trace_ref++;  | 
|---|
| 5640 | 6226 |  out: | 
|---|
| 5641 | 6227 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 5642 | 6228 |  	return ret; | 
|---|
| .. | .. | 
|---|
| 5655 | 6241 |   | 
|---|
| 5656 | 6242 |  	mutex_lock(&trace_types_lock); | 
|---|
| 5657 | 6243 |   | 
|---|
| 5658 |  | -	tr->current_trace->ref--;  | 
|---|
 | 6244 | +	tr->trace_ref--;  | 
|---|
| 5659 | 6245 |   | 
|---|
| 5660 | 6246 |  	if (iter->trace->pipe_close) | 
|---|
| 5661 | 6247 |  		iter->trace->pipe_close(iter); | 
|---|
| .. | .. | 
|---|
| 5686 | 6272 |  		 */ | 
|---|
| 5687 | 6273 |  		return EPOLLIN | EPOLLRDNORM; | 
|---|
| 5688 | 6274 |  	else | 
|---|
| 5689 |  | -		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,  | 
|---|
| 5690 |  | -					     filp, poll_table);  | 
|---|
 | 6275 | +		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,  | 
|---|
 | 6276 | +					     filp, poll_table, iter->tr->buffer_percent);  | 
|---|
| 5691 | 6277 |  } | 
|---|
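`ring_buffer_poll_wait()` now receives the trace array's `buffer_percent`, so pollers of the pipe are only woken once the buffer reaches that fill level rather than on every event (0 preserves wake-on-any-data). Assuming the usual tracefs mount point and the `buffer_percent` control file, tuning it from userspace would look like:

```c
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Wake pipe readers only when the buffer is half full (assumed path). */
	int fd = open("/sys/kernel/tracing/buffer_percent", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "50", 2);
	close(fd);
	return 0;
}
```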
| 5692 | 6278 |   | 
|---|
| 5693 | 6279 |  static __poll_t | 
|---|
| .. | .. | 
|---|
| 5724 | 6310 |   | 
|---|
| 5725 | 6311 |  		mutex_unlock(&iter->mutex); | 
|---|
| 5726 | 6312 |   | 
|---|
| 5727 |  | -		ret = wait_on_pipe(iter, false);  | 
|---|
 | 6313 | +		ret = wait_on_pipe(iter, 0);  | 
|---|
| 5728 | 6314 |   | 
|---|
| 5729 | 6315 |  		mutex_lock(&iter->mutex); | 
|---|
| 5730 | 6316 |   | 
|---|
| .. | .. | 
|---|
| 5840 | 6426 |  	__free_page(spd->pages[idx]); | 
|---|
| 5841 | 6427 |  } | 
|---|
| 5842 | 6428 |   | 
|---|
| 5843 |  | -static const struct pipe_buf_operations tracing_pipe_buf_ops = {  | 
|---|
| 5844 |  | -	.can_merge		= 0,  | 
|---|
| 5845 |  | -	.confirm		= generic_pipe_buf_confirm,  | 
|---|
| 5846 |  | -	.release		= generic_pipe_buf_release,  | 
|---|
| 5847 |  | -	.steal			= generic_pipe_buf_steal,  | 
|---|
| 5848 |  | -	.get			= generic_pipe_buf_get,  | 
|---|
| 5849 |  | -};  | 
|---|
| 5850 |  | -  | 
|---|
| 5851 | 6429 |  static size_t | 
|---|
| 5852 | 6430 |  tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | 
|---|
| 5853 | 6431 |  { | 
|---|
| .. | .. | 
|---|
| 5909 | 6487 |  		.partial	= partial_def, | 
|---|
| 5910 | 6488 |  		.nr_pages	= 0, /* This gets updated below. */ | 
|---|
| 5911 | 6489 |  		.nr_pages_max	= PIPE_DEF_BUFFERS, | 
|---|
| 5912 |  | -		.ops		= &tracing_pipe_buf_ops,  | 
|---|
 | 6490 | +		.ops		= &default_pipe_buf_ops,  | 
|---|
| 5913 | 6491 |  		.spd_release	= tracing_spd_release_pipe, | 
|---|
| 5914 | 6492 |  	}; | 
|---|
| 5915 | 6493 |  	ssize_t ret; | 
|---|
| .. | .. | 
|---|
| 6004 | 6582 |  		for_each_tracing_cpu(cpu) { | 
|---|
| 6005 | 6583 |  			/* fill in the size from first enabled cpu */ | 
|---|
| 6006 | 6584 |  			if (size == 0) | 
|---|
| 6007 |  | -				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;  | 
|---|
| 6008 |  | -			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {  | 
|---|
 | 6585 | +				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;  | 
|---|
 | 6586 | +			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {  | 
|---|
| 6009 | 6587 |  				buf_size_same = 0; | 
|---|
| 6010 | 6588 |  				break; | 
|---|
| 6011 | 6589 |  			} | 
|---|
| .. | .. | 
|---|
| 6021 | 6599 |  		} else | 
|---|
| 6022 | 6600 |  			r = sprintf(buf, "X\n"); | 
|---|
| 6023 | 6601 |  	} else | 
|---|
| 6024 |  | -		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);  | 
|---|
 | 6602 | +		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);  | 
|---|
| 6025 | 6603 |   | 
|---|
| 6026 | 6604 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 6027 | 6605 |   | 
|---|
| .. | .. | 
|---|
| 6068 | 6646 |   | 
|---|
| 6069 | 6647 |  	mutex_lock(&trace_types_lock); | 
|---|
| 6070 | 6648 |  	for_each_tracing_cpu(cpu) { | 
|---|
| 6071 |  | -		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;  | 
|---|
 | 6649 | +		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;  | 
|---|
| 6072 | 6650 |  		if (!ring_buffer_expanded) | 
|---|
| 6073 | 6651 |  			expanded_size += trace_buf_size >> 10; | 
|---|
| 6074 | 6652 |  	} | 
|---|
| .. | .. | 
|---|
| 6118 | 6696 |  	struct trace_array *tr = filp->private_data; | 
|---|
| 6119 | 6697 |  	struct ring_buffer_event *event; | 
|---|
| 6120 | 6698 |  	enum event_trigger_type tt = ETT_NONE; | 
|---|
| 6121 |  | -	struct ring_buffer *buffer;  | 
|---|
 | 6699 | +	struct trace_buffer *buffer;  | 
|---|
| 6122 | 6700 |  	struct print_entry *entry; | 
|---|
| 6123 |  | -	unsigned long irq_flags;  | 
|---|
| 6124 |  | -	const char faulted[] = "<faulted>";  | 
|---|
| 6125 | 6701 |  	ssize_t written; | 
|---|
| 6126 | 6702 |  	int size; | 
|---|
| 6127 | 6703 |  	int len; | 
|---|
| 6128 | 6704 |   | 
|---|
| 6129 | 6705 |  /* Used in tracing_mark_raw_write() as well */ | 
|---|
| 6130 |  | -#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */  | 
|---|
 | 6706 | +#define FAULTED_STR "<faulted>"  | 
|---|
 | 6707 | +#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */  | 
|---|
| 6131 | 6708 |   | 
|---|
| 6132 | 6709 |  	if (tracing_disabled) | 
|---|
| 6133 | 6710 |  		return -EINVAL; | 
|---|
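`FAULTED_STR`/`FAULTED_SIZE` replace the per-function `const char faulted[]` arrays; the size macro relies on `sizeof` applied to a string literal, which yields the array size including the NUL, hence the `- 1`. (This only works on a literal or array, not on a `const char *`.) In isolation:

```c
#include <stdio.h>

#define FAULTED_STR  "<faulted>"
#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1)	/* 9: the NUL is not counted */

int main(void)
{
	printf("%zu\n", FAULTED_SIZE);
	return 0;
}
```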
| .. | .. | 
|---|
| 6140 | 6717 |   | 
|---|
| 6141 | 6718 |  	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); | 
|---|
| 6142 | 6719 |   | 
|---|
| 6143 |  | -	local_save_flags(irq_flags);  | 
|---|
| 6144 | 6720 |  	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */ | 
|---|
| 6145 | 6721 |   | 
|---|
| 6146 | 6722 |  	/* If less than "<faulted>", then make sure we can still add that */ | 
|---|
| 6147 | 6723 |  	if (cnt < FAULTED_SIZE) | 
|---|
| 6148 | 6724 |  		size += FAULTED_SIZE - cnt; | 
|---|
| 6149 | 6725 |   | 
|---|
| 6150 |  | -	buffer = tr->trace_buffer.buffer;  | 
|---|
 | 6726 | +	buffer = tr->array_buffer.buffer;  | 
|---|
| 6151 | 6727 |  	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, | 
|---|
| 6152 |  | -					    irq_flags, preempt_count());  | 
|---|
 | 6728 | +					    tracing_gen_ctx());  | 
|---|
| 6153 | 6729 |  	if (unlikely(!event)) | 
|---|
| 6154 | 6730 |  		/* Ring buffer disabled, return as if not open for write */ | 
|---|
| 6155 | 6731 |  		return -EBADF; | 
|---|
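`__trace_buffer_lock_reserve()` now takes one precomputed context word from `tracing_gen_ctx()` instead of separate `irq_flags` and `preempt_count()` arguments, which is why the `local_save_flags()` calls disappear from both write paths. A model of the packing idea only; the bit layout below is illustrative, not the kernel's actual encoding:

```c
#include <stdio.h>

#define CTX_IRQS_OFF	  (1u << 0)
#define CTX_HARDIRQ	  (1u << 1)
#define CTX_SOFTIRQ	  (1u << 2)
#define CTX_PREEMPT_SHIFT 8

/* Illustrative analogue of tracing_gen_ctx(): fold IRQ state and preempt
 * depth into a single word captured once at the reserve call. */
static unsigned int gen_ctx(int irqs_off, int hardirq, int softirq,
			    unsigned int preempt_depth)
{
	unsigned int ctx = 0;

	if (irqs_off)
		ctx |= CTX_IRQS_OFF;
	if (hardirq)
		ctx |= CTX_HARDIRQ;
	if (softirq)
		ctx |= CTX_SOFTIRQ;
	return ctx | (preempt_depth << CTX_PREEMPT_SHIFT);
}

int main(void)
{
	printf("ctx=%#x\n", gen_ctx(1, 0, 0, 2));
	return 0;
}
```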
| .. | .. | 
|---|
| 6159 | 6735 |   | 
|---|
| 6160 | 6736 |  	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); | 
|---|
| 6161 | 6737 |  	if (len) { | 
|---|
| 6162 |  | -		memcpy(&entry->buf, faulted, FAULTED_SIZE);  | 
|---|
 | 6738 | +		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);  | 
|---|
| 6163 | 6739 |  		cnt = FAULTED_SIZE; | 
|---|
| 6164 | 6740 |  		written = -EFAULT; | 
|---|
| 6165 | 6741 |  	} else | 
|---|
| 6166 | 6742 |  		written = cnt; | 
|---|
| 6167 |  | -	len = cnt;  | 
|---|
| 6168 | 6743 |   | 
|---|
| 6169 | 6744 |  	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { | 
|---|
| 6170 | 6745 |  		/* do not add \n before testing triggers, but add \0 */ | 
|---|
| .. | .. | 
|---|
| 6178 | 6753 |  	} else | 
|---|
| 6179 | 6754 |  		entry->buf[cnt] = '\0'; | 
|---|
| 6180 | 6755 |   | 
|---|
 | 6756 | +	if (static_branch_unlikely(&trace_marker_exports_enabled))  | 
|---|
 | 6757 | +		ftrace_exports(event, TRACE_EXPORT_MARKER);  | 
|---|
| 6181 | 6758 |  	__buffer_unlock_commit(buffer, event); | 
|---|
| 6182 | 6759 |   | 
|---|
| 6183 | 6760 |  	if (tt) | 
|---|
| .. | .. | 
|---|
| 6198 | 6775 |  { | 
|---|
| 6199 | 6776 |  	struct trace_array *tr = filp->private_data; | 
|---|
| 6200 | 6777 |  	struct ring_buffer_event *event; | 
|---|
| 6201 |  | -	struct ring_buffer *buffer;  | 
|---|
 | 6778 | +	struct trace_buffer *buffer;  | 
|---|
| 6202 | 6779 |  	struct raw_data_entry *entry; | 
|---|
| 6203 |  | -	const char faulted[] = "<faulted>";  | 
|---|
| 6204 |  | -	unsigned long irq_flags;  | 
|---|
| 6205 | 6780 |  	ssize_t written; | 
|---|
| 6206 | 6781 |  	int size; | 
|---|
| 6207 | 6782 |  	int len; | 
|---|
| .. | .. | 
|---|
| 6223 | 6798 |   | 
|---|
| 6224 | 6799 |  	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); | 
|---|
| 6225 | 6800 |   | 
|---|
| 6226 |  | -	local_save_flags(irq_flags);  | 
|---|
| 6227 | 6801 |  	size = sizeof(*entry) + cnt; | 
|---|
| 6228 | 6802 |  	if (cnt < FAULT_SIZE_ID) | 
|---|
| 6229 | 6803 |  		size += FAULT_SIZE_ID - cnt; | 
|---|
| 6230 | 6804 |   | 
|---|
| 6231 |  | -	buffer = tr->trace_buffer.buffer;  | 
|---|
 | 6805 | +	buffer = tr->array_buffer.buffer;  | 
|---|
| 6232 | 6806 |  	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, | 
|---|
| 6233 |  | -					    irq_flags, preempt_count());  | 
|---|
 | 6807 | +					    tracing_gen_ctx());  | 
|---|
| 6234 | 6808 |  	if (!event) | 
|---|
| 6235 | 6809 |  		/* Ring buffer disabled, return as if not open for write */ | 
|---|
| 6236 | 6810 |  		return -EBADF; | 
|---|
| .. | .. | 
|---|
| 6240 | 6814 |  	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); | 
|---|
| 6241 | 6815 |  	if (len) { | 
|---|
| 6242 | 6816 |  		entry->id = -1; | 
|---|
| 6243 |  | -		memcpy(&entry->buf, faulted, FAULTED_SIZE);  | 
|---|
 | 6817 | +		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);  | 
|---|
| 6244 | 6818 |  		written = -EFAULT; | 
|---|
| 6245 | 6819 |  	} else | 
|---|
| 6246 | 6820 |  		written = cnt; | 
|---|
| .. | .. | 
|---|
| 6283 | 6857 |   | 
|---|
| 6284 | 6858 |  	tr->clock_id = i; | 
|---|
| 6285 | 6859 |   | 
|---|
| 6286 |  | -	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);  | 
|---|
 | 6860 | +	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);  | 
|---|
| 6287 | 6861 |   | 
|---|
| 6288 | 6862 |  	/* | 
|---|
| 6289 | 6863 |  	 * New clock may not be consistent with the previous clock. | 
|---|
| 6290 | 6864 |  	 * Reset the buffer so that it doesn't have incomparable timestamps. | 
|---|
| 6291 | 6865 |  	 */ | 
|---|
| 6292 |  | -	tracing_reset_online_cpus(&tr->trace_buffer);  | 
|---|
 | 6866 | +	tracing_reset_online_cpus(&tr->array_buffer);  | 
|---|
| 6293 | 6867 |   | 
|---|
| 6294 | 6868 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 6295 | 6869 |  	if (tr->max_buffer.buffer) | 
|---|
| .. | .. | 
|---|
| 6335 | 6909 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 6336 | 6910 |  	int ret; | 
|---|
| 6337 | 6911 |   | 
|---|
| 6338 |  | -	if (tracing_disabled)  | 
|---|
| 6339 |  | -		return -ENODEV;  | 
|---|
| 6340 |  | -  | 
|---|
| 6341 |  | -	if (trace_array_get(tr))  | 
|---|
| 6342 |  | -		return -ENODEV;  | 
|---|
 | 6912 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 6913 | +	if (ret)  | 
|---|
 | 6914 | +		return ret;  | 
|---|
| 6343 | 6915 |   | 
|---|
| 6344 | 6916 |  	ret = single_open(file, tracing_clock_show, inode->i_private); | 
|---|
| 6345 | 6917 |  	if (ret < 0) | 
|---|
| .. | .. | 
|---|
| 6354 | 6926 |   | 
|---|
| 6355 | 6927 |  	mutex_lock(&trace_types_lock); | 
|---|
| 6356 | 6928 |   | 
|---|
| 6357 |  | -	if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))  | 
|---|
 | 6929 | +	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))  | 
|---|
| 6358 | 6930 |  		seq_puts(m, "delta [absolute]\n"); | 
|---|
| 6359 | 6931 |  	else | 
|---|
| 6360 | 6932 |  		seq_puts(m, "[delta] absolute\n"); | 
|---|
| .. | .. | 
|---|
| 6369 | 6941 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 6370 | 6942 |  	int ret; | 
|---|
| 6371 | 6943 |   | 
|---|
| 6372 |  | -	if (tracing_disabled)  | 
|---|
| 6373 |  | -		return -ENODEV;  | 
|---|
| 6374 |  | -  | 
|---|
| 6375 |  | -	if (trace_array_get(tr))  | 
|---|
| 6376 |  | -		return -ENODEV;  | 
|---|
 | 6944 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 6945 | +	if (ret)  | 
|---|
 | 6946 | +		return ret;  | 
|---|
| 6377 | 6947 |   | 
|---|
| 6378 | 6948 |  	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); | 
|---|
| 6379 | 6949 |  	if (ret < 0) | 
|---|
| .. | .. | 
|---|
| 6401 | 6971 |  			goto out; | 
|---|
| 6402 | 6972 |  	} | 
|---|
| 6403 | 6973 |   | 
|---|
| 6404 |  | -	ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);  | 
|---|
 | 6974 | +	ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);  | 
|---|
| 6405 | 6975 |   | 
|---|
| 6406 | 6976 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 6407 | 6977 |  	if (tr->max_buffer.buffer) | 
|---|
| .. | .. | 
|---|
| 6426 | 6996 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 6427 | 6997 |  	struct trace_iterator *iter; | 
|---|
| 6428 | 6998 |  	struct seq_file *m; | 
|---|
| 6429 |  | -	int ret = 0;  | 
|---|
 | 6999 | +	int ret;  | 
|---|
| 6430 | 7000 |   | 
|---|
| 6431 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 6432 |  | -		return -ENODEV;  | 
|---|
 | 7001 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 7002 | +	if (ret)  | 
|---|
 | 7003 | +		return ret;  | 
|---|
| 6433 | 7004 |   | 
|---|
| 6434 | 7005 |  	if (file->f_mode & FMODE_READ) { | 
|---|
| 6435 | 7006 |  		iter = __tracing_open(inode, file, true); | 
|---|
| .. | .. | 
|---|
| 6449 | 7020 |  		ret = 0; | 
|---|
| 6450 | 7021 |   | 
|---|
| 6451 | 7022 |  		iter->tr = tr; | 
|---|
| 6452 |  | -		iter->trace_buffer = &tr->max_buffer;  | 
|---|
 | 7023 | +		iter->array_buffer = &tr->max_buffer;  | 
|---|
| 6453 | 7024 |  		iter->cpu_file = tracing_get_cpu(inode); | 
|---|
| 6454 | 7025 |  		m->private = iter; | 
|---|
| 6455 | 7026 |  		file->private_data = m; | 
|---|
| .. | .. | 
|---|
| 6486 | 7057 |  		goto out; | 
|---|
| 6487 | 7058 |  	} | 
|---|
| 6488 | 7059 |   | 
|---|
 | 7060 | +	local_irq_disable();  | 
|---|
 | 7061 | +	arch_spin_lock(&tr->max_lock);  | 
|---|
 | 7062 | +	if (tr->cond_snapshot)  | 
|---|
 | 7063 | +		ret = -EBUSY;  | 
|---|
 | 7064 | +	arch_spin_unlock(&tr->max_lock);  | 
|---|
 | 7065 | +	local_irq_enable();  | 
|---|
 | 7066 | +	if (ret)  | 
|---|
 | 7067 | +		goto out;  | 
|---|
 | 7068 | +  | 
|---|
| 6489 | 7069 |  	switch (val) { | 
|---|
| 6490 | 7070 |  	case 0: | 
|---|
| 6491 | 7071 |  		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { | 
|---|
| .. | .. | 
|---|
| 6505 | 7085 |  #endif | 
|---|
| 6506 | 7086 |  		if (tr->allocated_snapshot) | 
|---|
| 6507 | 7087 |  			ret = resize_buffer_duplicate_size(&tr->max_buffer, | 
|---|
| 6508 |  | -					&tr->trace_buffer, iter->cpu_file);  | 
|---|
 | 7088 | +					&tr->array_buffer, iter->cpu_file);  | 
|---|
| 6509 | 7089 |  		else | 
|---|
| 6510 | 7090 |  			ret = tracing_alloc_snapshot_instance(tr); | 
|---|
| 6511 | 7091 |  		if (ret < 0) | 
|---|
| .. | .. | 
|---|
| 6513 | 7093 |  		local_irq_disable(); | 
|---|
| 6514 | 7094 |  		/* Now, we're going to swap */ | 
|---|
| 6515 | 7095 |  		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) | 
|---|
| 6516 |  | -			update_max_tr(tr, current, smp_processor_id());  | 
|---|
 | 7096 | +			update_max_tr(tr, current, smp_processor_id(), NULL);  | 
|---|
| 6517 | 7097 |  		else | 
|---|
| 6518 | 7098 |  			update_max_tr_single(tr, current, iter->cpu_file); | 
|---|
| 6519 | 7099 |  		local_irq_enable(); | 
|---|
| .. | .. | 
|---|
| 6523 | 7103 |  			if (iter->cpu_file == RING_BUFFER_ALL_CPUS) | 
|---|
| 6524 | 7104 |  				tracing_reset_online_cpus(&tr->max_buffer); | 
|---|
| 6525 | 7105 |  			else | 
|---|
| 6526 |  | -				tracing_reset(&tr->max_buffer, iter->cpu_file);  | 
|---|
 | 7106 | +				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);  | 
|---|
| 6527 | 7107 |  		} | 
|---|
| 6528 | 7108 |  		break; | 
|---|
| 6529 | 7109 |  	} | 
|---|
| .. | .. | 
|---|
| 6567 | 7147 |  	struct ftrace_buffer_info *info; | 
|---|
| 6568 | 7148 |  	int ret; | 
|---|
| 6569 | 7149 |   | 
|---|
 | 7150 | +	/* The following checks for tracefs lockdown */  | 
|---|
| 6570 | 7151 |  	ret = tracing_buffers_open(inode, filp); | 
|---|
| 6571 | 7152 |  	if (ret < 0) | 
|---|
| 6572 | 7153 |  		return ret; | 
|---|
| .. | .. | 
|---|
| 6579 | 7160 |  	} | 
|---|
| 6580 | 7161 |   | 
|---|
| 6581 | 7162 |  	info->iter.snapshot = true; | 
|---|
| 6582 |  | -	info->iter.trace_buffer = &info->iter.tr->max_buffer;  | 
|---|
 | 7163 | +	info->iter.array_buffer = &info->iter.tr->max_buffer;  | 
|---|
| 6583 | 7164 |   | 
|---|
| 6584 | 7165 |  	return ret; | 
|---|
| 6585 | 7166 |  } | 
|---|
| .. | .. | 
|---|
| 6688 | 7269 |   | 
|---|
| 6689 | 7270 |  #endif /* CONFIG_TRACER_SNAPSHOT */ | 
|---|
| 6690 | 7271 |   | 
|---|
 | 7272 | +#define TRACING_LOG_ERRS_MAX	8  | 
|---|
 | 7273 | +#define TRACING_LOG_LOC_MAX	128  | 
|---|
 | 7274 | +  | 
|---|
 | 7275 | +#define CMD_PREFIX "  Command: "  | 
|---|
 | 7276 | +  | 
|---|
 | 7277 | +struct err_info {  | 
|---|
 | 7278 | +	const char	**errs;	/* ptr to loc-specific array of err strings */  | 
|---|
 | 7279 | +	u8		type;	/* index into errs -> specific err string */  | 
|---|
 | 7280 | +	u8		pos;	/* MAX_FILTER_STR_VAL = 256 */  | 
|---|
 | 7281 | +	u64		ts;  | 
|---|
 | 7282 | +};  | 
|---|
 | 7283 | +  | 
|---|
 | 7284 | +struct tracing_log_err {  | 
|---|
 | 7285 | +	struct list_head	list;  | 
|---|
 | 7286 | +	struct err_info		info;  | 
|---|
 | 7287 | +	char			loc[TRACING_LOG_LOC_MAX]; /* err location */  | 
|---|
 | 7288 | +	char			cmd[MAX_FILTER_STR_VAL]; /* what caused err */  | 
|---|
 | 7289 | +};  | 
|---|
 | 7290 | +  | 
|---|
 | 7291 | +static DEFINE_MUTEX(tracing_err_log_lock);  | 
|---|
 | 7292 | +  | 
|---|
 | 7293 | +static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)  | 
|---|
 | 7294 | +{  | 
|---|
 | 7295 | +	struct tracing_log_err *err;  | 
|---|
 | 7296 | +  | 
|---|
 | 7297 | +	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {  | 
|---|
 | 7298 | +		err = kzalloc(sizeof(*err), GFP_KERNEL);  | 
|---|
 | 7299 | +		if (!err)  | 
|---|
 | 7300 | +			err = ERR_PTR(-ENOMEM);  | 
|---|
 | 7301 | +		else  | 
|---|
 | 7302 | +			tr->n_err_log_entries++;  | 
|---|
 | 7303 | +  | 
|---|
 | 7304 | +		return err;  | 
|---|
 | 7305 | +	}  | 
|---|
 | 7306 | +  | 
|---|
 | 7307 | +	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);  | 
|---|
 | 7308 | +	list_del(&err->list);  | 
|---|
 | 7309 | +  | 
|---|
 | 7310 | +	return err;  | 
|---|
 | 7311 | +}  | 
|---|
 | 7312 | +  | 
|---|
 | 7313 | +/**  | 
|---|
 | 7314 | + * err_pos - find the position of a string within a command for error careting  | 
|---|
 | 7315 | + * @cmd: The tracing command that caused the error  | 
|---|
 | 7316 | + * @str: The string to position the caret at within @cmd  | 
|---|
 | 7317 | + *  | 
|---|
 | 7318 | + * Finds the position of the first occurrence of @str within @cmd.  The  | 
|---|
 | 7319 | + * return value can be passed to tracing_log_err() for caret placement  | 
|---|
 | 7320 | + * within @cmd.  | 
|---|
 | 7321 | + *  | 
|---|
 | 7322 | + * Returns the index within @cmd of the first occurrence of @str or 0  | 
|---|
 | 7323 | + * if @str was not found.  | 
|---|
 | 7324 | + */  | 
|---|
 | 7325 | +unsigned int err_pos(char *cmd, const char *str)  | 
|---|
 | 7326 | +{  | 
|---|
 | 7327 | +	char *found;  | 
|---|
 | 7328 | +  | 
|---|
 | 7329 | +	if (WARN_ON(!strlen(cmd)))  | 
|---|
 | 7330 | +		return 0;  | 
|---|
 | 7331 | +  | 
|---|
 | 7332 | +	found = strstr(cmd, str);  | 
|---|
 | 7333 | +	if (found)  | 
|---|
 | 7334 | +		return found - cmd;  | 
|---|
 | 7335 | +  | 
|---|
 | 7336 | +	return 0;  | 
|---|
 | 7337 | +}  | 
|---|
 | 7338 | +  | 
|---|
 | 7339 | +/**  | 
|---|
 | 7340 | + * tracing_log_err - write an error to the tracing error log  | 
|---|
 | 7341 | + * @tr: The associated trace array for the error (NULL for top level array)  | 
|---|
 | 7342 | + * @loc: A string describing where the error occurred  | 
|---|
 | 7343 | + * @cmd: The tracing command that caused the error  | 
|---|
 | 7344 | + * @errs: The array of loc-specific static error strings  | 
|---|
 | 7345 | + * @type: The index into errs[], which produces the specific static err string  | 
|---|
 | 7346 | + * @pos: The position the caret should be placed in the cmd  | 
|---|
 | 7347 | + *  | 
|---|
 | 7348 | + * Writes an error into tracing/error_log of the form:  | 
|---|
 | 7349 | + *  | 
|---|
 | 7350 | + * <loc>: error: <text>  | 
|---|
 | 7351 | + *   Command: <cmd>  | 
|---|
 | 7352 | + *              ^  | 
|---|
 | 7353 | + *  | 
|---|
 | 7354 | + * tracing/error_log is a small log file containing the last  | 
|---|
 | 7355 | + * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated  | 
|---|
 | 7356 | + * unless there has been a tracing error, and the error log can be  | 
|---|
 | 7357 | + * cleared and have its memory freed by writing the empty string in  | 
|---|
 | 7358 | + * truncation mode to it, i.e. echo > tracing/error_log.  | 
|---|
 | 7359 | + *  | 
|---|
 | 7360 | + * NOTE: the @errs array along with the @type param are used to  | 
|---|
 | 7361 | + * produce a static error string - this string is not copied and saved  | 
|---|
 | 7362 | + * when the error is logged - only a pointer to it is saved.  See  | 
|---|
 | 7363 | + * existing callers for examples of how static strings are typically  | 
|---|
 | 7364 | + * defined for use with tracing_log_err().  | 
|---|
 | 7365 | + */  | 
|---|
 | 7366 | +void tracing_log_err(struct trace_array *tr,  | 
|---|
 | 7367 | +		     const char *loc, const char *cmd,  | 
|---|
 | 7368 | +		     const char **errs, u8 type, u8 pos)  | 
|---|
 | 7369 | +{  | 
|---|
 | 7370 | +	struct tracing_log_err *err;  | 
|---|
 | 7371 | +  | 
|---|
 | 7372 | +	if (!tr)  | 
|---|
 | 7373 | +		tr = &global_trace;  | 
|---|
 | 7374 | +  | 
|---|
 | 7375 | +	mutex_lock(&tracing_err_log_lock);  | 
|---|
 | 7376 | +	err = get_tracing_log_err(tr);  | 
|---|
 | 7377 | +	if (PTR_ERR(err) == -ENOMEM) {  | 
|---|
 | 7378 | +		mutex_unlock(&tracing_err_log_lock);  | 
|---|
 | 7379 | +		return;  | 
|---|
 | 7380 | +	}  | 
|---|
 | 7381 | +  | 
|---|
 | 7382 | +	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);  | 
|---|
 | 7383 | +	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);  | 
|---|
 | 7384 | +  | 
|---|
 | 7385 | +	err->info.errs = errs;  | 
|---|
 | 7386 | +	err->info.type = type;  | 
|---|
 | 7387 | +	err->info.pos = pos;  | 
|---|
 | 7388 | +	err->info.ts = local_clock();  | 
|---|
 | 7389 | +  | 
|---|
 | 7390 | +	list_add_tail(&err->list, &tr->err_log);  | 
|---|
 | 7391 | +	mutex_unlock(&tracing_err_log_lock);  | 
|---|
 | 7392 | +}  | 
|---|
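As the NOTE above says, callers keep a static array of error strings and index it with @type, while @pos usually comes from err_pos(). A minimal caller sketch (the names below are purely illustrative, not from this patch):

```c
/* Illustrative only: a command parser reporting a bad token. */
static const char *my_parser_errs[] = {
	"Field not found",	/* MY_ERR_FIELD_NOT_FOUND */
	"Duplicate action",	/* MY_ERR_DUP_ACTION */
};

enum { MY_ERR_FIELD_NOT_FOUND, MY_ERR_DUP_ACTION };

static void my_parse_error(struct trace_array *tr, char *cmd, const char *tok)
{
	/* Place the caret under the first occurrence of @tok within @cmd */
	tracing_log_err(tr, "my_parser", cmd, my_parser_errs,
			MY_ERR_FIELD_NOT_FOUND, err_pos(cmd, tok));
}
```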
 | 7393 | +  | 
|---|
 | 7394 | +static void clear_tracing_err_log(struct trace_array *tr)  | 
|---|
 | 7395 | +{  | 
|---|
 | 7396 | +	struct tracing_log_err *err, *next;  | 
|---|
 | 7397 | +  | 
|---|
 | 7398 | +	mutex_lock(&tracing_err_log_lock);  | 
|---|
 | 7399 | +	list_for_each_entry_safe(err, next, &tr->err_log, list) {  | 
|---|
 | 7400 | +		list_del(&err->list);  | 
|---|
 | 7401 | +		kfree(err);  | 
|---|
 | 7402 | +	}  | 
|---|
 | 7403 | +  | 
|---|
 | 7404 | +	tr->n_err_log_entries = 0;  | 
|---|
 | 7405 | +	mutex_unlock(&tracing_err_log_lock);  | 
|---|
 | 7406 | +}  | 
|---|
 | 7407 | +  | 
|---|
 | 7408 | +static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)  | 
|---|
 | 7409 | +{  | 
|---|
 | 7410 | +	struct trace_array *tr = m->private;  | 
|---|
 | 7411 | +  | 
|---|
 | 7412 | +	mutex_lock(&tracing_err_log_lock);  | 
|---|
 | 7413 | +  | 
|---|
 | 7414 | +	return seq_list_start(&tr->err_log, *pos);  | 
|---|
 | 7415 | +}  | 
|---|
 | 7416 | +  | 
|---|
 | 7417 | +static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)  | 
|---|
 | 7418 | +{  | 
|---|
 | 7419 | +	struct trace_array *tr = m->private;  | 
|---|
 | 7420 | +  | 
|---|
 | 7421 | +	return seq_list_next(v, &tr->err_log, pos);  | 
|---|
 | 7422 | +}  | 
|---|
 | 7423 | +  | 
|---|
 | 7424 | +static void tracing_err_log_seq_stop(struct seq_file *m, void *v)  | 
|---|
 | 7425 | +{  | 
|---|
 | 7426 | +	mutex_unlock(&tracing_err_log_lock);  | 
|---|
 | 7427 | +}  | 
|---|
 | 7428 | +  | 
|---|
 | 7429 | +static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)  | 
|---|
 | 7430 | +{  | 
|---|
 | 7431 | +	u8 i;  | 
|---|
 | 7432 | +  | 
|---|
 | 7433 | +	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)  | 
|---|
 | 7434 | +		seq_putc(m, ' ');  | 
|---|
 | 7435 | +	for (i = 0; i < pos; i++)  | 
|---|
 | 7436 | +		seq_putc(m, ' ');  | 
|---|
 | 7437 | +	seq_puts(m, "^\n");  | 
|---|
 | 7438 | +}  | 
|---|
 | 7439 | +  | 
|---|
 | 7440 | +static int tracing_err_log_seq_show(struct seq_file *m, void *v)  | 
|---|
 | 7441 | +{  | 
|---|
 | 7442 | +	struct tracing_log_err *err = v;  | 
|---|
 | 7443 | +  | 
|---|
 | 7444 | +	if (err) {  | 
|---|
 | 7445 | +		const char *err_text = err->info.errs[err->info.type];  | 
|---|
 | 7446 | +		u64 sec = err->info.ts;  | 
|---|
 | 7447 | +		u32 nsec;  | 
|---|
 | 7448 | +  | 
|---|
 | 7449 | +		nsec = do_div(sec, NSEC_PER_SEC);  | 
|---|
 | 7450 | +		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,  | 
|---|
 | 7451 | +			   err->loc, err_text);  | 
|---|
 | 7452 | +		seq_printf(m, "%s", err->cmd);  | 
|---|
 | 7453 | +		tracing_err_log_show_pos(m, err->info.pos);  | 
|---|
 | 7454 | +	}  | 
|---|
 | 7455 | +  | 
|---|
 | 7456 | +	return 0;  | 
|---|
 | 7457 | +}  | 
|---|
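Putting the pieces together: each entry the show handler emits is the `[sec.usec]` timestamp, the "<loc>: error: " header stored in err->loc, the static error text, the indented command, and the caret pad from tracing_err_log_show_pos(). A sample rendering (timestamp, location, and command are illustrative):

```c
/*
 * [  518.415001] hist:sched:sched_switch: error: Field not found
 *   Command: hist:keys=next_pid:ts0=common_timestamp.usecs
 *                               ^
 */
```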
 | 7458 | +  | 
|---|
 | 7459 | +static const struct seq_operations tracing_err_log_seq_ops = {  | 
|---|
 | 7460 | +	.start  = tracing_err_log_seq_start,  | 
|---|
 | 7461 | +	.next   = tracing_err_log_seq_next,  | 
|---|
 | 7462 | +	.stop   = tracing_err_log_seq_stop,  | 
|---|
 | 7463 | +	.show   = tracing_err_log_seq_show  | 
|---|
 | 7464 | +};  | 
|---|
 | 7465 | +  | 
|---|
 | 7466 | +static int tracing_err_log_open(struct inode *inode, struct file *file)  | 
|---|
 | 7467 | +{  | 
|---|
 | 7468 | +	struct trace_array *tr = inode->i_private;  | 
|---|
 | 7469 | +	int ret = 0;  | 
|---|
 | 7470 | +  | 
|---|
 | 7471 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 7472 | +	if (ret)  | 
|---|
 | 7473 | +		return ret;  | 
|---|
 | 7474 | +  | 
|---|
 | 7475 | +	/* If this file was opened for write, then erase contents */  | 
|---|
 | 7476 | +	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))  | 
|---|
 | 7477 | +		clear_tracing_err_log(tr);  | 
|---|
 | 7478 | +  | 
|---|
 | 7479 | +	if (file->f_mode & FMODE_READ) {  | 
|---|
 | 7480 | +		ret = seq_open(file, &tracing_err_log_seq_ops);  | 
|---|
 | 7481 | +		if (!ret) {  | 
|---|
 | 7482 | +			struct seq_file *m = file->private_data;  | 
|---|
 | 7483 | +			m->private = tr;  | 
|---|
 | 7484 | +		} else {  | 
|---|
 | 7485 | +			trace_array_put(tr);  | 
|---|
 | 7486 | +		}  | 
|---|
 | 7487 | +	}  | 
|---|
 | 7488 | +	return ret;  | 
|---|
 | 7489 | +}  | 
|---|
 | 7490 | +  | 
|---|
 | 7491 | +static ssize_t tracing_err_log_write(struct file *file,  | 
|---|
 | 7492 | +				     const char __user *buffer,  | 
|---|
 | 7493 | +				     size_t count, loff_t *ppos)  | 
|---|
 | 7494 | +{  | 
|---|
 | 7495 | +	return count;  | 
|---|
 | 7496 | +}  | 
|---|
 | 7497 | +  | 
|---|
 | 7498 | +static int tracing_err_log_release(struct inode *inode, struct file *file)  | 
|---|
 | 7499 | +{  | 
|---|
 | 7500 | +	struct trace_array *tr = inode->i_private;  | 
|---|
 | 7501 | +  | 
|---|
 | 7502 | +	trace_array_put(tr);  | 
|---|
 | 7503 | +  | 
|---|
 | 7504 | +	if (file->f_mode & FMODE_READ)  | 
|---|
 | 7505 | +		seq_release(inode, file);  | 
|---|
 | 7506 | +  | 
|---|
 | 7507 | +	return 0;  | 
|---|
 | 7508 | +}  | 
|---|
 | 7509 | +  | 
|---|
 | 7510 | +static const struct file_operations tracing_err_log_fops = {  | 
|---|
 | 7511 | +	.open           = tracing_err_log_open,  | 
|---|
 | 7512 | +	.write		= tracing_err_log_write,  | 
|---|
 | 7513 | +	.read           = seq_read,  | 
|---|
 | 7514 | +	.llseek         = seq_lseek,  | 
|---|
 | 7515 | +	.release        = tracing_err_log_release,  | 
|---|
 | 7516 | +};  | 
|---|
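The FMODE_WRITE/O_TRUNC branch in the open handler is what makes `echo > tracing/error_log` clear the log. The same effect from a user-space program, as a sketch (the tracefs mount point is an assumption):

```c
#include <fcntl.h>
#include <unistd.h>

/* Truncating the file on open is what clears the error log. */
int clear_tracing_error_log(void)
{
	int fd = open("/sys/kernel/tracing/error_log", O_WRONLY | O_TRUNC);

	if (fd < 0)
		return -1;
	close(fd);
	return 0;
}
```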
 | 7517 | +  | 
|---|
| 6691 | 7518 |  static int tracing_buffers_open(struct inode *inode, struct file *filp) | 
|---|
| 6692 | 7519 |  { | 
|---|
| 6693 | 7520 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 6694 | 7521 |  	struct ftrace_buffer_info *info; | 
|---|
| 6695 | 7522 |  	int ret; | 
|---|
| 6696 | 7523 |   | 
|---|
| 6697 |  | -	if (tracing_disabled)  | 
|---|
| 6698 |  | -		return -ENODEV;  | 
|---|
 | 7524 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 7525 | +	if (ret)  | 
|---|
 | 7526 | +		return ret;  | 
|---|
| 6699 | 7527 |   | 
|---|
| 6700 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 6701 |  | -		return -ENODEV;  | 
|---|
| 6702 |  | -  | 
|---|
| 6703 |  | -	info = kzalloc(sizeof(*info), GFP_KERNEL);  | 
|---|
 | 7528 | +	info = kvzalloc(sizeof(*info), GFP_KERNEL);  | 
|---|
| 6704 | 7529 |  	if (!info) { | 
|---|
| 6705 | 7530 |  		trace_array_put(tr); | 
|---|
| 6706 | 7531 |  		return -ENOMEM; | 
|---|
| .. | .. | 
|---|
| 6711 | 7536 |  	info->iter.tr		= tr; | 
|---|
| 6712 | 7537 |  	info->iter.cpu_file	= tracing_get_cpu(inode); | 
|---|
| 6713 | 7538 |  	info->iter.trace	= tr->current_trace; | 
|---|
| 6714 |  | -	info->iter.trace_buffer = &tr->trace_buffer;  | 
|---|
 | 7539 | +	info->iter.array_buffer = &tr->array_buffer;  | 
|---|
| 6715 | 7540 |  	info->spare		= NULL; | 
|---|
| 6716 | 7541 |  	/* Force reading ring buffer for first read */ | 
|---|
| 6717 | 7542 |  	info->read		= (unsigned int)-1; | 
|---|
| 6718 | 7543 |   | 
|---|
| 6719 | 7544 |  	filp->private_data = info; | 
|---|
| 6720 | 7545 |   | 
|---|
| 6721 |  | -	tr->current_trace->ref++;  | 
|---|
 | 7546 | +	tr->trace_ref++;  | 
|---|
| 6722 | 7547 |   | 
|---|
| 6723 | 7548 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 6724 | 7549 |   | 
|---|
| .. | .. | 
|---|
| 6756 | 7581 |  #endif | 
|---|
| 6757 | 7582 |   | 
|---|
| 6758 | 7583 |  	if (!info->spare) { | 
|---|
| 6759 |  | -		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,  | 
|---|
 | 7584 | +		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,  | 
|---|
| 6760 | 7585 |  							  iter->cpu_file); | 
|---|
| 6761 | 7586 |  		if (IS_ERR(info->spare)) { | 
|---|
| 6762 | 7587 |  			ret = PTR_ERR(info->spare); | 
|---|
| .. | .. | 
|---|
| 6774 | 7599 |   | 
|---|
| 6775 | 7600 |   again: | 
|---|
| 6776 | 7601 |  	trace_access_lock(iter->cpu_file); | 
|---|
| 6777 |  | -	ret = ring_buffer_read_page(iter->trace_buffer->buffer,  | 
|---|
 | 7602 | +	ret = ring_buffer_read_page(iter->array_buffer->buffer,  | 
|---|
| 6778 | 7603 |  				    &info->spare, | 
|---|
| 6779 | 7604 |  				    count, | 
|---|
| 6780 | 7605 |  				    iter->cpu_file, 0); | 
|---|
| .. | .. | 
|---|
| 6785 | 7610 |  			if ((filp->f_flags & O_NONBLOCK)) | 
|---|
| 6786 | 7611 |  				return -EAGAIN; | 
|---|
| 6787 | 7612 |   | 
|---|
| 6788 |  | -			ret = wait_on_pipe(iter, false);  | 
|---|
 | 7613 | +			ret = wait_on_pipe(iter, 0);  | 
|---|
| 6789 | 7614 |  			if (ret) | 
|---|
| 6790 | 7615 |  				return ret; | 
|---|
| 6791 | 7616 |   | 
|---|
| .. | .. | 
|---|
| 6819 | 7644 |   | 
|---|
| 6820 | 7645 |  	mutex_lock(&trace_types_lock); | 
|---|
| 6821 | 7646 |   | 
|---|
| 6822 |  | -	iter->tr->current_trace->ref--;  | 
|---|
 | 7647 | +	iter->tr->trace_ref--;  | 
|---|
| 6823 | 7648 |   | 
|---|
| 6824 | 7649 |  	__trace_array_put(iter->tr); | 
|---|
| 6825 | 7650 |   | 
|---|
| 6826 | 7651 |  	if (info->spare) | 
|---|
| 6827 |  | -		ring_buffer_free_read_page(iter->trace_buffer->buffer,  | 
|---|
 | 7652 | +		ring_buffer_free_read_page(iter->array_buffer->buffer,  | 
|---|
| 6828 | 7653 |  					   info->spare_cpu, info->spare); | 
|---|
| 6829 |  | -	kfree(info);  | 
|---|
 | 7654 | +	kvfree(info);  | 
|---|
| 6830 | 7655 |   | 
|---|
| 6831 | 7656 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 6832 | 7657 |   | 
|---|
| .. | .. | 
|---|
| 6834 | 7659 |  } | 
|---|
| 6835 | 7660 |   | 
|---|
| 6836 | 7661 |  struct buffer_ref { | 
|---|
| 6837 |  | -	struct ring_buffer	*buffer;  | 
|---|
 | 7662 | +	struct trace_buffer	*buffer;  | 
|---|
| 6838 | 7663 |  	void			*page; | 
|---|
| 6839 | 7664 |  	int			cpu; | 
|---|
| 6840 | 7665 |  	refcount_t		refcount; | 
|---|
| .. | .. | 
|---|
| 6871 | 7696 |   | 
|---|
| 6872 | 7697 |  /* Pipe buffer operations for a buffer. */ | 
|---|
| 6873 | 7698 |  static const struct pipe_buf_operations buffer_pipe_buf_ops = { | 
|---|
| 6874 |  | -	.can_merge		= 0,  | 
|---|
| 6875 |  | -	.confirm		= generic_pipe_buf_confirm,  | 
|---|
| 6876 | 7699 |  	.release		= buffer_pipe_buf_release, | 
|---|
| 6877 |  | -	.steal			= generic_pipe_buf_nosteal,  | 
|---|
| 6878 | 7700 |  	.get			= buffer_pipe_buf_get, | 
|---|
| 6879 | 7701 |  }; | 
|---|
| 6880 | 7702 |   | 
|---|
| .. | .. | 
|---|
| 6930 | 7752 |   | 
|---|
| 6931 | 7753 |   again: | 
|---|
| 6932 | 7754 |  	trace_access_lock(iter->cpu_file); | 
|---|
| 6933 |  | -	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);  | 
|---|
 | 7755 | +	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);  | 
|---|
| 6934 | 7756 |   | 
|---|
| 6935 | 7757 |  	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { | 
|---|
| 6936 | 7758 |  		struct page *page; | 
|---|
| .. | .. | 
|---|
| 6943 | 7765 |  		} | 
|---|
| 6944 | 7766 |   | 
|---|
| 6945 | 7767 |  		refcount_set(&ref->refcount, 1); | 
|---|
| 6946 |  | -		ref->buffer = iter->trace_buffer->buffer;  | 
|---|
 | 7768 | +		ref->buffer = iter->array_buffer->buffer;  | 
|---|
| 6947 | 7769 |  		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); | 
|---|
| 6948 | 7770 |  		if (IS_ERR(ref->page)) { | 
|---|
| 6949 | 7771 |  			ret = PTR_ERR(ref->page); | 
|---|
| .. | .. | 
|---|
| 6971 | 7793 |  		spd.nr_pages++; | 
|---|
| 6972 | 7794 |  		*ppos += PAGE_SIZE; | 
|---|
| 6973 | 7795 |   | 
|---|
| 6974 |  | -		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);  | 
|---|
 | 7796 | +		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);  | 
|---|
| 6975 | 7797 |  	} | 
|---|
| 6976 | 7798 |   | 
|---|
| 6977 | 7799 |  	trace_access_unlock(iter->cpu_file); | 
|---|
| .. | .. | 
|---|
| 6986 | 7808 |  		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) | 
|---|
| 6987 | 7809 |  			goto out; | 
|---|
| 6988 | 7810 |   | 
|---|
| 6989 |  | -		ret = wait_on_pipe(iter, true);  | 
|---|
 | 7811 | +		ret = wait_on_pipe(iter, iter->tr->buffer_percent);  | 
|---|
| 6990 | 7812 |  		if (ret) | 
|---|
| 6991 | 7813 |  			goto out; | 
|---|
| 6992 | 7814 |   | 
|---|
| .. | .. | 
|---|
| 7015 | 7837 |  { | 
|---|
| 7016 | 7838 |  	struct inode *inode = file_inode(filp); | 
|---|
| 7017 | 7839 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 7018 |  | -	struct trace_buffer *trace_buf = &tr->trace_buffer;  | 
|---|
 | 7840 | +	struct array_buffer *trace_buf = &tr->array_buffer;  | 
|---|
| 7019 | 7841 |  	int cpu = tracing_get_cpu(inode); | 
|---|
| 7020 | 7842 |  	struct trace_seq *s; | 
|---|
| 7021 | 7843 |  	unsigned long cnt; | 
|---|
| .. | .. | 
|---|
| 7086 | 7908 |  tracing_read_dyn_info(struct file *filp, char __user *ubuf, | 
|---|
| 7087 | 7909 |  		  size_t cnt, loff_t *ppos) | 
|---|
| 7088 | 7910 |  { | 
|---|
| 7089 |  | -	unsigned long *p = filp->private_data;  | 
|---|
| 7090 |  | -	char buf[64]; /* Not too big for a shallow stack */  | 
|---|
 | 7911 | +	ssize_t ret;  | 
|---|
 | 7912 | +	char *buf;  | 
|---|
| 7091 | 7913 |  	int r; | 
|---|
| 7092 | 7914 |   | 
|---|
| 7093 |  | -	r = scnprintf(buf, 63, "%ld", *p);  | 
|---|
| 7094 |  | -	buf[r++] = '\n';  | 
|---|
 | 7915 | +	/* 256 should be plenty to hold the amount needed */  | 
|---|
 | 7916 | +	buf = kmalloc(256, GFP_KERNEL);  | 
|---|
 | 7917 | +	if (!buf)  | 
|---|
 | 7918 | +		return -ENOMEM;  | 
|---|
| 7095 | 7919 |   | 
|---|
| 7096 |  | -	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);  | 
|---|
 | 7920 | +	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",  | 
|---|
 | 7921 | +		      ftrace_update_tot_cnt,  | 
|---|
 | 7922 | +		      ftrace_number_of_pages,  | 
|---|
 | 7923 | +		      ftrace_number_of_groups);  | 
|---|
 | 7924 | +  | 
|---|
 | 7925 | +	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);  | 
|---|
 | 7926 | +	kfree(buf);  | 
|---|
 | 7927 | +	return ret;  | 
|---|
| 7097 | 7928 |  } | 
|---|
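With this rework, reading `dyn_ftrace_total_info` reports the ftrace page and group accounting alongside the record count, producing a single line of the form `35085 pages:305 groups: 307` (the numbers here are illustrative).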
| 7098 | 7929 |   | 
|---|
| 7099 | 7930 |  static const struct file_operations tracing_dyn_info_fops = { | 
|---|
| .. | .. | 
|---|
| 7287 | 8118 |   | 
|---|
| 7288 | 8119 |  	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); | 
|---|
| 7289 | 8120 |   | 
|---|
| 7290 |  | -	WARN_ONCE(!tr->percpu_dir,  | 
|---|
 | 8121 | +	MEM_FAIL(!tr->percpu_dir,  | 
|---|
| 7291 | 8122 |  		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu); | 
|---|
| 7292 | 8123 |   | 
|---|
| 7293 | 8124 |  	return tr->percpu_dir; | 
|---|
| .. | .. | 
|---|
| 7608 | 8439 |  	for (cnt = 0; opts[cnt].name; cnt++) { | 
|---|
| 7609 | 8440 |  		create_trace_option_file(tr, &topts[cnt], flags, | 
|---|
| 7610 | 8441 |  					 &opts[cnt]); | 
|---|
| 7611 |  | -		WARN_ONCE(topts[cnt].entry == NULL,  | 
|---|
 | 8442 | +		MEM_FAIL(topts[cnt].entry == NULL,  | 
|---|
| 7612 | 8443 |  			  "Failed to create trace option: %s", | 
|---|
| 7613 | 8444 |  			  opts[cnt].name); | 
|---|
| 7614 | 8445 |  	} | 
|---|
| .. | .. | 
|---|
| 7665 | 8496 |  		size_t cnt, loff_t *ppos) | 
|---|
| 7666 | 8497 |  { | 
|---|
| 7667 | 8498 |  	struct trace_array *tr = filp->private_data; | 
|---|
| 7668 |  | -	struct ring_buffer *buffer = tr->trace_buffer.buffer;  | 
|---|
 | 8499 | +	struct trace_buffer *buffer = tr->array_buffer.buffer;  | 
|---|
| 7669 | 8500 |  	unsigned long val; | 
|---|
| 7670 | 8501 |  	int ret; | 
|---|
| 7671 | 8502 |   | 
|---|
| .. | .. | 
|---|
| 7702 | 8533 |  	.llseek		= default_llseek, | 
|---|
| 7703 | 8534 |  }; | 
|---|
| 7704 | 8535 |   | 
|---|
| 7705 |  | -struct dentry *trace_instance_dir;  | 
|---|
 | 8536 | +static ssize_t  | 
|---|
 | 8537 | +buffer_percent_read(struct file *filp, char __user *ubuf,  | 
|---|
 | 8538 | +		    size_t cnt, loff_t *ppos)  | 
|---|
 | 8539 | +{  | 
|---|
 | 8540 | +	struct trace_array *tr = filp->private_data;  | 
|---|
 | 8541 | +	char buf[64];  | 
|---|
 | 8542 | +	int r;  | 
|---|
 | 8543 | +  | 
|---|
 | 8544 | +	r = tr->buffer_percent;  | 
|---|
 | 8545 | +	r = sprintf(buf, "%d\n", r);  | 
|---|
 | 8546 | +  | 
|---|
 | 8547 | +	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);  | 
|---|
 | 8548 | +}  | 
|---|
 | 8549 | +  | 
|---|
 | 8550 | +static ssize_t  | 
|---|
 | 8551 | +buffer_percent_write(struct file *filp, const char __user *ubuf,  | 
|---|
 | 8552 | +		     size_t cnt, loff_t *ppos)  | 
|---|
 | 8553 | +{  | 
|---|
 | 8554 | +	struct trace_array *tr = filp->private_data;  | 
|---|
 | 8555 | +	unsigned long val;  | 
|---|
 | 8556 | +	int ret;  | 
|---|
 | 8557 | +  | 
|---|
 | 8558 | +	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);  | 
|---|
 | 8559 | +	if (ret)  | 
|---|
 | 8560 | +		return ret;  | 
|---|
 | 8561 | +  | 
|---|
 | 8562 | +	if (val > 100)  | 
|---|
 | 8563 | +		return -EINVAL;  | 
|---|
 | 8564 | +  | 
|---|
 | 8565 | +	if (!val)  | 
|---|
 | 8566 | +		val = 1;  | 
|---|
 | 8567 | +  | 
|---|
 | 8568 | +	tr->buffer_percent = val;  | 
|---|
 | 8569 | +  | 
|---|
 | 8570 | +	(*ppos)++;  | 
|---|
 | 8571 | +  | 
|---|
 | 8572 | +	return cnt;  | 
|---|
 | 8573 | +}  | 
|---|
 | 8574 | +  | 
|---|
 | 8575 | +static const struct file_operations buffer_percent_fops = {  | 
|---|
 | 8576 | +	.open		= tracing_open_generic_tr,  | 
|---|
 | 8577 | +	.read		= buffer_percent_read,  | 
|---|
 | 8578 | +	.write		= buffer_percent_write,  | 
|---|
 | 8579 | +	.release	= tracing_release_generic_tr,  | 
|---|
 | 8580 | +	.llseek		= default_llseek,  | 
|---|
 | 8581 | +};  | 
|---|
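buffer_percent feeds the `wait_on_pipe(iter, iter->tr->buffer_percent)` call in the splice path above: a blocked reader is only woken once the ring buffer is at least this full, and a write of 0 is stored as 1 so wakeups still require some data. A user-space sketch for tuning it (the tracefs path is an assumption):

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Wake splice()/read() waiters once the buffer is >= pct percent full. */
int set_buffer_percent(int pct)
{
	char buf[8];
	int fd, len;
	ssize_t ret;

	fd = open("/sys/kernel/tracing/buffer_percent", O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "%d", pct);
	ret = write(fd, buf, len);
	close(fd);
	return ret == len ? 0 : -1;
}
```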
 | 8582 | +  | 
|---|
 | 8583 | +static struct dentry *trace_instance_dir;  | 
|---|
| 7706 | 8584 |   | 
|---|
| 7707 | 8585 |  static void | 
|---|
| 7708 | 8586 |  init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); | 
|---|
| 7709 | 8587 |   | 
|---|
| 7710 | 8588 |  static int | 
|---|
| 7711 |  | -allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)  | 
|---|
 | 8589 | +allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)  | 
|---|
| 7712 | 8590 |  { | 
|---|
| 7713 | 8591 |  	enum ring_buffer_flags rb_flags; | 
|---|
| 7714 | 8592 |   | 
|---|
| .. | .. | 
|---|
| 7728 | 8606 |  	} | 
|---|
| 7729 | 8607 |   | 
|---|
| 7730 | 8608 |  	/* Allocate the first page for all buffers */ | 
|---|
| 7731 |  | -	set_buffer_entries(&tr->trace_buffer,  | 
|---|
| 7732 |  | -			   ring_buffer_size(tr->trace_buffer.buffer, 0));  | 
|---|
 | 8609 | +	set_buffer_entries(&tr->array_buffer,  | 
|---|
 | 8610 | +			   ring_buffer_size(tr->array_buffer.buffer, 0));  | 
|---|
| 7733 | 8611 |   | 
|---|
| 7734 | 8612 |  	return 0; | 
|---|
| 7735 | 8613 |  } | 
|---|
| .. | .. | 
|---|
| 7738 | 8616 |  { | 
|---|
| 7739 | 8617 |  	int ret; | 
|---|
| 7740 | 8618 |   | 
|---|
| 7741 |  | -	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);  | 
|---|
 | 8619 | +	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);  | 
|---|
| 7742 | 8620 |  	if (ret) | 
|---|
| 7743 | 8621 |  		return ret; | 
|---|
| 7744 | 8622 |   | 
|---|
| 7745 | 8623 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 7746 | 8624 |  	ret = allocate_trace_buffer(tr, &tr->max_buffer, | 
|---|
| 7747 | 8625 |  				    allocate_snapshot ? size : 1); | 
|---|
| 7748 |  | -	if (WARN_ON(ret)) {  | 
|---|
| 7749 |  | -		ring_buffer_free(tr->trace_buffer.buffer);  | 
|---|
| 7750 |  | -		tr->trace_buffer.buffer = NULL;  | 
|---|
| 7751 |  | -		free_percpu(tr->trace_buffer.data);  | 
|---|
| 7752 |  | -		tr->trace_buffer.data = NULL;  | 
|---|
 | 8626 | +	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {  | 
|---|
 | 8627 | +		ring_buffer_free(tr->array_buffer.buffer);  | 
|---|
 | 8628 | +		tr->array_buffer.buffer = NULL;  | 
|---|
 | 8629 | +		free_percpu(tr->array_buffer.data);  | 
|---|
 | 8630 | +		tr->array_buffer.data = NULL;  | 
|---|
| 7753 | 8631 |  		return -ENOMEM; | 
|---|
| 7754 | 8632 |  	} | 
|---|
| 7755 | 8633 |  	tr->allocated_snapshot = allocate_snapshot; | 
|---|
| .. | .. | 
|---|
| 7761 | 8639 |  	allocate_snapshot = false; | 
|---|
| 7762 | 8640 |  #endif | 
|---|
| 7763 | 8641 |   | 
|---|
| 7764 |  | -	/*  | 
|---|
| 7765 |  | -	 * Because of some magic with the way alloc_percpu() works on  | 
|---|
| 7766 |  | -	 * x86_64, we need to synchronize the pgd of all the tables,  | 
|---|
| 7767 |  | -	 * otherwise the trace events that happen in x86_64 page fault  | 
|---|
| 7768 |  | -	 * handlers can't cope with accessing the chance that a  | 
|---|
| 7769 |  | -	 * alloc_percpu()'d memory might be touched in the page fault trace  | 
|---|
| 7770 |  | -	 * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()  | 
|---|
| 7771 |  | -	 * calls in tracing, because something might get triggered within a  | 
|---|
| 7772 |  | -	 * page fault trace event!  | 
|---|
| 7773 |  | -	 */  | 
|---|
| 7774 |  | -	vmalloc_sync_mappings();  | 
|---|
| 7775 |  | -  | 
|---|
| 7776 | 8642 |  	return 0; | 
|---|
| 7777 | 8643 |  } | 
|---|
| 7778 | 8644 |   | 
|---|
| 7779 |  | -static void free_trace_buffer(struct trace_buffer *buf)  | 
|---|
 | 8645 | +static void free_trace_buffer(struct array_buffer *buf)  | 
|---|
| 7780 | 8646 |  { | 
|---|
| 7781 | 8647 |  	if (buf->buffer) { | 
|---|
| 7782 | 8648 |  		ring_buffer_free(buf->buffer); | 
|---|
| .. | .. | 
|---|
| 7791 | 8657 |  	if (!tr) | 
|---|
| 7792 | 8658 |  		return; | 
|---|
| 7793 | 8659 |   | 
|---|
| 7794 |  | -	free_trace_buffer(&tr->trace_buffer);  | 
|---|
 | 8660 | +	free_trace_buffer(&tr->array_buffer);  | 
|---|
| 7795 | 8661 |   | 
|---|
| 7796 | 8662 |  #ifdef CONFIG_TRACER_MAX_TRACE | 
|---|
| 7797 | 8663 |  	free_trace_buffer(&tr->max_buffer); | 
|---|
| .. | .. | 
|---|
| 7818 | 8684 |  static void update_tracer_options(struct trace_array *tr) | 
|---|
| 7819 | 8685 |  { | 
|---|
| 7820 | 8686 |  	mutex_lock(&trace_types_lock); | 
|---|
 | 8687 | +	tracer_options_updated = true;  | 
|---|
| 7821 | 8688 |  	__update_tracer_options(tr); | 
|---|
| 7822 | 8689 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 7823 | 8690 |  } | 
|---|
| 7824 | 8691 |   | 
|---|
| 7825 |  | -static int instance_mkdir(const char *name)  | 
|---|
 | 8692 | +/* Must have trace_types_lock held */  | 
|---|
 | 8693 | +struct trace_array *trace_array_find(const char *instance)  | 
|---|
 | 8694 | +{  | 
|---|
 | 8695 | +	struct trace_array *tr, *found = NULL;  | 
|---|
 | 8696 | +  | 
|---|
 | 8697 | +	list_for_each_entry(tr, &ftrace_trace_arrays, list) {  | 
|---|
 | 8698 | +		if (tr->name && strcmp(tr->name, instance) == 0) {  | 
|---|
 | 8699 | +			found = tr;  | 
|---|
 | 8700 | +			break;  | 
|---|
 | 8701 | +		}  | 
|---|
 | 8702 | +	}  | 
|---|
 | 8703 | +  | 
|---|
 | 8704 | +	return found;  | 
|---|
 | 8705 | +}  | 
|---|
 | 8706 | +  | 
|---|
 | 8707 | +struct trace_array *trace_array_find_get(const char *instance)  | 
|---|
 | 8708 | +{  | 
|---|
 | 8709 | +	struct trace_array *tr;  | 
|---|
 | 8710 | +  | 
|---|
 | 8711 | +	mutex_lock(&trace_types_lock);  | 
|---|
 | 8712 | +	tr = trace_array_find(instance);  | 
|---|
 | 8713 | +	if (tr)  | 
|---|
 | 8714 | +		tr->ref++;  | 
|---|
 | 8715 | +	mutex_unlock(&trace_types_lock);  | 
|---|
 | 8716 | +  | 
|---|
 | 8717 | +	return tr;  | 
|---|
 | 8718 | +}  | 
|---|
 | 8719 | +  | 
|---|
 | 8720 | +static int trace_array_create_dir(struct trace_array *tr)  | 
|---|
 | 8721 | +{  | 
|---|
 | 8722 | +	int ret;  | 
|---|
 | 8723 | +  | 
|---|
 | 8724 | +	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);  | 
|---|
 | 8725 | +	if (!tr->dir)  | 
|---|
 | 8726 | +		return -EINVAL;  | 
|---|
 | 8727 | +  | 
|---|
 | 8728 | +	ret = event_trace_add_tracer(tr->dir, tr);  | 
|---|
 | 8729 | +	if (ret) {  | 
|---|
 | 8730 | +		tracefs_remove(tr->dir);  | 
|---|
 | 8731 | +		return ret;  | 
|---|
 | 8732 | +	}  | 
|---|
 | 8733 | +  | 
|---|
 | 8734 | +	init_tracer_tracefs(tr, tr->dir);  | 
|---|
 | 8735 | +	__update_tracer_options(tr);  | 
|---|
 | 8736 | +  | 
|---|
 | 8737 | +	return ret;  | 
|---|
 | 8738 | +}  | 
|---|
 | 8739 | +  | 
|---|
 | 8740 | +static struct trace_array *trace_array_create(const char *name)  | 
|---|
| 7826 | 8741 |  { | 
|---|
| 7827 | 8742 |  	struct trace_array *tr; | 
|---|
| 7828 | 8743 |  	int ret; | 
|---|
| 7829 | 8744 |   | 
|---|
| 7830 |  | -	mutex_lock(&event_mutex);  | 
|---|
| 7831 |  | -	mutex_lock(&trace_types_lock);  | 
|---|
| 7832 |  | -  | 
|---|
| 7833 |  | -	ret = -EEXIST;  | 
|---|
| 7834 |  | -	list_for_each_entry(tr, &ftrace_trace_arrays, list) {  | 
|---|
| 7835 |  | -		if (tr->name && strcmp(tr->name, name) == 0)  | 
|---|
| 7836 |  | -			goto out_unlock;  | 
|---|
| 7837 |  | -	}  | 
|---|
| 7838 |  | -  | 
|---|
| 7839 | 8745 |  	ret = -ENOMEM; | 
|---|
| 7840 | 8746 |  	tr = kzalloc(sizeof(*tr), GFP_KERNEL); | 
|---|
| 7841 | 8747 |  	if (!tr) | 
|---|
| 7842 |  | -		goto out_unlock;  | 
|---|
 | 8748 | +		return ERR_PTR(ret);  | 
|---|
| 7843 | 8749 |   | 
|---|
| 7844 | 8750 |  	tr->name = kstrdup(name, GFP_KERNEL); | 
|---|
| 7845 | 8751 |  	if (!tr->name) | 
|---|
| .. | .. | 
|---|
| 7861 | 8767 |  	INIT_LIST_HEAD(&tr->systems); | 
|---|
| 7862 | 8768 |  	INIT_LIST_HEAD(&tr->events); | 
|---|
| 7863 | 8769 |  	INIT_LIST_HEAD(&tr->hist_vars); | 
|---|
 | 8770 | +	INIT_LIST_HEAD(&tr->err_log);  | 
|---|
| 7864 | 8771 |   | 
|---|
| 7865 | 8772 |  	if (allocate_trace_buffers(tr, trace_buf_size) < 0) | 
|---|
| 7866 | 8773 |  		goto out_free_tr; | 
|---|
| 7867 | 8774 |   | 
|---|
| 7868 |  | -	tr->dir = tracefs_create_dir(name, trace_instance_dir);  | 
|---|
| 7869 |  | -	if (!tr->dir)  | 
|---|
 | 8775 | +	if (ftrace_allocate_ftrace_ops(tr) < 0)  | 
|---|
| 7870 | 8776 |  		goto out_free_tr; | 
|---|
| 7871 |  | -  | 
|---|
| 7872 |  | -	ret = event_trace_add_tracer(tr->dir, tr);  | 
|---|
| 7873 |  | -	if (ret) {  | 
|---|
| 7874 |  | -		tracefs_remove_recursive(tr->dir);  | 
|---|
| 7875 |  | -		goto out_free_tr;  | 
|---|
| 7876 |  | -	}  | 
|---|
| 7877 | 8777 |   | 
|---|
| 7878 | 8778 |  	ftrace_init_trace_array(tr); | 
|---|
| 7879 | 8779 |   | 
|---|
| 7880 |  | -	init_tracer_tracefs(tr, tr->dir);  | 
|---|
| 7881 | 8780 |  	init_trace_flags_index(tr); | 
|---|
| 7882 |  | -	__update_tracer_options(tr);  | 
|---|
 | 8781 | +  | 
|---|
 | 8782 | +	if (trace_instance_dir) {  | 
|---|
 | 8783 | +		ret = trace_array_create_dir(tr);  | 
|---|
 | 8784 | +		if (ret)  | 
|---|
 | 8785 | +			goto out_free_tr;  | 
|---|
 | 8786 | +	} else  | 
|---|
 | 8787 | +		__trace_early_add_events(tr);  | 
|---|
| 7883 | 8788 |   | 
|---|
| 7884 | 8789 |  	list_add(&tr->list, &ftrace_trace_arrays); | 
|---|
| 7885 | 8790 |   | 
|---|
| 7886 |  | -	mutex_unlock(&trace_types_lock);  | 
|---|
| 7887 |  | -	mutex_unlock(&event_mutex);  | 
|---|
 | 8791 | +	tr->ref++;  | 
|---|
| 7888 | 8792 |   | 
|---|
| 7889 |  | -	return 0;  | 
|---|
 | 8793 | +	return tr;  | 
|---|
| 7890 | 8794 |   | 
|---|
| 7891 | 8795 |   out_free_tr: | 
|---|
 | 8796 | +	ftrace_free_ftrace_ops(tr);  | 
|---|
| 7892 | 8797 |  	free_trace_buffers(tr); | 
|---|
| 7893 | 8798 |  	free_cpumask_var(tr->tracing_cpumask); | 
|---|
| 7894 | 8799 |  	kfree(tr->name); | 
|---|
| 7895 | 8800 |  	kfree(tr); | 
|---|
| 7896 | 8801 |   | 
|---|
| 7897 |  | - out_unlock:  | 
|---|
| 7898 |  | -	mutex_unlock(&trace_types_lock);  | 
|---|
| 7899 |  | -	mutex_unlock(&event_mutex);  | 
|---|
| 7900 |  | -  | 
|---|
| 7901 |  | -	return ret;  | 
|---|
| 7902 |  | -  | 
|---|
 | 8802 | +	return ERR_PTR(ret);  | 
|---|
| 7903 | 8803 |  } | 
|---|
| 7904 | 8804 |   | 
|---|
| 7905 |  | -static int instance_rmdir(const char *name)  | 
|---|
 | 8805 | +static int instance_mkdir(const char *name)  | 
|---|
| 7906 | 8806 |  { | 
|---|
| 7907 | 8807 |  	struct trace_array *tr; | 
|---|
| 7908 |  | -	int found = 0;  | 
|---|
| 7909 | 8808 |  	int ret; | 
|---|
| 7910 |  | -	int i;  | 
|---|
| 7911 | 8809 |   | 
|---|
| 7912 | 8810 |  	mutex_lock(&event_mutex); | 
|---|
| 7913 | 8811 |  	mutex_lock(&trace_types_lock); | 
|---|
| 7914 | 8812 |   | 
|---|
| 7915 |  | -	ret = -ENODEV;  | 
|---|
| 7916 |  | -	list_for_each_entry(tr, &ftrace_trace_arrays, list) {  | 
|---|
| 7917 |  | -		if (tr->name && strcmp(tr->name, name) == 0) {  | 
|---|
| 7918 |  | -			found = 1;  | 
|---|
| 7919 |  | -			break;  | 
|---|
| 7920 |  | -		}  | 
|---|
| 7921 |  | -	}  | 
|---|
| 7922 |  | -	if (!found)  | 
|---|
 | 8813 | +	ret = -EEXIST;  | 
|---|
 | 8814 | +	if (trace_array_find(name))  | 
|---|
| 7923 | 8815 |  		goto out_unlock; | 
|---|
| 7924 | 8816 |   | 
|---|
| 7925 |  | -	ret = -EBUSY;  | 
|---|
| 7926 |  | -	if (tr->ref || (tr->current_trace && tr->current_trace->ref))  | 
|---|
| 7927 |  | -		goto out_unlock;  | 
|---|
 | 8817 | +	tr = trace_array_create(name);  | 
|---|
 | 8818 | +  | 
|---|
 | 8819 | +	ret = PTR_ERR_OR_ZERO(tr);  | 
|---|
 | 8820 | +  | 
|---|
 | 8821 | +out_unlock:  | 
|---|
 | 8822 | +	mutex_unlock(&trace_types_lock);  | 
|---|
 | 8823 | +	mutex_unlock(&event_mutex);  | 
|---|
 | 8824 | +	return ret;  | 
|---|
 | 8825 | +}  | 
|---|
 | 8826 | +  | 
|---|
 | 8827 | +/**  | 
|---|
 | 8828 | + * trace_array_get_by_name - Create/Lookup a trace array, given its name.  | 
|---|
 | 8829 | + * @name: The name of the trace array to be looked up/created.  | 
|---|
 | 8830 | + *  | 
|---|
 | 8831 | + * Returns a pointer to the trace array with the given name,  | 
|---|
 | 8832 | + * or NULL if it cannot be created.  | 
|---|
 | 8833 | + *  | 
|---|
 | 8834 | + * NOTE: This function increments the reference counter associated with the  | 
|---|
 | 8835 | + * trace array returned. This makes sure it cannot be freed while in use.  | 
|---|
 | 8836 | + * Use trace_array_put() once the trace array is no longer needed.  | 
|---|
 | 8837 | + * If the trace_array is to be freed, trace_array_destroy() needs to  | 
|---|
 | 8838 | + * be called after the trace_array_put(), or simply let user space delete  | 
|---|
 | 8839 | + * it from the tracefs instances directory. But until the  | 
|---|
 | 8840 | + * trace_array_put() is called, user space cannot delete it.  | 
|---|
 | 8841 | + *  | 
|---|
 | 8842 | + */  | 
|---|
 | 8843 | +struct trace_array *trace_array_get_by_name(const char *name)  | 
|---|
 | 8844 | +{  | 
|---|
 | 8845 | +	struct trace_array *tr;  | 
|---|
 | 8846 | +  | 
|---|
 | 8847 | +	mutex_lock(&event_mutex);  | 
|---|
 | 8848 | +	mutex_lock(&trace_types_lock);  | 
|---|
 | 8849 | +  | 
|---|
 | 8850 | +	list_for_each_entry(tr, &ftrace_trace_arrays, list) {  | 
|---|
 | 8851 | +		if (tr->name && strcmp(tr->name, name) == 0)  | 
|---|
 | 8852 | +			goto out_unlock;  | 
|---|
 | 8853 | +	}  | 
|---|
 | 8854 | +  | 
|---|
 | 8855 | +	tr = trace_array_create(name);  | 
|---|
 | 8856 | +  | 
|---|
 | 8857 | +	if (IS_ERR(tr))  | 
|---|
 | 8858 | +		tr = NULL;  | 
|---|
 | 8859 | +out_unlock:  | 
|---|
 | 8860 | +	if (tr)  | 
|---|
 | 8861 | +		tr->ref++;  | 
|---|
 | 8862 | +  | 
|---|
 | 8863 | +	mutex_unlock(&trace_types_lock);  | 
|---|
 | 8864 | +	mutex_unlock(&event_mutex);  | 
|---|
 | 8865 | +	return tr;  | 
|---|
 | 8866 | +}  | 
|---|
 | 8867 | +EXPORT_SYMBOL_GPL(trace_array_get_by_name);  | 
|---|
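A sketch of the exported API in use from a module, following the refcount rules in the kerneldoc above (error handling trimmed; trace_array_printk() and trace_array_put() are assumed to have their usual signatures and are not part of this hunk):

```c
#include <linux/module.h>
#include <linux/trace.h>

static struct trace_array *my_tr;

static int __init my_mod_init(void)
{
	/* Creates the instance if it does not exist; takes a reference */
	my_tr = trace_array_get_by_name("my_instance");
	if (!my_tr)
		return -ENOMEM;

	trace_array_printk(my_tr, _THIS_IP_, "hello from my_instance\n");
	return 0;
}

static void __exit my_mod_exit(void)
{
	/* Drop our reference first, then ask for the instance to be freed */
	trace_array_put(my_tr);
	trace_array_destroy(my_tr);
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");
```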
 | 8868 | +  | 
|---|
 | 8869 | +static int __remove_instance(struct trace_array *tr)  | 
|---|
 | 8870 | +{  | 
|---|
 | 8871 | +	int i;  | 
|---|
 | 8872 | +  | 
|---|
 | 8873 | +	/* Reference counter for a newly created trace array = 1. */  | 
|---|
 | 8874 | +	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))  | 
|---|
 | 8875 | +		return -EBUSY;  | 
|---|
| 7928 | 8876 |   | 
|---|
| 7929 | 8877 |  	list_del(&tr->list); | 
|---|
| 7930 | 8878 |   | 
|---|
| .. | .. | 
|---|
| 7939 | 8887 |  	event_trace_del_tracer(tr); | 
|---|
| 7940 | 8888 |  	ftrace_clear_pids(tr); | 
|---|
| 7941 | 8889 |  	ftrace_destroy_function_files(tr); | 
|---|
| 7942 |  | -	tracefs_remove_recursive(tr->dir);  | 
|---|
 | 8890 | +	tracefs_remove(tr->dir);  | 
|---|
| 7943 | 8891 |  	free_trace_buffers(tr); | 
|---|
| 7944 | 8892 |   | 
|---|
| 7945 | 8893 |  	for (i = 0; i < tr->nr_topts; i++) { | 
|---|
| .. | .. | 
|---|
| 7951 | 8899 |  	kfree(tr->name); | 
|---|
| 7952 | 8900 |  	kfree(tr); | 
|---|
| 7953 | 8901 |   | 
|---|
| 7954 |  | -	ret = 0;  | 
|---|
 | 8902 | +	return 0;  | 
|---|
 | 8903 | +}  | 
|---|
| 7955 | 8904 |   | 
|---|
| 7956 |  | - out_unlock:  | 
|---|
 | 8905 | +int trace_array_destroy(struct trace_array *this_tr)  | 
|---|
 | 8906 | +{  | 
|---|
 | 8907 | +	struct trace_array *tr;  | 
|---|
 | 8908 | +	int ret;  | 
|---|
 | 8909 | +  | 
|---|
 | 8910 | +	if (!this_tr)  | 
|---|
 | 8911 | +		return -EINVAL;  | 
|---|
 | 8912 | +  | 
|---|
 | 8913 | +	mutex_lock(&event_mutex);  | 
|---|
 | 8914 | +	mutex_lock(&trace_types_lock);  | 
|---|
 | 8915 | +  | 
|---|
 | 8916 | +	ret = -ENODEV;  | 
|---|
 | 8917 | +  | 
|---|
 | 8918 | +	/* Make sure the trace array exists before destroying it. */  | 
|---|
 | 8919 | +	list_for_each_entry(tr, &ftrace_trace_arrays, list) {  | 
|---|
 | 8920 | +		if (tr == this_tr) {  | 
|---|
 | 8921 | +			ret = __remove_instance(tr);  | 
|---|
 | 8922 | +			break;  | 
|---|
 | 8923 | +		}  | 
|---|
 | 8924 | +	}  | 
|---|
 | 8925 | +  | 
|---|
 | 8926 | +	mutex_unlock(&trace_types_lock);  | 
|---|
 | 8927 | +	mutex_unlock(&event_mutex);  | 
|---|
 | 8928 | +  | 
|---|
 | 8929 | +	return ret;  | 
|---|
 | 8930 | +}  | 
|---|
 | 8931 | +EXPORT_SYMBOL_GPL(trace_array_destroy);  | 
|---|
 | 8932 | +  | 
|---|
 | 8933 | +static int instance_rmdir(const char *name)  | 
|---|
 | 8934 | +{  | 
|---|
 | 8935 | +	struct trace_array *tr;  | 
|---|
 | 8936 | +	int ret;  | 
|---|
 | 8937 | +  | 
|---|
 | 8938 | +	mutex_lock(&event_mutex);  | 
|---|
 | 8939 | +	mutex_lock(&trace_types_lock);  | 
|---|
 | 8940 | +  | 
|---|
 | 8941 | +	ret = -ENODEV;  | 
|---|
 | 8942 | +	tr = trace_array_find(name);  | 
|---|
 | 8943 | +	if (tr)  | 
|---|
 | 8944 | +		ret = __remove_instance(tr);  | 
|---|
 | 8945 | +  | 
|---|
| 7957 | 8946 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 7958 | 8947 |  	mutex_unlock(&event_mutex); | 
|---|
| 7959 | 8948 |   | 
|---|
| .. | .. | 
|---|
| 7962 | 8951 |   | 
|---|
| 7963 | 8952 |  static __init void create_trace_instances(struct dentry *d_tracer) | 
|---|
| 7964 | 8953 |  { | 
|---|
 | 8954 | +	struct trace_array *tr;  | 
|---|
 | 8955 | +  | 
|---|
| 7965 | 8956 |  	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, | 
|---|
| 7966 | 8957 |  							 instance_mkdir, | 
|---|
| 7967 | 8958 |  							 instance_rmdir); | 
|---|
| 7968 |  | -	if (WARN_ON(!trace_instance_dir))  | 
|---|
 | 8959 | +	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))  | 
|---|
| 7969 | 8960 |  		return; | 
|---|
 | 8961 | +  | 
|---|
 | 8962 | +	mutex_lock(&event_mutex);  | 
|---|
 | 8963 | +	mutex_lock(&trace_types_lock);  | 
|---|
 | 8964 | +  | 
|---|
 | 8965 | +	list_for_each_entry(tr, &ftrace_trace_arrays, list) {  | 
|---|
 | 8966 | +		if (!tr->name)  | 
|---|
 | 8967 | +			continue;  | 
|---|
 | 8968 | +		if (MEM_FAIL(trace_array_create_dir(tr) < 0,  | 
|---|
 | 8969 | +			     "Failed to create instance directory\n"))  | 
|---|
 | 8970 | +			break;  | 
|---|
 | 8971 | +	}  | 
|---|
 | 8972 | +  | 
|---|
 | 8973 | +	mutex_unlock(&trace_types_lock);  | 
|---|
 | 8974 | +	mutex_unlock(&event_mutex);  | 
|---|
| 7970 | 8975 |  } | 
|---|
| 7971 | 8976 |   | 
|---|
| 7972 | 8977 |  static void | 
|---|
| .. | .. | 
|---|
| 8023 | 9028 |  	trace_create_file("timestamp_mode", 0444, d_tracer, tr, | 
|---|
| 8024 | 9029 |  			  &trace_time_stamp_mode_fops); | 
|---|
| 8025 | 9030 |   | 
|---|
 | 9031 | +	tr->buffer_percent = 50;  | 
|---|
 | 9032 | +  | 
|---|
 | 9033 | +	trace_create_file("buffer_percent", 0444, d_tracer,  | 
|---|
 | 9034 | +			tr, &buffer_percent_fops);  | 
|---|
 | 9035 | +  | 
|---|
| 8026 | 9036 |  	create_trace_options_dir(tr); | 
|---|
| 8027 | 9037 |   | 
|---|
| 8028 | 9038 |  #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) | 
|---|
| 8029 |  | -	trace_create_file("tracing_max_latency", 0644, d_tracer,  | 
|---|
| 8030 |  | -			&tr->max_latency, &tracing_max_lat_fops);  | 
|---|
 | 9039 | +	trace_create_maxlat_file(tr, d_tracer);  | 
|---|
| 8031 | 9040 |  #endif | 
|---|
| 8032 | 9041 |   | 
|---|
| 8033 | 9042 |  	if (ftrace_create_function_files(tr, d_tracer)) | 
|---|
| 8034 |  | -		WARN(1, "Could not allocate function filter files");  | 
|---|
 | 9043 | +		MEM_FAIL(1, "Could not allocate function filter files");  | 
|---|
| 8035 | 9044 |   | 
|---|
| 8036 | 9045 |  #ifdef CONFIG_TRACER_SNAPSHOT | 
|---|
| 8037 | 9046 |  	trace_create_file("snapshot", 0644, d_tracer, | 
|---|
| 8038 | 9047 |  			  tr, &snapshot_fops); | 
|---|
| 8039 | 9048 |  #endif | 
|---|
 | 9049 | +  | 
|---|
 | 9050 | +	trace_create_file("error_log", 0644, d_tracer,  | 
|---|
 | 9051 | +			  tr, &tracing_err_log_fops);  | 
|---|
| 8040 | 9052 |   | 
|---|
| 8041 | 9053 |  	for_each_tracing_cpu(cpu) | 
|---|
| 8042 | 9054 |  		tracing_init_tracefs_percpu(tr, cpu); | 
|---|
| .. | .. | 
|---|
| 8044 | 9056 |  	ftrace_init_tracefs(tr, d_tracer); | 
|---|
| 8045 | 9057 |  } | 
|---|
| 8046 | 9058 |   | 
|---|
 | 9059 | +#ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT  | 
|---|
| 8047 | 9060 |  static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore) | 
|---|
| 8048 | 9061 |  { | 
|---|
| 8049 | 9062 |  	struct vfsmount *mnt; | 
|---|
| .. | .. | 
|---|
| 8065 | 9078 |   | 
|---|
| 8066 | 9079 |  	return mnt; | 
|---|
| 8067 | 9080 |  } | 
|---|
 | 9081 | +#endif  | 
|---|
| 8068 | 9082 |   | 
|---|
| 8069 | 9083 |  /** | 
|---|
| 8070 | 9084 |   * tracing_init_dentry - initialize top level trace array | 
|---|
| .. | .. | 
|---|
| 8073 | 9087 |   * directory. It is called via fs_initcall() by any of the boot up code | 
|---|
| 8074 | 9088 |   * and expects to return the dentry of the top level tracing directory. | 
|---|
| 8075 | 9089 |   */ | 
|---|
| 8076 |  | -struct dentry *tracing_init_dentry(void)  | 
|---|
 | 9090 | +int tracing_init_dentry(void)  | 
|---|
| 8077 | 9091 |  { | 
|---|
| 8078 | 9092 |  	struct trace_array *tr = &global_trace; | 
|---|
| 8079 | 9093 |   | 
|---|
 | 9094 | +	if (security_locked_down(LOCKDOWN_TRACEFS)) {  | 
|---|
 | 9095 | +		pr_warn("Tracing disabled due to lockdown\n");  | 
|---|
 | 9096 | +		return -EPERM;  | 
|---|
 | 9097 | +	}  | 
|---|
 | 9098 | +  | 
|---|
| 8080 | 9099 |  	/* The top level trace array uses  NULL as parent */ | 
|---|
| 8081 | 9100 |  	if (tr->dir) | 
|---|
| 8082 |  | -		return NULL;  | 
|---|
 | 9101 | +		return 0;  | 
|---|
| 8083 | 9102 |   | 
|---|
| 8084 |  | -	if (WARN_ON(!tracefs_initialized()) ||  | 
|---|
| 8085 |  | -		(IS_ENABLED(CONFIG_DEBUG_FS) &&  | 
|---|
| 8086 |  | -		 WARN_ON(!debugfs_initialized())))  | 
|---|
| 8087 |  | -		return ERR_PTR(-ENODEV);  | 
|---|
 | 9103 | +	if (WARN_ON(!tracefs_initialized()))  | 
|---|
 | 9104 | +		return -ENODEV;  | 
|---|
| 8088 | 9105 |   | 
|---|
 | 9106 | +#ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT  | 
|---|
| 8089 | 9107 |  	/* | 
|---|
| 8090 | 9108 |  	 * As there may still be users that expect the tracing | 
|---|
| 8091 | 9109 |  	 * files to exist in debugfs/tracing, we must automount | 
|---|
| .. | .. | 
|---|
| 8094 | 9112 |  	 */ | 
|---|
| 8095 | 9113 |  	tr->dir = debugfs_create_automount("tracing", NULL, | 
|---|
| 8096 | 9114 |  					   trace_automount, NULL); | 
|---|
| 8097 |  | -	if (!tr->dir) {  | 
|---|
| 8098 |  | -		pr_warn_once("Could not create debugfs directory 'tracing'\n");  | 
|---|
| 8099 |  | -		return ERR_PTR(-ENOMEM);  | 
|---|
| 8100 |  | -	}  | 
|---|
 | 9115 | +#else  | 
|---|
 | 9116 | +	tr->dir = ERR_PTR(-ENODEV);  | 
|---|
 | 9117 | +#endif  | 
|---|
| 8101 | 9118 |   | 
|---|
| 8102 |  | -	return NULL;  | 
|---|
 | 9119 | +	return 0;  | 
|---|
| 8103 | 9120 |  } | 
|---|
| 8104 | 9121 |   | 
|---|
| 8105 | 9122 |  extern struct trace_eval_map *__start_ftrace_eval_maps[]; | 
|---|
| .. | .. | 
|---|
| 8175 | 9192 |  		break; | 
|---|
| 8176 | 9193 |  	} | 
|---|
| 8177 | 9194 |   | 
|---|
| 8178 |  | -	return 0;  | 
|---|
 | 9195 | +	return NOTIFY_OK;  | 
|---|
| 8179 | 9196 |  } | 
|---|
| 8180 | 9197 |   | 
|---|
| 8181 | 9198 |  static struct notifier_block trace_module_nb = { | 
|---|
| .. | .. | 
|---|
| 8186 | 9203 |   | 
|---|
| 8187 | 9204 |  static __init int tracer_init_tracefs(void) | 
|---|
| 8188 | 9205 |  { | 
|---|
| 8189 |  | -	struct dentry *d_tracer;  | 
|---|
 | 9206 | +	int ret;  | 
|---|
| 8190 | 9207 |   | 
|---|
| 8191 | 9208 |  	trace_access_lock_init(); | 
|---|
| 8192 | 9209 |   | 
|---|
| 8193 |  | -	d_tracer = tracing_init_dentry();  | 
|---|
| 8194 |  | -	if (IS_ERR(d_tracer))  | 
|---|
 | 9210 | +	ret = tracing_init_dentry();  | 
|---|
 | 9211 | +	if (ret)  | 
|---|
| 8195 | 9212 |  		return 0; | 
|---|
| 8196 | 9213 |   | 
|---|
| 8197 | 9214 |  	event_trace_init(); | 
|---|
| 8198 | 9215 |   | 
|---|
| 8199 |  | -	init_tracer_tracefs(&global_trace, d_tracer);  | 
|---|
| 8200 |  | -	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);  | 
|---|
 | 9216 | +	init_tracer_tracefs(&global_trace, NULL);  | 
|---|
 | 9217 | +	ftrace_init_tracefs_toplevel(&global_trace, NULL);  | 
|---|
| 8201 | 9218 |   | 
|---|
| 8202 |  | -	trace_create_file("tracing_thresh", 0644, d_tracer,  | 
|---|
 | 9219 | +	trace_create_file("tracing_thresh", 0644, NULL,  | 
|---|
| 8203 | 9220 |  			&global_trace, &tracing_thresh_fops); | 
|---|
| 8204 | 9221 |   | 
|---|
| 8205 |  | -	trace_create_file("README", 0444, d_tracer,  | 
|---|
 | 9222 | +	trace_create_file("README", 0444, NULL,  | 
|---|
| 8206 | 9223 |  			NULL, &tracing_readme_fops); | 
|---|
| 8207 | 9224 |   | 
|---|
| 8208 |  | -	trace_create_file("saved_cmdlines", 0444, d_tracer,  | 
|---|
 | 9225 | +	trace_create_file("saved_cmdlines", 0444, NULL,  | 
|---|
| 8209 | 9226 |  			NULL, &tracing_saved_cmdlines_fops); | 
|---|
| 8210 | 9227 |   | 
|---|
| 8211 |  | -	trace_create_file("saved_cmdlines_size", 0644, d_tracer,  | 
|---|
 | 9228 | +	trace_create_file("saved_cmdlines_size", 0644, NULL,  | 
|---|
| 8212 | 9229 |  			  NULL, &tracing_saved_cmdlines_size_fops); | 
|---|
| 8213 | 9230 |   | 
|---|
| 8214 |  | -	trace_create_file("saved_tgids", 0444, d_tracer,  | 
|---|
 | 9231 | +	trace_create_file("saved_tgids", 0444, NULL,  | 
|---|
| 8215 | 9232 |  			NULL, &tracing_saved_tgids_fops); | 
|---|
| 8216 | 9233 |   | 
|---|
| 8217 | 9234 |  	trace_eval_init(); | 
|---|
| 8218 | 9235 |   | 
|---|
| 8219 |  | -	trace_create_eval_file(d_tracer);  | 
|---|
 | 9236 | +	trace_create_eval_file(NULL);  | 
|---|
| 8220 | 9237 |   | 
|---|
| 8221 | 9238 |  #ifdef CONFIG_MODULES | 
|---|
| 8222 | 9239 |  	register_module_notifier(&trace_module_nb); | 
|---|
| 8223 | 9240 |  #endif | 
|---|
| 8224 | 9241 |   | 
|---|
| 8225 | 9242 |  #ifdef CONFIG_DYNAMIC_FTRACE | 
|---|
| 8226 |  | -	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,  | 
|---|
| 8227 |  | -			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);  | 
|---|
 | 9243 | +	trace_create_file("dyn_ftrace_total_info", 0444, NULL,  | 
|---|
 | 9244 | +			NULL, &tracing_dyn_info_fops);  | 
|---|
| 8228 | 9245 |  #endif | 
|---|
| 8229 | 9246 |   | 
|---|
| 8230 |  | -	create_trace_instances(d_tracer);  | 
|---|
 | 9247 | +	create_trace_instances(NULL);  | 
|---|
| 8231 | 9248 |   | 
|---|
| 8232 | 9249 |  	update_tracer_options(&global_trace); | 
|---|
| 8233 | 9250 |   | 
|---|
| .. | .. | 
|---|
| 8237 | 9254 |  static int trace_panic_handler(struct notifier_block *this, | 
|---|
| 8238 | 9255 |  			       unsigned long event, void *unused) | 
|---|
| 8239 | 9256 |  { | 
|---|
 | 9257 | +	bool ftrace_check = false;  | 
|---|
 | 9258 | +  | 
|---|
 | 9259 | +	trace_android_vh_ftrace_oops_enter(&ftrace_check);  | 
|---|
 | 9260 | +  | 
|---|
 | 9261 | +	if (ftrace_check)  | 
|---|
 | 9262 | +		return NOTIFY_OK;  | 
|---|
 | 9263 | +  | 
|---|
| 8240 | 9264 |  	if (ftrace_dump_on_oops) | 
|---|
| 8241 | 9265 |  		ftrace_dump(ftrace_dump_on_oops); | 
|---|
 | 9266 | +  | 
|---|
 | 9267 | +	trace_android_vh_ftrace_oops_exit(&ftrace_check);  | 
|---|
| 8242 | 9268 |  	return NOTIFY_OK; | 
|---|
| 8243 | 9269 |  } | 
|---|
| 8244 | 9270 |   | 
|---|
| .. | .. | 
|---|
| 8252 | 9278 |  			     unsigned long val, | 
|---|
| 8253 | 9279 |  			     void *data) | 
|---|
| 8254 | 9280 |  { | 
|---|
 | 9281 | +	bool ftrace_check = false;  | 
|---|
 | 9282 | +  | 
|---|
 | 9283 | +	trace_android_vh_ftrace_oops_enter(&ftrace_check);  | 
|---|
 | 9284 | +  | 
|---|
 | 9285 | +	if (ftrace_check)  | 
|---|
 | 9286 | +		return NOTIFY_OK;  | 
|---|
 | 9287 | +  | 
|---|
| 8255 | 9288 |  	switch (val) { | 
|---|
| 8256 | 9289 |  	case DIE_OOPS: | 
|---|
| 8257 | 9290 |  		if (ftrace_dump_on_oops) | 
|---|
| .. | .. | 
|---|
| 8260 | 9293 |  	default: | 
|---|
| 8261 | 9294 |  		break; | 
|---|
| 8262 | 9295 |  	} | 
|---|
 | 9296 | +  | 
|---|
 | 9297 | +	trace_android_vh_ftrace_oops_exit(&ftrace_check);  | 
|---|
| 8263 | 9298 |  	return NOTIFY_OK; | 
|---|
| 8264 | 9299 |  } | 
|---|
| 8265 | 9300 |   | 
|---|
| .. | .. | 
|---|
| 8284 | 9319 |  void | 
|---|
| 8285 | 9320 |  trace_printk_seq(struct trace_seq *s) | 
|---|
| 8286 | 9321 |  { | 
|---|
 | 9322 | +	bool dump_printk = true;  | 
|---|
 | 9323 | +  | 
|---|
| 8287 | 9324 |  	/* Probably should print a warning here. */ | 
|---|
| 8288 | 9325 |  	if (s->seq.len >= TRACE_MAX_PRINT) | 
|---|
| 8289 | 9326 |  		s->seq.len = TRACE_MAX_PRINT; | 
|---|
| .. | .. | 
|---|
| 8299 | 9336 |  	/* should be zero ended, but we are paranoid. */ | 
|---|
| 8300 | 9337 |  	s->buffer[s->seq.len] = 0; | 
|---|
| 8301 | 9338 |   | 
|---|
| 8302 |  | -	printk(KERN_TRACE "%s", s->buffer);  | 
|---|
 | 9339 | +	trace_android_vh_ftrace_dump_buffer(s, &dump_printk);  | 
|---|
 | 9340 | +	if (dump_printk)  | 
|---|
 | 9341 | +		printk(KERN_TRACE "%s", s->buffer);  | 
|---|
| 8303 | 9342 |   | 
|---|
| 8304 | 9343 |  	trace_seq_init(s); | 
|---|
| 8305 | 9344 |  } | 
|---|
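`trace_printk_seq()` drains a `struct trace_seq`, the fixed-size printf-style buffer the dump path renders entries into. A minimal sketch of feeding one, assuming the caller lives in kernel/trace (the function is not exported):

```c
#include <linux/trace_seq.h>
#include <linux/sched.h>

static void my_fill_and_flush(void)
{
	/* struct trace_seq embeds a PAGE_SIZE buffer: too big for the stack. */
	static struct trace_seq s;

	trace_seq_init(&s);
	trace_seq_printf(&s, "dump marker: pid=%d\n", current->pid);

	/* Prints s.buffer with KERN_TRACE and re-inits the seq. */
	trace_printk_seq(&s);
}
```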
| .. | .. | 
|---|
| 8309 | 9348 |  	iter->tr = &global_trace; | 
|---|
| 8310 | 9349 |  	iter->trace = iter->tr->current_trace; | 
|---|
| 8311 | 9350 |  	iter->cpu_file = RING_BUFFER_ALL_CPUS; | 
|---|
| 8312 |  | -	iter->trace_buffer = &global_trace.trace_buffer;  | 
|---|
 | 9351 | +	iter->array_buffer = &global_trace.array_buffer;  | 
|---|
| 8313 | 9352 |   | 
|---|
| 8314 | 9353 |  	if (iter->trace && iter->trace->open) | 
|---|
| 8315 | 9354 |  		iter->trace->open(iter); | 
|---|
| 8316 | 9355 |   | 
|---|
| 8317 | 9356 |  	/* Annotate start of buffers if we had overruns */ | 
|---|
| 8318 |  | -	if (ring_buffer_overruns(iter->trace_buffer->buffer))  | 
|---|
 | 9357 | +	if (ring_buffer_overruns(iter->array_buffer->buffer))  | 
|---|
| 8319 | 9358 |  		iter->iter_flags |= TRACE_FILE_ANNOTATE; | 
|---|
| 8320 | 9359 |   | 
|---|
| 8321 | 9360 |  	/* Output in nanoseconds only if we are using a clock in nanoseconds. */ | 
|---|
| .. | .. | 
|---|
| 8332 | 9371 |  	unsigned int old_userobj; | 
|---|
| 8333 | 9372 |  	unsigned long flags; | 
|---|
| 8334 | 9373 |  	int cnt = 0, cpu; | 
|---|
 | 9374 | +	bool ftrace_check = false;  | 
|---|
 | 9375 | +	unsigned long size;  | 
|---|
| 8335 | 9376 |   | 
|---|
| 8336 | 9377 |  	/* Only allow one dump user at a time. */ | 
|---|
| 8337 | 9378 |  	if (atomic_inc_return(&dump_running) != 1) { | 
|---|
| .. | .. | 
|---|
| 8350 | 9391 |  	tracing_off(); | 
|---|
| 8351 | 9392 |   | 
|---|
| 8352 | 9393 |  	local_irq_save(flags); | 
|---|
| 8353 |  | -	printk_nmi_direct_enter();  | 
|---|
| 8354 | 9394 |   | 
|---|
| 8355 | 9395 |  	/* Simulate the iterator */ | 
|---|
| 8356 | 9396 |  	trace_init_global_iter(&iter); | 
|---|
 | 9397 | +	/* Can not use kmalloc for iter.temp */  | 
|---|
 | 9398 | +	iter.temp = static_temp_buf;  | 
|---|
 | 9399 | +	iter.temp_size = STATIC_TEMP_BUF_SIZE;  | 
|---|
| 8357 | 9400 |   | 
|---|
| 8358 | 9401 |  	for_each_tracing_cpu(cpu) { | 
|---|
| 8359 |  | -		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);  | 
|---|
 | 9402 | +		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);  | 
|---|
 | 9403 | +		size = ring_buffer_size(iter.array_buffer->buffer, cpu);  | 
|---|
 | 9404 | +		trace_android_vh_ftrace_size_check(size, &ftrace_check);  | 
|---|
| 8360 | 9405 |  	} | 
|---|
| 8361 | 9406 |   | 
|---|
| 8362 | 9407 |  	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; | 
|---|
| 8363 | 9408 |   | 
|---|
| 8364 | 9409 |  	/* don't look at user memory in panic mode */ | 
|---|
| 8365 | 9410 |  	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | 
|---|
 | 9411 | +  | 
|---|
 | 9412 | +	if (ftrace_check)  | 
|---|
 | 9413 | +		goto out_enable;  | 
|---|
| 8366 | 9414 |   | 
|---|
| 8367 | 9415 |  	switch (oops_dump_mode) { | 
|---|
| 8368 | 9416 |  	case DUMP_ALL: | 
|---|
| .. | .. | 
|---|
| 8387 | 9435 |  	} | 
|---|
| 8388 | 9436 |   | 
|---|
| 8389 | 9437 |  	/* | 
|---|
| 8390 |  | -	 * We need to stop all tracing on all CPUS to read the  | 
|---|
 | 9438 | +	 * We need to stop all tracing on all CPUS to read  | 
|---|
| 8391 | 9439 |  	 * the next buffer. This is a bit expensive, but is | 
|---|
| 8392 | 9440 |  	 * not done often. We read everything we can, | 
|---|
| 8393 | 9441 |  	 * and then release the locks again. | 
|---|
| 8394 | 9442 |  	 */ | 
|---|
| 8395 | 9443 |   | 
|---|
| 8396 | 9444 |  	while (!trace_empty(&iter)) { | 
|---|
 | 9445 | +		ftrace_check = true;  | 
|---|
| 8397 | 9446 |   | 
|---|
| 8398 | 9447 |  		if (!cnt) | 
|---|
| 8399 | 9448 |  			printk(KERN_TRACE "---------------------------------\n"); | 
|---|
| .. | .. | 
|---|
| 8401 | 9450 |  		cnt++; | 
|---|
| 8402 | 9451 |   | 
|---|
| 8403 | 9452 |  		trace_iterator_reset(&iter); | 
|---|
| 8404 |  | -		iter.iter_flags |= TRACE_FILE_LAT_FMT;  | 
|---|
 | 9453 | +		trace_android_vh_ftrace_format_check(&ftrace_check);  | 
|---|
 | 9454 | +		if (ftrace_check)  | 
|---|
 | 9455 | +			iter.iter_flags |= TRACE_FILE_LAT_FMT;  | 
|---|
| 8405 | 9456 |   | 
|---|
| 8406 | 9457 |  		if (trace_find_next_entry_inc(&iter) != NULL) { | 
|---|
| 8407 | 9458 |  			int ret; | 
|---|
| .. | .. | 
|---|
| 8424 | 9475 |  	tr->trace_flags |= old_userobj; | 
|---|
| 8425 | 9476 |   | 
|---|
| 8426 | 9477 |  	for_each_tracing_cpu(cpu) { | 
|---|
| 8427 |  | -		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);  | 
|---|
 | 9478 | +		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);  | 
|---|
| 8428 | 9479 |  	} | 
|---|
| 8429 | 9480 |  	atomic_dec(&dump_running); | 
|---|
| 8430 |  | -	printk_nmi_direct_exit();  | 
|---|
| 8431 | 9481 |  	local_irq_restore(flags); | 
|---|
| 8432 | 9482 |  } | 
|---|
| 8433 | 9483 |  EXPORT_SYMBOL_GPL(ftrace_dump); | 
|---|
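Two details worth noting in `ftrace_dump()`: the `atomic_inc_return(&dump_running) != 1` test makes concurrent dumpers back off instead of interleaving output, and the per-CPU `->disabled` counters freeze recording while the buffer is read. Since the function is `EXPORT_SYMBOL_GPL`, a module can trigger the same dump; a hypothetical caller:

```c
#include <linux/kernel.h>	/* ftrace_dump(), enum ftrace_dump_mode */

static void my_fatal_error_path(void)
{
	/*
	 * DUMP_ORIG dumps only the calling CPU's buffer, DUMP_ALL dumps
	 * every CPU. The dump path itself calls tracing_off(), so the
	 * ring buffer is frozen from here on.
	 */
	ftrace_dump(DUMP_ORIG);
}
```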
| .. | .. | 
|---|
| 8523 | 9573 |  	int ring_buf_size; | 
|---|
| 8524 | 9574 |  	int ret = -ENOMEM; | 
|---|
| 8525 | 9575 |   | 
|---|
 | 9576 | +  | 
|---|
 | 9577 | +	if (security_locked_down(LOCKDOWN_TRACEFS)) {  | 
|---|
 | 9578 | +		pr_warn("Tracing disabled due to lockdown\n");  | 
|---|
 | 9579 | +		return -EPERM;  | 
|---|
 | 9580 | +	}  | 
|---|
 | 9581 | +  | 
|---|
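The new lockdown test is the standard pattern for gating tracefs features; a minimal sketch of the same check in a hypothetical init function:

```c
#include <linux/security.h>
#include <linux/printk.h>

static int __init my_trace_feature_init(void)
{
	/* Returns 0 if permitted, -EPERM when the kernel is locked down. */
	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("my_trace_feature: disabled due to lockdown\n");
		return -EPERM;
	}
	/* ...normal tracefs setup... */
	return 0;
}
```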
| 8526 | 9582 |  	/* | 
|---|
| 8527 |  | -	 * Make sure we don't accidently add more trace options  | 
|---|
 | 9583 | +	 * Make sure we don't accidentally add more trace options  | 
|---|
| 8528 | 9584 |  	 * than we have bits for. | 
|---|
| 8529 | 9585 |  	 */ | 
|---|
| 8530 | 9586 |  	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); | 
|---|
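`BUILD_BUG_ON()` turns the bits-vs-flags invariant into a compile-time failure rather than a runtime surprise; a tiny self-contained illustration with made-up names:

```c
#include <linux/build_bug.h>

#define MY_FLAG_LAST_BIT	5
#define MY_FLAGS_MAX_SIZE	32

static inline void my_check_flag_space(void)
{
	/* Compilation fails if the flag count outgrows the bitmask. */
	BUILD_BUG_ON(MY_FLAG_LAST_BIT > MY_FLAGS_MAX_SIZE);
}
```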
| .. | .. | 
|---|
| 8553 | 9609 |   | 
|---|
| 8554 | 9610 |  	/* | 
|---|
| 8555 | 9611 |  	 * The prepare callbacks allocate some memory for the ring buffer. We | 
|---|
| 8556 |  | -	 * don't free the buffer if the if the CPU goes down. If we were to free  | 
|---|
 | 9612 | +	 * don't free the buffer if the CPU goes down. If we were to free  | 
|---|
| 8557 | 9613 |  	 * the buffer, then the user would lose any trace that was in the | 
|---|
| 8558 | 9614 |  	 * buffer. The memory will be removed once the "instance" is removed. | 
|---|
| 8559 | 9615 |  	 */ | 
|---|
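The comment refers to the CPU-hotplug "prepare" stage: trace.c wires this up with `cpuhp_setup_state_multi()` and deliberately passes no teardown callback, which is why the buffers survive a CPU going offline. A hedged sketch with hypothetical names:

```c
#include <linux/cpuhotplug.h>

static int my_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	/* Allocate per-CPU ring-buffer pages before the CPU comes online. */
	return 0;
}

static int __init my_setup_rb_hotplug(void)
{
	/* NULL teardown: per-CPU memory is kept when the CPU goes down. */
	return cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				       "trace/RB:prepare",
				       my_rb_cpu_prepare, NULL);
}
```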
| .. | .. | 
|---|
| 8573 | 9629 |   | 
|---|
| 8574 | 9630 |  	/* TODO: make the number of buffers hot pluggable with CPUS */ | 
|---|
| 8575 | 9631 |  	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { | 
|---|
| 8576 |  | -		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");  | 
|---|
| 8577 |  | -		WARN_ON(1);  | 
|---|
 | 9632 | +		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");  | 
|---|
| 8578 | 9633 |  		goto out_free_savedcmd; | 
|---|
| 8579 | 9634 |  	} | 
|---|
| 8580 | 9635 |   | 
|---|
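`MEM_FAIL()` (defined in kernel/trace/trace.h) replaces the printk+WARN_ON pair: it reports an allocation failure once via `pr_err()` without a full backtrace, and evaluates to the condition so it can sit directly inside an `if ()`. An approximation of its shape, paraphrased rather than copied from this series:

```c
/* Approximate, illustrative re-statement of the MEM_FAIL() idea. */
#define MY_MEM_FAIL(condition, fmt, ...) ({			\
	static bool __warned;					\
	int __ret = !!(condition);				\
								\
	if (unlikely(__ret && !__warned)) {			\
		__warned = true;				\
		pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
	}							\
	unlikely(__ret);					\
})
```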
| .. | .. | 
|---|
| 8619 | 9674 |  	INIT_LIST_HEAD(&global_trace.systems); | 
|---|
| 8620 | 9675 |  	INIT_LIST_HEAD(&global_trace.events); | 
|---|
| 8621 | 9676 |  	INIT_LIST_HEAD(&global_trace.hist_vars); | 
|---|
 | 9677 | +	INIT_LIST_HEAD(&global_trace.err_log);  | 
|---|
| 8622 | 9678 |  	list_add(&global_trace.list, &ftrace_trace_arrays); | 
|---|
| 8623 | 9679 |   | 
|---|
| 8624 | 9680 |  	apply_trace_boot_options(); | 
|---|
| .. | .. | 
|---|
| 8646 | 9702 |  	if (tracepoint_printk) { | 
|---|
| 8647 | 9703 |  		tracepoint_print_iter = | 
|---|
| 8648 | 9704 |  			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); | 
|---|
| 8649 |  | -		if (WARN_ON(!tracepoint_print_iter))  | 
|---|
 | 9705 | +		if (MEM_FAIL(!tracepoint_print_iter,  | 
|---|
 | 9706 | +			     "Failed to allocate trace iterator\n"))  | 
|---|
| 8650 | 9707 |  			tracepoint_printk = 0; | 
|---|
| 8651 | 9708 |  		else | 
|---|
| 8652 | 9709 |  			static_key_enable(&tracepoint_printk_key.key); | 
|---|
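`tracepoint_printk_key` is a static branch: the tracepoint fast path compiles to a NOP until boot code flips the key. The general pattern, with illustrative names:

```c
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(my_feature_key);

static void my_hot_path(void)
{
	/* A patched NOP until the key is enabled: near-zero cost. */
	if (static_branch_unlikely(&my_feature_key))
		pr_info("feature active\n");
}

static void my_enable_feature(void)
{
	/* &key.key reaches the raw struct static_key, as in the hunk above. */
	static_key_enable(&my_feature_key.key);
}
```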
| .. | .. | 
|---|
| 8686 | 9743 |  { | 
|---|
| 8687 | 9744 |  	/* sched_clock_stable() is determined in late_initcall */ | 
|---|
| 8688 | 9745 |  	if (!trace_boot_clock && !sched_clock_stable()) { | 
|---|
 | 9746 | +		if (security_locked_down(LOCKDOWN_TRACEFS)) {  | 
|---|
 | 9747 | +			pr_warn("Can not set tracing clock due to lockdown\n");  | 
|---|
 | 9748 | +			return -EPERM;  | 
|---|
 | 9749 | +		}  | 
|---|
 | 9750 | +  | 
|---|
| 8689 | 9751 |  		printk(KERN_WARNING | 
|---|
| 8690 | 9752 |  		       "Unstable clock detected, switching default tracing clock to \"global\"\n" | 
|---|
| 8691 | 9753 |  		       "If you want to keep using the local clock, then add:\n" | 
|---|