| .. | .. | 
|---|
| 12 | 12 |  #define pr_fmt(fmt) fmt | 
|---|
| 13 | 13 |   | 
|---|
| 14 | 14 |  #include <linux/workqueue.h> | 
|---|
 | 15 | +#include <linux/security.h>  | 
|---|
| 15 | 16 |  #include <linux/spinlock.h> | 
|---|
| 16 | 17 |  #include <linux/kthread.h> | 
|---|
| 17 | 18 |  #include <linux/tracefs.h> | 
|---|
| .. | .. | 
|---|
| 23 | 24 |  #include <linux/delay.h> | 
|---|
| 24 | 25 |   | 
|---|
| 25 | 26 |  #include <trace/events/sched.h> | 
|---|
 | 27 | +#include <trace/syscall.h>  | 
|---|
| 26 | 28 |   | 
|---|
| 27 | 29 |  #include <asm/setup.h> | 
|---|
| 28 | 30 |   | 
|---|
| .. | .. | 
|---|
| 36 | 38 |  LIST_HEAD(ftrace_events); | 
|---|
| 37 | 39 |  static LIST_HEAD(ftrace_generic_fields); | 
|---|
| 38 | 40 |  static LIST_HEAD(ftrace_common_fields); | 
|---|
 | 41 | +static bool eventdir_initialized;  | 
|---|
| 39 | 42 |   | 
|---|
| 40 | 43 |  #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO) | 
|---|
| 41 | 44 |   | 
|---|
| .. | .. | 
|---|
| 69 | 72 |   | 
|---|
| 70 | 73 |  #define while_for_each_event_file()		\ | 
|---|
| 71 | 74 |  	} | 
|---|
| 72 |  | -  | 
|---|
| 73 |  | -static struct list_head *  | 
|---|
| 74 |  | -trace_get_fields(struct trace_event_call *event_call)  | 
|---|
| 75 |  | -{  | 
|---|
| 76 |  | -	if (!event_call->class->get_fields)  | 
|---|
| 77 |  | -		return &event_call->class->fields;  | 
|---|
| 78 |  | -	return event_call->class->get_fields(event_call);  | 
|---|
| 79 |  | -}  | 
|---|
| 80 | 75 |   | 
|---|
| 81 | 76 |  static struct ftrace_event_field * | 
|---|
| 82 | 77 |  __find_event_field(struct list_head *head, char *name) | 
|---|
| .. | .. | 
|---|
| 173 | 168 |   | 
|---|
| 174 | 169 |  	__generic_field(int, CPU, FILTER_CPU); | 
|---|
| 175 | 170 |  	__generic_field(int, cpu, FILTER_CPU); | 
|---|
 | 171 | +	__generic_field(int, common_cpu, FILTER_CPU);  | 
|---|
| 176 | 172 |  	__generic_field(char *, COMM, FILTER_COMM); | 
|---|
| 177 | 173 |  	__generic_field(char *, comm, FILTER_COMM); | 
|---|
| 178 | 174 |   | 
|---|
| .. | .. | 
|---|
| 188 | 184 |  	__common_field(unsigned char, flags); | 
|---|
| 189 | 185 |  	__common_field(unsigned char, preempt_count); | 
|---|
| 190 | 186 |  	__common_field(int, pid); | 
|---|
 | 187 | +	__common_field(unsigned char, migrate_disable);  | 
|---|
 | 188 | +	__common_field(unsigned char, preempt_lazy_count);  | 
|---|
| 191 | 189 |   | 
|---|
| 192 | 190 |  	return ret; | 
|---|
| 193 | 191 |  } | 
|---|
| .. | .. | 
|---|
| 238 | 236 |  { | 
|---|
| 239 | 237 |  	struct trace_array *tr = trace_file->tr; | 
|---|
| 240 | 238 |  	struct trace_array_cpu *data; | 
|---|
 | 239 | +	struct trace_pid_list *no_pid_list;  | 
|---|
| 241 | 240 |  	struct trace_pid_list *pid_list; | 
|---|
| 242 | 241 |   | 
|---|
| 243 | 242 |  	pid_list = rcu_dereference_raw(tr->filtered_pids); | 
|---|
| 244 |  | -	if (!pid_list)  | 
|---|
 | 243 | +	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);  | 
|---|
 | 244 | +  | 
|---|
 | 245 | +	if (!pid_list && !no_pid_list)  | 
|---|
| 245 | 246 |  		return false; | 
|---|
| 246 | 247 |   | 
|---|
| 247 |  | -	data = this_cpu_ptr(tr->trace_buffer.data);  | 
|---|
 | 248 | +	data = this_cpu_ptr(tr->array_buffer.data);  | 
|---|
| 248 | 249 |   | 
|---|
| 249 | 250 |  	return data->ignore_pid; | 
|---|
| 250 | 251 |  } | 
|---|
| .. | .. | 
|---|
| 260 | 261 |  	    trace_event_ignore_this_pid(trace_file)) | 
|---|
| 261 | 262 |  		return NULL; | 
|---|
| 262 | 263 |   | 
|---|
| 263 |  | -	local_save_flags(fbuffer->flags);  | 
|---|
| 264 |  | -	fbuffer->pc = preempt_count();  | 
|---|
| 265 | 264 |  	/* | 
|---|
| 266 |  | -	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables  | 
|---|
 | 265 | +	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables  | 
|---|
| 267 | 266 |  	 * preemption (adding one to the preempt_count). Since we are | 
|---|
| 268 | 267 |  	 * interested in the preempt_count at the time the tracepoint was | 
|---|
| 269 | 268 |  	 * hit, we need to subtract one to offset the increment. | 
|---|
| 270 | 269 |  	 */ | 
|---|
| 271 |  | -	if (IS_ENABLED(CONFIG_PREEMPT))  | 
|---|
| 272 |  | -		fbuffer->pc--;  | 
|---|
 | 270 | +	fbuffer->trace_ctx = tracing_gen_ctx_dec();  | 
|---|
| 273 | 271 |  	fbuffer->trace_file = trace_file; | 
|---|
| 274 | 272 |   | 
|---|
| 275 | 273 |  	fbuffer->event = | 
|---|
| 276 | 274 |  		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file, | 
|---|
| 277 | 275 |  						event_call->event.type, len, | 
|---|
| 278 |  | -						fbuffer->flags, fbuffer->pc);  | 
|---|
 | 276 | +						fbuffer->trace_ctx);  | 
|---|
| 279 | 277 |  	if (!fbuffer->event) | 
|---|
| 280 | 278 |  		return NULL; | 
|---|
| 281 | 279 |   | 
|---|
 | 280 | +	fbuffer->regs = NULL;  | 
|---|
| 282 | 281 |  	fbuffer->entry = ring_buffer_event_data(fbuffer->event); | 
|---|
| 283 | 282 |  	return fbuffer->entry; | 
|---|
| 284 | 283 |  } | 
|---|
| .. | .. | 
|---|
| 515 | 514 |   | 
|---|
| 516 | 515 |  	pid_list = rcu_dereference_raw(tr->filtered_pids); | 
|---|
| 517 | 516 |  	trace_filter_add_remove_task(pid_list, NULL, task); | 
|---|
 | 517 | +  | 
|---|
 | 518 | +	pid_list = rcu_dereference_raw(tr->filtered_no_pids);  | 
|---|
 | 519 | +	trace_filter_add_remove_task(pid_list, NULL, task);  | 
|---|
| 518 | 520 |  } | 
|---|
| 519 | 521 |   | 
|---|
| 520 | 522 |  static void | 
|---|
| .. | .. | 
|---|
| 526 | 528 |  	struct trace_array *tr = data; | 
|---|
| 527 | 529 |   | 
|---|
| 528 | 530 |  	pid_list = rcu_dereference_sched(tr->filtered_pids); | 
|---|
 | 531 | +	trace_filter_add_remove_task(pid_list, self, task);  | 
|---|
 | 532 | +  | 
|---|
 | 533 | +	pid_list = rcu_dereference_sched(tr->filtered_no_pids);  | 
|---|
| 529 | 534 |  	trace_filter_add_remove_task(pid_list, self, task); | 
|---|
| 530 | 535 |  } | 
|---|
| 531 | 536 |   | 
|---|
| .. | .. | 
|---|
| 549 | 554 |  		    struct task_struct *prev, struct task_struct *next) | 
|---|
| 550 | 555 |  { | 
|---|
| 551 | 556 |  	struct trace_array *tr = data; | 
|---|
 | 557 | +	struct trace_pid_list *no_pid_list;  | 
|---|
| 552 | 558 |  	struct trace_pid_list *pid_list; | 
|---|
 | 559 | +	bool ret;  | 
|---|
| 553 | 560 |   | 
|---|
| 554 | 561 |  	pid_list = rcu_dereference_sched(tr->filtered_pids); | 
|---|
 | 562 | +	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);  | 
|---|
| 555 | 563 |   | 
|---|
| 556 |  | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,  | 
|---|
| 557 |  | -		       trace_ignore_this_task(pid_list, prev) &&  | 
|---|
| 558 |  | -		       trace_ignore_this_task(pid_list, next));  | 
|---|
 | 564 | +	/*  | 
|---|
 | 565 | +	 * Sched switch is funny, as we only want to ignore it  | 
|---|
 | 566 | +	 * in the notrace case if both prev and next should be ignored.  | 
|---|
 | 567 | +	 */  | 
|---|
 | 568 | +	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&  | 
|---|
 | 569 | +		trace_ignore_this_task(NULL, no_pid_list, next);  | 
|---|
 | 570 | +  | 
|---|
 | 571 | +	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||  | 
|---|
 | 572 | +		       (trace_ignore_this_task(pid_list, NULL, prev) &&  | 
|---|
 | 573 | +			trace_ignore_this_task(pid_list, NULL, next)));  | 
|---|
| 559 | 574 |  } | 
|---|
| 560 | 575 |   | 
|---|
| 561 | 576 |  static void | 
|---|
| .. | .. | 
|---|
| 563 | 578 |  		    struct task_struct *prev, struct task_struct *next) | 
|---|
| 564 | 579 |  { | 
|---|
| 565 | 580 |  	struct trace_array *tr = data; | 
|---|
 | 581 | +	struct trace_pid_list *no_pid_list;  | 
|---|
| 566 | 582 |  	struct trace_pid_list *pid_list; | 
|---|
| 567 | 583 |   | 
|---|
| 568 | 584 |  	pid_list = rcu_dereference_sched(tr->filtered_pids); | 
|---|
 | 585 | +	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);  | 
|---|
| 569 | 586 |   | 
|---|
| 570 |  | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,  | 
|---|
| 571 |  | -		       trace_ignore_this_task(pid_list, next));  | 
|---|
 | 587 | +	this_cpu_write(tr->array_buffer.data->ignore_pid,  | 
|---|
 | 588 | +		       trace_ignore_this_task(pid_list, no_pid_list, next));  | 
|---|
| 572 | 589 |  } | 
|---|
| 573 | 590 |   | 
|---|
| 574 | 591 |  static void | 
|---|
| 575 | 592 |  event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task) | 
|---|
| 576 | 593 |  { | 
|---|
| 577 | 594 |  	struct trace_array *tr = data; | 
|---|
 | 595 | +	struct trace_pid_list *no_pid_list;  | 
|---|
| 578 | 596 |  	struct trace_pid_list *pid_list; | 
|---|
| 579 | 597 |   | 
|---|
| 580 | 598 |  	/* Nothing to do if we are already tracing */ | 
|---|
| 581 |  | -	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))  | 
|---|
 | 599 | +	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))  | 
|---|
| 582 | 600 |  		return; | 
|---|
| 583 | 601 |   | 
|---|
| 584 | 602 |  	pid_list = rcu_dereference_sched(tr->filtered_pids); | 
|---|
 | 603 | +	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);  | 
|---|
| 585 | 604 |   | 
|---|
| 586 |  | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,  | 
|---|
| 587 |  | -		       trace_ignore_this_task(pid_list, task));  | 
|---|
 | 605 | +	this_cpu_write(tr->array_buffer.data->ignore_pid,  | 
|---|
 | 606 | +		       trace_ignore_this_task(pid_list, no_pid_list, task));  | 
|---|
| 588 | 607 |  } | 
|---|
| 589 | 608 |   | 
|---|
| 590 | 609 |  static void | 
|---|
| 591 | 610 |  event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task) | 
|---|
| 592 | 611 |  { | 
|---|
| 593 | 612 |  	struct trace_array *tr = data; | 
|---|
 | 613 | +	struct trace_pid_list *no_pid_list;  | 
|---|
| 594 | 614 |  	struct trace_pid_list *pid_list; | 
|---|
| 595 | 615 |   | 
|---|
| 596 | 616 |  	/* Nothing to do if we are not tracing */ | 
|---|
| 597 |  | -	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))  | 
|---|
 | 617 | +	if (this_cpu_read(tr->array_buffer.data->ignore_pid))  | 
|---|
| 598 | 618 |  		return; | 
|---|
| 599 | 619 |   | 
|---|
| 600 | 620 |  	pid_list = rcu_dereference_sched(tr->filtered_pids); | 
|---|
 | 621 | +	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);  | 
|---|
| 601 | 622 |   | 
|---|
| 602 | 623 |  	/* Set tracing if current is enabled */ | 
|---|
| 603 |  | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,  | 
|---|
| 604 |  | -		       trace_ignore_this_task(pid_list, current));  | 
|---|
 | 624 | +	this_cpu_write(tr->array_buffer.data->ignore_pid,  | 
|---|
 | 625 | +		       trace_ignore_this_task(pid_list, no_pid_list, current));  | 
|---|
| 605 | 626 |  } | 
|---|
| 606 | 627 |   | 
|---|
| 607 |  | -static void __ftrace_clear_event_pids(struct trace_array *tr)  | 
|---|
 | 628 | +static void unregister_pid_events(struct trace_array *tr)  | 
|---|
| 608 | 629 |  { | 
|---|
| 609 |  | -	struct trace_pid_list *pid_list;  | 
|---|
| 610 |  | -	struct trace_event_file *file;  | 
|---|
| 611 |  | -	int cpu;  | 
|---|
| 612 |  | -  | 
|---|
| 613 |  | -	pid_list = rcu_dereference_protected(tr->filtered_pids,  | 
|---|
| 614 |  | -					     lockdep_is_held(&event_mutex));  | 
|---|
| 615 |  | -	if (!pid_list)  | 
|---|
| 616 |  | -		return;  | 
|---|
| 617 |  | -  | 
|---|
| 618 | 630 |  	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr); | 
|---|
| 619 | 631 |  	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr); | 
|---|
| 620 | 632 |   | 
|---|
| .. | .. | 
|---|
| 626 | 638 |   | 
|---|
| 627 | 639 |  	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr); | 
|---|
| 628 | 640 |  	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr); | 
|---|
 | 641 | +}  | 
|---|
| 629 | 642 |   | 
|---|
| 630 |  | -	list_for_each_entry(file, &tr->events, list) {  | 
|---|
| 631 |  | -		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);  | 
|---|
 | 643 | +static void __ftrace_clear_event_pids(struct trace_array *tr, int type)  | 
|---|
 | 644 | +{  | 
|---|
 | 645 | +	struct trace_pid_list *pid_list;  | 
|---|
 | 646 | +	struct trace_pid_list *no_pid_list;  | 
|---|
 | 647 | +	struct trace_event_file *file;  | 
|---|
 | 648 | +	int cpu;  | 
|---|
 | 649 | +  | 
|---|
 | 650 | +	pid_list = rcu_dereference_protected(tr->filtered_pids,  | 
|---|
 | 651 | +					     lockdep_is_held(&event_mutex));  | 
|---|
 | 652 | +	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,  | 
|---|
 | 653 | +					     lockdep_is_held(&event_mutex));  | 
|---|
 | 654 | +  | 
|---|
 | 655 | +	/* Make sure there's something to do */  | 
|---|
 | 656 | +	if (!pid_type_enabled(type, pid_list, no_pid_list))  | 
|---|
 | 657 | +		return;  | 
|---|
 | 658 | +  | 
|---|
 | 659 | +	if (!still_need_pid_events(type, pid_list, no_pid_list)) {  | 
|---|
 | 660 | +		unregister_pid_events(tr);  | 
|---|
 | 661 | +  | 
|---|
 | 662 | +		list_for_each_entry(file, &tr->events, list) {  | 
|---|
 | 663 | +			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);  | 
|---|
 | 664 | +		}  | 
|---|
 | 665 | +  | 
|---|
 | 666 | +		for_each_possible_cpu(cpu)  | 
|---|
 | 667 | +			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;  | 
|---|
| 632 | 668 |  	} | 
|---|
| 633 | 669 |   | 
|---|
| 634 |  | -	for_each_possible_cpu(cpu)  | 
|---|
| 635 |  | -		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;  | 
|---|
 | 670 | +	if (type & TRACE_PIDS)  | 
|---|
 | 671 | +		rcu_assign_pointer(tr->filtered_pids, NULL);  | 
|---|
| 636 | 672 |   | 
|---|
| 637 |  | -	rcu_assign_pointer(tr->filtered_pids, NULL);  | 
|---|
 | 673 | +	if (type & TRACE_NO_PIDS)  | 
|---|
 | 674 | +		rcu_assign_pointer(tr->filtered_no_pids, NULL);  | 
|---|
| 638 | 675 |   | 
|---|
| 639 | 676 |  	/* Wait till all users are no longer using pid filtering */ | 
|---|
| 640 | 677 |  	tracepoint_synchronize_unregister(); | 
|---|
| 641 | 678 |   | 
|---|
| 642 |  | -	trace_free_pid_list(pid_list);  | 
|---|
 | 679 | +	if ((type & TRACE_PIDS) && pid_list)  | 
|---|
 | 680 | +		trace_free_pid_list(pid_list);  | 
|---|
 | 681 | +  | 
|---|
 | 682 | +	if ((type & TRACE_NO_PIDS) && no_pid_list)  | 
|---|
 | 683 | +		trace_free_pid_list(no_pid_list);  | 
|---|
| 643 | 684 |  } | 
|---|
| 644 | 685 |   | 
|---|
| 645 |  | -static void ftrace_clear_event_pids(struct trace_array *tr)  | 
|---|
 | 686 | +static void ftrace_clear_event_pids(struct trace_array *tr, int type)  | 
|---|
| 646 | 687 |  { | 
|---|
| 647 | 688 |  	mutex_lock(&event_mutex); | 
|---|
| 648 |  | -	__ftrace_clear_event_pids(tr);  | 
|---|
 | 689 | +	__ftrace_clear_event_pids(tr, type);  | 
|---|
| 649 | 690 |  	mutex_unlock(&event_mutex); | 
|---|
| 650 | 691 |  } | 
|---|
| 651 | 692 |   | 
|---|
| .. | .. | 
|---|
| 704 | 745 |  		return; | 
|---|
| 705 | 746 |   | 
|---|
| 706 | 747 |  	if (!--dir->nr_events) { | 
|---|
| 707 |  | -		tracefs_remove_recursive(dir->entry);  | 
|---|
 | 748 | +		tracefs_remove(dir->entry);  | 
|---|
| 708 | 749 |  		list_del(&dir->list); | 
|---|
| 709 | 750 |  		__put_system_dir(dir); | 
|---|
| 710 | 751 |  	} | 
|---|
| .. | .. | 
|---|
| 723 | 764 |  		} | 
|---|
| 724 | 765 |  		spin_unlock(&dir->d_lock); | 
|---|
| 725 | 766 |   | 
|---|
| 726 |  | -		tracefs_remove_recursive(dir);  | 
|---|
 | 767 | +		tracefs_remove(dir);  | 
|---|
| 727 | 768 |  	} | 
|---|
| 728 | 769 |   | 
|---|
| 729 | 770 |  	list_del(&file->list); | 
|---|
| .. | .. | 
|---|
| 795 | 836 |  	return ret; | 
|---|
| 796 | 837 |  } | 
|---|
| 797 | 838 |   | 
|---|
| 798 |  | -static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)  | 
|---|
 | 839 | +int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)  | 
|---|
| 799 | 840 |  { | 
|---|
| 800 | 841 |  	char *event = NULL, *sub = NULL, *match; | 
|---|
| 801 | 842 |  	int ret; | 
|---|
| .. | .. | 
|---|
| 857 | 898 |  	return __ftrace_set_clr_event(tr, NULL, system, event, set); | 
|---|
| 858 | 899 |  } | 
|---|
| 859 | 900 |  EXPORT_SYMBOL_GPL(trace_set_clr_event); | 
|---|
 | 901 | +  | 
|---|
 | 902 | +/**  | 
|---|
 | 903 | + * trace_array_set_clr_event - enable or disable an event for a trace array.  | 
|---|
 | 904 | + * @tr: concerned trace array.  | 
|---|
 | 905 | + * @system: system name to match (NULL for any system)  | 
|---|
 | 906 | + * @event: event name to match (NULL for all events, within system)  | 
|---|
 | 907 | + * @enable: true to enable, false to disable  | 
|---|
 | 908 | + *  | 
|---|
 | 909 | + * This is a way for other parts of the kernel to enable or disable  | 
|---|
 | 910 | + * event recording.  | 
|---|
 | 911 | + *  | 
|---|
 | 912 | + * Returns 0 on success, -EINVAL if the parameters do not match any  | 
|---|
 | 913 | + * registered events.  | 
|---|
 | 914 | + */  | 
|---|
 | 915 | +int trace_array_set_clr_event(struct trace_array *tr, const char *system,  | 
|---|
 | 916 | +		const char *event, bool enable)  | 
|---|
 | 917 | +{  | 
|---|
 | 918 | +	int set;  | 
|---|
 | 919 | +  | 
|---|
 | 920 | +	if (!tr)  | 
|---|
 | 921 | +		return -ENOENT;  | 
|---|
 | 922 | +  | 
|---|
 | 923 | +	set = (enable == true) ? 1 : 0;  | 
|---|
 | 924 | +	return __ftrace_set_clr_event(tr, NULL, system, event, set);  | 
|---|
 | 925 | +}  | 
|---|
 | 926 | +EXPORT_SYMBOL_GPL(trace_array_set_clr_event);  | 
|---|
| 860 | 927 |   | 
|---|
| 861 | 928 |  /* 128 should be much more than enough */ | 
|---|
| 862 | 929 |  #define EVENT_BUF_SIZE		127 | 
|---|
| .. | .. | 
|---|
| 992 | 1059 |  } | 
|---|
| 993 | 1060 |   | 
|---|
| 994 | 1061 |  static void * | 
|---|
| 995 |  | -p_next(struct seq_file *m, void *v, loff_t *pos)  | 
|---|
 | 1062 | +__next(struct seq_file *m, void *v, loff_t *pos, int type)  | 
|---|
| 996 | 1063 |  { | 
|---|
| 997 | 1064 |  	struct trace_array *tr = m->private; | 
|---|
| 998 |  | -	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);  | 
|---|
 | 1065 | +	struct trace_pid_list *pid_list;  | 
|---|
 | 1066 | +  | 
|---|
 | 1067 | +	if (type == TRACE_PIDS)  | 
|---|
 | 1068 | +		pid_list = rcu_dereference_sched(tr->filtered_pids);  | 
|---|
 | 1069 | +	else  | 
|---|
 | 1070 | +		pid_list = rcu_dereference_sched(tr->filtered_no_pids);  | 
|---|
| 999 | 1071 |   | 
|---|
| 1000 | 1072 |  	return trace_pid_next(pid_list, v, pos); | 
|---|
| 1001 | 1073 |  } | 
|---|
| 1002 | 1074 |   | 
|---|
| 1003 |  | -static void *p_start(struct seq_file *m, loff_t *pos)  | 
|---|
 | 1075 | +static void *  | 
|---|
 | 1076 | +p_next(struct seq_file *m, void *v, loff_t *pos)  | 
|---|
 | 1077 | +{  | 
|---|
 | 1078 | +	return __next(m, v, pos, TRACE_PIDS);  | 
|---|
 | 1079 | +}  | 
|---|
 | 1080 | +  | 
|---|
 | 1081 | +static void *  | 
|---|
 | 1082 | +np_next(struct seq_file *m, void *v, loff_t *pos)  | 
|---|
 | 1083 | +{  | 
|---|
 | 1084 | +	return __next(m, v, pos, TRACE_NO_PIDS);  | 
|---|
 | 1085 | +}  | 
|---|
 | 1086 | +  | 
|---|
 | 1087 | +static void *__start(struct seq_file *m, loff_t *pos, int type)  | 
|---|
| 1004 | 1088 |  	__acquires(RCU) | 
|---|
| 1005 | 1089 |  { | 
|---|
| 1006 | 1090 |  	struct trace_pid_list *pid_list; | 
|---|
| .. | .. | 
|---|
| 1015 | 1099 |  	mutex_lock(&event_mutex); | 
|---|
| 1016 | 1100 |  	rcu_read_lock_sched(); | 
|---|
| 1017 | 1101 |   | 
|---|
| 1018 |  | -	pid_list = rcu_dereference_sched(tr->filtered_pids);  | 
|---|
 | 1102 | +	if (type == TRACE_PIDS)  | 
|---|
 | 1103 | +		pid_list = rcu_dereference_sched(tr->filtered_pids);  | 
|---|
 | 1104 | +	else  | 
|---|
 | 1105 | +		pid_list = rcu_dereference_sched(tr->filtered_no_pids);  | 
|---|
| 1019 | 1106 |   | 
|---|
| 1020 | 1107 |  	if (!pid_list) | 
|---|
| 1021 | 1108 |  		return NULL; | 
|---|
| 1022 | 1109 |   | 
|---|
| 1023 | 1110 |  	return trace_pid_start(pid_list, pos); | 
|---|
 | 1111 | +}  | 
|---|
 | 1112 | +  | 
|---|
 | 1113 | +static void *p_start(struct seq_file *m, loff_t *pos)  | 
|---|
 | 1114 | +	__acquires(RCU)  | 
|---|
 | 1115 | +{  | 
|---|
 | 1116 | +	return __start(m, pos, TRACE_PIDS);  | 
|---|
 | 1117 | +}  | 
|---|
 | 1118 | +  | 
|---|
 | 1119 | +static void *np_start(struct seq_file *m, loff_t *pos)  | 
|---|
 | 1120 | +	__acquires(RCU)  | 
|---|
 | 1121 | +{  | 
|---|
 | 1122 | +	return __start(m, pos, TRACE_NO_PIDS);  | 
|---|
| 1024 | 1123 |  } | 
|---|
| 1025 | 1124 |   | 
|---|
| 1026 | 1125 |  static void p_stop(struct seq_file *m, void *p) | 
|---|
| .. | .. | 
|---|
| 1254 | 1353 |  	 */ | 
|---|
| 1255 | 1354 |  	array_descriptor = strchr(field->type, '['); | 
|---|
| 1256 | 1355 |   | 
|---|
| 1257 |  | -	if (!strncmp(field->type, "__data_loc", 10))  | 
|---|
 | 1356 | +	if (str_has_prefix(field->type, "__data_loc"))  | 
|---|
| 1258 | 1357 |  		array_descriptor = NULL; | 
|---|
| 1259 | 1358 |   | 
|---|
| 1260 | 1359 |  	if (!array_descriptor) | 
|---|
| .. | .. | 
|---|
| 1303 | 1402 |  { | 
|---|
| 1304 | 1403 |  	struct seq_file *m; | 
|---|
| 1305 | 1404 |  	int ret; | 
|---|
 | 1405 | +  | 
|---|
 | 1406 | +	/* Do we want to hide event format files on tracefs lockdown? */  | 
|---|
| 1306 | 1407 |   | 
|---|
| 1307 | 1408 |  	ret = seq_open(file, &trace_format_seq_ops); | 
|---|
| 1308 | 1409 |  	if (ret < 0) | 
|---|
| .. | .. | 
|---|
| 1450 | 1551 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 1451 | 1552 |  	int ret; | 
|---|
| 1452 | 1553 |   | 
|---|
| 1453 |  | -	if (tracing_is_disabled())  | 
|---|
| 1454 |  | -		return -ENODEV;  | 
|---|
| 1455 |  | -  | 
|---|
| 1456 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 1457 |  | -		return -ENODEV;  | 
|---|
| 1458 |  | -  | 
|---|
| 1459 | 1554 |  	/* Make a temporary dir that has no system but points to tr */ | 
|---|
| 1460 | 1555 |  	dir = kzalloc(sizeof(*dir), GFP_KERNEL); | 
|---|
| 1461 |  | -	if (!dir) {  | 
|---|
| 1462 |  | -		trace_array_put(tr);  | 
|---|
 | 1556 | +	if (!dir)  | 
|---|
| 1463 | 1557 |  		return -ENOMEM; | 
|---|
| 1464 |  | -	}  | 
|---|
| 1465 | 1558 |   | 
|---|
| 1466 |  | -	dir->tr = tr;  | 
|---|
| 1467 |  | -  | 
|---|
| 1468 |  | -	ret = tracing_open_generic(inode, filp);  | 
|---|
 | 1559 | +	ret = tracing_open_generic_tr(inode, filp);  | 
|---|
| 1469 | 1560 |  	if (ret < 0) { | 
|---|
| 1470 |  | -		trace_array_put(tr);  | 
|---|
| 1471 | 1561 |  		kfree(dir); | 
|---|
| 1472 | 1562 |  		return ret; | 
|---|
| 1473 | 1563 |  	} | 
|---|
| 1474 |  | -  | 
|---|
 | 1564 | +	dir->tr = tr;  | 
|---|
| 1475 | 1565 |  	filp->private_data = dir; | 
|---|
| 1476 | 1566 |   | 
|---|
| 1477 | 1567 |  	return 0; | 
|---|
| .. | .. | 
|---|
| 1577 | 1667 |  { | 
|---|
| 1578 | 1668 |  	struct trace_array *tr = data; | 
|---|
| 1579 | 1669 |  	struct trace_pid_list *pid_list; | 
|---|
 | 1670 | +	struct trace_pid_list *no_pid_list;  | 
|---|
| 1580 | 1671 |   | 
|---|
| 1581 | 1672 |  	/* | 
|---|
| 1582 | 1673 |  	 * This function is called by on_each_cpu() while the | 
|---|
| .. | .. | 
|---|
| 1584 | 1675 |  	 */ | 
|---|
| 1585 | 1676 |  	pid_list = rcu_dereference_protected(tr->filtered_pids, | 
|---|
| 1586 | 1677 |  					     mutex_is_locked(&event_mutex)); | 
|---|
 | 1678 | +	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,  | 
|---|
 | 1679 | +					     mutex_is_locked(&event_mutex));  | 
|---|
| 1587 | 1680 |   | 
|---|
| 1588 |  | -	this_cpu_write(tr->trace_buffer.data->ignore_pid,  | 
|---|
| 1589 |  | -		       trace_ignore_this_task(pid_list, current));  | 
|---|
 | 1681 | +	this_cpu_write(tr->array_buffer.data->ignore_pid,  | 
|---|
 | 1682 | +		       trace_ignore_this_task(pid_list, no_pid_list, current));  | 
|---|
 | 1683 | +}  | 
|---|
 | 1684 | +  | 
|---|
 | 1685 | +static void register_pid_events(struct trace_array *tr)  | 
|---|
 | 1686 | +{  | 
|---|
 | 1687 | +	/*  | 
|---|
 | 1688 | +	 * Register a probe that is called before all other probes  | 
|---|
 | 1689 | +	 * to set ignore_pid if next or prev do not match.  | 
|---|
 | 1690 | +	 * Register a probe that is called after all other probes  | 
|---|
 | 1691 | +	 * to only keep ignore_pid set if next pid matches.  | 
|---|
 | 1692 | +	 */  | 
|---|
 | 1693 | +	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,  | 
|---|
 | 1694 | +					 tr, INT_MAX);  | 
|---|
 | 1695 | +	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,  | 
|---|
 | 1696 | +					 tr, 0);  | 
|---|
 | 1697 | +  | 
|---|
 | 1698 | +	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,  | 
|---|
 | 1699 | +					 tr, INT_MAX);  | 
|---|
 | 1700 | +	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,  | 
|---|
 | 1701 | +					 tr, 0);  | 
|---|
 | 1702 | +  | 
|---|
 | 1703 | +	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,  | 
|---|
 | 1704 | +					     tr, INT_MAX);  | 
|---|
 | 1705 | +	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,  | 
|---|
 | 1706 | +					     tr, 0);  | 
|---|
 | 1707 | +  | 
|---|
 | 1708 | +	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,  | 
|---|
 | 1709 | +					 tr, INT_MAX);  | 
|---|
 | 1710 | +	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,  | 
|---|
 | 1711 | +					 tr, 0);  | 
|---|
| 1590 | 1712 |  } | 
|---|
| 1591 | 1713 |   | 
|---|
| 1592 | 1714 |  static ssize_t | 
|---|
| 1593 |  | -ftrace_event_pid_write(struct file *filp, const char __user *ubuf,  | 
|---|
| 1594 |  | -		       size_t cnt, loff_t *ppos)  | 
|---|
 | 1715 | +event_pid_write(struct file *filp, const char __user *ubuf,  | 
|---|
 | 1716 | +		size_t cnt, loff_t *ppos, int type)  | 
|---|
| 1595 | 1717 |  { | 
|---|
| 1596 | 1718 |  	struct seq_file *m = filp->private_data; | 
|---|
| 1597 | 1719 |  	struct trace_array *tr = m->private; | 
|---|
| 1598 | 1720 |  	struct trace_pid_list *filtered_pids = NULL; | 
|---|
 | 1721 | +	struct trace_pid_list *other_pids = NULL;  | 
|---|
| 1599 | 1722 |  	struct trace_pid_list *pid_list; | 
|---|
| 1600 | 1723 |  	struct trace_event_file *file; | 
|---|
| 1601 | 1724 |  	ssize_t ret; | 
|---|
| .. | .. | 
|---|
| 1609 | 1732 |   | 
|---|
| 1610 | 1733 |  	mutex_lock(&event_mutex); | 
|---|
| 1611 | 1734 |   | 
|---|
| 1612 |  | -	filtered_pids = rcu_dereference_protected(tr->filtered_pids,  | 
|---|
| 1613 |  | -					     lockdep_is_held(&event_mutex));  | 
|---|
 | 1735 | +	if (type == TRACE_PIDS) {  | 
|---|
 | 1736 | +		filtered_pids = rcu_dereference_protected(tr->filtered_pids,  | 
|---|
 | 1737 | +							  lockdep_is_held(&event_mutex));  | 
|---|
 | 1738 | +		other_pids = rcu_dereference_protected(tr->filtered_no_pids,  | 
|---|
 | 1739 | +							  lockdep_is_held(&event_mutex));  | 
|---|
 | 1740 | +	} else {  | 
|---|
 | 1741 | +		filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,  | 
|---|
 | 1742 | +							  lockdep_is_held(&event_mutex));  | 
|---|
 | 1743 | +		other_pids = rcu_dereference_protected(tr->filtered_pids,  | 
|---|
 | 1744 | +							  lockdep_is_held(&event_mutex));  | 
|---|
 | 1745 | +	}  | 
|---|
| 1614 | 1746 |   | 
|---|
| 1615 | 1747 |  	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); | 
|---|
| 1616 | 1748 |  	if (ret < 0) | 
|---|
| 1617 | 1749 |  		goto out; | 
|---|
| 1618 | 1750 |   | 
|---|
| 1619 |  | -	rcu_assign_pointer(tr->filtered_pids, pid_list);  | 
|---|
 | 1751 | +	if (type == TRACE_PIDS)  | 
|---|
 | 1752 | +		rcu_assign_pointer(tr->filtered_pids, pid_list);  | 
|---|
 | 1753 | +	else  | 
|---|
 | 1754 | +		rcu_assign_pointer(tr->filtered_no_pids, pid_list);  | 
|---|
| 1620 | 1755 |   | 
|---|
| 1621 | 1756 |  	list_for_each_entry(file, &tr->events, list) { | 
|---|
| 1622 | 1757 |  		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); | 
|---|
| .. | .. | 
|---|
| 1625 | 1760 |  	if (filtered_pids) { | 
|---|
| 1626 | 1761 |  		tracepoint_synchronize_unregister(); | 
|---|
| 1627 | 1762 |  		trace_free_pid_list(filtered_pids); | 
|---|
| 1628 |  | -	} else if (pid_list) {  | 
|---|
| 1629 |  | -		/*  | 
|---|
| 1630 |  | -		 * Register a probe that is called before all other probes  | 
|---|
| 1631 |  | -		 * to set ignore_pid if next or prev do not match.  | 
|---|
| 1632 |  | -		 * Register a probe this is called after all other probes  | 
|---|
| 1633 |  | -		 * to only keep ignore_pid set if next pid matches.  | 
|---|
| 1634 |  | -		 */  | 
|---|
| 1635 |  | -		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,  | 
|---|
| 1636 |  | -						 tr, INT_MAX);  | 
|---|
| 1637 |  | -		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,  | 
|---|
| 1638 |  | -						 tr, 0);  | 
|---|
| 1639 |  | -  | 
|---|
| 1640 |  | -		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,  | 
|---|
| 1641 |  | -						 tr, INT_MAX);  | 
|---|
| 1642 |  | -		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,  | 
|---|
| 1643 |  | -						 tr, 0);  | 
|---|
| 1644 |  | -  | 
|---|
| 1645 |  | -		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,  | 
|---|
| 1646 |  | -						     tr, INT_MAX);  | 
|---|
| 1647 |  | -		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,  | 
|---|
| 1648 |  | -						     tr, 0);  | 
|---|
| 1649 |  | -  | 
|---|
| 1650 |  | -		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,  | 
|---|
| 1651 |  | -						 tr, INT_MAX);  | 
|---|
| 1652 |  | -		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,  | 
|---|
| 1653 |  | -						 tr, 0);  | 
|---|
 | 1763 | +	} else if (pid_list && !other_pids) {  | 
|---|
 | 1764 | +		register_pid_events(tr);  | 
|---|
| 1654 | 1765 |  	} | 
|---|
| 1655 | 1766 |   | 
|---|
| 1656 | 1767 |  	/* | 
|---|
| .. | .. | 
|---|
| 1669 | 1780 |  	return ret; | 
|---|
| 1670 | 1781 |  } | 
|---|
| 1671 | 1782 |   | 
|---|
 | 1783 | +static ssize_t  | 
|---|
 | 1784 | +ftrace_event_pid_write(struct file *filp, const char __user *ubuf,  | 
|---|
 | 1785 | +		       size_t cnt, loff_t *ppos)  | 
|---|
 | 1786 | +{  | 
|---|
 | 1787 | +	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);  | 
|---|
 | 1788 | +}  | 
|---|
 | 1789 | +  | 
|---|
 | 1790 | +static ssize_t  | 
|---|
 | 1791 | +ftrace_event_npid_write(struct file *filp, const char __user *ubuf,  | 
|---|
 | 1792 | +			size_t cnt, loff_t *ppos)  | 
|---|
 | 1793 | +{  | 
|---|
 | 1794 | +	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);  | 
|---|
 | 1795 | +}  | 
|---|
 | 1796 | +  | 
|---|
| 1672 | 1797 |  static int ftrace_event_avail_open(struct inode *inode, struct file *file); | 
|---|
| 1673 | 1798 |  static int ftrace_event_set_open(struct inode *inode, struct file *file); | 
|---|
| 1674 | 1799 |  static int ftrace_event_set_pid_open(struct inode *inode, struct file *file); | 
|---|
 | 1800 | +static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);  | 
|---|
| 1675 | 1801 |  static int ftrace_event_release(struct inode *inode, struct file *file); | 
|---|
| 1676 | 1802 |   | 
|---|
| 1677 | 1803 |  static const struct seq_operations show_event_seq_ops = { | 
|---|
| .. | .. | 
|---|
| 1695 | 1821 |  	.stop = p_stop, | 
|---|
| 1696 | 1822 |  }; | 
|---|
| 1697 | 1823 |   | 
|---|
 | 1824 | +static const struct seq_operations show_set_no_pid_seq_ops = {  | 
|---|
 | 1825 | +	.start = np_start,  | 
|---|
 | 1826 | +	.next = np_next,  | 
|---|
 | 1827 | +	.show = trace_pid_show,  | 
|---|
 | 1828 | +	.stop = p_stop,  | 
|---|
 | 1829 | +};  | 
|---|
 | 1830 | +  | 
|---|
| 1698 | 1831 |  static const struct file_operations ftrace_avail_fops = { | 
|---|
| 1699 | 1832 |  	.open = ftrace_event_avail_open, | 
|---|
| 1700 | 1833 |  	.read = seq_read, | 
|---|
| .. | .. | 
|---|
| 1714 | 1847 |  	.open = ftrace_event_set_pid_open, | 
|---|
| 1715 | 1848 |  	.read = seq_read, | 
|---|
| 1716 | 1849 |  	.write = ftrace_event_pid_write, | 
|---|
 | 1850 | +	.llseek = seq_lseek,  | 
|---|
 | 1851 | +	.release = ftrace_event_release,  | 
|---|
 | 1852 | +};  | 
|---|
 | 1853 | +  | 
|---|
 | 1854 | +static const struct file_operations ftrace_set_event_notrace_pid_fops = {  | 
|---|
 | 1855 | +	.open = ftrace_event_set_npid_open,  | 
|---|
 | 1856 | +	.read = seq_read,  | 
|---|
 | 1857 | +	.write = ftrace_event_npid_write,  | 
|---|
| 1717 | 1858 |  	.llseek = seq_lseek, | 
|---|
| 1718 | 1859 |  	.release = ftrace_event_release, | 
|---|
| 1719 | 1860 |  }; | 
|---|
| .. | .. | 
|---|
| 1781 | 1922 |  	struct seq_file *m; | 
|---|
| 1782 | 1923 |  	int ret; | 
|---|
| 1783 | 1924 |   | 
|---|
 | 1925 | +	ret = security_locked_down(LOCKDOWN_TRACEFS);  | 
|---|
 | 1926 | +	if (ret)  | 
|---|
 | 1927 | +		return ret;  | 
|---|
 | 1928 | +  | 
|---|
| 1784 | 1929 |  	ret = seq_open(file, seq_ops); | 
|---|
| 1785 | 1930 |  	if (ret < 0) | 
|---|
| 1786 | 1931 |  		return ret; | 
|---|
| .. | .. | 
|---|
| 1805 | 1950 |  { | 
|---|
| 1806 | 1951 |  	const struct seq_operations *seq_ops = &show_event_seq_ops; | 
|---|
| 1807 | 1952 |   | 
|---|
 | 1953 | +	/* Checks for tracefs lockdown */  | 
|---|
| 1808 | 1954 |  	return ftrace_event_open(inode, file, seq_ops); | 
|---|
| 1809 | 1955 |  } | 
|---|
| 1810 | 1956 |   | 
|---|
| .. | .. | 
|---|
| 1815 | 1961 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 1816 | 1962 |  	int ret; | 
|---|
| 1817 | 1963 |   | 
|---|
| 1818 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 1819 |  | -		return -ENODEV;  | 
|---|
 | 1964 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 1965 | +	if (ret)  | 
|---|
 | 1966 | +		return ret;  | 
|---|
| 1820 | 1967 |   | 
|---|
| 1821 | 1968 |  	if ((file->f_mode & FMODE_WRITE) && | 
|---|
| 1822 | 1969 |  	    (file->f_flags & O_TRUNC)) | 
|---|
| .. | .. | 
|---|
| 1835 | 1982 |  	struct trace_array *tr = inode->i_private; | 
|---|
| 1836 | 1983 |  	int ret; | 
|---|
| 1837 | 1984 |   | 
|---|
| 1838 |  | -	if (trace_array_get(tr) < 0)  | 
|---|
| 1839 |  | -		return -ENODEV;  | 
|---|
 | 1985 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 1986 | +	if (ret)  | 
|---|
 | 1987 | +		return ret;  | 
|---|
| 1840 | 1988 |   | 
|---|
| 1841 | 1989 |  	if ((file->f_mode & FMODE_WRITE) && | 
|---|
| 1842 | 1990 |  	    (file->f_flags & O_TRUNC)) | 
|---|
| 1843 |  | -		ftrace_clear_event_pids(tr);  | 
|---|
 | 1991 | +		ftrace_clear_event_pids(tr, TRACE_PIDS);  | 
|---|
 | 1992 | +  | 
|---|
 | 1993 | +	ret = ftrace_event_open(inode, file, seq_ops);  | 
|---|
 | 1994 | +	if (ret < 0)  | 
|---|
 | 1995 | +		trace_array_put(tr);  | 
|---|
 | 1996 | +	return ret;  | 
|---|
 | 1997 | +}  | 
|---|
 | 1998 | +  | 
|---|
 | 1999 | +static int  | 
|---|
 | 2000 | +ftrace_event_set_npid_open(struct inode *inode, struct file *file)  | 
|---|
 | 2001 | +{  | 
|---|
 | 2002 | +	const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;  | 
|---|
 | 2003 | +	struct trace_array *tr = inode->i_private;  | 
|---|
 | 2004 | +	int ret;  | 
|---|
 | 2005 | +  | 
|---|
 | 2006 | +	ret = tracing_check_open_get_tr(tr);  | 
|---|
 | 2007 | +	if (ret)  | 
|---|
 | 2008 | +		return ret;  | 
|---|
 | 2009 | +  | 
|---|
 | 2010 | +	if ((file->f_mode & FMODE_WRITE) &&  | 
|---|
 | 2011 | +	    (file->f_flags & O_TRUNC))  | 
|---|
 | 2012 | +		ftrace_clear_event_pids(tr, TRACE_NO_PIDS);  | 
|---|
| 1844 | 2013 |   | 
|---|
| 1845 | 2014 |  	ret = ftrace_event_open(inode, file, seq_ops); | 
|---|
| 1846 | 2015 |  	if (ret < 0) | 
|---|
| .. | .. | 
|---|
| 1957 | 2126 |  } | 
|---|
| 1958 | 2127 |   | 
|---|
| 1959 | 2128 |  static int | 
|---|
 | 2129 | +event_define_fields(struct trace_event_call *call)  | 
|---|
 | 2130 | +{  | 
|---|
 | 2131 | +	struct list_head *head;  | 
|---|
 | 2132 | +	int ret = 0;  | 
|---|
 | 2133 | +  | 
|---|
 | 2134 | +	/*  | 
|---|
 | 2135 | +	 * Other events may have the same class. Only update  | 
|---|
 | 2136 | +	 * the fields if they are not already defined.  | 
|---|
 | 2137 | +	 */  | 
|---|
 | 2138 | +	head = trace_get_fields(call);  | 
|---|
 | 2139 | +	if (list_empty(head)) {  | 
|---|
 | 2140 | +		struct trace_event_fields *field = call->class->fields_array;  | 
|---|
 | 2141 | +		unsigned int offset = sizeof(struct trace_entry);  | 
|---|
 | 2142 | +  | 
|---|
 | 2143 | +		for (; field->type; field++) {  | 
|---|
 | 2144 | +			if (field->type == TRACE_FUNCTION_TYPE) {  | 
|---|
 | 2145 | +				field->define_fields(call);  | 
|---|
 | 2146 | +				break;  | 
|---|
 | 2147 | +			}  | 
|---|
 | 2148 | +  | 
|---|
 | 2149 | +			offset = ALIGN(offset, field->align);  | 
|---|
 | 2150 | +			ret = trace_define_field(call, field->type, field->name,  | 
|---|
 | 2151 | +						 offset, field->size,  | 
|---|
 | 2152 | +						 field->is_signed, field->filter_type);  | 
|---|
 | 2153 | +			if (WARN_ON_ONCE(ret)) {  | 
|---|
 | 2154 | +				pr_err("error code is %d\n", ret);  | 
|---|
 | 2155 | +				break;  | 
|---|
 | 2156 | +			}  | 
|---|
 | 2157 | +  | 
|---|
 | 2158 | +			offset += field->size;  | 
|---|
 | 2159 | +		}  | 
|---|
 | 2160 | +	}  | 
|---|
 | 2161 | +  | 
|---|
 | 2162 | +	return ret;  | 
|---|
 | 2163 | +}  | 
|---|
 | 2164 | +  | 
|---|
 | 2165 | +static int  | 
|---|
| 1960 | 2166 |  event_create_dir(struct dentry *parent, struct trace_event_file *file) | 
|---|
| 1961 | 2167 |  { | 
|---|
| 1962 | 2168 |  	struct trace_event_call *call = file->event_call; | 
|---|
| 1963 | 2169 |  	struct trace_array *tr = file->tr; | 
|---|
| 1964 |  | -	struct list_head *head;  | 
|---|
| 1965 | 2170 |  	struct dentry *d_events; | 
|---|
| 1966 | 2171 |  	const char *name; | 
|---|
| 1967 | 2172 |  	int ret; | 
|---|
| .. | .. | 
|---|
| 1995 | 2200 |  				  &ftrace_event_id_fops); | 
|---|
| 1996 | 2201 |  #endif | 
|---|
| 1997 | 2202 |   | 
|---|
| 1998 |  | -	/*  | 
|---|
| 1999 |  | -	 * Other events may have the same class. Only update  | 
|---|
| 2000 |  | -	 * the fields if they are not already defined.  | 
|---|
| 2001 |  | -	 */  | 
|---|
| 2002 |  | -	head = trace_get_fields(call);  | 
|---|
| 2003 |  | -	if (list_empty(head)) {  | 
|---|
| 2004 |  | -		ret = call->class->define_fields(call);  | 
|---|
| 2005 |  | -		if (ret < 0) {  | 
|---|
| 2006 |  | -			pr_warn("Could not initialize trace point events/%s\n",  | 
|---|
| 2007 |  | -				name);  | 
|---|
| 2008 |  | -			return -1;  | 
|---|
| 2009 |  | -		}  | 
|---|
 | 2203 | +	ret = event_define_fields(call);  | 
|---|
 | 2204 | +	if (ret < 0) {  | 
|---|
 | 2205 | +		pr_warn("Could not initialize trace point events/%s\n", name);  | 
|---|
 | 2206 | +		return ret;  | 
|---|
| 2010 | 2207 |  	} | 
|---|
| 2011 | 2208 |   | 
|---|
| 2012 | 2209 |  	/* | 
|---|
| .. | .. | 
|---|
| 2025 | 2222 |  	trace_create_file("hist", 0444, file->dir, file, | 
|---|
| 2026 | 2223 |  			  &event_hist_fops); | 
|---|
| 2027 | 2224 |  #endif | 
|---|
 | 2225 | +#ifdef CONFIG_HIST_TRIGGERS_DEBUG  | 
|---|
 | 2226 | +	trace_create_file("hist_debug", 0444, file->dir, file,  | 
|---|
 | 2227 | +			  &event_hist_debug_fops);  | 
|---|
 | 2228 | +#endif  | 
|---|
| 2028 | 2229 |  	trace_create_file("format", 0444, file->dir, call, | 
|---|
| 2029 | 2230 |  			  &ftrace_event_format_fops); | 
|---|
 | 2231 | +  | 
|---|
 | 2232 | +#ifdef CONFIG_TRACE_EVENT_INJECT  | 
|---|
 | 2233 | +	if (call->event.type && call->class->reg)  | 
|---|
 | 2234 | +		trace_create_file("inject", 0200, file->dir, file,  | 
|---|
 | 2235 | +				  &event_inject_fops);  | 
|---|
 | 2236 | +#endif  | 
|---|
| 2030 | 2237 |   | 
|---|
| 2031 | 2238 |  	return 0; | 
|---|
| 2032 | 2239 |  } | 
|---|
| .. | .. | 
|---|
| 2255 | 2462 |  trace_create_new_event(struct trace_event_call *call, | 
|---|
| 2256 | 2463 |  		       struct trace_array *tr) | 
|---|
| 2257 | 2464 |  { | 
|---|
 | 2465 | +	struct trace_pid_list *no_pid_list;  | 
|---|
| 2258 | 2466 |  	struct trace_pid_list *pid_list; | 
|---|
| 2259 | 2467 |  	struct trace_event_file *file; | 
|---|
| 2260 | 2468 |   | 
|---|
| .. | .. | 
|---|
| 2264 | 2472 |   | 
|---|
| 2265 | 2473 |  	pid_list = rcu_dereference_protected(tr->filtered_pids, | 
|---|
| 2266 | 2474 |  					     lockdep_is_held(&event_mutex)); | 
|---|
 | 2475 | +	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,  | 
|---|
 | 2476 | +					     lockdep_is_held(&event_mutex));  | 
|---|
| 2267 | 2477 |   | 
|---|
| 2268 |  | -	if (pid_list)  | 
|---|
 | 2478 | +	if (pid_list || no_pid_list)  | 
|---|
| 2269 | 2479 |  		file->flags |= EVENT_FILE_FL_PID_FILTER; | 
|---|
| 2270 | 2480 |   | 
|---|
| 2271 | 2481 |  	file->event_call = call; | 
|---|
| .. | .. | 
|---|
| 2288 | 2498 |  	if (!file) | 
|---|
| 2289 | 2499 |  		return -ENOMEM; | 
|---|
| 2290 | 2500 |   | 
|---|
| 2291 |  | -	return event_create_dir(tr->event_dir, file);  | 
|---|
 | 2501 | +	if (eventdir_initialized)  | 
|---|
 | 2502 | +		return event_create_dir(tr->event_dir, file);  | 
|---|
 | 2503 | +	else  | 
|---|
 | 2504 | +		return event_define_fields(call);  | 
|---|
| 2292 | 2505 |  } | 
|---|
| 2293 | 2506 |   | 
|---|
| 2294 | 2507 |  /* | 
|---|
| .. | .. | 
|---|
| 2296 | 2509 |   * for enabling events at boot. We want to enable events before | 
|---|
| 2297 | 2510 |   * the filesystem is initialized. | 
|---|
| 2298 | 2511 |   */ | 
|---|
| 2299 |  | -static __init int  | 
|---|
 | 2512 | +static int  | 
|---|
| 2300 | 2513 |  __trace_early_add_new_event(struct trace_event_call *call, | 
|---|
| 2301 | 2514 |  			    struct trace_array *tr) | 
|---|
| 2302 | 2515 |  { | 
|---|
| .. | .. | 
|---|
| 2306 | 2519 |  	if (!file) | 
|---|
| 2307 | 2520 |  		return -ENOMEM; | 
|---|
| 2308 | 2521 |   | 
|---|
| 2309 |  | -	return 0;  | 
|---|
 | 2522 | +	return event_define_fields(call);  | 
|---|
| 2310 | 2523 |  } | 
|---|
| 2311 | 2524 |   | 
|---|
| 2312 | 2525 |  struct ftrace_module_file_ops; | 
|---|
| 2313 | 2526 |  static void __add_event_to_tracers(struct trace_event_call *call); | 
|---|
| 2314 | 2527 |   | 
|---|
| 2315 |  | -int trace_add_event_call_nolock(struct trace_event_call *call)  | 
|---|
 | 2528 | +/* Add an additional event_call dynamically */  | 
|---|
 | 2529 | +int trace_add_event_call(struct trace_event_call *call)  | 
|---|
| 2316 | 2530 |  { | 
|---|
| 2317 | 2531 |  	int ret; | 
|---|
| 2318 | 2532 |  	lockdep_assert_held(&event_mutex); | 
|---|
| .. | .. | 
|---|
| 2324 | 2538 |  		__add_event_to_tracers(call); | 
|---|
| 2325 | 2539 |   | 
|---|
| 2326 | 2540 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 2327 |  | -	return ret;  | 
|---|
| 2328 |  | -}  | 
|---|
| 2329 |  | -  | 
|---|
| 2330 |  | -/* Add an additional event_call dynamically */  | 
|---|
| 2331 |  | -int trace_add_event_call(struct trace_event_call *call)  | 
|---|
| 2332 |  | -{  | 
|---|
| 2333 |  | -	int ret;  | 
|---|
| 2334 |  | -  | 
|---|
| 2335 |  | -	mutex_lock(&event_mutex);  | 
|---|
| 2336 |  | -	ret = trace_add_event_call_nolock(call);  | 
|---|
| 2337 |  | -	mutex_unlock(&event_mutex);  | 
|---|
| 2338 | 2541 |  	return ret; | 
|---|
| 2339 | 2542 |  } | 
|---|
| 2340 | 2543 |   | 
|---|
| .. | .. | 
|---|
| 2368 | 2571 |  		 * TRACE_REG_UNREGISTER. | 
|---|
| 2369 | 2572 |  		 */ | 
|---|
| 2370 | 2573 |  		if (file->flags & EVENT_FILE_FL_ENABLED) | 
|---|
| 2371 |  | -			return -EBUSY;  | 
|---|
 | 2574 | +			goto busy;  | 
|---|
 | 2575 | +  | 
|---|
 | 2576 | +		if (file->flags & EVENT_FILE_FL_WAS_ENABLED)  | 
|---|
 | 2577 | +			tr->clear_trace = true;  | 
|---|
| 2372 | 2578 |  		/* | 
|---|
| 2373 | 2579 |  		 * The do_for_each_event_file_safe() is | 
|---|
| 2374 | 2580 |  		 * a double loop. After finding the call for this | 
|---|
| .. | .. | 
|---|
| 2381 | 2587 |  	__trace_remove_event_call(call); | 
|---|
| 2382 | 2588 |   | 
|---|
| 2383 | 2589 |  	return 0; | 
|---|
 | 2590 | + busy:  | 
|---|
 | 2591 | +	/* No need to clear the trace now */  | 
|---|
 | 2592 | +	list_for_each_entry(tr, &ftrace_trace_arrays, list) {  | 
|---|
 | 2593 | +		tr->clear_trace = false;  | 
|---|
 | 2594 | +	}  | 
|---|
 | 2595 | +	return -EBUSY;  | 
|---|
| 2384 | 2596 |  } | 
|---|
| 2385 | 2597 |   | 
|---|
| 2386 |  | -/* no event_mutex version */  | 
|---|
| 2387 |  | -int trace_remove_event_call_nolock(struct trace_event_call *call)  | 
|---|
 | 2598 | +/* Remove an event_call */  | 
|---|
 | 2599 | +int trace_remove_event_call(struct trace_event_call *call)  | 
|---|
| 2388 | 2600 |  { | 
|---|
| 2389 | 2601 |  	int ret; | 
|---|
| 2390 | 2602 |   | 
|---|
| .. | .. | 
|---|
| 2395 | 2607 |  	ret = probe_remove_event_call(call); | 
|---|
| 2396 | 2608 |  	up_write(&trace_event_sem); | 
|---|
| 2397 | 2609 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 2398 |  | -  | 
|---|
| 2399 |  | -	return ret;  | 
|---|
| 2400 |  | -}  | 
|---|
| 2401 |  | -  | 
|---|
| 2402 |  | -/* Remove an event_call */  | 
|---|
| 2403 |  | -int trace_remove_event_call(struct trace_event_call *call)  | 
|---|
| 2404 |  | -{  | 
|---|
| 2405 |  | -	int ret;  | 
|---|
| 2406 |  | -  | 
|---|
| 2407 |  | -	mutex_lock(&event_mutex);  | 
|---|
| 2408 |  | -	ret = trace_remove_event_call_nolock(call);  | 
|---|
| 2409 |  | -	mutex_unlock(&event_mutex);  | 
|---|
| 2410 | 2610 |   | 
|---|
| 2411 | 2611 |  	return ret; | 
|---|
| 2412 | 2612 |  } | 
|---|
| .. | .. | 
|---|
| 2481 | 2681 |  	mutex_unlock(&trace_types_lock); | 
|---|
| 2482 | 2682 |  	mutex_unlock(&event_mutex); | 
|---|
| 2483 | 2683 |   | 
|---|
| 2484 |  | -	return 0;  | 
|---|
 | 2684 | +	return NOTIFY_OK;  | 
|---|
| 2485 | 2685 |  } | 
|---|
| 2486 | 2686 |   | 
|---|
| 2487 | 2687 |  static struct notifier_block trace_module_nb = { | 
|---|
| .. | .. | 
|---|
| 2541 | 2741 |   | 
|---|
| 2542 | 2742 |  	return file; | 
|---|
| 2543 | 2743 |  } | 
|---|
 | 2744 | +  | 
|---|
 | 2745 | +/**  | 
|---|
 | 2746 | + * trace_get_event_file - Find and return a trace event file  | 
|---|
 | 2747 | + * @instance: The name of the trace instance containing the event  | 
|---|
 | 2748 | + * @system: The name of the system containing the event  | 
|---|
 | 2749 | + * @event: The name of the event  | 
|---|
 | 2750 | + *  | 
|---|
 | 2751 | + * Return a trace event file given the trace instance name, trace  | 
|---|
 | 2752 | + * system, and trace event name.  If the instance name is NULL, it  | 
|---|
 | 2753 | + * refers to the top-level trace array.  | 
|---|
 | 2754 | + *  | 
|---|
 | 2755 | + * This function will look it up and return it if found, after calling  | 
|---|
 | 2756 | + * trace_array_get() to prevent the instance from going away, and  | 
|---|
 | 2757 | + * increment the event's module refcount to prevent it from being  | 
|---|
 | 2758 | + * removed.  | 
|---|
 | 2759 | + *  | 
|---|
 | 2760 | + * To release the file, call trace_put_event_file(), which will call  | 
|---|
 | 2761 | + * trace_array_put() and decrement the event's module refcount.  | 
|---|
 | 2762 | + *  | 
|---|
 | 2763 | + * Return: The trace event on success, ERR_PTR otherwise.  | 
|---|
 | 2764 | + */  | 
|---|
 | 2765 | +struct trace_event_file *trace_get_event_file(const char *instance,  | 
|---|
 | 2766 | +					      const char *system,  | 
|---|
 | 2767 | +					      const char *event)  | 
|---|
 | 2768 | +{  | 
|---|
 | 2769 | +	struct trace_array *tr = top_trace_array();  | 
|---|
 | 2770 | +	struct trace_event_file *file = NULL;  | 
|---|
 | 2771 | +	int ret = -EINVAL;  | 
|---|
 | 2772 | +  | 
|---|
 | 2773 | +	if (instance) {  | 
|---|
 | 2774 | +		tr = trace_array_find_get(instance);  | 
|---|
 | 2775 | +		if (!tr)  | 
|---|
 | 2776 | +			return ERR_PTR(-ENOENT);  | 
|---|
 | 2777 | +	} else {  | 
|---|
 | 2778 | +		ret = trace_array_get(tr);  | 
|---|
 | 2779 | +		if (ret)  | 
|---|
 | 2780 | +			return ERR_PTR(ret);  | 
|---|
 | 2781 | +	}  | 
|---|
 | 2782 | +  | 
|---|
 | 2783 | +	mutex_lock(&event_mutex);  | 
|---|
 | 2784 | +  | 
|---|
 | 2785 | +	file = find_event_file(tr, system, event);  | 
|---|
 | 2786 | +	if (!file) {  | 
|---|
 | 2787 | +		trace_array_put(tr);  | 
|---|
 | 2788 | +		ret = -EINVAL;  | 
|---|
 | 2789 | +		goto out;  | 
|---|
 | 2790 | +	}  | 
|---|
 | 2791 | +  | 
|---|
 | 2792 | +	/* Don't let event modules unload while in use */  | 
|---|
 | 2793 | +	ret = try_module_get(file->event_call->mod);  | 
|---|
 | 2794 | +	if (!ret) {  | 
|---|
 | 2795 | +		trace_array_put(tr);  | 
|---|
 | 2796 | +		ret = -EBUSY;  | 
|---|
 | 2797 | +		goto out;  | 
|---|
 | 2798 | +	}  | 
|---|
 | 2799 | +  | 
|---|
 | 2800 | +	ret = 0;  | 
|---|
 | 2801 | + out:  | 
|---|
 | 2802 | +	mutex_unlock(&event_mutex);  | 
|---|
 | 2803 | +  | 
|---|
 | 2804 | +	if (ret)  | 
|---|
 | 2805 | +		file = ERR_PTR(ret);  | 
|---|
 | 2806 | +  | 
|---|
 | 2807 | +	return file;  | 
|---|
 | 2808 | +}  | 
|---|
 | 2809 | +EXPORT_SYMBOL_GPL(trace_get_event_file);  | 
|---|
 | 2810 | +  | 
|---|
 | 2811 | +/**  | 
|---|
 | 2812 | + * trace_put_event_file - Release a file from trace_get_event_file()  | 
|---|
 | 2813 | + * @file: The trace event file  | 
|---|
 | 2814 | + *  | 
|---|
 | 2815 | + * If a file was retrieved using trace_get_event_file(), this should  | 
|---|
 | 2816 | + * be called when it's no longer needed.  It will cancel the previous  | 
|---|
 | 2817 | + * trace_array_get() called by that function, and decrement the  | 
|---|
 | 2818 | + * event's module refcount.  | 
|---|
 | 2819 | + */  | 
|---|
 | 2820 | +void trace_put_event_file(struct trace_event_file *file)  | 
|---|
 | 2821 | +{  | 
|---|
 | 2822 | +	mutex_lock(&event_mutex);  | 
|---|
 | 2823 | +	module_put(file->event_call->mod);  | 
|---|
 | 2824 | +	mutex_unlock(&event_mutex);  | 
|---|
 | 2825 | +  | 
|---|
 | 2826 | +	trace_array_put(file->tr);  | 
|---|
 | 2827 | +}  | 
|---|
 | 2828 | +EXPORT_SYMBOL_GPL(trace_put_event_file);  | 
|---|
| 2544 | 2829 |   | 
|---|
| 2545 | 2830 |  #ifdef CONFIG_DYNAMIC_FTRACE | 
|---|
| 2546 | 2831 |   | 
|---|
| .. | .. | 
|---|
| 2866 | 3151 |  #endif /* CONFIG_DYNAMIC_FTRACE */ | 
|---|
| 2867 | 3152 |   | 
|---|
| 2868 | 3153 |  /* | 
|---|
| 2869 |  | - * The top level array has already had its trace_event_file  | 
|---|
| 2870 |  | - * descriptors created in order to allow for early events to  | 
|---|
| 2871 |  | - * be recorded. This function is called after the tracefs has been  | 
|---|
| 2872 |  | - * initialized, and we now have to create the files associated  | 
|---|
| 2873 |  | - * to the events.  | 
|---|
 | 3154 | + * The top level array and trace arrays created by boot-time tracing  | 
|---|
 | 3155 | + * have already had its trace_event_file descriptors created in order  | 
|---|
 | 3156 | + * to allow for early events to be recorded.  | 
|---|
 | 3157 | + * This function is called after the tracefs has been initialized,  | 
|---|
 | 3158 | + * and we now have to create the files associated to the events.  | 
|---|
| 2874 | 3159 |   */ | 
|---|
| 2875 |  | -static __init void  | 
|---|
| 2876 |  | -__trace_early_add_event_dirs(struct trace_array *tr)  | 
|---|
 | 3160 | +static void __trace_early_add_event_dirs(struct trace_array *tr)  | 
|---|
| 2877 | 3161 |  { | 
|---|
| 2878 | 3162 |  	struct trace_event_file *file; | 
|---|
| 2879 | 3163 |  	int ret; | 
|---|
| .. | .. | 
|---|
| 2888 | 3172 |  } | 
|---|
| 2889 | 3173 |   | 
|---|
| 2890 | 3174 |  /* | 
|---|
| 2891 |  | - * For early boot up, the top trace array requires to have  | 
|---|
| 2892 |  | - * a list of events that can be enabled. This must be done before  | 
|---|
| 2893 |  | - * the filesystem is set up in order to allow events to be traced  | 
|---|
| 2894 |  | - * early.  | 
|---|
 | 3175 | + * For early boot up, the top trace array and the trace arrays created  | 
|---|
 | 3176 | + * by boot-time tracing require to have a list of events that can be  | 
|---|
 | 3177 | + * enabled. This must be done before the filesystem is set up in order  | 
|---|
 | 3178 | + * to allow events to be traced early.  | 
|---|
| 2895 | 3179 |   */ | 
|---|
| 2896 |  | -static __init void  | 
|---|
| 2897 |  | -__trace_early_add_events(struct trace_array *tr)  | 
|---|
 | 3180 | +void __trace_early_add_events(struct trace_array *tr)  | 
|---|
| 2898 | 3181 |  { | 
|---|
| 2899 | 3182 |  	struct trace_event_call *call; | 
|---|
| 2900 | 3183 |  	int ret; | 
|---|
| .. | .. | 
|---|
| 2938 | 3221 |  { | 
|---|
| 2939 | 3222 |  	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE); | 
|---|
| 2940 | 3223 |  	ring_buffer_expanded = true; | 
|---|
| 2941 |  | -	tracing_selftest_disabled = true;  | 
|---|
 | 3224 | +	disable_tracing_selftest("running event tracing");  | 
|---|
| 2942 | 3225 |   | 
|---|
| 2943 | 3226 |  	return 1; | 
|---|
| 2944 | 3227 |  } | 
|---|
| .. | .. | 
|---|
| 2977 | 3260 |  				    tr, &ftrace_set_event_pid_fops); | 
|---|
| 2978 | 3261 |  	if (!entry) | 
|---|
| 2979 | 3262 |  		pr_warn("Could not create tracefs 'set_event_pid' entry\n"); | 
|---|
 | 3263 | +  | 
|---|
 | 3264 | +	entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,  | 
|---|
 | 3265 | +				    tr, &ftrace_set_event_notrace_pid_fops);  | 
|---|
 | 3266 | +	if (!entry)  | 
|---|
 | 3267 | +		pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");  | 
|---|
| 2980 | 3268 |   | 
|---|
| 2981 | 3269 |  	/* ring buffer internal formats */ | 
|---|
| 2982 | 3270 |  	entry = trace_create_file("header_page", 0444, d_events, | 
|---|
| .. | .. | 
|---|
| 3020 | 3308 |  		goto out; | 
|---|
| 3021 | 3309 |   | 
|---|
| 3022 | 3310 |  	down_write(&trace_event_sem); | 
|---|
| 3023 |  | -	__trace_add_event_dirs(tr);  | 
|---|
 | 3311 | +	/* If tr already has the event list, it is initialized in early boot. */  | 
|---|
 | 3312 | +	if (unlikely(!list_empty(&tr->events)))  | 
|---|
 | 3313 | +		__trace_early_add_event_dirs(tr);  | 
|---|
 | 3314 | +	else  | 
|---|
 | 3315 | +		__trace_add_event_dirs(tr);  | 
|---|
| 3024 | 3316 |  	up_write(&trace_event_sem); | 
|---|
| 3025 | 3317 |   | 
|---|
| 3026 | 3318 |   out: | 
|---|
| .. | .. | 
|---|
| 3061 | 3353 |  	clear_event_triggers(tr); | 
|---|
| 3062 | 3354 |   | 
|---|
| 3063 | 3355 |  	/* Clear the pid list */ | 
|---|
| 3064 |  | -	__ftrace_clear_event_pids(tr);  | 
|---|
 | 3356 | +	__ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);  | 
|---|
| 3065 | 3357 |   | 
|---|
| 3066 | 3358 |  	/* Disable any running events */ | 
|---|
| 3067 | 3359 |  	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); | 
|---|
| .. | .. | 
|---|
| 3071 | 3363 |   | 
|---|
| 3072 | 3364 |  	down_write(&trace_event_sem); | 
|---|
| 3073 | 3365 |  	__trace_remove_event_dirs(tr); | 
|---|
| 3074 |  | -	tracefs_remove_recursive(tr->event_dir);  | 
|---|
 | 3366 | +	tracefs_remove(tr->event_dir);  | 
|---|
| 3075 | 3367 |  	up_write(&trace_event_sem); | 
|---|
| 3076 | 3368 |   | 
|---|
| 3077 | 3369 |  	tr->event_dir = NULL; | 
|---|
| .. | .. | 
|---|
| 3176 | 3468 |   | 
|---|
| 3177 | 3469 |  early_initcall(event_trace_enable_again); | 
|---|
| 3178 | 3470 |   | 
|---|
 | 3471 | +/* Init fields which doesn't related to the tracefs */  | 
|---|
 | 3472 | +static __init int event_trace_init_fields(void)  | 
|---|
 | 3473 | +{  | 
|---|
 | 3474 | +	if (trace_define_generic_fields())  | 
|---|
 | 3475 | +		pr_warn("tracing: Failed to allocated generic fields");  | 
|---|
 | 3476 | +  | 
|---|
 | 3477 | +	if (trace_define_common_fields())  | 
|---|
 | 3478 | +		pr_warn("tracing: Failed to allocate common fields");  | 
|---|
 | 3479 | +  | 
|---|
 | 3480 | +	return 0;  | 
|---|
 | 3481 | +}  | 
|---|
 | 3482 | +  | 
|---|
| 3179 | 3483 |  __init int event_trace_init(void) | 
|---|
| 3180 | 3484 |  { | 
|---|
| 3181 | 3485 |  	struct trace_array *tr; | 
|---|
| 3182 |  | -	struct dentry *d_tracer;  | 
|---|
| 3183 | 3486 |  	struct dentry *entry; | 
|---|
| 3184 | 3487 |  	int ret; | 
|---|
| 3185 | 3488 |   | 
|---|
| .. | .. | 
|---|
| 3187 | 3490 |  	if (!tr) | 
|---|
| 3188 | 3491 |  		return -ENODEV; | 
|---|
| 3189 | 3492 |   | 
|---|
| 3190 |  | -	d_tracer = tracing_init_dentry();  | 
|---|
| 3191 |  | -	if (IS_ERR(d_tracer))  | 
|---|
| 3192 |  | -		return 0;  | 
|---|
| 3193 |  | -  | 
|---|
| 3194 |  | -	entry = tracefs_create_file("available_events", 0444, d_tracer,  | 
|---|
 | 3493 | +	entry = tracefs_create_file("available_events", 0444, NULL,  | 
|---|
| 3195 | 3494 |  				    tr, &ftrace_avail_fops); | 
|---|
| 3196 | 3495 |  	if (!entry) | 
|---|
| 3197 | 3496 |  		pr_warn("Could not create tracefs 'available_events' entry\n"); | 
|---|
| 3198 | 3497 |   | 
|---|
| 3199 |  | -	if (trace_define_generic_fields())  | 
|---|
| 3200 |  | -		pr_warn("tracing: Failed to allocated generic fields");  | 
|---|
| 3201 |  | -  | 
|---|
| 3202 |  | -	if (trace_define_common_fields())  | 
|---|
| 3203 |  | -		pr_warn("tracing: Failed to allocate common fields");  | 
|---|
| 3204 |  | -  | 
|---|
| 3205 |  | -	ret = early_event_add_tracer(d_tracer, tr);  | 
|---|
 | 3498 | +	ret = early_event_add_tracer(NULL, tr);  | 
|---|
| 3206 | 3499 |  	if (ret) | 
|---|
| 3207 | 3500 |  		return ret; | 
|---|
| 3208 | 3501 |   | 
|---|
| .. | .. | 
|---|
| 3211 | 3504 |  	if (ret) | 
|---|
| 3212 | 3505 |  		pr_warn("Failed to register trace events module notifier\n"); | 
|---|
| 3213 | 3506 |  #endif | 
|---|
 | 3507 | +  | 
|---|
 | 3508 | +	eventdir_initialized = true;  | 
|---|
 | 3509 | +  | 
|---|
| 3214 | 3510 |  	return 0; | 
|---|
| 3215 | 3511 |  } | 
|---|
| 3216 | 3512 |   | 
|---|
| .. | .. | 
|---|
| 3219 | 3515 |  	event_trace_memsetup(); | 
|---|
| 3220 | 3516 |  	init_ftrace_syscalls(); | 
|---|
| 3221 | 3517 |  	event_trace_enable(); | 
|---|
 | 3518 | +	event_trace_init_fields();  | 
|---|
| 3222 | 3519 |  } | 
|---|
| 3223 | 3520 |   | 
|---|
| 3224 |  | -#ifdef CONFIG_FTRACE_STARTUP_TEST  | 
|---|
 | 3521 | +#ifdef CONFIG_EVENT_TRACE_STARTUP_TEST  | 
|---|
| 3225 | 3522 |   | 
|---|
| 3226 | 3523 |  static DEFINE_SPINLOCK(test_spinlock); | 
|---|
| 3227 | 3524 |  static DEFINE_SPINLOCK(test_spinlock_irq); | 
|---|
| .. | .. | 
|---|
| 3398 | 3695 |  function_test_events_call(unsigned long ip, unsigned long parent_ip, | 
|---|
| 3399 | 3696 |  			  struct ftrace_ops *op, struct pt_regs *pt_regs) | 
|---|
| 3400 | 3697 |  { | 
|---|
 | 3698 | +	struct trace_buffer *buffer;  | 
|---|
| 3401 | 3699 |  	struct ring_buffer_event *event; | 
|---|
| 3402 |  | -	struct ring_buffer *buffer;  | 
|---|
| 3403 | 3700 |  	struct ftrace_entry *entry; | 
|---|
| 3404 |  | -	unsigned long flags;  | 
|---|
 | 3701 | +	unsigned int trace_ctx;  | 
|---|
| 3405 | 3702 |  	long disabled; | 
|---|
| 3406 | 3703 |  	int cpu; | 
|---|
| 3407 |  | -	int pc;  | 
|---|
| 3408 | 3704 |   | 
|---|
| 3409 |  | -	pc = preempt_count();  | 
|---|
 | 3705 | +	trace_ctx = tracing_gen_ctx();  | 
|---|
| 3410 | 3706 |  	preempt_disable_notrace(); | 
|---|
| 3411 | 3707 |  	cpu = raw_smp_processor_id(); | 
|---|
| 3412 | 3708 |  	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); | 
|---|
| .. | .. | 
|---|
| 3414 | 3710 |  	if (disabled != 1) | 
|---|
| 3415 | 3711 |  		goto out; | 
|---|
| 3416 | 3712 |   | 
|---|
| 3417 |  | -	local_save_flags(flags);  | 
|---|
| 3418 |  | -  | 
|---|
| 3419 | 3713 |  	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file, | 
|---|
| 3420 | 3714 |  						TRACE_FN, sizeof(*entry), | 
|---|
| 3421 |  | -						flags, pc);  | 
|---|
 | 3715 | +						trace_ctx);  | 
|---|
| 3422 | 3716 |  	if (!event) | 
|---|
| 3423 | 3717 |  		goto out; | 
|---|
| 3424 | 3718 |  	entry	= ring_buffer_event_data(event); | 
|---|
| .. | .. | 
|---|
| 3426 | 3720 |  	entry->parent_ip		= parent_ip; | 
|---|
| 3427 | 3721 |   | 
|---|
| 3428 | 3722 |  	event_trigger_unlock_commit(&event_trace_file, buffer, event, | 
|---|
| 3429 |  | -				    entry, flags, pc);  | 
|---|
 | 3723 | +				    entry, trace_ctx);  | 
|---|
| 3430 | 3724 |   out: | 
|---|
| 3431 | 3725 |  	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); | 
|---|
| 3432 | 3726 |  	preempt_enable_notrace(); | 
|---|