2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/kernel/trace/trace_events.c
@@ -12,6 +12,7 @@
 #define pr_fmt(fmt) fmt
 
 #include <linux/workqueue.h>
+#include <linux/security.h>
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
 #include <linux/tracefs.h>
@@ -23,6 +24,7 @@
 #include <linux/delay.h>
 
 #include <trace/events/sched.h>
+#include <trace/syscall.h>
 
 #include <asm/setup.h>
 
@@ -36,6 +38,7 @@
 LIST_HEAD(ftrace_events);
 static LIST_HEAD(ftrace_generic_fields);
 static LIST_HEAD(ftrace_common_fields);
+static bool eventdir_initialized;
 
 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
 
@@ -69,14 +72,6 @@
 
 #define while_for_each_event_file()		\
 	}
-
-static struct list_head *
-trace_get_fields(struct trace_event_call *event_call)
-{
-	if (!event_call->class->get_fields)
-		return &event_call->class->fields;
-	return event_call->class->get_fields(event_call);
-}
 
 static struct ftrace_event_field *
 __find_event_field(struct list_head *head, char *name)
@@ -173,6 +168,7 @@
 
 	__generic_field(int, CPU, FILTER_CPU);
 	__generic_field(int, cpu, FILTER_CPU);
+	__generic_field(int, common_cpu, FILTER_CPU);
 	__generic_field(char *, COMM, FILTER_COMM);
 	__generic_field(char *, comm, FILTER_COMM);
 
@@ -238,13 +234,16 @@
 {
 	struct trace_array *tr = trace_file->tr;
 	struct trace_array_cpu *data;
+	struct trace_pid_list *no_pid_list;
 	struct trace_pid_list *pid_list;
 
 	pid_list = rcu_dereference_raw(tr->filtered_pids);
-	if (!pid_list)
+	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);
+
+	if (!pid_list && !no_pid_list)
 		return false;
 
-	data = this_cpu_ptr(tr->trace_buffer.data);
+	data = this_cpu_ptr(tr->array_buffer.data);
 
 	return data->ignore_pid;
 }
@@ -263,12 +262,12 @@
 	local_save_flags(fbuffer->flags);
 	fbuffer->pc = preempt_count();
 	/*
-	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
+	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
 	 * preemption (adding one to the preempt_count). Since we are
 	 * interested in the preempt_count at the time the tracepoint was
 	 * hit, we need to subtract one to offset the increment.
 	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
+	if (IS_ENABLED(CONFIG_PREEMPTION))
 		fbuffer->pc--;
 	fbuffer->trace_file = trace_file;
 
@@ -279,6 +278,7 @@
 	if (!fbuffer->event)
 		return NULL;
 
+	fbuffer->regs = NULL;
 	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
 	return fbuffer->entry;
 }
@@ -371,7 +371,6 @@
 {
 	struct trace_event_call *call = file->event_call;
 	struct trace_array *tr = file->tr;
-	unsigned long file_flags = file->flags;
 	int ret = 0;
 	int disable;
 
@@ -395,6 +394,8 @@
 				break;
 			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
 			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+			/* Disable use of trace_buffered_event */
+			trace_buffered_event_disable();
 		} else
 			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
 
@@ -433,6 +434,8 @@
 			if (atomic_inc_return(&file->sm_ref) > 1)
 				break;
 			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
+			/* Enable use of trace_buffered_event */
+			trace_buffered_event_enable();
 		}
 
 		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
@@ -472,15 +475,6 @@
 		break;
 	}
 
-	/* Enable or disable use of trace_buffered_event */
-	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
-	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
-		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
-			trace_buffered_event_enable();
-		else
-			trace_buffered_event_disable();
-	}
-
 	return ret;
 }
 
@@ -515,6 +509,9 @@
 
 	pid_list = rcu_dereference_raw(tr->filtered_pids);
 	trace_filter_add_remove_task(pid_list, NULL, task);
+
+	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
+	trace_filter_add_remove_task(pid_list, NULL, task);
 }
 
 static void
@@ -526,6 +523,9 @@
 	struct trace_array *tr = data;
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	trace_filter_add_remove_task(pid_list, self, task);
+
+	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 	trace_filter_add_remove_task(pid_list, self, task);
 }
 
@@ -549,13 +549,23 @@
 		    struct task_struct *prev, struct task_struct *next)
 {
 	struct trace_array *tr = data;
+	struct trace_pid_list *no_pid_list;
 	struct trace_pid_list *pid_list;
+	bool ret;
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       trace_ignore_this_task(pid_list, prev) &&
-		       trace_ignore_this_task(pid_list, next));
+	/*
+	 * Sched switch is funny, as we only want to ignore it
+	 * in the notrace case if both prev and next should be ignored.
+	 */
+	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
+	      trace_ignore_this_task(NULL, no_pid_list, next);
+
+	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
+		       (trace_ignore_this_task(pid_list, NULL, prev) &&
+			trace_ignore_this_task(pid_list, NULL, next)));
 }
 
 static void
@@ -563,58 +573,55 @@
 		   struct task_struct *prev, struct task_struct *next)
 {
 	struct trace_array *tr = data;
+	struct trace_pid_list *no_pid_list;
 	struct trace_pid_list *pid_list;
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       trace_ignore_this_task(pid_list, next));
+	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
 }
 
 static void
 event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
 {
 	struct trace_array *tr = data;
+	struct trace_pid_list *no_pid_list;
 	struct trace_pid_list *pid_list;
 
 	/* Nothing to do if we are already tracing */
-	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
 		return;
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       trace_ignore_this_task(pid_list, task));
+	this_cpu_write(tr->array_buffer.data->ignore_pid,
+		       trace_ignore_this_task(pid_list, no_pid_list, task));
 }
 
 static void
 event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
 {
 	struct trace_array *tr = data;
+	struct trace_pid_list *no_pid_list;
 	struct trace_pid_list *pid_list;
 
 	/* Nothing to do if we are not tracing */
-	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
 		return;
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
 	/* Set tracing if current is enabled */
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       trace_ignore_this_task(pid_list, current));
+	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
 }
 
-static void __ftrace_clear_event_pids(struct trace_array *tr)
+static void unregister_pid_events(struct trace_array *tr)
 {
-	struct trace_pid_list *pid_list;
-	struct trace_event_file *file;
-	int cpu;
-
-	pid_list = rcu_dereference_protected(tr->filtered_pids,
-					     lockdep_is_held(&event_mutex));
-	if (!pid_list)
-		return;
-
 	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
 	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
 
@@ -626,26 +633,55 @@
 
 	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
 	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
+}
 
-	list_for_each_entry(file, &tr->events, list) {
-		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
+{
+	struct trace_pid_list *pid_list;
+	struct trace_pid_list *no_pid_list;
+	struct trace_event_file *file;
+	int cpu;
+
+	pid_list = rcu_dereference_protected(tr->filtered_pids,
+					     lockdep_is_held(&event_mutex));
+	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
+					     lockdep_is_held(&event_mutex));
+
+	/* Make sure there's something to do */
+	if (!pid_type_enabled(type, pid_list, no_pid_list))
+		return;
+
+	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
+		unregister_pid_events(tr);
+
+		list_for_each_entry(file, &tr->events, list) {
+			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+		}
+
+		for_each_possible_cpu(cpu)
+			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
 	}
 
-	for_each_possible_cpu(cpu)
-		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+	if (type & TRACE_PIDS)
+		rcu_assign_pointer(tr->filtered_pids, NULL);
 
-	rcu_assign_pointer(tr->filtered_pids, NULL);
+	if (type & TRACE_NO_PIDS)
+		rcu_assign_pointer(tr->filtered_no_pids, NULL);
 
 	/* Wait till all users are no longer using pid filtering */
 	tracepoint_synchronize_unregister();
 
-	trace_free_pid_list(pid_list);
+	if ((type & TRACE_PIDS) && pid_list)
+		trace_free_pid_list(pid_list);
+
+	if ((type & TRACE_NO_PIDS) && no_pid_list)
+		trace_free_pid_list(no_pid_list);
 }
 
-static void ftrace_clear_event_pids(struct trace_array *tr)
+static void ftrace_clear_event_pids(struct trace_array *tr, int type)
 {
 	mutex_lock(&event_mutex);
-	__ftrace_clear_event_pids(tr);
+	__ftrace_clear_event_pids(tr, type);
 	mutex_unlock(&event_mutex);
 }
 
@@ -704,7 +740,7 @@
 		return;
 
 	if (!--dir->nr_events) {
-		tracefs_remove_recursive(dir->entry);
+		tracefs_remove(dir->entry);
 		list_del(&dir->list);
 		__put_system_dir(dir);
 	}
@@ -723,7 +759,7 @@
 		}
 		spin_unlock(&dir->d_lock);
 
-		tracefs_remove_recursive(dir);
+		tracefs_remove(dir);
 	}
 
 	list_del(&file->list);
@@ -795,7 +831,7 @@
 	return ret;
 }
 
-static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
 {
 	char *event = NULL, *sub = NULL, *match;
 	int ret;
@@ -857,6 +893,32 @@
 	return __ftrace_set_clr_event(tr, NULL, system, event, set);
 }
 EXPORT_SYMBOL_GPL(trace_set_clr_event);
+
+/**
+ * trace_array_set_clr_event - enable or disable an event for a trace array.
+ * @tr: concerned trace array.
+ * @system: system name to match (NULL for any system)
+ * @event: event name to match (NULL for all events, within system)
+ * @enable: true to enable, false to disable
+ *
+ * This is a way for other parts of the kernel to enable or disable
+ * event recording.
+ *
+ * Returns 0 on success, -EINVAL if the parameters do not match any
+ * registered events.
+ */
+int trace_array_set_clr_event(struct trace_array *tr, const char *system,
+		const char *event, bool enable)
+{
+	int set;
+
+	if (!tr)
+		return -ENOENT;
+
+	set = (enable == true) ? 1 : 0;
+	return __ftrace_set_clr_event(tr, NULL, system, event, set);
+}
+EXPORT_SYMBOL_GPL(trace_array_set_clr_event);
 
 /* 128 should be much more than enough */
 #define EVENT_BUF_SIZE		127
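
Note: the hunk above exports trace_array_set_clr_event() so that other parts
of the kernel can enable or disable event recording in a specific trace
array. A minimal sketch of a caller follows; it is not part of the diff, it
assumes trace_array_get_by_name()/trace_array_put() are available in this
tree, and the instance name is made up:

	/* Hypothetical in-kernel user of trace_array_set_clr_event() */
	static int example_enable_sched_switch(void)
	{
		struct trace_array *tr;
		int ret;

		tr = trace_array_get_by_name("my_instance"); /* name is illustrative */
		if (!tr)
			return -ENOENT;

		/* Enable recording of sched:sched_switch in that instance */
		ret = trace_array_set_clr_event(tr, "sched", "sched_switch", true);

		trace_array_put(tr); /* drop the reference taken above */
		return ret;
	}
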
@@ -992,15 +1054,32 @@
 }
 
 static void *
-p_next(struct seq_file *m, void *v, loff_t *pos)
+__next(struct seq_file *m, void *v, loff_t *pos, int type)
 {
 	struct trace_array *tr = m->private;
-	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
+	struct trace_pid_list *pid_list;
+
+	if (type == TRACE_PIDS)
+		pid_list = rcu_dereference_sched(tr->filtered_pids);
+	else
+		pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
 	return trace_pid_next(pid_list, v, pos);
 }
 
-static void *p_start(struct seq_file *m, loff_t *pos)
+static void *
+p_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return __next(m, v, pos, TRACE_PIDS);
+}
+
+static void *
+np_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return __next(m, v, pos, TRACE_NO_PIDS);
+}
+
+static void *__start(struct seq_file *m, loff_t *pos, int type)
 	__acquires(RCU)
 {
 	struct trace_pid_list *pid_list;
@@ -1015,12 +1094,27 @@
 	mutex_lock(&event_mutex);
 	rcu_read_lock_sched();
 
-	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	if (type == TRACE_PIDS)
+		pid_list = rcu_dereference_sched(tr->filtered_pids);
+	else
+		pid_list = rcu_dereference_sched(tr->filtered_no_pids);
 
 	if (!pid_list)
 		return NULL;
 
 	return trace_pid_start(pid_list, pos);
+}
+
+static void *p_start(struct seq_file *m, loff_t *pos)
+	__acquires(RCU)
+{
+	return __start(m, pos, TRACE_PIDS);
+}
+
+static void *np_start(struct seq_file *m, loff_t *pos)
+	__acquires(RCU)
+{
+	return __start(m, pos, TRACE_NO_PIDS);
 }
 
 static void p_stop(struct seq_file *m, void *p)
@@ -1254,7 +1348,7 @@
 	 */
 	array_descriptor = strchr(field->type, '[');
 
-	if (!strncmp(field->type, "__data_loc", 10))
+	if (str_has_prefix(field->type, "__data_loc"))
 		array_descriptor = NULL;
 
 	if (!array_descriptor)
@@ -1303,6 +1397,8 @@
 {
 	struct seq_file *m;
 	int ret;
+
+	/* Do we want to hide event format files on tracefs lockdown? */
 
 	ret = seq_open(file, &trace_format_seq_ops);
 	if (ret < 0)
@@ -1450,28 +1546,17 @@
 	struct trace_array *tr = inode->i_private;
 	int ret;
 
-	if (tracing_is_disabled())
-		return -ENODEV;
-
-	if (trace_array_get(tr) < 0)
-		return -ENODEV;
-
 	/* Make a temporary dir that has no system but points to tr */
 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
-	if (!dir) {
-		trace_array_put(tr);
+	if (!dir)
 		return -ENOMEM;
-	}
 
-	dir->tr = tr;
-
-	ret = tracing_open_generic(inode, filp);
+	ret = tracing_open_generic_tr(inode, filp);
 	if (ret < 0) {
-		trace_array_put(tr);
 		kfree(dir);
 		return ret;
 	}
-
+	dir->tr = tr;
 	filp->private_data = dir;
 
 	return 0;
@@ -1577,6 +1662,7 @@
 {
 	struct trace_array *tr = data;
 	struct trace_pid_list *pid_list;
+	struct trace_pid_list *no_pid_list;
 
 	/*
 	 * This function is called by on_each_cpu() while the
@@ -1584,18 +1670,50 @@
 	 */
 	pid_list = rcu_dereference_protected(tr->filtered_pids,
 					     mutex_is_locked(&event_mutex));
+	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
+					     mutex_is_locked(&event_mutex));
 
-	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       trace_ignore_this_task(pid_list, current));
+	this_cpu_write(tr->array_buffer.data->ignore_pid,
+		       trace_ignore_this_task(pid_list, no_pid_list, current));
+}
+
+static void register_pid_events(struct trace_array *tr)
+{
+	/*
+	 * Register a probe that is called before all other probes
+	 * to set ignore_pid if next or prev do not match.
+	 * Register a probe this is called after all other probes
+	 * to only keep ignore_pid set if next pid matches.
+	 */
+	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
+					 tr, INT_MAX);
+	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
+					 tr, 0);
+
+	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
+					 tr, INT_MAX);
+	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
+					 tr, 0);
+
+	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
+					     tr, INT_MAX);
+	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
+					     tr, 0);
+
+	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
+					 tr, INT_MAX);
+	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
+					 tr, 0);
 }
 
 static ssize_t
-ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
+event_pid_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos, int type)
 {
 	struct seq_file *m = filp->private_data;
 	struct trace_array *tr = m->private;
 	struct trace_pid_list *filtered_pids = NULL;
+	struct trace_pid_list *other_pids = NULL;
 	struct trace_pid_list *pid_list;
 	struct trace_event_file *file;
 	ssize_t ret;
@@ -1609,14 +1727,26 @@
 
 	mutex_lock(&event_mutex);
 
-	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
-					     lockdep_is_held(&event_mutex));
+	if (type == TRACE_PIDS) {
+		filtered_pids = rcu_dereference_protected(tr->filtered_pids,
+							  lockdep_is_held(&event_mutex));
+		other_pids = rcu_dereference_protected(tr->filtered_no_pids,
+							  lockdep_is_held(&event_mutex));
+	} else {
+		filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
+							  lockdep_is_held(&event_mutex));
+		other_pids = rcu_dereference_protected(tr->filtered_pids,
+							  lockdep_is_held(&event_mutex));
+	}
 
 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
 	if (ret < 0)
 		goto out;
 
-	rcu_assign_pointer(tr->filtered_pids, pid_list);
+	if (type == TRACE_PIDS)
+		rcu_assign_pointer(tr->filtered_pids, pid_list);
+	else
+		rcu_assign_pointer(tr->filtered_no_pids, pid_list);
 
 	list_for_each_entry(file, &tr->events, list) {
 		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
@@ -1625,32 +1755,8 @@
 	if (filtered_pids) {
 		tracepoint_synchronize_unregister();
 		trace_free_pid_list(filtered_pids);
-	} else if (pid_list) {
-		/*
-		 * Register a probe that is called before all other probes
-		 * to set ignore_pid if next or prev do not match.
-		 * Register a probe this is called after all other probes
-		 * to only keep ignore_pid set if next pid matches.
-		 */
-		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
-						 tr, INT_MAX);
-		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
-						 tr, 0);
-
-		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
-						 tr, INT_MAX);
-		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
-						 tr, 0);
-
-		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
-						     tr, INT_MAX);
-		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
-						     tr, 0);
-
-		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
-						 tr, INT_MAX);
-		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
-						 tr, 0);
+	} else if (pid_list && !other_pids) {
+		register_pid_events(tr);
 	}
 
 	/*
@@ -1669,9 +1775,24 @@
 	return ret;
 }
 
+static ssize_t
+ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
+}
+
+static ssize_t
+ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
+			size_t cnt, loff_t *ppos)
+{
+	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
+}
+
 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
 static int ftrace_event_set_open(struct inode *inode, struct file *file);
 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
 static int ftrace_event_release(struct inode *inode, struct file *file);
 
 static const struct seq_operations show_event_seq_ops = {
@@ -1691,6 +1812,13 @@
 static const struct seq_operations show_set_pid_seq_ops = {
 	.start = p_start,
 	.next = p_next,
+	.show = trace_pid_show,
+	.stop = p_stop,
+};
+
+static const struct seq_operations show_set_no_pid_seq_ops = {
+	.start = np_start,
+	.next = np_next,
 	.show = trace_pid_show,
 	.stop = p_stop,
 };
@@ -1718,10 +1846,19 @@
 	.release = ftrace_event_release,
 };
 
+static const struct file_operations ftrace_set_event_notrace_pid_fops = {
+	.open = ftrace_event_set_npid_open,
+	.read = seq_read,
+	.write = ftrace_event_npid_write,
+	.llseek = seq_lseek,
+	.release = ftrace_event_release,
+};
+
 static const struct file_operations ftrace_enable_fops = {
-	.open = tracing_open_generic,
+	.open = tracing_open_file_tr,
 	.read = event_enable_read,
 	.write = event_enable_write,
+	.release = tracing_release_file_tr,
 	.llseek = default_llseek,
 };
 
@@ -1738,9 +1875,10 @@
 };
 
 static const struct file_operations ftrace_event_filter_fops = {
-	.open = tracing_open_generic,
+	.open = tracing_open_file_tr,
 	.read = event_filter_read,
 	.write = event_filter_write,
+	.release = tracing_release_file_tr,
 	.llseek = default_llseek,
 };
 
@@ -1781,6 +1919,10 @@
 	struct seq_file *m;
 	int ret;
 
+	ret = security_locked_down(LOCKDOWN_TRACEFS);
+	if (ret)
+		return ret;
+
 	ret = seq_open(file, seq_ops);
 	if (ret < 0)
 		return ret;
@@ -1805,6 +1947,7 @@
 {
 	const struct seq_operations *seq_ops = &show_event_seq_ops;
 
+	/* Checks for tracefs lockdown */
 	return ftrace_event_open(inode, file, seq_ops);
 }
 
@@ -1815,8 +1958,9 @@
 	struct trace_array *tr = inode->i_private;
 	int ret;
 
-	if (trace_array_get(tr) < 0)
-		return -ENODEV;
+	ret = tracing_check_open_get_tr(tr);
+	if (ret)
+		return ret;
 
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
@@ -1835,12 +1979,34 @@
 	struct trace_array *tr = inode->i_private;
 	int ret;
 
-	if (trace_array_get(tr) < 0)
-		return -ENODEV;
+	ret = tracing_check_open_get_tr(tr);
+	if (ret)
+		return ret;
 
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
-		ftrace_clear_event_pids(tr);
+		ftrace_clear_event_pids(tr, TRACE_PIDS);
+
+	ret = ftrace_event_open(inode, file, seq_ops);
+	if (ret < 0)
+		trace_array_put(tr);
+	return ret;
+}
+
+static int
+ftrace_event_set_npid_open(struct inode *inode, struct file *file)
+{
+	const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
+	struct trace_array *tr = inode->i_private;
+	int ret;
+
+	ret = tracing_check_open_get_tr(tr);
+	if (ret)
+		return ret;
+
+	if ((file->f_mode & FMODE_WRITE) &&
+	    (file->f_flags & O_TRUNC))
+		ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
 
 	ret = ftrace_event_open(inode, file, seq_ops);
 	if (ret < 0)
@@ -1957,11 +2123,47 @@
 }
 
 static int
+event_define_fields(struct trace_event_call *call)
+{
+	struct list_head *head;
+	int ret = 0;
+
+	/*
+	 * Other events may have the same class. Only update
+	 * the fields if they are not already defined.
+	 */
+	head = trace_get_fields(call);
+	if (list_empty(head)) {
+		struct trace_event_fields *field = call->class->fields_array;
+		unsigned int offset = sizeof(struct trace_entry);
+
+		for (; field->type; field++) {
+			if (field->type == TRACE_FUNCTION_TYPE) {
+				field->define_fields(call);
+				break;
+			}
+
+			offset = ALIGN(offset, field->align);
+			ret = trace_define_field(call, field->type, field->name,
+						 offset, field->size,
+						 field->is_signed, field->filter_type);
+			if (WARN_ON_ONCE(ret)) {
+				pr_err("error code is %d\n", ret);
+				break;
+			}
+
+			offset += field->size;
+		}
+	}
+
+	return ret;
+}
+
+static int
 event_create_dir(struct dentry *parent, struct trace_event_file *file)
 {
 	struct trace_event_call *call = file->event_call;
 	struct trace_array *tr = file->tr;
-	struct list_head *head;
 	struct dentry *d_events;
 	const char *name;
 	int ret;
19952197 &ftrace_event_id_fops);
19962198 #endif
19972199
1998
- /*
1999
- * Other events may have the same class. Only update
2000
- * the fields if they are not already defined.
2001
- */
2002
- head = trace_get_fields(call);
2003
- if (list_empty(head)) {
2004
- ret = call->class->define_fields(call);
2005
- if (ret < 0) {
2006
- pr_warn("Could not initialize trace point events/%s\n",
2007
- name);
2008
- return -1;
2009
- }
2200
+ ret = event_define_fields(call);
2201
+ if (ret < 0) {
2202
+ pr_warn("Could not initialize trace point events/%s\n", name);
2203
+ return ret;
20102204 }
20112205
20122206 /*
....@@ -2025,8 +2219,18 @@
20252219 trace_create_file("hist", 0444, file->dir, file,
20262220 &event_hist_fops);
20272221 #endif
2222
+#ifdef CONFIG_HIST_TRIGGERS_DEBUG
2223
+ trace_create_file("hist_debug", 0444, file->dir, file,
2224
+ &event_hist_debug_fops);
2225
+#endif
20282226 trace_create_file("format", 0444, file->dir, call,
20292227 &ftrace_event_format_fops);
2228
+
2229
+#ifdef CONFIG_TRACE_EVENT_INJECT
2230
+ if (call->event.type && call->class->reg)
2231
+ trace_create_file("inject", 0200, file->dir, file,
2232
+ &event_inject_fops);
2233
+#endif
20302234
20312235 return 0;
20322236 }
....@@ -2255,6 +2459,7 @@
22552459 trace_create_new_event(struct trace_event_call *call,
22562460 struct trace_array *tr)
22572461 {
2462
+ struct trace_pid_list *no_pid_list;
22582463 struct trace_pid_list *pid_list;
22592464 struct trace_event_file *file;
22602465
....@@ -2264,8 +2469,10 @@
22642469
22652470 pid_list = rcu_dereference_protected(tr->filtered_pids,
22662471 lockdep_is_held(&event_mutex));
2472
+ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
2473
+ lockdep_is_held(&event_mutex));
22672474
2268
- if (pid_list)
2475
+ if (pid_list || no_pid_list)
22692476 file->flags |= EVENT_FILE_FL_PID_FILTER;
22702477
22712478 file->event_call = call;
....@@ -2288,7 +2495,10 @@
22882495 if (!file)
22892496 return -ENOMEM;
22902497
2291
- return event_create_dir(tr->event_dir, file);
2498
+ if (eventdir_initialized)
2499
+ return event_create_dir(tr->event_dir, file);
2500
+ else
2501
+ return event_define_fields(call);
22922502 }
22932503
22942504 /*
....@@ -2296,7 +2506,7 @@
22962506 * for enabling events at boot. We want to enable events before
22972507 * the filesystem is initialized.
22982508 */
2299
-static __init int
2509
+static int
23002510 __trace_early_add_new_event(struct trace_event_call *call,
23012511 struct trace_array *tr)
23022512 {
....@@ -2306,13 +2516,14 @@
23062516 if (!file)
23072517 return -ENOMEM;
23082518
2309
- return 0;
2519
+ return event_define_fields(call);
23102520 }
23112521
23122522 struct ftrace_module_file_ops;
23132523 static void __add_event_to_tracers(struct trace_event_call *call);
23142524
2315
-int trace_add_event_call_nolock(struct trace_event_call *call)
2525
+/* Add an additional event_call dynamically */
2526
+int trace_add_event_call(struct trace_event_call *call)
23162527 {
23172528 int ret;
23182529 lockdep_assert_held(&event_mutex);
....@@ -2324,17 +2535,6 @@
23242535 __add_event_to_tracers(call);
23252536
23262537 mutex_unlock(&trace_types_lock);
2327
- return ret;
2328
-}
2329
-
2330
-/* Add an additional event_call dynamically */
2331
-int trace_add_event_call(struct trace_event_call *call)
2332
-{
2333
- int ret;
2334
-
2335
- mutex_lock(&event_mutex);
2336
- ret = trace_add_event_call_nolock(call);
2337
- mutex_unlock(&event_mutex);
23382538 return ret;
23392539 }
23402540
....@@ -2368,7 +2568,10 @@
23682568 * TRACE_REG_UNREGISTER.
23692569 */
23702570 if (file->flags & EVENT_FILE_FL_ENABLED)
2371
- return -EBUSY;
2571
+ goto busy;
2572
+
2573
+ if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2574
+ tr->clear_trace = true;
23722575 /*
23732576 * The do_for_each_event_file_safe() is
23742577 * a double loop. After finding the call for this
....@@ -2381,10 +2584,16 @@
23812584 __trace_remove_event_call(call);
23822585
23832586 return 0;
2587
+ busy:
2588
+ /* No need to clear the trace now */
2589
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2590
+ tr->clear_trace = false;
2591
+ }
2592
+ return -EBUSY;
23842593 }
23852594
2386
-/* no event_mutex version */
2387
-int trace_remove_event_call_nolock(struct trace_event_call *call)
2595
+/* Remove an event_call */
2596
+int trace_remove_event_call(struct trace_event_call *call)
23882597 {
23892598 int ret;
23902599
....@@ -2395,18 +2604,6 @@
23952604 ret = probe_remove_event_call(call);
23962605 up_write(&trace_event_sem);
23972606 mutex_unlock(&trace_types_lock);
2398
-
2399
- return ret;
2400
-}
2401
-
2402
-/* Remove an event_call */
2403
-int trace_remove_event_call(struct trace_event_call *call)
2404
-{
2405
- int ret;
2406
-
2407
- mutex_lock(&event_mutex);
2408
- ret = trace_remove_event_call_nolock(call);
2409
- mutex_unlock(&event_mutex);
24102607
24112608 return ret;
24122609 }
....@@ -2460,7 +2657,7 @@
24602657 * over from this module may be passed to the new module events and
24612658 * unexpected results may occur.
24622659 */
2463
- tracing_reset_all_online_cpus();
2660
+ tracing_reset_all_online_cpus_unlocked();
24642661 }
24652662
24662663 static int trace_module_notify(struct notifier_block *self,
....@@ -2481,7 +2678,7 @@
24812678 mutex_unlock(&trace_types_lock);
24822679 mutex_unlock(&event_mutex);
24832680
2484
- return 0;
2681
+ return NOTIFY_OK;
24852682 }
24862683
24872684 static struct notifier_block trace_module_nb = {
....@@ -2541,6 +2738,91 @@
25412738
25422739 return file;
25432740 }
2741
+
2742
+/**
2743
+ * trace_get_event_file - Find and return a trace event file
2744
+ * @instance: The name of the trace instance containing the event
2745
+ * @system: The name of the system containing the event
2746
+ * @event: The name of the event
2747
+ *
2748
+ * Return a trace event file given the trace instance name, trace
2749
+ * system, and trace event name. If the instance name is NULL, it
2750
+ * refers to the top-level trace array.
2751
+ *
2752
+ * This function will look it up and return it if found, after calling
2753
+ * trace_array_get() to prevent the instance from going away, and
2754
+ * increment the event's module refcount to prevent it from being
2755
+ * removed.
2756
+ *
2757
+ * To release the file, call trace_put_event_file(), which will call
2758
+ * trace_array_put() and decrement the event's module refcount.
2759
+ *
2760
+ * Return: The trace event on success, ERR_PTR otherwise.
2761
+ */
2762
+struct trace_event_file *trace_get_event_file(const char *instance,
2763
+ const char *system,
2764
+ const char *event)
2765
+{
2766
+ struct trace_array *tr = top_trace_array();
2767
+ struct trace_event_file *file = NULL;
2768
+ int ret = -EINVAL;
2769
+
2770
+ if (instance) {
2771
+ tr = trace_array_find_get(instance);
2772
+ if (!tr)
2773
+ return ERR_PTR(-ENOENT);
2774
+ } else {
2775
+ ret = trace_array_get(tr);
2776
+ if (ret)
2777
+ return ERR_PTR(ret);
2778
+ }
2779
+
2780
+ mutex_lock(&event_mutex);
2781
+
2782
+ file = find_event_file(tr, system, event);
2783
+ if (!file) {
2784
+ trace_array_put(tr);
2785
+ ret = -EINVAL;
2786
+ goto out;
2787
+ }
2788
+
2789
+ /* Don't let event modules unload while in use */
2790
+ ret = try_module_get(file->event_call->mod);
2791
+ if (!ret) {
2792
+ trace_array_put(tr);
2793
+ ret = -EBUSY;
2794
+ goto out;
2795
+ }
2796
+
2797
+ ret = 0;
2798
+ out:
2799
+ mutex_unlock(&event_mutex);
2800
+
2801
+ if (ret)
2802
+ file = ERR_PTR(ret);
2803
+
2804
+ return file;
2805
+}
2806
+EXPORT_SYMBOL_GPL(trace_get_event_file);
2807
+
2808
+/**
2809
+ * trace_put_event_file - Release a file from trace_get_event_file()
2810
+ * @file: The trace event file
2811
+ *
2812
+ * If a file was retrieved using trace_get_event_file(), this should
2813
+ * be called when it's no longer needed. It will cancel the previous
2814
+ * trace_array_get() called by that function, and decrement the
2815
+ * event's module refcount.
2816
+ */
2817
+void trace_put_event_file(struct trace_event_file *file)
2818
+{
2819
+ mutex_lock(&event_mutex);
2820
+ module_put(file->event_call->mod);
2821
+ mutex_unlock(&event_mutex);
2822
+
2823
+ trace_array_put(file->tr);
2824
+}
2825
+EXPORT_SYMBOL_GPL(trace_put_event_file);
25442826
25452827 #ifdef CONFIG_DYNAMIC_FTRACE
25462828
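
Note: trace_get_event_file()/trace_put_event_file() above give kernel
modules a refcounted handle on an event file. A minimal sketch of the
intended get/use/put pattern, following the kernel-doc in the hunk (the
system and event names are illustrative):

	static int example_use_event_file(void)
	{
		struct trace_event_file *file;

		/* NULL instance means the top-level trace array */
		file = trace_get_event_file(NULL, "sched", "sched_switch");
		if (IS_ERR(file))
			return PTR_ERR(file);

		/* ... use the file while the refcounts pin it ... */

		trace_put_event_file(file);
		return 0;
	}
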
@@ -2866,14 +3148,13 @@
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /*
- * The top level array has already had its trace_event_file
- * descriptors created in order to allow for early events to
- * be recorded. This function is called after the tracefs has been
- * initialized, and we now have to create the files associated
- * to the events.
+ * The top level array and trace arrays created by boot-time tracing
+ * have already had its trace_event_file descriptors created in order
+ * to allow for early events to be recorded.
+ * This function is called after the tracefs has been initialized,
+ * and we now have to create the files associated to the events.
 */
-static __init void
-__trace_early_add_event_dirs(struct trace_array *tr)
+static void __trace_early_add_event_dirs(struct trace_array *tr)
 {
 	struct trace_event_file *file;
 	int ret;
@@ -2888,13 +3169,12 @@
 }
 
 /*
- * For early boot up, the top trace array requires to have
- * a list of events that can be enabled. This must be done before
- * the filesystem is set up in order to allow events to be traced
- * early.
+ * For early boot up, the top trace array and the trace arrays created
+ * by boot-time tracing require to have a list of events that can be
+ * enabled. This must be done before the filesystem is set up in order
+ * to allow events to be traced early.
 */
-static __init void
-__trace_early_add_events(struct trace_array *tr)
+void __trace_early_add_events(struct trace_array *tr)
 {
 	struct trace_event_call *call;
 	int ret;
@@ -2938,7 +3218,7 @@
 {
 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
 	ring_buffer_expanded = true;
-	tracing_selftest_disabled = true;
+	disable_tracing_selftest("running event tracing");
 
 	return 1;
 }
@@ -2977,6 +3257,11 @@
 			    tr, &ftrace_set_event_pid_fops);
 	if (!entry)
 		pr_warn("Could not create tracefs 'set_event_pid' entry\n");
+
+	entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,
+				    tr, &ftrace_set_event_notrace_pid_fops);
+	if (!entry)
+		pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");
 
 	/* ring buffer internal formats */
 	entry = trace_create_file("header_page", 0444, d_events,
@@ -3020,7 +3305,11 @@
 		goto out;
 
 	down_write(&trace_event_sem);
-	__trace_add_event_dirs(tr);
+	/* If tr already has the event list, it is initialized in early boot. */
+	if (unlikely(!list_empty(&tr->events)))
+		__trace_early_add_event_dirs(tr);
+	else
+		__trace_add_event_dirs(tr);
 	up_write(&trace_event_sem);
 
 out:
@@ -3061,7 +3350,7 @@
 	clear_event_triggers(tr);
 
 	/* Clear the pid list */
-	__ftrace_clear_event_pids(tr);
+	__ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
 
 	/* Disable any running events */
 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
@@ -3071,7 +3360,7 @@
 
 	down_write(&trace_event_sem);
 	__trace_remove_event_dirs(tr);
-	tracefs_remove_recursive(tr->event_dir);
+	tracefs_remove(tr->event_dir);
 	up_write(&trace_event_sem);
 
 	tr->event_dir = NULL;
@@ -3176,10 +3465,21 @@
 
 early_initcall(event_trace_enable_again);
 
+/* Init fields which doesn't related to the tracefs */
+static __init int event_trace_init_fields(void)
+{
+	if (trace_define_generic_fields())
+		pr_warn("tracing: Failed to allocated generic fields");
+
+	if (trace_define_common_fields())
+		pr_warn("tracing: Failed to allocate common fields");
+
+	return 0;
+}
+
 __init int event_trace_init(void)
 {
 	struct trace_array *tr;
-	struct dentry *d_tracer;
 	struct dentry *entry;
 	int ret;
 
@@ -3187,22 +3487,12 @@
 	if (!tr)
 		return -ENODEV;
 
-	d_tracer = tracing_init_dentry();
-	if (IS_ERR(d_tracer))
-		return 0;
-
-	entry = tracefs_create_file("available_events", 0444, d_tracer,
+	entry = tracefs_create_file("available_events", 0444, NULL,
 				    tr, &ftrace_avail_fops);
 	if (!entry)
 		pr_warn("Could not create tracefs 'available_events' entry\n");
 
-	if (trace_define_generic_fields())
-		pr_warn("tracing: Failed to allocated generic fields");
-
-	if (trace_define_common_fields())
-		pr_warn("tracing: Failed to allocate common fields");
-
-	ret = early_event_add_tracer(d_tracer, tr);
+	ret = early_event_add_tracer(NULL, tr);
 	if (ret)
 		return ret;
 
@@ -3211,6 +3501,9 @@
 	if (ret)
 		pr_warn("Failed to register trace events module notifier\n");
 #endif
+
+	eventdir_initialized = true;
+
 	return 0;
 }
 
@@ -3219,9 +3512,10 @@
 	event_trace_memsetup();
 	init_ftrace_syscalls();
 	event_trace_enable();
+	event_trace_init_fields();
 }
 
-#ifdef CONFIG_FTRACE_STARTUP_TEST
+#ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
 
 static DEFINE_SPINLOCK(test_spinlock);
 static DEFINE_SPINLOCK(test_spinlock_irq);
@@ -3398,8 +3692,8 @@
 function_test_events_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
+	struct trace_buffer *buffer;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
 	struct ftrace_entry *entry;
 	unsigned long flags;
 	long disabled;