2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -177,7 +177,7 @@
 int tracing_set_tracer(struct trace_array *tr, const char *buf);
 static void ftrace_trace_userstack(struct trace_array *tr,
 				   struct trace_buffer *buffer,
-				   unsigned int trace_ctx);
+				   unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE 100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -910,23 +910,23 @@
 
 #ifdef CONFIG_STACKTRACE
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-				 unsigned int trace_ctx,
-				 int skip, struct pt_regs *regs);
+				 unsigned long flags,
+				 int skip, int pc, struct pt_regs *regs);
 static inline void ftrace_trace_stack(struct trace_array *tr,
 				      struct trace_buffer *buffer,
-				      unsigned int trace_ctx,
-				      int skip, struct pt_regs *regs);
+				      unsigned long flags,
+				      int skip, int pc, struct pt_regs *regs);
 
 #else
 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
-					unsigned int trace_ctx,
-					int skip, struct pt_regs *regs)
+					unsigned long flags,
+					int skip, int pc, struct pt_regs *regs)
 {
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
 				      struct trace_buffer *buffer,
-				      unsigned long trace_ctx,
-				      int skip, struct pt_regs *regs)
+				      unsigned long flags,
+				      int skip, int pc, struct pt_regs *regs)
 {
 }
 
@@ -934,24 +934,24 @@
 
 static __always_inline void
 trace_event_setup(struct ring_buffer_event *event,
-		  int type, unsigned int trace_ctx)
+		  int type, unsigned long flags, int pc)
 {
 	struct trace_entry *ent = ring_buffer_event_data(event);
 
-	tracing_generic_entry_update(ent, type, trace_ctx);
+	tracing_generic_entry_update(ent, type, flags, pc);
 }
 
 static __always_inline struct ring_buffer_event *
 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
-			    unsigned int trace_ctx)
+			    unsigned long flags, int pc)
 {
 	struct ring_buffer_event *event;
 
 	event = ring_buffer_lock_reserve(buffer, len);
 	if (event != NULL)
-		trace_event_setup(event, type, trace_ctx);
+		trace_event_setup(event, type, flags, pc);
 
 	return event;
 }
@@ -1012,22 +1012,25 @@
 	struct ring_buffer_event *event;
 	struct trace_buffer *buffer;
 	struct print_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long irq_flags;
 	int alloc;
+	int pc;
 
 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 		return 0;
+
+	pc = preempt_count();
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
 		return 0;
 
 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
-	trace_ctx = tracing_gen_ctx();
+	local_save_flags(irq_flags);
 	buffer = global_trace.array_buffer.buffer;
 	ring_buffer_nest_start(buffer);
-	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
-					    trace_ctx);
+	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+					    irq_flags, pc);
 	if (!event) {
 		size = 0;
 		goto out;
@@ -1046,7 +1049,7 @@
 	entry->buf[size] = '\0';
 
 	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
  out:
 	ring_buffer_nest_end(buffer);
 	return size;
@@ -1063,22 +1066,25 @@
 	struct ring_buffer_event *event;
 	struct trace_buffer *buffer;
 	struct bputs_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long irq_flags;
 	int size = sizeof(struct bputs_entry);
 	int ret = 0;
+	int pc;
 
 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 		return 0;
 
+	pc = preempt_count();
+
 	if (unlikely(tracing_selftest_running || tracing_disabled))
 		return 0;
 
-	trace_ctx = tracing_gen_ctx();
+	local_save_flags(irq_flags);
 	buffer = global_trace.array_buffer.buffer;
 
 	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-					    trace_ctx);
+					    irq_flags, pc);
 	if (!event)
 		goto out;
 
@@ -1087,7 +1093,7 @@
 	entry->str = str;
 
 	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 
 	ret = 1;
  out:
@@ -1877,9 +1883,10 @@
 		 * place on this CPU. We fail to record, but we reset
 		 * the max trace buffer (no one writes directly to it)
 		 * and flag that it failed.
+		 * Another reason is resize is in progress.
 		 */
 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
-			"Failed to swap buffers due to commit in progress\n");
+			"Failed to swap buffers due to commit or resize in progress\n");
 	}
 
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
@@ -2173,9 +2180,11 @@
 }
 
 /* Must have trace_types_lock held */
-void tracing_reset_all_online_cpus(void)
+void tracing_reset_all_online_cpus_unlocked(void)
 {
 	struct trace_array *tr;
+
+	lockdep_assert_held(&trace_types_lock);
 
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 		if (!tr->clear_trace)
@@ -2186,6 +2195,13 @@
 		tracing_reset_online_cpus(&tr->max_buffer);
 #endif
 	}
+}
+
+void tracing_reset_all_online_cpus(void)
+{
+	mutex_lock(&trace_types_lock);
+	tracing_reset_all_online_cpus_unlocked();
+	mutex_unlock(&trace_types_lock);
 }
 
 /*
@@ -2608,52 +2624,36 @@
 }
 EXPORT_SYMBOL_GPL(trace_handle_return);
 
-static unsigned short migration_disable_value(void)
+void
+tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
+			     unsigned long flags, int pc)
 {
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
-	return current->migration_disabled;
+	struct task_struct *tsk = current;
+
+	entry->preempt_count = pc & 0xff;
+	entry->pid = (tsk) ? tsk->pid : 0;
+	entry->type = type;
+	entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 #else
-	return 0;
+		TRACE_FLAG_IRQS_NOSUPPORT |
 #endif
+		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
+		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
-
-unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
-{
-	unsigned int trace_flags = irqs_status;
-	unsigned int pc;
-
-	pc = preempt_count();
-
-	if (pc & NMI_MASK)
-		trace_flags |= TRACE_FLAG_NMI;
-	if (pc & HARDIRQ_MASK)
-		trace_flags |= TRACE_FLAG_HARDIRQ;
-	if (in_serving_softirq())
-		trace_flags |= TRACE_FLAG_SOFTIRQ;
-
-	if (tif_need_resched())
-		trace_flags |= TRACE_FLAG_NEED_RESCHED;
-	if (test_preempt_need_resched())
-		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
-
-#ifdef CONFIG_PREEMPT_LAZY
-	if (need_resched_lazy())
-		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
-#endif
-
-	return (pc & 0xff) |
-		(migration_disable_value() & 0xff) << 8 |
-		(preempt_lazy_count() & 0xff) << 16 |
-		(trace_flags << 24);
-}
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *
 trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
-			  unsigned int trace_ctx)
+			  unsigned long flags, int pc)
 {
-	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
+	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
 }
 
 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -2773,7 +2773,7 @@
 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
-				unsigned int trace_ctx)
+				unsigned long flags, int pc)
 {
 	struct ring_buffer_event *entry;
 	int val;
@@ -2786,7 +2786,7 @@
 	/* Try to use the per cpu buffer first */
 	val = this_cpu_inc_return(trace_buffered_event_cnt);
 	if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
-		trace_event_setup(entry, type, trace_ctx);
+		trace_event_setup(entry, type, flags, pc);
 		entry->array[0] = len;
 		return entry;
 	}
@@ -2794,7 +2794,7 @@
 	}
 
 	entry = __trace_buffer_lock_reserve(*current_rb,
-					    type, len, trace_ctx);
+					    type, len, flags, pc);
 	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
@@ -2803,8 +2803,8 @@
	 */
 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
 		*current_rb = temp_buffer;
-		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
-						    trace_ctx);
+		entry = __trace_buffer_lock_reserve(*current_rb,
+						    type, len, flags, pc);
 	}
 	return entry;
 }
@@ -2890,7 +2890,7 @@
 		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
 	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
					 fbuffer->event, fbuffer->entry,
-					 fbuffer->trace_ctx, fbuffer->regs);
+					 fbuffer->flags, fbuffer->pc, fbuffer->regs);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
@@ -2906,7 +2906,7 @@
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
-				     unsigned int trace_ctx,
+				     unsigned long flags, int pc,
				     struct pt_regs *regs)
 {
 	__buffer_unlock_commit(buffer, event);
@@ -2917,8 +2917,8 @@
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
-	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
-	ftrace_trace_userstack(tr, buffer, trace_ctx);
+	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
+	ftrace_trace_userstack(tr, buffer, flags, pc);
 }
 
 /*
@@ -2932,8 +2932,9 @@
 }
 
 void
-trace_function(struct trace_array *tr, unsigned long ip, unsigned long
-	       parent_ip, unsigned int trace_ctx)
+trace_function(struct trace_array *tr,
+	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+	       int pc)
 {
 	struct trace_event_call *call = &event_function;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -2941,7 +2942,7 @@
 	struct ftrace_entry *entry;
 
 	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
-					    trace_ctx);
+					    flags, pc);
 	if (!event)
 		return;
 	entry = ring_buffer_event_data(event);
@@ -2975,8 +2976,8 @@
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-				 unsigned int trace_ctx,
-				 int skip, struct pt_regs *regs)
+				 unsigned long flags,
+				 int skip, int pc, struct pt_regs *regs)
 {
 	struct trace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
@@ -3024,7 +3025,7 @@
 	size = nr_entries * sizeof(unsigned long);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
				    (sizeof(*entry) - sizeof(entry->caller)) + size,
-				    trace_ctx);
+				    flags, pc);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
@@ -3045,22 +3046,22 @@
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
-				      unsigned int trace_ctx,
-				      int skip, struct pt_regs *regs)
+				      unsigned long flags,
+				      int skip, int pc, struct pt_regs *regs)
 {
 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
+	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
 }
 
-void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
-		   int skip)
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+		   int pc)
 {
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
 	if (rcu_is_watching()) {
-		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
 		return;
 	}
 
@@ -3074,7 +3075,7 @@
 		return;
 
 	rcu_irq_enter_irqson();
-	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
 	rcu_irq_exit_irqson();
 }
 
@@ -3084,15 +3085,19 @@
  */
 void trace_dump_stack(int skip)
 {
+	unsigned long flags;
+
 	if (tracing_disabled || tracing_selftest_running)
 		return;
+
+	local_save_flags(flags);
 
 #ifndef CONFIG_UNWINDER_ORC
 	/* Skip 1 to skip this function. */
 	skip++;
 #endif
 	__ftrace_trace_stack(global_trace.array_buffer.buffer,
-			     tracing_gen_ctx(), skip, NULL);
+			     flags, skip, preempt_count(), NULL);
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
 
@@ -3101,7 +3106,7 @@
 
 static void
 ftrace_trace_userstack(struct trace_array *tr,
-		       struct trace_buffer *buffer, unsigned int trace_ctx)
+		       struct trace_buffer *buffer, unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
@@ -3128,7 +3133,7 @@
 	__this_cpu_inc(user_stack_count);
 
 	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
-					    sizeof(*entry), trace_ctx);
+					    sizeof(*entry), flags, pc);
 	if (!event)
 		goto out_drop_count;
 	entry = ring_buffer_event_data(event);
@@ -3148,7 +3153,7 @@
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
 static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
-				   unsigned int trace_ctx)
+				   unsigned long flags, int pc)
 {
 }
 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
@@ -3278,9 +3283,9 @@
 	struct trace_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct bprint_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long flags;
 	char *tbuffer;
-	int len = 0, size;
+	int len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
 		return 0;
@@ -3288,7 +3293,7 @@
 	/* Don't pollute graph traces with trace_vprintk internals */
 	pause_graph_tracing();
 
-	trace_ctx = tracing_gen_ctx();
+	pc = preempt_count();
 	preempt_disable_notrace();
 
 	tbuffer = get_trace_buf();
@@ -3302,11 +3307,12 @@
 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
 		goto out_put;
 
+	local_save_flags(flags);
 	size = sizeof(*entry) + sizeof(u32) * len;
 	buffer = tr->array_buffer.buffer;
 	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-					    trace_ctx);
+					    flags, pc);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
@@ -3316,7 +3322,7 @@
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
 	}
 
 out:
@@ -3339,9 +3345,9 @@
 {
 	struct trace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
-	int len = 0, size;
+	int len = 0, size, pc;
 	struct print_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long flags;
 	char *tbuffer;
 
 	if (tracing_disabled || tracing_selftest_running)
@@ -3350,7 +3356,7 @@
 	/* Don't pollute graph traces with trace_vprintk internals */
 	pause_graph_tracing();
 
-	trace_ctx = tracing_gen_ctx();
+	pc = preempt_count();
 	preempt_disable_notrace();
 
 
@@ -3362,10 +3368,11 @@
 
 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 
+	local_save_flags(flags);
 	size = sizeof(*entry) + len + 1;
 	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					    trace_ctx);
+					    flags, pc);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
@@ -3374,7 +3381,7 @@
 	memcpy(&entry->buf, tbuffer, len + 1);
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
+		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
 	}
 
 out:
@@ -3720,8 +3727,15 @@
	 * will point to the same string as current_trace->name.
	 */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
+		/* Close iter->trace before switching to the new current tracer */
+		if (iter->trace->close)
+			iter->trace->close(iter);
 		*iter->trace = *tr->current_trace;
+		/* Reopen the new current tracer */
+		if (iter->trace->open)
+			iter->trace->open(iter);
+	}
 	mutex_unlock(&trace_types_lock);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -3840,17 +3854,14 @@
 
 static void print_lat_help_header(struct seq_file *m)
 {
-	seq_puts(m, "#                  _--------=> CPU#              \n"
-		    "#                 / _-------=> irqs-off          \n"
-		    "#                | / _------=> need-resched      \n"
-		    "#                || / _-----=> need-resched-lazy \n"
-		    "#                ||| / _----=> hardirq/softirq   \n"
-		    "#                |||| / _---=> preempt-depth     \n"
-		    "#                ||||| / _--=> preempt-lazy-depth\n"
-		    "#                |||||| / _-=> migrate-disable   \n"
-		    "#                ||||||| /     delay             \n"
-		    "#  cmd     pid   |||||||| time  |   caller       \n"
-		    "#     \\   /      ||||||||  \\    |   /          \n");
+	seq_puts(m, "#                  _------=> CPU#            \n"
+		    "#                 / _-----=> irqs-off        \n"
+		    "#                | / _----=> need-resched    \n"
+		    "#                || / _---=> hardirq/softirq \n"
+		    "#                ||| / _--=> preempt-depth   \n"
+		    "#                |||| /     delay            \n"
+		    "#  cmd     pid   ||||| time  |   caller      \n"
+		    "#     \\   /      |||||  \\    |   /         \n");
 }
 
 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
@@ -3884,16 +3895,13 @@
 
 	print_event_info(buf, m);
 
-	seq_printf(m, "#                            %.*s  _-------=> irqs-off\n", prec, space);
-	seq_printf(m, "#                            %.*s / _------=> need-resched\n", prec, space);
-	seq_printf(m, "#                            %.*s| / _-----=> need-resched-lazy\n", prec, space);
-	seq_printf(m, "#                            %.*s|| / _----=> hardirq/softirq\n", prec, space);
-	seq_printf(m, "#                            %.*s||| / _---=> preempt-depth\n", prec, space);
-	seq_printf(m, "#                            %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);
-	seq_printf(m, "#                            %.*s||||| / _-=> migrate-disable\n", prec, space);
-	seq_printf(m, "#                            %.*s|||||| /     delay\n", prec, space);
-	seq_printf(m, "#           TASK-PID %.*s CPU#  |||||||   TIMESTAMP  FUNCTION\n", prec, "   TGID   ");
-	seq_printf(m, "#              | |   %.*s   |   |||||||      |         |\n", prec, "     |    ");
+	seq_printf(m, "#                            %.*s  _-----=> irqs-off\n", prec, space);
+	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
+	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
+	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
+	seq_printf(m, "#                            %.*s||| /     delay\n", prec, space);
+	seq_printf(m, "#           TASK-PID %.*s CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "   TGID   ");
+	seq_printf(m, "#              | |   %.*s   |   ||||      |         |\n", prec, "     |    ");
 }
 
 void
@@ -4486,6 +4494,33 @@
 	return 0;
 }
 
+/*
+ * The private pointer of the inode is the trace_event_file.
+ * Update the tr ref count associated to it.
+ */
+int tracing_open_file_tr(struct inode *inode, struct file *filp)
+{
+	struct trace_event_file *file = inode->i_private;
+	int ret;
+
+	ret = tracing_check_open_get_tr(file->tr);
+	if (ret)
+		return ret;
+
+	filp->private_data = inode->i_private;
+
+	return 0;
+}
+
+int tracing_release_file_tr(struct inode *inode, struct file *filp)
+{
+	struct trace_event_file *file = inode->i_private;
+
+	trace_array_put(file->tr);
+
+	return 0;
+}
+
 static int tracing_release(struct inode *inode, struct file *file)
 {
 	struct trace_array *tr = inode->i_private;
@@ -4715,6 +4750,8 @@
 static const struct file_operations tracing_fops = {
 	.open = tracing_open,
 	.read = seq_read,
+	.read_iter = seq_read_iter,
+	.splice_read = generic_file_splice_read,
 	.write = tracing_write_stub,
 	.llseek = tracing_lseek,
 	.release = tracing_release,
@@ -4774,11 +4811,17 @@
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
+#endif
 		}
 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
+#endif
 		}
 	}
 	arch_spin_unlock(&tr->max_lock);
@@ -6249,6 +6292,7 @@
 	mutex_unlock(&trace_types_lock);
 
 	free_cpumask_var(iter->started);
+	kfree(iter->temp);
 	mutex_destroy(&iter->mutex);
 	kfree(iter);
 
@@ -6381,7 +6425,20 @@
 
 		ret = print_trace_line(iter);
 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
-			/* don't print partial lines */
+			/*
+			 * If one print_trace_line() fills entire trace_seq in one shot,
+			 * trace_seq_to_user() will returns -EBUSY because save_len == 0,
+			 * In this case, we need to consume it, otherwise, loop will peek
+			 * this event next time, resulting in an infinite loop.
+			 */
+			if (save_len == 0) {
+				iter->seq.full = 0;
+				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
+				trace_consume(iter);
+				break;
+			}
+
+			/* In other cases, don't print partial lines */
 			iter->seq.seq.len = save_len;
 			break;
 		}
@@ -6698,6 +6755,7 @@
 	enum event_trigger_type tt = ETT_NONE;
 	struct trace_buffer *buffer;
 	struct print_entry *entry;
+	unsigned long irq_flags;
 	ssize_t written;
 	int size;
 	int len;
@@ -6717,6 +6775,7 @@
 
 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
+	local_save_flags(irq_flags);
 	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
 
 	/* If less than "<faulted>", then make sure we can still add that */
@@ -6725,7 +6784,7 @@
 
 	buffer = tr->array_buffer.buffer;
 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					    tracing_gen_ctx());
+					    irq_flags, preempt_count());
 	if (unlikely(!event))
 		/* Ring buffer disabled, return as if not open for write */
 		return -EBADF;
@@ -6777,6 +6836,7 @@
 	struct ring_buffer_event *event;
 	struct trace_buffer *buffer;
 	struct raw_data_entry *entry;
+	unsigned long irq_flags;
 	ssize_t written;
 	int size;
 	int len;
@@ -6798,13 +6858,14 @@
 
 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
+	local_save_flags(irq_flags);
 	size = sizeof(*entry) + cnt;
 	if (cnt < FAULT_SIZE_ID)
 		size += FAULT_SIZE_ID - cnt;
 
 	buffer = tr->array_buffer.buffer;
 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
-					    tracing_gen_ctx());
+					    irq_flags, preempt_count());
 	if (!event)
 		/* Ring buffer disabled, return as if not open for write */
 		return -EBADF;
@@ -7032,6 +7093,11 @@
 	return ret;
 }
 
+static void tracing_swap_cpu_buffer(void *tr)
+{
+	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
+}
+
 static ssize_t
 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
@@ -7090,13 +7156,15 @@
 		ret = tracing_alloc_snapshot_instance(tr);
 		if (ret < 0)
 			break;
-		local_irq_disable();
 		/* Now, we're going to swap */
-		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+			local_irq_disable();
 			update_max_tr(tr, current, smp_processor_id(), NULL);
-		else
-			update_max_tr_single(tr, current, iter->cpu_file);
-		local_irq_enable();
+			local_irq_enable();
+		} else {
+			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
+						 (void *)tr, 1);
+		}
 		break;
 	default:
 		if (tr->allocated_snapshot) {
@@ -7185,10 +7253,11 @@
 #endif
 
 static const struct file_operations set_tracer_fops = {
-	.open = tracing_open_generic,
+	.open = tracing_open_generic_tr,
 	.read = tracing_set_trace_read,
 	.write = tracing_set_trace_write,
 	.llseek = generic_file_llseek,
+	.release = tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_pipe_fops = {
@@ -7511,7 +7580,7 @@
 	.open = tracing_err_log_open,
 	.write = tracing_err_log_write,
 	.read = seq_read,
-	.llseek = seq_lseek,
+	.llseek = tracing_lseek,
 	.release = tracing_err_log_release,
 };
 
@@ -8227,12 +8296,33 @@
 	return cnt;
 }
 
+static int tracing_open_options(struct inode *inode, struct file *filp)
+{
+	struct trace_option_dentry *topt = inode->i_private;
+	int ret;
+
+	ret = tracing_check_open_get_tr(topt->tr);
+	if (ret)
+		return ret;
+
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static int tracing_release_options(struct inode *inode, struct file *file)
+{
+	struct trace_option_dentry *topt = file->private_data;
+
+	trace_array_put(topt->tr);
+	return 0;
+}
 
 static const struct file_operations trace_options_fops = {
-	.open = tracing_open_generic,
+	.open = tracing_open_options,
 	.read = trace_options_read,
 	.write = trace_options_write,
 	.llseek = generic_file_llseek,
+	.release = tracing_release_options,
 };
 
 /*
@@ -8562,9 +8652,6 @@
 	if (val > 100)
 		return -EINVAL;
 
-	if (!val)
-		val = 1;
-
 	tr->buffer_percent = val;
 
 	(*ppos)++;
@@ -8889,6 +8976,7 @@
 	ftrace_destroy_function_files(tr);
 	tracefs_remove(tr->dir);
 	free_trace_buffers(tr);
+	clear_tracing_err_log(tr);
 
 	for (i = 0; i < tr->nr_topts; i++) {
 		kfree(tr->topts[i].topts);
@@ -9391,6 +9479,7 @@
 	tracing_off();
 
 	local_irq_save(flags);
+	printk_nmi_direct_enter();
 
 	/* Simulate the iterator */
 	trace_init_global_iter(&iter);
@@ -9478,6 +9567,7 @@
 		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
 	}
 	atomic_dec(&dump_running);
+	printk_nmi_direct_exit();
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(ftrace_dump);
@@ -9709,6 +9799,8 @@
 		static_key_enable(&tracepoint_printk_key.key);
 	}
 	tracer_alloc_buffers();
+
+	init_events();
 }
 
 void __init trace_init(void)