hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/trace/trace.c
@@ -177,7 +177,7 @@
 int tracing_set_tracer(struct trace_array *tr, const char *buf);
 static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
-				   unsigned int trace_ctx);
+				   unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE		100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
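For orientation, a sketch (editorial aside, not part of the patch) of the two calling conventions this diff converts between. The '-' side is the upstream form that packs irqflags and preempt count into a single context word; the '+' side, restored by this patch, gathers and passes them separately:

	/* '-' side: one packed context word, built by tracing_gen_ctx() */
	unsigned int trace_ctx = tracing_gen_ctx();
	event = trace_buffer_lock_reserve(buffer, type, len, trace_ctx);

	/* '+' side: explicit irqflags plus preempt count */
	unsigned long flags;
	int pc = preempt_count();

	local_save_flags(flags);
	event = trace_buffer_lock_reserve(buffer, type, len, flags, pc);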
@@ -910,23 +910,23 @@
 
 #ifdef CONFIG_STACKTRACE
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-				 unsigned int trace_ctx,
-				 int skip, struct pt_regs *regs);
+				 unsigned long flags,
+				 int skip, int pc, struct pt_regs *regs);
 static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
-				      unsigned int trace_ctx,
-				      int skip, struct pt_regs *regs);
+				      unsigned long flags,
+				      int skip, int pc, struct pt_regs *regs);
 
 #else
 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
-					unsigned int trace_ctx,
-					int skip, struct pt_regs *regs)
+					unsigned long flags,
+					int skip, int pc, struct pt_regs *regs)
 {
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
-				      unsigned long trace_ctx,
-				      int skip, struct pt_regs *regs)
+				      unsigned long flags,
+				      int skip, int pc, struct pt_regs *regs)
 {
 }
 
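One editorial note on this hunk: both arms of the #ifdef must expose identical prototypes, because call sites elsewhere in the file invoke the helpers unconditionally; with CONFIG_STACKTRACE disabled the empty inlines simply compile away. For example, this call (taken from a later hunk) builds in either configuration:

	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);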
@@ -934,24 +934,24 @@
 
 static __always_inline void
 trace_event_setup(struct ring_buffer_event *event,
-		  int type, unsigned int trace_ctx)
+		  int type, unsigned long flags, int pc)
 {
	struct trace_entry *ent = ring_buffer_event_data(event);
 
-	tracing_generic_entry_update(ent, type, trace_ctx);
+	tracing_generic_entry_update(ent, type, flags, pc);
 }
 
 static __always_inline struct ring_buffer_event *
 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
-			    unsigned int trace_ctx)
+			    unsigned long flags, int pc)
 {
	struct ring_buffer_event *event;
 
	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
-		trace_event_setup(event, type, trace_ctx);
+		trace_event_setup(event, type, flags, pc);
 
	return event;
 }
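__trace_buffer_lock_reserve() anchors the reserve/fill/commit pattern that every event writer in the rest of the patch follows. A condensed sketch of that pattern, assembled from the hunks below (illustrative only):

	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, flags, pc);
	if (!event)
		goto out;		/* ring buffer full or tracing disabled */
	entry = ring_buffer_event_data(event);
	/* ... fill in the type-specific payload ... */
	__buffer_unlock_commit(buffer, event);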
@@ -1012,22 +1012,25 @@
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long irq_flags;
	int alloc;
+	int pc;
 
	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;
+
+	pc = preempt_count();
 
	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;
 
	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
-	trace_ctx = tracing_gen_ctx();
+	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
-	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
-					    trace_ctx);
+	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+					    irq_flags, pc);
	if (!event) {
		size = 0;
		goto out;
@@ -1046,7 +1049,7 @@
	entry->buf[size] = '\0';
 
	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
  out:
	ring_buffer_nest_end(buffer);
	return size;
@@ -1063,22 +1066,25 @@
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int ret = 0;
+	int pc;
 
	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;
 
+	pc = preempt_count();
+
	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;
 
-	trace_ctx = tracing_gen_ctx();
+	local_save_flags(irq_flags);
	buffer = global_trace.array_buffer.buffer;
 
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-					    trace_ctx);
+					    irq_flags, pc);
	if (!event)
		goto out;
 
@@ -1087,7 +1093,7 @@
	entry->str = str;
 
	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
+	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 
	ret = 1;
  out:
@@ -2608,52 +2614,36 @@
 }
 EXPORT_SYMBOL_GPL(trace_handle_return);
 
-static unsigned short migration_disable_value(void)
+void
+tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
+			     unsigned long flags, int pc)
 {
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
-	return current->migration_disabled;
+	struct task_struct *tsk = current;
+
+	entry->preempt_count	= pc & 0xff;
+	entry->pid		= (tsk) ? tsk->pid : 0;
+	entry->type		= type;
+	entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 #else
-	return 0;
+		TRACE_FLAG_IRQS_NOSUPPORT |
 #endif
+		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
+		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
-
-unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
-{
-	unsigned int trace_flags = irqs_status;
-	unsigned int pc;
-
-	pc = preempt_count();
-
-	if (pc & NMI_MASK)
-		trace_flags |= TRACE_FLAG_NMI;
-	if (pc & HARDIRQ_MASK)
-		trace_flags |= TRACE_FLAG_HARDIRQ;
-	if (in_serving_softirq())
-		trace_flags |= TRACE_FLAG_SOFTIRQ;
-
-	if (tif_need_resched())
-		trace_flags |= TRACE_FLAG_NEED_RESCHED;
-	if (test_preempt_need_resched())
-		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
-
-#ifdef CONFIG_PREEMPT_LAZY
-	if (need_resched_lazy())
-		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
-#endif
-
-	return (pc & 0xff) |
-		(migration_disable_value() & 0xff) << 8 |
-		(preempt_lazy_count() & 0xff) << 16 |
-		(trace_flags << 24);
-}
+EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *
 trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
-			  unsigned int trace_ctx)
+			  unsigned long flags, int pc)
 {
-	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
+	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
 }
 
 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
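An aside on the '-' side of this hunk: tracing_gen_ctx_irq_test() packed four byte-sized fields into the single trace_ctx word, which is exactly the information the restored tracing_generic_entry_update() now derives from (flags, pc) instead. The layout, read off the removed return statement (the accessor names below are hypothetical, for illustration only):

	/* trace_ctx layout used by the removed code:
	 *   bits  0- 7  preempt count        (pc & 0xff)
	 *   bits  8-15  migration-disable depth
	 *   bits 16-23  preempt-lazy count
	 *   bits 24-31  TRACE_FLAG_* bits (irqs-off, NMI, hardirq, softirq, ...)
	 */
	static inline unsigned int ctx_preempt_count(unsigned int ctx)   { return ctx & 0xff; }
	static inline unsigned int ctx_migrate_disable(unsigned int ctx) { return (ctx >> 8) & 0xff; }
	static inline unsigned int ctx_preempt_lazy(unsigned int ctx)    { return (ctx >> 16) & 0xff; }
	static inline unsigned int ctx_trace_flags(unsigned int ctx)     { return ctx >> 24; }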
@@ -2773,7 +2763,7 @@
 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
-				unsigned int trace_ctx)
+				unsigned long flags, int pc)
 {
	struct ring_buffer_event *entry;
	int val;
@@ -2786,7 +2776,7 @@
	/* Try to use the per cpu buffer first */
	val = this_cpu_inc_return(trace_buffered_event_cnt);
	if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
-		trace_event_setup(entry, type, trace_ctx);
+		trace_event_setup(entry, type, flags, pc);
		entry->array[0] = len;
		return entry;
	}
@@ -2794,7 +2784,7 @@
	}
 
	entry = __trace_buffer_lock_reserve(*current_rb,
-					    type, len, trace_ctx);
+					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
@@ -2803,8 +2793,8 @@
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
-		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
-						    trace_ctx);
+		entry = __trace_buffer_lock_reserve(*current_rb,
+						    type, len, flags, pc);
	}
	return entry;
 }
@@ -2890,7 +2880,7 @@
		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
-				    fbuffer->trace_ctx, fbuffer->regs);
+				    fbuffer->flags, fbuffer->pc, fbuffer->regs);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
@@ -2906,7 +2896,7 @@
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
-				     unsigned int trace_ctx,
+				     unsigned long flags, int pc,
				     struct pt_regs *regs)
 {
	__buffer_unlock_commit(buffer, event);
@@ -2917,8 +2907,8 @@
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
-	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
-	ftrace_trace_userstack(tr, buffer, trace_ctx);
+	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
+	ftrace_trace_userstack(tr, buffer, flags, pc);
 }
 
 /*
@@ -2932,8 +2922,9 @@
 }
 
 void
-trace_function(struct trace_array *tr, unsigned long ip, unsigned long
-	       parent_ip, unsigned int trace_ctx)
+trace_function(struct trace_array *tr,
+	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+	       int pc)
 {
	struct trace_event_call *call = &event_function;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -2941,7 +2932,7 @@
	struct ftrace_entry *entry;
 
	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
-					    trace_ctx);
+					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
@@ -2975,8 +2966,8 @@
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-				 unsigned int trace_ctx,
-				 int skip, struct pt_regs *regs)
+				 unsigned long flags,
+				 int skip, int pc, struct pt_regs *regs)
 {
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
@@ -3024,7 +3015,7 @@
	size = nr_entries * sizeof(unsigned long);
	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
				    (sizeof(*entry) - sizeof(entry->caller)) + size,
-				    trace_ctx);
+				    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
@@ -3045,22 +3036,22 @@
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
-				      unsigned int trace_ctx,
-				      int skip, struct pt_regs *regs)
+				      unsigned long flags,
+				      int skip, int pc, struct pt_regs *regs)
 {
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;
 
-	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
+	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
 }
 
-void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
-		   int skip)
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+		   int pc)
 {
	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
	if (rcu_is_watching()) {
-		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		return;
	}
 
@@ -3074,7 +3065,7 @@
		return;
 
	rcu_irq_enter_irqson();
-	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
+	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
	rcu_irq_exit_irqson();
 }
 
@@ -3084,15 +3075,19 @@
  */
 void trace_dump_stack(int skip)
 {
+	unsigned long flags;
+
	if (tracing_disabled || tracing_selftest_running)
		return;
+
+	local_save_flags(flags);
 
 #ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
 #endif
	__ftrace_trace_stack(global_trace.array_buffer.buffer,
-			     tracing_gen_ctx(), skip, NULL);
+			     flags, skip, preempt_count(), NULL);
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
 
@@ -3101,7 +3096,7 @@
 
 static void
 ftrace_trace_userstack(struct trace_array *tr,
-		       struct trace_buffer *buffer, unsigned int trace_ctx)
+		       struct trace_buffer *buffer, unsigned long flags, int pc)
 {
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
@@ -3128,7 +3123,7 @@
	__this_cpu_inc(user_stack_count);
 
	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
-					    sizeof(*entry), trace_ctx);
+					    sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);
@@ -3148,7 +3143,7 @@
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
 static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
-				   unsigned int trace_ctx)
+				   unsigned long flags, int pc)
 {
 }
 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
@@ -3278,9 +3273,9 @@
	struct trace_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long flags;
	char *tbuffer;
-	int len = 0, size;
+	int len = 0, size, pc;
 
	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;
@@ -3288,7 +3283,7 @@
	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();
 
-	trace_ctx = tracing_gen_ctx();
+	pc = preempt_count();
	preempt_disable_notrace();
 
	tbuffer = get_trace_buf();
@@ -3302,11 +3297,12 @@
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out_put;
 
+	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-					    trace_ctx);
+					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
@@ -3316,7 +3312,7 @@
	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}
 
 out:
@@ -3339,9 +3335,9 @@
 {
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
-	int len = 0, size;
+	int len = 0, size, pc;
	struct print_entry *entry;
-	unsigned int trace_ctx;
+	unsigned long flags;
	char *tbuffer;
 
	if (tracing_disabled || tracing_selftest_running)
@@ -3350,7 +3346,7 @@
	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();
 
-	trace_ctx = tracing_gen_ctx();
+	pc = preempt_count();
	preempt_disable_notrace();
 
 
@@ -3362,10 +3358,11 @@
 
	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 
+	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					    trace_ctx);
+					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
@@ -3374,7 +3371,7 @@
	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
+		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}
 
 out:
@@ -3840,17 +3837,14 @@
 
 static void print_lat_help_header(struct seq_file *m)
 {
-	seq_puts(m, "#                  _--------=> CPU#              \n"
-		    "#                 / _-------=> irqs-off          \n"
-		    "#                | / _------=> need-resched      \n"
-		    "#                || / _-----=> need-resched-lazy \n"
-		    "#                ||| / _----=> hardirq/softirq   \n"
-		    "#                |||| / _---=> preempt-depth     \n"
-		    "#                ||||| / _--=> preempt-lazy-depth\n"
-		    "#                |||||| / _-=> migrate-disable   \n"
-		    "#                ||||||| /     delay             \n"
-		    "#  cmd     pid   |||||||| time  |   caller       \n"
-		    "#     \\   /      ||||||||  \\    |   /          \n");
+	seq_puts(m, "#                  _------=> CPU#            \n"
+		    "#                 / _-----=> irqs-off        \n"
+		    "#                | / _----=> need-resched    \n"
+		    "#                || / _---=> hardirq/softirq \n"
+		    "#                ||| / _--=> preempt-depth   \n"
+		    "#                |||| /     delay            \n"
+		    "#  cmd     pid   ||||| time  |   caller      \n"
+		    "#     \\   /      |||||  \\    |   /         \n");
 }
 
 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
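Aside: with the need-resched-lazy, preempt-lazy-depth and migrate-disable columns dropped, the latency header is back to five flag characters per entry. A line in this format looks roughly like the following (illustrative, in the style of the ftrace documentation; the five characters after the CPU number are irqs-off, need-resched, hardirq/softirq and preempt-depth):

	  <idle>-0       0d.h. 4us : irq_enter <-smp_apic_timer_interrupt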
@@ -3884,16 +3878,13 @@
 
	print_event_info(buf, m);
 
-	seq_printf(m, "#                            %.*s  _-------=> irqs-off\n", prec, space);
-	seq_printf(m, "#                            %.*s / _------=> need-resched\n", prec, space);
-	seq_printf(m, "#                            %.*s| / _-----=> need-resched-lazy\n", prec, space);
-	seq_printf(m, "#                            %.*s|| / _----=> hardirq/softirq\n", prec, space);
-	seq_printf(m, "#                            %.*s||| / _---=> preempt-depth\n", prec, space);
-	seq_printf(m, "#                            %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);
-	seq_printf(m, "#                            %.*s||||| / _-=> migrate-disable\n", prec, space);
-	seq_printf(m, "#                            %.*s|||||| /    delay\n", prec, space);
-	seq_printf(m, "#           TASK-PID  %.*s   CPU#  |||||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
-	seq_printf(m, "#              | |    %.*s     |   |||||||      |         |\n", prec, "       |    ");
+	seq_printf(m, "#                            %.*s  _-----=> irqs-off\n", prec, space);
+	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
+	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
+	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
+	seq_printf(m, "#                            %.*s||| /     delay\n", prec, space);
+	seq_printf(m, "#           TASK-PID  %.*s   CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
+	seq_printf(m, "#              | |    %.*s     |   ||||      |         |\n", prec, "       |    ");
 }
 
 void
@@ -6698,6 +6689,7 @@
	enum event_trigger_type tt = ETT_NONE;
	struct trace_buffer *buffer;
	struct print_entry *entry;
+	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;
@@ -6717,6 +6709,7 @@
 
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
+	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
 
	/* If less than "<faulted>", then make sure we can still add that */
@@ -6725,7 +6718,7 @@
 
	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					    tracing_gen_ctx());
+					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;
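This function backs the trace_marker file, so the path above is what a userspace write exercises. A minimal userspace sketch (assuming tracefs is mounted at /sys/kernel/tracing):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

		if (fd < 0)
			return 1;
		/* lands in the ring buffer as a TRACE_PRINT event */
		write(fd, "hello from userspace\n", 21);
		close(fd);
		return 0;
	}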
@@ -6777,6 +6770,7 @@
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct raw_data_entry *entry;
+	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;
@@ -6798,13 +6792,14 @@
 
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
+	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;
 
	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
-					    tracing_gen_ctx());
+					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;
@@ -9391,6 +9386,7 @@
	tracing_off();
 
	local_irq_save(flags);
+	printk_nmi_direct_enter();
 
	/* Simulate the iterator */
	trace_init_global_iter(&iter);
@@ -9478,6 +9474,7 @@
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
+	printk_nmi_direct_exit();
	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(ftrace_dump);
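The two hunks above pair up: ftrace_dump() can fire from NMI context, so the dump loop is bracketed by printk_nmi_direct_enter()/printk_nmi_direct_exit(), which let printk() write straight to the consoles instead of deferring. The restored shape, condensed (sketch):

	local_irq_save(flags);
	printk_nmi_direct_enter();	/* printk output goes directly to consoles, even in NMI */
	/* ... walk every CPU's ring buffer via the simulated iterator ... */
	printk_nmi_direct_exit();
	local_irq_restore(flags);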